Skip to content

Commit 33488ee

Browse files
authored
types/examples: add tool_name to message and examples (#537)
1 parent 63ca747 commit 33488ee

File tree

6 files changed

+95
-2
lines changed

6 files changed

+95
-2
lines changed

examples/README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ See [ollama/docs/api.md](https://github.com/ollama/ollama/blob/main/docs/api.md)
2525
### Tools/Function Calling - Call a function with a model
2626
- [tools.py](tools.py) - Simple example of Tools/Function Calling
2727
- [async-tools.py](async-tools.py) - Async example of Tools/Function Calling
28+
- [multi-tool.py](multi-tool.py) - Using multiple tools, with thinking enabled
2829

2930

3031
### Multimodal with Images - Chat with a multimodal (image chat) model

examples/async-tools.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ async def main():
7676
if response.message.tool_calls:
7777
# Add the function response to messages for the model to use
7878
messages.append(response.message)
79-
messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name})
79+
messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
8080

8181
# Get final response from model with function outputs
8282
final_response = await client.chat('llama3.1', messages=messages)

examples/multi-tool.py

Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
import random
2+
from typing import Iterator
3+
4+
from ollama import ChatResponse, Client
5+
6+
7+
def get_temperature(city: str) -> str:
  """
  Get the temperature for a city in Celsius.

  Args:
    city (str): The name of the city

  Returns:
    str: The current temperature as a human-readable string
    (e.g. '21 degrees Celsius'), or 'Unknown city' if the city
    is not one of the supported cities.
  """
  # This is a mock implementation - would need to use a real weather API
  if city not in ['London', 'Paris', 'New York', 'Tokyo', 'Sydney']:
    return 'Unknown city'

  # Uses the module-level `random` import; the previous function-local
  # re-import was redundant.
  return str(random.randint(0, 35)) + ' degrees Celsius'
24+
25+
26+
def get_conditions(city: str) -> str:
  """
  Get the weather conditions for a city.
  """
  known_cities = ['London', 'Paris', 'New York', 'Tokyo', 'Sydney']
  if city not in known_cities:
    return 'Unknown city'
  # This is a mock implementation - would need to use a real weather API
  return random.choice(['sunny', 'cloudy', 'rainy', 'snowy'])
35+
36+
37+
# Map tool names (as the model reports them) to the local implementations.
available_functions = {
  'get_temperature': get_temperature,
  'get_conditions': get_conditions,
}


cities = ['London', 'Paris', 'New York', 'Tokyo', 'Sydney']
city = random.choice(cities)
city2 = random.choice(cities)
messages = [{'role': 'user', 'content': f'What is the temperature in {city}? and what are the weather conditions in {city2}?'}]
print('----- Prompt:', messages[0]['content'], '\n')

model = 'qwen3'
client = Client()
response: Iterator[ChatResponse] = client.chat(model, stream=True, messages=messages, tools=[get_temperature, get_conditions], think=True)

# Stream the model's thinking/content, executing any tool calls as they arrive.
for part in response:
  if part.message.thinking:
    print(part.message.thinking, end='', flush=True)
  if part.message.content:
    print(part.message.content, end='', flush=True)
  if part.message.tool_calls:
    for call in part.message.tool_calls:
      fn = available_functions.get(call.function.name)
      if fn is None:
        print('Function', call.function.name, 'not found')
        continue
      print('\nCalling function:', call.function.name, 'with arguments:', call.function.arguments)
      output = fn(**call.function.arguments)
      print('> Function output:', output, '\n')

      # Add the assistant message and tool call result to the messages
      messages.append(part.message)
      messages.append({'role': 'tool', 'content': str(output), 'tool_name': call.function.name})

print('----- Sending result back to model \n')
if any(msg.get('role') == 'tool' for msg in messages):
  final_stream = client.chat(model, stream=True, tools=[get_temperature, get_conditions], messages=messages, think=True)
  printed_header = False
  for part in final_stream:
    if part.message.thinking:
      print(part.message.thinking, end='', flush=True)
    if part.message.content:
      # Emit the header once, when thinking gives way to the final answer.
      if not printed_header:
        print('\n----- Final result:')
        printed_header = True
      print(part.message.content, end='', flush=True)
    if part.message.tool_calls:
      # Model should be explaining the tool calls and the results in this output
      print('Model returned tool calls:')
      print(part.message.tool_calls)
else:
  print('No tool calls returned')

examples/tools.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ def subtract_two_numbers(a: int, b: int) -> int:
7474
if response.message.tool_calls:
7575
# Add the function response to messages for the model to use
7676
messages.append(response.message)
77-
messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name})
77+
messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
7878

7979
# Get final response from model with function outputs
8080
final_response = chat('llama3.1', messages=messages)

ollama/_types.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -284,6 +284,9 @@ class Message(SubscriptableBaseModel):
284284
Valid image formats depend on the model. See the model card for more information.
285285
"""
286286

287+
tool_name: Optional[str] = None
288+
'Name of the executed tool.'
289+
287290
class ToolCall(SubscriptableBaseModel):
288291
"""
289292
Model tool calls.

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ select = [
6060
'FLY', # flynt
6161
'RUF', # ruff-specific rules
6262
]
63+
ignore = ['FBT001'] # Boolean-typed positional argument in function definition
6364

6465
[tool.pytest.ini_options]
6566
addopts = ['--doctest-modules']

0 commit comments

Comments
 (0)