agentic-blocks 0.1.7__tar.gz → 0.1.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agentic_blocks-0.1.7/src/agentic_blocks.egg-info → agentic_blocks-0.1.8}/PKG-INFO +112 -57
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/README.md +111 -56
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/pyproject.toml +1 -1
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8/src/agentic_blocks.egg-info}/PKG-INFO +112 -57
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/LICENSE +0 -0
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/setup.cfg +0 -0
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/src/agentic_blocks/__init__.py +0 -0
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/src/agentic_blocks/llm.py +0 -0
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/src/agentic_blocks/mcp_client.py +0 -0
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/src/agentic_blocks/messages.py +0 -0
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/src/agentic_blocks.egg-info/SOURCES.txt +0 -0
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/src/agentic_blocks.egg-info/dependency_links.txt +0 -0
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/src/agentic_blocks.egg-info/requires.txt +0 -0
- {agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/src/agentic_blocks.egg-info/top_level.txt +0 -0
{agentic_blocks-0.1.7/src/agentic_blocks.egg-info → agentic_blocks-0.1.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agentic-blocks
-Version: 0.1.7
+Version: 0.1.8
 Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
 Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
 License: MIT
@@ -187,66 +187,119 @@ Or pass the API key directly:
@@ -284,6 +337,8 @@ Messages(system_prompt=None, user_prompt=None, add_date_and_time=False)

(The long-description hunks in PKG-INFO are identical to the README.md diff shown in full below; setuptools embeds README.md as the package description, offset here by the 33-line metadata header.)
{agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/README.md

@@ -154,66 +154,119 @@ Or pass the API key directly:
 response = call_llm(messages, api_key="your_api_key_here")
 ```
 
-## Complete Example - …
+## Complete Example - Tool Calling with Weather API
+
+This example demonstrates a complete workflow using function calling with an LLM. For a full interactive notebook version, see `notebooks/agentic_example.ipynb`.
 
 ```python
-from agentic_blocks import …
-… (22 removed lines not rendered in the diff view)
-tool_call = {
-    "id": "search_001",
-    "type": "function",
-    "function": {
-        "name": search_tool["function"]["name"],
-        "arguments": '{"query": "latest AI news"}'
-    }
+from agentic_blocks import call_llm, Messages
+
+# Define tools in OpenAI function calling format
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_weather",
+            "description": "Get current weather information for a location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The city and state, e.g. San Francisco, CA"
+                    },
+                    "unit": {
+                        "type": "string",
+                        "enum": ["celsius", "fahrenheit"],
+                        "description": "Temperature unit"
+                    }
+                },
+                "required": ["location"]
             }
-… (23 removed lines not rendered in the diff view)
+        }
+    },
+    {
+        "type": "function",
+        "function": {
+            "name": "calculate",
+            "description": "Perform a mathematical calculation",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "expression": {
+                        "type": "string",
+                        "description": "Mathematical expression to evaluate"
+                    }
+                },
+                "required": ["expression"]
+            }
+        }
+    }
+]
+
+# Create conversation with system and user prompts
+messages = Messages(
+    system_prompt="You are a helpful assistant with access to weather and calculation tools.",
+    user_prompt="What is the weather in Stockholm?"
+)
+
+# Call LLM with tools - it will decide which tools to call
+model = "gpt-4o-mini"  # or your preferred model
+response = call_llm(model=model, messages=messages, tools=tools)
+
+# Add the LLM's response (including any tool calls) to conversation
+messages.add_response_message(response)
 
-… (2 removed lines not rendered in the diff view)
+# Display the conversation so far
+for message in messages.get_messages():
+    print(message)
+
+# Check if there are pending tool calls that need execution
+print("Has pending tool calls:", messages.has_pending_tool_calls())
+
+# In a real implementation, you would:
+# 1. Execute the actual tool calls (get_weather, calculate, etc.)
+# 2. Add tool responses using messages.add_tool_response()
+# 3. Call the LLM again to get the final user-facing response
+```
+
+**Expected Output:**
+```
+{'role': 'system', 'content': 'You are a helpful assistant with access to weather and calculation tools.'}
+{'role': 'user', 'content': 'What is the weather in Stockholm?'}
+{'role': 'assistant', 'content': '', 'tool_calls': [{'id': 'call_abc123', 'type': 'function', 'function': {'name': 'get_weather', 'arguments': '{"location": "Stockholm, Sweden", "unit": "celsius"}'}}]}
+Has pending tool calls: True
+```
+
+**Key Features Demonstrated:**
+- **Messages management**: Clean conversation history with system/user prompts
+- **Tool calling**: LLM automatically decides to call the `get_weather` function
+- **Response handling**: `add_response_message()` handles both content and tool calls
+- **Pending detection**: `has_pending_tool_calls()` identifies when tools need execution
+
+**Next Steps:**
+After the LLM makes tool calls, you would implement the actual tool functions and continue the conversation:
+
+```python
+# Implement actual weather function
+def get_weather(location, unit="celsius"):
+    # Your weather API implementation here
+    return f"The weather in {location} is sunny, 22°{unit[0].upper()}"
+
+# Execute pending tool calls
+if messages.has_pending_tool_calls():
+    last_message = messages.get_messages()[-1]
+    for tool_call in last_message.get("tool_calls", []):
+        if tool_call["function"]["name"] == "get_weather":
+            import json
+            args = json.loads(tool_call["function"]["arguments"])
+            result = get_weather(**args)
+            messages.add_tool_response(tool_call["id"], result)
+
+# Get final response from LLM
+final_response = call_llm(model=model, messages=messages)
+messages.add_assistant_message(final_response)
+print(f"Final response: {final_response}")
 ```
 
 ## Development Principles

@@ -251,6 +304,8 @@ Messages(system_prompt=None, user_prompt=None, add_date_and_time=False)
 - `add_user_message(content: str)`: Add user message
 - `add_assistant_message(content: str)`: Add assistant message
 - `add_tool_call(tool_call: Dict)`: Add tool call to assistant message
+- `add_tool_calls(tool_calls)`: Add multiple tool calls from ChatCompletionMessageFunctionToolCall objects
+- `add_response_message(message)`: Add ChatCompletionMessage response to conversation
 - `add_tool_response(call_id: str, content: str)`: Add tool response
 - `get_messages() -> List[Dict]`: Get all messages
 - `has_pending_tool_calls() -> bool`: Check for pending tool calls
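The final hunk above documents the two `Messages` methods added in 0.1.8, `add_tool_calls()` and `add_response_message()`, but the diff gives no standalone example for the former. Below is a minimal sketch of how the two appear to fit together, inferred only from this diff; the exact signatures, and the assumption that `call_llm()` returns an OpenAI-style message whose `.tool_calls` holds the `ChatCompletionMessageFunctionToolCall` objects, are not confirmed by the package source.

```python
# Minimal sketch, not from the package source: signatures inferred from the
# API list in this diff. The shape of `response` (an OpenAI-style message
# with a .tool_calls attribute) is an assumption.
from agentic_blocks import call_llm, Messages

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get current weather information for a location",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    },
}]

messages = Messages(user_prompt="What is the weather in Stockholm?")
response = call_llm(model="gpt-4o-mini", messages=messages, tools=tools)

# One-step path: append the whole assistant response, content and tool
# calls alike, to the conversation.
messages.add_response_message(response)

# Finer-grained path (assumed): append only the tool calls, e.g. the
# ChatCompletionMessageFunctionToolCall objects on response.tool_calls.
# messages.add_tool_calls(response.tool_calls)

print(messages.has_pending_tool_calls())  # True while a tool call awaits a result
```

Either path should leave `has_pending_tool_calls()` returning `True` until a matching `add_tool_response()` is recorded, which appears to be what the README's follow-up loop relies on.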
{agentic_blocks-0.1.7 → agentic_blocks-0.1.8}/pyproject.toml

@@ -14,7 +14,7 @@ agentic_blocks = []
 
 [project]
 name = "agentic-blocks"
-version = "0.1.7"
+version = "0.1.8"
 description = "Simple building blocks for agentic AI systems with MCP client and conversation management"
 readme = "README.md"
 requires-python = ">=3.11"
{agentic_blocks-0.1.7 → agentic_blocks-0.1.8/src/agentic_blocks.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
@@ -187,66 +187,119 @@ Or pass the API key directly:
@@ -284,6 +337,8 @@ Messages(system_prompt=None, user_prompt=None, add_date_and_time=False)

(Identical to the first PKG-INFO diff above; the sdist ships the same PKG-INFO at both paths.)