agentic-blocks 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentic_blocks/messages.py +45 -0
- {agentic_blocks-0.1.7.dist-info → agentic_blocks-0.1.9.dist-info}/METADATA +113 -57
- agentic_blocks-0.1.9.dist-info/RECORD +9 -0
- agentic_blocks-0.1.7.dist-info/RECORD +0 -9
- {agentic_blocks-0.1.7.dist-info → agentic_blocks-0.1.9.dist-info}/WHEEL +0 -0
- {agentic_blocks-0.1.7.dist-info → agentic_blocks-0.1.9.dist-info}/licenses/LICENSE +0 -0
- {agentic_blocks-0.1.7.dist-info → agentic_blocks-0.1.9.dist-info}/top_level.txt +0 -0
agentic_blocks/messages.py
CHANGED
@@ -205,6 +205,51 @@ class Messages:
|
|
205
205
|
|
206
206
|
return False
|
207
207
|
|
208
|
+
def get_pending_tool_calls(self) -> List[Dict[str, Any]]:
    """
    Get pending tool calls that need execution, formatted for MCPClient.call_tool().

    A tool call is "pending" when the last message in the conversation is an
    assistant message carrying "tool_calls" and no tool-role message with a
    matching "tool_call_id" exists anywhere in the conversation yet.

    Returns:
        List of dictionaries with 'tool_name', 'arguments' (parsed to a dict),
        and 'tool_call_id' keys; an empty list when nothing is pending.
    """
    # Hoisted out of the per-tool-call loop: the original re-ran the import
    # statement for every unanswered tool call.
    import json

    pending_calls: List[Dict[str, Any]] = []

    if not self.messages:
        return pending_calls

    last_message = self.messages[-1]

    # Only an assistant message that carries tool_calls can have pending calls.
    if last_message.get("role") != "assistant" or "tool_calls" not in last_message:
        return pending_calls

    # Collect tool_call_ids that already received a tool response.
    responded_tool_call_ids = {
        msg.get("tool_call_id")
        for msg in self.messages
        if msg.get("role") == "tool" and msg.get("tool_call_id")
    }

    # Emit every tool call on the last assistant message that has no response.
    for tool_call in last_message["tool_calls"]:
        tool_call_id = tool_call.get("id")
        if tool_call_id in responded_tool_call_ids:
            continue

        function_info = tool_call.get("function", {})
        arguments_str = function_info.get("arguments", "{}")

        # Arguments arrive as a JSON-encoded string (OpenAI tool-call format);
        # fall back to {} on malformed input rather than raising.
        try:
            arguments = json.loads(arguments_str)
        except json.JSONDecodeError:
            arguments = {}

        pending_calls.append({
            "tool_name": function_info.get("name"),
            "arguments": arguments,
            "tool_call_id": tool_call_id,
        })

    return pending_calls
|
252
|
+
|
208
253
|
def __str__(self) -> str:
|
209
254
|
"""Return messages in a simple, readable format."""
|
210
255
|
if not self.messages:
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: agentic-blocks
|
3
|
-
Version: 0.1.7
|
3
|
+
Version: 0.1.9
|
4
4
|
Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
|
5
5
|
Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
|
6
6
|
License: MIT
|
@@ -23,6 +23,7 @@ Requires-Dist: mcp
|
|
23
23
|
Requires-Dist: requests
|
24
24
|
Requires-Dist: python-dotenv
|
25
25
|
Requires-Dist: openai
|
26
|
+
Requires-Dist: langchain-core
|
26
27
|
Provides-Extra: test
|
27
28
|
Requires-Dist: pytest; extra == "test"
|
28
29
|
Provides-Extra: dev
|
@@ -187,66 +188,119 @@ Or pass the API key directly:
|
|
187
188
|
response = call_llm(messages, api_key="your_api_key_here")
|
188
189
|
```
|
189
190
|
|
190
|
-
## Complete Example -
|
191
|
+
## Complete Example - Tool Calling with Weather API
|
192
|
+
|
193
|
+
This example demonstrates a complete workflow using function calling with an LLM. For a full interactive notebook version, see `notebooks/agentic_example.ipynb`.
|
191
194
|
|
192
195
|
```python
|
193
|
-
from agentic_blocks import
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
tool_call = {
|
217
|
-
"id": "search_001",
|
218
|
-
"type": "function",
|
219
|
-
"function": {
|
220
|
-
"name": search_tool["function"]["name"],
|
221
|
-
"arguments": '{"query": "latest AI news"}'
|
222
|
-
}
|
196
|
+
from agentic_blocks import call_llm, Messages
|
197
|
+
|
198
|
+
# Define tools in OpenAI function calling format
|
199
|
+
tools = [
|
200
|
+
{
|
201
|
+
"type": "function",
|
202
|
+
"function": {
|
203
|
+
"name": "get_weather",
|
204
|
+
"description": "Get current weather information for a location",
|
205
|
+
"parameters": {
|
206
|
+
"type": "object",
|
207
|
+
"properties": {
|
208
|
+
"location": {
|
209
|
+
"type": "string",
|
210
|
+
"description": "The city and state, e.g. San Francisco, CA"
|
211
|
+
},
|
212
|
+
"unit": {
|
213
|
+
"type": "string",
|
214
|
+
"enum": ["celsius", "fahrenheit"],
|
215
|
+
"description": "Temperature unit"
|
216
|
+
}
|
217
|
+
},
|
218
|
+
"required": ["location"]
|
223
219
|
}
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
220
|
+
}
|
221
|
+
},
|
222
|
+
{
|
223
|
+
"type": "function",
|
224
|
+
"function": {
|
225
|
+
"name": "calculate",
|
226
|
+
"description": "Perform a mathematical calculation",
|
227
|
+
"parameters": {
|
228
|
+
"type": "object",
|
229
|
+
"properties": {
|
230
|
+
"expression": {
|
231
|
+
"type": "string",
|
232
|
+
"description": "Mathematical expression to evaluate"
|
233
|
+
}
|
234
|
+
},
|
235
|
+
"required": ["expression"]
|
236
|
+
}
|
237
|
+
}
|
238
|
+
}
|
239
|
+
]
|
240
|
+
|
241
|
+
# Create conversation with system and user prompts
|
242
|
+
messages = Messages(
|
243
|
+
system_prompt="You are a helpful assistant with access to weather and calculation tools.",
|
244
|
+
user_prompt="What is the weather in Stockholm?"
|
245
|
+
)
|
246
|
+
|
247
|
+
# Call LLM with tools - it will decide which tools to call
|
248
|
+
model = "gpt-4o-mini" # or your preferred model
|
249
|
+
response = call_llm(model=model, messages=messages, tools=tools)
|
250
|
+
|
251
|
+
# Add the LLM's response (including any tool calls) to conversation
|
252
|
+
messages.add_response_message(response)
|
247
253
|
|
248
|
-
|
249
|
-
|
254
|
+
# Display the conversation so far
|
255
|
+
for message in messages.get_messages():
|
256
|
+
print(message)
|
257
|
+
|
258
|
+
# Check if there are pending tool calls that need execution
|
259
|
+
print("Has pending tool calls:", messages.has_pending_tool_calls())
|
260
|
+
|
261
|
+
# In a real implementation, you would:
|
262
|
+
# 1. Execute the actual tool calls (get_weather, calculate, etc.)
|
263
|
+
# 2. Add tool responses using messages.add_tool_response()
|
264
|
+
# 3. Call the LLM again to get the final user-facing response
|
265
|
+
```
|
266
|
+
|
267
|
+
**Expected Output:**
|
268
|
+
```
|
269
|
+
{'role': 'system', 'content': 'You are a helpful assistant with access to weather and calculation tools.'}
|
270
|
+
{'role': 'user', 'content': 'What is the weather in Stockholm?'}
|
271
|
+
{'role': 'assistant', 'content': '', 'tool_calls': [{'id': 'call_abc123', 'type': 'function', 'function': {'name': 'get_weather', 'arguments': '{"location": "Stockholm, Sweden", "unit": "celsius"}'}}]}
|
272
|
+
Has pending tool calls: True
|
273
|
+
```
|
274
|
+
|
275
|
+
**Key Features Demonstrated:**
|
276
|
+
- **Messages management**: Clean conversation history with system/user prompts
|
277
|
+
- **Tool calling**: LLM automatically decides to call the `get_weather` function
|
278
|
+
- **Response handling**: `add_response_message()` handles both content and tool calls
|
279
|
+
- **Pending detection**: `has_pending_tool_calls()` identifies when tools need execution
|
280
|
+
|
281
|
+
**Next Steps:**
|
282
|
+
After the LLM makes tool calls, you would implement the actual tool functions and continue the conversation:
|
283
|
+
|
284
|
+
```python
|
285
|
+
# Implement actual weather function
|
286
|
+
def get_weather(location, unit="celsius"):
|
287
|
+
# Your weather API implementation here
|
288
|
+
return f"The weather in {location} is sunny, 22°{unit[0].upper()}"
|
289
|
+
|
290
|
+
# Execute pending tool calls
|
291
|
+
if messages.has_pending_tool_calls():
|
292
|
+
last_message = messages.get_messages()[-1]
|
293
|
+
for tool_call in last_message.get("tool_calls", []):
|
294
|
+
if tool_call["function"]["name"] == "get_weather":
|
295
|
+
import json
|
296
|
+
args = json.loads(tool_call["function"]["arguments"])
|
297
|
+
result = get_weather(**args)
|
298
|
+
messages.add_tool_response(tool_call["id"], result)
|
299
|
+
|
300
|
+
# Get final response from LLM
|
301
|
+
final_response = call_llm(model=model, messages=messages)
|
302
|
+
messages.add_assistant_message(final_response)
|
303
|
+
print(f"Final response: {final_response}")
|
250
304
|
```
|
251
305
|
|
252
306
|
## Development Principles
|
@@ -284,6 +338,8 @@ Messages(system_prompt=None, user_prompt=None, add_date_and_time=False)
|
|
284
338
|
- `add_user_message(content: str)`: Add user message
|
285
339
|
- `add_assistant_message(content: str)`: Add assistant message
|
286
340
|
- `add_tool_call(tool_call: Dict)`: Add tool call to assistant message
|
341
|
+
- `add_tool_calls(tool_calls)`: Add multiple tool calls from ChatCompletionMessageFunctionToolCall objects
|
342
|
+
- `add_response_message(message)`: Add ChatCompletionMessage response to conversation
|
287
343
|
- `add_tool_response(call_id: str, content: str)`: Add tool response
|
288
344
|
- `get_messages() -> List[Dict]`: Get all messages
|
289
345
|
- `has_pending_tool_calls() -> bool`: Check for pending tool calls
|
@@ -0,0 +1,9 @@
|
|
1
|
+
agentic_blocks/__init__.py,sha256=LJy2tzTwX9ZjPw8dqkXOWiude7ZDDIaBIvaLC8U4d_Y,435
|
2
|
+
agentic_blocks/llm.py,sha256=CznQ5iNFz_nQsGqjSmtZbCz1YyL6ha1qvnaoFOwsJtk,4868
|
3
|
+
agentic_blocks/mcp_client.py,sha256=15mIN_Qw0OVNJAvfgO3jVZS4-AU4TtvEQSFDlL9ruqA,9773
|
4
|
+
agentic_blocks/messages.py,sha256=dxaR_-IiH8n7pZygFDiUt8n7mMOm--FRuN-xCQEuewo,11758
|
5
|
+
agentic_blocks-0.1.9.dist-info/licenses/LICENSE,sha256=r4IcBaAjTv3-yfjXgDPuRD953Qci0Y0nQn5JfHwLyBY,1073
|
6
|
+
agentic_blocks-0.1.9.dist-info/METADATA,sha256=RTvQSPxogrsYO0Scv0sxZp-0BsG3EKnovV38F-VhM0g,11943
|
7
|
+
agentic_blocks-0.1.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
8
|
+
agentic_blocks-0.1.9.dist-info/top_level.txt,sha256=-1a4RAemqicXLU1rRzw4QHV3KlNeQDNxVs3m2gAT238,15
|
9
|
+
agentic_blocks-0.1.9.dist-info/RECORD,,
|
@@ -1,9 +0,0 @@
|
|
1
|
-
agentic_blocks/__init__.py,sha256=LJy2tzTwX9ZjPw8dqkXOWiude7ZDDIaBIvaLC8U4d_Y,435
|
2
|
-
agentic_blocks/llm.py,sha256=CznQ5iNFz_nQsGqjSmtZbCz1YyL6ha1qvnaoFOwsJtk,4868
|
3
|
-
agentic_blocks/mcp_client.py,sha256=15mIN_Qw0OVNJAvfgO3jVZS4-AU4TtvEQSFDlL9ruqA,9773
|
4
|
-
agentic_blocks/messages.py,sha256=WhZDFYb9afvJH9YcieTvEbPdSOkRcclFkCSiMM-9YjY,9904
|
5
|
-
agentic_blocks-0.1.7.dist-info/licenses/LICENSE,sha256=r4IcBaAjTv3-yfjXgDPuRD953Qci0Y0nQn5JfHwLyBY,1073
|
6
|
-
agentic_blocks-0.1.7.dist-info/METADATA,sha256=wQIebdtAorFhdsiVKrLi-M_3C60S7qI1oZ9l2ivlLQs,9445
|
7
|
-
agentic_blocks-0.1.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
8
|
-
agentic_blocks-0.1.7.dist-info/top_level.txt,sha256=-1a4RAemqicXLU1rRzw4QHV3KlNeQDNxVs3m2gAT238,15
|
9
|
-
agentic_blocks-0.1.7.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|