agentic-blocks 0.1.7__tar.gz → 0.1.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agentic-blocks
- Version: 0.1.7
+ Version: 0.1.8
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
  License: MIT
@@ -187,66 +187,119 @@ Or pass the API key directly:
  response = call_llm(messages, api_key="your_api_key_here")
  ```

- ## Complete Example - Agent with MCP Tools and LLM
+ ## Complete Example - Tool Calling with Weather API
+
+ This example demonstrates a complete workflow using function calling with an LLM. For a full interactive notebook version, see `notebooks/agentic_example.ipynb`.

  ```python
- from agentic_blocks import MCPClient, Messages, call_llm
-
- def simple_agent():
-     # Initialize MCP client and conversation
-     client = MCPClient("https://example.com/mcp/server/sse")
-     messages = Messages(
-         system_prompt="You are a helpful research assistant.",
-         add_date_and_time=True
-     )
-
-     # Get available tools
-     tools = client.list_tools()
-     print(f"Connected to MCP server with {len(tools)} tools")
-
-     # Simulate user query
-     user_query = "What's the latest news about AI?"
-     messages.add_user_message(user_query)
-
-     # Agent decides to use a search tool
-     if tools:
-         search_tool = next((t for t in tools if "search" in t["function"]["name"]), None)
-         if search_tool:
-             # Add tool call to messages
-             tool_call = {
-                 "id": "search_001",
-                 "type": "function",
-                 "function": {
-                     "name": search_tool["function"]["name"],
-                     "arguments": '{"query": "latest AI news"}'
-                 }
-             }
-             messages.add_tool_call(tool_call)
-
-             # Execute the tool
-             result = client.call_tool(
-                 search_tool["function"]["name"],
-                 {"query": "latest AI news"}
-             )
-
-             # Add tool response
-             if result["content"]:
-                 response_text = result["content"][0]["text"]
-                 messages.add_tool_response("search_001", response_text)
-
-     # Use LLM to generate response based on search results
-     messages.add_user_message("Based on the search results, please summarize the key AI news.")
-     llm_response = call_llm(messages, temperature=0.7)
-     messages.add_assistant_message(llm_response)
-
-     # Print conversation
-     print("\nConversation:")
-     print(messages)
-
-     return messages.get_messages()
-
- if __name__ == "__main__":
-     simple_agent()
+ from agentic_blocks import call_llm, Messages
+
+ # Define tools in OpenAI function calling format
+ tools = [
+     {
+         "type": "function",
+         "function": {
+             "name": "get_weather",
+             "description": "Get current weather information for a location",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "location": {
+                         "type": "string",
+                         "description": "The city and state, e.g. San Francisco, CA"
+                     },
+                     "unit": {
+                         "type": "string",
+                         "enum": ["celsius", "fahrenheit"],
+                         "description": "Temperature unit"
+                     }
+                 },
+                 "required": ["location"]
+             }
+         }
+     },
+     {
+         "type": "function",
+         "function": {
+             "name": "calculate",
+             "description": "Perform a mathematical calculation",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "expression": {
+                         "type": "string",
+                         "description": "Mathematical expression to evaluate"
+                     }
+                 },
+                 "required": ["expression"]
+             }
+         }
+     }
+ ]
+
+ # Create conversation with system and user prompts
+ messages = Messages(
+     system_prompt="You are a helpful assistant with access to weather and calculation tools.",
+     user_prompt="What is the weather in Stockholm?"
+ )
+
+ # Call LLM with tools - it will decide which tools to call
+ model = "gpt-4o-mini"  # or your preferred model
+ response = call_llm(model=model, messages=messages, tools=tools)
+
+ # Add the LLM's response (including any tool calls) to the conversation
+ messages.add_response_message(response)
+
+ # Display the conversation so far
+ for message in messages.get_messages():
+     print(message)
+
+ # Check if there are pending tool calls that need execution
+ print("Has pending tool calls:", messages.has_pending_tool_calls())
+
+ # In a real implementation, you would:
+ # 1. Execute the actual tool calls (get_weather, calculate, etc.)
+ # 2. Add tool responses using messages.add_tool_response()
+ # 3. Call the LLM again to get the final user-facing response
+ ```
+
+ **Expected Output:**
+ ```
+ {'role': 'system', 'content': 'You are a helpful assistant with access to weather and calculation tools.'}
+ {'role': 'user', 'content': 'What is the weather in Stockholm?'}
+ {'role': 'assistant', 'content': '', 'tool_calls': [{'id': 'call_abc123', 'type': 'function', 'function': {'name': 'get_weather', 'arguments': '{"location": "Stockholm, Sweden", "unit": "celsius"}'}}]}
+ Has pending tool calls: True
+ ```
+
+ **Key Features Demonstrated:**
+ - **Messages management**: Clean conversation history with system/user prompts
+ - **Tool calling**: The LLM automatically decides to call the `get_weather` function
+ - **Response handling**: `add_response_message()` handles both content and tool calls
+ - **Pending detection**: `has_pending_tool_calls()` identifies when tools need execution
+
+ **Next Steps:**
+ After the LLM makes tool calls, you would implement the actual tool functions and continue the conversation:
+
+ ```python
+ import json
+
+ # Implement the actual weather function
+ def get_weather(location, unit="celsius"):
+     # Your weather API implementation here
+     return f"The weather in {location} is sunny, 22°{unit[0].upper()}"
+
+ # Execute pending tool calls
+ if messages.has_pending_tool_calls():
+     last_message = messages.get_messages()[-1]
+     for tool_call in last_message.get("tool_calls", []):
+         if tool_call["function"]["name"] == "get_weather":
+             args = json.loads(tool_call["function"]["arguments"])
+             result = get_weather(**args)
+             messages.add_tool_response(tool_call["id"], result)
+
+ # Get the final response from the LLM
+ final_response = call_llm(model=model, messages=messages)
+ messages.add_assistant_message(final_response)
+ print(f"Final response: {final_response}")
  ```

  ## Development Principles
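The "Next Steps" snippet above dispatches only `get_weather` and ignores the `calculate` tool it declares. Below is a minimal sketch of a name-keyed dispatch that covers both declared tools, assuming the message dict shapes shown in the expected output; `TOOL_REGISTRY`, `run_pending_tool_calls`, and both tool bodies are illustrative stand-ins, not part of the agentic-blocks API:

```python
import json

# Hypothetical local implementations of the two tools declared above.
def get_weather(location, unit="celsius"):
    return f"The weather in {location} is sunny, 22°{unit[0].upper()}"

def calculate(expression):
    # eval() is shown for brevity only; never use it on untrusted input.
    return str(eval(expression, {"__builtins__": {}}, {}))

TOOL_REGISTRY = {"get_weather": get_weather, "calculate": calculate}

def run_pending_tool_calls(messages):
    # Dispatch every pending call by function name instead of
    # hard-coding a single tool as in the Next Steps block.
    last_message = messages.get_messages()[-1]
    for tool_call in last_message.get("tool_calls", []):
        name = tool_call["function"]["name"]
        args = json.loads(tool_call["function"]["arguments"])
        result = TOOL_REGISTRY[name](**args)
        messages.add_tool_response(tool_call["id"], result)
```

Keyed dispatch keeps tool execution in one place: supporting a third tool means adding one schema to `tools` and one entry to the registry.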
@@ -284,6 +337,8 @@ Messages(system_prompt=None, user_prompt=None, add_date_and_time=False)
  - `add_user_message(content: str)`: Add user message
  - `add_assistant_message(content: str)`: Add assistant message
  - `add_tool_call(tool_call: Dict)`: Add tool call to assistant message
+ - `add_tool_calls(tool_calls)`: Add multiple tool calls from ChatCompletionMessageFunctionToolCall objects
+ - `add_response_message(message)`: Add ChatCompletionMessage response to conversation
  - `add_tool_response(call_id: str, content: str)`: Add tool response
  - `get_messages() -> List[Dict]`: Get all messages
  - `has_pending_tool_calls() -> bool`: Check for pending tool calls
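The two newly documented methods are easiest to see side by side. A short sketch based only on the usage shown in the README diff above; the `response.tool_calls` attribute is an assumption about the OpenAI-style response object named in the method descriptions:

```python
from agentic_blocks import Messages, call_llm

# Reuse the get_weather tool schema from the example above (abbreviated here).
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get current weather information for a location",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    },
}]

messages = Messages(user_prompt="What is the weather in Stockholm?")
response = call_llm(model="gpt-4o-mini", messages=messages, tools=tools)

# One step: append the whole ChatCompletionMessage, content and tool calls alike.
messages.add_response_message(response)

# Or, when you only hold the tool-call objects, add just those
# (assumes response.tool_calls is the list the OpenAI client returns):
# messages.add_tool_calls(response.tool_calls)
```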
@@ -154,66 +154,119 @@ Or pass the API key directly:
  response = call_llm(messages, api_key="your_api_key_here")
  ```

- ## Complete Example - Agent with MCP Tools and LLM
+ ## Complete Example - Tool Calling with Weather API
+
+ This example demonstrates a complete workflow using function calling with an LLM. For a full interactive notebook version, see `notebooks/agentic_example.ipynb`.

  ```python
- from agentic_blocks import MCPClient, Messages, call_llm
-
- def simple_agent():
-     # Initialize MCP client and conversation
-     client = MCPClient("https://example.com/mcp/server/sse")
-     messages = Messages(
-         system_prompt="You are a helpful research assistant.",
-         add_date_and_time=True
-     )
-
-     # Get available tools
-     tools = client.list_tools()
-     print(f"Connected to MCP server with {len(tools)} tools")
-
-     # Simulate user query
-     user_query = "What's the latest news about AI?"
-     messages.add_user_message(user_query)
-
-     # Agent decides to use a search tool
-     if tools:
-         search_tool = next((t for t in tools if "search" in t["function"]["name"]), None)
-         if search_tool:
-             # Add tool call to messages
-             tool_call = {
-                 "id": "search_001",
-                 "type": "function",
-                 "function": {
-                     "name": search_tool["function"]["name"],
-                     "arguments": '{"query": "latest AI news"}'
-                 }
-             }
-             messages.add_tool_call(tool_call)
-
-             # Execute the tool
-             result = client.call_tool(
-                 search_tool["function"]["name"],
-                 {"query": "latest AI news"}
-             )
-
-             # Add tool response
-             if result["content"]:
-                 response_text = result["content"][0]["text"]
-                 messages.add_tool_response("search_001", response_text)
-
-     # Use LLM to generate response based on search results
-     messages.add_user_message("Based on the search results, please summarize the key AI news.")
-     llm_response = call_llm(messages, temperature=0.7)
-     messages.add_assistant_message(llm_response)
-
-     # Print conversation
-     print("\nConversation:")
-     print(messages)
-
-     return messages.get_messages()
-
- if __name__ == "__main__":
-     simple_agent()
+ from agentic_blocks import call_llm, Messages
+
+ # Define tools in OpenAI function calling format
+ tools = [
+     {
+         "type": "function",
+         "function": {
+             "name": "get_weather",
+             "description": "Get current weather information for a location",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "location": {
+                         "type": "string",
+                         "description": "The city and state, e.g. San Francisco, CA"
+                     },
+                     "unit": {
+                         "type": "string",
+                         "enum": ["celsius", "fahrenheit"],
+                         "description": "Temperature unit"
+                     }
+                 },
+                 "required": ["location"]
+             }
+         }
+     },
+     {
+         "type": "function",
+         "function": {
+             "name": "calculate",
+             "description": "Perform a mathematical calculation",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "expression": {
+                         "type": "string",
+                         "description": "Mathematical expression to evaluate"
+                     }
+                 },
+                 "required": ["expression"]
+             }
+         }
+     }
+ ]
+
+ # Create conversation with system and user prompts
+ messages = Messages(
+     system_prompt="You are a helpful assistant with access to weather and calculation tools.",
+     user_prompt="What is the weather in Stockholm?"
+ )
+
+ # Call LLM with tools - it will decide which tools to call
+ model = "gpt-4o-mini"  # or your preferred model
+ response = call_llm(model=model, messages=messages, tools=tools)
+
+ # Add the LLM's response (including any tool calls) to the conversation
+ messages.add_response_message(response)
+
+ # Display the conversation so far
+ for message in messages.get_messages():
+     print(message)
+
+ # Check if there are pending tool calls that need execution
+ print("Has pending tool calls:", messages.has_pending_tool_calls())
+
+ # In a real implementation, you would:
+ # 1. Execute the actual tool calls (get_weather, calculate, etc.)
+ # 2. Add tool responses using messages.add_tool_response()
+ # 3. Call the LLM again to get the final user-facing response
+ ```
+
+ **Expected Output:**
+ ```
+ {'role': 'system', 'content': 'You are a helpful assistant with access to weather and calculation tools.'}
+ {'role': 'user', 'content': 'What is the weather in Stockholm?'}
+ {'role': 'assistant', 'content': '', 'tool_calls': [{'id': 'call_abc123', 'type': 'function', 'function': {'name': 'get_weather', 'arguments': '{"location": "Stockholm, Sweden", "unit": "celsius"}'}}]}
+ Has pending tool calls: True
+ ```
+
+ **Key Features Demonstrated:**
+ - **Messages management**: Clean conversation history with system/user prompts
+ - **Tool calling**: The LLM automatically decides to call the `get_weather` function
+ - **Response handling**: `add_response_message()` handles both content and tool calls
+ - **Pending detection**: `has_pending_tool_calls()` identifies when tools need execution
+
+ **Next Steps:**
+ After the LLM makes tool calls, you would implement the actual tool functions and continue the conversation:
+
+ ```python
+ import json
+
+ # Implement the actual weather function
+ def get_weather(location, unit="celsius"):
+     # Your weather API implementation here
+     return f"The weather in {location} is sunny, 22°{unit[0].upper()}"
+
+ # Execute pending tool calls
+ if messages.has_pending_tool_calls():
+     last_message = messages.get_messages()[-1]
+     for tool_call in last_message.get("tool_calls", []):
+         if tool_call["function"]["name"] == "get_weather":
+             args = json.loads(tool_call["function"]["arguments"])
+             result = get_weather(**args)
+             messages.add_tool_response(tool_call["id"], result)
+
+ # Get the final response from the LLM
+ final_response = call_llm(model=model, messages=messages)
+ messages.add_assistant_message(final_response)
+ print(f"Final response: {final_response}")
  ```

  ## Development Principles
@@ -251,6 +304,8 @@ Messages(system_prompt=None, user_prompt=None, add_date_and_time=False)
  - `add_user_message(content: str)`: Add user message
  - `add_assistant_message(content: str)`: Add assistant message
  - `add_tool_call(tool_call: Dict)`: Add tool call to assistant message
+ - `add_tool_calls(tool_calls)`: Add multiple tool calls from ChatCompletionMessageFunctionToolCall objects
+ - `add_response_message(message)`: Add ChatCompletionMessage response to conversation
  - `add_tool_response(call_id: str, content: str)`: Add tool response
  - `get_messages() -> List[Dict]`: Get all messages
  - `has_pending_tool_calls() -> bool`: Check for pending tool calls
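Taken together, `add_response_message()` and `has_pending_tool_calls()` suggest a simple driver loop. A sketch under the assumption that `call_llm` accepts the keyword arguments used in the README; `run_agent` and `run_tools` are hypothetical names, not library API:

```python
from agentic_blocks import Messages, call_llm

def run_agent(messages: Messages, tools: list, run_tools, model: str = "gpt-4o-mini") -> str:
    # Alternate LLM calls and tool execution until the model stops
    # requesting tools, then return the final assistant text.
    while True:
        response = call_llm(model=model, messages=messages, tools=tools)
        messages.add_response_message(response)
        if not messages.has_pending_tool_calls():
            return messages.get_messages()[-1].get("content", "")
        run_tools(messages)  # e.g. a dispatcher like run_pending_tool_calls above
```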
@@ -14,7 +14,7 @@ agentic_blocks = []

  [project]
  name = "agentic-blocks"
- version = "0.1.7"
+ version = "0.1.8"
  description = "Simple building blocks for agentic AI systems with MCP client and conversation management"
  readme = "README.md"
  requires-python = ">=3.11"
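After the version bump, a standard-library check confirms which release is actually installed:

```python
from importlib.metadata import version

# Expect "0.1.8" once the new release is installed.
print(version("agentic-blocks"))
```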