agentic-blocks 0.1.6__tar.gz → 0.1.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agentic-blocks
- Version: 0.1.6
+ Version: 0.1.8
  Summary: Simple building blocks for agentic AI systems with MCP client and conversation management
  Author-email: Magnus Bjelkenhed <bjelkenhed@gmail.com>
  License: MIT
@@ -187,66 +187,119 @@ Or pass the API key directly:
  response = call_llm(messages, api_key="your_api_key_here")
  ```

- ## Complete Example - Agent with MCP Tools and LLM
+ ## Complete Example - Tool Calling with Weather API
+
+ This example demonstrates a complete workflow using function calling with an LLM. For a full interactive notebook version, see `notebooks/agentic_example.ipynb`.

  ```python
- from agentic_blocks import MCPClient, Messages, call_llm
-
- def simple_agent():
-     # Initialize MCP client and conversation
-     client = MCPClient("https://example.com/mcp/server/sse")
-     messages = Messages(
-         system_prompt="You are a helpful research assistant.",
-         add_date_and_time=True
-     )
-
-     # Get available tools
-     tools = client.list_tools()
-     print(f"Connected to MCP server with {len(tools)} tools")
-
-     # Simulate user query
-     user_query = "What's the latest news about AI?"
-     messages.add_user_message(user_query)
-
-     # Agent decides to use a search tool
-     if tools:
-         search_tool = next((t for t in tools if "search" in t["function"]["name"]), None)
-         if search_tool:
-             # Add tool call to messages
-             tool_call = {
-                 "id": "search_001",
-                 "type": "function",
-                 "function": {
-                     "name": search_tool["function"]["name"],
-                     "arguments": '{"query": "latest AI news"}'
-                 }
-             }
-             messages.add_tool_call(tool_call)
-
-             # Execute the tool
-             result = client.call_tool(
-                 search_tool["function"]["name"],
-                 {"query": "latest AI news"}
-             )
-
-             # Add tool response
-             if result["content"]:
-                 response_text = result["content"][0]["text"]
-                 messages.add_tool_response("search_001", response_text)
-
-     # Use LLM to generate response based on search results
-     messages.add_user_message("Based on the search results, please summarize the key AI news.")
-     llm_response = call_llm(messages, temperature=0.7)
-     messages.add_assistant_message(llm_response)
-
-     # Print conversation
-     print("\nConversation:")
-     print(messages)
-
-     return messages.get_messages()
-
- if __name__ == "__main__":
-     simple_agent()
+ from agentic_blocks import call_llm, Messages
+
+ # Define tools in OpenAI function calling format
+ tools = [
+     {
+         "type": "function",
+         "function": {
+             "name": "get_weather",
+             "description": "Get current weather information for a location",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "location": {
+                         "type": "string",
+                         "description": "The city and state, e.g. San Francisco, CA"
+                     },
+                     "unit": {
+                         "type": "string",
+                         "enum": ["celsius", "fahrenheit"],
+                         "description": "Temperature unit"
+                     }
+                 },
+                 "required": ["location"]
+             }
+         }
+     },
+     {
+         "type": "function",
+         "function": {
+             "name": "calculate",
+             "description": "Perform a mathematical calculation",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "expression": {
+                         "type": "string",
+                         "description": "Mathematical expression to evaluate"
+                     }
+                 },
+                 "required": ["expression"]
+             }
+         }
+     }
+ ]
+
+ # Create conversation with system and user prompts
+ messages = Messages(
+     system_prompt="You are a helpful assistant with access to weather and calculation tools.",
+     user_prompt="What is the weather in Stockholm?"
+ )
+
+ # Call LLM with tools - it will decide which tools to call
+ model = "gpt-4o-mini"  # or your preferred model
+ response = call_llm(model=model, messages=messages, tools=tools)
+
+ # Add the LLM's response (including any tool calls) to conversation
+ messages.add_response_message(response)
+
+ # Display the conversation so far
+ for message in messages.get_messages():
+     print(message)
+
+ # Check if there are pending tool calls that need execution
+ print("Has pending tool calls:", messages.has_pending_tool_calls())
+
+ # In a real implementation, you would:
+ # 1. Execute the actual tool calls (get_weather, calculate, etc.)
+ # 2. Add tool responses using messages.add_tool_response()
+ # 3. Call the LLM again to get the final user-facing response
+ ```
+
+ **Expected Output:**
+ ```
+ {'role': 'system', 'content': 'You are a helpful assistant with access to weather and calculation tools.'}
+ {'role': 'user', 'content': 'What is the weather in Stockholm?'}
+ {'role': 'assistant', 'content': '', 'tool_calls': [{'id': 'call_abc123', 'type': 'function', 'function': {'name': 'get_weather', 'arguments': '{"location": "Stockholm, Sweden", "unit": "celsius"}'}}]}
+ Has pending tool calls: True
+ ```
+
+ **Key Features Demonstrated:**
+ - **Messages management**: Clean conversation history with system/user prompts
+ - **Tool calling**: LLM automatically decides to call the `get_weather` function
+ - **Response handling**: `add_response_message()` handles both content and tool calls
+ - **Pending detection**: `has_pending_tool_calls()` identifies when tools need execution
+
+ **Next Steps:**
+ After the LLM makes tool calls, you would implement the actual tool functions and continue the conversation:
+
+ ```python
+ # Implement actual weather function
+ def get_weather(location, unit="celsius"):
+     # Your weather API implementation here
+     return f"The weather in {location} is sunny, 22°{unit[0].upper()}"
+
+ # Execute pending tool calls
+ if messages.has_pending_tool_calls():
+     last_message = messages.get_messages()[-1]
+     for tool_call in last_message.get("tool_calls", []):
+         if tool_call["function"]["name"] == "get_weather":
+             import json
+             args = json.loads(tool_call["function"]["arguments"])
+             result = get_weather(**args)
+             messages.add_tool_response(tool_call["id"], result)
+
+ # Get final response from LLM
+ final_response = call_llm(model=model, messages=messages)
+ messages.add_assistant_message(final_response)
+ print(f"Final response: {final_response}")
  ```

  ## Development Principles
@@ -284,6 +337,8 @@ Messages(system_prompt=None, user_prompt=None, add_date_and_time=False)
  - `add_user_message(content: str)`: Add user message
  - `add_assistant_message(content: str)`: Add assistant message
  - `add_tool_call(tool_call: Dict)`: Add tool call to assistant message
+ - `add_tool_calls(tool_calls)`: Add multiple tool calls from ChatCompletionMessageFunctionToolCall objects
+ - `add_response_message(message)`: Add ChatCompletionMessage response to conversation
  - `add_tool_response(call_id: str, content: str)`: Add tool response
  - `get_messages() -> List[Dict]`: Get all messages
  - `has_pending_tool_calls() -> bool`: Check for pending tool calls
@@ -14,7 +14,7 @@ agentic_blocks = []

  [project]
  name = "agentic-blocks"
- version = "0.1.6"
+ version = "0.1.8"
  description = "Simple building blocks for agentic AI systems with MCP client and conversation management"
  readme = "README.md"
  requires-python = ">=3.11"
@@ -68,9 +68,11 @@ class Messages:
              tool_call: The tool call dictionary with id, type, function, etc.
          """
          # Check if the latest message is an assistant message with tool_calls
-         if (self.messages
-             and self.messages[-1].get("role") == "assistant"
-             and "tool_calls" in self.messages[-1]):
+         if (
+             self.messages
+             and self.messages[-1].get("role") == "assistant"
+             and "tool_calls" in self.messages[-1]
+         ):
              # Append to existing assistant message
              self.messages[-1]["tool_calls"].append(tool_call)
          else:
@@ -82,6 +84,33 @@ class Messages:
              }
              self.messages.append(assistant_message)

+     def add_tool_calls(self, tool_calls):
+         """
+         Add multiple tool calls from ChatCompletionMessageFunctionToolCall objects.
+
+         Args:
+             tool_calls: A list of ChatCompletionMessageFunctionToolCall objects or a single object
+         """
+         # Handle single tool call or list of tool calls
+         if not isinstance(tool_calls, list):
+             tool_calls = [tool_calls]
+
+         # Create assistant message with empty content and tool calls
+         assistant_message = {"role": "assistant", "content": "", "tool_calls": []}
+
+         for tool_call in tool_calls:
+             tool_call_dict = {
+                 "id": tool_call.id,
+                 "type": tool_call.type,
+                 "function": {
+                     "name": tool_call.function.name,
+                     "arguments": tool_call.function.arguments,
+                 },
+             }
+             assistant_message["tool_calls"].append(tool_call_dict)
+
+         self.messages.append(assistant_message)
+
      def add_tool_response(self, tool_call_id: str, content: str):
          """
          Add a tool response message.
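
The new `add_tool_calls` reads each tool call via attribute access (`.id`, `.type`, `.function.name`, `.function.arguments`), so it accepts the tool-call objects the OpenAI client returns. A minimal usage sketch; the `SimpleNamespace` stand-ins below are illustrative substitutes for real API objects, not part of the package:

```python
from types import SimpleNamespace

from agentic_blocks import Messages

# Hypothetical stand-in for a ChatCompletionMessageFunctionToolCall
tool_call = SimpleNamespace(
    id="call_abc123",
    type="function",
    function=SimpleNamespace(
        name="get_weather",
        arguments='{"location": "Stockholm, Sweden"}',
    ),
)

messages = Messages(user_prompt="What is the weather in Stockholm?")
messages.add_tool_calls(tool_call)  # a single object is wrapped into a list

# The tool call is stored as a plain dict on a new assistant message
print(messages.get_messages()[-1])
```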
@@ -108,7 +137,7 @@ class Messages:
          for response in tool_responses:
              tool_call_id = response.get("tool_call_id", "unknown")
              is_error = response.get("is_error", False)
-
+
              if is_error:
                  content = f"Error: {response.get('error', 'Unknown error')}"
              else:
@@ -125,34 +154,22 @@ class Messages:

              self.add_tool_response(tool_call_id, content)

-     def add_response_message(self, message):
+     def add_response_message(self, model_response):
          """
          Add a response message (ChatCompletionMessage) to the conversation.

          Args:
-             message: A ChatCompletionMessage object with role, content, and potentially tool_calls
+             model_response: A ChatCompletionMessage object with role, content, and potentially tool_calls
          """
-         # Convert the ChatCompletionMessage to a dictionary format
-         msg_dict = {"role": message.role}
-
-         # Add content (handle None case)
-         msg_dict["content"] = message.content or ""
-
-         # Add tool_calls if present
-         if message.tool_calls:
-             msg_dict["tool_calls"] = []
-             for tool_call in message.tool_calls:
-                 tool_call_dict = {
-                     "id": tool_call.id,
-                     "type": tool_call.type,
-                     "function": {
-                         "name": tool_call.function.name,
-                         "arguments": tool_call.function.arguments
-                     }
-                 }
-                 msg_dict["tool_calls"].append(tool_call_dict)
-
-         self.messages.append(msg_dict)
+         # If there are tool calls, use add_tool_calls
+         if model_response.tool_calls:
+             self.add_tool_calls(model_response.tool_calls)
+             # If there's also content, update the message content
+             if model_response.content:
+                 self.messages[-1]["content"] = model_response.content
+         else:
+             # No tool calls, just add content as assistant message
+             self.add_assistant_message(model_response.content or "")

      def get_messages(self) -> List[Dict[str, Any]]:
          """Get the current messages list."""
@@ -169,17 +186,20 @@ class Messages:
              return False

          last_message = self.messages[-1]
-
+
          # Check if the last message is an assistant message with tool calls
          if last_message.get("role") == "assistant" and "tool_calls" in last_message:
              # Check if there are subsequent tool responses
              tool_call_ids = {tc.get("id") for tc in last_message["tool_calls"]}
-
+
              # Look for tool responses after this message
              for msg in reversed(self.messages):
-                 if msg.get("role") == "tool" and msg.get("tool_call_id") in tool_call_ids:
+                 if (
+                     msg.get("role") == "tool"
+                     and msg.get("tool_call_id") in tool_call_ids
+                 ):
                      tool_call_ids.remove(msg.get("tool_call_id"))
-
+
              # If there are still unresponded tool call IDs, we have pending calls
              return len(tool_call_ids) > 0

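As reformatted above, `has_pending_tool_calls` collects the tool-call IDs from the last assistant message and removes each ID that has a matching tool response, reporting pending calls only while unanswered IDs remain. A small usage sketch (behavior inferred from this hunk):

```python
from agentic_blocks import Messages

messages = Messages(user_prompt="What is the weather in Paris?")
messages.add_tool_call({
    "id": "call_123",
    "type": "function",
    "function": {"name": "get_weather", "arguments": '{"location": "Paris"}'},
})
print(messages.has_pending_tool_calls())  # True - call_123 has no tool response yet

messages.add_tool_response("call_123", "The weather in Paris is sunny, 22°C")
print(messages.has_pending_tool_calls())  # False - every tool call has been answered
```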
@@ -201,7 +221,7 @@ class Messages:
              for j, tool_call in enumerate(message["tool_calls"], 1):
                  function_name = tool_call.get("function", {}).get("name", "unknown")
                  lines.append(f" └─ Tool Call {j}: {function_name}")
-
+
          # Handle tool messages
          elif role == "tool":
              tool_call_id = message.get("tool_call_id", "unknown")
@@ -209,7 +229,7 @@ class Messages:
              if len(content) > 200:
                  content = content[:197] + "..."
              lines.append(f"{i}. {role} [{tool_call_id[:8]}...]: {content}")
-
+
          # Handle other message types
          else:
              # Truncate long content for readability
@@ -227,28 +247,28 @@ def example_usage():
      messages = Messages(
          system_prompt="You are a helpful assistant.",
          user_prompt="Hello, how are you?",
-         add_date_and_time=True
+         add_date_and_time=True,
      )
-
+
      # Add assistant response
      messages.add_assistant_message("I'm doing well, thank you!")
-
+
      # Add a tool call
      tool_call = {
          "id": "call_123",
          "type": "function",
-         "function": {"name": "get_weather", "arguments": '{"location": "Paris"}'}
+         "function": {"name": "get_weather", "arguments": '{"location": "Paris"}'},
      }
      messages.add_tool_call(tool_call)
-
+
      # Add tool response
      messages.add_tool_response("call_123", "The weather in Paris is sunny, 22°C")
-
+
      print("Conversation:")
      print(messages)
-
+
      print(f"\nHas pending tool calls: {messages.has_pending_tool_calls()}")


  if __name__ == "__main__":
-     example_usage()
+     example_usage()