agnt5-0.1.0-cp39-abi3-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. agnt5/__init__.py +307 -0
  2. agnt5/__pycache__/__init__.cpython-311.pyc +0 -0
  3. agnt5/__pycache__/agent.cpython-311.pyc +0 -0
  4. agnt5/__pycache__/context.cpython-311.pyc +0 -0
  5. agnt5/__pycache__/durable.cpython-311.pyc +0 -0
  6. agnt5/__pycache__/extraction.cpython-311.pyc +0 -0
  7. agnt5/__pycache__/memory.cpython-311.pyc +0 -0
  8. agnt5/__pycache__/reflection.cpython-311.pyc +0 -0
  9. agnt5/__pycache__/runtime.cpython-311.pyc +0 -0
  10. agnt5/__pycache__/task.cpython-311.pyc +0 -0
  11. agnt5/__pycache__/tool.cpython-311.pyc +0 -0
  12. agnt5/__pycache__/tracing.cpython-311.pyc +0 -0
  13. agnt5/__pycache__/types.cpython-311.pyc +0 -0
  14. agnt5/__pycache__/workflow.cpython-311.pyc +0 -0
  15. agnt5/_core.abi3.so +0 -0
  16. agnt5/agent.py +1086 -0
  17. agnt5/context.py +406 -0
  18. agnt5/durable.py +1050 -0
  19. agnt5/extraction.py +410 -0
  20. agnt5/llm/__init__.py +179 -0
  21. agnt5/llm/__pycache__/__init__.cpython-311.pyc +0 -0
  22. agnt5/llm/__pycache__/anthropic.cpython-311.pyc +0 -0
  23. agnt5/llm/__pycache__/azure.cpython-311.pyc +0 -0
  24. agnt5/llm/__pycache__/base.cpython-311.pyc +0 -0
  25. agnt5/llm/__pycache__/google.cpython-311.pyc +0 -0
  26. agnt5/llm/__pycache__/mistral.cpython-311.pyc +0 -0
  27. agnt5/llm/__pycache__/openai.cpython-311.pyc +0 -0
  28. agnt5/llm/__pycache__/together.cpython-311.pyc +0 -0
  29. agnt5/llm/anthropic.py +319 -0
  30. agnt5/llm/azure.py +348 -0
  31. agnt5/llm/base.py +315 -0
  32. agnt5/llm/google.py +373 -0
  33. agnt5/llm/mistral.py +330 -0
  34. agnt5/llm/model_registry.py +467 -0
  35. agnt5/llm/models.json +227 -0
  36. agnt5/llm/openai.py +334 -0
  37. agnt5/llm/together.py +377 -0
  38. agnt5/memory.py +746 -0
  39. agnt5/reflection.py +514 -0
  40. agnt5/runtime.py +699 -0
  41. agnt5/task.py +476 -0
  42. agnt5/testing.py +451 -0
  43. agnt5/tool.py +516 -0
  44. agnt5/tracing.py +624 -0
  45. agnt5/types.py +210 -0
  46. agnt5/workflow.py +897 -0
  47. agnt5-0.1.0.dist-info/METADATA +93 -0
  48. agnt5-0.1.0.dist-info/RECORD +49 -0
  49. agnt5-0.1.0.dist-info/WHEEL +4 -0
agnt5/llm/models.json ADDED
@@ -0,0 +1,227 @@
+ {
+   "_comment": "AGNT5 Model Registry Configuration",
+   "_description": "This configuration file provides model definitions and capabilities without hardcoding model names in source code. Models are organized by provider with capability-based aliases for intelligent selection.",
+   "_version": "1.0",
+   "_last_updated": "2024-12-06",
+
+   "anthropic": {
+     "claude-3-5-sonnet-20241022": {
+       "tier": "flagship",
+       "capabilities": ["text_generation", "tool_calling", "vision", "reasoning", "analysis", "code_generation", "creative_writing"],
+       "aliases": ["claude-latest", "claude-best", "best-reasoning", "best-analysis"],
+       "context_length": 200000,
+       "max_output_tokens": 8192,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "supports_vision": true,
+       "metadata": {
+         "release_date": "2024-10-22",
+         "description": "Latest Claude 3.5 Sonnet with enhanced reasoning"
+       }
+     },
+     "claude-3-5-haiku-20241022": {
+       "tier": "efficient",
+       "capabilities": ["text_generation", "tool_calling", "fast_inference", "cost_efficient"],
+       "aliases": ["claude-fast", "claude-cheap", "fastest", "most-affordable"],
+       "context_length": 200000,
+       "max_output_tokens": 8192,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "supports_vision": false,
+       "metadata": {
+         "release_date": "2024-10-22",
+         "description": "Fast and efficient Claude 3.5 Haiku"
+       }
+     }
+   },
+
+   "openai": {
+     "gpt-4o": {
+       "tier": "flagship",
+       "capabilities": ["text_generation", "tool_calling", "vision", "reasoning", "analysis", "code_generation"],
+       "aliases": ["gpt-latest", "gpt-best", "best-multimodal", "best-vision"],
+       "context_length": 128000,
+       "max_output_tokens": 16384,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "supports_vision": true,
+       "metadata": {
+         "description": "GPT-4 Omni with multimodal capabilities"
+       }
+     },
+     "gpt-4o-mini": {
+       "tier": "efficient",
+       "capabilities": ["text_generation", "tool_calling", "fast_inference", "cost_efficient"],
+       "aliases": ["gpt-fast", "gpt-cheap", "gpt-mini"],
+       "context_length": 128000,
+       "max_output_tokens": 16384,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "supports_vision": true,
+       "metadata": {
+         "description": "Efficient GPT-4 Omni mini version"
+       }
+     },
+     "gpt-4-turbo": {
+       "tier": "performance",
+       "capabilities": ["text_generation", "tool_calling", "vision", "reasoning", "analysis"],
+       "aliases": ["gpt-turbo"],
+       "context_length": 128000,
+       "max_output_tokens": 4096,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "supports_vision": true
+     }
+   },
+
+   "google": {
+     "gemini-1.5-pro": {
+       "tier": "flagship",
+       "capabilities": ["text_generation", "tool_calling", "vision", "long_context", "reasoning", "analysis"],
+       "aliases": ["gemini-latest", "gemini-pro", "best-context"],
+       "context_length": 2000000,
+       "max_output_tokens": 8192,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "supports_vision": true,
+       "metadata": {
+         "description": "Gemini 1.5 Pro with 2M context window"
+       }
+     },
+     "gemini-1.5-flash": {
+       "tier": "efficient",
+       "capabilities": ["text_generation", "tool_calling", "fast_inference", "long_context"],
+       "aliases": ["gemini-fast", "gemini-flash"],
+       "context_length": 1000000,
+       "max_output_tokens": 8192,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "supports_vision": true,
+       "metadata": {
+         "description": "Fast Gemini 1.5 Flash with 1M context"
+       }
+     }
+   },
+
+   "mistral": {
+     "mistral-large-latest": {
+       "tier": "flagship",
+       "capabilities": ["text_generation", "tool_calling", "reasoning", "multilingual", "analysis"],
+       "aliases": ["mistral-latest", "mistral-large", "mistral-best"],
+       "context_length": 128000,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "metadata": {
+         "description": "Latest Mistral Large model"
+       }
+     },
+     "mistral-small-latest": {
+       "tier": "efficient",
+       "capabilities": ["text_generation", "fast_inference", "cost_efficient", "multilingual"],
+       "aliases": ["mistral-fast", "mistral-small", "mistral-cheap"],
+       "context_length": 128000,
+       "supports_streaming": true,
+       "supports_tools": false,
+       "metadata": {
+         "description": "Efficient Mistral Small model"
+       }
+     },
+     "codestral-latest": {
+       "tier": "performance",
+       "capabilities": ["text_generation", "code_generation", "tool_calling"],
+       "aliases": ["codestral", "best-coding", "mistral-code"],
+       "context_length": 32000,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "metadata": {
+         "description": "Specialized coding model"
+       }
+     }
+   },
+
+   "together": {
+     "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": {
+       "tier": "performance",
+       "capabilities": ["text_generation", "tool_calling", "reasoning", "analysis"],
+       "aliases": ["llama-latest", "llama-70b", "llama-large"],
+       "context_length": 131072,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "metadata": {
+         "description": "Meta's Llama 3.1 70B parameter model",
+         "open_source": true
+       }
+     },
+     "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo": {
+       "tier": "efficient",
+       "capabilities": ["text_generation", "tool_calling", "fast_inference"],
+       "aliases": ["llama-fast", "llama-8b", "llama-small"],
+       "context_length": 131072,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "metadata": {
+         "description": "Meta's Llama 3.1 8B parameter model",
+         "open_source": true
+       }
+     },
+     "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo": {
+       "tier": "performance",
+       "capabilities": ["text_generation", "tool_calling", "vision", "multimodal"],
+       "aliases": ["llama-vision", "llama-multimodal"],
+       "context_length": 131072,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "supports_vision": true,
+       "metadata": {
+         "description": "Meta's Llama 3.2 with vision capabilities",
+         "open_source": true
+       }
+     },
+     "mistralai/Mixtral-8x7B-Instruct-v0.1": {
+       "tier": "balanced",
+       "capabilities": ["text_generation", "tool_calling", "multilingual", "reasoning"],
+       "aliases": ["mixtral-latest", "mixtral", "mixtral-8x7b"],
+       "context_length": 32768,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "metadata": {
+         "description": "Mistral's Mixtral 8x7B mixture of experts",
+         "open_source": true
+       }
+     },
+     "Qwen/Qwen2.5-72B-Instruct-Turbo": {
+       "tier": "performance",
+       "capabilities": ["text_generation", "tool_calling", "multilingual", "reasoning", "code_generation"],
+       "aliases": ["qwen-latest", "qwen-72b", "qwen"],
+       "context_length": 131072,
+       "supports_streaming": true,
+       "supports_tools": true,
+       "metadata": {
+         "description": "Alibaba's Qwen 2.5 72B parameter model",
+         "open_source": true
+       }
+     }
+   },
+
+   "_capability_descriptions": {
+     "text_generation": "Basic text generation capabilities",
+     "tool_calling": "Ability to call external tools/functions",
+     "vision": "Can process and understand images",
+     "reasoning": "Strong logical reasoning and problem-solving",
+     "analysis": "Data analysis and interpretation",
+     "code_generation": "Programming and code generation",
+     "creative_writing": "Creative content generation",
+     "long_context": "Extended context window support",
+     "multilingual": "Support for multiple languages",
+     "fast_inference": "Optimized for speed",
+     "cost_efficient": "Optimized for cost"
+   },
+
+   "_tier_descriptions": {
+     "flagship": "Highest performance, latest features, premium cost",
+     "performance": "High performance, good feature set, moderate cost",
+     "balanced": "Good balance of performance and cost",
+     "efficient": "Optimized for speed and cost efficiency",
+     "experimental": "Beta or preview models"
+   }
+ }
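The registry maps each concrete model ID to capability tags and lookup aliases, with `_`-prefixed keys reserved for metadata. The package's actual resolution logic lives in agnt5/llm/model_registry.py (not shown in this diff); the sketch below is only an illustration of how an alias such as "best-coding" could be resolved against this file, with a hypothetical helper name.

```python
import json
from typing import Optional, Tuple

# Hypothetical helper, not the SDK's API: resolve an alias (or exact model ID)
# to a (provider, model_id) pair using the models.json structure shown above.
def resolve_alias(registry_path: str, alias: str) -> Optional[Tuple[str, str]]:
    with open(registry_path) as f:
        registry = json.load(f)
    for provider, models in registry.items():
        # Skip metadata keys like "_comment" and "_tier_descriptions".
        if provider.startswith("_"):
            continue
        for model_id, spec in models.items():
            if alias == model_id or alias in spec.get("aliases", []):
                return provider, model_id
    return None

print(resolve_alias("agnt5/llm/models.json", "best-coding"))
# -> ('mistral', 'codestral-latest')
```

Keeping the mapping in data rather than code means new models or aliases ship as a JSON change, which is presumably why the file carries its own `_version` and `_last_updated` fields.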
agnt5/llm/openai.py ADDED
@@ -0,0 +1,334 @@
+ """
+ OpenAI integration for AGNT5 SDK.
+
+ Provides integration with OpenAI's GPT models including proper message conversion,
+ tool calling, and streaming support.
+ """
+
+ import json
+ import os
+ from typing import Any, AsyncIterator, Dict, List, Optional, Union
+
+ from .base import (
+     LanguageModel,
+     LanguageModelResponse,
+     LanguageModelType,
+     LLMError,
+     Message,
+     Role,
+     TokenUsage,
+     ToolCall,
+ )
+
+ try:
+     import openai
+     from openai.types.chat import (
+         ChatCompletion,
+         ChatCompletionChunk,
+         ChatCompletionMessage,
+         ChatCompletionMessageToolCall,
+     )
+     OPENAI_AVAILABLE = True
+ except ImportError:
+     OPENAI_AVAILABLE = False
+
+
+ class OpenAIError(LLMError):
+     """OpenAI-specific errors."""
+     pass
+
+
+ class OpenAILanguageModel(LanguageModel):
+     """
+     OpenAI GPT language model implementation.
+
+     Supports all GPT models with proper message conversion, tool calling,
+     and streaming capabilities.
+     """
+
+     def __init__(
+         self,
+         llm_model: LanguageModelType,
+         api_key: Optional[str] = None,
+         base_url: Optional[str] = None,
+         organization: Optional[str] = None,
+         system_prompt: Optional[str] = None,
+         **kwargs
+     ):
+         if not OPENAI_AVAILABLE:
+             raise OpenAIError("OpenAI library not installed. Install with: pip install openai")
+
+         super().__init__(llm_model, system_prompt, **kwargs)
+
+         # Get API key
+         self.api_key = api_key or os.getenv("OPENAI_API_KEY")
+         if not self.api_key:
+             raise OpenAIError("OpenAI API key required. Set OPENAI_API_KEY or pass api_key parameter")
+
+         # Initialize client
+         client_kwargs = {"api_key": self.api_key}
+         if base_url:
+             client_kwargs["base_url"] = base_url
+         if organization:
+             client_kwargs["organization"] = organization
+
+         self.client = openai.AsyncOpenAI(**client_kwargs)
+
+         # Validate model is supported by OpenAI
+         if not self.model_name.startswith("gpt"):
+             raise OpenAIError(f"Model {self.model_name} is not an OpenAI model")
+
+     async def generate(
+         self,
+         messages: List[Message],
+         tools: Optional[List[Dict[str, Any]]] = None,
+         max_tokens: int = 1024,
+         temperature: float = 0.7,
+         top_p: float = 1.0,
+         stream: bool = False,
+         **kwargs
+     ) -> Union[LanguageModelResponse, AsyncIterator[LanguageModelResponse]]:
+         """Generate response using OpenAI GPT."""
+         try:
+             # Validate and prepare messages
+             self.validate_messages(messages)
+             prepared_messages = self.prepare_system_message(messages)
+
+             # Convert to OpenAI format
+             openai_messages = self.convert_messages_to_provider_format(prepared_messages)
+
+             # Prepare request parameters
+             request_params = {
+                 "model": self.model_name,
+                 "messages": openai_messages,
+                 "max_tokens": max_tokens,
+                 "temperature": temperature,
+                 "top_p": top_p,
+                 "stream": stream,
+             }
+
+             if tools:
+                 request_params["tools"] = self.convert_tools_to_provider_format(tools)
+                 request_params["tool_choice"] = "auto"
+
+             # Add any additional parameters
+             request_params.update(kwargs)
+
+             if stream:
+                 return self._generate_stream(**request_params)
+             else:
+                 return await self._generate_single(**request_params)
+
+         except openai.APIError as e:
+             raise OpenAIError(f"OpenAI API error: {e}", provider="openai", model=self.model_name) from e
+         except Exception as e:
+             raise OpenAIError(f"Unexpected error: {e}", provider="openai", model=self.model_name) from e
+
+     async def _generate_single(self, **request_params) -> LanguageModelResponse:
+         """Generate a single response."""
+         response = await self.client.chat.completions.create(**request_params)
+
+         message = response.choices[0].message
+
+         # Extract text content
+         response_text = message.content or ""
+
+         # Extract tool calls
+         tool_calls = []
+         if message.tool_calls:
+             for tool_call in message.tool_calls:
+                 if tool_call.type == "function":
+                     try:
+                         arguments = json.loads(tool_call.function.arguments)
+                     except json.JSONDecodeError:
+                         arguments = {"raw_arguments": tool_call.function.arguments}
+
+                     tool_calls.append(ToolCall(
+                         id=tool_call.id,
+                         name=tool_call.function.name,
+                         arguments=arguments
+                     ))
+
+         # Calculate token usage
+         usage = TokenUsage()
+         if response.usage:
+             usage = TokenUsage(
+                 prompt_tokens=response.usage.prompt_tokens,
+                 completion_tokens=response.usage.completion_tokens,
+                 total_tokens=response.usage.total_tokens
+             )
+
+         return LanguageModelResponse(
+             message=response_text,
+             usage=usage,
+             tool_calls=tool_calls if tool_calls else None,
+             model=response.model,
+             finish_reason=response.choices[0].finish_reason,
+             metadata={"response_id": response.id}
+         )
+
+     async def _generate_stream(self, **request_params) -> AsyncIterator[LanguageModelResponse]:
+         """Generate streaming response."""
+         stream = await self.client.chat.completions.create(**request_params)
+
+         async for chunk in stream:
+             if chunk.choices:
+                 choice = chunk.choices[0]
+
+                 # Handle content delta
+                 if choice.delta.content:
+                     yield LanguageModelResponse(
+                         message=choice.delta.content,
+                         usage=TokenUsage(),
+                         model=chunk.model
+                     )
+
+                 # Handle tool calls
+                 if choice.delta.tool_calls:
+                     for tool_call_delta in choice.delta.tool_calls:
+                         if tool_call_delta.function:
+                             # Note: In streaming, tool calls come in pieces.
+                             # This is a simplified version - a full implementation
+                             # would need to accumulate the complete tool call.
+                             yield LanguageModelResponse(
+                                 message="",
+                                 usage=TokenUsage(),
+                                 model=chunk.model,
+                                 metadata={"tool_call_delta": tool_call_delta}
+                             )
+
+     def convert_messages_to_provider_format(self, messages: List[Message]) -> List[Dict[str, Any]]:
+         """Convert internal messages to OpenAI format."""
+         openai_messages = []
+
+         for message in messages:
+             # Convert role
+             if message.role == Role.SYSTEM:
+                 role = "system"
+             elif message.role == Role.USER:
+                 role = "user"
+             elif message.role == Role.ASSISTANT:
+                 role = "assistant"
+             elif message.role == Role.TOOL:
+                 role = "tool"
+             else:
+                 continue  # Skip unsupported roles
+
+             # Prepare content
+             if isinstance(message.content, str):
+                 content = message.content
+             elif isinstance(message.content, list):
+                 content = self._convert_content_blocks(message.content)
+             else:
+                 content = str(message.content)
+
+             openai_message = {
+                 "role": role,
+                 "content": content
+             }
+
+             # Add name if present
+             if message.name:
+                 openai_message["name"] = message.name
+
+             # Handle tool calls for assistant messages
+             if message.tool_calls and message.role == Role.ASSISTANT:
+                 openai_message["tool_calls"] = [
+                     {
+                         "id": tool_call.id,
+                         "type": "function",
+                         "function": {
+                             "name": tool_call.name,
+                             "arguments": json.dumps(tool_call.arguments)
+                         }
+                     }
+                     for tool_call in message.tool_calls
+                 ]
+
+             # Handle tool call ID for tool messages
+             if message.tool_call_id:
+                 openai_message["tool_call_id"] = message.tool_call_id
+
+             openai_messages.append(openai_message)
+
+         return openai_messages
+
+     def _convert_content_blocks(self, content_blocks: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
+         """Convert content blocks to OpenAI format."""
+         # For simple text-only blocks, return as string
+         if len(content_blocks) == 1 and content_blocks[0].get("type") == "text":
+             return content_blocks[0].get("text", str(content_blocks[0]))
+
+         # For complex content, return as structured blocks
+         converted_blocks = []
+
+         for block in content_blocks:
+             if isinstance(block, str):
+                 converted_blocks.append({"type": "text", "text": block})
+             elif isinstance(block, dict):
+                 block_type = block.get("type", "text")
+
+                 if block_type == "text":
+                     converted_blocks.append({
+                         "type": "text",
+                         "text": block.get("text", str(block))
+                     })
+                 elif block_type == "image_url":
+                     converted_blocks.append({
+                         "type": "image_url",
+                         "image_url": block.get("image_url", {})
+                     })
+                 else:
+                     # Convert unknown blocks to text
+                     converted_blocks.append({
+                         "type": "text",
+                         "text": str(block)
+                     })
+             else:
+                 converted_blocks.append({"type": "text", "text": str(block)})
+
+         return converted_blocks
+
+     def convert_tools_to_provider_format(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+         """Convert tools to OpenAI format."""
+         openai_tools = []
+
+         for tool in tools:
+             if "function" in tool:
+                 # Already in OpenAI format
+                 openai_tools.append(tool)
+             else:
+                 # Convert from simple format
+                 openai_tool = {
+                     "type": "function",
+                     "function": {
+                         "name": tool.get("name", "unknown"),
+                         "description": tool.get("description", ""),
+                         "parameters": tool.get("parameters", tool.get("input_schema", {}))
+                     }
+                 }
+                 openai_tools.append(openai_tool)
+
+         return openai_tools
+
+     def extract_tool_calls_from_response(self, response: Any) -> List[ToolCall]:
+         """Extract tool calls from OpenAI response."""
+         tool_calls = []
+
+         if hasattr(response, "choices") and response.choices:
+             message = response.choices[0].message
+             if hasattr(message, "tool_calls") and message.tool_calls:
+                 for tool_call in message.tool_calls:
+                     if tool_call.type == "function":
+                         try:
+                             arguments = json.loads(tool_call.function.arguments)
+                         except json.JSONDecodeError:
+                             arguments = {"raw_arguments": tool_call.function.arguments}
+
+                         tool_calls.append(ToolCall(
+                             id=tool_call.id,
+                             name=tool_call.function.name,
+                             arguments=arguments
+                         ))
+
+         return tool_calls
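For orientation, here is a minimal sketch of how this class might be driven. It assumes `OPENAI_API_KEY` is set; the `Message`/`Role` constructor fields are inferred from `convert_messages_to_provider_format()` above, and passing a plain string for `llm_model` is an assumption about `LanguageModelType`, not confirmed SDK behavior.

```python
import asyncio

from agnt5.llm.base import Message, Role
from agnt5.llm.openai import OpenAILanguageModel

async def main() -> None:
    # Constructor raises OpenAIError unless the resolved model name starts
    # with "gpt" (see the __init__ validation above).
    model = OpenAILanguageModel(llm_model="gpt-4o-mini")  # string arg is an assumption

    # Field names (role=, content=) are inferred from the conversion code above.
    messages = [Message(role=Role.USER, content="Summarize AGNT5 in one line.")]

    # stream=False (the default) returns a single LanguageModelResponse.
    response = await model.generate(messages, max_tokens=128)
    print(response.message)

asyncio.run(main())
```

With `stream=True`, `generate()` instead returns an async iterator of partial `LanguageModelResponse` chunks; as the inline note in `_generate_stream()` says, tool-call deltas arrive in pieces and are surfaced raw in `metadata["tool_call_delta"]` rather than being accumulated.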