agnt5-0.1.0-cp39-abi3-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. agnt5/__init__.py +307 -0
  2. agnt5/__pycache__/__init__.cpython-311.pyc +0 -0
  3. agnt5/__pycache__/agent.cpython-311.pyc +0 -0
  4. agnt5/__pycache__/context.cpython-311.pyc +0 -0
  5. agnt5/__pycache__/durable.cpython-311.pyc +0 -0
  6. agnt5/__pycache__/extraction.cpython-311.pyc +0 -0
  7. agnt5/__pycache__/memory.cpython-311.pyc +0 -0
  8. agnt5/__pycache__/reflection.cpython-311.pyc +0 -0
  9. agnt5/__pycache__/runtime.cpython-311.pyc +0 -0
  10. agnt5/__pycache__/task.cpython-311.pyc +0 -0
  11. agnt5/__pycache__/tool.cpython-311.pyc +0 -0
  12. agnt5/__pycache__/tracing.cpython-311.pyc +0 -0
  13. agnt5/__pycache__/types.cpython-311.pyc +0 -0
  14. agnt5/__pycache__/workflow.cpython-311.pyc +0 -0
  15. agnt5/_core.abi3.so +0 -0
  16. agnt5/agent.py +1086 -0
  17. agnt5/context.py +406 -0
  18. agnt5/durable.py +1050 -0
  19. agnt5/extraction.py +410 -0
  20. agnt5/llm/__init__.py +179 -0
  21. agnt5/llm/__pycache__/__init__.cpython-311.pyc +0 -0
  22. agnt5/llm/__pycache__/anthropic.cpython-311.pyc +0 -0
  23. agnt5/llm/__pycache__/azure.cpython-311.pyc +0 -0
  24. agnt5/llm/__pycache__/base.cpython-311.pyc +0 -0
  25. agnt5/llm/__pycache__/google.cpython-311.pyc +0 -0
  26. agnt5/llm/__pycache__/mistral.cpython-311.pyc +0 -0
  27. agnt5/llm/__pycache__/openai.cpython-311.pyc +0 -0
  28. agnt5/llm/__pycache__/together.cpython-311.pyc +0 -0
  29. agnt5/llm/anthropic.py +319 -0
  30. agnt5/llm/azure.py +348 -0
  31. agnt5/llm/base.py +315 -0
  32. agnt5/llm/google.py +373 -0
  33. agnt5/llm/mistral.py +330 -0
  34. agnt5/llm/model_registry.py +467 -0
  35. agnt5/llm/models.json +227 -0
  36. agnt5/llm/openai.py +334 -0
  37. agnt5/llm/together.py +377 -0
  38. agnt5/memory.py +746 -0
  39. agnt5/reflection.py +514 -0
  40. agnt5/runtime.py +699 -0
  41. agnt5/task.py +476 -0
  42. agnt5/testing.py +451 -0
  43. agnt5/tool.py +516 -0
  44. agnt5/tracing.py +624 -0
  45. agnt5/types.py +210 -0
  46. agnt5/workflow.py +897 -0
  47. agnt5-0.1.0.dist-info/METADATA +93 -0
  48. agnt5-0.1.0.dist-info/RECORD +49 -0
  49. agnt5-0.1.0.dist-info/WHEEL +4 -0
agnt5/llm/anthropic.py ADDED
@@ -0,0 +1,319 @@
+ """
+ Anthropic Claude integration for AGNT5 SDK.
+
+ Provides integration with Anthropic's Claude models including proper message conversion,
+ tool calling, and streaming support.
+ """
+
+ import json
+ import os
+ from typing import Any, AsyncIterator, Dict, List, Optional, Union
+
+ from .base import (
+     LanguageModel,
+     LanguageModelResponse,
+     LanguageModelType,
+     LLMError,
+     Message,
+     Role,
+     TokenUsage,
+     ToolCall,
+ )
+
+ try:
+     import anthropic
+     from anthropic.types import (
+         ContentBlock,
+         MessageParam,
+         TextBlock,
+         ToolUseBlock,
+     )
+     ANTHROPIC_AVAILABLE = True
+ except ImportError:
+     ANTHROPIC_AVAILABLE = False
+
+
+ class AnthropicError(LLMError):
+     """Anthropic-specific errors."""
+     pass
+
+
+ class AnthropicLanguageModel(LanguageModel):
+     """
+     Anthropic Claude language model implementation.
+
+     Supports all Claude models with proper message conversion, tool calling,
+     and streaming capabilities.
+     """
+
+     def __init__(
+         self,
+         llm_model: LanguageModelType,
+         api_key: Optional[str] = None,
+         system_prompt: Optional[str] = None,
+         **kwargs
+     ):
+         if not ANTHROPIC_AVAILABLE:
+             raise AnthropicError("Anthropic library not installed. Install with: pip install anthropic")
+
+         super().__init__(llm_model, system_prompt, **kwargs)
+
+         # Get API key
+         self.api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
+         if not self.api_key:
+             raise AnthropicError("Anthropic API key required. Set ANTHROPIC_API_KEY or pass api_key parameter")
+
+         # Initialize client
+         self.client = anthropic.AsyncAnthropic(api_key=self.api_key)
+
+         # Validate model is supported by Anthropic
+         if not self.model_name.startswith("claude"):
+             raise AnthropicError(f"Model {self.model_name} is not an Anthropic model")
+
+     async def generate(
+         self,
+         messages: List[Message],
+         tools: Optional[List[Dict[str, Any]]] = None,
+         max_tokens: int = 1024,
+         temperature: float = 0.7,
+         top_p: float = 1.0,
+         stream: bool = False,
+         **kwargs
+     ) -> Union[LanguageModelResponse, AsyncIterator[LanguageModelResponse]]:
+         """Generate response using Anthropic Claude."""
+         try:
+             # Validate and prepare messages
+             self.validate_messages(messages)
+             prepared_messages = self.prepare_system_message(messages)
+
+             # Convert to Anthropic format
+             anthropic_messages = self.convert_messages_to_provider_format(prepared_messages)
+
+             # Extract system message if present
+             system_message = None
+             if anthropic_messages and anthropic_messages[0].get("role") == "system":
+                 system_message = anthropic_messages[0]["content"]
+                 anthropic_messages = anthropic_messages[1:]
+
+             # Prepare request parameters
+             request_params = {
+                 "model": self.model_name,
+                 "messages": anthropic_messages,
+                 "max_tokens": max_tokens,
+                 "temperature": temperature,
+                 "top_p": top_p,
+             }
+
+             if system_message:
+                 request_params["system"] = system_message
+
+             if tools:
+                 request_params["tools"] = self.convert_tools_to_provider_format(tools)
+
+             # Add any additional parameters
+             request_params.update(kwargs)
+
+             if stream:
+                 return self._generate_stream(**request_params)
+             else:
+                 return await self._generate_single(**request_params)
+
+         except anthropic.APIError as e:
+             raise AnthropicError(f"Anthropic API error: {e}", provider="anthropic", model=self.model_name) from e
+         except Exception as e:
+             raise AnthropicError(f"Unexpected error: {e}", provider="anthropic", model=self.model_name) from e
+
+     async def _generate_single(self, **request_params) -> LanguageModelResponse:
+         """Generate a single response."""
+         response = await self.client.messages.create(**request_params)
+
+         # Extract text content and tool calls
+         response_text = ""
+         tool_calls = []
+
+         for content_block in response.content:
+             if isinstance(content_block, TextBlock):
+                 response_text += content_block.text
+             elif isinstance(content_block, ToolUseBlock):
+                 tool_calls.append(ToolCall(
+                     id=content_block.id,
+                     name=content_block.name,
+                     arguments=content_block.input
+                 ))
+
+         # Calculate token usage
+         usage = TokenUsage(
+             prompt_tokens=response.usage.input_tokens,
+             completion_tokens=response.usage.output_tokens,
+             total_tokens=response.usage.input_tokens + response.usage.output_tokens
+         )
+
+         return LanguageModelResponse(
+             message=response_text,
+             usage=usage,
+             tool_calls=tool_calls if tool_calls else None,
+             model=response.model,
+             finish_reason=response.stop_reason,
+             metadata={"response_id": response.id}
+         )
+
+     async def _generate_stream(self, **request_params) -> AsyncIterator[LanguageModelResponse]:
+         """Generate streaming response."""
+         # messages.stream() manages streaming itself; passing stream=True would raise an error
+         async with self.client.messages.stream(**request_params) as stream:
+             async for chunk in stream:
+                 if chunk.type == "content_block_delta":
+                     if hasattr(chunk.delta, "text"):
+                         yield LanguageModelResponse(
+                             message=chunk.delta.text,
+                             usage=TokenUsage(),
+                             model=self.model_name
+                         )
+                 elif chunk.type == "message_start":
+                     # Initial message metadata
+                     yield LanguageModelResponse(
+                         message="",
+                         usage=TokenUsage(),
+                         model=chunk.message.model,
+                         metadata={"message_id": chunk.message.id}
+                     )
+
+     def convert_messages_to_provider_format(self, messages: List[Message]) -> List[Dict[str, Any]]:
+         """Convert internal messages to Anthropic format."""
+         anthropic_messages = []
+
+         for message in messages:
+             # Handle system messages
+             if message.role == Role.SYSTEM:
+                 anthropic_messages.append({
+                     "role": "system",
+                     "content": message.content if isinstance(message.content, str) else str(message.content)
+                 })
+                 continue
+
+             # Convert role
+             if message.role == Role.USER:
+                 role = "user"
+             elif message.role == Role.ASSISTANT:
+                 role = "assistant"
+             elif message.role == Role.TOOL:
+                 # Tool results are handled as user messages with tool_result content
+                 role = "user"
+             else:
+                 continue  # Skip unsupported roles
+
+             # Prepare content
+             if isinstance(message.content, str):
+                 content = message.content
+             elif isinstance(message.content, list):
+                 content = self._convert_content_blocks(message.content)
+             else:
+                 content = str(message.content)
+
+             anthropic_message = {
+                 "role": role,
+                 "content": content
+             }
+
+             # Handle tool calls for assistant messages
+             if message.tool_calls and message.role == Role.ASSISTANT:
+                 if isinstance(content, str):
+                     content = [{"type": "text", "text": content}]
+                 elif not isinstance(content, list):
+                     content = [{"type": "text", "text": str(content)}]
+
+                 # Add tool use blocks
+                 for tool_call in message.tool_calls:
+                     content.append({
+                         "type": "tool_use",
+                         "id": tool_call.id,
+                         "name": tool_call.name,
+                         "input": tool_call.arguments
+                     })
+
+                 anthropic_message["content"] = content
+
+             # Handle tool results
+             if message.tool_call_id:
+                 anthropic_message["content"] = [{
+                     "type": "tool_result",
+                     "tool_use_id": message.tool_call_id,
+                     "content": content if isinstance(content, str) else str(content)
+                 }]
+
+             anthropic_messages.append(anthropic_message)
+
+         return anthropic_messages
+
+     def _convert_content_blocks(self, content_blocks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+         """Convert content blocks to Anthropic format."""
+         converted_blocks = []
+
+         for block in content_blocks:
+             if isinstance(block, str):
+                 converted_blocks.append({"type": "text", "text": block})
+             elif isinstance(block, dict):
+                 block_type = block.get("type", "text")
+
+                 if block_type == "text":
+                     converted_blocks.append({
+                         "type": "text",
+                         "text": block.get("text", str(block))
+                     })
+                 elif block_type == "image":
+                     # Handle image blocks
+                     converted_blocks.append({
+                         "type": "image",
+                         "source": block.get("source", {})
+                     })
+                 else:
+                     # Convert unknown blocks to text
+                     converted_blocks.append({
+                         "type": "text",
+                         "text": str(block)
+                     })
+             else:
+                 converted_blocks.append({"type": "text", "text": str(block)})
+
+         return converted_blocks
+
+     def convert_tools_to_provider_format(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+         """Convert tools to Anthropic format."""
+         anthropic_tools = []
+
+         for tool in tools:
+             if "function" in tool:
+                 # OpenAI-style tool format
+                 func = tool["function"]
+                 anthropic_tool = {
+                     "name": func["name"],
+                     "description": func.get("description", ""),
+                     "input_schema": func.get("parameters", {})
+                 }
+             else:
+                 # Direct Anthropic format or simple format
+                 anthropic_tool = {
+                     "name": tool.get("name", "unknown"),
+                     "description": tool.get("description", ""),
+                     "input_schema": tool.get("input_schema", tool.get("parameters", {}))
+                 }
+
+             anthropic_tools.append(anthropic_tool)
+
+         return anthropic_tools
+
+     def extract_tool_calls_from_response(self, response: Any) -> List[ToolCall]:
+         """Extract tool calls from Anthropic response."""
+         tool_calls = []
+
+         if hasattr(response, "content"):
+             for content_block in response.content:
+                 if isinstance(content_block, ToolUseBlock):
+                     tool_calls.append(ToolCall(
+                         id=content_block.id,
+                         name=content_block.name,
+                         arguments=content_block.input
+                     ))
+
+         return tool_calls
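
For orientation, a minimal usage sketch of the class added above. It is not taken from the package itself; the model identifier, the `Message`/`Role` constructor usage, and passing a plain string as `llm_model` are assumptions based on the signatures visible in this diff.

import asyncio

from agnt5.llm.anthropic import AnthropicLanguageModel
from agnt5.llm.base import Message, Role

async def main() -> None:
    # Assumptions: LanguageModelType accepts a Claude model id as a plain
    # string, and ANTHROPIC_API_KEY is set in the environment.
    model = AnthropicLanguageModel(llm_model="claude-3-5-sonnet-20241022")
    response = await model.generate(
        messages=[Message(role=Role.USER, content="Hello, Claude!")],
        max_tokens=256,
    )
    print(response.message)

asyncio.run(main())

With `stream=True`, `generate()` instead returns an async iterator of partial `LanguageModelResponse` chunks, so the call would be iterated with `async for` rather than awaited for a single result.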
agnt5/llm/azure.py ADDED
@@ -0,0 +1,348 @@
+ """
+ Azure OpenAI integration for AGNT5 SDK.
+
+ Provides integration with Azure OpenAI Service with proper message conversion,
+ tool calling, and streaming support using OpenAI-compatible API.
+ """
+
+ import json
+ import os
+ from typing import Any, AsyncIterator, Dict, List, Optional, Union
+
+ from .base import (
+     LanguageModel,
+     LanguageModelResponse,
+     LanguageModelType,
+     LLMError,
+     Message,
+     Role,
+     TokenUsage,
+     ToolCall,
+ )
+
+ try:
+     import openai
+     OPENAI_AVAILABLE = True
+ except ImportError:
+     OPENAI_AVAILABLE = False
+
+
+ class AzureOpenAIError(LLMError):
+     """Azure OpenAI-specific errors."""
+     pass
+
+
+ class AzureOpenAILanguageModel(LanguageModel):
+     """
+     Azure OpenAI language model implementation.
+
+     Supports all Azure OpenAI models with proper message conversion, tool calling,
+     and streaming capabilities. Requires Azure-specific configuration.
+     """
+
+     def __init__(
+         self,
+         llm_model: LanguageModelType,
+         api_key: Optional[str] = None,
+         azure_endpoint: Optional[str] = None,
+         api_version: str = "2024-02-15-preview",
+         deployment_name: Optional[str] = None,
+         system_prompt: Optional[str] = None,
+         **kwargs
+     ):
+         if not OPENAI_AVAILABLE:
+             raise AzureOpenAIError("OpenAI library not installed. Install with: pip install openai")
+
+         super().__init__(llm_model, system_prompt, **kwargs)
+
+         # Get Azure configuration
+         self.api_key = api_key or os.getenv("AZURE_OPENAI_API_KEY")
+         self.azure_endpoint = azure_endpoint or os.getenv("AZURE_OPENAI_ENDPOINT")
+         self.api_version = api_version
+         self.deployment_name = deployment_name or os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME") or self.model_name
+
+         if not self.api_key:
+             raise AzureOpenAIError("Azure OpenAI API key required. Set AZURE_OPENAI_API_KEY or pass api_key parameter")
+
+         if not self.azure_endpoint:
+             raise AzureOpenAIError("Azure OpenAI endpoint required. Set AZURE_OPENAI_ENDPOINT or pass azure_endpoint parameter")
+
+         # Initialize Azure OpenAI client
+         self.client = openai.AsyncAzureOpenAI(
+             api_key=self.api_key,
+             azure_endpoint=self.azure_endpoint,
+             api_version=self.api_version,
+             **kwargs
+         )
+
+     @property
+     def provider_name(self) -> str:
+         return "azure_openai"
+
+     async def generate(
+         self,
+         messages: List[Message],
+         tools: Optional[List[Dict[str, Any]]] = None,
+         max_tokens: int = 1024,
+         temperature: float = 0.7,
+         top_p: float = 1.0,
+         stream: bool = False,
+         **kwargs
+     ) -> Union[LanguageModelResponse, AsyncIterator[LanguageModelResponse]]:
+         """Generate response using Azure OpenAI."""
+         try:
+             # Validate and prepare messages
+             self.validate_messages(messages)
+             prepared_messages = self.prepare_system_message(messages)
+
+             # Convert to OpenAI format
+             openai_messages = self.convert_messages_to_provider_format(prepared_messages)
+
+             # Prepare request parameters (use deployment name instead of model)
+             request_params = {
+                 "model": self.deployment_name,  # Azure uses deployment name
+                 "messages": openai_messages,
+                 "max_tokens": max_tokens,
+                 "temperature": temperature,
+                 "top_p": top_p,
+                 "stream": stream,
+             }
+
+             if tools:
+                 request_params["tools"] = self.convert_tools_to_provider_format(tools)
+                 request_params["tool_choice"] = "auto"
+
+             # Add any additional parameters
+             request_params.update(kwargs)
+
+             if stream:
+                 return self._generate_stream(**request_params)
+             else:
+                 return await self._generate_single(**request_params)
+
+         except openai.APIError as e:
+             # Handle Azure-specific API errors
+             error_msg = str(e)
+             if "authentication" in error_msg.lower() or "unauthorized" in error_msg.lower():
+                 raise AzureOpenAIError(f"Azure OpenAI authentication error: {e}", provider="azure_openai", model=self.model_name) from e
+             elif "rate_limit" in error_msg.lower() or "quota" in error_msg.lower():
+                 raise AzureOpenAIError(f"Azure OpenAI quota/rate limit error: {e}", provider="azure_openai", model=self.model_name) from e
+             elif "deployment" in error_msg.lower():
+                 raise AzureOpenAIError(f"Azure OpenAI deployment error: {e}. Check deployment name: {self.deployment_name}", provider="azure_openai", model=self.model_name) from e
+             else:
+                 raise AzureOpenAIError(f"Azure OpenAI API error: {e}", provider="azure_openai", model=self.model_name) from e
+         except Exception as e:
+             raise AzureOpenAIError(f"Unexpected error: {e}", provider="azure_openai", model=self.model_name) from e
+
+     async def _generate_single(self, **request_params) -> LanguageModelResponse:
+         """Generate a single response."""
+         response = await self.client.chat.completions.create(**request_params)
+
+         message = response.choices[0].message
+
+         # Extract text content
+         response_text = message.content or ""
+
+         # Extract tool calls
+         tool_calls = []
+         if message.tool_calls:
+             for tool_call in message.tool_calls:
+                 if tool_call.type == "function":
+                     try:
+                         arguments = json.loads(tool_call.function.arguments)
+                     except json.JSONDecodeError:
+                         arguments = {"raw_arguments": tool_call.function.arguments}
+
+                     tool_calls.append(ToolCall(
+                         id=tool_call.id,
+                         name=tool_call.function.name,
+                         arguments=arguments
+                     ))
+
+         # Calculate token usage
+         usage = TokenUsage()
+         if response.usage:
+             usage = TokenUsage(
+                 prompt_tokens=response.usage.prompt_tokens,
+                 completion_tokens=response.usage.completion_tokens,
+                 total_tokens=response.usage.total_tokens
+             )
+
+         return LanguageModelResponse(
+             message=response_text,
+             usage=usage,
+             tool_calls=tool_calls if tool_calls else None,
+             model=response.model,
+             finish_reason=response.choices[0].finish_reason,
+             metadata={
+                 "response_id": response.id,
+                 "deployment_name": self.deployment_name,
+                 "azure_endpoint": self.azure_endpoint
+             }
+         )
+
+     async def _generate_stream(self, **request_params) -> AsyncIterator[LanguageModelResponse]:
+         """Generate streaming response."""
+         stream = await self.client.chat.completions.create(**request_params)
+
+         async for chunk in stream:
+             if chunk.choices:
+                 choice = chunk.choices[0]
+
+                 # Handle content delta
+                 if choice.delta.content:
+                     yield LanguageModelResponse(
+                         message=choice.delta.content,
+                         usage=TokenUsage(),
+                         model=chunk.model or self.deployment_name
+                     )
+
+                 # Handle tool calls
+                 if choice.delta.tool_calls:
+                     for tool_call_delta in choice.delta.tool_calls:
+                         if tool_call_delta.function:
+                             # Note: in streaming, tool calls come in pieces.
+                             # This is a simplified version - a full implementation
+                             # would accumulate the complete tool call.
+                             yield LanguageModelResponse(
+                                 message="",
+                                 usage=TokenUsage(),
+                                 model=chunk.model or self.deployment_name,
+                                 metadata={"tool_call_delta": tool_call_delta}
+                             )
+
+     def convert_messages_to_provider_format(self, messages: List[Message]) -> List[Dict[str, Any]]:
+         """Convert internal messages to OpenAI format."""
+         openai_messages = []
+
+         for message in messages:
+             # Convert role
+             if message.role == Role.SYSTEM:
+                 role = "system"
+             elif message.role == Role.USER:
+                 role = "user"
+             elif message.role == Role.ASSISTANT:
+                 role = "assistant"
+             elif message.role == Role.TOOL:
+                 role = "tool"
+             else:
+                 continue  # Skip unsupported roles
+
+             # Prepare content
+             if isinstance(message.content, str):
+                 content = message.content
+             elif isinstance(message.content, list):
+                 content = self._convert_content_blocks(message.content)
+             else:
+                 content = str(message.content)
+
+             openai_message = {
+                 "role": role,
+                 "content": content
+             }
+
+             # Add name if present
+             if message.name:
+                 openai_message["name"] = message.name
+
+             # Handle tool calls for assistant messages
+             if message.tool_calls and message.role == Role.ASSISTANT:
+                 openai_message["tool_calls"] = [
+                     {
+                         "id": tool_call.id,
+                         "type": "function",
+                         "function": {
+                             "name": tool_call.name,
+                             "arguments": json.dumps(tool_call.arguments)
+                         }
+                     }
+                     for tool_call in message.tool_calls
+                 ]
+
+             # Handle tool call ID for tool messages
+             if message.tool_call_id:
+                 openai_message["tool_call_id"] = message.tool_call_id
+
+             openai_messages.append(openai_message)
+
+         return openai_messages
+
+     def _convert_content_blocks(self, content_blocks: List[Dict[str, Any]]) -> Union[str, List[Dict[str, Any]]]:
+         """Convert content blocks to OpenAI format."""
+         # For simple text-only blocks, return as string
+         if len(content_blocks) == 1 and content_blocks[0].get("type") == "text":
+             return content_blocks[0].get("text", str(content_blocks[0]))
+
+         # For complex content, return as structured blocks
+         converted_blocks = []
+
+         for block in content_blocks:
+             if isinstance(block, str):
+                 converted_blocks.append({"type": "text", "text": block})
+             elif isinstance(block, dict):
+                 block_type = block.get("type", "text")
+
+                 if block_type == "text":
+                     converted_blocks.append({
+                         "type": "text",
+                         "text": block.get("text", str(block))
+                     })
+                 elif block_type == "image_url":
+                     converted_blocks.append({
+                         "type": "image_url",
+                         "image_url": block.get("image_url", {})
+                     })
+                 else:
+                     # Convert unknown blocks to text
+                     converted_blocks.append({
+                         "type": "text",
+                         "text": str(block)
+                     })
+             else:
+                 converted_blocks.append({"type": "text", "text": str(block)})
+
+         return converted_blocks
+
+     def convert_tools_to_provider_format(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+         """Convert tools to OpenAI format."""
+         openai_tools = []
+
+         for tool in tools:
+             if "function" in tool:
+                 # Already in OpenAI format
+                 openai_tools.append(tool)
+             else:
+                 # Convert from simple format
+                 openai_tool = {
+                     "type": "function",
+                     "function": {
+                         "name": tool.get("name", "unknown"),
+                         "description": tool.get("description", ""),
+                         "parameters": tool.get("parameters", tool.get("input_schema", {}))
+                     }
+                 }
+                 openai_tools.append(openai_tool)
+
+         return openai_tools
+
+     def extract_tool_calls_from_response(self, response: Any) -> List[ToolCall]:
+         """Extract tool calls from Azure OpenAI response."""
+         tool_calls = []
+
+         if hasattr(response, "choices") and response.choices:
+             message = response.choices[0].message
+             if hasattr(message, "tool_calls") and message.tool_calls:
+                 for tool_call in message.tool_calls:
+                     if tool_call.type == "function":
+                         try:
+                             arguments = json.loads(tool_call.function.arguments)
+                         except json.JSONDecodeError:
+                             arguments = {"raw_arguments": tool_call.function.arguments}
+
+                         tool_calls.append(ToolCall(
+                             id=tool_call.id,
+                             name=tool_call.function.name,
+                             arguments=arguments
+                         ))
+
+         return tool_calls
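
As with the Anthropic module, a hedged sketch of how the Azure class might be configured. The endpoint and deployment names below are placeholders, and treating `llm_model` as a plain string is an assumption; the class also falls back to the AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and AZURE_OPENAI_DEPLOYMENT_NAME environment variables, as the constructor above shows.

import asyncio

from agnt5.llm.azure import AzureOpenAILanguageModel
from agnt5.llm.base import Message, Role

async def main() -> None:
    # Placeholders: replace with your Azure resource endpoint and
    # deployment; the request is sent against the deployment name,
    # not the underlying model name.
    model = AzureOpenAILanguageModel(
        llm_model="gpt-4o",
        azure_endpoint="https://example-resource.openai.azure.com",
        deployment_name="example-gpt4o-deployment",
    )
    response = await model.generate(
        messages=[Message(role=Role.USER, content="Hello from Azure!")],
    )
    print(response.message)

asyncio.run(main())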