proxilion-0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- proxilion/__init__.py +136 -0
- proxilion/audit/__init__.py +133 -0
- proxilion/audit/base_exporters.py +527 -0
- proxilion/audit/compliance/__init__.py +130 -0
- proxilion/audit/compliance/base.py +457 -0
- proxilion/audit/compliance/eu_ai_act.py +603 -0
- proxilion/audit/compliance/iso27001.py +544 -0
- proxilion/audit/compliance/soc2.py +491 -0
- proxilion/audit/events.py +493 -0
- proxilion/audit/explainability.py +1173 -0
- proxilion/audit/exporters/__init__.py +58 -0
- proxilion/audit/exporters/aws_s3.py +636 -0
- proxilion/audit/exporters/azure_storage.py +608 -0
- proxilion/audit/exporters/cloud_base.py +468 -0
- proxilion/audit/exporters/gcp_storage.py +570 -0
- proxilion/audit/exporters/multi_exporter.py +498 -0
- proxilion/audit/hash_chain.py +652 -0
- proxilion/audit/logger.py +543 -0
- proxilion/caching/__init__.py +49 -0
- proxilion/caching/tool_cache.py +633 -0
- proxilion/context/__init__.py +73 -0
- proxilion/context/context_window.py +556 -0
- proxilion/context/message_history.py +505 -0
- proxilion/context/session.py +735 -0
- proxilion/contrib/__init__.py +51 -0
- proxilion/contrib/anthropic.py +609 -0
- proxilion/contrib/google.py +1012 -0
- proxilion/contrib/langchain.py +641 -0
- proxilion/contrib/mcp.py +893 -0
- proxilion/contrib/openai.py +646 -0
- proxilion/core.py +3058 -0
- proxilion/decorators.py +966 -0
- proxilion/engines/__init__.py +287 -0
- proxilion/engines/base.py +266 -0
- proxilion/engines/casbin_engine.py +412 -0
- proxilion/engines/opa_engine.py +493 -0
- proxilion/engines/simple.py +437 -0
- proxilion/exceptions.py +887 -0
- proxilion/guards/__init__.py +54 -0
- proxilion/guards/input_guard.py +522 -0
- proxilion/guards/output_guard.py +634 -0
- proxilion/observability/__init__.py +198 -0
- proxilion/observability/cost_tracker.py +866 -0
- proxilion/observability/hooks.py +683 -0
- proxilion/observability/metrics.py +798 -0
- proxilion/observability/session_cost_tracker.py +1063 -0
- proxilion/policies/__init__.py +67 -0
- proxilion/policies/base.py +304 -0
- proxilion/policies/builtin.py +486 -0
- proxilion/policies/registry.py +376 -0
- proxilion/providers/__init__.py +201 -0
- proxilion/providers/adapter.py +468 -0
- proxilion/providers/anthropic_adapter.py +330 -0
- proxilion/providers/gemini_adapter.py +391 -0
- proxilion/providers/openai_adapter.py +294 -0
- proxilion/py.typed +0 -0
- proxilion/resilience/__init__.py +81 -0
- proxilion/resilience/degradation.py +615 -0
- proxilion/resilience/fallback.py +555 -0
- proxilion/resilience/retry.py +554 -0
- proxilion/scheduling/__init__.py +57 -0
- proxilion/scheduling/priority_queue.py +419 -0
- proxilion/scheduling/scheduler.py +459 -0
- proxilion/security/__init__.py +244 -0
- proxilion/security/agent_trust.py +968 -0
- proxilion/security/behavioral_drift.py +794 -0
- proxilion/security/cascade_protection.py +869 -0
- proxilion/security/circuit_breaker.py +428 -0
- proxilion/security/cost_limiter.py +690 -0
- proxilion/security/idor_protection.py +460 -0
- proxilion/security/intent_capsule.py +849 -0
- proxilion/security/intent_validator.py +495 -0
- proxilion/security/memory_integrity.py +767 -0
- proxilion/security/rate_limiter.py +509 -0
- proxilion/security/scope_enforcer.py +680 -0
- proxilion/security/sequence_validator.py +636 -0
- proxilion/security/trust_boundaries.py +784 -0
- proxilion/streaming/__init__.py +70 -0
- proxilion/streaming/detector.py +761 -0
- proxilion/streaming/transformer.py +674 -0
- proxilion/timeouts/__init__.py +55 -0
- proxilion/timeouts/decorators.py +477 -0
- proxilion/timeouts/manager.py +545 -0
- proxilion/tools/__init__.py +69 -0
- proxilion/tools/decorators.py +493 -0
- proxilion/tools/registry.py +732 -0
- proxilion/types.py +339 -0
- proxilion/validation/__init__.py +93 -0
- proxilion/validation/pydantic_schema.py +351 -0
- proxilion/validation/schema.py +651 -0
- proxilion-0.0.1.dist-info/METADATA +872 -0
- proxilion-0.0.1.dist-info/RECORD +94 -0
- proxilion-0.0.1.dist-info/WHEEL +4 -0
- proxilion-0.0.1.dist-info/licenses/LICENSE +21 -0
proxilion/providers/gemini_adapter.py ADDED
@@ -0,0 +1,391 @@
"""
Google Gemini adapter for Proxilion.

Provides translation between Google Gemini/Vertex AI's function calling format
and Proxilion's unified format.
"""

from __future__ import annotations

import logging
from typing import Any

from proxilion.providers.adapter import (
    BaseAdapter,
    Provider,
    UnifiedResponse,
    UnifiedToolCall,
)

logger = logging.getLogger(__name__)


class GeminiAdapter(BaseAdapter):
    """
    Adapter for Google Gemini / Vertex AI API.

    Handles function calls from Gemini's GenerateContent API,
    including parallel function calls.

    Example:
        >>> from vertexai.generative_models import GenerativeModel
        >>> from proxilion.providers import GeminiAdapter
        >>>
        >>> adapter = GeminiAdapter()
        >>> model = GenerativeModel("gemini-1.5-pro")
        >>>
        >>> response = model.generate_content(
        ...     "Get weather",
        ...     tools=[...],
        ... )
        >>>
        >>> # Extract tool calls
        >>> tool_calls = adapter.extract_tool_calls(response)
        >>> for call in tool_calls:
        ...     print(f"Tool: {call.name}, Args: {call.arguments}")
        >>>
        >>> # Format result for continuation
        >>> result_part = adapter.format_tool_result(tool_calls[0], {"temp": 72})
    """

    @property
    def provider(self) -> Provider:
        """Get the provider type."""
        return Provider.GEMINI

    def extract_tool_calls(self, response: Any) -> list[UnifiedToolCall]:
        """
        Extract tool calls from Gemini response.

        Gemini returns function_call parts within candidates.

        Args:
            response: Gemini GenerateContentResponse.

        Returns:
            List of unified tool calls.
        """
        # Handle dictionary form
        if isinstance(response, dict):
            return self._extract_from_dict(response)

        # Handle object form
        candidates = getattr(response, "candidates", None)
        if not candidates:
            return []

        tool_calls = []
        for candidate in candidates:
            content = getattr(candidate, "content", None)
            if not content:
                continue

            parts = getattr(content, "parts", None)
            if not parts:
                continue

            for part in parts:
                # Check for function_call attribute
                function_call = getattr(part, "function_call", None)
                if function_call:
                    tool_calls.append(UnifiedToolCall.from_gemini(function_call))

        return tool_calls

    def _extract_from_dict(self, response: dict) -> list[UnifiedToolCall]:
        """Extract tool calls from dictionary response."""
        candidates = response.get("candidates", [])
        tool_calls = []

        for candidate in candidates:
            content = candidate.get("content", {})
            parts = content.get("parts", [])

            for part in parts:
                if "functionCall" in part:
                    # Gemini API returns camelCase
                    fc = part["functionCall"]
                    tool_calls.append(UnifiedToolCall.from_gemini({
                        "name": fc.get("name"),
                        "args": fc.get("args", {}),
                    }))
                elif "function_call" in part:
                    # Handle snake_case variant
                    tool_calls.append(UnifiedToolCall.from_gemini(part["function_call"]))

        return tool_calls

    def extract_response(self, response: Any) -> UnifiedResponse:
        """
        Extract full response from Gemini response.

        Args:
            response: Gemini GenerateContentResponse.

        Returns:
            UnifiedResponse instance.
        """
        tool_calls = self.extract_tool_calls(response)

        # Handle dictionary form
        if isinstance(response, dict):
            text_content = self._extract_text_from_dict(response)
            finish_reason = self._extract_finish_reason_from_dict(response)

            usage = response.get("usageMetadata", {})
            usage_dict = {
                "input_tokens": usage.get("promptTokenCount", 0),
                "output_tokens": usage.get("candidatesTokenCount", 0),
                "total_tokens": usage.get("totalTokenCount", 0),
            }

            return UnifiedResponse(
                content=text_content,
                tool_calls=tool_calls,
                finish_reason=finish_reason,
                provider=Provider.GEMINI,
                usage=usage_dict,
                raw=response,
            )

        # Handle object form
        text_content = self._extract_text_from_object(response)
        finish_reason = self._extract_finish_reason_from_object(response)

        usage_dict = {}
        usage_metadata = getattr(response, "usage_metadata", None)
        if usage_metadata:
            usage_dict = {
                "input_tokens": getattr(usage_metadata, "prompt_token_count", 0),
                "output_tokens": getattr(usage_metadata, "candidates_token_count", 0),
                "total_tokens": getattr(usage_metadata, "total_token_count", 0),
            }

        return UnifiedResponse(
            content=text_content,
            tool_calls=tool_calls,
            finish_reason=finish_reason,
            provider=Provider.GEMINI,
            usage=usage_dict,
            raw=response,
        )

    def _extract_text_from_dict(self, response: dict) -> str | None:
        """Extract text content from dictionary response."""
        candidates = response.get("candidates", [])
        text_parts = []

        for candidate in candidates:
            content = candidate.get("content", {})
            parts = content.get("parts", [])

            for part in parts:
                if "text" in part:
                    text_parts.append(part["text"])

        return "".join(text_parts) if text_parts else None

    def _extract_text_from_object(self, response: Any) -> str | None:
        """Extract text content from object response."""
        candidates = getattr(response, "candidates", [])
        text_parts = []

        for candidate in candidates:
            content = getattr(candidate, "content", None)
            if not content:
                continue

            parts = getattr(content, "parts", [])
            for part in parts:
                text = getattr(part, "text", None)
                if text:
                    text_parts.append(text)

        return "".join(text_parts) if text_parts else None

    def _extract_finish_reason_from_dict(self, response: dict) -> str | None:
        """Extract finish reason from dictionary response."""
        candidates = response.get("candidates", [])
        if candidates:
            return candidates[0].get("finishReason")
        return None

    def _extract_finish_reason_from_object(self, response: Any) -> str | None:
        """Extract finish reason from object response."""
        candidates = getattr(response, "candidates", [])
        if candidates:
            return getattr(candidates[0], "finish_reason", None)
        return None

    def format_tool_result(
        self,
        tool_call: UnifiedToolCall,
        result: Any,
        is_error: bool = False,
    ) -> dict[str, Any]:
        """
        Format tool result for Gemini API.

        Creates a function_response part for continuing the conversation.

        Args:
            tool_call: The original tool call.
            result: The result to send back.
            is_error: Whether the result represents an error.

        Returns:
            Dictionary with function_response part.

        Example:
            >>> result_part = adapter.format_tool_result(
            ...     tool_call,
            ...     {"temperature": 72}
            ... )
            >>> # Use with Vertex AI
            >>> from vertexai.generative_models import Part
            >>> response_part = Part.from_function_response(
            ...     name=tool_call.name,
            ...     response=result_part["function_response"]["response"],
            ... )
        """
        if is_error:
            response_data = {"error": self._serialize_result(result)}
        else:
            # Gemini expects dict response
            response_data = result if isinstance(result, dict) else {"result": result}

        return {
            "function_response": {
                "name": tool_call.name,
                "response": response_data,
            }
        }

    def format_tools(
        self,
        tools: list[Any],
    ) -> list[dict[str, Any]]:
        """
        Format tool definitions for Gemini API.

        Converts ToolDefinition objects to Gemini's function declaration format.

        Args:
            tools: List of ToolDefinition objects.

        Returns:
            List of tool definitions in Gemini format.

        Example:
            >>> gemini_tools = adapter.format_tools(registry.list_enabled())
            >>> from vertexai.generative_models import Tool, FunctionDeclaration
            >>> tool = Tool(function_declarations=[
            ...     FunctionDeclaration(**td) for td in gemini_tools
            ... ])
        """
        formatted = []
        for tool in tools:
            # Check if it's a ToolDefinition
            if hasattr(tool, "to_gemini_format"):
                formatted.append(tool.to_gemini_format())
            elif hasattr(tool, "name") and hasattr(tool, "description"):
                # Manual conversion
                formatted.append({
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": getattr(tool, "parameters", {
                        "type": "object",
                        "properties": {},
                    }),
                })
            elif isinstance(tool, dict):
                # Already in correct format
                formatted.append(tool)

        return formatted

    def format_content_with_results(
        self,
        results: list[tuple[UnifiedToolCall, Any, bool]],
    ) -> list[dict[str, Any]]:
        """
        Format multiple tool results as content parts.

        Args:
            results: List of (tool_call, result, is_error) tuples.

        Returns:
            List of function_response parts.

        Example:
            >>> results = [
            ...     (call1, {"temp": 72}, False),
            ...     (call2, "Error", True),
            ... ]
            >>> response_parts = adapter.format_content_with_results(results)
        """
        return [
            self.format_tool_result(tc, result, is_error)
            for tc, result, is_error in results
        ]

    def create_vertex_tool(self, tools: list[Any]) -> Any:
        """
        Create a Vertex AI Tool object from tool definitions.

        Requires vertexai library to be installed.

        Args:
            tools: List of ToolDefinition objects.

        Returns:
            Vertex AI Tool object.

        Raises:
            ImportError: If vertexai is not installed.
        """
        try:
            from vertexai.generative_models import FunctionDeclaration, Tool
        except ImportError:
            raise ImportError(
                "vertexai library required. Install with: pip install google-cloud-aiplatform"
            ) from None

        formatted = self.format_tools(tools)
        declarations = [FunctionDeclaration(**fd) for fd in formatted]
        return Tool(function_declarations=declarations)

    def create_function_response_part(
        self,
        tool_call: UnifiedToolCall,
        result: Any,
        is_error: bool = False,
    ) -> Any:
        """
        Create a Vertex AI Part object for function response.

        Requires vertexai library to be installed.

        Args:
            tool_call: The original tool call.
            result: The result to send back.
            is_error: Whether the result represents an error.

        Returns:
            Vertex AI Part object.

        Raises:
            ImportError: If vertexai is not installed.
        """
        try:
            from vertexai.generative_models import Part
        except ImportError:
            raise ImportError(
                "vertexai library required. Install with: pip install google-cloud-aiplatform"
            ) from None

        formatted = self.format_tool_result(tool_call, result, is_error)
        return Part.from_function_response(
            name=formatted["function_response"]["name"],
            response=formatted["function_response"]["response"],
        )
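The dict branch above can be exercised without any Google SDK installed. A minimal round-trip sketch, assuming only that the wheel is installed; the payload below is hand-written to mirror the camelCase REST shape that _extract_from_dict expects, not a real API reply:

from proxilion.providers import GeminiAdapter

adapter = GeminiAdapter()

# Hypothetical GenerateContent response in dict form: one functionCall part.
response = {
    "candidates": [{
        "content": {"parts": [
            {"functionCall": {"name": "get_weather", "args": {"city": "Austin"}}},
        ]},
        "finishReason": "STOP",
    }],
    "usageMetadata": {"promptTokenCount": 12, "candidatesTokenCount": 5, "totalTokenCount": 17},
}

unified = adapter.extract_response(response)  # normalized content/tool_calls/usage
for call in unified.tool_calls:
    # Run the tool, then wrap its output as a function_response part.
    part = adapter.format_tool_result(call, {"temp": 72})
    # part == {"function_response": {"name": "get_weather", "response": {"temp": 72}}}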
proxilion/providers/openai_adapter.py ADDED
@@ -0,0 +1,294 @@
"""
OpenAI adapter for Proxilion.

Provides translation between OpenAI's tool calling format
and Proxilion's unified format.
"""

from __future__ import annotations

import json
import logging
from typing import Any

from proxilion.providers.adapter import (
    BaseAdapter,
    Provider,
    UnifiedResponse,
    UnifiedToolCall,
)

logger = logging.getLogger(__name__)


class OpenAIAdapter(BaseAdapter):
    """
    Adapter for OpenAI API.

    Handles tool calls from OpenAI's chat completion API,
    including function calling and parallel tool calls.

    Example:
        >>> from openai import OpenAI
        >>> from proxilion.providers import OpenAIAdapter
        >>>
        >>> adapter = OpenAIAdapter()
        >>> client = OpenAI()
        >>>
        >>> response = client.chat.completions.create(
        ...     model="gpt-4o",
        ...     messages=[{"role": "user", "content": "Get weather"}],
        ...     tools=[...],
        ... )
        >>>
        >>> # Extract tool calls
        >>> tool_calls = adapter.extract_tool_calls(response)
        >>> for call in tool_calls:
        ...     print(f"Tool: {call.name}, Args: {call.arguments}")
        >>>
        >>> # Format result for continuation
        >>> result_msg = adapter.format_tool_result(tool_calls[0], {"temp": 72})
    """

    @property
    def provider(self) -> Provider:
        """Get the provider type."""
        return Provider.OPENAI

    def extract_tool_calls(self, response: Any) -> list[UnifiedToolCall]:
        """
        Extract tool calls from OpenAI response.

        Handles both ChatCompletion objects and dictionaries.

        Args:
            response: OpenAI ChatCompletion response.

        Returns:
            List of unified tool calls.
        """
        # Handle dictionary form (e.g., from JSON API response)
        if isinstance(response, dict):
            return self._extract_from_dict(response)

        # Handle object form (e.g., from openai library)
        if not hasattr(response, "choices") or not response.choices:
            return []

        choice = response.choices[0]
        message = getattr(choice, "message", None)
        if message is None:
            return []

        tool_calls = getattr(message, "tool_calls", None)
        if not tool_calls:
            return []

        return [UnifiedToolCall.from_openai(tc) for tc in tool_calls]

    def _extract_from_dict(self, response: dict) -> list[UnifiedToolCall]:
        """Extract tool calls from dictionary response."""
        choices = response.get("choices", [])
        if not choices:
            return []

        message = choices[0].get("message", {})
        tool_calls = message.get("tool_calls") or []

        return [UnifiedToolCall.from_openai(tc) for tc in tool_calls]

    def extract_response(self, response: Any) -> UnifiedResponse:
        """
        Extract full response from OpenAI response.

        Args:
            response: OpenAI ChatCompletion response.

        Returns:
            UnifiedResponse instance.
        """
        tool_calls = self.extract_tool_calls(response)

        # Handle dictionary form
        if isinstance(response, dict):
            choices = response.get("choices", [])
            if choices:
                message = choices[0].get("message", {})
                content = message.get("content")
                finish_reason = choices[0].get("finish_reason")
            else:
                content = None
                finish_reason = None

            usage = response.get("usage", {})
            usage_dict = {
                "input_tokens": usage.get("prompt_tokens", 0),
                "output_tokens": usage.get("completion_tokens", 0),
                "total_tokens": usage.get("total_tokens", 0),
            }

            return UnifiedResponse(
                content=content,
                tool_calls=tool_calls,
                finish_reason=finish_reason,
                provider=Provider.OPENAI,
                usage=usage_dict,
                raw=response,
            )

        # Handle object form
        content = None
        finish_reason = None
        usage_dict = {}

        if hasattr(response, "choices") and response.choices:
            choice = response.choices[0]
            message = getattr(choice, "message", None)
            if message:
                content = getattr(message, "content", None)
            finish_reason = getattr(choice, "finish_reason", None)

        if hasattr(response, "usage") and response.usage:
            usage = response.usage
            usage_dict = {
                "input_tokens": getattr(usage, "prompt_tokens", 0),
                "output_tokens": getattr(usage, "completion_tokens", 0),
                "total_tokens": getattr(usage, "total_tokens", 0),
            }

        return UnifiedResponse(
            content=content,
            tool_calls=tool_calls,
            finish_reason=finish_reason,
            provider=Provider.OPENAI,
            usage=usage_dict,
            raw=response,
        )

    def format_tool_result(
        self,
        tool_call: UnifiedToolCall,
        result: Any,
        is_error: bool = False,
    ) -> dict[str, Any]:
        """
        Format tool result for OpenAI API.

        Creates a message suitable for adding to the messages list
        when continuing a conversation with tool results.

        Args:
            tool_call: The original tool call.
            result: The result to send back.
            is_error: Whether the result represents an error.

        Returns:
            Dictionary with role, tool_call_id, and content.

        Example:
            >>> result_msg = adapter.format_tool_result(
            ...     tool_call,
            ...     {"temperature": 72, "conditions": "sunny"}
            ... )
            >>> messages.append(result_msg)
        """
        content = self._serialize_result(result)

        if is_error:
            content = json.dumps({"error": content})

        return {
            "role": "tool",
            "tool_call_id": tool_call.id,
            "content": content,
        }

    def format_tools(
        self,
        tools: list[Any],
    ) -> list[dict[str, Any]]:
        """
        Format tool definitions for OpenAI API.

        Converts ToolDefinition objects to OpenAI's function format.

        Args:
            tools: List of ToolDefinition objects.

        Returns:
            List of tool definitions in OpenAI format.

        Example:
            >>> openai_tools = adapter.format_tools(registry.list_enabled())
            >>> response = client.chat.completions.create(
            ...     model="gpt-4o",
            ...     messages=[...],
            ...     tools=openai_tools,
            ... )
        """
        formatted = []
        for tool in tools:
            # Check if it's a ToolDefinition
            if hasattr(tool, "to_openai_format"):
                formatted.append(tool.to_openai_format())
            elif hasattr(tool, "name") and hasattr(tool, "description"):
                # Manual conversion
                formatted.append({
                    "type": "function",
                    "function": {
                        "name": tool.name,
                        "description": tool.description,
                        "parameters": getattr(tool, "parameters", {
                            "type": "object",
                            "properties": {},
                        }),
                    },
                })
            elif isinstance(tool, dict):
                # Already in correct format or needs wrapping
                if tool.get("type") == "function":
                    formatted.append(tool)
                else:
                    formatted.append({
                        "type": "function",
                        "function": tool,
                    })

        return formatted

    def format_assistant_message(
        self,
        content: str | None,
        tool_calls: list[UnifiedToolCall],
    ) -> dict[str, Any]:
        """
        Format an assistant message with tool calls.

        Useful for reconstructing conversation history.

        Args:
            content: Text content of the message.
            tool_calls: List of tool calls to include.

        Returns:
            Dictionary suitable for OpenAI messages list.
        """
        message: dict[str, Any] = {"role": "assistant"}

        if content:
            message["content"] = content

        if tool_calls:
            message["tool_calls"] = [
                {
                    "id": tc.id,
                    "type": "function",
                    "function": {
                        "name": tc.name,
                        "arguments": json.dumps(tc.arguments),
                    },
                }
                for tc in tool_calls
            ]

        return message
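The same dict path exists here, so the tool-result continuation protocol can be sketched end to end. A minimal sketch, assuming UnifiedToolCall.from_openai (defined in adapter.py, which is not part of this diff) accepts the dict form, as _extract_from_dict implies; the response payload is hand-written, not a live completion:

import json
from proxilion.providers import OpenAIAdapter

adapter = OpenAIAdapter()

# Hypothetical ChatCompletion response in dict form with one tool call.
response = {
    "choices": [{
        "message": {
            "content": None,
            "tool_calls": [{
                "id": "call_1",
                "type": "function",
                "function": {"name": "get_weather", "arguments": json.dumps({"city": "Austin"})},
            }],
        },
        "finish_reason": "tool_calls",
    }],
    "usage": {"prompt_tokens": 12, "completion_tokens": 5, "total_tokens": 17},
}

messages = [{"role": "user", "content": "Get weather"}]
calls = adapter.extract_tool_calls(response)
# Replay the assistant turn, then append one role="tool" message per call.
messages.append(adapter.format_assistant_message(None, calls))
for call in calls:
    messages.append(adapter.format_tool_result(call, {"temp": 72}))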
proxilion/py.typed ADDED

File without changes