synth-ai 0.1.0.dev30__py3-none-any.whl → 0.1.0.dev32__py3-none-any.whl
This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
- public_tests/test_gemini_output.py +188 -0
- synth_ai/zyk/lms/caching/handler.py +19 -9
- synth_ai/zyk/lms/core/main.py +27 -13
- synth_ai/zyk/lms/structured_outputs/handler.py +10 -10
- synth_ai/zyk/lms/vendors/core/gemini_api.py +45 -16
- {synth_ai-0.1.0.dev30.dist-info → synth_ai-0.1.0.dev32.dist-info}/METADATA +1 -1
- {synth_ai-0.1.0.dev30.dist-info → synth_ai-0.1.0.dev32.dist-info}/RECORD +10 -9
- {synth_ai-0.1.0.dev30.dist-info → synth_ai-0.1.0.dev32.dist-info}/WHEEL +0 -0
- {synth_ai-0.1.0.dev30.dist-info → synth_ai-0.1.0.dev32.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.1.0.dev30.dist-info → synth_ai-0.1.0.dev32.dist-info}/top_level.txt +0 -0
public_tests/test_gemini_output.py
ADDED
@@ -0,0 +1,188 @@
+import json
+import logging
+from typing import List
+
+import pytest
+from pydantic import BaseModel, Field
+
+from synth_ai.zyk import LM
+from synth_ai.zyk.lms.tools.base import BaseTool
+
+# Set up logging
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+# 1. Define the Tool Input Schema using BaseModel and Field
+class CraftaxToolArgs(BaseModel):
+    instance_id: str = Field(
+        description="The ID of the Craftax instance to interact with"
+    )
+    actions_list: List[str] = Field(
+        description="A sequence of actions to execute in the environment (e.g., ['up', 'left', 'do'])"
+    )
+    service_url: str = Field(description="The URL of the Craftax environment service")
+
+
+# 2. Define the Tool class by extending BaseTool
+class CraftaxTool(BaseTool):
+    name: str = "interact_with_craftax"
+    description: str = "Interacts with the Craftax environment by sending a sequence of actions to the service."
+    arguments = CraftaxToolArgs
+
+    async def execute(self, args: dict):
+        """Mock execution function for testing"""
+        logger.info(
+            f"Would execute actions: {args['actions_list']} for instance {args['instance_id']}"
+        )
+        return {
+            "observation": f"Executed actions: {args['actions_list']}",
+            "reward": 1.0,
+            "done": False,
+            "info": {"achievements": {"collect_wood": True}},
+        }
+
+
+# Helper function to create a simple tool dict (without RepeatedComposite)
+def create_simplified_tool():
+    return {
+        "name": "interact_with_craftax",
+        "description": "Interacts with the Craftax environment by sending a sequence of actions to the service.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "instance_id": {
+                    "type": "string",
+                    "description": "The ID of the Craftax instance to interact with",
+                },
+                "actions_list": {
+                    "type": "array",
+                    "items": {"type": "string"},
+                    "description": "A sequence of actions to execute in the environment",
+                },
+                "service_url": {
+                    "type": "string",
+                    "description": "The URL of the Craftax environment service",
+                },
+            },
+            "required": ["instance_id", "actions_list", "service_url"],
+        },
+    }
+
+
+# Define test constants
+SYSTEM_MESSAGE = """You are an agent playing Craftax. Your goal is to collect resources.
+You have access to a tool called `interact_with_craftax` to control the agent."""
+
+USER_MESSAGE = """# Map
+## Terrain_underneath_you
+grass
+## Surroundings
+- Tree is 1 steps up
+
+# Inventory
+## Resources
+- wood: 0
+
+Instructions: Collect 1 wood.
+Instance ID: test-instance-123
+Service URL: http://localhost:8002
+
+Make a tool call to execute actions. Do not explain what you're doing."""
+
+
+@pytest.mark.asyncio
+async def test_base_tool_to_json():
+    """Test that a BaseTool can be serialized to JSON in OpenAI and Gemini formats"""
+    tool = CraftaxTool()
+
+    # Test that the tool can be converted to OpenAI format
+    openai_format = tool.to_openai_tool()
+    openai_json = json.dumps(openai_format, indent=2)
+    assert "function" in openai_json
+    assert "interact_with_craftax" in openai_json
+
+    # Test that the tool can be converted to Gemini format
+    gemini_format = tool.to_gemini_tool()
+    gemini_json = json.dumps(gemini_format, indent=2)
+    assert "parameters" in gemini_json
+    assert "interact_with_craftax" in gemini_json
+
+
+@pytest.mark.asyncio
+async def test_simplified_gemini_tool():
+    """Test that a simplified Gemini tool can be serialized to JSON"""
+    simplified_tool = create_simplified_tool()
+    tool_json = json.dumps(simplified_tool, indent=2)
+    assert "parameters" in tool_json
+    assert "interact_with_craftax" in tool_json
+
+
+@pytest.mark.asyncio
+async def test_direct_gemini_tool_call():
+    """Test that calling Gemini with a directly formatted tool works"""
+    lm = LM(
+        model_name="gemini-2-flash",
+        formatting_model_name="gpt-4o-mini",
+        temperature=0,
+        max_retries="Few",
+        synth_logging=True,
+    )
+
+    # Create a direct function-only tool format
+    direct_tool = [create_simplified_tool()]
+
+    # We're expecting this to complete without errors
+    response = await lm.respond_async(
+        system_message=SYSTEM_MESSAGE,
+        user_message=USER_MESSAGE,
+        tools=direct_tool,
+    )
+
+    # Just check we got a response
+    assert response is not None
+    logger.info(f"Response with direct tool format: {response.raw_response}")
+
+    # If there are tool calls, validate basic structure
+    if response.tool_calls:
+        logger.info(f"Tool calls: {response.tool_calls}")
+        # Verify at least one tool call has the right structure
+        assert any("function" in tc for tc in response.tool_calls)
+
+
+@pytest.mark.asyncio
+async def test_base_tool_gemini_call():
+    """Test that calling Gemini with a BaseTool works"""
+    lm = LM(
+        model_name="gemini-2-flash",
+        formatting_model_name="gpt-4o-mini",
+        temperature=0,
+        max_retries="Few",
+        synth_logging=True,
+    )
+
+    # Use our properly defined BaseTool
+    tool = CraftaxTool()
+
+    # We're expecting this to complete without errors
+    response = await lm.respond_async(
+        system_message=SYSTEM_MESSAGE,
+        user_message=USER_MESSAGE,
+        tools=[tool],
+    )
+
+    # Just check we got a response
+    assert response is not None
+    logger.info(f"Response with BaseTool: {response.raw_response}")
+
+    # If there are tool calls, validate basic structure
+    if response.tool_calls:
+        logger.info(f"Tool calls: {response.tool_calls}")
+        # Verify at least one tool call has the right structure
+        assert any("function" in tc for tc in response.tool_calls)
+
+
+if __name__ == "__main__":
+    pytest.main(["-xvs", __file__])
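The new test only asserts that at least one returned tool call contains a "function" entry. As a supplementary sketch (not part of the package), the snippet below shows how such a call could be dispatched back into the CraftaxTool.execute mock defined above; it assumes an OpenAI-style entry of the form {"id": ..., "function": {"name": ..., "arguments": ...}}, with arguments arriving either as a JSON string or as a dict, which this diff does not fully confirm.

import json
from typing import Any, Dict, List


async def dispatch_craftax_calls(tool_calls: List[Dict[str, Any]], tool: "CraftaxTool") -> list:
    """Run each returned tool call that targets the tool through CraftaxTool.execute."""
    results = []
    for tc in tool_calls:
        function = tc.get("function", {})
        if function.get("name") != tool.name:
            continue
        raw_args = function.get("arguments", {})
        # Arguments may be a JSON string (OpenAI style) or already a dict.
        args = json.loads(raw_args) if isinstance(raw_args, str) else dict(raw_args)
        results.append(await tool.execute(args))
    return results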
synth_ai/zyk/lms/caching/handler.py
CHANGED
@@ -17,7 +17,7 @@ def map_params_to_key(
     model: str,
     temperature: float,
     response_model: Optional[Type[BaseModel]],
-    tools: Optional[List
+    tools: Optional[List] = None,
 ) -> str:
     if not all([isinstance(msg["content"], str) for msg in messages]):
         normalized_messages = "".join([str(msg["content"]) for msg in messages])
@@ -31,12 +31,22 @@ def map_params_to_key(
     normalized_tools = ""
     if tools:
         tool_schemas = []
-        for tool in
-            tool_schema = {
-
-
-
-
+        for tool in tools:
+            tool_schema = {}
+            try:
+                tool_schema = {
+                    "name": tool.name,
+                    "description": tool.description,
+                    "arguments": tool.arguments.model_json_schema(),
+                }
+            except AttributeError:
+                if isinstance(tool, dict) and "name" in tool:
+                    tool_schema = {
+                        "name": tool.get("name", ""),
+                        "description": tool.get("description", ""),
+                        "parameters": tool.get("parameters", {}),
+                    }
+
             tool_schemas.append(str(tool_schema))
         normalized_tools = "".join(tool_schemas)

@@ -72,7 +82,7 @@ class CacheHandler:
         model: str,
         messages: List[Dict[str, Any]],
         lm_config: Dict[str, Any],
-        tools: Optional[List
+        tools: Optional[List] = None,
     ) -> Optional[BaseLMResponse]:
         """Hit the cache with the given key."""
         self._validate_messages(messages)
@@ -101,7 +111,7 @@ class CacheHandler:
         messages: List[Dict[str, Any]],
         lm_config: Dict[str, Any],
         output: BaseLMResponse,
-        tools: Optional[List
+        tools: Optional[List] = None,
     ) -> None:
         """Add the given output to the cache."""
         self._validate_messages(messages)
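The hunks above make the cache key tolerant of both BaseTool instances and plain dict tools by trying attribute access first and falling back to dict lookups. A standalone sketch of that duck-typing follows; the Toolish dataclass and EchoArgs schema are hypothetical stand-ins, not part of synth_ai.

from dataclasses import dataclass
from typing import Type

from pydantic import BaseModel, Field


class EchoArgs(BaseModel):
    text: str = Field(description="Text to echo back")


@dataclass
class Toolish:
    # Stand-in for a BaseTool: exposes name, description, and a pydantic arguments model.
    name: str
    description: str
    arguments: Type[BaseModel]


def normalize_tools(tools: list) -> str:
    """Mirror of the cache-key normalization: attribute access first, dict fallback second."""
    tool_schemas = []
    for tool in tools:
        tool_schema = {}
        try:
            tool_schema = {
                "name": tool.name,
                "description": tool.description,
                "arguments": tool.arguments.model_json_schema(),
            }
        except AttributeError:
            if isinstance(tool, dict) and "name" in tool:
                tool_schema = {
                    "name": tool.get("name", ""),
                    "description": tool.get("description", ""),
                    "parameters": tool.get("parameters", {}),
                }
        tool_schemas.append(str(tool_schema))
    return "".join(tool_schemas)


if __name__ == "__main__":
    as_object = Toolish("echo", "Echo text back", EchoArgs)
    as_dict = {"name": "echo", "description": "Echo text back", "parameters": {"type": "object"}}
    # Both forms contribute a schema string to the cache key instead of raising.
    print(normalize_tools([as_object]))
    print(normalize_tools([as_dict]))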
synth_ai/zyk/lms/core/main.py
CHANGED
@@ -9,8 +9,7 @@ from synth_ai.zyk.lms.core.vendor_clients import (
     openai_naming_regexes,
 )
 from synth_ai.zyk.lms.structured_outputs.handler import StructuredOutputHandler
-from synth_ai.zyk.lms.vendors.base import
-from synth_ai.zyk.lms.tools.base import BaseTool
+from synth_ai.zyk.lms.vendors.base import BaseLMResponse, VendorBase

 REASONING_MODELS = ["deepseek-reasoner", "o1-mini", "o1-preview", "o1", "o3"]

@@ -121,7 +120,7 @@ class LM:
         images_as_bytes: List[Any] = [],
         response_model: Optional[BaseModel] = None,
         use_ephemeral_cache_only: bool = False,
-        tools: Optional[List
+        tools: Optional[List] = None,
     ) -> BaseLMResponse:
         assert (system_message is None) == (
             user_message is None
@@ -129,7 +128,9 @@ class LM:
         assert (
             (messages is None) != (system_message is None)
         ), "Must provide either messages or system_message/user_message pair, but not both"
-        assert not (
+        assert not (
+            response_model and tools
+        ), "Cannot provide both response_model and tools"
         if messages is None:
             messages = build_messages(
                 system_message, user_message, images_as_bytes, self.model_name
@@ -162,8 +163,13 @@ class LM:
                tools=tools,
            )
         assert isinstance(result.raw_response, str), "Raw response must be a string"
-        assert (
-
+        assert (
+            isinstance(result.structured_output, BaseModel)
+            or result.structured_output is None
+        ), "Structured output must be a Pydantic model or None"
+        assert (
+            isinstance(result.tool_calls, list) or result.tool_calls is None
+        ), "Tool calls must be a list or None"
         return result

     async def respond_async(
@@ -174,7 +180,7 @@ class LM:
         images_as_bytes: List[Any] = [],
         response_model: Optional[BaseModel] = None,
         use_ephemeral_cache_only: bool = False,
-        tools: Optional[List
+        tools: Optional[List] = None,
     ) -> BaseLMResponse:
         # "In respond_async")
         assert (system_message is None) == (
@@ -184,7 +190,9 @@ class LM:
             (messages is None) != (system_message is None)
         ), "Must provide either messages or system_message/user_message pair, but not both"

-        assert not (
+        assert not (
+            response_model and tools
+        ), "Cannot provide both response_model and tools"
         if messages is None:
             messages = build_messages(
                 system_message, user_message, images_as_bytes, self.model_name
@@ -192,7 +200,7 @@ class LM:
         result = None
         if response_model:
             try:
-                #print("Trying structured output handler")
+                # print("Trying structured output handler")
                 result = await self.structured_output_handler.call_async(
                     messages,
                     model=self.model_name,
@@ -201,7 +209,7 @@ class LM:
                     use_ephemeral_cache_only=use_ephemeral_cache_only,
                 )
             except StructuredOutputCoercionFailureException:
-                #print("Falling back to backup handler")
+                # print("Falling back to backup handler")
                 result = await self.backup_structured_output_handler.call_async(
                     messages,
                     model=self.model_name,
@@ -210,7 +218,7 @@ class LM:
                     use_ephemeral_cache_only=use_ephemeral_cache_only,
                 )
         else:
-            #print("Calling API no response model")
+            # print("Calling API no response model")
             result = await self.client._hit_api_async(
                 messages=messages,
                 model=self.model_name,
@@ -219,10 +227,16 @@ class LM:
                tools=tools,
            )
         assert isinstance(result.raw_response, str), "Raw response must be a string"
-        assert (
-
+        assert (
+            isinstance(result.structured_output, BaseModel)
+            or result.structured_output is None
+        ), "Structured output must be a Pydantic model or None"
+        assert (
+            isinstance(result.tool_calls, list) or result.tool_calls is None
+        ), "Tool calls must be a list or None"
         return result

+
 if __name__ == "__main__":
     import asyncio

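respond_sync and respond_async now reject supplying response_model and tools together, and the guard fires before messages are built or any vendor API is hit. A minimal caller-side sketch (not from the package): WoodCount and the noop tool dict are illustrative, and constructing LM is assumed to need the same credentials the bundled tests assume.

import asyncio

import pytest
from pydantic import BaseModel

from synth_ai.zyk import LM


class WoodCount(BaseModel):
    wood: int


async def main() -> None:
    lm = LM(
        model_name="gemini-2-flash",
        formatting_model_name="gpt-4o-mini",
        temperature=0,
        max_retries="Few",
        synth_logging=True,
    )
    # The new assertion rejects the combination before any request is sent.
    with pytest.raises(AssertionError, match="Cannot provide both response_model and tools"):
        await lm.respond_async(
            system_message="Report inventory.",
            user_message="I have 3 wood.",
            response_model=WoodCount,
            tools=[{"name": "noop", "description": "", "parameters": {"type": "object", "properties": {}}}],
        )


if __name__ == "__main__":
    asyncio.run(main())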
synth_ai/zyk/lms/structured_outputs/handler.py
CHANGED
@@ -174,24 +174,24 @@ class StringifiedJSONHandler(StructuredHandlerBase):
                type(raw_text_response_or_cached_hit) in [str, BaseLMResponse]
            ), f"Expected str or BaseLMResponse, got {type(raw_text_response_or_cached_hit)}"
            if type(raw_text_response_or_cached_hit) == BaseLMResponse:
-                print("Got cached hit, returning directly")
+                #print("Got cached hit, returning directly")
                raw_text_response = raw_text_response_or_cached_hit.raw_response
            else:
                raw_text_response = raw_text_response_or_cached_hit
            logger.debug(f"Raw response from model:\n{raw_text_response}")

-            print("Trying to parse structured output")
+            #print("Trying to parse structured output")
            try:
                structured_output = pull_out_structured_output(
                    raw_text_response, response_model
                )

-                print("Successfully parsed structured output on first attempt")
+                #print("Successfully parsed structured output on first attempt")
                break
            except Exception as e:
                logger.warning(f"Failed to parse structured output: {str(e)}")
                try:
-                    print("Attempting to fix with forced JSON parser")
+                    #print("Attempting to fix with forced JSON parser")
                    structured_output = await fix_errant_forced_async(
                        messages_with_json_formatting_instructions,
                        raw_text_response,
@@ -200,7 +200,7 @@ class StringifiedJSONHandler(StructuredHandlerBase):
                    )
                    assert isinstance(structured_output, BaseModel), "Structured output must be a Pydantic model"
                    assert not isinstance(structured_output, BaseLMResponse), "Got BaseLMResponse instead of Pydantic model"
-                    print("Successfully fixed and parsed structured output")
+                    #print("Successfully fixed and parsed structured output")
                    break
                except Exception as e:
                    logger.error(f"Failed to fix structured output: {str(e)}")
@@ -215,8 +215,8 @@ class StringifiedJSONHandler(StructuredHandlerBase):
                    raise StructuredOutputCoercionFailureException(
                        "Failed to get structured output"
                    )
-        print("Successfully parsed structured output")
-        print(structured_output)
+        #print("Successfully parsed structured output")
+        #print(structured_output)
        assert isinstance(structured_output, BaseModel), "Structured output must be a Pydantic model"
        assert not isinstance(structured_output, BaseLMResponse),"Got BaseLMResponse instead of Pydantic model"
        return BaseLMResponse(
@@ -277,16 +277,16 @@ class StringifiedJSONHandler(StructuredHandlerBase):
                structured_output = pull_out_structured_output(
                    raw_text_response, response_model
                )
-                print("Successfully parsed structured output on first attempt")
+                #print("Successfully parsed structured output on first attempt")
                break
            except Exception as e:
                logger.warning(f"Failed to parse structured output: {str(e)}")
                try:
-                    print("Attempting to fix with forced JSON parser")
+                    #print("Attempting to fix with forced JSON parser")
                    structured_output = fix_errant_forced_sync(
                        raw_text_response, response_model, "gpt-4o-mini"
                    )
-                    print("Successfully fixed and parsed structured output")
+                    #print("Successfully fixed and parsed structured output")
                    break
                except Exception as e:
                    logger.error(f"Failed to fix structured output: {str(e)}")
synth_ai/zyk/lms/vendors/core/gemini_api.py
CHANGED
@@ -66,12 +66,39 @@ class GeminiAPI(VendorBase):
        )
        return contents

-    def _convert_tools_to_gemini_format(self, tools: List[
+    def _convert_tools_to_gemini_format(self, tools: List[Any]) -> Tool:
        function_declarations = []
        for tool in tools:
-
+            # Try to use to_gemini_tool method if available, otherwise assume it's a dict
+            try:
+                function_declarations.append(tool.to_gemini_tool())
+            except AttributeError:
+                # If tool is a properly formatted dict, use it directly
+                if "name" in tool and "parameters" in tool:
+                    function_declarations.append(tool)
+                else:
+                    raise ValueError(
+                        f"Unsupported tool format. Tools must be BaseTool instances or properly formatted dictionaries."
+                    )
        return Tool(function_declarations=function_declarations)

+    def _convert_args_to_dict(self, args):
+        """
+        Recursively convert Gemini's args objects to Python dictionaries.
+        """
+        # Try to convert dict-like objects
+        try:
+            return {k: self._convert_args_to_dict(v) for k, v in args.items()}
+        except (AttributeError, TypeError):
+            # Try to convert list-like objects
+            try:
+                if isinstance(args, (str, bytes)):
+                    return args
+                return [self._convert_args_to_dict(item) for item in args]
+            except (TypeError, AttributeError):
+                # Base case: primitive value
+                return args
+
    async def _private_request_async(
        self,
        messages: List[Dict],
@@ -90,11 +117,11 @@ class GeminiAPI(VendorBase):
            tools_config = self._convert_tools_to_gemini_format(tools)

            # Extract tool_config from lm_config if provided
-            tool_config =
-            "
-
-            }
-
+            tool_config = (
+                lm_config.get("tool_config")
+                if lm_config
+                else {"function_calling_config": {"mode": "any"}}
+            )

        code_generation_model = genai.GenerativeModel(
            model_name=model_name,
@@ -113,8 +140,9 @@ class GeminiAPI(VendorBase):
        tool_calls = []
        for part in result.candidates[0].content.parts:
            if part.function_call:
-                # Convert
-                args_dict =
+                # Convert complex objects to Python dictionaries recursively
+                args_dict = self._convert_args_to_dict(part.function_call.args)
+                # Ensure serializable arguments
                tool_calls.append(
                    {
                        "id": f"call_{len(tool_calls) + 1}",  # Generate unique IDs
@@ -145,11 +173,11 @@ class GeminiAPI(VendorBase):
            tools_config = self._convert_tools_to_gemini_format(tools)

            # Extract tool_config from lm_config if provided
-            tool_config =
-            "
-
-            }
-
+            tool_config = (
+                lm_config.get("tool_config")
+                if lm_config
+                else {"function_calling_config": {"mode": "any"}}
+            )

        code_generation_model = genai.GenerativeModel(
            model_name=model_name,
@@ -168,8 +196,9 @@ class GeminiAPI(VendorBase):
        tool_calls = []
        for part in result.candidates[0].content.parts:
            if part.function_call:
-                # Convert
-                args_dict =
+                # Convert complex objects to Python dictionaries recursively
+                args_dict = self._convert_args_to_dict(part.function_call.args)
+                # Ensure serializable arguments
                tool_calls.append(
                    {
                        "id": f"call_{len(tool_calls) + 1}",  # Generate unique IDs
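Because _convert_args_to_dict is purely duck-typed (dict-like, then list-like, then primitive), its behaviour can be previewed on plain nested Python data, which stands in for Gemini's proto MapComposite/RepeatedComposite objects. A free-function sketch of the same recursion, for illustration only:

def convert_args_to_dict(args):
    """Same recursion as GeminiAPI._convert_args_to_dict: dict-like, then list-like, then primitive."""
    try:
        return {k: convert_args_to_dict(v) for k, v in args.items()}
    except (AttributeError, TypeError):
        try:
            if isinstance(args, (str, bytes)):
                return args
            return [convert_args_to_dict(item) for item in args]
        except (TypeError, AttributeError):
            return args


if __name__ == "__main__":
    nested = {"instance_id": "test-instance-123", "actions_list": ["up", "left", "do"], "depth": 1}
    print(convert_args_to_dict(nested))
    # -> {'instance_id': 'test-instance-123', 'actions_list': ['up', 'left', 'do'], 'depth': 1}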
{synth_ai-0.1.0.dev30.dist-info → synth_ai-0.1.0.dev32.dist-info}/RECORD
CHANGED
@@ -4,6 +4,7 @@ public_tests/test_all_structured_outputs.py,sha256=bIcchimaVkq8q8D-GKO25d1_SauTF
 public_tests/test_anthropic_structured_outputs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 public_tests/test_deepseek_structured_outputs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 public_tests/test_deepseek_tools.py,sha256=MxEaiT_zinuMPeylqfNMsD11zRaMEwY0Fi28bg5op0A,1895
+public_tests/test_gemini_output.py,sha256=704NCnxNepYjUxJj3eEms6zHRCps2PSaR8A-lcsQxb4,6062
 public_tests/test_gemini_structured_outputs.py,sha256=yKa3CDVJxE_Vb2BbVROje83Pb35MBusF0Nb-ttWbqS8,4001
 public_tests/test_models.py,sha256=QGevBfBuQzwyKw1ez34igDyJpMTBVOc3meW6yqFE-bM,5853
 public_tests/test_openai_structured_outputs.py,sha256=oIhdZ2QVLmn0LaqBpCP3Qhbn2KHJv633DGn6u9Ousak,3999
@@ -22,19 +23,19 @@ synth_ai/zyk/lms/caching/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 synth_ai/zyk/lms/caching/constants.py,sha256=fPi3x9p-yRdvixMSIyclvmwmwCRliXLXQjEm6dRnG8s,52
 synth_ai/zyk/lms/caching/dbs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synth_ai/zyk/lms/caching/ephemeral.py,sha256=pNMG5Rzzp2m0Ln1UYmWxz1qbXwq3iNIrhjYAS0yO3ZE,2370
-synth_ai/zyk/lms/caching/handler.py,sha256=
+synth_ai/zyk/lms/caching/handler.py,sha256=a-4FBxXLWeHCXiGDWP8QU-LPxMAvGbJ_5lUrXnZytn0,4478
 synth_ai/zyk/lms/caching/initialize.py,sha256=zZls6RKAax6Z-8oJInGaSg_RPN_fEZ6e_RCX64lMLJw,416
 synth_ai/zyk/lms/caching/persistent.py,sha256=ZaY1A9qhvfNKzcAI9FnwbIrgMKvVeIfb_yCyl3M8dxE,2860
 synth_ai/zyk/lms/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synth_ai/zyk/lms/core/all.py,sha256=wakK0HhvYRuaQZmxClURyNf3vUkTbm3OABw3TgpMjOQ,1185
 synth_ai/zyk/lms/core/exceptions.py,sha256=K0BVdAzxVIchsvYZAaHEH1GAWBZvpxhFi-SPcJOjyPQ,205
-synth_ai/zyk/lms/core/main.py,sha256=
+synth_ai/zyk/lms/core/main.py,sha256=kKxk-1TZQMNXDrLv7qA42fNOsXes-G9kLtNg-LtrpYY,10370
 synth_ai/zyk/lms/core/vendor_clients.py,sha256=go6VGF3-JkZyUD81LwRkcBaxdWSVaV9vRxVTNqKSxvM,2781
 synth_ai/zyk/lms/cost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synth_ai/zyk/lms/cost/monitor.py,sha256=cSKIvw6WdPZIRubADWxQoh1MdB40T8-jjgfNUeUHIn0,5
 synth_ai/zyk/lms/cost/statefulness.py,sha256=TOsuXL8IjtKOYJ2aJQF8TwJVqn_wQ7AIwJJmdhMye7U,36
 synth_ai/zyk/lms/structured_outputs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-synth_ai/zyk/lms/structured_outputs/handler.py,sha256=
+synth_ai/zyk/lms/structured_outputs/handler.py,sha256=BQ0T4HBFXC9qesF8v0lG8MuiOecWm2YEF75nUt1mB_s,16925
 synth_ai/zyk/lms/structured_outputs/inject.py,sha256=Fy-zDeleRxOZ8ZRM6IuZ6CP2XZnMe4K2PEn4Q9c_KPY,11777
 synth_ai/zyk/lms/structured_outputs/rehabilitate.py,sha256=GuIhzsb7rTvwgn7f9I9omNnXBz5Me_qrtNYcTWzw5_U,7909
 synth_ai/zyk/lms/tools/base.py,sha256=j7wYb1xAvaAm3qVrINphgUhGS-UjZmRpbouseQYgh7A,3228
@@ -45,7 +46,7 @@ synth_ai/zyk/lms/vendors/openai_standard.py,sha256=Th_0QjmrJ7gemxsKnWmij46lIz4QW
 synth_ai/zyk/lms/vendors/retries.py,sha256=m-WvAiPix9ovnO2S-m53Td5VZDWBVBFuHuSK9--OVxw,38
 synth_ai/zyk/lms/vendors/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=QM4xuaigdVOjBuzkPyT-RSOtvT2wiKxAiHRfI77GYn8,13461
-synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=
+synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=I1goLy5R8eBLrun2jpnD4o87NlmzWgPrfYaeu9RZN8M,11008
 synth_ai/zyk/lms/vendors/core/mistral_api.py,sha256=-EMPBEIoYxxDMxukmcmKL8AGAHPNYe4w-76gsPtmrhk,11860
 synth_ai/zyk/lms/vendors/core/openai_api.py,sha256=QkQqba851EEGf9n5H31-pJ6WexhTZkdPWQap0oGy2Ho,6713
 synth_ai/zyk/lms/vendors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -55,11 +56,11 @@ synth_ai/zyk/lms/vendors/supported/deepseek.py,sha256=BElW0NGpkSA62wOqzzMtDw8XR3
 synth_ai/zyk/lms/vendors/supported/groq.py,sha256=Fbi7QvhdLx0F-VHO5PY-uIQlPR0bo3C9h1MvIOx8nz0,388
 synth_ai/zyk/lms/vendors/supported/ollama.py,sha256=K30VBFRTd7NYyPmyBVRZS2sm0UB651AHp9i3wd55W64,469
 synth_ai/zyk/lms/vendors/supported/together.py,sha256=Ni_jBqqGPN0PkkY-Ew64s3gNKk51k3FCpLSwlNhKbf0,342
-synth_ai-0.1.0.
+synth_ai-0.1.0.dev32.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
 tests/test_agent.py,sha256=CjPPWuMWC_TzX1DkDald-bbAxgjXE-HPQvFhq2B--5k,22363
 tests/test_recursive_structured_outputs.py,sha256=Ne-9XwnOxN7eSpGbNHOpegR-sRj589I84T6y8Z_4QnA,5781
 tests/test_structured_outputs.py,sha256=J7sfbGZ7OeB5ONIKpcCTymyayNyAdFfGokC1bcUrSx0,3651
-synth_ai-0.1.0.
-synth_ai-0.1.0.
-synth_ai-0.1.0.
-synth_ai-0.1.0.
+synth_ai-0.1.0.dev32.dist-info/METADATA,sha256=zgAF8JSt2QckCC9M4iLt2vVn2i1c7N6MEUj4YEKTV9k,2702
+synth_ai-0.1.0.dev32.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+synth_ai-0.1.0.dev32.dist-info/top_level.txt,sha256=5GzJO9j-KbJ_4ppxhmCUa_qdhHM4-9cHHNU76yAI8do,42
+synth_ai-0.1.0.dev32.dist-info/RECORD,,
{synth_ai-0.1.0.dev30.dist-info → synth_ai-0.1.0.dev32.dist-info}/WHEEL
File without changes

{synth_ai-0.1.0.dev30.dist-info → synth_ai-0.1.0.dev32.dist-info}/licenses/LICENSE
File without changes

{synth_ai-0.1.0.dev30.dist-info → synth_ai-0.1.0.dev32.dist-info}/top_level.txt
File without changes