synth-ai 0.1.0.dev29__py3-none-any.whl → 0.1.0.dev31__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those published versions.
public_tests/test_gemini_output.py (new file)
@@ -0,0 +1,188 @@
+ import json
+ import logging
+ from typing import List
+
+ import pytest
+ from pydantic import BaseModel, Field
+
+ from synth_ai.zyk import LM
+ from synth_ai.zyk.lms.tools.base import BaseTool
+
+ # Set up logging
+ logging.basicConfig(
+     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ )
+ logger = logging.getLogger(__name__)
+
+
+ # 1. Define the Tool Input Schema using BaseModel and Field
+ class CraftaxToolArgs(BaseModel):
+     instance_id: str = Field(
+         description="The ID of the Craftax instance to interact with"
+     )
+     actions_list: List[str] = Field(
+         description="A sequence of actions to execute in the environment (e.g., ['up', 'left', 'do'])"
+     )
+     service_url: str = Field(description="The URL of the Craftax environment service")
+
+
+ # 2. Define the Tool class by extending BaseTool
+ class CraftaxTool(BaseTool):
+     name: str = "interact_with_craftax"
+     description: str = "Interacts with the Craftax environment by sending a sequence of actions to the service."
+     arguments = CraftaxToolArgs
+
+     async def execute(self, args: dict):
+         """Mock execution function for testing"""
+         logger.info(
+             f"Would execute actions: {args['actions_list']} for instance {args['instance_id']}"
+         )
+         return {
+             "observation": f"Executed actions: {args['actions_list']}",
+             "reward": 1.0,
+             "done": False,
+             "info": {"achievements": {"collect_wood": True}},
+         }
+
+
+ # Helper function to create a simple tool dict (without RepeatedComposite)
+ def create_simplified_tool():
+     return {
+         "name": "interact_with_craftax",
+         "description": "Interacts with the Craftax environment by sending a sequence of actions to the service.",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "instance_id": {
+                     "type": "string",
+                     "description": "The ID of the Craftax instance to interact with",
+                 },
+                 "actions_list": {
+                     "type": "array",
+                     "items": {"type": "string"},
+                     "description": "A sequence of actions to execute in the environment",
+                 },
+                 "service_url": {
+                     "type": "string",
+                     "description": "The URL of the Craftax environment service",
+                 },
+             },
+             "required": ["instance_id", "actions_list", "service_url"],
+         },
+     }
+
+
+ # Define test constants
+ SYSTEM_MESSAGE = """You are an agent playing Craftax. Your goal is to collect resources.
+ You have access to a tool called `interact_with_craftax` to control the agent."""
+
+ USER_MESSAGE = """# Map
+ ## Terrain_underneath_you
+ grass
+ ## Surroundings
+ - Tree is 1 steps up
+
+ # Inventory
+ ## Resources
+ - wood: 0
+
+ Instructions: Collect 1 wood.
+ Instance ID: test-instance-123
+ Service URL: http://localhost:8002
+
+ Make a tool call to execute actions. Do not explain what you're doing."""
+
+
+ @pytest.mark.asyncio
+ async def test_base_tool_to_json():
+     """Test that a BaseTool can be serialized to JSON in OpenAI and Gemini formats"""
+     tool = CraftaxTool()
+
+     # Test that the tool can be converted to OpenAI format
+     openai_format = tool.to_openai_tool()
+     openai_json = json.dumps(openai_format, indent=2)
+     assert "function" in openai_json
+     assert "interact_with_craftax" in openai_json
+
+     # Test that the tool can be converted to Gemini format
+     gemini_format = tool.to_gemini_tool()
+     gemini_json = json.dumps(gemini_format, indent=2)
+     assert "parameters" in gemini_json
+     assert "interact_with_craftax" in gemini_json
+
+
+ @pytest.mark.asyncio
+ async def test_simplified_gemini_tool():
+     """Test that a simplified Gemini tool can be serialized to JSON"""
+     simplified_tool = create_simplified_tool()
+     tool_json = json.dumps(simplified_tool, indent=2)
+     assert "parameters" in tool_json
+     assert "interact_with_craftax" in tool_json
+
+
+ @pytest.mark.asyncio
+ async def test_direct_gemini_tool_call():
+     """Test that calling Gemini with a directly formatted tool works"""
+     lm = LM(
+         model_name="gemini-2-flash",
+         formatting_model_name="gpt-4o-mini",
+         temperature=0,
+         max_retries="Few",
+         synth_logging=True,
+     )
+
+     # Create a direct function-only tool format
+     direct_tool = [create_simplified_tool()]
+
+     # We're expecting this to complete without errors
+     response = await lm.respond_async(
+         system_message=SYSTEM_MESSAGE,
+         user_message=USER_MESSAGE,
+         tools=direct_tool,
+     )
+
+     # Just check we got a response
+     assert response is not None
+     logger.info(f"Response with direct tool format: {response.raw_response}")
+
+     # If there are tool calls, validate basic structure
+     if response.tool_calls:
+         logger.info(f"Tool calls: {response.tool_calls}")
+         # Verify at least one tool call has the right structure
+         assert any("function" in tc for tc in response.tool_calls)
+
+
+ @pytest.mark.asyncio
+ async def test_base_tool_gemini_call():
+     """Test that calling Gemini with a BaseTool works"""
+     lm = LM(
+         model_name="gemini-2-flash",
+         formatting_model_name="gpt-4o-mini",
+         temperature=0,
+         max_retries="Few",
+         synth_logging=True,
+     )
+
+     # Use our properly defined BaseTool
+     tool = CraftaxTool()
+
+     # We're expecting this to complete without errors
+     response = await lm.respond_async(
+         system_message=SYSTEM_MESSAGE,
+         user_message=USER_MESSAGE,
+         tools=[tool],
+     )
+
+     # Just check we got a response
+     assert response is not None
+     logger.info(f"Response with BaseTool: {response.raw_response}")
+
+     # If there are tool calls, validate basic structure
+     if response.tool_calls:
+         logger.info(f"Tool calls: {response.tool_calls}")
+         # Verify at least one tool call has the right structure
+         assert any("function" in tc for tc in response.tool_calls)
+
+
+ if __name__ == "__main__":
+     pytest.main(["-xvs", __file__])
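The new module can be run standalone through the __main__ block above. A minimal sketch of the equivalent programmatic invocation (this assumes pytest-asyncio is installed for the @pytest.mark.asyncio tests, and that Gemini and OpenAI credentials are configured for the two tests that hit the live API):

    import pytest

    # Mirrors the file's own __main__ block: verbose, stop on first failure, no capture.
    pytest.main(["-xvs", "public_tests/test_gemini_output.py"])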
synth_ai/zyk/lms/caching/handler.py
@@ -17,7 +17,7 @@ def map_params_to_key(
      model: str,
      temperature: float,
      response_model: Optional[Type[BaseModel]],
-     tools: Optional[List[BaseTool]] = None,
+     tools: Optional[List] = None,
  ) -> str:
      if not all([isinstance(msg["content"], str) for msg in messages]):
          normalized_messages = "".join([str(msg["content"]) for msg in messages])
@@ -31,12 +31,22 @@ def map_params_to_key(
      normalized_tools = ""
      if tools:
          tool_schemas = []
-         for tool in sorted(tools, key=lambda x: x.name): # Sort by name for consistency
-             tool_schema = {
-                 "name": tool.name,
-                 "description": tool.description,
-                 "arguments": tool.arguments.schema(),
-             }
+         for tool in tools:
+             tool_schema = {}
+             try:
+                 tool_schema = {
+                     "name": tool.name,
+                     "description": tool.description,
+                     "arguments": tool.arguments.model_json_schema(),
+                 }
+             except AttributeError:
+                 if isinstance(tool, dict) and "name" in tool:
+                     tool_schema = {
+                         "name": tool.get("name", ""),
+                         "description": tool.get("description", ""),
+                         "parameters": tool.get("parameters", {}),
+                     }
+
              tool_schemas.append(str(tool_schema))
          normalized_tools = "".join(tool_schemas)
  
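The cache key now accepts either tool style. A minimal standalone sketch of the duck-typing added above: BaseTool-style objects expose .name/.description/.arguments, while plain dicts carry "name"/"description"/"parameters" keys, and both collapse to a stable string for the key (normalize_tool is a hypothetical helper written only for illustration):

    def normalize_tool(tool) -> str:
        # Mirror of the branch added in map_params_to_key above.
        try:
            # BaseTool-style object: attributes plus a pydantic args model.
            schema = {
                "name": tool.name,
                "description": tool.description,
                "arguments": tool.arguments.model_json_schema(),
            }
        except AttributeError:
            # Plain dict tool, e.g. the Gemini-style dict used in the new tests.
            schema = {
                "name": tool.get("name", ""),
                "description": tool.get("description", ""),
                "parameters": tool.get("parameters", {}),
            }
        return str(schema)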
@@ -72,7 +82,7 @@ class CacheHandler:
          model: str,
          messages: List[Dict[str, Any]],
          lm_config: Dict[str, Any],
-         tools: Optional[List[BaseTool]] = None,
+         tools: Optional[List] = None,
      ) -> Optional[BaseLMResponse]:
          """Hit the cache with the given key."""
          self._validate_messages(messages)
@@ -101,7 +111,7 @@ class CacheHandler:
          messages: List[Dict[str, Any]],
          lm_config: Dict[str, Any],
          output: BaseLMResponse,
-         tools: Optional[List[BaseTool]] = None,
+         tools: Optional[List] = None,
      ) -> None:
          """Add the given output to the cache."""
          self._validate_messages(messages)
synth_ai/zyk/lms/core/main.py
@@ -9,8 +9,8 @@ from synth_ai.zyk.lms.core.vendor_clients import (
      openai_naming_regexes,
  )
  from synth_ai.zyk.lms.structured_outputs.handler import StructuredOutputHandler
- from synth_ai.zyk.lms.vendors.base import VendorBase
- from synth_ai.zyk.lms.tools.base import BaseTool
+ from synth_ai.zyk.lms.vendors.base import BaseLMResponse, VendorBase
+
  REASONING_MODELS = ["deepseek-reasoner", "o1-mini", "o1-preview", "o1", "o3"]
  
  
@@ -120,15 +120,17 @@ class LM:
          images_as_bytes: List[Any] = [],
          response_model: Optional[BaseModel] = None,
          use_ephemeral_cache_only: bool = False,
-         tools: Optional[List[BaseTool]] = None,
-     ):
+         tools: Optional[List] = None,
+     ) -> BaseLMResponse:
          assert (system_message is None) == (
              user_message is None
          ), "Must provide both system_message and user_message or neither"
          assert (
              (messages is None) != (system_message is None)
          ), "Must provide either messages or system_message/user_message pair, but not both"
-         assert not (response_model and tools), "Cannot provide both response_model and tools"
+         assert not (
+             response_model and tools
+         ), "Cannot provide both response_model and tools"
          if messages is None:
              messages = build_messages(
                  system_message, user_message, images_as_bytes, self.model_name
@@ -161,8 +163,13 @@ class LM:
              tools=tools,
          )
          assert isinstance(result.raw_response, str), "Raw response must be a string"
-         assert (isinstance(result.structured_output, BaseModel) or result.structured_output is None), "Structured output must be a Pydantic model or None"
-         assert (isinstance(result.tool_calls, list) or result.tool_calls is None), "Tool calls must be a list or None"
+         assert (
+             isinstance(result.structured_output, BaseModel)
+             or result.structured_output is None
+         ), "Structured output must be a Pydantic model or None"
+         assert (
+             isinstance(result.tool_calls, list) or result.tool_calls is None
+         ), "Tool calls must be a list or None"
          return result
  
      async def respond_async(
@@ -173,8 +180,8 @@ class LM:
          images_as_bytes: List[Any] = [],
          response_model: Optional[BaseModel] = None,
          use_ephemeral_cache_only: bool = False,
-         tools: Optional[List[BaseTool]] = None,
-     ):
+         tools: Optional[List] = None,
+     ) -> BaseLMResponse:
          # "In respond_async")
          assert (system_message is None) == (
              user_message is None
@@ -183,7 +190,9 @@ class LM:
              (messages is None) != (system_message is None)
          ), "Must provide either messages or system_message/user_message pair, but not both"
  
-         assert not (response_model and tools), "Cannot provide both response_model and tools"
+         assert not (
+             response_model and tools
+         ), "Cannot provide both response_model and tools"
          if messages is None:
              messages = build_messages(
                  system_message, user_message, images_as_bytes, self.model_name
@@ -191,7 +200,7 @@ class LM:
          result = None
          if response_model:
              try:
-                 print("Trying structured output handler")
+                 # print("Trying structured output handler")
                  result = await self.structured_output_handler.call_async(
                      messages,
                      model=self.model_name,
@@ -200,7 +209,7 @@ class LM:
                      use_ephemeral_cache_only=use_ephemeral_cache_only,
                  )
              except StructuredOutputCoercionFailureException:
-                 print("Falling back to backup handler")
+                 # print("Falling back to backup handler")
                  result = await self.backup_structured_output_handler.call_async(
                      messages,
                      model=self.model_name,
@@ -209,7 +218,7 @@ class LM:
                      use_ephemeral_cache_only=use_ephemeral_cache_only,
                  )
          else:
-             print("Calling API no response model")
+             # print("Calling API no response model")
              result = await self.client._hit_api_async(
                  messages=messages,
                  model=self.model_name,
@@ -218,10 +227,16 @@ class LM:
                  tools=tools,
              )
          assert isinstance(result.raw_response, str), "Raw response must be a string"
-         assert (isinstance(result.structured_output, BaseModel) or result.structured_output is None), "Structured output must be a Pydantic model or None"
-         assert (isinstance(result.tool_calls, list) or result.tool_calls is None), "Tool calls must be a list or None"
+         assert (
+             isinstance(result.structured_output, BaseModel)
+             or result.structured_output is None
+         ), "Structured output must be a Pydantic model or None"
+         assert (
+             isinstance(result.tool_calls, list) or result.tool_calls is None
+         ), "Tool calls must be a list or None"
          return result
  
+
  if __name__ == "__main__":
      import asyncio
  
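With the return type now annotated, callers can rely on the BaseLMResponse contract that the assertions above enforce. A short usage sketch (this assumes an already-constructed LM instance named lm and no response_model, so structured_output is None and tool_calls, when present, is a list):

    result = await lm.respond_async(
        system_message="You are a helpful assistant.",
        user_message="Reply with a short greeting.",
    )
    assert isinstance(result.raw_response, str)  # always a string, per the assertion above
    if result.tool_calls:                        # a list of tool-call dicts, or None
        for call in result.tool_calls:
            print(call)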
synth_ai/zyk/lms/vendors/core/gemini_api.py
@@ -66,12 +66,39 @@ class GeminiAPI(VendorBase):
          )
          return contents
  
-     def _convert_tools_to_gemini_format(self, tools: List[BaseTool]) -> Tool:
+     def _convert_tools_to_gemini_format(self, tools: List[Any]) -> Tool:
          function_declarations = []
          for tool in tools:
-             function_declarations.append(tool.to_gemini_tool())
+             # Try to use to_gemini_tool method if available, otherwise assume it's a dict
+             try:
+                 function_declarations.append(tool.to_gemini_tool())
+             except AttributeError:
+                 # If tool is a properly formatted dict, use it directly
+                 if "name" in tool and "parameters" in tool:
+                     function_declarations.append(tool)
+                 else:
+                     raise ValueError(
+                         f"Unsupported tool format. Tools must be BaseTool instances or properly formatted dictionaries."
+                     )
          return Tool(function_declarations=function_declarations)
  
+     def _convert_args_to_dict(self, args):
+         """
+         Recursively convert Gemini's args objects to Python dictionaries.
+         """
+         # Try to convert dict-like objects
+         try:
+             return {k: self._convert_args_to_dict(v) for k, v in args.items()}
+         except (AttributeError, TypeError):
+             # Try to convert list-like objects
+             try:
+                 if isinstance(args, (str, bytes)):
+                     return args
+                 return [self._convert_args_to_dict(item) for item in args]
+             except (TypeError, AttributeError):
+                 # Base case: primitive value
+                 return args
+
      async def _private_request_async(
          self,
          messages: List[Dict],
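The new _convert_args_to_dict helper falls back gracefully on plain Python values, so its behavior can be sanity-checked without a live Gemini response. A sketch (api stands for any already-constructed GeminiAPI instance; the nested dict is an arbitrary example):

    nested = {"actions_list": ["up", "left", "do"], "meta": {"depth": 1}}
    assert api._convert_args_to_dict(nested) == nested  # dicts and lists are rebuilt recursively
    assert api._convert_args_to_dict("do") == "do"      # strings are returned as-is, not iterated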
@@ -90,11 +117,11 @@ class GeminiAPI(VendorBase):
              tools_config = self._convert_tools_to_gemini_format(tools)
  
          # Extract tool_config from lm_config if provided
-         tool_config = lm_config.get("tool_config") if lm_config else {
-             "function_calling_config": {
-                 "mode": "any"
-             }
-         }
+         tool_config = (
+             lm_config.get("tool_config")
+             if lm_config
+             else {"function_calling_config": {"mode": "any"}}
+         )
  
          code_generation_model = genai.GenerativeModel(
              model_name=model_name,
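The default shown above forces function calling ("mode": "any") whenever the caller does not supply a tool_config. A caller-side sketch of overriding it through lm_config (the "auto" mode here is an assumed alternative accepted by the Gemini SDK, not something this diff introduces):

    lm_config = {
        "tool_config": {
            # Let the model decide whether to call a tool instead of forcing a call.
            "function_calling_config": {"mode": "auto"}
        }
    }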
@@ -113,8 +140,9 @@ class GeminiAPI(VendorBase):
          tool_calls = []
          for part in result.candidates[0].content.parts:
              if part.function_call:
-                 # Convert MapComposite args to dict
-                 args_dict = dict(part.function_call.args)
+                 # Convert complex objects to Python dictionaries recursively
+                 args_dict = self._convert_args_to_dict(part.function_call.args)
+                 # Ensure serializable arguments
                  tool_calls.append(
                      {
                          "id": f"call_{len(tool_calls) + 1}", # Generate unique IDs
@@ -145,11 +173,11 @@ class GeminiAPI(VendorBase):
              tools_config = self._convert_tools_to_gemini_format(tools)
  
          # Extract tool_config from lm_config if provided
-         tool_config = lm_config.get("tool_config") if lm_config else {
-             "function_calling_config": {
-                 "mode": "any"
-             }
-         }
+         tool_config = (
+             lm_config.get("tool_config")
+             if lm_config
+             else {"function_calling_config": {"mode": "any"}}
+         )
  
          code_generation_model = genai.GenerativeModel(
              model_name=model_name,
@@ -168,8 +196,9 @@ class GeminiAPI(VendorBase):
          tool_calls = []
          for part in result.candidates[0].content.parts:
              if part.function_call:
-                 # Convert MapComposite args to dict
-                 args_dict = dict(part.function_call.args)
+                 # Convert complex objects to Python dictionaries recursively
+                 args_dict = self._convert_args_to_dict(part.function_call.args)
+                 # Ensure serializable arguments
                  tool_calls.append(
                      {
                          "id": f"call_{len(tool_calls) + 1}", # Generate unique IDs
synth_ai-0.1.0.dev31.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: synth-ai
- Version: 0.1.0.dev29
+ Version: 0.1.0.dev31
  Summary: Software for aiding the best and multiplying the will.
  Author: Josh Purtell
  Author-email: Josh Purtell <josh@usesynth.ai>
synth_ai-0.1.0.dev31.dist-info/RECORD
@@ -4,6 +4,7 @@ public_tests/test_all_structured_outputs.py,sha256=bIcchimaVkq8q8D-GKO25d1_SauTF
  public_tests/test_anthropic_structured_outputs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  public_tests/test_deepseek_structured_outputs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  public_tests/test_deepseek_tools.py,sha256=MxEaiT_zinuMPeylqfNMsD11zRaMEwY0Fi28bg5op0A,1895
+ public_tests/test_gemini_output.py,sha256=704NCnxNepYjUxJj3eEms6zHRCps2PSaR8A-lcsQxb4,6062
  public_tests/test_gemini_structured_outputs.py,sha256=yKa3CDVJxE_Vb2BbVROje83Pb35MBusF0Nb-ttWbqS8,4001
  public_tests/test_models.py,sha256=QGevBfBuQzwyKw1ez34igDyJpMTBVOc3meW6yqFE-bM,5853
  public_tests/test_openai_structured_outputs.py,sha256=oIhdZ2QVLmn0LaqBpCP3Qhbn2KHJv633DGn6u9Ousak,3999
@@ -22,13 +23,13 @@ synth_ai/zyk/lms/caching/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
  synth_ai/zyk/lms/caching/constants.py,sha256=fPi3x9p-yRdvixMSIyclvmwmwCRliXLXQjEm6dRnG8s,52
  synth_ai/zyk/lms/caching/dbs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synth_ai/zyk/lms/caching/ephemeral.py,sha256=pNMG5Rzzp2m0Ln1UYmWxz1qbXwq3iNIrhjYAS0yO3ZE,2370
- synth_ai/zyk/lms/caching/handler.py,sha256=6DhILKDiF7SRhzHyKgpdq_JS_VN1qS_ZQZRJW-0VF_o,4149
+ synth_ai/zyk/lms/caching/handler.py,sha256=a-4FBxXLWeHCXiGDWP8QU-LPxMAvGbJ_5lUrXnZytn0,4478
  synth_ai/zyk/lms/caching/initialize.py,sha256=zZls6RKAax6Z-8oJInGaSg_RPN_fEZ6e_RCX64lMLJw,416
  synth_ai/zyk/lms/caching/persistent.py,sha256=ZaY1A9qhvfNKzcAI9FnwbIrgMKvVeIfb_yCyl3M8dxE,2860
  synth_ai/zyk/lms/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synth_ai/zyk/lms/core/all.py,sha256=wakK0HhvYRuaQZmxClURyNf3vUkTbm3OABw3TgpMjOQ,1185
  synth_ai/zyk/lms/core/exceptions.py,sha256=K0BVdAzxVIchsvYZAaHEH1GAWBZvpxhFi-SPcJOjyPQ,205
- synth_ai/zyk/lms/core/main.py,sha256=xuoCBAzWnyY52DVdBUJPjIoEIUaCtheAcHG1ZyP8ndQ,10223
+ synth_ai/zyk/lms/core/main.py,sha256=kKxk-1TZQMNXDrLv7qA42fNOsXes-G9kLtNg-LtrpYY,10370
  synth_ai/zyk/lms/core/vendor_clients.py,sha256=go6VGF3-JkZyUD81LwRkcBaxdWSVaV9vRxVTNqKSxvM,2781
  synth_ai/zyk/lms/cost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synth_ai/zyk/lms/cost/monitor.py,sha256=cSKIvw6WdPZIRubADWxQoh1MdB40T8-jjgfNUeUHIn0,5
@@ -45,7 +46,7 @@ synth_ai/zyk/lms/vendors/openai_standard.py,sha256=Th_0QjmrJ7gemxsKnWmij46lIz4QW
  synth_ai/zyk/lms/vendors/retries.py,sha256=m-WvAiPix9ovnO2S-m53Td5VZDWBVBFuHuSK9--OVxw,38
  synth_ai/zyk/lms/vendors/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=QM4xuaigdVOjBuzkPyT-RSOtvT2wiKxAiHRfI77GYn8,13461
- synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=j8pGGGkJNOVmadpkZsGYhwoSkwsdpX_jAXhrfydoNk8,9631
+ synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=I1goLy5R8eBLrun2jpnD4o87NlmzWgPrfYaeu9RZN8M,11008
  synth_ai/zyk/lms/vendors/core/mistral_api.py,sha256=-EMPBEIoYxxDMxukmcmKL8AGAHPNYe4w-76gsPtmrhk,11860
  synth_ai/zyk/lms/vendors/core/openai_api.py,sha256=QkQqba851EEGf9n5H31-pJ6WexhTZkdPWQap0oGy2Ho,6713
  synth_ai/zyk/lms/vendors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -55,11 +56,11 @@ synth_ai/zyk/lms/vendors/supported/deepseek.py,sha256=BElW0NGpkSA62wOqzzMtDw8XR3
  synth_ai/zyk/lms/vendors/supported/groq.py,sha256=Fbi7QvhdLx0F-VHO5PY-uIQlPR0bo3C9h1MvIOx8nz0,388
  synth_ai/zyk/lms/vendors/supported/ollama.py,sha256=K30VBFRTd7NYyPmyBVRZS2sm0UB651AHp9i3wd55W64,469
  synth_ai/zyk/lms/vendors/supported/together.py,sha256=Ni_jBqqGPN0PkkY-Ew64s3gNKk51k3FCpLSwlNhKbf0,342
- synth_ai-0.1.0.dev29.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
+ synth_ai-0.1.0.dev31.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
  tests/test_agent.py,sha256=CjPPWuMWC_TzX1DkDald-bbAxgjXE-HPQvFhq2B--5k,22363
  tests/test_recursive_structured_outputs.py,sha256=Ne-9XwnOxN7eSpGbNHOpegR-sRj589I84T6y8Z_4QnA,5781
  tests/test_structured_outputs.py,sha256=J7sfbGZ7OeB5ONIKpcCTymyayNyAdFfGokC1bcUrSx0,3651
- synth_ai-0.1.0.dev29.dist-info/METADATA,sha256=roTXgOH0Ms_JHCBW8uyG1nUnwV3QDamb-PlvmGqZ-Hw,2702
- synth_ai-0.1.0.dev29.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- synth_ai-0.1.0.dev29.dist-info/top_level.txt,sha256=5GzJO9j-KbJ_4ppxhmCUa_qdhHM4-9cHHNU76yAI8do,42
- synth_ai-0.1.0.dev29.dist-info/RECORD,,
+ synth_ai-0.1.0.dev31.dist-info/METADATA,sha256=2_iOE8RJoBGSLhzESPdzowRaUwb_D9_5o1dDckh0UoE,2702
+ synth_ai-0.1.0.dev31.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ synth_ai-0.1.0.dev31.dist-info/top_level.txt,sha256=5GzJO9j-KbJ_4ppxhmCUa_qdhHM4-9cHHNU76yAI8do,42
+ synth_ai-0.1.0.dev31.dist-info/RECORD,,