synth-ai 0.1.0.dev35__py3-none-any.whl → 0.1.0.dev37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,75 @@
+ import asyncio
+ import pytest
+ from synth_ai.zyk.lms.core.main import LM
+ FORMATTING_MODEL_NAME = "gpt-4o-mini"
+
+ # List of reasoning models to test
+ # Note: Ensure these models are correctly configured and accessible in your environment
+ # And that they are included in REASONING_MODELS in main.py
+ REASONING_MODELS_TO_TEST = [
+     "o4-mini",
+     "claude-3-7-sonnet-latest",
+     "gemini-2.5-pro"
+ ]
+
+ # Define effort levels (adjust if specific models use different terms)
+ EFFORT_LEVELS = ["low", "medium", "high"]
+
+ @pytest.mark.parametrize("model_name", REASONING_MODELS_TO_TEST)
+ @pytest.mark.parametrize("effort", EFFORT_LEVELS)
+ @pytest.mark.asyncio
+ async def test_reasoning_effort_levels(model_name, effort):
+     """
+     Tests that the reasoning_effort parameter is accepted and calls succeed for various models and levels.
+     Note: This test primarily checks for successful API calls across effort levels.
+     Comparing output length or quality based on 'effort' is complex and model-specific.
+     Anthropic's 'thinking' budget might correlate, but OpenAI/others might handle 'effort' differently or ignore it.
+     """
+     print(f"\nTesting model: {model_name} with effort: {effort}")
+     lm = LM(
+         model_name=model_name,
+         formatting_model_name=FORMATTING_MODEL_NAME,
+         temperature=0,
+     )
+
+     system_prompt = "You are a helpful assistant designed to explain complex topics simply."
+     user_prompt = f"Explain the concept of quantum entanglement step by step using a simple analogy. Be concise if effort is low, detailed if high. Current effort: {effort}."
+
+     try:
+         result = await lm.respond_async(
+             system_message=system_prompt,
+             user_message=user_prompt,
+             reasoning_effort=effort,  # Pass the effort level
+         )
+
+         response = result.raw_response
+
+         # Assert call succeeded and response is non-empty
+         assert isinstance(response, str), f"Model {model_name} (effort={effort}) failed. Response type: {type(response)}"
+         assert len(response) > 0, f"Model {model_name} (effort={effort}): Response is empty."
+
+         print(f"  Response length (effort={effort}): {len(response)}")
+         # print(f"  Response snippet: {response[:100]}...")  # Optional: print snippet
+
+     except Exception as e:
+         pytest.fail(f"Model {model_name} (effort={effort}) raised an exception: {e}")
+
+ # Optional: Add a separate test to compare lengths between low and high effort for specific models if needed.
+
+ if __name__ == "__main__":
+     async def main():
+         print("Running effort tests directly...")
+         test_models = REASONING_MODELS_TO_TEST
+         effort_levels_to_run = EFFORT_LEVELS
+
+         all_tasks = []
+         for model in test_models:
+             for effort_level in effort_levels_to_run:
+                 # Create a task for each combination
+                 all_tasks.append(test_reasoning_effort_levels(model, effort_level))
+
+         # Run all tests concurrently (be mindful of rate limits)
+         await asyncio.gather(*all_tasks)
+         print("\nTest run finished.")
+
+     asyncio.run(main())
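The hunk above adds a new test module (the RECORD hunk at the end of this diff names it public_tests/test_reasoning_effort.py). A minimal usage sketch distilled from that test, assuming only the API surface the test itself exercises (LM, respond_async, and the new reasoning_effort keyword); the model name and prompts are illustrative:

    import asyncio
    from synth_ai.zyk.lms.core.main import LM

    async def demo():
        lm = LM(model_name="o4-mini", formatting_model_name="gpt-4o-mini", temperature=0)
        result = await lm.respond_async(
            system_message="You are a helpful assistant.",
            user_message="Explain quantum entanglement with a simple analogy.",
            reasoning_effort="medium",  # new keyword threaded through in this release
        )
        print(result.raw_response)  # raw_response is asserted to be a str inside LM

    asyncio.run(demo())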
@@ -11,7 +11,7 @@ from synth_ai.zyk.lms.core.vendor_clients import (
  from synth_ai.zyk.lms.structured_outputs.handler import StructuredOutputHandler
  from synth_ai.zyk.lms.vendors.base import BaseLMResponse, VendorBase

- REASONING_MODELS = ["deepseek-reasoner", "o1-mini", "o1-preview", "o1", "o3"]
+ REASONING_MODELS = ["deepseek-reasoner", "o1-mini", "o1-preview", "o1", "o3", "o4-mini", "claude-3-7-latest-thinking", "gemini-1.5-pro-latest"]


  def build_messages(
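REASONING_MODELS gains three entries. Note that the new test above parametrizes "claude-3-7-sonnet-latest" and "gemini-2.5-pro", which do not literally appear in this list, so of the tested names only "o4-mini" would pass the membership check performed in respond_sync/respond_async (see the hunks below). The check is a plain list membership, sketched here using only names from this diff:

    REASONING_MODELS = ["deepseek-reasoner", "o1-mini", "o1-preview", "o1", "o3",
                        "o4-mini", "claude-3-7-latest-thinking", "gemini-1.5-pro-latest"]

    for name in ["o4-mini", "claude-3-7-sonnet-latest", "gemini-2.5-pro"]:
        print(name, name in REASONING_MODELS)  # True, False, False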
@@ -92,18 +92,33 @@ class LM:
  )
  # print(self.client.__class__)

+ # Determine if the primary model supports forced JSON or specific formatting modes
+ # primary_model_supports_forced_json = self.client.supports_forced_json()
+
+ # Choose the structured output mode based on primary model capability
+ # effective_structured_output_mode = structured_output_mode
+ # if not primary_model_supports_forced_json and structured_output_mode == "forced_json":
+ # # Fallback or adjust if the primary model doesn't support the desired mode
+ # # For simplicity, let's assume we might want to fallback to stringified_json or handle differently
+ # # print(f"Warning: Model {model_name} does not support forced_json. Adjusting strategy.")
+ # effective_structured_output_mode = "stringified_json" # Example fallback
+
+
  formatting_client = get_client(formatting_model_name, with_formatting=True)

+
  max_retries_dict = {"None": 0, "Few": 2, "Many": 5}
+ # Use the effective mode for the primary handler
  self.structured_output_handler = StructuredOutputHandler(
  self.client,
  formatting_client,
- structured_output_mode,
+ structured_output_mode, # Use original mode
  {"max_retries": max_retries_dict.get(max_retries, 2)},
  )
+ # Always have a forced_json backup handler ready
  self.backup_structured_output_handler = StructuredOutputHandler(
- self.client,
- formatting_client,
+ self.client, # This should ideally use a client capable of forced_json if primary isn't
+ formatting_client, # Formatting client must support forced_json
  "forced_json",
  {"max_retries": max_retries_dict.get(max_retries, 2)},
  )
@@ -121,6 +136,7 @@ class LM:
  response_model: Optional[BaseModel] = None,
  use_ephemeral_cache_only: bool = False,
  tools: Optional[List] = None,
+ reasoning_effort: Optional[str] = None,
  ) -> BaseLMResponse:
  assert (system_message is None) == (
  user_message is None
@@ -131,6 +147,14 @@ class LM:
  assert not (
  response_model and tools
  ), "Cannot provide both response_model and tools"
+
+ current_lm_config = self.lm_config.copy()
+ if self.model_name in REASONING_MODELS:
+ # Removed logic that set max_tokens based on reasoning_tokens
+ # Vendor clients will now receive reasoning_effort directly
+ pass
+
+
  if messages is None:
  messages = build_messages(
  system_message, user_message, images_as_bytes, self.model_name
@@ -139,28 +163,31 @@ class LM:
  if response_model:
  try:
  result = self.structured_output_handler.call_sync(
- messages,
+ messages=messages,
  model=self.model_name,
- lm_config=self.lm_config,
  response_model=response_model,
  use_ephemeral_cache_only=use_ephemeral_cache_only,
+ lm_config=current_lm_config,
+ reasoning_effort=reasoning_effort,
  )
  except StructuredOutputCoercionFailureException:
  # print("Falling back to backup handler")
  result = self.backup_structured_output_handler.call_sync(
- messages,
+ messages=messages,
  model=self.model_name,
- lm_config=self.lm_config,
  response_model=response_model,
  use_ephemeral_cache_only=use_ephemeral_cache_only,
+ lm_config=current_lm_config,
+ reasoning_effort=reasoning_effort,
  )
  else:
  result = self.client._hit_api_sync(
  messages=messages,
  model=self.model_name,
- lm_config=self.lm_config,
+ lm_config=current_lm_config,
  use_ephemeral_cache_only=use_ephemeral_cache_only,
  tools=tools,
+ reasoning_effort=reasoning_effort,
  )
  assert isinstance(result.raw_response, str), "Raw response must be a string"
  assert (
@@ -181,6 +208,7 @@ class LM:
  response_model: Optional[BaseModel] = None,
  use_ephemeral_cache_only: bool = False,
  tools: Optional[List] = None,
+ reasoning_effort: Optional[str] = None,
  ) -> BaseLMResponse:
  # "In respond_async")
  assert (system_message is None) == (
@@ -193,6 +221,13 @@ class LM:
  assert not (
  response_model and tools
  ), "Cannot provide both response_model and tools"
+
+ current_lm_config = self.lm_config.copy()
+ if self.model_name in REASONING_MODELS:
+ # Removed logic that set max_tokens based on reasoning_tokens
+ # Vendor clients will now receive reasoning_effort directly
+ pass
+
  if messages is None:
  messages = build_messages(
  system_message, user_message, images_as_bytes, self.model_name
@@ -202,29 +237,32 @@ class LM:
  try:
  # print("Trying structured output handler")
  result = await self.structured_output_handler.call_async(
- messages,
+ messages=messages,
  model=self.model_name,
- lm_config=self.lm_config,
  response_model=response_model,
  use_ephemeral_cache_only=use_ephemeral_cache_only,
+ lm_config=current_lm_config,
+ reasoning_effort=reasoning_effort,
  )
  except StructuredOutputCoercionFailureException:
  # print("Falling back to backup handler")
  result = await self.backup_structured_output_handler.call_async(
- messages,
+ messages=messages,
  model=self.model_name,
- lm_config=self.lm_config,
  response_model=response_model,
  use_ephemeral_cache_only=use_ephemeral_cache_only,
+ lm_config=current_lm_config,
+ reasoning_effort=reasoning_effort,
  )
  else:
  # print("Calling API no response model")
  result = await self.client._hit_api_async(
  messages=messages,
  model=self.model_name,
- lm_config=self.lm_config,
+ lm_config=current_lm_config,
  use_ephemeral_cache_only=use_ephemeral_cache_only,
  tools=tools,
+ reasoning_effort=reasoning_effort,
  )
  assert isinstance(result.raw_response, str), "Raw response must be a string"
  assert (
@@ -13,7 +13,7 @@ from synth_ai.zyk.lms.core.all import (
  )

  openai_naming_regexes: List[Pattern] = [
- re.compile(r"^(ft:)?(o[1,3](-.*)?|gpt-.*)$"),
+ re.compile(r"^(ft:)?(o[1,3,4,5](-.*)?|gpt-.*)$"),
  ]
  openai_formatting_model_regexes: List[Pattern] = [
  re.compile(r"^(ft:)?gpt-4o(-.*)?$"),
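This hunk appears to be from vendor_clients.py (per the RECORD hunk below, its size grows by four bytes, consistent with the four added characters ",4,5"). Inside a character class the commas are literal, so the widened class o[1,3,4,5] routes o1, o3, o4, and o5 prefixes to the OpenAI client in addition to gpt-*. A quick check of the new pattern:

    import re

    pattern = re.compile(r"^(ft:)?(o[1,3,4,5](-.*)?|gpt-.*)$")

    for name in ["o1", "o3-mini", "o4-mini", "o5", "gpt-4o-mini", "o2"]:
        print(name, bool(pattern.match(name)))
    # o1, o3-mini, o4-mini, o5, gpt-4o-mini match; o2 does not (2 is not in the class)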
@@ -429,6 +429,7 @@ class StructuredOutputHandler:
  lm_config: Dict[str, Any] = {},
  reasoning_effort: str = "high",
  ) -> BaseLMResponse:
+ # print("Output handler call sync")
  return self.handler.call_sync(
  messages=messages,
  model=model,
@@ -17,8 +17,8 @@ ANTHROPIC_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (anthropic.APIError


  sonnet_37_budgets = {
- "high": 4000,
- "medium": 2000,
+ "high": 8000,
+ "medium": 4000,
  "low": 1000,
  }

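The doubled budgets are the Anthropic-side knob behind reasoning_effort. The consumption site is not part of this diff, so the helper below is a hypothetical sketch of how such a table is typically fed into Anthropic's extended-thinking parameter; only sonnet_37_budgets and its values come from the hunk above:

    sonnet_37_budgets = {
        "high": 8000,    # was 4000 in dev35
        "medium": 4000,  # was 2000 in dev35
        "low": 1000,
    }

    def thinking_config_for(effort: str) -> dict:
        # Hypothetical helper (not in the diff): map an effort level to a
        # thinking-token budget of the shape Anthropic's Messages API accepts.
        budget = sonnet_37_budgets.get(effort, sonnet_37_budgets["low"])
        return {"type": "enabled", "budget_tokens": budget}

    print(thinking_config_for("high"))  # {'type': 'enabled', 'budget_tokens': 8000}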
@@ -111,6 +111,9 @@ class GeminiAPI(VendorBase):
  generation_config = {
  "temperature": temperature,
  }
+ # Add max_output_tokens if max_tokens is in lm_config
+ if lm_config and "max_tokens" in lm_config:
+ generation_config["max_output_tokens"] = lm_config["max_tokens"]

  tools_config = None
  if tools:
@@ -167,6 +170,9 @@ class GeminiAPI(VendorBase):
  generation_config = {
  "temperature": temperature,
  }
+ # Add max_output_tokens if max_tokens is in lm_config
+ if lm_config and "max_tokens" in lm_config:
+ generation_config["max_output_tokens"] = lm_config["max_tokens"]

  tools_config = None
  if tools:
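The same three lines are added to both GeminiAPI call paths, so a caller-supplied max_tokens now caps output length via Gemini's max_output_tokens field. A small sketch of the resulting config, using only names that appear in the hunks (the temperature and token values are illustrative):

    lm_config = {"max_tokens": 512}
    temperature = 0.2
    generation_config = {
        "temperature": temperature,
    }
    # New in dev37: propagate max_tokens to Gemini's max_output_tokens
    if lm_config and "max_tokens" in lm_config:
        generation_config["max_output_tokens"] = lm_config["max_tokens"]
    print(generation_config)  # {'temperature': 0.2, 'max_output_tokens': 512}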
@@ -10,7 +10,7 @@ from pydantic import BaseModel
  from synth_ai.zyk.lms.caching.initialize import get_cache_handler
  from synth_ai.zyk.lms.tools.base import BaseTool
  from synth_ai.zyk.lms.vendors.base import BaseLMResponse
- from synth_ai.zyk.lms.vendors.constants import SPECIAL_BASE_TEMPS
+ from synth_ai.zyk.lms.vendors.constants import SPECIAL_BASE_TEMPS, openai_reasoners
  from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard

  OPENAI_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (
@@ -70,21 +70,20 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
  return (
  cache_result["response"] if type(cache_result) == dict else cache_result
  )
- if model in ["o3-mini", "o3", "o1-mini", "o1"]:
- output = await self.async_client.beta.chat.completions.parse(
+ if model in openai_reasoners:
+ output = await self.async_client.chat.completions.create(
  model=model,
  messages=messages,
- temperature=lm_config.get(
- "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
- ),
- response_format=response_model,
- reasoning_effort=reasoning_effort,
+ temperature=temperature,
+ tools=tools,
+ max_completion_tokens=lm_config.get("max_tokens"),
  )
  else:
- output = await self.async_client.beta.chat.completions.parse(
+ output = await self.async_client.chat.completions.create(
  model=model,
  messages=messages,
- response_format=response_model,
+ temperature=temperature,
+ max_tokens=lm_config.get("max_tokens"),
  )
  # "Output", output)
  api_result = response_model(**json.loads(output.choices[0].message.content))
@@ -126,20 +125,19 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
  cache_result["response"] if type(cache_result) == dict else cache_result
  )
  if model in ["o3-mini", "o3", "o1-mini", "o1"]:
- output = self.sync_client.beta.chat.completions.parse(
+ output = self.sync_client.chat.completions.create(
  model=model,
  messages=messages,
- temperature=lm_config.get(
- "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
- ),
- response_format=response_model,
- reasoning_effort=reasoning_effort,
+ temperature=temperature,
+ tools=tools,
+ max_tokens=lm_config.get("max_tokens"),
  )
  else:
- output = self.sync_client.beta.chat.completions.parse(
+ output = self.sync_client.chat.completions.create(
  model=model,
  messages=messages,
- response_format=response_model,
+ temperature=temperature,
+ max_tokens=lm_config.get("max_tokens"),
  )
  api_result = response_model(**json.loads(output.choices[0].message.content))

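In both the async hunk above and this sync hunk, OpenAIStructuredOutputClient drops beta.chat.completions.parse(..., response_format=response_model) in favor of plain chat.completions.create(...), and only the async path switches its branch condition from the hard-coded list to openai_reasoners (the sync context line keeps the old list). The retained context line still coerces the reply with json.loads into the pydantic response_model, so the call now relies on the model emitting valid JSON without a schema constraint. A minimal sketch of that new shape; the client construction, model name, schema, and prompt are illustrative only:

    import json
    from openai import OpenAI
    from pydantic import BaseModel

    class Answer(BaseModel):
        reasoning: str
        value: int

    client = OpenAI()
    output = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Reply with JSON containing 'reasoning' and 'value'."}],
        temperature=0,
        max_tokens=256,
    )
    # Same coercion step as the unchanged context line in the diff; it raises
    # if the content is not valid JSON matching the model's fields.
    api_result = Answer(**json.loads(output.choices[0].message.content))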
@@ -97,6 +97,9 @@ class OpenAIStandard(VendorBase):
  "model": model,
  "messages": messages,
  }
+ # Add max_tokens if present in lm_config
+ if "max_tokens" in lm_config:
+ api_params["max_completion_tokens"] = lm_config["max_tokens"]

  # Add tools if provided
  if tools:
@@ -180,6 +183,9 @@ class OpenAIStandard(VendorBase):
  "model": model,
  "messages": messages,
  }
+ # Add max_tokens if present in lm_config
+ if "max_tokens" in lm_config:
+ api_params["max_tokens"] = lm_config["max_tokens"]

  # Add tools if provided
  if tools:
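These two OpenAIStandard hunks add the same guard but write to different request fields: the first maps lm_config["max_tokens"] to max_completion_tokens (the parameter OpenAI's newer reasoning models require), the second to max_tokens. A sketch of the resulting request params, using only names from the hunks (model names illustrative):

    lm_config = {"max_tokens": 1024}

    # First hunk: reasoning-model-friendly field
    api_params = {"model": "o4-mini", "messages": []}
    if "max_tokens" in lm_config:
        api_params["max_completion_tokens"] = lm_config["max_tokens"]

    # Second hunk: classic completion field
    api_params_2 = {"model": "gpt-4o-mini", "messages": []}
    if "max_tokens" in lm_config:
        api_params_2["max_tokens"] = lm_config["max_tokens"]

    print(api_params, api_params_2)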
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: synth-ai
- Version: 0.1.0.dev35
+ Version: 0.1.0.dev37
  Summary: Software for aiding the best and multiplying the will.
  Author: Josh Purtell
  Author-email: Josh Purtell <josh@usesynth.ai>
@@ -8,6 +8,7 @@ public_tests/test_gemini_output.py,sha256=704NCnxNepYjUxJj3eEms6zHRCps2PSaR8A-lc
  public_tests/test_gemini_structured_outputs.py,sha256=yKa3CDVJxE_Vb2BbVROje83Pb35MBusF0Nb-ttWbqS8,4001
  public_tests/test_models.py,sha256=QGevBfBuQzwyKw1ez34igDyJpMTBVOc3meW6yqFE-bM,5853
  public_tests/test_openai_structured_outputs.py,sha256=oIhdZ2QVLmn0LaqBpCP3Qhbn2KHJv633DGn6u9Ousak,3999
+ public_tests/test_reasoning_effort.py,sha256=w4dIiEaEU8gnfAmjrpCC5y-c9w-eH9NzFjwUHe2deyg,3089
  public_tests/test_reasoning_models.py,sha256=Vr4sFRYcrYOBAZMFz2a0fZQqa-WjRwbtwc6lXy6bF4I,2897
  public_tests/test_recursive_structured_outputs.py,sha256=rrqzsU5ExNt-m_wu9j_fkbHiEsAtbKEK66uK5Ub2ojs,6296
  public_tests/test_structured.py,sha256=rftVwvYgMSHkRZM1WUJzga5Uvl9hmc5OpXzBshEXNF0,3740
@@ -29,26 +30,26 @@ synth_ai/zyk/lms/caching/persistent.py,sha256=ZaY1A9qhvfNKzcAI9FnwbIrgMKvVeIfb_y
  synth_ai/zyk/lms/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synth_ai/zyk/lms/core/all.py,sha256=wakK0HhvYRuaQZmxClURyNf3vUkTbm3OABw3TgpMjOQ,1185
  synth_ai/zyk/lms/core/exceptions.py,sha256=K0BVdAzxVIchsvYZAaHEH1GAWBZvpxhFi-SPcJOjyPQ,205
- synth_ai/zyk/lms/core/main.py,sha256=kKxk-1TZQMNXDrLv7qA42fNOsXes-G9kLtNg-LtrpYY,10370
- synth_ai/zyk/lms/core/vendor_clients.py,sha256=go6VGF3-JkZyUD81LwRkcBaxdWSVaV9vRxVTNqKSxvM,2781
+ synth_ai/zyk/lms/core/main.py,sha256=NNPd4wwpgscFtCCrVPgz6gcrg7kOTSKsBFhldV0kwv0,12502
+ synth_ai/zyk/lms/core/vendor_clients.py,sha256=C4ICuczCG2yRpDbrraT0LUoaPFYytuetfJLqhgvGn8A,2785
  synth_ai/zyk/lms/cost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synth_ai/zyk/lms/cost/monitor.py,sha256=cSKIvw6WdPZIRubADWxQoh1MdB40T8-jjgfNUeUHIn0,5
  synth_ai/zyk/lms/cost/statefulness.py,sha256=TOsuXL8IjtKOYJ2aJQF8TwJVqn_wQ7AIwJJmdhMye7U,36
  synth_ai/zyk/lms/structured_outputs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- synth_ai/zyk/lms/structured_outputs/handler.py,sha256=BQ0T4HBFXC9qesF8v0lG8MuiOecWm2YEF75nUt1mB_s,16925
+ synth_ai/zyk/lms/structured_outputs/handler.py,sha256=Y7qQ8VReofLKDX6M7L5OXBUmTyHw6bWEfYz0jqvQIZ0,16969
  synth_ai/zyk/lms/structured_outputs/inject.py,sha256=Fy-zDeleRxOZ8ZRM6IuZ6CP2XZnMe4K2PEn4Q9c_KPY,11777
  synth_ai/zyk/lms/structured_outputs/rehabilitate.py,sha256=GuIhzsb7rTvwgn7f9I9omNnXBz5Me_qrtNYcTWzw5_U,7909
  synth_ai/zyk/lms/tools/base.py,sha256=j7wYb1xAvaAm3qVrINphgUhGS-UjZmRpbouseQYgh7A,3228
  synth_ai/zyk/lms/vendors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synth_ai/zyk/lms/vendors/base.py,sha256=aK4PEtkMLt_o3qD22kW-x3HJUEKdIk06zlH4kX0VkAE,760
  synth_ai/zyk/lms/vendors/constants.py,sha256=3CCq45otD80yaLts5sFHvPgLCQNkcjHkc9cqOQ0zH4Y,320
- synth_ai/zyk/lms/vendors/openai_standard.py,sha256=oii23QtG_sh_V2yFV1ZMF7F0t9Q_mGL8yM_QxZnZ9QA,12091
+ synth_ai/zyk/lms/vendors/openai_standard.py,sha256=tgZKV9VUos7is05wd9vJJ3EXZPol5G4uosEjmKiqA0w,12402
  synth_ai/zyk/lms/vendors/retries.py,sha256=m-WvAiPix9ovnO2S-m53Td5VZDWBVBFuHuSK9--OVxw,38
  synth_ai/zyk/lms/vendors/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=vxANYEcU46n6flRJ4y5j4VrSA1ky4EXo8nWgYPLi3HU,13829
- synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=I1goLy5R8eBLrun2jpnD4o87NlmzWgPrfYaeu9RZN8M,11008
+ synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=Yr7xNKa2XnQe5ReEWbxt-3zHv-IQkaV8PmwZqSEB_no,13829
+ synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=PcR4ZHu9NzBabIFeJc2IkT3jCat3Nk8hZm1KoD7uPRM,11390
  synth_ai/zyk/lms/vendors/core/mistral_api.py,sha256=-EMPBEIoYxxDMxukmcmKL8AGAHPNYe4w-76gsPtmrhk,11860
- synth_ai/zyk/lms/vendors/core/openai_api.py,sha256=QkQqba851EEGf9n5H31-pJ6WexhTZkdPWQap0oGy2Ho,6713
+ synth_ai/zyk/lms/vendors/core/openai_api.py,sha256=GDCHIc0kpCnNPj2oW8RE3Cj2U_HcbXzzA5JV1ArAQlE,6600
  synth_ai/zyk/lms/vendors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synth_ai/zyk/lms/vendors/local/ollama.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synth_ai/zyk/lms/vendors/supported/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -56,11 +57,11 @@ synth_ai/zyk/lms/vendors/supported/deepseek.py,sha256=BElW0NGpkSA62wOqzzMtDw8XR3
  synth_ai/zyk/lms/vendors/supported/groq.py,sha256=Fbi7QvhdLx0F-VHO5PY-uIQlPR0bo3C9h1MvIOx8nz0,388
  synth_ai/zyk/lms/vendors/supported/ollama.py,sha256=K30VBFRTd7NYyPmyBVRZS2sm0UB651AHp9i3wd55W64,469
  synth_ai/zyk/lms/vendors/supported/together.py,sha256=Ni_jBqqGPN0PkkY-Ew64s3gNKk51k3FCpLSwlNhKbf0,342
- synth_ai-0.1.0.dev35.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
+ synth_ai-0.1.0.dev37.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
  tests/test_agent.py,sha256=CjPPWuMWC_TzX1DkDald-bbAxgjXE-HPQvFhq2B--5k,22363
  tests/test_recursive_structured_outputs.py,sha256=Ne-9XwnOxN7eSpGbNHOpegR-sRj589I84T6y8Z_4QnA,5781
  tests/test_structured_outputs.py,sha256=J7sfbGZ7OeB5ONIKpcCTymyayNyAdFfGokC1bcUrSx0,3651
- synth_ai-0.1.0.dev35.dist-info/METADATA,sha256=T0RAPsjK51rDJ_uTIeny_E99x-lL9fRQHuQjowIyVuk,2702
- synth_ai-0.1.0.dev35.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- synth_ai-0.1.0.dev35.dist-info/top_level.txt,sha256=5GzJO9j-KbJ_4ppxhmCUa_qdhHM4-9cHHNU76yAI8do,42
- synth_ai-0.1.0.dev35.dist-info/RECORD,,
+ synth_ai-0.1.0.dev37.dist-info/METADATA,sha256=WksAY7eMseikIiWk5JaogfO4yyPQsUqQ6xgSLC152AU,2702
+ synth_ai-0.1.0.dev37.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ synth_ai-0.1.0.dev37.dist-info/top_level.txt,sha256=5GzJO9j-KbJ_4ppxhmCUa_qdhHM4-9cHHNU76yAI8do,42
+ synth_ai-0.1.0.dev37.dist-info/RECORD,,