synth-ai 0.1.0.dev37__py3-none-any.whl → 0.1.0.dev39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- synth_ai/zyk/lms/caching/handler.py +12 -0
- synth_ai/zyk/lms/vendors/core/anthropic_api.py +40 -4
- synth_ai/zyk/lms/vendors/core/gemini_api.py +4 -4
- synth_ai/zyk/lms/vendors/openai_standard.py +26 -9
- {synth_ai-0.1.0.dev37.dist-info → synth_ai-0.1.0.dev39.dist-info}/METADATA +1 -1
- {synth_ai-0.1.0.dev37.dist-info → synth_ai-0.1.0.dev39.dist-info}/RECORD +9 -9
- {synth_ai-0.1.0.dev37.dist-info → synth_ai-0.1.0.dev39.dist-info}/WHEEL +0 -0
- {synth_ai-0.1.0.dev37.dist-info → synth_ai-0.1.0.dev39.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.1.0.dev37.dist-info → synth_ai-0.1.0.dev39.dist-info}/top_level.txt +0 -0
synth_ai/zyk/lms/caching/handler.py

@@ -18,6 +18,8 @@ def map_params_to_key(
     temperature: float,
     response_model: Optional[Type[BaseModel]],
     tools: Optional[List] = None,
+    reasoning_effort: Optional[str] = None,
+    max_tokens: Optional[int] = None,
 ) -> str:
     if not all([isinstance(msg["content"], str) for msg in messages]):
         normalized_messages = "".join([str(msg["content"]) for msg in messages])
@@ -26,6 +28,8 @@ def map_params_to_key(
     normalized_model = model
     normalized_temperature = f"{temperature:.2f}"[:4]
     normalized_response_model = str(response_model.schema()) if response_model else ""
+    normalized_reasoning_effort = str(reasoning_effort) if reasoning_effort is not None else ""
+    normalized_max_tokens = str(max_tokens) if max_tokens is not None else ""
 
     # Normalize tools if present
     normalized_tools = ""
@@ -57,6 +61,8 @@ def map_params_to_key(
             + normalized_temperature
             + normalized_response_model
             + normalized_tools
+            + normalized_reasoning_effort
+            + normalized_max_tokens
         ).encode()
     ).hexdigest()
 
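Taken together, these three hunks widen the cache key: reasoning_effort and max_tokens are now normalized (None becomes an empty string) and concatenated into the hashed string alongside the messages, model, temperature, response model, and tools. A minimal sketch of the resulting behavior, assuming a hashlib.sha256 digest (the surrounding .encode()/.hexdigest() calls suggest hashlib, but the hash constructor sits outside these hunks) and omitting the response-model and tool normalization for brevity; sketch_key is a hypothetical stand-in for map_params_to_key:

```python
# Sketch only: reduced to the parts visible in this diff.
import hashlib
from typing import Optional


def sketch_key(
    messages: list,
    model: str,
    temperature: float,
    reasoning_effort: Optional[str] = None,
    max_tokens: Optional[int] = None,
) -> str:
    normalized_messages = "".join([str(msg["content"]) for msg in messages])
    normalized_temperature = f"{temperature:.2f}"[:4]
    # New in dev39: None normalizes to "", so keys for calls that never set
    # these parameters come out byte-for-byte identical to the dev37 keys.
    normalized_reasoning_effort = str(reasoning_effort) if reasoning_effort is not None else ""
    normalized_max_tokens = str(max_tokens) if max_tokens is not None else ""
    return hashlib.sha256(
        (
            normalized_messages
            + model
            + normalized_temperature
            + normalized_reasoning_effort
            + normalized_max_tokens
        ).encode()
    ).hexdigest()
```

The practical consequence: two otherwise identical calls that differ only in reasoning effort or token budget now hash to different keys, so a cached low-effort answer can no longer be served for a high-effort request, while calls that leave both parameters unset keep their old keys and existing cache entries stay valid.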
@@ -83,6 +89,7 @@ class CacheHandler:
         messages: List[Dict[str, Any]],
         lm_config: Dict[str, Any],
         tools: Optional[List] = None,
+        reasoning_effort: Optional[str] = None,
     ) -> Optional[BaseLMResponse]:
         """Hit the cache with the given key."""
         self._validate_messages(messages)
@@ -93,6 +100,8 @@ class CacheHandler:
             lm_config.get("temperature", 0.0),
             lm_config.get("response_model", None),
             tools,
+            reasoning_effort,
+            lm_config.get("max_tokens"),
         )
         if self.use_persistent_store:
             return persistent_cache.hit_cache(
@@ -112,6 +121,7 @@ class CacheHandler:
         lm_config: Dict[str, Any],
         output: BaseLMResponse,
         tools: Optional[List] = None,
+        reasoning_effort: Optional[str] = None,
     ) -> None:
         """Add the given output to the cache."""
         self._validate_messages(messages)
@@ -123,6 +133,8 @@ class CacheHandler:
             lm_config.get("temperature", 0.0),
             lm_config.get("response_model", None),
             tools,
+            reasoning_effort,
+            lm_config.get("max_tokens"),
         )
         if self.use_persistent_store:
             persistent_cache.add_to_cache(key, output)
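Both CacheHandler entry points, the hit path above and the store path below it, gain the same optional reasoning_effort parameter and read max_tokens out of lm_config, so the ephemeral and persistent caches key on the new fields consistently. A hypothetical usage sketch built from the call shapes visible in this diff; the model name, messages, and config values are invented, and the import location of get_cache_handler is outside this diff:

```python
# get_cache_handler and hit_managed_cache are the names the vendor code
# below uses; their import path is not shown in this diff.
handler = get_cache_handler(True)  # True: ephemeral cache only

messages = [{"role": "user", "content": "Summarize the attached report."}]
lm_config = {"temperature": 0.0, "max_tokens": 1024}

# In dev37 these two lookups collapsed onto one cache entry; in dev39 each
# reasoning_effort value resolves to its own key.
low = handler.hit_managed_cache(
    "o3-mini", messages, lm_config=lm_config, reasoning_effort="low"
)
high = handler.hit_managed_cache(
    "o3-mini", messages, lm_config=lm_config, reasoning_effort="high"
)
```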
synth_ai/zyk/lms/vendors/core/anthropic_api.py

@@ -64,7 +64,7 @@ class AnthropicAPI(VendorBase):
         ), "response_model is not supported for standard calls"
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config, tools=tools
+            model, messages, lm_config=lm_config, tools=tools, reasoning_effort=reasoning_effort
         )
         if cache_result:
             return cache_result
@@ -135,7 +135,7 @@ class AnthropicAPI(VendorBase):
         )
 
         used_cache_handler.add_to_managed_cache(
-            model, messages, lm_config=lm_config, output=lm_response, tools=tools
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools, reasoning_effort=reasoning_effort
         )
         return lm_response
 
@@ -162,7 +162,7 @@ class AnthropicAPI(VendorBase):
             use_ephemeral_cache_only=use_ephemeral_cache_only
         )
         cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config, tools=tools
+            model, messages, lm_config=lm_config, tools=tools, reasoning_effort=reasoning_effort
         )
         if cache_result:
             return cache_result
@@ -235,7 +235,7 @@ class AnthropicAPI(VendorBase):
         )
 
         used_cache_handler.add_to_managed_cache(
-            model, messages, lm_config=lm_config, output=lm_response, tools=tools
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools, reasoning_effort=reasoning_effort
         )
         return lm_response
 
@@ -249,6 +249,17 @@ class AnthropicAPI(VendorBase):
         reasoning_effort: str = "high",
         **vendor_params: Dict[str, Any],
     ) -> BaseLMResponse:
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+        lm_config = {"temperature": temperature, "response_model": response_model}
+        cache_result = used_cache_handler.hit_managed_cache(
+            model=model,
+            messages=messages,
+            lm_config=lm_config,
+            reasoning_effort=reasoning_effort,
+        )
+        if cache_result:
+            return cache_result
+
         try:
             # First try with Anthropic
             reasoning_effort = vendor_params.get("reasoning_effort", reasoning_effort)
@@ -283,6 +294,13 @@ class AnthropicAPI(VendorBase):
                 structured_output=response_model(**parsed),
                 tool_calls=None,
             )
+            used_cache_handler.add_to_managed_cache(
+                model=model,
+                messages=messages,
+                lm_config=lm_config,
+                output=lm_response,
+                reasoning_effort=reasoning_effort,
+            )
             return lm_response
         except (json.JSONDecodeError, pydantic.ValidationError):
             # If Anthropic fails, fallback to OpenAI
@@ -306,6 +324,17 @@ class AnthropicAPI(VendorBase):
         reasoning_effort: str = "high",
         **vendor_params: Dict[str, Any],
     ) -> BaseLMResponse:
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+        lm_config = {"temperature": temperature, "response_model": response_model}
+        cache_result = used_cache_handler.hit_managed_cache(
+            model=model,
+            messages=messages,
+            lm_config=lm_config,
+            reasoning_effort=reasoning_effort,
+        )
+        if cache_result:
+            return cache_result
+
         try:
             # First try with Anthropic
             reasoning_effort = vendor_params.get("reasoning_effort", reasoning_effort)
@@ -342,6 +371,13 @@ class AnthropicAPI(VendorBase):
                 structured_output=response_model(**parsed),
                 tool_calls=None,
             )
+            used_cache_handler.add_to_managed_cache(
+                model=model,
+                messages=messages,
+                lm_config=lm_config,
+                output=lm_response,
+                reasoning_effort=reasoning_effort,
+            )
             return lm_response
         except (json.JSONDecodeError, pydantic.ValidationError):
             # If Anthropic fails, fallback to OpenAI
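These structured-output hunks are the substantive change in this file. Previously both structured-output methods went straight to the vendor; now each wraps the request in the same check, call, store sequence the plain completion paths already use, with add_to_managed_cache sitting on the Anthropic success path (the OpenAI fallback taken on JSONDecodeError or ValidationError is outside these hunks and is not shown caching). Reduced to a pattern sketch, with the cache handler and the vendor request passed in as parameters so nothing outside this diff has to be named:

```python
# Pattern sketch only, not the library's code. hit_managed_cache and
# add_to_managed_cache are the method names from this diff; handler and
# call_vendor stand in for objects constructed elsewhere.
def structured_call(
    handler,      # CacheHandler-like object, e.g. from get_cache_handler(...)
    call_vendor,  # callable performing the actual Anthropic request + parsing
    model,
    messages,
    temperature,
    response_model,
    reasoning_effort="high",
):
    lm_config = {"temperature": temperature, "response_model": response_model}

    # 1. Check: reuse any response stored under this exact combination of
    #    model, messages, config, and reasoning effort.
    cached = handler.hit_managed_cache(
        model=model,
        messages=messages,
        lm_config=lm_config,
        reasoning_effort=reasoning_effort,
    )
    if cached:
        return cached

    # 2. Call the vendor and validate the structured output.
    lm_response = call_vendor(model, messages, temperature, response_model)

    # 3. Store the validated response before returning it.
    handler.add_to_managed_cache(
        model=model,
        messages=messages,
        lm_config=lm_config,
        output=lm_response,
        reasoning_effort=reasoning_effort,
    )
    return lm_response
```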
synth_ai/zyk/lms/vendors/core/gemini_api.py

@@ -237,7 +237,7 @@ class GeminiAPI(VendorBase):
         ), "response_model is not supported for standard calls"
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config, tools=tools
+            model, messages, lm_config=lm_config, tools=tools, reasoning_effort=reasoning_effort
         )
         if cache_result:
             return cache_result
@@ -256,7 +256,7 @@ class GeminiAPI(VendorBase):
         )
 
         used_cache_handler.add_to_managed_cache(
-            model, messages, lm_config=lm_config, output=lm_response, tools=tools
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools, reasoning_effort=reasoning_effort
         )
         return lm_response
 
@@ -282,7 +282,7 @@ class GeminiAPI(VendorBase):
             use_ephemeral_cache_only=use_ephemeral_cache_only
         )
         cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config, tools=tools
+            model, messages, lm_config=lm_config, tools=tools, reasoning_effort=reasoning_effort
         )
         if cache_result:
             return cache_result
@@ -301,6 +301,6 @@ class GeminiAPI(VendorBase):
         )
 
         used_cache_handler.add_to_managed_cache(
-            model, messages, lm_config=lm_config, output=lm_response, tools=tools
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools, reasoning_effort=reasoning_effort
         )
         return lm_response
synth_ai/zyk/lms/vendors/openai_standard.py

@@ -87,7 +87,7 @@ class OpenAIStandard(VendorBase):
         messages = special_orion_transform(model, messages)
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config, tools=tools
+            model, messages, lm_config=lm_config, tools=tools, reasoning_effort=reasoning_effort
         )
         if cache_result:
             return cache_result
@@ -118,7 +118,6 @@ class OpenAIStandard(VendorBase):
 
         # Add reasoning_effort only for o3-mini
         if model in openai_reasoners:
-            print("Reasoning effort:", reasoning_effort)
             api_params["reasoning_effort"] = reasoning_effort
 
         output = await self.async_client.chat.completions.create(**api_params)
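The only change in this hunk is the removal of a stray debug print; the surrounding guard, which attaches reasoning_effort only for models listed in openai_reasoners, is unchanged. A minimal sketch of that guard; the contents of openai_reasoners are not shown in this diff, so the set below is an assumption based on the "o3-mini" code comment:

```python
# Assumption: openai_reasoners holds the reasoning-capable model names;
# only "o3-mini" is named in the comment above.
openai_reasoners = {"o3-mini"}


def build_api_params(model: str, messages: list, reasoning_effort: str = "high") -> dict:
    api_params = {"model": model, "messages": messages}
    if model in openai_reasoners:
        # reasoning_effort is a reasoning-model parameter; standard chat
        # models do not accept it, hence the guard.
        api_params["reasoning_effort"] = reasoning_effort
    return api_params
```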
@@ -145,7 +144,7 @@ class OpenAIStandard(VendorBase):
             tool_calls=tool_calls,
         )
         used_cache_handler.add_to_managed_cache(
-            model, messages, lm_config=lm_config, output=lm_response, tools=tools
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools, reasoning_effort=reasoning_effort
         )
         return lm_response
 
@@ -173,7 +172,7 @@ class OpenAIStandard(VendorBase):
             use_ephemeral_cache_only=use_ephemeral_cache_only
         )
         cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config, tools=tools
+            model, messages, lm_config=lm_config, tools=tools, reasoning_effort=reasoning_effort
         )
         if cache_result:
             return cache_result
@@ -230,7 +229,7 @@ class OpenAIStandard(VendorBase):
             tool_calls=tool_calls,
         )
         used_cache_handler.add_to_managed_cache(
-            model, messages, lm_config=lm_config, output=lm_response, tools=tools
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools, reasoning_effort=reasoning_effort
         )
         return lm_response
 
@@ -248,7 +247,11 @@ class OpenAIStandard(VendorBase):
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         cache_result: Union[BaseLMResponse, None] = (
             used_cache_handler.hit_managed_cache(
-                model,
+                model,
+                messages,
+                lm_config=lm_config,
+                tools=tools,
+                reasoning_effort=reasoning_effort,
             )
         )
         if cache_result is not None:
@@ -291,7 +294,12 @@ class OpenAIStandard(VendorBase):
             tool_calls=tool_calls,
         )
         used_cache_handler.add_to_managed_cache(
-            model,
+            model,
+            messages,
+            lm_config=lm_config,
+            output=lm_response,
+            tools=tools,
+            reasoning_effort=reasoning_effort,
         )
         return lm_response
 
@@ -309,7 +317,11 @@ class OpenAIStandard(VendorBase):
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         cache_result: Union[BaseLMResponse, None] = (
             used_cache_handler.hit_managed_cache(
-                model,
+                model,
+                messages,
+                lm_config=lm_config,
+                tools=tools,
+                reasoning_effort=reasoning_effort,
             )
         )
         if cache_result is not None:
@@ -352,6 +364,11 @@ class OpenAIStandard(VendorBase):
             tool_calls=tool_calls,
         )
         used_cache_handler.add_to_managed_cache(
-            model,
+            model,
+            messages,
+            lm_config=lm_config,
+            output=lm_response,
+            tools=tools,
+            reasoning_effort=reasoning_effort,
         )
         return lm_response
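The last four OpenAIStandard hunks rewrite what the viewer shows as a lone positional model argument into full keyword-style calls that carry messages, lm_config, tools, and reasoning_effort through the remaining structured-output paths. End to end, the release's effect can be summarized with the key function from handler.py; keyword arguments are used below because the leading parameter order of map_params_to_key is not fully visible in its hunk, so the messages= and model= names are an assumption:

```python
# map_params_to_key is defined in the handler.py file diffed above.
from synth_ai.zyk.lms.caching.handler import map_params_to_key

msgs = [{"role": "user", "content": "What is 2 + 2?"}]

# Distinct reasoning efforts now hash to distinct cache keys...
key_low = map_params_to_key(
    messages=msgs, model="o3-mini", temperature=0.0,
    response_model=None, reasoning_effort="low",
)
key_high = map_params_to_key(
    messages=msgs, model="o3-mini", temperature=0.0,
    response_model=None, reasoning_effort="high",
)
assert key_low != key_high

# ...while a call that leaves the new parameters unset normalizes them to
# "", reproducing its dev37 key, so existing cache entries stay valid.
key_default = map_params_to_key(
    messages=msgs, model="o3-mini", temperature=0.0, response_model=None,
)
```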
{synth_ai-0.1.0.dev37.dist-info → synth_ai-0.1.0.dev39.dist-info}/RECORD

@@ -24,7 +24,7 @@ synth_ai/zyk/lms/caching/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 synth_ai/zyk/lms/caching/constants.py,sha256=fPi3x9p-yRdvixMSIyclvmwmwCRliXLXQjEm6dRnG8s,52
 synth_ai/zyk/lms/caching/dbs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synth_ai/zyk/lms/caching/ephemeral.py,sha256=pNMG5Rzzp2m0Ln1UYmWxz1qbXwq3iNIrhjYAS0yO3ZE,2370
-synth_ai/zyk/lms/caching/handler.py,sha256=
+synth_ai/zyk/lms/caching/handler.py,sha256=4h4Kywf0_-WohE1RxBt4cqPo-kHRjZv-2K50WWO91V4,5050
 synth_ai/zyk/lms/caching/initialize.py,sha256=zZls6RKAax6Z-8oJInGaSg_RPN_fEZ6e_RCX64lMLJw,416
 synth_ai/zyk/lms/caching/persistent.py,sha256=ZaY1A9qhvfNKzcAI9FnwbIrgMKvVeIfb_yCyl3M8dxE,2860
 synth_ai/zyk/lms/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -43,11 +43,11 @@ synth_ai/zyk/lms/tools/base.py,sha256=j7wYb1xAvaAm3qVrINphgUhGS-UjZmRpbouseQYgh7
 synth_ai/zyk/lms/vendors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synth_ai/zyk/lms/vendors/base.py,sha256=aK4PEtkMLt_o3qD22kW-x3HJUEKdIk06zlH4kX0VkAE,760
 synth_ai/zyk/lms/vendors/constants.py,sha256=3CCq45otD80yaLts5sFHvPgLCQNkcjHkc9cqOQ0zH4Y,320
-synth_ai/zyk/lms/vendors/openai_standard.py,sha256=
+synth_ai/zyk/lms/vendors/openai_standard.py,sha256=SOhm64gI65ZQD2VtvNxZz0dqK3hANEQ2WP6V0kSku20,12877
 synth_ai/zyk/lms/vendors/retries.py,sha256=m-WvAiPix9ovnO2S-m53Td5VZDWBVBFuHuSK9--OVxw,38
 synth_ai/zyk/lms/vendors/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=
-synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=
+synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=cgSPFgR9YM8eO1if92fch6pVv0Dxk3xigSXExGSdu-4,15329
+synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=gsWudX1RP2a4mjuyWZXT3LUek-UMUYhCfE5TMYxp0nA,11530
 synth_ai/zyk/lms/vendors/core/mistral_api.py,sha256=-EMPBEIoYxxDMxukmcmKL8AGAHPNYe4w-76gsPtmrhk,11860
 synth_ai/zyk/lms/vendors/core/openai_api.py,sha256=GDCHIc0kpCnNPj2oW8RE3Cj2U_HcbXzzA5JV1ArAQlE,6600
 synth_ai/zyk/lms/vendors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -57,11 +57,11 @@ synth_ai/zyk/lms/vendors/supported/deepseek.py,sha256=BElW0NGpkSA62wOqzzMtDw8XR3
 synth_ai/zyk/lms/vendors/supported/groq.py,sha256=Fbi7QvhdLx0F-VHO5PY-uIQlPR0bo3C9h1MvIOx8nz0,388
 synth_ai/zyk/lms/vendors/supported/ollama.py,sha256=K30VBFRTd7NYyPmyBVRZS2sm0UB651AHp9i3wd55W64,469
 synth_ai/zyk/lms/vendors/supported/together.py,sha256=Ni_jBqqGPN0PkkY-Ew64s3gNKk51k3FCpLSwlNhKbf0,342
-synth_ai-0.1.0.
+synth_ai-0.1.0.dev39.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
 tests/test_agent.py,sha256=CjPPWuMWC_TzX1DkDald-bbAxgjXE-HPQvFhq2B--5k,22363
 tests/test_recursive_structured_outputs.py,sha256=Ne-9XwnOxN7eSpGbNHOpegR-sRj589I84T6y8Z_4QnA,5781
 tests/test_structured_outputs.py,sha256=J7sfbGZ7OeB5ONIKpcCTymyayNyAdFfGokC1bcUrSx0,3651
-synth_ai-0.1.0.
-synth_ai-0.1.0.
-synth_ai-0.1.0.
-synth_ai-0.1.0.
+synth_ai-0.1.0.dev39.dist-info/METADATA,sha256=dUv2K-LU-yTLqXf_wI1bfuLWwy1xLjsV3qnYvnkq830,2702
+synth_ai-0.1.0.dev39.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+synth_ai-0.1.0.dev39.dist-info/top_level.txt,sha256=5GzJO9j-KbJ_4ppxhmCUa_qdhHM4-9cHHNU76yAI8do,42
+synth_ai-0.1.0.dev39.dist-info/RECORD,,
{synth_ai-0.1.0.dev37.dist-info → synth_ai-0.1.0.dev39.dist-info}/WHEEL: file without changes
{synth_ai-0.1.0.dev37.dist-info → synth_ai-0.1.0.dev39.dist-info}/licenses/LICENSE: file without changes
{synth_ai-0.1.0.dev37.dist-info → synth_ai-0.1.0.dev39.dist-info}/top_level.txt: file without changes