synth-ai 0.1.0.dev50__py3-none-any.whl → 0.1.0.dev52__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
Files changed (47)
  1. synth_ai/__init__.py +1 -1
  2. synth_ai/zyk/__init__.py +3 -0
  3. synth_ai/zyk/lms/__init__.py +0 -0
  4. synth_ai/zyk/lms/caching/__init__.py +0 -0
  5. synth_ai/zyk/lms/caching/constants.py +1 -0
  6. synth_ai/zyk/lms/caching/dbs.py +0 -0
  7. synth_ai/zyk/lms/caching/ephemeral.py +72 -0
  8. synth_ai/zyk/lms/caching/handler.py +137 -0
  9. synth_ai/zyk/lms/caching/initialize.py +13 -0
  10. synth_ai/zyk/lms/caching/persistent.py +83 -0
  11. synth_ai/zyk/lms/config.py +10 -0
  12. synth_ai/zyk/lms/constants.py +22 -0
  13. synth_ai/zyk/lms/core/__init__.py +0 -0
  14. synth_ai/zyk/lms/core/all.py +47 -0
  15. synth_ai/zyk/lms/core/exceptions.py +9 -0
  16. synth_ai/zyk/lms/core/main.py +268 -0
  17. synth_ai/zyk/lms/core/vendor_clients.py +85 -0
  18. synth_ai/zyk/lms/cost/__init__.py +0 -0
  19. synth_ai/zyk/lms/cost/monitor.py +1 -0
  20. synth_ai/zyk/lms/cost/statefulness.py +1 -0
  21. synth_ai/zyk/lms/structured_outputs/__init__.py +0 -0
  22. synth_ai/zyk/lms/structured_outputs/handler.py +441 -0
  23. synth_ai/zyk/lms/structured_outputs/inject.py +314 -0
  24. synth_ai/zyk/lms/structured_outputs/rehabilitate.py +187 -0
  25. synth_ai/zyk/lms/tools/base.py +118 -0
  26. synth_ai/zyk/lms/vendors/__init__.py +0 -0
  27. synth_ai/zyk/lms/vendors/base.py +31 -0
  28. synth_ai/zyk/lms/vendors/core/__init__.py +0 -0
  29. synth_ai/zyk/lms/vendors/core/anthropic_api.py +365 -0
  30. synth_ai/zyk/lms/vendors/core/gemini_api.py +282 -0
  31. synth_ai/zyk/lms/vendors/core/mistral_api.py +331 -0
  32. synth_ai/zyk/lms/vendors/core/openai_api.py +187 -0
  33. synth_ai/zyk/lms/vendors/local/__init__.py +0 -0
  34. synth_ai/zyk/lms/vendors/local/ollama.py +0 -0
  35. synth_ai/zyk/lms/vendors/openai_standard.py +345 -0
  36. synth_ai/zyk/lms/vendors/retries.py +3 -0
  37. synth_ai/zyk/lms/vendors/supported/__init__.py +0 -0
  38. synth_ai/zyk/lms/vendors/supported/deepseek.py +73 -0
  39. synth_ai/zyk/lms/vendors/supported/groq.py +16 -0
  40. synth_ai/zyk/lms/vendors/supported/ollama.py +14 -0
  41. synth_ai/zyk/lms/vendors/supported/together.py +11 -0
  42. {synth_ai-0.1.0.dev50.dist-info → synth_ai-0.1.0.dev52.dist-info}/METADATA +2 -1
  43. synth_ai-0.1.0.dev52.dist-info/RECORD +46 -0
  44. synth_ai-0.1.0.dev50.dist-info/RECORD +0 -6
  45. {synth_ai-0.1.0.dev50.dist-info → synth_ai-0.1.0.dev52.dist-info}/WHEEL +0 -0
  46. {synth_ai-0.1.0.dev50.dist-info → synth_ai-0.1.0.dev52.dist-info}/licenses/LICENSE +0 -0
  47. {synth_ai-0.1.0.dev50.dist-info → synth_ai-0.1.0.dev52.dist-info}/top_level.txt +0 -0
synth_ai/zyk/lms/vendors/openai_standard.py
@@ -0,0 +1,345 @@
+ from typing import Any, Dict, List, Optional, Union
+
+ import groq
+ import openai
+ import pydantic_core
+ from pydantic import BaseModel
+
+ from synth_ai.zyk.lms.caching.initialize import (
+     get_cache_handler,
+ )
+ from synth_ai.zyk.lms.tools.base import BaseTool
+ from synth_ai.zyk.lms.vendors.base import BaseLMResponse, VendorBase
+ from synth_ai.zyk.lms.constants import SPECIAL_BASE_TEMPS
+
+ DEFAULT_EXCEPTIONS_TO_RETRY = (
+     pydantic_core._pydantic_core.ValidationError,
+     openai.APIConnectionError,
+     openai.APITimeoutError,
+     groq.InternalServerError,
+     groq.APITimeoutError,
+     groq.APIConnectionError,
+ )
+
+
+ def special_orion_transform(
+     model: str, messages: List[Dict[str, Any]]
+ ) -> List[Dict[str, Any]]:
+     if "o1-" in model:
+         messages = [
+             {
+                 "role": "user",
+                 "content": f"<instructions>{messages[0]['content']}</instructions><information>{messages[1]}</information>",
+             }
+         ]
+     return messages
+
+
+ def on_backoff_handler_async(details):
+     # Print every 5th retry attempt, excluding the first retry
+     if details["tries"] > 1 and (details["tries"] - 1) % 5 == 0:
+         print(f"Retrying async API call (attempt {details['tries'] - 1})")
+
+
+ def on_backoff_handler_sync(details):
+     # Print every 5th retry attempt, excluding the first retry
+     if details["tries"] > 1 and (details["tries"] - 1) % 5 == 0:
+         print(f"Retrying sync API call (attempt {details['tries'] - 1})")
+
+
+ class OpenAIStandard(VendorBase):
+     used_for_structured_outputs: bool = True
+     exceptions_to_retry: List = DEFAULT_EXCEPTIONS_TO_RETRY
+     sync_client: Any
+     async_client: Any
+
+     def __init__(
+         self,
+         sync_client: Any,
+         async_client: Any,
+         exceptions_to_retry: List[Exception] = DEFAULT_EXCEPTIONS_TO_RETRY,
+         used_for_structured_outputs: bool = False,
+     ):
+         self.sync_client = sync_client
+         self.async_client = async_client
+         self.used_for_structured_outputs = used_for_structured_outputs
+         self.exceptions_to_retry = exceptions_to_retry
+
+     # @backoff.on_exception(
+     #     backoff.expo,
+     #     exceptions_to_retry,
+     #     max_tries=BACKOFF_TOLERANCE,
+     #     on_backoff=on_backoff_handler_async,
+     #     on_giveup=lambda e: print(e),
+     # )
+     async def _hit_api_async(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         lm_config: Dict[str, Any],
+         use_ephemeral_cache_only: bool = False,
+         reasoning_effort: str = "high",
+         tools: Optional[List[BaseTool]] = None,
+     ) -> BaseLMResponse:
+         assert (
+             lm_config.get("response_model", None) is None
+         ), "response_model is not supported for standard calls"
+         messages = special_orion_transform(model, messages)
+         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+         lm_config["reasoning_effort"] = reasoning_effort
+         cache_result = used_cache_handler.hit_managed_cache(
+             model, messages, lm_config=lm_config, tools=tools
+         )
+         if cache_result:
+             return cache_result
+
+         # Common API call params
+         api_params = {
+             "model": model,
+             "messages": messages,
+         }
+
+         # Add tools if provided
+         if tools and all(isinstance(tool, BaseTool) for tool in tools):
+             api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+         elif tools:
+             api_params["tools"] = tools
+
+         # Only add temperature for non o1/o3 models
+         if not any(prefix in model for prefix in ["o1-", "o3-"]):
+             api_params["temperature"] = lm_config.get(
+                 "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
+             )
+
+         # Add reasoning_effort only for o3-mini
+         if model in ["o3-mini"]:
+             print("Reasoning effort:", reasoning_effort)
+             api_params["reasoning_effort"] = reasoning_effort
+
+         output = await self.async_client.chat.completions.create(**api_params)
+         message = output.choices[0].message
+
+         # Convert tool calls to dict format
+         tool_calls = None
+         if message.tool_calls:
+             tool_calls = [
+                 {
+                     "id": tc.id,
+                     "type": tc.type,
+                     "function": {
+                         "name": tc.function.name,
+                         "arguments": tc.function.arguments,
+                     },
+                 }
+                 for tc in message.tool_calls
+             ]
+
+         lm_response = BaseLMResponse(
+             raw_response=message.content or "",  # Use empty string if no content
+             structured_output=None,
+             tool_calls=tool_calls,
+         )
+         lm_config["reasoning_effort"] = reasoning_effort
+         used_cache_handler.add_to_managed_cache(
+             model, messages, lm_config=lm_config, output=lm_response, tools=tools
+         )
+         return lm_response
+
+     # @backoff.on_exception(
+     #     backoff.expo,
+     #     exceptions_to_retry,
+     #     max_tries=BACKOFF_TOLERANCE,
+     #     on_backoff=on_backoff_handler_sync,
+     #     on_giveup=lambda e: print(e),
+     # )
+     def _hit_api_sync(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         lm_config: Dict[str, Any],
+         use_ephemeral_cache_only: bool = False,
+         reasoning_effort: str = "high",
+         tools: Optional[List[BaseTool]] = None,
+     ) -> BaseLMResponse:
+         assert (
+             lm_config.get("response_model", None) is None
+         ), "response_model is not supported for standard calls"
+         messages = special_orion_transform(model, messages)
+         used_cache_handler = get_cache_handler(
+             use_ephemeral_cache_only=use_ephemeral_cache_only
+         )
+         lm_config["reasoning_effort"] = reasoning_effort
+         cache_result = used_cache_handler.hit_managed_cache(
+             model, messages, lm_config=lm_config, tools=tools
+         )
+         if cache_result:
+             return cache_result
+
+         # Common API call params
+         api_params = {
+             "model": model,
+             "messages": messages,
+         }
+
+         # Add tools if provided
+         if tools and all(isinstance(tool, BaseTool) for tool in tools):
+             api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+         elif tools:
+             api_params["tools"] = tools
+
+         # Only add temperature for non o1/o3 models
+         if not any(prefix in model for prefix in ["o1-", "o3-"]):
+             api_params["temperature"] = lm_config.get(
+                 "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
+             )
+
+         # Add reasoning_effort only for o3-mini
+         if model in ["o3-mini"]:
+             api_params["reasoning_effort"] = reasoning_effort
+
+         output = self.sync_client.chat.completions.create(**api_params)
+         message = output.choices[0].message
+
+         # Convert tool calls to dict format
+         tool_calls = None
+         if message.tool_calls:
+             tool_calls = [
+                 {
+                     "id": tc.id,
+                     "type": tc.type,
+                     "function": {
+                         "name": tc.function.name,
+                         "arguments": tc.function.arguments,
+                     },
+                 }
+                 for tc in message.tool_calls
+             ]
+
+         lm_response = BaseLMResponse(
+             raw_response=message.content or "",  # Use empty string if no content
+             structured_output=None,
+             tool_calls=tool_calls,
+         )
+         lm_config["reasoning_effort"] = reasoning_effort
+         used_cache_handler.add_to_managed_cache(
+             model, messages, lm_config=lm_config, output=lm_response, tools=tools
+         )
+         return lm_response
+
+     async def _hit_api_async_structured_output(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         response_model: BaseModel,
+         temperature: float,
+         use_ephemeral_cache_only: bool = False,
+         reasoning_effort: str = "high",
+         tools: Optional[List[BaseTool]] = None,
+     ) -> BaseLMResponse:
+         lm_config = {"temperature": temperature, "response_model": response_model, "reasoning_effort": reasoning_effort}
+         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+         cache_result: Union[BaseLMResponse, None] = (
+             used_cache_handler.hit_managed_cache(
+                 model, messages, lm_config=lm_config, tools=tools
+             )
+         )
+         if cache_result is not None:
+             return cache_result
+
+         # Common API call params
+         api_params = {
+             "model": model,
+             "messages": messages,
+         }
+
+         # Add tools if provided
+         if tools and all(isinstance(tool, BaseTool) for tool in tools):
+             api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+         elif tools:
+             api_params["tools"] = tools
+
+         # Only add temperature for non o1/o3 models
+         if not any(prefix in model for prefix in ["o1-", "o3-"]):
+             api_params["temperature"] = lm_config.get(
+                 "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
+             )
+
+         # Add reasoning_effort only for o3-mini
+         if model in ["o3-mini"]:
+             api_params["reasoning_effort"] = reasoning_effort
+
+         output = await self.async_client.chat.completions.create(**api_params)
+
+         structured_output_api_result = response_model(
+             **output.choices[0].message.content
+         )
+         tool_calls = output.choices[0].message.tool_calls
+         lm_response = BaseLMResponse(
+             raw_response=output.choices[0].message.content,
+             structured_output=structured_output_api_result,
+             tool_calls=tool_calls,
+         )
+         lm_config["reasoning_effort"] = reasoning_effort
+         used_cache_handler.add_to_managed_cache(
+             model, messages, lm_config=lm_config, output=lm_response, tools=tools
+         )
+         return lm_response
+
+     def _hit_api_sync_structured_output(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         response_model: BaseModel,
+         temperature: float,
+         use_ephemeral_cache_only: bool = False,
+         reasoning_effort: str = "high",
+         tools: Optional[List[BaseTool]] = None,
+     ) -> BaseLMResponse:
+         lm_config = {"temperature": temperature, "response_model": response_model, "reasoning_effort": reasoning_effort}
+         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+         cache_result: Union[BaseLMResponse, None] = (
+             used_cache_handler.hit_managed_cache(
+                 model, messages, lm_config=lm_config, tools=tools
+             )
+         )
+         if cache_result is not None:
+             return cache_result
+
+         # Common API call params
+         api_params = {
+             "model": model,
+             "messages": messages,
+         }
+
+         # Add tools if provided
+         if tools and all(isinstance(tool, BaseTool) for tool in tools):
+             api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+         elif tools:
+             api_params["tools"] = tools
+
+         # Only add temperature for non o1/o3 models
+         if not any(prefix in model for prefix in ["o1-", "o3-"]):
+             api_params["temperature"] = lm_config.get(
+                 "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
+             )
+
+         # Add reasoning_effort only for o3-mini
+         if model in ["o3-mini"]:
+             api_params["reasoning_effort"] = reasoning_effort
+
+         output = self.sync_client.chat.completions.create(**api_params)
+
+         structured_output_api_result = response_model(
+             **output.choices[0].message.content
+         )
+         tool_calls = output.choices[0].message.tool_calls
+         lm_response = BaseLMResponse(
+             raw_response=output.choices[0].message.content,
+             structured_output=structured_output_api_result,
+             tool_calls=tool_calls,
+         )
+         lm_config["reasoning_effort"] = reasoning_effort
+         used_cache_handler.add_to_managed_cache(
+             model, messages, lm_config=lm_config, output=lm_response, tools=tools
+         )
+         return lm_response
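The methods above are internal entry points (the public interface presumably lives in synth_ai/zyk/lms/core), but a minimal direct-use sketch of the wrapper looks roughly like this; the model name, prompt, and temperature are illustrative and not taken from the package:

    from openai import AsyncOpenAI, OpenAI
    from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard

    vendor = OpenAIStandard(sync_client=OpenAI(), async_client=AsyncOpenAI())
    # _hit_api_sync checks the managed cache first, then calls chat.completions.create
    response = vendor._hit_api_sync(
        model="gpt-4o-mini",  # illustrative model name
        messages=[{"role": "user", "content": "Say hello."}],
        lm_config={"temperature": 0.1},  # response_model must stay unset for standard calls
    )
    print(response.raw_response, response.tool_calls)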
synth_ai/zyk/lms/vendors/retries.py
@@ -0,0 +1,3 @@
+ import backoff
+
+ BACKOFF_TOLERANCE = 20
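BACKOFF_TOLERANCE is only referenced by the commented-out @backoff.on_exception decorators in openai_standard.py. If retries were re-enabled, the wiring would plausibly look like this sketch (not code shipped in the wheel):

    import backoff

    from synth_ai.zyk.lms.vendors.openai_standard import (
        DEFAULT_EXCEPTIONS_TO_RETRY,
        on_backoff_handler_sync,
    )
    from synth_ai.zyk.lms.vendors.retries import BACKOFF_TOLERANCE

    @backoff.on_exception(
        backoff.expo,  # exponential wait between attempts
        DEFAULT_EXCEPTIONS_TO_RETRY,  # openai/groq connection, timeout, and validation errors
        max_tries=BACKOFF_TOLERANCE,  # 20 attempts
        on_backoff=on_backoff_handler_sync,
        on_giveup=lambda details: print(details),
    )
    def call_with_retries(client, **api_params):
        # Retry the raw chat.completions call with exponential backoff
        return client.chat.completions.create(**api_params)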
synth_ai/zyk/lms/vendors/supported/__init__.py
File without changes
synth_ai/zyk/lms/vendors/supported/deepseek.py
@@ -0,0 +1,73 @@
+ import os
+ from typing import Any, Dict, List, Optional, Tuple
+
+ from openai import AsyncOpenAI, OpenAI
+
+ from synth_ai.zyk.lms.tools.base import BaseTool
+ from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
+
+
+ class DeepSeekAPI(OpenAIStandard):
+     def __init__(self):
+         # print("Setting up DeepSeek API")
+         self.sync_client = OpenAI(
+             api_key=os.environ.get("DEEPSEEK_API_KEY"),
+             base_url="https://api.deepseek.com",
+         )
+         self.async_client = AsyncOpenAI(
+             api_key=os.environ.get("DEEPSEEK_API_KEY"),
+             base_url="https://api.deepseek.com",
+         )
+
+     def _convert_tools_to_openai_format(self, tools: List[BaseTool]) -> List[Dict]:
+         return [tool.to_openai_tool() for tool in tools]
+
+     async def _private_request_async(
+         self,
+         messages: List[Dict],
+         temperature: float = 0,
+         model_name: str = "deepseek-chat",
+         reasoning_effort: str = "high",
+         tools: Optional[List[BaseTool]] = None,
+         lm_config: Optional[Dict[str, Any]] = None,
+     ) -> Tuple[str, Optional[List[Dict]]]:
+         request_params = {
+             "model": model_name,
+             "messages": messages,
+             "temperature": temperature,
+         }
+
+         if tools:
+             request_params["tools"] = self._convert_tools_to_openai_format(tools)
+
+         response = await self.async_client.chat.completions.create(**request_params)
+         message = response.choices[0].message
+
+         return message.content, message.tool_calls if hasattr(
+             message, "tool_calls"
+         ) else None
+
+     def _private_request_sync(
+         self,
+         messages: List[Dict],
+         temperature: float = 0,
+         model_name: str = "deepseek-chat",
+         reasoning_effort: str = "high",
+         tools: Optional[List[BaseTool]] = None,
+         lm_config: Optional[Dict[str, Any]] = None,
+     ) -> Tuple[str, Optional[List[Dict]]]:
+         request_params = {
+             "model": model_name,
+             "messages": messages,
+             "temperature": temperature,
+         }
+
+         if tools:
+             request_params["tools"] = self._convert_tools_to_openai_format(tools)
+
+         response = self.sync_client.chat.completions.create(**request_params)
+         message = response.choices[0].message
+
+         return message.content, message.tool_calls if hasattr(
+             message, "tool_calls"
+         ) else None
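DeepSeekAPI reuses the OpenAI SDK against DeepSeek's OpenAI-compatible endpoint and only needs DEEPSEEK_API_KEY in the environment; it inherits the cache-aware _hit_api_* methods from OpenAIStandard. A usage sketch (the prompt is illustrative):

    from synth_ai.zyk.lms.vendors.supported.deepseek import DeepSeekAPI

    client = DeepSeekAPI()  # expects DEEPSEEK_API_KEY to be set
    response = client._hit_api_sync(
        model="deepseek-chat",
        messages=[{"role": "user", "content": "Summarize this diff in one sentence."}],
        lm_config={"temperature": 0},
    )
    print(response.raw_response)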
synth_ai/zyk/lms/vendors/supported/groq.py
@@ -0,0 +1,16 @@
+ import os
+
+ from dotenv import load_dotenv
+ from groq import AsyncGroq, Groq
+
+ from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
+
+ load_dotenv()
+
+
+ class GroqAPI(OpenAIStandard):
+     def __init__(self):
+         super().__init__(
+             sync_client=Groq(api_key=os.getenv("GROQ_API_KEY")),
+             async_client=AsyncGroq(api_key=os.getenv("GROQ_API_KEY")),
+         )
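GroqAPI is the thinnest wrapper: it calls load_dotenv() at import time and passes the native Groq SDK clients, which expose the same chat.completions interface, straight into OpenAIStandard. An async usage sketch (the model name is illustrative):

    import asyncio

    from synth_ai.zyk.lms.vendors.supported.groq import GroqAPI

    async def main():
        client = GroqAPI()  # GROQ_API_KEY comes from the environment or a .env file
        response = await client._hit_api_async(
            model="llama-3.1-8b-instant",  # illustrative Groq-hosted model
            messages=[{"role": "user", "content": "Reply with OK."}],
            lm_config={"temperature": 0},
        )
        print(response.raw_response)

    asyncio.run(main())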
synth_ai/zyk/lms/vendors/supported/ollama.py
@@ -0,0 +1,14 @@
+ from openai import OpenAI, AsyncOpenAI
+ from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
+
+
+ class OllamaAPI(OpenAIStandard):
+     def __init__(self):
+         self.sync_client = OpenAI(
+             base_url="http://localhost:11434/v1",
+             api_key="ollama",  # required, but unused
+         )
+         self.async_client = AsyncOpenAI(
+             base_url="http://localhost:11434/v1",
+             api_key="ollama",  # required, but unused
+         )
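OllamaAPI assumes an Ollama server is already running locally and exposing its OpenAI-compatible API on port 11434; the API key is a dummy value because Ollama ignores it. A sketch, assuming a model such as llama3.2 has already been pulled:

    from synth_ai.zyk.lms.vendors.supported.ollama import OllamaAPI

    local = OllamaAPI()
    response = local._hit_api_sync(
        model="llama3.2",  # assumes `ollama pull llama3.2` has been run
        messages=[{"role": "user", "content": "Hello from a local model."}],
        lm_config={"temperature": 0},
    )
    print(response.raw_response)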
synth_ai/zyk/lms/vendors/supported/together.py
@@ -0,0 +1,11 @@
+ import os
+
+ from together import AsyncTogether, Together
+
+ from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
+
+
+ class TogetherAPI(OpenAIStandard):
+     def __init__(self):
+         self.sync_client = Together(api_key=os.getenv("TOGETHER_API_KEY"))
+         self.async_client = AsyncTogether(api_key=os.getenv("TOGETHER_API_KEY"))
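TogetherAPI follows the same pattern as the DeepSeek, Groq, and Ollama wrappers: hand an OpenAI-compatible client pair to OpenAIStandard and inherit everything else. Supporting a further OpenAI-compatible endpoint would plausibly look like the sketch below; the provider name, base URL, and environment variable are hypothetical and not part of the package:

    import os

    from openai import AsyncOpenAI, OpenAI

    from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard

    class ExampleProviderAPI(OpenAIStandard):  # hypothetical vendor wrapper
        def __init__(self):
            self.sync_client = OpenAI(
                api_key=os.getenv("EXAMPLE_PROVIDER_API_KEY"),  # hypothetical env var
                base_url="https://api.example-provider.com/v1",  # hypothetical endpoint
            )
            self.async_client = AsyncOpenAI(
                api_key=os.getenv("EXAMPLE_PROVIDER_API_KEY"),
                base_url="https://api.example-provider.com/v1",
            )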
{synth_ai-0.1.0.dev50.dist-info → synth_ai-0.1.0.dev52.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: synth-ai
- Version: 0.1.0.dev50
+ Version: 0.1.0.dev52
  Summary: Software for aiding the best and multiplying the will.
  Home-page: https://github.com/synth-laboratories/synth-ai
  Author: Josh Purtell
@@ -43,6 +43,7 @@ Requires-Dist: anthropic>=0.34.2
  Requires-Dist: google>=3.0.0
  Requires-Dist: google-api-core
  Requires-Dist: google-generativeai
+ Requires-Dist: google-genai
  Requires-Dist: together>=1.2.12
  Requires-Dist: langfuse>=2.56.1
  Requires-Dist: datasets>=3.2.0
synth_ai-0.1.0.dev52.dist-info/RECORD
@@ -0,0 +1,46 @@
+ synth_ai/__init__.py,sha256=dflUvGJ59nrEo81cf7GqUA4ExYbjePhQShSLsr1B0qE,325
+ synth_ai/zyk/__init__.py,sha256=kGMD-drlBVdsyT-QFODMwaZUtxPCJ9mg58GKQUvFqo0,134
+ synth_ai/zyk/lms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/config.py,sha256=UBMi0DIFQDBV_eGPK5vG8R7VwxXcV10BPGq1iV8vVjg,282
+ synth_ai/zyk/lms/constants.py,sha256=AIZZBGagbcEATK4zOBIC_jkSguSc41z-EfwHGh-xyuU,656
+ synth_ai/zyk/lms/caching/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/caching/constants.py,sha256=fPi3x9p-yRdvixMSIyclvmwmwCRliXLXQjEm6dRnG8s,52
+ synth_ai/zyk/lms/caching/dbs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/caching/ephemeral.py,sha256=pNMG5Rzzp2m0Ln1UYmWxz1qbXwq3iNIrhjYAS0yO3ZE,2370
+ synth_ai/zyk/lms/caching/handler.py,sha256=DGBxc6Xw1KGCncI5e_lVRgLC-S7qQ5bSTJMifFTZSc4,4914
+ synth_ai/zyk/lms/caching/initialize.py,sha256=zZls6RKAax6Z-8oJInGaSg_RPN_fEZ6e_RCX64lMLJw,416
+ synth_ai/zyk/lms/caching/persistent.py,sha256=ZaY1A9qhvfNKzcAI9FnwbIrgMKvVeIfb_yCyl3M8dxE,2860
+ synth_ai/zyk/lms/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/core/all.py,sha256=wakK0HhvYRuaQZmxClURyNf3vUkTbm3OABw3TgpMjOQ,1185
+ synth_ai/zyk/lms/core/exceptions.py,sha256=K0BVdAzxVIchsvYZAaHEH1GAWBZvpxhFi-SPcJOjyPQ,205
+ synth_ai/zyk/lms/core/main.py,sha256=pLz7COTdvDWQivYaA1iYYF2onUOosD_sFaPJG48bdKM,10598
+ synth_ai/zyk/lms/core/vendor_clients.py,sha256=1h428pLiP6VeHvLOYRCtxfmuwJceYp30jPF5Fr48JtI,2783
+ synth_ai/zyk/lms/cost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/cost/monitor.py,sha256=cSKIvw6WdPZIRubADWxQoh1MdB40T8-jjgfNUeUHIn0,5
+ synth_ai/zyk/lms/cost/statefulness.py,sha256=TOsuXL8IjtKOYJ2aJQF8TwJVqn_wQ7AIwJJmdhMye7U,36
+ synth_ai/zyk/lms/structured_outputs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/structured_outputs/handler.py,sha256=TKN38zHFdVnAJoYqUgaXuUzTVFj9XBBJHx45whGjVeE,16918
+ synth_ai/zyk/lms/structured_outputs/inject.py,sha256=Fy-zDeleRxOZ8ZRM6IuZ6CP2XZnMe4K2PEn4Q9c_KPY,11777
+ synth_ai/zyk/lms/structured_outputs/rehabilitate.py,sha256=ecKGWrgWYUSplqHzK40KdohwaN8gBV0xl4LUReLN_vg,7910
+ synth_ai/zyk/lms/tools/base.py,sha256=i-AIVRlitiQ4JMJ_BBFRSpUcWgxWIUYoHxAqfxHN_7E,4056
+ synth_ai/zyk/lms/vendors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/base.py,sha256=aK4PEtkMLt_o3qD22kW-x3HJUEKdIk06zlH4kX0VkAE,760
+ synth_ai/zyk/lms/vendors/openai_standard.py,sha256=dgHC7RWrxwaWto6_frKdfEKazKvvAMYJM1YgJgbVpb8,12279
+ synth_ai/zyk/lms/vendors/retries.py,sha256=m-WvAiPix9ovnO2S-m53Td5VZDWBVBFuHuSK9--OVxw,38
+ synth_ai/zyk/lms/vendors/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=AdlKHK4jiOxb3QxS7my7EfmGCuugPljvGNN3JBkiDfQ,13689
+ synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=LER31s5iQCrsHdi_Y8GFK5XBini-xwcsXt3xLUyCsJg,10947
+ synth_ai/zyk/lms/vendors/core/mistral_api.py,sha256=eoEaxiMuKQEY0K4rGHA2_ZG6sBnzEm-8hBWO_JqW96M,12080
+ synth_ai/zyk/lms/vendors/core/openai_api.py,sha256=O5KbRpy0pDDofVjxgZdeU69ueUl6S1DAuKykcH3gThg,6784
+ synth_ai/zyk/lms/vendors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/local/ollama.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/supported/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/supported/deepseek.py,sha256=BElW0NGpkSA62wOqzzMtDw8XR36rSNXK5LldeHJkQrc,2430
+ synth_ai/zyk/lms/vendors/supported/groq.py,sha256=Fbi7QvhdLx0F-VHO5PY-uIQlPR0bo3C9h1MvIOx8nz0,388
+ synth_ai/zyk/lms/vendors/supported/ollama.py,sha256=K30VBFRTd7NYyPmyBVRZS2sm0UB651AHp9i3wd55W64,469
+ synth_ai/zyk/lms/vendors/supported/together.py,sha256=Ni_jBqqGPN0PkkY-Ew64s3gNKk51k3FCpLSwlNhKbf0,342
+ synth_ai-0.1.0.dev52.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
+ synth_ai-0.1.0.dev52.dist-info/METADATA,sha256=A_WiPC2LlKIMWt11Ma5mQiK19q00dWePd5GQUPTcIpE,2759
+ synth_ai-0.1.0.dev52.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+ synth_ai-0.1.0.dev52.dist-info/top_level.txt,sha256=fBmtZyVHuKaGa29oHBaaUkrUIWTqSpoVMPiVdCDP3k8,9
+ synth_ai-0.1.0.dev52.dist-info/RECORD,,
synth_ai-0.1.0.dev50.dist-info/RECORD
@@ -1,6 +0,0 @@
- synth_ai/__init__.py,sha256=6e_S1pzQpTcURfvJX9Vyd2ZBlHd98LhseeYUlFptN3s,317
- synth_ai-0.1.0.dev50.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
- synth_ai-0.1.0.dev50.dist-info/METADATA,sha256=RBepPnjBxtIJkkaBarnZ9Zx8RG6w8drL5YnsUeKRca0,2731
- synth_ai-0.1.0.dev50.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
- synth_ai-0.1.0.dev50.dist-info/top_level.txt,sha256=fBmtZyVHuKaGa29oHBaaUkrUIWTqSpoVMPiVdCDP3k8,9
- synth_ai-0.1.0.dev50.dist-info/RECORD,,