synth-ai 0.1.0.dev4__py3-none-any.whl → 0.1.0.dev6__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of synth-ai might be problematic.

Files changed (46)
  1. public_tests/synth_sdk.py +389 -0
  2. public_tests/test_agent.py +538 -0
  3. public_tests/test_recursive_structured_outputs.py +180 -0
  4. public_tests/test_structured_outputs.py +100 -0
  5. synth_ai/zyk/lms/__init__.py +0 -0
  6. synth_ai/zyk/lms/caching/__init__.py +0 -0
  7. synth_ai/zyk/lms/caching/constants.py +1 -0
  8. synth_ai/zyk/lms/caching/dbs.py +0 -0
  9. synth_ai/zyk/lms/caching/ephemeral.py +50 -0
  10. synth_ai/zyk/lms/caching/handler.py +92 -0
  11. synth_ai/zyk/lms/caching/initialize.py +13 -0
  12. synth_ai/zyk/lms/caching/persistent.py +55 -0
  13. synth_ai/zyk/lms/config.py +8 -0
  14. synth_ai/zyk/lms/core/__init__.py +0 -0
  15. synth_ai/zyk/lms/core/all.py +35 -0
  16. synth_ai/zyk/lms/core/exceptions.py +9 -0
  17. synth_ai/zyk/lms/core/main.py +245 -0
  18. synth_ai/zyk/lms/core/vendor_clients.py +60 -0
  19. synth_ai/zyk/lms/cost/__init__.py +0 -0
  20. synth_ai/zyk/lms/cost/monitor.py +1 -0
  21. synth_ai/zyk/lms/cost/statefulness.py +1 -0
  22. synth_ai/zyk/lms/structured_outputs/__init__.py +0 -0
  23. synth_ai/zyk/lms/structured_outputs/handler.py +388 -0
  24. synth_ai/zyk/lms/structured_outputs/inject.py +185 -0
  25. synth_ai/zyk/lms/structured_outputs/rehabilitate.py +186 -0
  26. synth_ai/zyk/lms/vendors/__init__.py +0 -0
  27. synth_ai/zyk/lms/vendors/base.py +15 -0
  28. synth_ai/zyk/lms/vendors/constants.py +5 -0
  29. synth_ai/zyk/lms/vendors/core/__init__.py +0 -0
  30. synth_ai/zyk/lms/vendors/core/anthropic_api.py +191 -0
  31. synth_ai/zyk/lms/vendors/core/gemini_api.py +146 -0
  32. synth_ai/zyk/lms/vendors/core/openai_api.py +145 -0
  33. synth_ai/zyk/lms/vendors/local/__init__.py +0 -0
  34. synth_ai/zyk/lms/vendors/local/ollama.py +0 -0
  35. synth_ai/zyk/lms/vendors/openai_standard.py +141 -0
  36. synth_ai/zyk/lms/vendors/retries.py +3 -0
  37. synth_ai/zyk/lms/vendors/supported/__init__.py +0 -0
  38. synth_ai/zyk/lms/vendors/supported/deepseek.py +18 -0
  39. synth_ai/zyk/lms/vendors/supported/together.py +11 -0
  40. {synth_ai-0.1.0.dev4.dist-info → synth_ai-0.1.0.dev6.dist-info}/METADATA +1 -1
  41. synth_ai-0.1.0.dev6.dist-info/RECORD +46 -0
  42. synth_ai-0.1.0.dev6.dist-info/top_level.txt +2 -0
  43. synth_ai-0.1.0.dev4.dist-info/RECORD +0 -7
  44. synth_ai-0.1.0.dev4.dist-info/top_level.txt +0 -1
  45. {synth_ai-0.1.0.dev4.dist-info → synth_ai-0.1.0.dev6.dist-info}/LICENSE +0 -0
  46. {synth_ai-0.1.0.dev4.dist-info → synth_ai-0.1.0.dev6.dist-info}/WHEEL +0 -0
synth_ai/zyk/lms/vendors/core/gemini_api.py
@@ -0,0 +1,146 @@
+ import logging
+ import os
+ import warnings
+ from typing import Any, Dict, List, Tuple, Type
+
+ import google.generativeai as genai
+ from google.api_core.exceptions import ResourceExhausted
+ from google.generativeai.types import HarmBlockThreshold, HarmCategory
+
+ from synth_ai.zyk.lms.caching.initialize import (
+     get_cache_handler,
+ )
+ from synth_ai.zyk.lms.vendors.base import VendorBase
+ from synth_ai.zyk.lms.vendors.constants import SPECIAL_BASE_TEMPS
+ from synth_ai.zyk.lms.vendors.retries import BACKOFF_TOLERANCE, backoff
+
+ GEMINI_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (ResourceExhausted,)
+ logging.getLogger("google.generativeai").setLevel(logging.ERROR)
+ os.environ["GRPC_VERBOSITY"] = "ERROR"
+ # Suppress TensorFlow logging
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+ warnings.filterwarnings("ignore")
+
+ SAFETY_SETTINGS = {
+     HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
+     HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
+     HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
+     HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
+ }
+
+
+ class GeminiAPI(VendorBase):
+     used_for_structured_outputs: bool = True
+     exceptions_to_retry: Tuple[Type[Exception], ...] = GEMINI_EXCEPTIONS_TO_RETRY
+
+     def __init__(
+         self,
+         exceptions_to_retry: Tuple[Type[Exception], ...] = GEMINI_EXCEPTIONS_TO_RETRY,
+         used_for_structured_outputs: bool = False,
+     ):
+         self.used_for_structured_outputs = used_for_structured_outputs
+         self.exceptions_to_retry = exceptions_to_retry
+
+     async def _private_request_async(
+         self,
+         messages: List[Dict],
+         temperature: float = 0,
+         model_name: str = "gemini-1.5-flash",
+     ) -> str:
+         code_generation_model = genai.GenerativeModel(
+             model_name=model_name,
+             generation_config={"temperature": temperature},
+             system_instruction=messages[0]["content"],
+         )
+         result = await code_generation_model.generate_content_async(
+             messages[1]["content"],
+             safety_settings=SAFETY_SETTINGS,
+         )
+         return result.text
+
+     def _private_request_sync(
+         self,
+         messages: List[Dict],
+         temperature: float = 0,
+         model_name: str = "gemini-1.5-flash",
+     ) -> str:
+         code_generation_model = genai.GenerativeModel(
+             model_name=model_name,
+             generation_config={"temperature": temperature},
+             system_instruction=messages[0]["content"],
+         )
+         result = code_generation_model.generate_content(
+             messages[1]["content"],
+             safety_settings=SAFETY_SETTINGS,
+         )
+         return result.text
+
+     @backoff.on_exception(
+         backoff.expo,
+         exceptions_to_retry,
+         max_tries=BACKOFF_TOLERANCE,
+         on_giveup=lambda e: print(e),
+     )
+     async def _hit_api_async(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         lm_config: Dict[str, Any],
+         use_ephemeral_cache_only: bool = False,
+     ) -> str:
+         assert (
+             lm_config.get("response_model", None) is None
+         ), "response_model is not supported for standard calls"
+         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+         cache_result = used_cache_handler.hit_managed_cache(
+             model, messages, lm_config=lm_config
+         )
+         if cache_result:
+             return (
+                 cache_result["response"]
+                 if isinstance(cache_result, dict)
+                 else cache_result
+             )
+         api_result = await self._private_request_async(
+             messages,
+             temperature=lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
+         )
+         used_cache_handler.add_to_managed_cache(
+             model, messages, lm_config=lm_config, output=api_result
+         )
+         return api_result
+
+     @backoff.on_exception(
+         backoff.expo,
+         exceptions_to_retry,
+         max_tries=BACKOFF_TOLERANCE,
+         on_giveup=lambda e: print(e),
+     )
+     def _hit_api_sync(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         lm_config: Dict[str, Any],
+         use_ephemeral_cache_only: bool = False,
+     ) -> str:
+         assert (
+             lm_config.get("response_model", None) is None
+         ), "response_model is not supported for standard calls"
+         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+         cache_result = used_cache_handler.hit_managed_cache(
+             model, messages, lm_config=lm_config
+         )
+         if cache_result:
+             return (
+                 cache_result["response"]
+                 if isinstance(cache_result, dict)
+                 else cache_result
+             )
+         api_result = self._private_request_sync(
+             messages,
+             temperature=lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
+         )
+         used_cache_handler.add_to_managed_cache(
+             model, messages, lm_config=lm_config, output=api_result
+         )
+         return api_result
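For orientation, a minimal usage sketch of this client (not part of the published diff; it assumes synth-ai 0.1.0.dev6 is installed, google-generativeai is configured with a valid API key, and the messages follow the system-then-user layout the private request helpers expect):

import google.generativeai as genai
from synth_ai.zyk.lms.vendors.core.gemini_api import GeminiAPI

genai.configure(api_key="YOUR_GEMINI_API_KEY")  # placeholder key

client = GeminiAPI()
messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Name one moon of Jupiter."},
]
# lm_config must not carry response_model; these are the standard (non-structured) calls
text = client._hit_api_sync(
    model="gemini-1.5-flash",
    messages=messages,
    lm_config={"temperature": 0},
)
print(text)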
synth_ai/zyk/lms/vendors/core/openai_api.py
@@ -0,0 +1,145 @@
+ import json
+ from typing import Any, Dict, List, Tuple, Type
+
+ import openai
+ import pydantic_core
+
+ # from openai import AsyncOpenAI, OpenAI
+ from pydantic import BaseModel
+
+ from synth_ai.zyk.lms.caching.initialize import get_cache_handler
+ from synth_ai.zyk.lms.vendors.constants import SPECIAL_BASE_TEMPS
+ from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
+
+ OPENAI_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (
+     pydantic_core._pydantic_core.ValidationError,
+     openai.OpenAIError,
+     openai.APIConnectionError,
+     openai.RateLimitError,
+     openai.APIError,
+     openai.Timeout,
+     openai.InternalServerError,
+     openai.APIConnectionError,
+ )
+
+
+ class OpenAIStructuredOutputClient(OpenAIStandard):
+     def __init__(self, synth_logging: bool = True):
+         if synth_logging:
+             # print("Using synth logging - OpenAIStructuredOutputClient")
+             from synth_sdk import AsyncOpenAI, OpenAI
+         else:
+             # print("Not using synth logging - OpenAIStructuredOutputClient")
+             from openai import AsyncOpenAI, OpenAI
+
+         super().__init__(
+             used_for_structured_outputs=True,
+             exceptions_to_retry=OPENAI_EXCEPTIONS_TO_RETRY,
+             sync_client=OpenAI(),
+             async_client=AsyncOpenAI(),
+         )
+
+     async def _hit_api_async_structured_output(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         response_model: BaseModel,
+         temperature: float,
+         use_ephemeral_cache_only: bool = False,
+     ) -> str:
+         # "Hit client")
+         lm_config = {"temperature": temperature, "response_model": response_model}
+         used_cache_handler = get_cache_handler(
+             use_ephemeral_cache_only=use_ephemeral_cache_only
+         )
+         cache_result = used_cache_handler.hit_managed_cache(
+             model, messages, lm_config=lm_config
+         )
+         if cache_result:
+             # print("Hit cache")
+             return (
+                 cache_result["response"]
+                 if isinstance(cache_result, dict)
+                 else cache_result
+             )
+         output = await self.async_client.beta.chat.completions.parse(
+             model=model,
+             messages=messages,
+             temperature=lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
+             response_format=response_model,
+         )
+         # "Output", output)
+         api_result = response_model(**json.loads(output.choices[0].message.content))
+         used_cache_handler.add_to_managed_cache(
+             model, messages, lm_config, output=output.choices[0].message.content
+         )
+         return api_result
+
+     def _hit_api_sync_structured_output(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         response_model: BaseModel,
+         temperature: float,
+         use_ephemeral_cache_only: bool = False,
+     ) -> str:
+         lm_config = {"temperature": temperature, "response_model": response_model}
+         used_cache_handler = get_cache_handler(
+             use_ephemeral_cache_only=use_ephemeral_cache_only
+         )
+         cache_result = used_cache_handler.hit_managed_cache(
+             model, messages, lm_config=lm_config
+         )
+         if cache_result:
+             return (
+                 cache_result["response"]
+                 if isinstance(cache_result, dict)
+                 else cache_result
+             )
+         output = self.sync_client.beta.chat.completions.parse(
+             model=model,
+             messages=messages,
+             temperature=lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
+             response_format=response_model,
+         )
+         api_result = response_model(**json.loads(output.choices[0].message.content))
+         used_cache_handler.add_to_managed_cache(
+             model,
+             messages,
+             lm_config=lm_config,
+             output=output.choices[0].message.content,
+         )
+         return api_result
+
+
+ class OpenAIPrivate(OpenAIStandard):
+     def __init__(self, synth_logging: bool = True):
+         if synth_logging:
+             # print("Using synth logging - OpenAIPrivate")
+             from synth_sdk import AsyncOpenAI, OpenAI
+         else:
+             # print("Not using synth logging - OpenAIPrivate")
+             from openai import AsyncOpenAI, OpenAI
+
+         self.sync_client = OpenAI()
+         self.async_client = AsyncOpenAI()
+
+
+ if __name__ == "__main__":
+     client = OpenAIStructuredOutputClient(
+         sync_client=openai.OpenAI(),
+         async_client=openai.AsyncOpenAI(),
+         used_for_structured_outputs=True,
+         exceptions_to_retry=[],
+     )
+
+     class TestModel(BaseModel):
+         name: str
+
+     sync_model_response = client._hit_api_sync_structured_output(
+         model="gpt-4o-mini-2024-07-18",
+         messages=[{"role": "user", "content": " What is the capital of the moon?"}],
+         response_model=TestModel,
+         temperature=0.0,
+     )
+     # print(sync_model_response)
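A minimal sketch of driving the structured-output path (not part of the published diff; it assumes OPENAI_API_KEY is set and passes synth_logging=False so the stock openai clients are used; the model name and prompt are illustrative):

from pydantic import BaseModel
from synth_ai.zyk.lms.vendors.core.openai_api import OpenAIStructuredOutputClient

class Capital(BaseModel):
    city: str

client = OpenAIStructuredOutputClient(synth_logging=False)
answer = client._hit_api_sync_structured_output(
    model="gpt-4o-mini-2024-07-18",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    response_model=Capital,
    temperature=0.0,
)
print(answer.city)  # a Capital instance parsed from the model's JSON output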
synth_ai/zyk/lms/vendors/local/__init__.py: File without changes
synth_ai/zyk/lms/vendors/local/ollama.py: File without changes
synth_ai/zyk/lms/vendors/openai_standard.py
@@ -0,0 +1,141 @@
+ from typing import Any, Dict, List
+
+ import openai
+ import pydantic_core
+
+ from synth_ai.zyk.lms.caching.initialize import (
+     get_cache_handler,
+ )
+ from synth_ai.zyk.lms.vendors.base import VendorBase
+ from synth_ai.zyk.lms.vendors.constants import SPECIAL_BASE_TEMPS
+ from synth_ai.zyk.lms.vendors.retries import BACKOFF_TOLERANCE, backoff
+
+ DEFAULT_EXCEPTIONS_TO_RETRY = (
+     pydantic_core._pydantic_core.ValidationError,
+     openai.APIConnectionError,
+     openai.APITimeoutError,
+ )
+
+
+ def special_orion_transform(
+     model: str, messages: List[Dict[str, Any]]
+ ) -> List[Dict[str, Any]]:
+     if "o1-" in model:
+         messages = [
+             {
+                 "role": "user",
+                 "content": f"<instructions>{messages[0]['content']}</instructions><information>{messages[1]}</information>",
+             }
+         ]
+     return messages
+
+
+ def on_backoff_handler_async(details):
+     # Print every 5th retry attempt, excluding the first retry
+     if details["tries"] > 1 and (details["tries"] - 1) % 5 == 0:
+         print(f"Retrying async API call (attempt {details['tries'] - 1})")
+
+
+ def on_backoff_handler_sync(details):
+     # Print every 5th retry attempt, excluding the first retry
+     if details["tries"] > 1 and (details["tries"] - 1) % 5 == 0:
+         print(f"Retrying sync API call (attempt {details['tries'] - 1})")
+
+
+ class OpenAIStandard(VendorBase):
+     used_for_structured_outputs: bool = True
+     exceptions_to_retry: List = DEFAULT_EXCEPTIONS_TO_RETRY
+     sync_client: Any
+     async_client: Any
+
+     def __init__(
+         self,
+         sync_client: Any,
+         async_client: Any,
+         exceptions_to_retry: List[Exception] = DEFAULT_EXCEPTIONS_TO_RETRY,
+         used_for_structured_outputs: bool = False,
+     ):
+         self.sync_client = sync_client
+         self.async_client = async_client
+         self.used_for_structured_outputs = used_for_structured_outputs
+         self.exceptions_to_retry = exceptions_to_retry
+
+     @backoff.on_exception(
+         backoff.expo,
+         exceptions_to_retry,
+         max_tries=BACKOFF_TOLERANCE,
+         on_backoff=on_backoff_handler_async,
+         on_giveup=lambda e: print(e),
+     )
+     async def _hit_api_async(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         lm_config: Dict[str, Any],
+         use_ephemeral_cache_only: bool = False,
+     ) -> str:
+         assert (
+             lm_config.get("response_model", None) is None
+         ), "response_model is not supported for standard calls"
+         messages = special_orion_transform(model, messages)
+         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+         cache_result = used_cache_handler.hit_managed_cache(
+             model, messages, lm_config=lm_config
+         )
+         if cache_result:
+             return (
+                 cache_result["response"]
+                 if isinstance(cache_result, dict)
+                 else cache_result
+             )
+         output = await self.async_client.chat.completions.create(
+             model=model,
+             messages=messages,
+             temperature=lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
+         )
+         api_result = output.choices[0].message.content
+         used_cache_handler.add_to_managed_cache(
+             model, messages, lm_config=lm_config, output=api_result
+         )
+         return api_result
+
+     @backoff.on_exception(
+         backoff.expo,
+         exceptions_to_retry,
+         max_tries=BACKOFF_TOLERANCE,
+         on_backoff=on_backoff_handler_sync,
+         on_giveup=lambda e: print(e),
+     )
+     def _hit_api_sync(
+         self,
+         model: str,
+         messages: List[Dict[str, Any]],
+         lm_config: Dict[str, Any],
+         use_ephemeral_cache_only: bool = False,
+     ) -> str:
+         assert (
+             lm_config.get("response_model", None) is None
+         ), "response_model is not supported for standard calls"
+         messages = special_orion_transform(model, messages)
+         used_cache_handler = get_cache_handler(
+             use_ephemeral_cache_only=use_ephemeral_cache_only
+         )
+         cache_result = used_cache_handler.hit_managed_cache(
+             model, messages, lm_config=lm_config
+         )
+         if cache_result:
+             return (
+                 cache_result["response"]
+                 if isinstance(cache_result, dict)
+                 else cache_result
+             )
+         output = self.sync_client.chat.completions.create(
+             model=model,
+             messages=messages,
+             temperature=lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
+         )
+         api_result = output.choices[0].message.content
+         used_cache_handler.add_to_managed_cache(
+             model, messages, lm_config=lm_config, output=api_result
+         )
+         return api_result
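A minimal sketch of using this shared base class directly with stock OpenAI clients (not part of the published diff; assumes OPENAI_API_KEY is set and the model name is illustrative):

from openai import AsyncOpenAI, OpenAI
from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard

client = OpenAIStandard(sync_client=OpenAI(), async_client=AsyncOpenAI())
reply = client._hit_api_sync(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Say hello."},
    ],
    lm_config={"temperature": 0},  # response_model must be absent for standard calls
)
print(reply)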
synth_ai/zyk/lms/vendors/retries.py
@@ -0,0 +1,3 @@
+ import backoff
+
+ BACKOFF_TOLERANCE = 20
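This module just re-exports backoff alongside a shared retry ceiling; the vendor clients above consume it roughly like this (illustrative sketch, flaky_call is a hypothetical stand-in for a vendor request):

from synth_ai.zyk.lms.vendors.retries import BACKOFF_TOLERANCE, backoff

@backoff.on_exception(backoff.expo, (ConnectionError,), max_tries=BACKOFF_TOLERANCE)
def flaky_call() -> str:
    # real callers wrap vendor API requests; retried with exponential backoff, up to 20 tries
    return "ok"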
synth_ai/zyk/lms/vendors/supported/__init__.py: File without changes
synth_ai/zyk/lms/vendors/supported/deepseek.py
@@ -0,0 +1,18 @@
+ import os
+
+ from openai import AsyncOpenAI, OpenAI
+
+ from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
+
+
+ class DeepSeekAPI(OpenAIStandard):
+     def __init__(self):
+         # print("Setting up DeepSeek API")
+         self.sync_client = OpenAI(
+             api_key=os.environ.get("DEEPSEEK_API_KEY"),
+             base_url="https://api.deepseek.com",
+         )
+         self.async_client = AsyncOpenAI(
+             api_key=os.environ.get("DEEPSEEK_API_KEY"),
+             base_url="https://api.deepseek.com",
+         )
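Constructing this client only requires the environment variable to be set beforehand (illustrative sketch, not part of the diff; the key value is a placeholder):

import os
from synth_ai.zyk.lms.vendors.supported.deepseek import DeepSeekAPI

os.environ.setdefault("DEEPSEEK_API_KEY", "sk-placeholder")
client = DeepSeekAPI()  # OpenAI-compatible clients pointed at https://api.deepseek.com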
synth_ai/zyk/lms/vendors/supported/together.py
@@ -0,0 +1,11 @@
+ import os
+
+ from together import AsyncTogether, Together
+
+ from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
+
+
+ class TogetherAPI(OpenAIStandard):
+     def __init__(self):
+         self.sync_client = Together(api_key=os.getenv("TOGETHER_API_KEY"))
+         self.async_client = AsyncTogether(api_key=os.getenv("TOGETHER_API_KEY"))
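Likewise for Together, which wraps the native together SDK clients (illustrative sketch, not part of the diff; the key value is a placeholder):

import os
from synth_ai.zyk.lms.vendors.supported.together import TogetherAPI

os.environ.setdefault("TOGETHER_API_KEY", "together-placeholder")
client = TogetherAPI()  # the sync/async Together clients reuse the OpenAIStandard call paths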
{synth_ai-0.1.0.dev4.dist-info → synth_ai-0.1.0.dev6.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: synth-ai
- Version: 0.1.0.dev4
+ Version: 0.1.0.dev6
  Summary: Software for aiding the best and multiplying the will.
  Author-email: Josh Purtell <josh@usesynth.ai>
  License: MIT License
synth_ai-0.1.0.dev6.dist-info/RECORD
@@ -0,0 +1,46 @@
+ public_tests/synth_sdk.py,sha256=fqkzyzLb_NW4k8EiP2mJ5HZk3lDTi1juyTf9Gv_9wfc,14238
+ public_tests/test_agent.py,sha256=CjPPWuMWC_TzX1DkDald-bbAxgjXE-HPQvFhq2B--5k,22363
+ public_tests/test_recursive_structured_outputs.py,sha256=Ne-9XwnOxN7eSpGbNHOpegR-sRj589I84T6y8Z_4QnA,5781
+ public_tests/test_structured_outputs.py,sha256=J7sfbGZ7OeB5ONIKpcCTymyayNyAdFfGokC1bcUrSx0,3651
+ synth_ai/__init__.py,sha256=2siivzLbT2r-EA7m91dcJB-6Vsurc5_sX3WiKf4_o8Y,198
+ synth_ai/zyk/__init__.py,sha256=zoPor1PI2OrgpCu-MBLZXcX1jAbSgD9q0kqZpTghTcQ,60
+ synth_ai/zyk/lms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/config.py,sha256=CcN5NL99j0UZubyGo-MUfbPD3pWosAMju_sqgfvqLVY,201
+ synth_ai/zyk/lms/caching/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/caching/constants.py,sha256=fPi3x9p-yRdvixMSIyclvmwmwCRliXLXQjEm6dRnG8s,52
+ synth_ai/zyk/lms/caching/dbs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/caching/ephemeral.py,sha256=OduhhWc8TdEohli0HJkbtTxVM0egSxmOHDVfErTXTPw,1725
+ synth_ai/zyk/lms/caching/handler.py,sha256=sewq5rRfqXHzCEiXvdckbuxYp9ze_EjVSndnUTsOAJY,2962
+ synth_ai/zyk/lms/caching/initialize.py,sha256=zZls6RKAax6Z-8oJInGaSg_RPN_fEZ6e_RCX64lMLJw,416
+ synth_ai/zyk/lms/caching/persistent.py,sha256=mQmP1z0rWVYjxwso5zIwd51Df2dWZvdHonuqsOY6SFI,2075
+ synth_ai/zyk/lms/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/core/all.py,sha256=oUplKT1AsTv9Uvx2SqVjGjOsa0ml5G_GJFdKp6rPiGs,894
+ synth_ai/zyk/lms/core/exceptions.py,sha256=K0BVdAzxVIchsvYZAaHEH1GAWBZvpxhFi-SPcJOjyPQ,205
+ synth_ai/zyk/lms/core/main.py,sha256=LMPsr8fF93kRk5sts5Q9-acKyLsX4tmJEoifj7DQHvo,8786
+ synth_ai/zyk/lms/core/vendor_clients.py,sha256=O2KWCFJ0XHWhd4-_UE-T0WQKKgz_SNT7X4nV9j1YEPM,1878
+ synth_ai/zyk/lms/cost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/cost/monitor.py,sha256=cSKIvw6WdPZIRubADWxQoh1MdB40T8-jjgfNUeUHIn0,5
+ synth_ai/zyk/lms/cost/statefulness.py,sha256=TOsuXL8IjtKOYJ2aJQF8TwJVqn_wQ7AIwJJmdhMye7U,36
+ synth_ai/zyk/lms/structured_outputs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/structured_outputs/handler.py,sha256=2TixVuVs2w3Q3VB8n2DvNtJcJvUmQzAR1WfXn3FaX7M,13804
+ synth_ai/zyk/lms/structured_outputs/inject.py,sha256=U3I4-f-9RFn5Pc6wOxqBtJCUOcf_qI5MJGzpxcjhnGI,6732
+ synth_ai/zyk/lms/structured_outputs/rehabilitate.py,sha256=_QOfnI1rJxIE9-zUMhC0PedCOr6y5m6WuGScDb5gcUo,7787
+ synth_ai/zyk/lms/vendors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/base.py,sha256=bs27mNfrCjQ5NsmxWnKfxZmpoAa9mTNAFr9AnGVwqLk,552
+ synth_ai/zyk/lms/vendors/constants.py,sha256=zqCOyXZqo297wboR9EKVSkvpq6JCMSJyeso8HdZPKa4,102
+ synth_ai/zyk/lms/vendors/openai_standard.py,sha256=nTBrI7grru1RjWHluguVU24xt__apXrOnWaq61S71nA,4796
+ synth_ai/zyk/lms/vendors/retries.py,sha256=m-WvAiPix9ovnO2S-m53Td5VZDWBVBFuHuSK9--OVxw,38
+ synth_ai/zyk/lms/vendors/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=fcb0uVlInADl50MNYMT-IimM9mzO19D8_mSg9Gqp92Q,6986
+ synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=vHwsIv6n1KxzxPtx10yxZcsVDr8j9ZUx8dPE9zNLWjM,5141
+ synth_ai/zyk/lms/vendors/core/openai_api.py,sha256=bpS7NcOJugF6rVOxsw3FMV30hhPQVkvZ0uV-MppisyE,4967
+ synth_ai/zyk/lms/vendors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/local/ollama.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/supported/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synth_ai/zyk/lms/vendors/supported/deepseek.py,sha256=diFfdhPMO5bLFZxnYj7VT0v6jKTlOYESBkspUuVa2eY,529
+ synth_ai/zyk/lms/vendors/supported/together.py,sha256=Ni_jBqqGPN0PkkY-Ew64s3gNKk51k3FCpLSwlNhKbf0,342
+ synth_ai-0.1.0.dev6.dist-info/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
+ synth_ai-0.1.0.dev6.dist-info/METADATA,sha256=A4nzNGkU2zdFNpRiu0KruOeZHI4h0XXjAErI5WBA-Kc,2491
+ synth_ai-0.1.0.dev6.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ synth_ai-0.1.0.dev6.dist-info/top_level.txt,sha256=MKoWqlbnW0ZKcm_eLzeCpgAihgL59ZrZZ8Q1HnZwHdg,22
+ synth_ai-0.1.0.dev6.dist-info/RECORD,,
synth_ai-0.1.0.dev6.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+ public_tests
+ synth_ai
synth_ai-0.1.0.dev4.dist-info/RECORD
@@ -1,7 +0,0 @@
- synth_ai/__init__.py,sha256=2siivzLbT2r-EA7m91dcJB-6Vsurc5_sX3WiKf4_o8Y,198
- synth_ai/zyk/__init__.py,sha256=zoPor1PI2OrgpCu-MBLZXcX1jAbSgD9q0kqZpTghTcQ,60
- synth_ai-0.1.0.dev4.dist-info/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
- synth_ai-0.1.0.dev4.dist-info/METADATA,sha256=IOYF_pOhAYtMDK1ClxHlfpx8WCk5HgjkVkorEGfSE4g,2491
- synth_ai-0.1.0.dev4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- synth_ai-0.1.0.dev4.dist-info/top_level.txt,sha256=fBmtZyVHuKaGa29oHBaaUkrUIWTqSpoVMPiVdCDP3k8,9
- synth_ai-0.1.0.dev4.dist-info/RECORD,,
synth_ai-0.1.0.dev4.dist-info/top_level.txt
@@ -1 +0,0 @@
- synth_ai