synth-ai 0.1.0.dev27__py3-none-any.whl → 0.1.0.dev29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- public_tests/test_agent.py +11 -11
- public_tests/test_all_structured_outputs.py +32 -37
- public_tests/test_anthropic_structured_outputs.py +0 -0
- public_tests/test_deepseek_structured_outputs.py +0 -0
- public_tests/test_deepseek_tools.py +64 -0
- public_tests/test_gemini_structured_outputs.py +106 -0
- public_tests/test_models.py +27 -27
- public_tests/test_openai_structured_outputs.py +106 -0
- public_tests/test_reasoning_models.py +9 -7
- public_tests/test_recursive_structured_outputs.py +30 -30
- public_tests/test_structured.py +137 -0
- public_tests/test_structured_outputs.py +22 -13
- public_tests/test_text.py +160 -0
- public_tests/test_tools.py +300 -0
- synth_ai/__init__.py +1 -4
- synth_ai/zyk/__init__.py +2 -2
- synth_ai/zyk/lms/caching/ephemeral.py +54 -32
- synth_ai/zyk/lms/caching/handler.py +43 -15
- synth_ai/zyk/lms/caching/persistent.py +55 -27
- synth_ai/zyk/lms/core/main.py +26 -14
- synth_ai/zyk/lms/core/vendor_clients.py +1 -1
- synth_ai/zyk/lms/structured_outputs/handler.py +79 -45
- synth_ai/zyk/lms/structured_outputs/rehabilitate.py +3 -2
- synth_ai/zyk/lms/tools/base.py +104 -0
- synth_ai/zyk/lms/vendors/base.py +22 -6
- synth_ai/zyk/lms/vendors/core/anthropic_api.py +130 -95
- synth_ai/zyk/lms/vendors/core/gemini_api.py +153 -34
- synth_ai/zyk/lms/vendors/core/mistral_api.py +160 -54
- synth_ai/zyk/lms/vendors/core/openai_api.py +64 -53
- synth_ai/zyk/lms/vendors/openai_standard.py +197 -41
- synth_ai/zyk/lms/vendors/supported/deepseek.py +55 -0
- {synth_ai-0.1.0.dev27.dist-info → synth_ai-0.1.0.dev29.dist-info}/METADATA +2 -5
- synth_ai-0.1.0.dev29.dist-info/RECORD +65 -0
- public_tests/test_sonnet_thinking.py +0 -178
- synth_ai-0.1.0.dev27.dist-info/RECORD +0 -57
- {synth_ai-0.1.0.dev27.dist-info → synth_ai-0.1.0.dev29.dist-info}/WHEEL +0 -0
- {synth_ai-0.1.0.dev27.dist-info → synth_ai-0.1.0.dev29.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.1.0.dev27.dist-info → synth_ai-0.1.0.dev29.dist-info}/top_level.txt +0 -0
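The functional core of this release is tool-calling support: the new `synth_ai/zyk/lms/tools/base.py` defines a `BaseTool` whose `to_openai_tool()` conversion the vendor clients below consume. The diff does not show `BaseTool`'s internals, but `to_openai_tool()` evidently targets the standard OpenAI function-tool schema; a hedged sketch of that payload shape (all field values are illustrative only):

    # Illustrative only: the general OpenAI function-tool schema that
    # BaseTool.to_openai_tool() presumably emits. Names and values are made up.
    example_openai_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up current weather for a city",
            "parameters": {  # JSON Schema for the tool's arguments
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }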
--- a/synth_ai/zyk/lms/vendors/openai_standard.py
+++ b/synth_ai/zyk/lms/vendors/openai_standard.py
@@ -1,15 +1,17 @@
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional, Union
 
+import groq
 import openai
 import pydantic_core
+from pydantic import BaseModel
 
 from synth_ai.zyk.lms.caching.initialize import (
     get_cache_handler,
 )
-from synth_ai.zyk.lms.
+from synth_ai.zyk.lms.tools.base import BaseTool
+from synth_ai.zyk.lms.vendors.base import BaseLMResponse, VendorBase
 from synth_ai.zyk.lms.vendors.constants import SPECIAL_BASE_TEMPS
-
-import groq
+
 DEFAULT_EXCEPTIONS_TO_RETRY = (
     pydantic_core._pydantic_core.ValidationError,
     openai.APIConnectionError,
@@ -23,7 +25,7 @@ DEFAULT_EXCEPTIONS_TO_RETRY = (
 def special_orion_transform(
     model: str, messages: List[Dict[str, Any]]
 ) -> List[Dict[str, Any]]:
-    if "o1-" in model
+    if "o1-" in model:
         messages = [
             {
                 "role": "user",
@@ -63,13 +65,13 @@ class OpenAIStandard(VendorBase):
         self.used_for_structured_outputs = used_for_structured_outputs
         self.exceptions_to_retry = exceptions_to_retry
 
-    @backoff.on_exception(
-        backoff.expo,
-        exceptions_to_retry,
-        max_tries=BACKOFF_TOLERANCE,
-        on_backoff=on_backoff_handler_async,
-        on_giveup=lambda e: print(e),
-    )
+    # @backoff.on_exception(
+    #     backoff.expo,
+    #     exceptions_to_retry,
+    #     max_tries=BACKOFF_TOLERANCE,
+    #     on_backoff=on_backoff_handler_async,
+    #     on_giveup=lambda e: print(e),
+    # )
     async def _hit_api_async(
         self,
         model: str,
@@ -77,21 +79,18 @@ class OpenAIStandard(VendorBase):
         lm_config: Dict[str, Any],
         use_ephemeral_cache_only: bool = False,
         reasoning_effort: str = "high",
-    ):
+        tools: Optional[List[BaseTool]] = None,
+    ) -> BaseLMResponse:
         assert (
             lm_config.get("response_model", None) is None
         ), "response_model is not supported for standard calls"
         messages = special_orion_transform(model, messages)
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config
+            model, messages, lm_config=lm_config, tools=tools
         )
         if cache_result:
-            return (
-                cache_result["response"]
-                if isinstance(cache_result, dict)
-                else cache_result
-            )
+            return cache_result
 
         # Common API call params
         api_params = {
@@ -99,6 +98,10 @@ class OpenAIStandard(VendorBase):
             "messages": messages,
         }
 
+        # Add tools if provided
+        if tools:
+            api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+
         # Only add temperature for non o1/o3 models
         if not any(prefix in model for prefix in ["o1-", "o3-"]):
             api_params["temperature"] = lm_config.get(
@@ -106,25 +109,45 @@ class OpenAIStandard(VendorBase):
         )
 
         # Add reasoning_effort only for o3-mini
-        if "o3-mini"
-
+        if model in ["o3-mini"]:
+            print("Reasoning effort:", reasoning_effort)
             api_params["reasoning_effort"] = reasoning_effort
 
         output = await self.async_client.chat.completions.create(**api_params)
+        message = output.choices[0].message
+
+        # Convert tool calls to dict format
+        tool_calls = None
+        if message.tool_calls:
+            tool_calls = [
+                {
+                    "id": tc.id,
+                    "type": tc.type,
+                    "function": {
+                        "name": tc.function.name,
+                        "arguments": tc.function.arguments,
+                    },
+                }
+                for tc in message.tool_calls
+            ]
 
-
+        lm_response = BaseLMResponse(
+            raw_response=message.content or "",  # Use empty string if no content
+            structured_output=None,
+            tool_calls=tool_calls,
+        )
         used_cache_handler.add_to_managed_cache(
-            model, messages, lm_config=lm_config, output=
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools
         )
-        return
-
-    @backoff.on_exception(
-        backoff.expo,
-        exceptions_to_retry,
-        max_tries=BACKOFF_TOLERANCE,
-        on_backoff=on_backoff_handler_sync,
-        on_giveup=lambda e: print(e),
-    )
+        return lm_response
+
+    # @backoff.on_exception(
+    #     backoff.expo,
+    #     exceptions_to_retry,
+    #     max_tries=BACKOFF_TOLERANCE,
+    #     on_backoff=on_backoff_handler_sync,
+    #     on_giveup=lambda e: print(e),
+    # )
     def _hit_api_sync(
         self,
         model: str,
@@ -132,7 +155,8 @@ class OpenAIStandard(VendorBase):
         lm_config: Dict[str, Any],
         use_ephemeral_cache_only: bool = False,
         reasoning_effort: str = "high",
-    ):
+        tools: Optional[List[BaseTool]] = None,
+    ) -> BaseLMResponse:
         assert (
             lm_config.get("response_model", None) is None
         ), "response_model is not supported for standard calls"
@@ -141,21 +165,145 @@ class OpenAIStandard(VendorBase):
             use_ephemeral_cache_only=use_ephemeral_cache_only
         )
         cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config
+            model, messages, lm_config=lm_config, tools=tools
         )
         if cache_result:
-            return
-
-
-
+            return cache_result
+
+        # Common API call params
+        api_params = {
+            "model": model,
+            "messages": messages,
+        }
+
+        # Add tools if provided
+        if tools:
+            api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+
+        # Only add temperature for non o1/o3 models
+        if not any(prefix in model for prefix in ["o1-", "o3-"]):
+            api_params["temperature"] = lm_config.get(
+                "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
             )
 
+        # Add reasoning_effort only for o3-mini
+        if model in ["o3-mini"]:
+            api_params["reasoning_effort"] = reasoning_effort
+
+        output = self.sync_client.chat.completions.create(**api_params)
+        message = output.choices[0].message
+
+        # Convert tool calls to dict format
+        tool_calls = None
+        if message.tool_calls:
+            tool_calls = [
+                {
+                    "id": tc.id,
+                    "type": tc.type,
+                    "function": {
+                        "name": tc.function.name,
+                        "arguments": tc.function.arguments,
+                    },
+                }
+                for tc in message.tool_calls
+            ]
+
+        lm_response = BaseLMResponse(
+            raw_response=message.content or "",  # Use empty string if no content
+            structured_output=None,
+            tool_calls=tool_calls,
+        )
+        used_cache_handler.add_to_managed_cache(
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools
+        )
+        return lm_response
+
+    async def _hit_api_async_structured_output(
+        self,
+        model: str,
+        messages: List[Dict[str, Any]],
+        response_model: BaseModel,
+        temperature: float,
+        use_ephemeral_cache_only: bool = False,
+        reasoning_effort: str = "high",
+        tools: Optional[List[BaseTool]] = None,
+    ) -> BaseLMResponse:
+        lm_config = {"temperature": temperature, "response_model": response_model}
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+        cache_result: Union[BaseLMResponse, None] = (
+            used_cache_handler.hit_managed_cache(
+                model, messages, lm_config=lm_config, tools=tools
+            )
+        )
+        if cache_result is not None:
+            return cache_result
+
+        # Common API call params
+        api_params = {
+            "model": model,
+            "messages": messages,
+        }
+
+        # Add tools if provided
+        if tools:
+            api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+
+        # Only add temperature for non o1/o3 models
+        if not any(prefix in model for prefix in ["o1-", "o3-"]):
+            api_params["temperature"] = lm_config.get(
+                "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
+            )
+
+        # Add reasoning_effort only for o3-mini
+        if model in ["o3-mini"]:
+            api_params["reasoning_effort"] = reasoning_effort
+
+        output = await self.async_client.chat.completions.create(**api_params)
+
+        structured_output_api_result = response_model(
+            **output.choices[0].message.content
+        )
+        tool_calls = output.choices[0].message.tool_calls
+        lm_response = BaseLMResponse(
+            raw_response=output.choices[0].message.content,
+            structured_output=structured_output_api_result,
+            tool_calls=tool_calls,
+        )
+        used_cache_handler.add_to_managed_cache(
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools
+        )
+        return lm_response
+
+    def _hit_api_sync_structured_output(
+        self,
+        model: str,
+        messages: List[Dict[str, Any]],
+        response_model: BaseModel,
+        temperature: float,
+        use_ephemeral_cache_only: bool = False,
+        reasoning_effort: str = "high",
+        tools: Optional[List[BaseTool]] = None,
+    ) -> BaseLMResponse:
+        lm_config = {"temperature": temperature, "response_model": response_model}
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+        cache_result: Union[BaseLMResponse, None] = (
+            used_cache_handler.hit_managed_cache(
+                model, messages, lm_config=lm_config, tools=tools
+            )
+        )
+        if cache_result is not None:
+            return cache_result
+
         # Common API call params
         api_params = {
             "model": model,
             "messages": messages,
         }
 
+        # Add tools if provided
+        if tools:
+            api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+
         # Only add temperature for non o1/o3 models
         if not any(prefix in model for prefix in ["o1-", "o3-"]):
             api_params["temperature"] = lm_config.get(
@@ -168,8 +316,16 @@ class OpenAIStandard(VendorBase):
 
         output = self.sync_client.chat.completions.create(**api_params)
 
-
+        structured_output_api_result = response_model(
+            **output.choices[0].message.content
+        )
+        tool_calls = output.choices[0].message.tool_calls
+        lm_response = BaseLMResponse(
+            raw_response=output.choices[0].message.content,
+            structured_output=structured_output_api_result,
+            tool_calls=tool_calls,
+        )
         used_cache_handler.add_to_managed_cache(
-            model, messages, lm_config=lm_config, output=
+            model, messages, lm_config=lm_config, output=lm_response, tools=tools
         )
-        return
+        return lm_response
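After these changes every vendor call returns a BaseLMResponse instead of a bare string, and tool invocations surface as the plain dicts built in the hunks above. A minimal consumption sketch (the `response` variable stands in for the return value of `_hit_api_sync`; parsing the arguments string with `json.loads` is an assumption about caller usage, not something this diff shows):

    import json

    # `response` is a BaseLMResponse as constructed above:
    # raw_response: str, structured_output, and tool_calls (list of dicts or None).
    def handle_response(response) -> None:
        if response.tool_calls:
            for call in response.tool_calls:
                fn = call["function"]
                # arguments arrive as a JSON string, mirroring the OpenAI API
                args = json.loads(fn["arguments"])
                print(f"model requested tool {fn['name']!r} with {args}")
        else:
            print(response.raw_response)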
--- a/synth_ai/zyk/lms/vendors/supported/deepseek.py
+++ b/synth_ai/zyk/lms/vendors/supported/deepseek.py
@@ -1,7 +1,9 @@
 import os
+from typing import Any, Dict, List, Optional, Tuple
 
 from openai import AsyncOpenAI, OpenAI
 
+from synth_ai.zyk.lms.tools.base import BaseTool
 from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
 
 
@@ -16,3 +18,56 @@ class DeepSeekAPI(OpenAIStandard):
             api_key=os.environ.get("DEEPSEEK_API_KEY"),
             base_url="https://api.deepseek.com",
         )
+
+    def _convert_tools_to_openai_format(self, tools: List[BaseTool]) -> List[Dict]:
+        return [tool.to_openai_tool() for tool in tools]
+
+    async def _private_request_async(
+        self,
+        messages: List[Dict],
+        temperature: float = 0,
+        model_name: str = "deepseek-chat",
+        reasoning_effort: str = "high",
+        tools: Optional[List[BaseTool]] = None,
+        lm_config: Optional[Dict[str, Any]] = None,
+    ) -> Tuple[str, Optional[List[Dict]]]:
+        request_params = {
+            "model": model_name,
+            "messages": messages,
+            "temperature": temperature,
+        }
+
+        if tools:
+            request_params["tools"] = self._convert_tools_to_openai_format(tools)
+
+        response = await self.async_client.chat.completions.create(**request_params)
+        message = response.choices[0].message
+
+        return message.content, message.tool_calls if hasattr(
+            message, "tool_calls"
+        ) else None
+
+    def _private_request_sync(
+        self,
+        messages: List[Dict],
+        temperature: float = 0,
+        model_name: str = "deepseek-chat",
+        reasoning_effort: str = "high",
+        tools: Optional[List[BaseTool]] = None,
+        lm_config: Optional[Dict[str, Any]] = None,
+    ) -> Tuple[str, Optional[List[Dict]]]:
+        request_params = {
+            "model": model_name,
+            "messages": messages,
+            "temperature": temperature,
+        }
+
+        if tools:
+            request_params["tools"] = self._convert_tools_to_openai_format(tools)
+
+        response = self.sync_client.chat.completions.create(**request_params)
+        message = response.choices[0].message
+
+        return message.content, message.tool_calls if hasattr(
+            message, "tool_calls"
+        ) else None
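The DeepSeek client reuses the OpenAI-compatible endpoint, so tool calling is just a passthrough of the converted schemas. A quick sketch of calling the new sync helper (the no-argument constructor is an assumption based on the env-var setup shown above, and `search_tool` is a hypothetical BaseTool instance defined elsewhere; the method defaults and the `(content, tool_calls)` return shape come from the hunk above):

    # Sketch only: assumes DEEPSEEK_API_KEY is set in the environment.
    from synth_ai.zyk.lms.vendors.supported.deepseek import DeepSeekAPI

    client = DeepSeekAPI()  # targets https://api.deepseek.com per the diff
    content, tool_calls = client._private_request_sync(
        messages=[{"role": "user", "content": "What's trending in AI today?"}],
        temperature=0,
        model_name="deepseek-chat",
        tools=[search_tool],  # hypothetical BaseTool instance
    )
    if tool_calls:
        # Entries follow the OpenAI SDK tool-call shape (id / type / function).
        print(tool_calls[0].function.name, tool_calls[0].function.arguments)
    else:
        print(content)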
--- a/synth_ai-0.1.0.dev27.dist-info/METADATA
+++ b/synth_ai-0.1.0.dev29.dist-info/METADATA
@@ -1,8 +1,7 @@
 Metadata-Version: 2.3
 Name: synth-ai
-Version: 0.1.0.dev27
+Version: 0.1.0.dev29
 Summary: Software for aiding the best and multiplying the will.
-Home-page: https://github.com/synth-laboratories/synth-ai
 Author: Josh Purtell
 Author-email: Josh Purtell <josh@usesynth.ai>
 License: MIT License
@@ -44,15 +43,13 @@ Requires-Dist: google>=3.0.0
 Requires-Dist: google-generativeai>=0.8.1
 Requires-Dist: together>=1.2.12
 Requires-Dist: langfuse>=2.56.1
-Requires-Dist: synth-sdk
+Requires-Dist: synth-sdk==0.3.1.dev5
 Requires-Dist: datasets>=3.2.0
 Requires-Dist: groq>=0.18.0
 Requires-Dist: pytest-timeout>=2.3.1
 Requires-Dist: lock>=2018.3.25.2110
 Requires-Dist: ollama>=0.4.7
 Requires-Dist: mistralai>=1.5.0
-Dynamic: author
-Dynamic: home-page
 Dynamic: license-file
 
 AI Infra used by the Synth AI Team
--- /dev/null
+++ b/synth_ai-0.1.0.dev29.dist-info/RECORD
@@ -0,0 +1,65 @@
+private_tests/try_synth_sdk.py,sha256=vk4lUEfpQfLACFl6Qw468t_lsuYxuoIIr05WRgWKGKY,24
+public_tests/test_agent.py,sha256=OGmUsd-f0ZRL0xIwJ0MO_dPYQOshNdnmmESxfTSytDM,22552
+public_tests/test_all_structured_outputs.py,sha256=bIcchimaVkq8q8D-GKO25d1_SauTFh4a0c3OEKh3_8o,6524
+public_tests/test_anthropic_structured_outputs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+public_tests/test_deepseek_structured_outputs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+public_tests/test_deepseek_tools.py,sha256=MxEaiT_zinuMPeylqfNMsD11zRaMEwY0Fi28bg5op0A,1895
+public_tests/test_gemini_structured_outputs.py,sha256=yKa3CDVJxE_Vb2BbVROje83Pb35MBusF0Nb-ttWbqS8,4001
+public_tests/test_models.py,sha256=QGevBfBuQzwyKw1ez34igDyJpMTBVOc3meW6yqFE-bM,5853
+public_tests/test_openai_structured_outputs.py,sha256=oIhdZ2QVLmn0LaqBpCP3Qhbn2KHJv633DGn6u9Ousak,3999
+public_tests/test_reasoning_models.py,sha256=Vr4sFRYcrYOBAZMFz2a0fZQqa-WjRwbtwc6lXy6bF4I,2897
+public_tests/test_recursive_structured_outputs.py,sha256=rrqzsU5ExNt-m_wu9j_fkbHiEsAtbKEK66uK5Ub2ojs,6296
+public_tests/test_structured.py,sha256=rftVwvYgMSHkRZM1WUJzga5Uvl9hmc5OpXzBshEXNF0,3740
+public_tests/test_structured_outputs.py,sha256=9SFpH4RQ6nRcphBVmELRNSvhRjYaJBu_z-r6xqKAYpg,4213
+public_tests/test_synth_sdk.py,sha256=jqJHKpvBn9qj21P76z9onXfPg88jyUmBTKmdvCsQMk8,14885
+public_tests/test_text.py,sha256=UyPZ0ci-XBjK35tAeV0kN1X8Njf-0pHfEPZhsWDZ0-c,4072
+public_tests/test_tools.py,sha256=LXR78QWYssjtIQwUIJAn5O747tUDWbbPTGpf0VyojS8,10111
+synth_ai/__init__.py,sha256=tX_fcK8u64BoPEboRa3dIKK_WpLy5KAxL2Ucl-l0xVg,147
+synth_ai/zyk/__init__.py,sha256=kGMD-drlBVdsyT-QFODMwaZUtxPCJ9mg58GKQUvFqo0,134
+synth_ai/zyk/lms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/config.py,sha256=CcN5NL99j0UZubyGo-MUfbPD3pWosAMju_sqgfvqLVY,201
+synth_ai/zyk/lms/caching/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/caching/constants.py,sha256=fPi3x9p-yRdvixMSIyclvmwmwCRliXLXQjEm6dRnG8s,52
+synth_ai/zyk/lms/caching/dbs.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/caching/ephemeral.py,sha256=pNMG5Rzzp2m0Ln1UYmWxz1qbXwq3iNIrhjYAS0yO3ZE,2370
+synth_ai/zyk/lms/caching/handler.py,sha256=6DhILKDiF7SRhzHyKgpdq_JS_VN1qS_ZQZRJW-0VF_o,4149
+synth_ai/zyk/lms/caching/initialize.py,sha256=zZls6RKAax6Z-8oJInGaSg_RPN_fEZ6e_RCX64lMLJw,416
+synth_ai/zyk/lms/caching/persistent.py,sha256=ZaY1A9qhvfNKzcAI9FnwbIrgMKvVeIfb_yCyl3M8dxE,2860
+synth_ai/zyk/lms/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/core/all.py,sha256=wakK0HhvYRuaQZmxClURyNf3vUkTbm3OABw3TgpMjOQ,1185
+synth_ai/zyk/lms/core/exceptions.py,sha256=K0BVdAzxVIchsvYZAaHEH1GAWBZvpxhFi-SPcJOjyPQ,205
+synth_ai/zyk/lms/core/main.py,sha256=xuoCBAzWnyY52DVdBUJPjIoEIUaCtheAcHG1ZyP8ndQ,10223
+synth_ai/zyk/lms/core/vendor_clients.py,sha256=go6VGF3-JkZyUD81LwRkcBaxdWSVaV9vRxVTNqKSxvM,2781
+synth_ai/zyk/lms/cost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/cost/monitor.py,sha256=cSKIvw6WdPZIRubADWxQoh1MdB40T8-jjgfNUeUHIn0,5
+synth_ai/zyk/lms/cost/statefulness.py,sha256=TOsuXL8IjtKOYJ2aJQF8TwJVqn_wQ7AIwJJmdhMye7U,36
+synth_ai/zyk/lms/structured_outputs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/structured_outputs/handler.py,sha256=gh3KtmZCyUima5BunpjaeKcanmRWJvCy8Ly8gUgDf_M,16915
+synth_ai/zyk/lms/structured_outputs/inject.py,sha256=Fy-zDeleRxOZ8ZRM6IuZ6CP2XZnMe4K2PEn4Q9c_KPY,11777
+synth_ai/zyk/lms/structured_outputs/rehabilitate.py,sha256=GuIhzsb7rTvwgn7f9I9omNnXBz5Me_qrtNYcTWzw5_U,7909
+synth_ai/zyk/lms/tools/base.py,sha256=j7wYb1xAvaAm3qVrINphgUhGS-UjZmRpbouseQYgh7A,3228
+synth_ai/zyk/lms/vendors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/vendors/base.py,sha256=aK4PEtkMLt_o3qD22kW-x3HJUEKdIk06zlH4kX0VkAE,760
+synth_ai/zyk/lms/vendors/constants.py,sha256=zqCOyXZqo297wboR9EKVSkvpq6JCMSJyeso8HdZPKa4,102
+synth_ai/zyk/lms/vendors/openai_standard.py,sha256=Th_0QjmrJ7gemxsKnWmij46lIz4QWZOi7Du5OOiLUcc,11413
+synth_ai/zyk/lms/vendors/retries.py,sha256=m-WvAiPix9ovnO2S-m53Td5VZDWBVBFuHuSK9--OVxw,38
+synth_ai/zyk/lms/vendors/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=QM4xuaigdVOjBuzkPyT-RSOtvT2wiKxAiHRfI77GYn8,13461
+synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=j8pGGGkJNOVmadpkZsGYhwoSkwsdpX_jAXhrfydoNk8,9631
+synth_ai/zyk/lms/vendors/core/mistral_api.py,sha256=-EMPBEIoYxxDMxukmcmKL8AGAHPNYe4w-76gsPtmrhk,11860
+synth_ai/zyk/lms/vendors/core/openai_api.py,sha256=QkQqba851EEGf9n5H31-pJ6WexhTZkdPWQap0oGy2Ho,6713
+synth_ai/zyk/lms/vendors/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/vendors/local/ollama.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/vendors/supported/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synth_ai/zyk/lms/vendors/supported/deepseek.py,sha256=BElW0NGpkSA62wOqzzMtDw8XR36rSNXK5LldeHJkQrc,2430
+synth_ai/zyk/lms/vendors/supported/groq.py,sha256=Fbi7QvhdLx0F-VHO5PY-uIQlPR0bo3C9h1MvIOx8nz0,388
+synth_ai/zyk/lms/vendors/supported/ollama.py,sha256=K30VBFRTd7NYyPmyBVRZS2sm0UB651AHp9i3wd55W64,469
+synth_ai/zyk/lms/vendors/supported/together.py,sha256=Ni_jBqqGPN0PkkY-Ew64s3gNKk51k3FCpLSwlNhKbf0,342
+synth_ai-0.1.0.dev29.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
+tests/test_agent.py,sha256=CjPPWuMWC_TzX1DkDald-bbAxgjXE-HPQvFhq2B--5k,22363
+tests/test_recursive_structured_outputs.py,sha256=Ne-9XwnOxN7eSpGbNHOpegR-sRj589I84T6y8Z_4QnA,5781
+tests/test_structured_outputs.py,sha256=J7sfbGZ7OeB5ONIKpcCTymyayNyAdFfGokC1bcUrSx0,3651
+synth_ai-0.1.0.dev29.dist-info/METADATA,sha256=roTXgOH0Ms_JHCBW8uyG1nUnwV3QDamb-PlvmGqZ-Hw,2702
+synth_ai-0.1.0.dev29.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+synth_ai-0.1.0.dev29.dist-info/top_level.txt,sha256=5GzJO9j-KbJ_4ppxhmCUa_qdhHM4-9cHHNU76yAI8do,42
+synth_ai-0.1.0.dev29.dist-info/RECORD,,
--- a/public_tests/test_sonnet_thinking.py
+++ /dev/null
@@ -1,178 +0,0 @@
-import asyncio
-import unittest
-
-from synth_ai.zyk import LM
-
-
-class TestSonnetThinking(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cls.lm = LM(
-            model_name="claude-3-7-sonnet-latest",
-            formatting_model_name="gpt-4o-mini",
-            temperature=0,
-        )
-        # Set reasoning_effort in lm_config
-        cls.lm.lm_config["reasoning_effort"] = "high"
-
-    async def test_thinking_response(self):
-        messages = [
-            {"role": "system", "content": "You are a helpful AI assistant."},
-            {
-                "role": "user",
-                "content": "Please solve this math problem step by step: If a train travels at 60 mph for 2.5 hours, how far does it travel?",
-            },
-        ]
-
-        response = await self.lm.respond_async(messages=messages)
-        print("\n=== Math Problem Test ===")
-        print(f"Response:\n{response}\n")
-        self.assertIsInstance(response, str)
-        self.assertGreater(len(response), 0)
-
-        # Test that the response includes numerical calculation
-        self.assertTrue(any(char.isdigit() for char in response))
-
-    async def test_thinking_structured_output(self):
-        from pydantic import BaseModel
-
-        class MathSolution(BaseModel):
-            steps: list[str]
-            final_answer: float
-            units: str
-
-        messages = [
-            {"role": "system", "content": "You are a math problem solver."},
-            {
-                "role": "user",
-                "content": "If a car travels at 30 mph for 45 minutes, how far does it travel? Provide steps.",
-            },
-        ]
-
-        response = await self.lm.respond_async(
-            messages=messages, response_model=MathSolution
-        )
-
-        print("\n=== Structured Math Problem Test ===")
-        print(f"Steps:")
-        for i, step in enumerate(response.steps, 1):
-            print(f"{i}. {step}")
-        print(f"Final Answer: {response.final_answer} {response.units}\n")
-
-        self.assertIsInstance(response, MathSolution)
-        self.assertGreater(len(response.steps), 0)
-        self.assertIsInstance(response.final_answer, float)
-        self.assertIsInstance(response.units, str)
-
-    async def test_thinking_with_high_effort(self):
-        messages = [
-            {
-                "role": "system",
-                "content": "You are a problem-solving AI. Break down complex problems into detailed steps.",
-            },
-            {
-                "role": "user",
-                "content": "Design a system to automate a coffee shop's inventory management. Consider all aspects.",
-            },
-        ]
-
-        print("\n=== High Effort Thinking Test ===")
-        response = await self.lm.respond_async(messages=messages)
-        print(f"High Effort Response:\n{response}\n")
-        self.assertIsInstance(response, str)
-        self.assertGreater(len(response), 100)  # Expecting detailed response
-
-        # Test with medium effort
-        lm_medium = LM(
-            model_name="claude-3-7-sonnet-latest",
-            formatting_model_name="gpt-4o-mini",
-            temperature=0,
-        )
-        lm_medium.lm_config["reasoning_effort"] = "medium"
-        print("\n=== Medium Effort Thinking Test ===")
-        response_medium = await lm_medium.respond_async(messages=messages)
-        print(f"Medium Effort Response:\n{response_medium}\n")
-        self.assertIsInstance(response_medium, str)
-
-    async def test_thinking_blocks_attributes(self):
-        """Test to verify thinking blocks have the correct attributes and structure"""
-        messages = [
-            {"role": "system", "content": "You are a helpful AI assistant."},
-            {
-                "role": "user",
-                "content": "Please solve this math problem step by step: If a train travels at 60 mph for 2.5 hours, how far does it travel?",
-            },
-        ]
-
-        print("\n=== Testing Thinking Blocks Structure ===")
-        try:
-            response = await self.lm.respond_async(messages=messages)
-            print(f"Response received successfully: {response[:100]}...")
-            self.assertIsInstance(response, str)
-            self.assertGreater(len(response), 0)
-        except AttributeError as e:
-            if "'TextBlock' object has no attribute 'value'" in str(e):
-                self.fail(
-                    "TextBlock missing 'value' attribute - API response structure may have changed"
-                )
-            raise
-
-    async def test_thinking_blocks_with_structured_output(self):
-        """Test thinking blocks with structured output to verify attribute handling"""
-        from pydantic import BaseModel
-
-        class SimpleResponse(BaseModel):
-            answer: str
-            explanation: str
-
-        messages = [
-            {"role": "system", "content": "You are a helpful AI assistant."},
-            {"role": "user", "content": "What is 2+2? Provide answer and explanation."},
-        ]
-
-        print("\n=== Testing Thinking Blocks with Structured Output ===")
-        try:
-            response = await self.lm.respond_async(
-                messages=messages, response_model=SimpleResponse
-            )
-            print(f"Structured response received: {response}")
-            self.assertIsInstance(response, SimpleResponse)
-            self.assertTrue(hasattr(response, "answer"))
-            self.assertTrue(hasattr(response, "explanation"))
-        except AttributeError as e:
-            if "'TextBlock' object has no attribute 'value'" in str(e):
-                self.fail("TextBlock missing 'value' attribute in structured output")
-            raise
-
-    async def test_thinking_blocks_raw_response(self):
-        """Test to examine the raw response structure from the API"""
-        messages = [
-            {"role": "system", "content": "You are a helpful AI assistant."},
-            {"role": "user", "content": "Count from 1 to 3."},
-        ]
-
-        print("\n=== Testing Raw Response Structure ===")
-        try:
-            # Access the raw response if possible
-            response = await self.lm.respond_async(messages=messages)
-            print(f"Raw response type: {type(response)}")
-            print(f"Raw response content: {response}")
-            self.assertIsInstance(response, str)
-        except Exception as e:
-            print(f"Exception type: {type(e)}")
-            print(f"Exception message: {str(e)}")
-            raise
-
-    def test_all(self):
-        print("\nStarting Claude 3.7 Sonnet Thinking Tests...")
-        asyncio.run(self.test_thinking_response())
-        asyncio.run(self.test_thinking_structured_output())
-        asyncio.run(self.test_thinking_with_high_effort())
-        asyncio.run(self.test_thinking_blocks_attributes())
-        asyncio.run(self.test_thinking_blocks_with_structured_output())
-        asyncio.run(self.test_thinking_blocks_raw_response())
-        print("\nAll tests completed successfully!")
-
-
-if __name__ == "__main__":
-    unittest.main()