synth-ai 0.1.0.dev32__py3-none-any.whl → 0.1.0.dev34__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
--- public_tests/test_tools.py
+++ public_tests/test_tools.py
@@ -147,6 +147,25 @@ def test_weather_tool_anthropic_lm():
     assert "location" in arguments
     assert "Paris" in arguments

+def test_weather_tool_anthropic_35():
+    lm = LM(
+        model_name="claude-3-5-sonnet-latest",
+        formatting_model_name="claude-3-5-sonnet-20241022",
+        temperature=0,
+    )
+
+    response = lm.respond_sync(
+        system_message="You are a helpful assistant that uses tools when appropriate.",
+        user_message="What's the weather in Paris? Use the tools and explain your reasoning. Units local to the country, please!",
+        tools=[weather_tool],
+    )
+
+    assert response.tool_calls is not None
+    assert len(response.tool_calls) > 0
+    assert response.tool_calls[0]["function"]["name"] == "get_weather"
+    assert "arguments" in response.tool_calls[0]["function"]
+    arguments = response.tool_calls[0]["function"]["arguments"]
+    assert isinstance(arguments, str)

 # Gemini Tests
 def test_weather_tool_gemini_direct():
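The new test pins down the tool-call shape that the response object exposes for Anthropic models: an OpenAI-style list of dicts whose `function.arguments` field is a JSON-encoded string rather than a parsed object. A small sketch of consuming that shape; the exact keys and values below are illustrative, not taken from the package:

```python
import json

# Illustrative tool call in the shape the new assertions check for.
tool_call = {
    "id": "call_0",
    "type": "function",
    "function": {
        "name": "get_weather",
        "arguments": json.dumps({"location": "Paris", "units": "celsius"}),
    },
}

assert tool_call["function"]["name"] == "get_weather"
assert isinstance(tool_call["function"]["arguments"], str)

# Because arguments arrive as a string, callers decode them before use.
arguments = json.loads(tool_call["function"]["arguments"])
assert "location" in arguments and "Paris" in arguments["location"]
```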
--- synth_ai/zyk/lms/vendors/constants.py
+++ synth_ai/zyk/lms/vendors/constants.py
@@ -2,4 +2,21 @@ SPECIAL_BASE_TEMPS = {
     "claude-3-5-sonnet-20240620": 0.7,
     "o1-mini": 1,
     "o1-preview": 1,
-}
+    "o1": 1,
+    "o3-mini": 1,
+    "o3": 1,
+    "o4-mini": 1,
+    "o4": 1,
+}
+
+openai_reasoners = [
+    "o1-mini",
+    "o1-preview",
+    "o1",
+    "o3-mini",
+    "o3",
+    "o4-mini",
+    "o4",
+    "o5-mini",
+    "o5",
+]
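Read together with the openai_standard.py hunks below, the new `openai_reasoners` list drives a single gate on request parameters: models in the list skip `temperature` and receive `reasoning_effort` instead. A minimal standalone sketch of that gating, using a hypothetical `build_params` helper that is not part of the package:

```python
SPECIAL_BASE_TEMPS = {"o1-mini": 1, "o1": 1, "o3-mini": 1}
openai_reasoners = ["o1-mini", "o1-preview", "o1", "o3-mini", "o3", "o4-mini", "o4", "o5-mini", "o5"]

def build_params(model: str, temperature: float = 0.7, reasoning_effort: str = "high") -> dict:
    # Mirrors the branching added in OpenAIStandard: reasoning models get
    # reasoning_effort, every other model gets a temperature (with per-model defaults).
    params: dict = {"model": model}
    if model not in openai_reasoners:
        params["temperature"] = SPECIAL_BASE_TEMPS.get(model, temperature)
    if model in openai_reasoners:
        params["reasoning_effort"] = reasoning_effort
    return params

assert "temperature" not in build_params("o3-mini")
assert build_params("o3-mini")["reasoning_effort"] == "high"
assert build_params("gpt-4o")["temperature"] == 0.7  # any non-reasoning model name works here
```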
--- synth_ai/zyk/lms/vendors/core/anthropic_api.py
+++ synth_ai/zyk/lms/vendors/core/anthropic_api.py
@@ -82,7 +82,12 @@ class AnthropicAPI(VendorBase):

         # Add tools if provided
         if tools:
-            api_params["tools"] = [tool.to_anthropic_tool() for tool in tools]
+            api_params["tools"] = []
+            for tool in tools:
+                if isinstance(tool, BaseTool):
+                    api_params["tools"].append(tool.to_anthropic_tool())
+                else:
+                    api_params["tools"].append(tool)

         # Only try to add thinking if supported by the SDK
         try:
@@ -175,7 +180,12 @@ class AnthropicAPI(VendorBase):

         # Add tools if provided
         if tools:
-            api_params["tools"] = [tool.to_anthropic_tool() for tool in tools]
+            api_params["tools"] = []
+            for tool in tools:
+                if isinstance(tool, BaseTool):
+                    api_params["tools"].append(tool.to_anthropic_tool())
+                else:
+                    api_params["tools"].append(tool)

         # Only try to add thinking if supported by the SDK
         try:
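The tool-handling change, repeated in both AnthropicAPI hunks here and in the OpenAIStandard hunks below, means the `tools` argument can now mix BaseTool instances with tool definitions that are already provider-formatted dicts. A sketch of that normalization in isolation, with a stand-in class since BaseTool's full interface is not shown in this diff:

```python
from typing import Union

class StubTool:
    """Stand-in for synth_ai.zyk.lms.tools.base.BaseTool (illustrative only)."""

    def to_anthropic_tool(self) -> dict:
        return {
            "name": "get_weather",
            "description": "Look up current weather for a location.",
            "input_schema": {"type": "object", "properties": {"location": {"type": "string"}}},
        }

def normalize_tools(tools: list[Union[StubTool, dict]]) -> list[dict]:
    # Mirrors the new branch: wrapped tools are serialized via to_anthropic_tool(),
    # plain dicts are passed through to the API untouched.
    normalized: list[dict] = []
    for tool in tools:
        if isinstance(tool, StubTool):
            normalized.append(tool.to_anthropic_tool())
        else:
            normalized.append(tool)
    return normalized

raw_tool = {"name": "get_time", "description": "Current time.", "input_schema": {"type": "object"}}
tools = normalize_tools([StubTool(), raw_tool])
assert tools[0]["name"] == "get_weather" and tools[1] is raw_tool
```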
--- synth_ai/zyk/lms/vendors/openai_standard.py
+++ synth_ai/zyk/lms/vendors/openai_standard.py
@@ -10,7 +10,7 @@ from synth_ai.zyk.lms.caching.initialize import (
 )
 from synth_ai.zyk.lms.tools.base import BaseTool
 from synth_ai.zyk.lms.vendors.base import BaseLMResponse, VendorBase
-from synth_ai.zyk.lms.vendors.constants import SPECIAL_BASE_TEMPS
+from synth_ai.zyk.lms.vendors.constants import SPECIAL_BASE_TEMPS, openai_reasoners

 DEFAULT_EXCEPTIONS_TO_RETRY = (
     pydantic_core._pydantic_core.ValidationError,
@@ -100,16 +100,21 @@ class OpenAIStandard(VendorBase):

         # Add tools if provided
         if tools:
-            api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+            api_params["tools"] = []
+            for tool in tools:
+                if isinstance(tool, BaseTool):
+                    api_params["tools"].append(tool.to_openai_tool())
+                else:
+                    api_params["tools"].append(tool)

         # Only add temperature for non o1/o3 models
-        if not any(prefix in model for prefix in ["o1-", "o3-"]):
+        if model not in openai_reasoners:
             api_params["temperature"] = lm_config.get(
                 "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
             )

         # Add reasoning_effort only for o3-mini
-        if model in ["o3-mini"]:
+        if model in openai_reasoners:
             print("Reasoning effort:", reasoning_effort)
             api_params["reasoning_effort"] = reasoning_effort

@@ -178,16 +183,21 @@ class OpenAIStandard(VendorBase):

         # Add tools if provided
         if tools:
-            api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+            api_params["tools"] = []
+            for tool in tools:
+                if isinstance(tool, BaseTool):
+                    api_params["tools"].append(tool.to_openai_tool())
+                else:
+                    api_params["tools"].append(tool)

         # Only add temperature for non o1/o3 models
-        if not any(prefix in model for prefix in ["o1-", "o3-"]):
+        if model not in openai_reasoners:
             api_params["temperature"] = lm_config.get(
                 "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
             )

         # Add reasoning_effort only for o3-mini
-        if model in ["o3-mini"]:
+        if model in openai_reasoners:
             api_params["reasoning_effort"] = reasoning_effort

         output = self.sync_client.chat.completions.create(**api_params)
@@ -246,16 +256,21 @@ class OpenAIStandard(VendorBase):

         # Add tools if provided
         if tools:
-            api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+            api_params["tools"] = []
+            for tool in tools:
+                if isinstance(tool, BaseTool):
+                    api_params["tools"].append(tool.to_openai_tool())
+                else:
+                    api_params["tools"].append(tool)

         # Only add temperature for non o1/o3 models
-        if not any(prefix in model for prefix in ["o1-", "o3-"]):
+        if model not in openai_reasoners:
             api_params["temperature"] = lm_config.get(
                 "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
             )

         # Add reasoning_effort only for o3-mini
-        if model in ["o3-mini"]:
+        if model in openai_reasoners:
             api_params["reasoning_effort"] = reasoning_effort

         output = await self.async_client.chat.completions.create(**api_params)
@@ -302,16 +317,21 @@ class OpenAIStandard(VendorBase):

         # Add tools if provided
         if tools:
-            api_params["tools"] = [tool.to_openai_tool() for tool in tools]
+            api_params["tools"] = []
+            for tool in tools:
+                if isinstance(tool, BaseTool):
+                    api_params["tools"].append(tool.to_openai_tool())
+                else:
+                    api_params["tools"].append(tool)

         # Only add temperature for non o1/o3 models
-        if not any(prefix in model for prefix in ["o1-", "o3-"]):
+        if model not in openai_reasoners:
             api_params["temperature"] = lm_config.get(
                 "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
             )

         # Add reasoning_effort only for o3-mini
-        if model in ["o3-mini"]:
+        if model in openai_reasoners:
             api_params["reasoning_effort"] = reasoning_effort

         output = self.sync_client.chat.completions.create(**api_params)
--- synth_ai-0.1.0.dev32.dist-info/METADATA
+++ synth_ai-0.1.0.dev34.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: synth-ai
-Version: 0.1.0.dev32
+Version: 0.1.0.dev34
 Summary: Software for aiding the best and multiplying the will.
 Author: Josh Purtell
 Author-email: Josh Purtell <josh@usesynth.ai>
--- synth_ai-0.1.0.dev32.dist-info/RECORD
+++ synth_ai-0.1.0.dev34.dist-info/RECORD
@@ -14,7 +14,7 @@ public_tests/test_structured.py,sha256=rftVwvYgMSHkRZM1WUJzga5Uvl9hmc5OpXzBshEXN
 public_tests/test_structured_outputs.py,sha256=9SFpH4RQ6nRcphBVmELRNSvhRjYaJBu_z-r6xqKAYpg,4213
 public_tests/test_synth_sdk.py,sha256=jqJHKpvBn9qj21P76z9onXfPg88jyUmBTKmdvCsQMk8,14885
 public_tests/test_text.py,sha256=UyPZ0ci-XBjK35tAeV0kN1X8Njf-0pHfEPZhsWDZ0-c,4072
-public_tests/test_tools.py,sha256=LXR78QWYssjtIQwUIJAn5O747tUDWbbPTGpf0VyojS8,10111
+public_tests/test_tools.py,sha256=QBwJ70dmPCm27BEwbNaZXXAf8DJxObsfwFX1rlBcYME,10904
 synth_ai/__init__.py,sha256=tX_fcK8u64BoPEboRa3dIKK_WpLy5KAxL2Ucl-l0xVg,147
 synth_ai/zyk/__init__.py,sha256=kGMD-drlBVdsyT-QFODMwaZUtxPCJ9mg58GKQUvFqo0,134
 synth_ai/zyk/lms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -41,11 +41,11 @@ synth_ai/zyk/lms/structured_outputs/rehabilitate.py,sha256=GuIhzsb7rTvwgn7f9I9om
 synth_ai/zyk/lms/tools/base.py,sha256=j7wYb1xAvaAm3qVrINphgUhGS-UjZmRpbouseQYgh7A,3228
 synth_ai/zyk/lms/vendors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synth_ai/zyk/lms/vendors/base.py,sha256=aK4PEtkMLt_o3qD22kW-x3HJUEKdIk06zlH4kX0VkAE,760
-synth_ai/zyk/lms/vendors/constants.py,sha256=zqCOyXZqo297wboR9EKVSkvpq6JCMSJyeso8HdZPKa4,102
-synth_ai/zyk/lms/vendors/openai_standard.py,sha256=Th_0QjmrJ7gemxsKnWmij46lIz4QWZOi7Du5OOiLUcc,11413
+synth_ai/zyk/lms/vendors/constants.py,sha256=3CCq45otD80yaLts5sFHvPgLCQNkcjHkc9cqOQ0zH4Y,320
+synth_ai/zyk/lms/vendors/openai_standard.py,sha256=oii23QtG_sh_V2yFV1ZMF7F0t9Q_mGL8yM_QxZnZ9QA,12091
 synth_ai/zyk/lms/vendors/retries.py,sha256=m-WvAiPix9ovnO2S-m53Td5VZDWBVBFuHuSK9--OVxw,38
 synth_ai/zyk/lms/vendors/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=QM4xuaigdVOjBuzkPyT-RSOtvT2wiKxAiHRfI77GYn8,13461
+synth_ai/zyk/lms/vendors/core/anthropic_api.py,sha256=vxANYEcU46n6flRJ4y5j4VrSA1ky4EXo8nWgYPLi3HU,13829
 synth_ai/zyk/lms/vendors/core/gemini_api.py,sha256=I1goLy5R8eBLrun2jpnD4o87NlmzWgPrfYaeu9RZN8M,11008
 synth_ai/zyk/lms/vendors/core/mistral_api.py,sha256=-EMPBEIoYxxDMxukmcmKL8AGAHPNYe4w-76gsPtmrhk,11860
 synth_ai/zyk/lms/vendors/core/openai_api.py,sha256=QkQqba851EEGf9n5H31-pJ6WexhTZkdPWQap0oGy2Ho,6713
@@ -56,11 +56,11 @@ synth_ai/zyk/lms/vendors/supported/deepseek.py,sha256=BElW0NGpkSA62wOqzzMtDw8XR3
 synth_ai/zyk/lms/vendors/supported/groq.py,sha256=Fbi7QvhdLx0F-VHO5PY-uIQlPR0bo3C9h1MvIOx8nz0,388
 synth_ai/zyk/lms/vendors/supported/ollama.py,sha256=K30VBFRTd7NYyPmyBVRZS2sm0UB651AHp9i3wd55W64,469
 synth_ai/zyk/lms/vendors/supported/together.py,sha256=Ni_jBqqGPN0PkkY-Ew64s3gNKk51k3FCpLSwlNhKbf0,342
-synth_ai-0.1.0.dev32.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
+synth_ai-0.1.0.dev34.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
 tests/test_agent.py,sha256=CjPPWuMWC_TzX1DkDald-bbAxgjXE-HPQvFhq2B--5k,22363
 tests/test_recursive_structured_outputs.py,sha256=Ne-9XwnOxN7eSpGbNHOpegR-sRj589I84T6y8Z_4QnA,5781
 tests/test_structured_outputs.py,sha256=J7sfbGZ7OeB5ONIKpcCTymyayNyAdFfGokC1bcUrSx0,3651
-synth_ai-0.1.0.dev32.dist-info/METADATA,sha256=zgAF8JSt2QckCC9M4iLt2vVn2i1c7N6MEUj4YEKTV9k,2702
-synth_ai-0.1.0.dev32.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-synth_ai-0.1.0.dev32.dist-info/top_level.txt,sha256=5GzJO9j-KbJ_4ppxhmCUa_qdhHM4-9cHHNU76yAI8do,42
-synth_ai-0.1.0.dev32.dist-info/RECORD,,
+synth_ai-0.1.0.dev34.dist-info/METADATA,sha256=LTm345VekKr11dhEH3ujbP1BYBUdfZ0FueYfr9M99Pw,2702
+synth_ai-0.1.0.dev34.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+synth_ai-0.1.0.dev34.dist-info/top_level.txt,sha256=5GzJO9j-KbJ_4ppxhmCUa_qdhHM4-9cHHNU76yAI8do,42
+synth_ai-0.1.0.dev34.dist-info/RECORD,,