synth-ai 0.1.0.dev38__py3-none-any.whl → 0.1.0.dev49__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- synth_ai/__init__.py +3 -1
- {synth_ai-0.1.0.dev38.dist-info → synth_ai-0.1.0.dev49.dist-info}/METADATA +12 -11
- synth_ai-0.1.0.dev49.dist-info/RECORD +6 -0
- {synth_ai-0.1.0.dev38.dist-info → synth_ai-0.1.0.dev49.dist-info}/WHEEL +1 -1
- synth_ai-0.1.0.dev49.dist-info/top_level.txt +1 -0
- private_tests/try_synth_sdk.py +0 -1
- public_tests/test_agent.py +0 -538
- public_tests/test_all_structured_outputs.py +0 -196
- public_tests/test_anthropic_structured_outputs.py +0 -0
- public_tests/test_deepseek_structured_outputs.py +0 -0
- public_tests/test_deepseek_tools.py +0 -64
- public_tests/test_gemini_output.py +0 -188
- public_tests/test_gemini_structured_outputs.py +0 -106
- public_tests/test_models.py +0 -183
- public_tests/test_openai_structured_outputs.py +0 -106
- public_tests/test_reasoning_effort.py +0 -75
- public_tests/test_reasoning_models.py +0 -92
- public_tests/test_recursive_structured_outputs.py +0 -180
- public_tests/test_structured.py +0 -137
- public_tests/test_structured_outputs.py +0 -109
- public_tests/test_synth_sdk.py +0 -384
- public_tests/test_text.py +0 -160
- public_tests/test_tools.py +0 -319
- synth_ai/zyk/__init__.py +0 -3
- synth_ai/zyk/lms/__init__.py +0 -0
- synth_ai/zyk/lms/caching/__init__.py +0 -0
- synth_ai/zyk/lms/caching/constants.py +0 -1
- synth_ai/zyk/lms/caching/dbs.py +0 -0
- synth_ai/zyk/lms/caching/ephemeral.py +0 -72
- synth_ai/zyk/lms/caching/handler.py +0 -142
- synth_ai/zyk/lms/caching/initialize.py +0 -13
- synth_ai/zyk/lms/caching/persistent.py +0 -83
- synth_ai/zyk/lms/config.py +0 -8
- synth_ai/zyk/lms/core/__init__.py +0 -0
- synth_ai/zyk/lms/core/all.py +0 -47
- synth_ai/zyk/lms/core/exceptions.py +0 -9
- synth_ai/zyk/lms/core/main.py +0 -314
- synth_ai/zyk/lms/core/vendor_clients.py +0 -85
- synth_ai/zyk/lms/cost/__init__.py +0 -0
- synth_ai/zyk/lms/cost/monitor.py +0 -1
- synth_ai/zyk/lms/cost/statefulness.py +0 -1
- synth_ai/zyk/lms/structured_outputs/__init__.py +0 -0
- synth_ai/zyk/lms/structured_outputs/handler.py +0 -442
- synth_ai/zyk/lms/structured_outputs/inject.py +0 -314
- synth_ai/zyk/lms/structured_outputs/rehabilitate.py +0 -187
- synth_ai/zyk/lms/tools/base.py +0 -104
- synth_ai/zyk/lms/vendors/__init__.py +0 -0
- synth_ai/zyk/lms/vendors/base.py +0 -31
- synth_ai/zyk/lms/vendors/constants.py +0 -22
- synth_ai/zyk/lms/vendors/core/__init__.py +0 -0
- synth_ai/zyk/lms/vendors/core/anthropic_api.py +0 -413
- synth_ai/zyk/lms/vendors/core/gemini_api.py +0 -306
- synth_ai/zyk/lms/vendors/core/mistral_api.py +0 -327
- synth_ai/zyk/lms/vendors/core/openai_api.py +0 -185
- synth_ai/zyk/lms/vendors/local/__init__.py +0 -0
- synth_ai/zyk/lms/vendors/local/ollama.py +0 -0
- synth_ai/zyk/lms/vendors/openai_standard.py +0 -375
- synth_ai/zyk/lms/vendors/retries.py +0 -3
- synth_ai/zyk/lms/vendors/supported/__init__.py +0 -0
- synth_ai/zyk/lms/vendors/supported/deepseek.py +0 -73
- synth_ai/zyk/lms/vendors/supported/groq.py +0 -16
- synth_ai/zyk/lms/vendors/supported/ollama.py +0 -14
- synth_ai/zyk/lms/vendors/supported/together.py +0 -11
- synth_ai-0.1.0.dev38.dist-info/RECORD +0 -67
- synth_ai-0.1.0.dev38.dist-info/top_level.txt +0 -4
- tests/test_agent.py +0 -538
- tests/test_recursive_structured_outputs.py +0 -180
- tests/test_structured_outputs.py +0 -100
- {synth_ai-0.1.0.dev38.dist-info → synth_ai-0.1.0.dev49.dist-info}/licenses/LICENSE +0 -0
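The listing shows the entire synth_ai.zyk subpackage (the LM wrapper, caching, structured outputs, tools, and vendor clients) disappearing between dev38 and dev49. A minimal sketch of how downstream code can cope, assuming dev49 provides no replacement import path (the fallback behavior here is an assumption, not part of either release):

# Hedged sketch: detect the removal at import time.
try:
    from synth_ai.zyk import LM  # present through synth-ai 0.1.0.dev38
except ImportError:
    LM = None  # removed in 0.1.0.dev49; pin synth-ai==0.1.0.dev38 to keep it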
public_tests/test_text.py
DELETED
@@ -1,160 +0,0 @@
-import openai
-
-from synth_ai.zyk import LM
-from synth_ai.zyk.lms.vendors.core.anthropic_api import AnthropicAPI
-from synth_ai.zyk.lms.vendors.core.gemini_api import GeminiAPI
-from synth_ai.zyk.lms.vendors.core.mistral_api import MistralAPI
-from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
-
-TEST_PROMPT = "What is 2+2? Answer with just the number."
-
-
-def test_openai_text():
-    client = OpenAIStandard(
-        sync_client=openai.OpenAI(),
-        async_client=openai.AsyncOpenAI(),
-        used_for_structured_outputs=False,
-        exceptions_to_retry=[],
-    )
-
-    response = client._hit_api_sync(
-        model="gpt-4o-mini",
-        messages=[{"role": "user", "content": TEST_PROMPT}],
-        lm_config={"temperature": 0},
-    )
-
-    assert response.raw_response.strip() == "4"
-
-
-def test_openai_text_lm():
-    lm = LM(
-        model_name="gpt-4o-mini",
-        formatting_model_name="gpt-4o-mini",
-        temperature=0,
-    )
-
-    response = lm.respond_sync(
-        system_message="",
-        user_message=TEST_PROMPT,
-    )
-
-    assert response.raw_response.strip() == "4"
-
-
-def test_anthropic_text():
-    client = AnthropicAPI(
-        used_for_structured_outputs=False,
-        exceptions_to_retry=[],
-    )
-
-    response = client._hit_api_sync(
-        model="claude-3-haiku-20240307",
-        messages=[
-            {
-                "role": "system",
-                "content": "You are a helpful assistant that provides direct answers.",
-            },
-            {"role": "user", "content": TEST_PROMPT},
-        ],
-        lm_config={"temperature": 0},
-    )
-
-    assert response.raw_response.strip() == "4"
-
-
-def test_anthropic_text_lm():
-    lm = LM(
-        model_name="claude-3-haiku-20240307",
-        formatting_model_name="claude-3-haiku-20240307",
-        temperature=0,
-    )
-
-    response = lm.respond_sync(
-        system_message="You are a helpful assistant that provides direct answers.",
-        user_message=TEST_PROMPT,
-    )
-
-    assert response.raw_response.strip() == "4"
-
-
-def test_gemini_text():
-    client = GeminiAPI(
-        used_for_structured_outputs=False,
-        exceptions_to_retry=[],
-    )
-
-    response = client._hit_api_sync(
-        model="gemini-2.0-flash",
-        messages=[
-            {
-                "role": "system",
-                "content": "You are a helpful assistant that provides direct answers.",
-            },
-            {"role": "user", "content": TEST_PROMPT},
-        ],
-        lm_config={"temperature": 0},
-    )
-
-    assert response.raw_response.strip() == "4"
-
-
-def test_gemini_text_lm():
-    lm = LM(
-        model_name="gemini-2.0-flash",
-        formatting_model_name="gemini-2.0-flash",
-        temperature=0,
-    )
-
-    response = lm.respond_sync(
-        system_message="You are a helpful assistant that provides direct answers.",
-        user_message=TEST_PROMPT,
-    )
-
-    assert response.raw_response.strip() == "4"
-
-
-def test_mistral_text():
-    client = MistralAPI(
-        used_for_structured_outputs=False,
-        exceptions_to_retry=[],
-    )
-
-    response = client._hit_api_sync(
-        model="mistral-small-latest",
-        messages=[
-            {
-                "role": "system",
-                "content": "You are a helpful assistant that provides direct answers.",
-            },
-            {"role": "user", "content": TEST_PROMPT},
-        ],
-        lm_config={"temperature": 0},
-    )
-
-    assert response.raw_response.strip() == "4"
-
-
-def test_mistral_text_lm():
-    lm = LM(
-        model_name="mistral-small-latest",
-        formatting_model_name="mistral-small-latest",
-        temperature=0,
-    )
-
-    response = lm.respond_sync(
-        system_message="You are a helpful assistant that provides direct answers.",
-        user_message=TEST_PROMPT,
-    )
-
-    assert response.raw_response.strip() == "4"
-
-
-if __name__ == "__main__":
-    test_openai_text_lm()
-    test_anthropic_text_lm()
-    test_gemini_text_lm()
-    test_mistral_text_lm()
-    test_openai_text()
-    test_anthropic_text()
-    test_gemini_text()
-    test_mistral_text()
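For reference, the core pattern these deleted tests exercised was: construct an LM with a model name plus a formatting model, then call respond_sync. A minimal sketch, assuming synth-ai==0.1.0.dev38 and an OPENAI_API_KEY in the environment (all identifiers come from the file above):

from synth_ai.zyk import LM

lm = LM(model_name="gpt-4o-mini", formatting_model_name="gpt-4o-mini", temperature=0)
response = lm.respond_sync(
    system_message="",
    user_message="What is 2+2? Answer with just the number.",
)
print(response.raw_response)  # the tests asserted this strips to "4"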
public_tests/test_tools.py
DELETED
@@ -1,319 +0,0 @@
-import json
-import os
-from typing import List
-
-import openai
-import pytest
-from pydantic import BaseModel, Field
-
-from synth_ai.zyk import LM
-from synth_ai.zyk.lms.tools.base import BaseTool
-from synth_ai.zyk.lms.vendors.core.anthropic_api import AnthropicAPI
-from synth_ai.zyk.lms.vendors.core.gemini_api import GeminiAPI
-from synth_ai.zyk.lms.vendors.core.mistral_api import MistralAPI
-from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
-
-
-class WeatherParams(BaseModel):
-    location: str
-    unit: str
-
-
-weather_tool = BaseTool(
-    name="get_weather",
-    description="Get current temperature for a given location.",
-    arguments=WeatherParams,
-    strict=True,
-)
-
-
-class TestToolArguments(BaseModel):
-    name: str = Field(..., description="The name of the person")
-    age: int = Field(..., description="The age of the person")
-    hobbies: List[str] = Field(
-        default_factory=list,
-        description="List of the person's hobbies (use empty list if not specified)",
-    )
-
-
-class TestTool(BaseTool):
-    name: str = "test_tool"
-    arguments: type = TestToolArguments
-    description: str = "Store information about a person including their name, age, and hobbies. Always include hobbies as a list, even if empty."
-
-
-# OpenAI Tests
-def test_weather_tool_oai_direct():
-    client = OpenAIStandard(
-        sync_client=openai.OpenAI(),
-        async_client=openai.AsyncOpenAI(),
-        used_for_structured_outputs=False,
-        exceptions_to_retry=[],
-    )
-
-    response = client._hit_api_sync(
-        model="gpt-4o-mini",
-        messages=[
-            {
-                "role": "user",
-                "content": "What's the weather in Paris? Use the tools and explain your reasoning. Units local to the country, please!",
-            }
-        ],
-        tools=[weather_tool],
-        lm_config={
-            "temperature": 0,
-        },
-    )
-
-    assert response.tool_calls is not None
-    assert len(response.tool_calls) > 0
-    assert response.tool_calls[0]["function"]["name"] == "get_weather"
-    assert "arguments" in response.tool_calls[0]["function"]
-    assert isinstance(response.tool_calls[0]["function"]["arguments"], str)
-
-
-def test_weather_tool_oai_lm():
-    lm = LM(
-        model_name="gpt-4o-mini", formatting_model_name="gpt-4o-mini", temperature=0
-    )
-
-    response = lm.respond_sync(
-        system_message="",
-        user_message="What's the weather in Paris? Use the tools and explain your reasoning. Units local to the country, please!",
-        tools=[weather_tool],
-    )
-
-    assert response.tool_calls is not None
-    assert len(response.tool_calls) > 0
-    assert response.tool_calls[0]["function"]["name"] == "get_weather"
-    assert "arguments" in response.tool_calls[0]["function"]
-    assert isinstance(response.tool_calls[0]["function"]["arguments"], str)
-
-
-# Anthropic Tests
-def test_weather_tool_anthropic_direct():
-    client = AnthropicAPI(
-        used_for_structured_outputs=False,
-        exceptions_to_retry=[],
-    )
-
-    response = client._hit_api_sync(
-        model="claude-3-haiku-20240307",
-        messages=[
-            {
-                "role": "system",
-                "content": "You are a helpful assistant that uses tools when appropriate.",
-            },
-            {
-                "role": "user",
-                "content": "What's the weather in Paris? Use the tools and explain your reasoning. Units local to the country, please!",
-            },
-        ],
-        tools=[weather_tool],
-        lm_config={
-            "temperature": 0,
-        },
-    )
-
-    assert response.tool_calls is not None
-    assert len(response.tool_calls) > 0
-    assert response.tool_calls[0]["function"]["name"] == "get_weather"
-    assert "arguments" in response.tool_calls[0]["function"]
-    arguments = response.tool_calls[0]["function"]["arguments"]
-    assert isinstance(arguments, str)
-    assert "location" in arguments
-    assert "Paris" in arguments
-
-
-def test_weather_tool_anthropic_lm():
-    lm = LM(
-        model_name="claude-3-haiku-20240307",
-        formatting_model_name="claude-3-haiku-20240307",
-        temperature=0,
-    )
-
-    response = lm.respond_sync(
-        system_message="You are a helpful assistant that uses tools when appropriate.",
-        user_message="What's the weather in Paris? Use the tools and explain your reasoning. Units local to the country, please!",
-        tools=[weather_tool],
-    )
-
-    assert response.tool_calls is not None
-    assert len(response.tool_calls) > 0
-    assert response.tool_calls[0]["function"]["name"] == "get_weather"
-    assert "arguments" in response.tool_calls[0]["function"]
-    arguments = response.tool_calls[0]["function"]["arguments"]
-    assert isinstance(arguments, str)
-    assert "location" in arguments
-    assert "Paris" in arguments
-
-
-def test_weather_tool_anthropic_35():
-    lm = LM(
-        model_name="claude-3-5-sonnet-latest",
-        formatting_model_name="claude-3-5-sonnet-20241022",
-        temperature=0,
-    )
-
-    response = lm.respond_sync(
-        system_message="You are a helpful assistant that uses tools when appropriate.",
-        user_message="What's the weather in Paris? Use the tools and explain your reasoning. Units local to the country, please!",
-        tools=[weather_tool],
-    )
-
-    assert response.tool_calls is not None
-    assert len(response.tool_calls) > 0
-    assert response.tool_calls[0]["function"]["name"] == "get_weather"
-    assert "arguments" in response.tool_calls[0]["function"]
-    arguments = response.tool_calls[0]["function"]["arguments"]
-    assert isinstance(arguments, str)
-
-
-# Gemini Tests
-def test_weather_tool_gemini_direct():
-    client = GeminiAPI(
-        used_for_structured_outputs=False,
-        exceptions_to_retry=[],
-    )
-
-    response = client._hit_api_sync(
-        model="gemini-2.0-flash",
-        messages=[
-            {
-                "role": "system",
-                "content": "You are a helpful assistant that uses tools when appropriate.",
-            },
-            {
-                "role": "user",
-                "content": "What's the weather in Paris? Use the tools and explain your reasoning. Units local to the country, please!",
-            },
-        ],
-        tools=[weather_tool],
-        lm_config={
-            "temperature": 0,
-            "tool_config": {"function_calling_config": {"mode": "any"}},
-        },
-    )
-
-    assert response.tool_calls is not None
-
-
-def test_weather_tool_gemini_lm():
-    lm = LM(
-        model_name="gemini-2.0-flash",
-        formatting_model_name="gemini-2.0-flash",
-        temperature=0,
-    )
-
-    lm.lm_config["tool_config"] = {"function_calling_config": {"mode": "ALWAYS"}}
-
-    response = lm.respond_sync(
-        system_message="You are a helpful assistant that uses tools when appropriate.",
-        user_message="What's the weather in Paris? Use the tools and explain your reasoning. Units local to the country, please!",
-        tools=[weather_tool],
-    )
-
-    assert response.tool_calls is not None
-
-
-# Mistral Tests
-@pytest.mark.asyncio
-async def test_mistral_tool_async():
-    if not os.getenv("MISTRAL_API_KEY"):
-        pytest.skip("MISTRAL_API_KEY not set")
-
-    client = MistralAPI()
-    tool = TestTool()
-    messages = [
-        {
-            "role": "system",
-            "content": "You are a helpful assistant that stores information about people using the test_tool function. ",
-        },
-        {
-            "role": "user",
-            "content": "Please store information for John who is 30 years old.",
-        },
-    ]
-
-    response = await client._hit_api_async(
-        model="mistral-small-latest",
-        messages=messages,
-        lm_config={"temperature": 0},
-        tools=[tool],
-    )
-
-    assert response.tool_calls is not None, "No tool calls were made"
-    assert len(response.tool_calls) == 1, "Expected exactly one tool call"
-    tool_call = response.tool_calls[0]
-    assert tool_call["type"] == "function", "Tool call type should be function"
-    assert tool_call["function"]["name"] == "test_tool", "Wrong function called"
-
-    args = json.loads(tool_call["function"]["arguments"])
-    assert args["name"] == "John", "Wrong name in arguments"
-    assert args["age"] == 30, "Wrong age in arguments"
-    assert "hobbies" in args, "Missing hobbies field"
-    assert isinstance(args["hobbies"], list), "Hobbies should be a list"
-
-
-def test_mistral_tool_sync():
-    if not os.getenv("MISTRAL_API_KEY"):
-        pytest.skip("MISTRAL_API_KEY not set")
-
-    client = MistralAPI()
-    tool = TestTool()
-    messages = [
-        {
-            "role": "system",
-            "content": "You are a helpful assistant that stores information about people using the test_tool function. When given information about a person, always use the test_tool function to store it. Always include hobbies as an empty list if no hobbies are mentioned.",
-        },
-        {
-            "role": "user",
-            "content": "Please store information for John who is 30 years old.",
-        },
-    ]
-
-    response = client._hit_api_sync(
-        model="mistral-small-latest",
-        messages=messages,
-        lm_config={"temperature": 0},
-        tools=[tool],
-    )
-
-    assert response.tool_calls is not None, "No tool calls were made"
-    assert len(response.tool_calls) == 1, "Expected exactly one tool call"
-    tool_call = response.tool_calls[0]
-    assert tool_call["type"] == "function", "Tool call type should be function"
-    assert tool_call["function"]["name"] == "test_tool", "Wrong function called"
-
-    args = json.loads(tool_call["function"]["arguments"])
-    assert args["name"] == "John", "Wrong name in arguments"
-    assert args["age"] == 30, "Wrong age in arguments"
-    assert "hobbies" in args, "Missing hobbies field"
-    assert isinstance(args["hobbies"], list), "Hobbies should be a list"
-
-
-def test_mistral_tool_schema():
-    tool = TestTool()
-    schema = tool.to_mistral_tool()
-
-    assert schema["type"] == "function", "Missing type field"
-    assert "function" in schema, "Missing function wrapper"
-    function = schema["function"]
-    assert function["name"] == "test_tool"
-    assert (
-        function["description"]
-        == "Store information about a person including their name, age, and hobbies. Always include hobbies as a list, even if empty."
-    )
-    assert "parameters" in function
-    assert not function["parameters"].get("additionalProperties", True)
-
-    params = function["parameters"]
-    assert "name" in params["properties"]
-    assert params["properties"]["name"]["type"] == "string"
-    assert "age" in params["properties"]
-    assert params["properties"]["age"]["type"] == "integer"
-    assert "hobbies" in params["properties"]
-    assert params["properties"]["hobbies"]["type"] == "array"
-    assert params["properties"]["hobbies"]["items"]["type"] == "string"
-
-
-if __name__ == "__main__":
-    test_weather_tool_oai_lm()
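The tool tests above share one consumption pattern: tool_calls carries a list of function-call dicts whose "arguments" field is a JSON string. A minimal sketch of decoding one against its Pydantic model, using only names defined in the file above and valid only against dev38:

# Hedged sketch: parse a tool call the way the deleted tests did; `response`
# is any of the respond_sync / _hit_api_sync results shown above.
import json

tool_call = response.tool_calls[0]
assert tool_call["function"]["name"] == "test_tool"
args = TestToolArguments(**json.loads(tool_call["function"]["arguments"]))
print(args.name, args.age, args.hobbies)  # validated, typed fields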
synth_ai/zyk/__init__.py
DELETED
synth_ai/zyk/lms/__init__.py
DELETED
File without changes
synth_ai/zyk/lms/caching/__init__.py
DELETED
File without changes
synth_ai/zyk/lms/caching/constants.py
DELETED
@@ -1 +0,0 @@
-DISKCACHE_SIZE_LIMIT = 10 * 1024 * 1024 * 1024  # 10GB
synth_ai/zyk/lms/caching/dbs.py
DELETED
File without changes
synth_ai/zyk/lms/caching/ephemeral.py
DELETED
@@ -1,72 +0,0 @@
-import os
-from dataclasses import dataclass
-from typing import Optional, Union
-
-from diskcache import Cache
-from pydantic import BaseModel
-
-from synth_ai.zyk.lms.caching.constants import DISKCACHE_SIZE_LIMIT
-from synth_ai.zyk.lms.vendors.base import BaseLMResponse
-
-
-@dataclass
-class EphemeralCache:
-    def __init__(self, fast_cache_dir: str = ".cache/ephemeral_cache"):
-        os.makedirs(fast_cache_dir, exist_ok=True)
-        self.fast_cache = Cache(fast_cache_dir, size_limit=DISKCACHE_SIZE_LIMIT)
-
-    def hit_cache(
-        self, key: str, response_model: Optional[BaseModel] = None
-    ) -> Optional[BaseLMResponse]:
-        if key not in self.fast_cache:
-            return None
-
-        try:
-            cache_data = self.fast_cache[key]
-        except AttributeError:
-            return None
-
-        if not isinstance(cache_data, dict):
-            return BaseLMResponse(
-                raw_response=cache_data, structured_output=None, tool_calls=None
-            )
-
-        raw_response = cache_data.get("raw_response")
-        tool_calls = cache_data.get("tool_calls")
-        structured_output = cache_data.get("structured_output")
-
-        if response_model and structured_output:
-            structured_output = response_model(**structured_output)
-
-        return BaseLMResponse(
-            raw_response=raw_response,
-            structured_output=structured_output,
-            tool_calls=tool_calls,
-        )
-
-    def add_to_cache(self, key: str, response: Union[BaseLMResponse, str]) -> None:
-        if isinstance(response, str):
-            self.fast_cache[key] = response
-            return
-
-        if isinstance(response, BaseLMResponse):
-            cache_data = {
-                "raw_response": response.raw_response
-                if response.raw_response is not None
-                else None,
-                "tool_calls": response.tool_calls
-                if response.tool_calls is not None
-                else None,
-                "structured_output": (
-                    response.structured_output.model_dump()
-                    if response.structured_output is not None
-                    else None
-                ),
-            }
-            self.fast_cache[key] = cache_data
-            return
-
-        raise ValueError(f"Invalid response type: {type(response)}")
-
-    def close(self):
-        self.fast_cache.close()
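A minimal usage sketch for the EphemeralCache above, exercising both branches its code defines: plain strings round-trip as a BaseLMResponse with only raw_response set. The cache directory name is arbitrary, and this works only against dev38:

from synth_ai.zyk.lms.caching.ephemeral import EphemeralCache

cache = EphemeralCache(fast_cache_dir=".cache/demo")
cache.add_to_cache("what-is-2+2", "4")  # str branch of add_to_cache
hit = cache.hit_cache("what-is-2+2")    # non-dict branch of hit_cache
assert hit is not None and hit.raw_response == "4"
cache.close()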