synth-ai 0.1.0.dev10__tar.gz → 0.1.0.dev11__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: the registry flags this version of synth-ai as possibly problematic.

Files changed (60)
  1. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/PKG-INFO +3 -1
  2. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/public_tests/test_all_structured_outputs.py +6 -0
  3. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/pyproject.toml +3 -1
  4. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/setup.py +1 -1
  5. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/core/all.py +6 -0
  6. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/core/vendor_clients.py +7 -0
  7. synth_ai-0.1.0.dev11/synth_ai/zyk/lms/vendors/core/mistral_api.py +221 -0
  8. synth_ai-0.1.0.dev11/synth_ai/zyk/lms/vendors/supported/ollama.py +14 -0
  9. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai.egg-info/PKG-INFO +3 -1
  10. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai.egg-info/SOURCES.txt +2 -0
  11. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai.egg-info/requires.txt +2 -0
  12. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/LICENSE +0 -0
  13. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/README.md +0 -0
  14. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/private_tests/try_synth_sdk.py +0 -0
  15. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/public_tests/test_agent.py +0 -0
  16. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/public_tests/test_recursive_structured_outputs.py +0 -0
  17. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/public_tests/test_structured_outputs.py +0 -0
  18. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/public_tests/test_synth_sdk.py +0 -0
  19. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/setup.cfg +0 -0
  20. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/__init__.py +0 -0
  21. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/__init__.py +0 -0
  22. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/__init__.py +0 -0
  23. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/caching/__init__.py +0 -0
  24. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/caching/constants.py +0 -0
  25. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/caching/dbs.py +0 -0
  26. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/caching/ephemeral.py +0 -0
  27. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/caching/handler.py +0 -0
  28. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/caching/initialize.py +0 -0
  29. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/caching/persistent.py +0 -0
  30. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/config.py +0 -0
  31. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/core/__init__.py +0 -0
  32. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/core/exceptions.py +0 -0
  33. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/core/main.py +0 -0
  34. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/cost/__init__.py +0 -0
  35. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/cost/monitor.py +0 -0
  36. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/cost/statefulness.py +0 -0
  37. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/structured_outputs/__init__.py +0 -0
  38. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/structured_outputs/handler.py +0 -0
  39. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/structured_outputs/inject.py +0 -0
  40. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/structured_outputs/rehabilitate.py +0 -0
  41. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/__init__.py +0 -0
  42. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/base.py +0 -0
  43. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/constants.py +0 -0
  44. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/core/__init__.py +0 -0
  45. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/core/anthropic_api.py +0 -0
  46. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/core/gemini_api.py +0 -0
  47. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/core/openai_api.py +0 -0
  48. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/local/__init__.py +0 -0
  49. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/local/ollama.py +0 -0
  50. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/openai_standard.py +0 -0
  51. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/retries.py +0 -0
  52. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/supported/__init__.py +0 -0
  53. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/supported/deepseek.py +0 -0
  54. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/supported/groq.py +0 -0
  55. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai/zyk/lms/vendors/supported/together.py +0 -0
  56. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai.egg-info/dependency_links.txt +0 -0
  57. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/synth_ai.egg-info/top_level.txt +0 -0
  58. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/tests/test_agent.py +0 -0
  59. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/tests/test_recursive_structured_outputs.py +0 -0
  60. {synth_ai-0.1.0.dev10 → synth_ai-0.1.0.dev11}/tests/test_structured_outputs.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: synth-ai
-Version: 0.1.0.dev10
+Version: 0.1.0.dev11
 Summary: Software for aiding the best and multiplying the will.
 Home-page: https://github.com/synth-laboratories/synth-ai
 Author: Josh Purtell
@@ -49,6 +49,8 @@ Requires-Dist: datasets>=3.2.0
 Requires-Dist: groq>=0.18.0
 Requires-Dist: pytest-timeout>=2.3.1
 Requires-Dist: lock>=2018.3.25.2110
+Requires-Dist: ollama>=0.4.7
+Requires-Dist: mistralai>=1.5.0
 Dynamic: author
 Dynamic: home-page
 
public_tests/test_all_structured_outputs.py
@@ -68,6 +68,12 @@ def models():
             temperature=0.1,
             structured_output_mode="stringified_json",
         ),
+        "mistral-small-latest": LM(
+            model_name="mistral-small-latest",
+            formatting_model_name="gpt-4o-mini",
+            temperature=0.1,
+            structured_output_mode="stringified_json",
+        ),
     }
 
 
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "synth-ai"
-version = "0.1.0.dev10"
+version = "0.1.0.dev11"
 description = "Software for aiding the best and multiplying the will."
 readme = "README.md"
 authors = [{ name = "Josh Purtell", email = "josh@usesynth.ai" }]
@@ -26,6 +26,8 @@ dependencies = [
     "groq>=0.18.0",
     "pytest-timeout>=2.3.1",
     "lock>=2018.3.25.2110",
+    "ollama>=0.4.7",
+    "mistralai>=1.5.0",
 ]
 requires-python = ">=3.10"
 
setup.py
@@ -2,7 +2,7 @@ from setuptools import find_packages, setup
 
 setup(
     name="synth-ai",
-    version="0.1.0.dev10",
+    version="0.1.0.dev11",
     packages=find_packages(),
     install_requires=[
         "openai",
synth_ai/zyk/lms/core/all.py
@@ -7,6 +7,7 @@ from synth_ai.zyk.lms.vendors.core.openai_api import (
 from synth_ai.zyk.lms.vendors.supported.deepseek import DeepSeekAPI
 from synth_ai.zyk.lms.vendors.supported.together import TogetherAPI
 from synth_ai.zyk.lms.vendors.supported.groq import GroqAPI
+from synth_ai.zyk.lms.vendors.core.mistral_api import MistralAPI
 
 
 class OpenAIClient(OpenAIPrivate):
@@ -39,3 +40,8 @@ class TogetherClient(TogetherAPI):
 class GroqClient(GroqAPI):
     def __init__(self):
         super().__init__()
+
+
+class MistralClient(MistralAPI):
+    def __init__(self):
+        super().__init__()
synth_ai/zyk/lms/core/vendor_clients.py
@@ -9,6 +9,7 @@ from synth_ai.zyk.lms.core.all import (
     OpenAIStructuredOutputClient,
     TogetherClient,
     GroqAPI,
+    MistralAPI,
 )
 
 openai_naming_regexes: List[Pattern] = [
@@ -35,6 +36,10 @@ groq_naming_regexes: List[Pattern] = [
     re.compile(r"^llama-3.1-8b-instant$"),
 ]
 
+mistral_naming_regexes: List[Pattern] = [
+    re.compile(r"^mistral-.*$"),
+]
+
 
 def get_client(
     model_name: str,
@@ -64,5 +69,7 @@ def get_client(
         return TogetherClient()
     elif any(regex.match(model_name) for regex in groq_naming_regexes):
         return GroqAPI()
+    elif any(regex.match(model_name) for regex in mistral_naming_regexes):
+        return MistralAPI()
    else:
        raise ValueError(f"Invalid model name: {model_name}")
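
Routing note: with the regex added above, any model name beginning with "mistral-" is dispatched to the Mistral vendor, checked after the Groq patterns. A standalone sketch of that dispatch; the pattern is copied from the diff, the model names below are illustrative examples:

import re

# Pattern copied from the diff above; the model names are examples only.
mistral_naming_regexes = [re.compile(r"^mistral-.*$")]

for name in ["mistral-small-latest", "mistral-large-latest", "gpt-4o"]:
    routed = any(regex.match(name) for regex in mistral_naming_regexes)
    print(f"{name} -> {'MistralAPI' if routed else 'other vendor'}")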
synth_ai/zyk/lms/vendors/core/mistral_api.py (new file)
@@ -0,0 +1,221 @@
+import json
+import os
+from typing import Any, Dict, List, Tuple, Type
+
+import pydantic
+from mistralai import Mistral  # use Mistral as both sync and async client
+from pydantic import BaseModel
+
+from synth_ai.zyk.lms.caching.initialize import get_cache_handler
+from synth_ai.zyk.lms.vendors.base import VendorBase
+from synth_ai.zyk.lms.vendors.constants import SPECIAL_BASE_TEMPS
+from synth_ai.zyk.lms.vendors.core.openai_api import OpenAIStructuredOutputClient
+from synth_ai.zyk.lms.vendors.retries import BACKOFF_TOLERANCE, backoff
+
+# Since the mistralai package doesn't expose an exceptions module,
+# we fallback to catching all Exceptions for retry.
+MISTRAL_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (Exception,)
+
+
+class MistralAPI(VendorBase):
+    used_for_structured_outputs: bool = True
+    exceptions_to_retry: Tuple = MISTRAL_EXCEPTIONS_TO_RETRY
+    _openai_fallback: Any
+
+    def __init__(
+        self,
+        exceptions_to_retry: Tuple[Type[Exception], ...] = MISTRAL_EXCEPTIONS_TO_RETRY,
+        used_for_structured_outputs: bool = False,
+    ):
+        self.used_for_structured_outputs = used_for_structured_outputs
+        self.exceptions_to_retry = exceptions_to_retry
+        self._openai_fallback = None
+
+    @backoff.on_exception(
+        backoff.expo,
+        MISTRAL_EXCEPTIONS_TO_RETRY,
+        max_tries=BACKOFF_TOLERANCE,
+        on_giveup=lambda e: print(e),
+    )
+    async def _hit_api_async(
+        self,
+        model: str,
+        messages: List[Dict[str, Any]],
+        lm_config: Dict[str, Any],
+        use_ephemeral_cache_only: bool = False,
+    ) -> str:
+        assert (
+            lm_config.get("response_model", None) is None
+        ), "response_model is not supported for standard calls"
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+        cache_result = used_cache_handler.hit_managed_cache(
+            model, messages, lm_config=lm_config
+        )
+        if cache_result:
+            return (
+                cache_result["response"]
+                if isinstance(cache_result, dict)
+                else cache_result
+            )
+
+        mistral_messages = [
+            {"role": msg["role"], "content": msg["content"]} for msg in messages
+        ]
+        async with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as client:
+            response = await client.chat.complete_async(
+                model=model,
+                messages=mistral_messages,
+                max_tokens=lm_config.get("max_tokens", 4096),
+                temperature=lm_config.get(
+                    "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
+                ),
+                stream=False,
+            )
+        api_result = response.choices[0].message.content
+        used_cache_handler.add_to_managed_cache(
+            model, messages, lm_config=lm_config, output=api_result
+        )
+        return api_result
+
+    @backoff.on_exception(
+        backoff.expo,
+        MISTRAL_EXCEPTIONS_TO_RETRY,
+        max_tries=BACKOFF_TOLERANCE,
+        on_giveup=lambda e: print(e),
+    )
+    def _hit_api_sync(
+        self,
+        model: str,
+        messages: List[Dict[str, Any]],
+        lm_config: Dict[str, Any],
+        use_ephemeral_cache_only: bool = False,
+    ) -> str:
+        assert (
+            lm_config.get("response_model", None) is None
+        ), "response_model is not supported for standard calls"
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
+        cache_result = used_cache_handler.hit_managed_cache(
+            model, messages, lm_config=lm_config
+        )
+        if cache_result:
+            return (
+                cache_result["response"]
+                if isinstance(cache_result, dict)
+                else cache_result
+            )
+
+        mistral_messages = [
+            {"role": msg["role"], "content": msg["content"]} for msg in messages
+        ]
+        with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as client:
+            response = client.chat.complete(
+                model=model,
+                messages=mistral_messages,
+                max_tokens=lm_config.get("max_tokens", 4096),
+                temperature=lm_config.get(
+                    "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
+                ),
+                stream=False,
+            )
+        api_result = response.choices[0].message.content
+        used_cache_handler.add_to_managed_cache(
+            model, messages, lm_config=lm_config, output=api_result
+        )
+        return api_result
+
+    async def _hit_api_async_structured_output(
+        self,
+        model: str,
+        messages: List[Dict[str, Any]],
+        response_model: BaseModel,
+        temperature: float,
+        use_ephemeral_cache_only: bool = False,
+    ) -> Any:
+        try:
+            mistral_messages = [
+                {"role": msg["role"], "content": msg["content"]} for msg in messages
+            ]
+            async with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as client:
+                response = await client.chat.complete_async(
+                    model=model,
+                    messages=mistral_messages,
+                    max_tokens=4096,
+                    temperature=temperature,
+                    stream=False,
+                )
+            result = response.choices[0].message.content
+            parsed = json.loads(result)
+            return response_model(**parsed)
+        except (json.JSONDecodeError, pydantic.ValidationError):
+            if self._openai_fallback is None:
+                self._openai_fallback = OpenAIStructuredOutputClient()
+            return await self._openai_fallback._hit_api_async_structured_output(
+                model="gpt-4o",
+                messages=messages,
+                response_model=response_model,
+                temperature=temperature,
+                use_ephemeral_cache_only=use_ephemeral_cache_only,
+            )
+
+    def _hit_api_sync_structured_output(
+        self,
+        model: str,
+        messages: List[Dict[str, Any]],
+        response_model: BaseModel,
+        temperature: float,
+        use_ephemeral_cache_only: bool = False,
+    ) -> Any:
+        try:
+            mistral_messages = [
+                {"role": msg["role"], "content": msg["content"]} for msg in messages
+            ]
+            with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as client:
+                response = client.chat.complete(
+                    model=model,
+                    messages=mistral_messages,
+                    max_tokens=4096,
+                    temperature=temperature,
+                    stream=False,
+                )
+            result = response.choices[0].message.content
+            parsed = json.loads(result)
+            return response_model(**parsed)
+        except (json.JSONDecodeError, pydantic.ValidationError):
+            print("WARNING - Falling back to OpenAI - THIS IS SLOW")
+            if self._openai_fallback is None:
+                self._openai_fallback = OpenAIStructuredOutputClient()
+            return self._openai_fallback._hit_api_sync_structured_output(
+                model="gpt-4o",
+                messages=messages,
+                response_model=response_model,
+                temperature=temperature,
+                use_ephemeral_cache_only=use_ephemeral_cache_only,
+            )
+
+
+if __name__ == "__main__":
+    import asyncio
+
+    from pydantic import BaseModel
+
+    class TestModel(BaseModel):
+        name: str
+
+    client = MistralAPI(used_for_structured_outputs=True, exceptions_to_retry=[])
+    import time
+
+    t = time.time()
+
+    async def run_async():
+        response = await client._hit_api_async_structured_output(
+            model="mistral-large-latest",
+            messages=[{"role": "user", "content": "What is the capital of the moon?"}],
+            response_model=TestModel,
+            temperature=0.0,
+        )
+        print(response)
+        return response
+
+    response = asyncio.run(run_async())
+    t2 = time.time()
+    print(f"Got {len(response.name)} chars in {t2-t} seconds")
synth_ai/zyk/lms/vendors/supported/ollama.py (new file)
@@ -0,0 +1,14 @@
+from openai import OpenAI, AsyncOpenAI
+from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
+
+
+class OllamaAPI(OpenAIStandard):
+    def __init__(self):
+        self.sync_client = OpenAI(
+            base_url="http://localhost:11434/v1",
+            api_key="ollama",  # required, but unused
+        )
+        self.async_client = AsyncOpenAI(
+            base_url="http://localhost:11434/v1",
+            api_key="ollama",  # required, but unused
+        )
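
This works because Ollama exposes an OpenAI-compatible endpoint at localhost:11434/v1; the api_key is mandatory in the OpenAI client but ignored by Ollama. A minimal sketch of the same idea used directly, assuming a local Ollama server is running with a model already pulled (the model name "llama3.2" is an example, not from this diff):

from openai import OpenAI

# Talk to a local Ollama server through its OpenAI-compatible endpoint.
client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")
reply = client.chat.completions.create(
    model="llama3.2",  # example: any model already pulled into Ollama
    messages=[{"role": "user", "content": "Say hi in one word."}],
)
print(reply.choices[0].message.content)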
synth_ai.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: synth-ai
-Version: 0.1.0.dev10
+Version: 0.1.0.dev11
 Summary: Software for aiding the best and multiplying the will.
 Home-page: https://github.com/synth-laboratories/synth-ai
 Author: Josh Purtell
@@ -49,6 +49,8 @@ Requires-Dist: datasets>=3.2.0
 Requires-Dist: groq>=0.18.0
 Requires-Dist: pytest-timeout>=2.3.1
 Requires-Dist: lock>=2018.3.25.2110
+Requires-Dist: ollama>=0.4.7
+Requires-Dist: mistralai>=1.5.0
 Dynamic: author
 Dynamic: home-page
 
synth_ai.egg-info/SOURCES.txt
@@ -44,12 +44,14 @@ synth_ai/zyk/lms/vendors/retries.py
 synth_ai/zyk/lms/vendors/core/__init__.py
 synth_ai/zyk/lms/vendors/core/anthropic_api.py
 synth_ai/zyk/lms/vendors/core/gemini_api.py
+synth_ai/zyk/lms/vendors/core/mistral_api.py
 synth_ai/zyk/lms/vendors/core/openai_api.py
 synth_ai/zyk/lms/vendors/local/__init__.py
 synth_ai/zyk/lms/vendors/local/ollama.py
 synth_ai/zyk/lms/vendors/supported/__init__.py
 synth_ai/zyk/lms/vendors/supported/deepseek.py
 synth_ai/zyk/lms/vendors/supported/groq.py
+synth_ai/zyk/lms/vendors/supported/ollama.py
 synth_ai/zyk/lms/vendors/supported/together.py
 tests/test_agent.py
 tests/test_recursive_structured_outputs.py
synth_ai.egg-info/requires.txt
@@ -12,3 +12,5 @@ datasets>=3.2.0
 groq>=0.18.0
 pytest-timeout>=2.3.1
 lock>=2018.3.25.2110
+ollama>=0.4.7
+mistralai>=1.5.0