synth-ai 0.2.4.dev6__py3-none-any.whl → 0.2.4.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (229)
  1. synth_ai/__init__.py +18 -9
  2. synth_ai/cli/__init__.py +10 -5
  3. synth_ai/cli/balance.py +22 -17
  4. synth_ai/cli/calc.py +2 -3
  5. synth_ai/cli/demo.py +3 -5
  6. synth_ai/cli/legacy_root_backup.py +58 -32
  7. synth_ai/cli/man.py +22 -19
  8. synth_ai/cli/recent.py +9 -8
  9. synth_ai/cli/root.py +58 -13
  10. synth_ai/cli/status.py +13 -6
  11. synth_ai/cli/traces.py +45 -21
  12. synth_ai/cli/watch.py +40 -37
  13. synth_ai/config/base_url.py +1 -3
  14. synth_ai/core/experiment.py +1 -2
  15. synth_ai/environments/__init__.py +2 -6
  16. synth_ai/environments/environment/artifacts/base.py +3 -1
  17. synth_ai/environments/environment/db/sqlite.py +1 -1
  18. synth_ai/environments/environment/registry.py +19 -20
  19. synth_ai/environments/environment/resources/sqlite.py +2 -3
  20. synth_ai/environments/environment/rewards/core.py +3 -2
  21. synth_ai/environments/environment/tools/__init__.py +6 -4
  22. synth_ai/environments/examples/crafter_classic/__init__.py +1 -1
  23. synth_ai/environments/examples/crafter_classic/engine.py +13 -13
  24. synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +1 -0
  25. synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +2 -1
  26. synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +2 -1
  27. synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +3 -2
  28. synth_ai/environments/examples/crafter_classic/environment.py +16 -15
  29. synth_ai/environments/examples/crafter_classic/taskset.py +2 -2
  30. synth_ai/environments/examples/crafter_classic/trace_hooks_v3.py +2 -3
  31. synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +2 -1
  32. synth_ai/environments/examples/crafter_custom/crafter/__init__.py +2 -2
  33. synth_ai/environments/examples/crafter_custom/crafter/config.py +2 -2
  34. synth_ai/environments/examples/crafter_custom/crafter/env.py +1 -5
  35. synth_ai/environments/examples/crafter_custom/crafter/objects.py +1 -2
  36. synth_ai/environments/examples/crafter_custom/crafter/worldgen.py +1 -2
  37. synth_ai/environments/examples/crafter_custom/dataset_builder.py +5 -5
  38. synth_ai/environments/examples/crafter_custom/environment.py +13 -13
  39. synth_ai/environments/examples/crafter_custom/run_dataset.py +5 -5
  40. synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +2 -2
  41. synth_ai/environments/examples/enron/art_helpers/local_email_db.py +5 -4
  42. synth_ai/environments/examples/enron/art_helpers/types_enron.py +2 -1
  43. synth_ai/environments/examples/enron/engine.py +18 -14
  44. synth_ai/environments/examples/enron/environment.py +12 -11
  45. synth_ai/environments/examples/enron/taskset.py +7 -7
  46. synth_ai/environments/examples/minigrid/__init__.py +6 -6
  47. synth_ai/environments/examples/minigrid/engine.py +6 -6
  48. synth_ai/environments/examples/minigrid/environment.py +6 -6
  49. synth_ai/environments/examples/minigrid/puzzle_loader.py +3 -2
  50. synth_ai/environments/examples/minigrid/taskset.py +13 -13
  51. synth_ai/environments/examples/nethack/achievements.py +1 -1
  52. synth_ai/environments/examples/nethack/engine.py +8 -7
  53. synth_ai/environments/examples/nethack/environment.py +10 -9
  54. synth_ai/environments/examples/nethack/helpers/__init__.py +8 -9
  55. synth_ai/environments/examples/nethack/helpers/action_mapping.py +1 -1
  56. synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +2 -1
  57. synth_ai/environments/examples/nethack/helpers/observation_utils.py +1 -1
  58. synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +3 -4
  59. synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +6 -5
  60. synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +5 -5
  61. synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +7 -6
  62. synth_ai/environments/examples/nethack/taskset.py +5 -5
  63. synth_ai/environments/examples/red/engine.py +9 -8
  64. synth_ai/environments/examples/red/engine_helpers/reward_components.py +2 -1
  65. synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +7 -7
  66. synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +2 -1
  67. synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +2 -1
  68. synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +2 -1
  69. synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +2 -1
  70. synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +2 -1
  71. synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +2 -1
  72. synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +2 -1
  73. synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +2 -1
  74. synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +2 -1
  75. synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +2 -1
  76. synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +2 -1
  77. synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +3 -2
  78. synth_ai/environments/examples/red/engine_helpers/state_extraction.py +2 -1
  79. synth_ai/environments/examples/red/environment.py +18 -15
  80. synth_ai/environments/examples/red/taskset.py +5 -3
  81. synth_ai/environments/examples/sokoban/engine.py +16 -13
  82. synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +3 -2
  83. synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +2 -1
  84. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +1 -1
  85. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +7 -5
  86. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +1 -1
  87. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +2 -1
  88. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +5 -4
  89. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +3 -2
  90. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +2 -1
  91. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +5 -4
  92. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +1 -1
  93. synth_ai/environments/examples/sokoban/environment.py +15 -14
  94. synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +5 -3
  95. synth_ai/environments/examples/sokoban/puzzle_loader.py +3 -2
  96. synth_ai/environments/examples/sokoban/taskset.py +13 -10
  97. synth_ai/environments/examples/tictactoe/engine.py +6 -6
  98. synth_ai/environments/examples/tictactoe/environment.py +8 -7
  99. synth_ai/environments/examples/tictactoe/taskset.py +6 -5
  100. synth_ai/environments/examples/verilog/engine.py +4 -3
  101. synth_ai/environments/examples/verilog/environment.py +11 -10
  102. synth_ai/environments/examples/verilog/taskset.py +14 -12
  103. synth_ai/environments/examples/wordle/__init__.py +5 -5
  104. synth_ai/environments/examples/wordle/engine.py +32 -25
  105. synth_ai/environments/examples/wordle/environment.py +21 -16
  106. synth_ai/environments/examples/wordle/helpers/generate_instances_wordfreq.py +6 -6
  107. synth_ai/environments/examples/wordle/taskset.py +20 -12
  108. synth_ai/environments/reproducibility/core.py +1 -1
  109. synth_ai/environments/reproducibility/tree.py +21 -21
  110. synth_ai/environments/service/app.py +3 -2
  111. synth_ai/environments/service/core_routes.py +104 -110
  112. synth_ai/environments/service/external_registry.py +1 -2
  113. synth_ai/environments/service/registry.py +1 -1
  114. synth_ai/environments/stateful/core.py +1 -2
  115. synth_ai/environments/stateful/engine.py +1 -1
  116. synth_ai/environments/tasks/api.py +4 -4
  117. synth_ai/environments/tasks/core.py +14 -12
  118. synth_ai/environments/tasks/filters.py +6 -4
  119. synth_ai/environments/tasks/utils.py +13 -11
  120. synth_ai/evals/base.py +2 -3
  121. synth_ai/experimental/synth_oss.py +4 -4
  122. synth_ai/learning/gateway.py +1 -3
  123. synth_ai/learning/prompts/banking77_injection_eval.py +15 -10
  124. synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +26 -14
  125. synth_ai/learning/prompts/mipro.py +61 -52
  126. synth_ai/learning/prompts/random_search.py +42 -43
  127. synth_ai/learning/prompts/run_mipro_banking77.py +32 -20
  128. synth_ai/learning/prompts/run_random_search_banking77.py +71 -52
  129. synth_ai/lm/__init__.py +5 -5
  130. synth_ai/lm/caching/ephemeral.py +9 -9
  131. synth_ai/lm/caching/handler.py +20 -20
  132. synth_ai/lm/caching/persistent.py +10 -10
  133. synth_ai/lm/config.py +3 -3
  134. synth_ai/lm/constants.py +7 -7
  135. synth_ai/lm/core/all.py +17 -3
  136. synth_ai/lm/core/exceptions.py +0 -2
  137. synth_ai/lm/core/main.py +26 -41
  138. synth_ai/lm/core/main_v3.py +20 -10
  139. synth_ai/lm/core/vendor_clients.py +18 -17
  140. synth_ai/lm/injection.py +7 -8
  141. synth_ai/lm/overrides.py +21 -19
  142. synth_ai/lm/provider_support/__init__.py +1 -1
  143. synth_ai/lm/provider_support/anthropic.py +15 -15
  144. synth_ai/lm/provider_support/openai.py +23 -21
  145. synth_ai/lm/structured_outputs/handler.py +34 -32
  146. synth_ai/lm/structured_outputs/inject.py +24 -27
  147. synth_ai/lm/structured_outputs/rehabilitate.py +19 -15
  148. synth_ai/lm/tools/base.py +17 -16
  149. synth_ai/lm/unified_interface.py +17 -18
  150. synth_ai/lm/vendors/base.py +20 -18
  151. synth_ai/lm/vendors/core/anthropic_api.py +36 -27
  152. synth_ai/lm/vendors/core/gemini_api.py +31 -36
  153. synth_ai/lm/vendors/core/mistral_api.py +19 -19
  154. synth_ai/lm/vendors/core/openai_api.py +11 -10
  155. synth_ai/lm/vendors/openai_standard.py +113 -87
  156. synth_ai/lm/vendors/openai_standard_responses.py +74 -61
  157. synth_ai/lm/vendors/retries.py +9 -1
  158. synth_ai/lm/vendors/supported/custom_endpoint.py +26 -26
  159. synth_ai/lm/vendors/supported/deepseek.py +10 -10
  160. synth_ai/lm/vendors/supported/grok.py +8 -8
  161. synth_ai/lm/vendors/supported/ollama.py +2 -1
  162. synth_ai/lm/vendors/supported/openrouter.py +11 -9
  163. synth_ai/lm/vendors/synth_client.py +69 -63
  164. synth_ai/lm/warmup.py +8 -7
  165. synth_ai/tracing/__init__.py +22 -10
  166. synth_ai/tracing_v1/__init__.py +22 -20
  167. synth_ai/tracing_v3/__init__.py +7 -7
  168. synth_ai/tracing_v3/abstractions.py +56 -52
  169. synth_ai/tracing_v3/config.py +4 -2
  170. synth_ai/tracing_v3/db_config.py +6 -8
  171. synth_ai/tracing_v3/decorators.py +29 -30
  172. synth_ai/tracing_v3/examples/basic_usage.py +12 -12
  173. synth_ai/tracing_v3/hooks.py +21 -21
  174. synth_ai/tracing_v3/llm_call_record_helpers.py +85 -98
  175. synth_ai/tracing_v3/lm_call_record_abstractions.py +2 -4
  176. synth_ai/tracing_v3/migration_helper.py +3 -5
  177. synth_ai/tracing_v3/replica_sync.py +30 -32
  178. synth_ai/tracing_v3/session_tracer.py +35 -29
  179. synth_ai/tracing_v3/storage/__init__.py +1 -1
  180. synth_ai/tracing_v3/storage/base.py +8 -7
  181. synth_ai/tracing_v3/storage/config.py +4 -4
  182. synth_ai/tracing_v3/storage/factory.py +4 -4
  183. synth_ai/tracing_v3/storage/utils.py +9 -9
  184. synth_ai/tracing_v3/turso/__init__.py +3 -3
  185. synth_ai/tracing_v3/turso/daemon.py +9 -9
  186. synth_ai/tracing_v3/turso/manager.py +60 -48
  187. synth_ai/tracing_v3/turso/models.py +24 -19
  188. synth_ai/tracing_v3/utils.py +5 -5
  189. synth_ai/tui/__main__.py +1 -1
  190. synth_ai/tui/cli/query_experiments.py +2 -3
  191. synth_ai/tui/cli/query_experiments_v3.py +2 -3
  192. synth_ai/tui/dashboard.py +97 -86
  193. synth_ai/v0/tracing/abstractions.py +28 -28
  194. synth_ai/v0/tracing/base_client.py +9 -9
  195. synth_ai/v0/tracing/client_manager.py +7 -7
  196. synth_ai/v0/tracing/config.py +7 -7
  197. synth_ai/v0/tracing/context.py +6 -6
  198. synth_ai/v0/tracing/decorators.py +6 -5
  199. synth_ai/v0/tracing/events/manage.py +1 -1
  200. synth_ai/v0/tracing/events/store.py +5 -4
  201. synth_ai/v0/tracing/immediate_client.py +4 -5
  202. synth_ai/v0/tracing/local.py +3 -3
  203. synth_ai/v0/tracing/log_client_base.py +4 -5
  204. synth_ai/v0/tracing/retry_queue.py +5 -6
  205. synth_ai/v0/tracing/trackers.py +25 -25
  206. synth_ai/v0/tracing/upload.py +6 -0
  207. synth_ai/v0/tracing_v1/__init__.py +1 -1
  208. synth_ai/v0/tracing_v1/abstractions.py +28 -28
  209. synth_ai/v0/tracing_v1/base_client.py +9 -9
  210. synth_ai/v0/tracing_v1/client_manager.py +7 -7
  211. synth_ai/v0/tracing_v1/config.py +7 -7
  212. synth_ai/v0/tracing_v1/context.py +6 -6
  213. synth_ai/v0/tracing_v1/decorators.py +7 -6
  214. synth_ai/v0/tracing_v1/events/manage.py +1 -1
  215. synth_ai/v0/tracing_v1/events/store.py +5 -4
  216. synth_ai/v0/tracing_v1/immediate_client.py +4 -5
  217. synth_ai/v0/tracing_v1/local.py +3 -3
  218. synth_ai/v0/tracing_v1/log_client_base.py +4 -5
  219. synth_ai/v0/tracing_v1/retry_queue.py +5 -6
  220. synth_ai/v0/tracing_v1/trackers.py +25 -25
  221. synth_ai/v0/tracing_v1/upload.py +25 -24
  222. synth_ai/zyk/__init__.py +1 -0
  223. {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/METADATA +1 -11
  224. synth_ai-0.2.4.dev7.dist-info/RECORD +299 -0
  225. synth_ai-0.2.4.dev6.dist-info/RECORD +0 -299
  226. {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/WHEEL +0 -0
  227. {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/entry_points.txt +0 -0
  228. {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/licenses/LICENSE +0 -0
  229. {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/top_level.txt +0 -0
@@ -2,22 +2,21 @@ import json
2
2
  import logging
3
3
  import os
4
4
  import warnings
5
- from typing import Any, Dict, List, Optional, Tuple, Type
5
+ from typing import Any
6
6
 
7
7
  import google.genai as genai
8
8
  from google.api_core.exceptions import ResourceExhausted
9
9
  from google.genai import types
10
+
10
11
  from synth_ai.lm.caching.initialize import get_cache_handler
11
- from synth_ai.lm.tools.base import BaseTool
12
- from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
13
12
  from synth_ai.lm.constants import (
14
- SPECIAL_BASE_TEMPS,
15
13
  GEMINI_REASONING_MODELS,
16
14
  GEMINI_THINKING_BUDGETS,
15
+ SPECIAL_BASE_TEMPS,
17
16
  )
17
+ from synth_ai.lm.tools.base import BaseTool
18
+ from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
18
19
  from synth_ai.lm.vendors.retries import BACKOFF_TOLERANCE, MAX_BACKOFF, backoff
19
- import logging
20
-
21
20
 
22
21
  ALIASES = {
23
22
  "gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",
@@ -25,7 +24,7 @@ ALIASES = {
25
24
 
26
25
  logger = logging.getLogger(__name__)
27
26
  _CLIENT = None # Initialize lazily when needed
28
- GEMINI_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (ResourceExhausted,)
27
+ GEMINI_EXCEPTIONS_TO_RETRY: tuple[type[Exception], ...] = (ResourceExhausted,)
29
28
  logging.getLogger("google.genai").setLevel(logging.ERROR)
30
29
  os.environ["GRPC_VERBOSITY"] = "ERROR"
31
30
  os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
@@ -49,11 +48,11 @@ def _get_client():
49
48
 
50
49
  class GeminiAPI(VendorBase):
51
50
  used_for_structured_outputs: bool = True
52
- exceptions_to_retry: Tuple[Type[Exception], ...] = GEMINI_EXCEPTIONS_TO_RETRY
51
+ exceptions_to_retry: tuple[type[Exception], ...] = GEMINI_EXCEPTIONS_TO_RETRY
53
52
 
54
53
  def __init__(
55
54
  self,
56
- exceptions_to_retry: Tuple[Type[Exception], ...] = GEMINI_EXCEPTIONS_TO_RETRY,
55
+ exceptions_to_retry: tuple[type[Exception], ...] = GEMINI_EXCEPTIONS_TO_RETRY,
57
56
  used_for_structured_outputs: bool = False,
58
57
  ):
59
58
  self.used_for_structured_outputs = used_for_structured_outputs
@@ -65,7 +64,7 @@ class GeminiAPI(VendorBase):
65
64
  return model_name
66
65
 
67
66
  @staticmethod
68
- def _msg_to_contents(messages: List[Dict[str, Any]]) -> List[types.Content]:
67
+ def _msg_to_contents(messages: list[dict[str, Any]]) -> list[types.Content]:
69
68
  # contents, sys_instr = [], None
70
69
  contents = []
71
70
  for m in messages:
@@ -82,16 +81,12 @@ class GeminiAPI(VendorBase):
82
81
  return contents
83
82
 
84
83
  @staticmethod
85
- def _tools_to_genai(tools: List[BaseTool]) -> List[types.Tool]:
84
+ def _tools_to_genai(tools: list[BaseTool]) -> list[types.Tool]:
86
85
  """Convert internal BaseTool → genai Tool."""
87
- out: List[types.Tool] = []
86
+ out: list[types.Tool] = []
88
87
  for t in tools:
89
88
  # Assume t.to_gemini_tool() now correctly returns a FunctionDeclaration
90
- # func_decl = t.to_gemini_tool()
91
- if isinstance(t, dict):
92
- func_decl = t
93
- else:
94
- func_decl = t.to_gemini_tool()
89
+ func_decl = t if isinstance(t, dict) else t.to_gemini_tool()
95
90
  if not isinstance(func_decl, types.FunctionDeclaration):
96
91
  # Or fetch schema parts if to_gemini_tool still returns dict
97
92
  # This depends on BaseTool.to_gemini_tool implementation
@@ -106,15 +101,15 @@ class GeminiAPI(VendorBase):
106
101
 
107
102
  async def _gen_content_async(
108
103
  self,
109
- messages: List[Dict],
104
+ messages: list[dict],
110
105
  temperature: float,
111
106
  model_name: str,
112
107
  reasoning_effort: str,
113
- tools: Optional[List[BaseTool]],
114
- lm_config: Optional[Dict[str, Any]],
115
- ) -> Tuple[str, Optional[List[Dict]]]:
108
+ tools: list[BaseTool] | None,
109
+ lm_config: dict[str, Any] | None,
110
+ ) -> tuple[str, list[dict] | None]:
116
111
  model_name = self.get_aliased_model_name(model_name)
117
- cfg_kwargs: Dict[str, Any] = {"temperature": temperature}
112
+ cfg_kwargs: dict[str, Any] = {"temperature": temperature}
118
113
  if model_name in GEMINI_REASONING_MODELS and reasoning_effort in GEMINI_THINKING_BUDGETS:
119
114
  cfg_kwargs["thinking_config"] = types.ThinkingConfig(
120
115
  thinking_budget=GEMINI_THINKING_BUDGETS[reasoning_effort]
@@ -141,15 +136,15 @@ class GeminiAPI(VendorBase):
141
136
 
142
137
  def _gen_content_sync(
143
138
  self,
144
- messages: List[Dict],
139
+ messages: list[dict],
145
140
  temperature: float,
146
141
  model_name: str,
147
142
  reasoning_effort: str,
148
- tools: Optional[List[BaseTool]],
149
- lm_config: Optional[Dict[str, Any]],
150
- ) -> Tuple[str, Optional[List[Dict]]]:
143
+ tools: list[BaseTool] | None,
144
+ lm_config: dict[str, Any] | None,
145
+ ) -> tuple[str, list[dict] | None]:
151
146
  model_name = self.get_aliased_model_name(model_name)
152
- cfg_kwargs: Dict[str, Any] = {"temperature": temperature}
147
+ cfg_kwargs: dict[str, Any] = {"temperature": temperature}
153
148
  if model_name in GEMINI_REASONING_MODELS and reasoning_effort in GEMINI_THINKING_BUDGETS:
154
149
  cfg_kwargs["thinking_config"] = types.ThinkingConfig(
155
150
  thinking_budget=GEMINI_THINKING_BUDGETS[reasoning_effort]
@@ -174,7 +169,7 @@ class GeminiAPI(VendorBase):
174
169
  return self._extract(resp)
175
170
 
176
171
  @staticmethod
177
- def _extract(response) -> Tuple[str, Optional[List[Dict]]]:
172
+ def _extract(response) -> tuple[str, list[dict] | None]:
178
173
  # Extract text, handling cases where it might be missing
179
174
  try:
180
175
  text = response.text
@@ -208,13 +203,13 @@ class GeminiAPI(VendorBase):
208
203
  async def _hit_api_async(
209
204
  self,
210
205
  model: str,
211
- messages: List[Dict[str, Any]],
212
- lm_config: Dict[str, Any],
206
+ messages: list[dict[str, Any]],
207
+ lm_config: dict[str, Any],
213
208
  use_ephemeral_cache_only: bool = False,
214
209
  reasoning_effort: str = "high",
215
- tools: Optional[List[BaseTool]] = None,
210
+ tools: list[BaseTool] | None = None,
216
211
  ) -> BaseLMResponse:
217
- assert lm_config.get("response_model", None) is None, (
212
+ assert lm_config.get("response_model") is None, (
218
213
  "response_model is not supported for standard calls"
219
214
  )
220
215
  used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
@@ -257,13 +252,13 @@ class GeminiAPI(VendorBase):
257
252
  def _hit_api_sync(
258
253
  self,
259
254
  model: str,
260
- messages: List[Dict[str, Any]],
261
- lm_config: Dict[str, Any],
255
+ messages: list[dict[str, Any]],
256
+ lm_config: dict[str, Any],
262
257
  use_ephemeral_cache_only: bool = False,
263
258
  reasoning_effort: str = "high",
264
- tools: Optional[List[BaseTool]] = None,
259
+ tools: list[BaseTool] | None = None,
265
260
  ) -> BaseLMResponse:
266
- assert lm_config.get("response_model", None) is None, (
261
+ assert lm_config.get("response_model") is None, (
267
262
  "response_model is not supported for standard calls"
268
263
  )
269
264
  used_cache_handler = get_cache_handler(use_ephemeral_cache_only=use_ephemeral_cache_only)
@@ -1,30 +1,30 @@
1
1
  import json
2
2
  import os
3
- from typing import Any, Dict, List, Optional, Tuple, Type
3
+ from typing import Any
4
4
 
5
5
  import pydantic
6
6
  from mistralai import Mistral # use Mistral as both sync and async client
7
7
  from pydantic import BaseModel
8
8
 
9
9
  from synth_ai.lm.caching.initialize import get_cache_handler
10
+ from synth_ai.lm.constants import SPECIAL_BASE_TEMPS
10
11
  from synth_ai.lm.tools.base import BaseTool
11
12
  from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
12
- from synth_ai.lm.constants import SPECIAL_BASE_TEMPS
13
13
  from synth_ai.lm.vendors.core.openai_api import OpenAIStructuredOutputClient
14
14
 
15
15
  # Since the mistralai package doesn't expose an exceptions module,
16
16
  # we fallback to catching all Exceptions for retry.
17
- MISTRAL_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (Exception,)
17
+ MISTRAL_EXCEPTIONS_TO_RETRY: tuple[type[Exception], ...] = (Exception,)
18
18
 
19
19
 
20
20
  class MistralAPI(VendorBase):
21
21
  used_for_structured_outputs: bool = True
22
- exceptions_to_retry: Tuple = MISTRAL_EXCEPTIONS_TO_RETRY
22
+ exceptions_to_retry: tuple = MISTRAL_EXCEPTIONS_TO_RETRY
23
23
  _openai_fallback: Any
24
24
 
25
25
  def __init__(
26
26
  self,
27
- exceptions_to_retry: Tuple[Type[Exception], ...] = MISTRAL_EXCEPTIONS_TO_RETRY,
27
+ exceptions_to_retry: tuple[type[Exception], ...] = MISTRAL_EXCEPTIONS_TO_RETRY,
28
28
  used_for_structured_outputs: bool = False,
29
29
  ):
30
30
  self.used_for_structured_outputs = used_for_structured_outputs
@@ -40,14 +40,14 @@ class MistralAPI(VendorBase):
40
40
  async def _hit_api_async(
41
41
  self,
42
42
  model: str,
43
- messages: List[Dict[str, Any]],
44
- lm_config: Dict[str, Any],
45
- response_model: Optional[BaseModel] = None,
43
+ messages: list[dict[str, Any]],
44
+ lm_config: dict[str, Any],
45
+ response_model: BaseModel | None = None,
46
46
  use_ephemeral_cache_only: bool = False,
47
47
  reasoning_effort: str = "high",
48
- tools: Optional[List[BaseTool]] = None,
48
+ tools: list[BaseTool] | None = None,
49
49
  ) -> BaseLMResponse:
50
- assert lm_config.get("response_model", None) is None, (
50
+ assert lm_config.get("response_model") is None, (
51
51
  "response_model is not supported for standard calls"
52
52
  )
53
53
  assert not (response_model and tools), "Cannot provide both response_model and tools"
@@ -63,7 +63,7 @@ class MistralAPI(VendorBase):
63
63
  ], f"Expected BaseLMResponse or str, got {type(cache_result)}"
64
64
  return (
65
65
  cache_result
66
- if type(cache_result) == BaseLMResponse
66
+ if isinstance(cache_result, BaseLMResponse)
67
67
  else BaseLMResponse(
68
68
  raw_response=cache_result, structured_output=None, tool_calls=None
69
69
  )
@@ -130,14 +130,14 @@ class MistralAPI(VendorBase):
130
130
  def _hit_api_sync(
131
131
  self,
132
132
  model: str,
133
- messages: List[Dict[str, Any]],
134
- lm_config: Dict[str, Any],
135
- response_model: Optional[BaseModel] = None,
133
+ messages: list[dict[str, Any]],
134
+ lm_config: dict[str, Any],
135
+ response_model: BaseModel | None = None,
136
136
  use_ephemeral_cache_only: bool = False,
137
137
  reasoning_effort: str = "high",
138
- tools: Optional[List[BaseTool]] = None,
138
+ tools: list[BaseTool] | None = None,
139
139
  ) -> BaseLMResponse:
140
- assert lm_config.get("response_model", None) is None, (
140
+ assert lm_config.get("response_model") is None, (
141
141
  "response_model is not supported for standard calls"
142
142
  )
143
143
  assert not (response_model and tools), "Cannot provide both response_model and tools"
@@ -154,7 +154,7 @@ class MistralAPI(VendorBase):
154
154
  ], f"Expected BaseLMResponse or str, got {type(cache_result)}"
155
155
  return (
156
156
  cache_result
157
- if type(cache_result) == BaseLMResponse
157
+ if isinstance(cache_result, BaseLMResponse)
158
158
  else BaseLMResponse(
159
159
  raw_response=cache_result, structured_output=None, tool_calls=None
160
160
  )
@@ -217,7 +217,7 @@ class MistralAPI(VendorBase):
217
217
  async def _hit_api_async_structured_output(
218
218
  self,
219
219
  model: str,
220
- messages: List[Dict[str, Any]],
220
+ messages: list[dict[str, Any]],
221
221
  response_model: BaseModel,
222
222
  temperature: float,
223
223
  use_ephemeral_cache_only: bool = False,
@@ -256,7 +256,7 @@ class MistralAPI(VendorBase):
256
256
  def _hit_api_sync_structured_output(
257
257
  self,
258
258
  model: str,
259
- messages: List[Dict[str, Any]],
259
+ messages: list[dict[str, Any]],
260
260
  response_model: BaseModel,
261
261
  temperature: float,
262
262
  use_ephemeral_cache_only: bool = False,
@@ -6,7 +6,7 @@ supporting both standard and structured output modes.
6
6
  """
7
7
 
8
8
  import json
9
- from typing import Any, Dict, List, Optional, Tuple, Type
9
+ from typing import Any
10
10
 
11
11
  import openai
12
12
  import pydantic_core
@@ -15,13 +15,13 @@ import pydantic_core
15
15
  from pydantic import BaseModel
16
16
 
17
17
  from synth_ai.lm.caching.initialize import get_cache_handler
18
+ from synth_ai.lm.constants import OPENAI_REASONING_MODELS, SPECIAL_BASE_TEMPS
18
19
  from synth_ai.lm.tools.base import BaseTool
19
20
  from synth_ai.lm.vendors.base import BaseLMResponse
20
- from synth_ai.lm.constants import SPECIAL_BASE_TEMPS, OPENAI_REASONING_MODELS
21
21
  from synth_ai.lm.vendors.openai_standard import OpenAIStandard
22
22
 
23
23
  # Exceptions that should trigger retry logic for OpenAI API calls
24
- OPENAI_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (
24
+ OPENAI_EXCEPTIONS_TO_RETRY: tuple[type[Exception], ...] = (
25
25
  pydantic_core._pydantic_core.ValidationError,
26
26
  openai.OpenAIError,
27
27
  openai.APIConnectionError,
@@ -36,10 +36,11 @@ OPENAI_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (
36
36
  class OpenAIStructuredOutputClient(OpenAIStandard):
37
37
  """
38
38
  OpenAI client with support for structured outputs.
39
-
39
+
40
40
  This client extends the standard OpenAI client to support structured outputs
41
41
  using OpenAI's native structured output feature or response format parameter.
42
42
  """
43
+
43
44
  def __init__(self, synth_logging: bool = True):
44
45
  if synth_logging:
45
46
  # print("Using synth logging - OpenAIStructuredOutputClient")
@@ -58,11 +59,11 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
58
59
  async def _hit_api_async_structured_output(
59
60
  self,
60
61
  model: str,
61
- messages: List[Dict[str, Any]],
62
+ messages: list[dict[str, Any]],
62
63
  response_model: BaseModel,
63
64
  temperature: float,
64
65
  use_ephemeral_cache_only: bool = False,
65
- tools: Optional[List[BaseTool]] = None,
66
+ tools: list[BaseTool] | None = None,
66
67
  reasoning_effort: str = "high",
67
68
  ) -> str:
68
69
  if tools:
@@ -81,7 +82,7 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
81
82
  dict,
82
83
  BaseLMResponse,
83
84
  ], f"Expected dict or BaseLMResponse, got {type(cache_result)}"
84
- return cache_result["response"] if type(cache_result) == dict else cache_result
85
+ return cache_result["response"] if isinstance(cache_result, dict) else cache_result
85
86
  if model in OPENAI_REASONING_MODELS:
86
87
  output = await self.async_client.beta.chat.completions.parse(
87
88
  model=model,
@@ -109,11 +110,11 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
109
110
  def _hit_api_sync_structured_output(
110
111
  self,
111
112
  model: str,
112
- messages: List[Dict[str, Any]],
113
+ messages: list[dict[str, Any]],
113
114
  response_model: BaseModel,
114
115
  temperature: float,
115
116
  use_ephemeral_cache_only: bool = False,
116
- tools: Optional[List[BaseTool]] = None,
117
+ tools: list[BaseTool] | None = None,
117
118
  reasoning_effort: str = "high",
118
119
  ) -> str:
119
120
  if tools:
@@ -130,7 +131,7 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
130
131
  dict,
131
132
  BaseLMResponse,
132
133
  ], f"Expected dict or BaseLMResponse, got {type(cache_result)}"
133
- return cache_result["response"] if type(cache_result) == dict else cache_result
134
+ return cache_result["response"] if isinstance(cache_result, dict) else cache_result
134
135
  if model in OPENAI_REASONING_MODELS:
135
136
  output = self.sync_client.beta.chat.completions.parse(
136
137
  model=model,