synth-ai 0.2.4.dev6__py3-none-any.whl → 0.2.4.dev7__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- synth_ai/__init__.py +18 -9
- synth_ai/cli/__init__.py +10 -5
- synth_ai/cli/balance.py +22 -17
- synth_ai/cli/calc.py +2 -3
- synth_ai/cli/demo.py +3 -5
- synth_ai/cli/legacy_root_backup.py +58 -32
- synth_ai/cli/man.py +22 -19
- synth_ai/cli/recent.py +9 -8
- synth_ai/cli/root.py +58 -13
- synth_ai/cli/status.py +13 -6
- synth_ai/cli/traces.py +45 -21
- synth_ai/cli/watch.py +40 -37
- synth_ai/config/base_url.py +1 -3
- synth_ai/core/experiment.py +1 -2
- synth_ai/environments/__init__.py +2 -6
- synth_ai/environments/environment/artifacts/base.py +3 -1
- synth_ai/environments/environment/db/sqlite.py +1 -1
- synth_ai/environments/environment/registry.py +19 -20
- synth_ai/environments/environment/resources/sqlite.py +2 -3
- synth_ai/environments/environment/rewards/core.py +3 -2
- synth_ai/environments/environment/tools/__init__.py +6 -4
- synth_ai/environments/examples/crafter_classic/__init__.py +1 -1
- synth_ai/environments/examples/crafter_classic/engine.py +13 -13
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +1 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +2 -1
- synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +2 -1
- synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +3 -2
- synth_ai/environments/examples/crafter_classic/environment.py +16 -15
- synth_ai/environments/examples/crafter_classic/taskset.py +2 -2
- synth_ai/environments/examples/crafter_classic/trace_hooks_v3.py +2 -3
- synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +2 -1
- synth_ai/environments/examples/crafter_custom/crafter/__init__.py +2 -2
- synth_ai/environments/examples/crafter_custom/crafter/config.py +2 -2
- synth_ai/environments/examples/crafter_custom/crafter/env.py +1 -5
- synth_ai/environments/examples/crafter_custom/crafter/objects.py +1 -2
- synth_ai/environments/examples/crafter_custom/crafter/worldgen.py +1 -2
- synth_ai/environments/examples/crafter_custom/dataset_builder.py +5 -5
- synth_ai/environments/examples/crafter_custom/environment.py +13 -13
- synth_ai/environments/examples/crafter_custom/run_dataset.py +5 -5
- synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +2 -2
- synth_ai/environments/examples/enron/art_helpers/local_email_db.py +5 -4
- synth_ai/environments/examples/enron/art_helpers/types_enron.py +2 -1
- synth_ai/environments/examples/enron/engine.py +18 -14
- synth_ai/environments/examples/enron/environment.py +12 -11
- synth_ai/environments/examples/enron/taskset.py +7 -7
- synth_ai/environments/examples/minigrid/__init__.py +6 -6
- synth_ai/environments/examples/minigrid/engine.py +6 -6
- synth_ai/environments/examples/minigrid/environment.py +6 -6
- synth_ai/environments/examples/minigrid/puzzle_loader.py +3 -2
- synth_ai/environments/examples/minigrid/taskset.py +13 -13
- synth_ai/environments/examples/nethack/achievements.py +1 -1
- synth_ai/environments/examples/nethack/engine.py +8 -7
- synth_ai/environments/examples/nethack/environment.py +10 -9
- synth_ai/environments/examples/nethack/helpers/__init__.py +8 -9
- synth_ai/environments/examples/nethack/helpers/action_mapping.py +1 -1
- synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +2 -1
- synth_ai/environments/examples/nethack/helpers/observation_utils.py +1 -1
- synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +3 -4
- synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +6 -5
- synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +5 -5
- synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +7 -6
- synth_ai/environments/examples/nethack/taskset.py +5 -5
- synth_ai/environments/examples/red/engine.py +9 -8
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +7 -7
- synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +3 -2
- synth_ai/environments/examples/red/engine_helpers/state_extraction.py +2 -1
- synth_ai/environments/examples/red/environment.py +18 -15
- synth_ai/environments/examples/red/taskset.py +5 -3
- synth_ai/environments/examples/sokoban/engine.py +16 -13
- synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +3 -2
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +2 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +1 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +7 -5
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +1 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +2 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +5 -4
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +3 -2
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +2 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +5 -4
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +1 -1
- synth_ai/environments/examples/sokoban/environment.py +15 -14
- synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +5 -3
- synth_ai/environments/examples/sokoban/puzzle_loader.py +3 -2
- synth_ai/environments/examples/sokoban/taskset.py +13 -10
- synth_ai/environments/examples/tictactoe/engine.py +6 -6
- synth_ai/environments/examples/tictactoe/environment.py +8 -7
- synth_ai/environments/examples/tictactoe/taskset.py +6 -5
- synth_ai/environments/examples/verilog/engine.py +4 -3
- synth_ai/environments/examples/verilog/environment.py +11 -10
- synth_ai/environments/examples/verilog/taskset.py +14 -12
- synth_ai/environments/examples/wordle/__init__.py +5 -5
- synth_ai/environments/examples/wordle/engine.py +32 -25
- synth_ai/environments/examples/wordle/environment.py +21 -16
- synth_ai/environments/examples/wordle/helpers/generate_instances_wordfreq.py +6 -6
- synth_ai/environments/examples/wordle/taskset.py +20 -12
- synth_ai/environments/reproducibility/core.py +1 -1
- synth_ai/environments/reproducibility/tree.py +21 -21
- synth_ai/environments/service/app.py +3 -2
- synth_ai/environments/service/core_routes.py +104 -110
- synth_ai/environments/service/external_registry.py +1 -2
- synth_ai/environments/service/registry.py +1 -1
- synth_ai/environments/stateful/core.py +1 -2
- synth_ai/environments/stateful/engine.py +1 -1
- synth_ai/environments/tasks/api.py +4 -4
- synth_ai/environments/tasks/core.py +14 -12
- synth_ai/environments/tasks/filters.py +6 -4
- synth_ai/environments/tasks/utils.py +13 -11
- synth_ai/evals/base.py +2 -3
- synth_ai/experimental/synth_oss.py +4 -4
- synth_ai/learning/gateway.py +1 -3
- synth_ai/learning/prompts/banking77_injection_eval.py +15 -10
- synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +26 -14
- synth_ai/learning/prompts/mipro.py +61 -52
- synth_ai/learning/prompts/random_search.py +42 -43
- synth_ai/learning/prompts/run_mipro_banking77.py +32 -20
- synth_ai/learning/prompts/run_random_search_banking77.py +71 -52
- synth_ai/lm/__init__.py +5 -5
- synth_ai/lm/caching/ephemeral.py +9 -9
- synth_ai/lm/caching/handler.py +20 -20
- synth_ai/lm/caching/persistent.py +10 -10
- synth_ai/lm/config.py +3 -3
- synth_ai/lm/constants.py +7 -7
- synth_ai/lm/core/all.py +17 -3
- synth_ai/lm/core/exceptions.py +0 -2
- synth_ai/lm/core/main.py +26 -41
- synth_ai/lm/core/main_v3.py +20 -10
- synth_ai/lm/core/vendor_clients.py +18 -17
- synth_ai/lm/injection.py +7 -8
- synth_ai/lm/overrides.py +21 -19
- synth_ai/lm/provider_support/__init__.py +1 -1
- synth_ai/lm/provider_support/anthropic.py +15 -15
- synth_ai/lm/provider_support/openai.py +23 -21
- synth_ai/lm/structured_outputs/handler.py +34 -32
- synth_ai/lm/structured_outputs/inject.py +24 -27
- synth_ai/lm/structured_outputs/rehabilitate.py +19 -15
- synth_ai/lm/tools/base.py +17 -16
- synth_ai/lm/unified_interface.py +17 -18
- synth_ai/lm/vendors/base.py +20 -18
- synth_ai/lm/vendors/core/anthropic_api.py +36 -27
- synth_ai/lm/vendors/core/gemini_api.py +31 -36
- synth_ai/lm/vendors/core/mistral_api.py +19 -19
- synth_ai/lm/vendors/core/openai_api.py +11 -10
- synth_ai/lm/vendors/openai_standard.py +113 -87
- synth_ai/lm/vendors/openai_standard_responses.py +74 -61
- synth_ai/lm/vendors/retries.py +9 -1
- synth_ai/lm/vendors/supported/custom_endpoint.py +26 -26
- synth_ai/lm/vendors/supported/deepseek.py +10 -10
- synth_ai/lm/vendors/supported/grok.py +8 -8
- synth_ai/lm/vendors/supported/ollama.py +2 -1
- synth_ai/lm/vendors/supported/openrouter.py +11 -9
- synth_ai/lm/vendors/synth_client.py +69 -63
- synth_ai/lm/warmup.py +8 -7
- synth_ai/tracing/__init__.py +22 -10
- synth_ai/tracing_v1/__init__.py +22 -20
- synth_ai/tracing_v3/__init__.py +7 -7
- synth_ai/tracing_v3/abstractions.py +56 -52
- synth_ai/tracing_v3/config.py +4 -2
- synth_ai/tracing_v3/db_config.py +6 -8
- synth_ai/tracing_v3/decorators.py +29 -30
- synth_ai/tracing_v3/examples/basic_usage.py +12 -12
- synth_ai/tracing_v3/hooks.py +21 -21
- synth_ai/tracing_v3/llm_call_record_helpers.py +85 -98
- synth_ai/tracing_v3/lm_call_record_abstractions.py +2 -4
- synth_ai/tracing_v3/migration_helper.py +3 -5
- synth_ai/tracing_v3/replica_sync.py +30 -32
- synth_ai/tracing_v3/session_tracer.py +35 -29
- synth_ai/tracing_v3/storage/__init__.py +1 -1
- synth_ai/tracing_v3/storage/base.py +8 -7
- synth_ai/tracing_v3/storage/config.py +4 -4
- synth_ai/tracing_v3/storage/factory.py +4 -4
- synth_ai/tracing_v3/storage/utils.py +9 -9
- synth_ai/tracing_v3/turso/__init__.py +3 -3
- synth_ai/tracing_v3/turso/daemon.py +9 -9
- synth_ai/tracing_v3/turso/manager.py +60 -48
- synth_ai/tracing_v3/turso/models.py +24 -19
- synth_ai/tracing_v3/utils.py +5 -5
- synth_ai/tui/__main__.py +1 -1
- synth_ai/tui/cli/query_experiments.py +2 -3
- synth_ai/tui/cli/query_experiments_v3.py +2 -3
- synth_ai/tui/dashboard.py +97 -86
- synth_ai/v0/tracing/abstractions.py +28 -28
- synth_ai/v0/tracing/base_client.py +9 -9
- synth_ai/v0/tracing/client_manager.py +7 -7
- synth_ai/v0/tracing/config.py +7 -7
- synth_ai/v0/tracing/context.py +6 -6
- synth_ai/v0/tracing/decorators.py +6 -5
- synth_ai/v0/tracing/events/manage.py +1 -1
- synth_ai/v0/tracing/events/store.py +5 -4
- synth_ai/v0/tracing/immediate_client.py +4 -5
- synth_ai/v0/tracing/local.py +3 -3
- synth_ai/v0/tracing/log_client_base.py +4 -5
- synth_ai/v0/tracing/retry_queue.py +5 -6
- synth_ai/v0/tracing/trackers.py +25 -25
- synth_ai/v0/tracing/upload.py +6 -0
- synth_ai/v0/tracing_v1/__init__.py +1 -1
- synth_ai/v0/tracing_v1/abstractions.py +28 -28
- synth_ai/v0/tracing_v1/base_client.py +9 -9
- synth_ai/v0/tracing_v1/client_manager.py +7 -7
- synth_ai/v0/tracing_v1/config.py +7 -7
- synth_ai/v0/tracing_v1/context.py +6 -6
- synth_ai/v0/tracing_v1/decorators.py +7 -6
- synth_ai/v0/tracing_v1/events/manage.py +1 -1
- synth_ai/v0/tracing_v1/events/store.py +5 -4
- synth_ai/v0/tracing_v1/immediate_client.py +4 -5
- synth_ai/v0/tracing_v1/local.py +3 -3
- synth_ai/v0/tracing_v1/log_client_base.py +4 -5
- synth_ai/v0/tracing_v1/retry_queue.py +5 -6
- synth_ai/v0/tracing_v1/trackers.py +25 -25
- synth_ai/v0/tracing_v1/upload.py +25 -24
- synth_ai/zyk/__init__.py +1 -0
- {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/METADATA +1 -11
- synth_ai-0.2.4.dev7.dist-info/RECORD +299 -0
- synth_ai-0.2.4.dev6.dist-info/RECORD +0 -299
- {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev7.dist-info}/top_level.txt +0 -0
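
The hunks below are the diff bodies the registry page renders for three of the files listed above: synth_ai/lm/vendors/core/gemini_api.py, mistral_api.py, and openai_api.py. The dominant change is a typing cleanup (old-style typing aliases replaced by builtin generics and PEP 604 unions) plus import regrouping; note that the viewer truncated the tail of several removed lines, which are reproduced below as captured. A minimal sketch of the annotation style being adopted, with hypothetical names standing in for the package's own:

    # Hypothetical names; the real signatures appear in the hunks below.
    from typing import Any

    # Old style (typing aliases)       New style (PEP 585 / PEP 604)
    #   Tuple[Type[Exception], ...] -> tuple[type[Exception], ...]
    #   List[Dict[str, Any]]        -> list[dict[str, Any]]
    #   Optional[List[BaseTool]]    -> list[BaseTool] | None

    EXCEPTIONS_TO_RETRY: tuple[type[Exception], ...] = (TimeoutError,)

    def hit_api(
        messages: list[dict[str, Any]],
        tools: list[str] | None = None,
    ) -> tuple[str, list[dict] | None]:
        # Returns (text, tool_calls), mirroring the return shape in the hunks.
        return "ok", None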
synth_ai/lm/vendors/core/gemini_api.py

@@ -2,22 +2,21 @@ import json
 import logging
 import os
 import warnings
-from typing import Any
+from typing import Any
 
 import google.genai as genai
 from google.api_core.exceptions import ResourceExhausted
 from google.genai import types
+
 from synth_ai.lm.caching.initialize import get_cache_handler
-from synth_ai.lm.tools.base import BaseTool
-from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
 from synth_ai.lm.constants import (
-    SPECIAL_BASE_TEMPS,
     GEMINI_REASONING_MODELS,
     GEMINI_THINKING_BUDGETS,
+    SPECIAL_BASE_TEMPS,
 )
+from synth_ai.lm.tools.base import BaseTool
+from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
 from synth_ai.lm.vendors.retries import BACKOFF_TOLERANCE, MAX_BACKOFF, backoff
-import logging
-
 
 ALIASES = {
     "gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",

@@ -25,7 +24,7 @@ ALIASES = {
 
 logger = logging.getLogger(__name__)
 _CLIENT = None # Initialize lazily when needed
-GEMINI_EXCEPTIONS_TO_RETRY:
+GEMINI_EXCEPTIONS_TO_RETRY: tuple[type[Exception], ...] = (ResourceExhausted,)
 logging.getLogger("google.genai").setLevel(logging.ERROR)
 os.environ["GRPC_VERBOSITY"] = "ERROR"
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

@@ -49,11 +48,11 @@ def _get_client():
 
 class GeminiAPI(VendorBase):
     used_for_structured_outputs: bool = True
-    exceptions_to_retry:
+    exceptions_to_retry: tuple[type[Exception], ...] = GEMINI_EXCEPTIONS_TO_RETRY
 
     def __init__(
         self,
-        exceptions_to_retry:
+        exceptions_to_retry: tuple[type[Exception], ...] = GEMINI_EXCEPTIONS_TO_RETRY,
         used_for_structured_outputs: bool = False,
     ):
         self.used_for_structured_outputs = used_for_structured_outputs

@@ -65,7 +64,7 @@ class GeminiAPI(VendorBase):
         return model_name
 
     @staticmethod
-    def _msg_to_contents(messages:
+    def _msg_to_contents(messages: list[dict[str, Any]]) -> list[types.Content]:
         # contents, sys_instr = [], None
         contents = []
         for m in messages:

@@ -82,16 +81,12 @@ class GeminiAPI(VendorBase):
         return contents
 
     @staticmethod
-    def _tools_to_genai(tools:
+    def _tools_to_genai(tools: list[BaseTool]) -> list[types.Tool]:
         """Convert internal BaseTool → genai Tool."""
-        out:
+        out: list[types.Tool] = []
         for t in tools:
             # Assume t.to_gemini_tool() now correctly returns a FunctionDeclaration
-
-            if isinstance(t, dict):
-                func_decl = t
-            else:
-                func_decl = t.to_gemini_tool()
+            func_decl = t if isinstance(t, dict) else t.to_gemini_tool()
             if not isinstance(func_decl, types.FunctionDeclaration):
                 # Or fetch schema parts if to_gemini_tool still returns dict
                 # This depends on BaseTool.to_gemini_tool implementation

@@ -106,15 +101,15 @@ class GeminiAPI(VendorBase):
 
     async def _gen_content_async(
         self,
-        messages:
+        messages: list[dict],
         temperature: float,
         model_name: str,
         reasoning_effort: str,
-        tools:
-        lm_config:
-    ) ->
+        tools: list[BaseTool] | None,
+        lm_config: dict[str, Any] | None,
+    ) -> tuple[str, list[dict] | None]:
         model_name = self.get_aliased_model_name(model_name)
-        cfg_kwargs:
+        cfg_kwargs: dict[str, Any] = {"temperature": temperature}
         if model_name in GEMINI_REASONING_MODELS and reasoning_effort in GEMINI_THINKING_BUDGETS:
             cfg_kwargs["thinking_config"] = types.ThinkingConfig(
                 thinking_budget=GEMINI_THINKING_BUDGETS[reasoning_effort]

@@ -141,15 +136,15 @@ class GeminiAPI(VendorBase):
 
     def _gen_content_sync(
         self,
-        messages:
+        messages: list[dict],
         temperature: float,
         model_name: str,
         reasoning_effort: str,
-        tools:
-        lm_config:
-    ) ->
+        tools: list[BaseTool] | None,
+        lm_config: dict[str, Any] | None,
+    ) -> tuple[str, list[dict] | None]:
         model_name = self.get_aliased_model_name(model_name)
-        cfg_kwargs:
+        cfg_kwargs: dict[str, Any] = {"temperature": temperature}
         if model_name in GEMINI_REASONING_MODELS and reasoning_effort in GEMINI_THINKING_BUDGETS:
             cfg_kwargs["thinking_config"] = types.ThinkingConfig(
                 thinking_budget=GEMINI_THINKING_BUDGETS[reasoning_effort]

@@ -174,7 +169,7 @@ class GeminiAPI(VendorBase):
         return self._extract(resp)
 
     @staticmethod
-    def _extract(response) ->
+    def _extract(response) -> tuple[str, list[dict] | None]:
         # Extract text, handling cases where it might be missing
         try:
             text = response.text

@@ -208,13 +203,13 @@ class GeminiAPI(VendorBase):
     async def _hit_api_async(
         self,
         model: str,
-        messages:
-        lm_config:
+        messages: list[dict[str, Any]],
+        lm_config: dict[str, Any],
         use_ephemeral_cache_only: bool = False,
         reasoning_effort: str = "high",
-        tools:
+        tools: list[BaseTool] | None = None,
     ) -> BaseLMResponse:
-        assert lm_config.get("response_model"
+        assert lm_config.get("response_model") is None, (
             "response_model is not supported for standard calls"
         )
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)

@@ -257,13 +252,13 @@ class GeminiAPI(VendorBase):
     def _hit_api_sync(
         self,
         model: str,
-        messages:
-        lm_config:
+        messages: list[dict[str, Any]],
+        lm_config: dict[str, Any],
         use_ephemeral_cache_only: bool = False,
         reasoning_effort: str = "high",
-        tools:
+        tools: list[BaseTool] | None = None,
     ) -> BaseLMResponse:
-        assert lm_config.get("response_model"
+        assert lm_config.get("response_model") is None, (
             "response_model is not supported for standard calls"
         )
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only=use_ephemeral_cache_only)
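
The _gen_content_async/_gen_content_sync hunks above assemble cfg_kwargs, including an optional thinking budget, for the google-genai request. A minimal sketch of that assembly, assuming (the hunk does not show this) that cfg_kwargs ends up in types.GenerateContentConfig, and with made-up budget values:

    from typing import Any
    from google.genai import types

    GEMINI_THINKING_BUDGETS = {"low": 1024, "high": 8192}  # hypothetical values
    GEMINI_REASONING_MODELS = {"gemini-2.5-flash"}         # hypothetical set

    def build_config(temperature: float, model_name: str, reasoning_effort: str) -> types.GenerateContentConfig:
        cfg_kwargs: dict[str, Any] = {"temperature": temperature}
        if model_name in GEMINI_REASONING_MODELS and reasoning_effort in GEMINI_THINKING_BUDGETS:
            # Reasoning models get an explicit thinking budget, as in the hunk above.
            cfg_kwargs["thinking_config"] = types.ThinkingConfig(
                thinking_budget=GEMINI_THINKING_BUDGETS[reasoning_effort]
            )
        return types.GenerateContentConfig(**cfg_kwargs)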
synth_ai/lm/vendors/core/mistral_api.py

@@ -1,30 +1,30 @@
 import json
 import os
-from typing import Any
+from typing import Any
 
 import pydantic
 from mistralai import Mistral # use Mistral as both sync and async client
 from pydantic import BaseModel
 
 from synth_ai.lm.caching.initialize import get_cache_handler
+from synth_ai.lm.constants import SPECIAL_BASE_TEMPS
 from synth_ai.lm.tools.base import BaseTool
 from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
-from synth_ai.lm.constants import SPECIAL_BASE_TEMPS
 from synth_ai.lm.vendors.core.openai_api import OpenAIStructuredOutputClient
 
 # Since the mistralai package doesn't expose an exceptions module,
 # we fallback to catching all Exceptions for retry.
-MISTRAL_EXCEPTIONS_TO_RETRY:
+MISTRAL_EXCEPTIONS_TO_RETRY: tuple[type[Exception], ...] = (Exception,)
 
 
 class MistralAPI(VendorBase):
     used_for_structured_outputs: bool = True
-    exceptions_to_retry:
+    exceptions_to_retry: tuple = MISTRAL_EXCEPTIONS_TO_RETRY
     _openai_fallback: Any
 
     def __init__(
         self,
-        exceptions_to_retry:
+        exceptions_to_retry: tuple[type[Exception], ...] = MISTRAL_EXCEPTIONS_TO_RETRY,
         used_for_structured_outputs: bool = False,
     ):
         self.used_for_structured_outputs = used_for_structured_outputs

@@ -40,14 +40,14 @@ class MistralAPI(VendorBase):
     async def _hit_api_async(
         self,
         model: str,
-        messages:
-        lm_config:
-        response_model:
+        messages: list[dict[str, Any]],
+        lm_config: dict[str, Any],
+        response_model: BaseModel | None = None,
         use_ephemeral_cache_only: bool = False,
         reasoning_effort: str = "high",
-        tools:
+        tools: list[BaseTool] | None = None,
     ) -> BaseLMResponse:
-        assert lm_config.get("response_model"
+        assert lm_config.get("response_model") is None, (
             "response_model is not supported for standard calls"
         )
         assert not (response_model and tools), "Cannot provide both response_model and tools"

@@ -63,7 +63,7 @@ class MistralAPI(VendorBase):
             ], f"Expected BaseLMResponse or str, got {type(cache_result)}"
             return (
                 cache_result
-                if
+                if isinstance(cache_result, BaseLMResponse)
                 else BaseLMResponse(
                     raw_response=cache_result, structured_output=None, tool_calls=None
                 )

@@ -130,14 +130,14 @@ class MistralAPI(VendorBase):
     def _hit_api_sync(
         self,
         model: str,
-        messages:
-        lm_config:
-        response_model:
+        messages: list[dict[str, Any]],
+        lm_config: dict[str, Any],
+        response_model: BaseModel | None = None,
         use_ephemeral_cache_only: bool = False,
         reasoning_effort: str = "high",
-        tools:
+        tools: list[BaseTool] | None = None,
     ) -> BaseLMResponse:
-        assert lm_config.get("response_model"
+        assert lm_config.get("response_model") is None, (
             "response_model is not supported for standard calls"
         )
         assert not (response_model and tools), "Cannot provide both response_model and tools"

@@ -154,7 +154,7 @@ class MistralAPI(VendorBase):
             ], f"Expected BaseLMResponse or str, got {type(cache_result)}"
             return (
                 cache_result
-                if
+                if isinstance(cache_result, BaseLMResponse)
                 else BaseLMResponse(
                     raw_response=cache_result, structured_output=None, tool_calls=None
                 )

@@ -217,7 +217,7 @@ class MistralAPI(VendorBase):
     async def _hit_api_async_structured_output(
         self,
         model: str,
-        messages:
+        messages: list[dict[str, Any]],
         response_model: BaseModel,
         temperature: float,
         use_ephemeral_cache_only: bool = False,

@@ -256,7 +256,7 @@ class MistralAPI(VendorBase):
     def _hit_api_sync_structured_output(
         self,
         model: str,
-        messages:
+        messages: list[dict[str, Any]],
         response_model: BaseModel,
         temperature: float,
         use_ephemeral_cache_only: bool = False,
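
Both cache-read paths in mistral_api.py normalize a cache hit: a cached entry may be a ready BaseLMResponse or a raw string, and strings are wrapped before returning. A standalone sketch of that branch, constructing BaseLMResponse with the same fields as in the hunks above:

    def normalize_cache_hit(cache_result):
        # A cached entry is either a BaseLMResponse or a raw string response.
        return (
            cache_result
            if isinstance(cache_result, BaseLMResponse)
            else BaseLMResponse(
                raw_response=cache_result, structured_output=None, tool_calls=None
            )
        )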
synth_ai/lm/vendors/core/openai_api.py

@@ -6,7 +6,7 @@ supporting both standard and structured output modes.
 """
 
 import json
-from typing import Any
+from typing import Any
 
 import openai
 import pydantic_core

@@ -15,13 +15,13 @@ import pydantic_core
 from pydantic import BaseModel
 
 from synth_ai.lm.caching.initialize import get_cache_handler
+from synth_ai.lm.constants import OPENAI_REASONING_MODELS, SPECIAL_BASE_TEMPS
 from synth_ai.lm.tools.base import BaseTool
 from synth_ai.lm.vendors.base import BaseLMResponse
-from synth_ai.lm.constants import SPECIAL_BASE_TEMPS, OPENAI_REASONING_MODELS
 from synth_ai.lm.vendors.openai_standard import OpenAIStandard
 
 # Exceptions that should trigger retry logic for OpenAI API calls
-OPENAI_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (
+OPENAI_EXCEPTIONS_TO_RETRY: tuple[type[Exception], ...] = (
     pydantic_core._pydantic_core.ValidationError,
     openai.OpenAIError,
     openai.APIConnectionError,

@@ -36,10 +36,11 @@ OPENAI_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (
 class OpenAIStructuredOutputClient(OpenAIStandard):
     """
     OpenAI client with support for structured outputs.
-
+
     This client extends the standard OpenAI client to support structured outputs
     using OpenAI's native structured output feature or response format parameter.
     """
+
     def __init__(self, synth_logging: bool = True):
         if synth_logging:
             # print("Using synth logging - OpenAIStructuredOutputClient")

@@ -58,11 +59,11 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
     async def _hit_api_async_structured_output(
         self,
         model: str,
-        messages:
+        messages: list[dict[str, Any]],
         response_model: BaseModel,
         temperature: float,
         use_ephemeral_cache_only: bool = False,
-        tools:
+        tools: list[BaseTool] | None = None,
         reasoning_effort: str = "high",
     ) -> str:
         if tools:

@@ -81,7 +82,7 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
                 dict,
                 BaseLMResponse,
             ], f"Expected dict or BaseLMResponse, got {type(cache_result)}"
-            return cache_result["response"] if
+            return cache_result["response"] if isinstance(cache_result, dict) else cache_result
         if model in OPENAI_REASONING_MODELS:
             output = await self.async_client.beta.chat.completions.parse(
                 model=model,

@@ -109,11 +110,11 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
     def _hit_api_sync_structured_output(
         self,
         model: str,
-        messages:
+        messages: list[dict[str, Any]],
         response_model: BaseModel,
         temperature: float,
         use_ephemeral_cache_only: bool = False,
-        tools:
+        tools: list[BaseTool] | None = None,
         reasoning_effort: str = "high",
     ) -> str:
         if tools:

@@ -130,7 +131,7 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
                 dict,
                 BaseLMResponse,
             ], f"Expected dict or BaseLMResponse, got {type(cache_result)}"
-            return cache_result["response"] if
+            return cache_result["response"] if isinstance(cache_result, dict) else cache_result
         if model in OPENAI_REASONING_MODELS:
             output = self.sync_client.beta.chat.completions.parse(
                 model=model,
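
The reasoning-model branches above call the OpenAI SDK's structured-output helper, beta.chat.completions.parse, with a Pydantic response model. A minimal usage sketch outside this package; the model name and schema are placeholders:

    import openai
    from pydantic import BaseModel

    class Answer(BaseModel):
        text: str
        confidence: float

    client = openai.OpenAI()
    completion = client.beta.chat.completions.parse(
        model="gpt-4o-2024-08-06",  # placeholder model name
        messages=[{"role": "user", "content": "Answer and rate your confidence."}],
        response_format=Answer,  # the SDK parses the reply into this model
    )
    answer = completion.choices[0].message.parsed  # an Answer instance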