synth-ai 0.2.0__py3-none-any.whl → 0.2.1.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- synth_ai/__init__.py +28 -2
- synth_ai/core/system.py +4 -0
- synth_ai/environments/__init__.py +35 -0
- synth_ai/environments/environment/__init__.py +1 -0
- synth_ai/environments/environment/artifacts/__init__.py +1 -0
- synth_ai/environments/environment/artifacts/base.py +50 -0
- synth_ai/environments/environment/core.py +22 -0
- synth_ai/environments/environment/db/__init__.py +1 -0
- synth_ai/environments/environment/db/sqlite.py +45 -0
- synth_ai/environments/environment/registry.py +24 -0
- synth_ai/environments/environment/resources/sqlite.py +46 -0
- synth_ai/environments/environment/results.py +1 -0
- synth_ai/environments/environment/rewards/__init__.py +1 -0
- synth_ai/environments/environment/rewards/core.py +28 -0
- synth_ai/environments/environment/shared_engine.py +26 -0
- synth_ai/environments/environment/tools/__init__.py +34 -0
- synth_ai/environments/examples/__init__.py +1 -0
- synth_ai/environments/examples/crafter_classic/__init__.py +8 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_comprehensive_evaluation.py +58 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_browser.py +152 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_framework.py +1194 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_quick_evaluation.py +51 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_react_agent.py +872 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_trace_evaluation.py +1412 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/test_crafter_react_agent.py +1110 -0
- synth_ai/environments/examples/crafter_classic/config_logging.py +111 -0
- synth_ai/environments/examples/crafter_classic/engine.py +502 -0
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +63 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +5 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +74 -0
- synth_ai/environments/examples/crafter_classic/environment.py +255 -0
- synth_ai/environments/examples/crafter_classic/taskset.py +228 -0
- synth_ai/environments/examples/enron/agent_demos/test_synth_react.py +535 -0
- synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +156 -0
- synth_ai/environments/examples/enron/art_helpers/local_email_db.py +280 -0
- synth_ai/environments/examples/enron/art_helpers/types_enron.py +24 -0
- synth_ai/environments/examples/enron/engine.py +291 -0
- synth_ai/environments/examples/enron/environment.py +165 -0
- synth_ai/environments/examples/enron/taskset.py +112 -0
- synth_ai/environments/examples/enron/units/keyword_stats.py +111 -0
- synth_ai/environments/examples/enron/units/test_email_index.py +8 -0
- synth_ai/environments/examples/minigrid/__init__.py +48 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py +1188 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_quick_evaluation.py +47 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_react_agent.py +562 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_trace_evaluation.py +220 -0
- synth_ai/environments/examples/minigrid/agent_demos/test_minigrid_react_agent.py +393 -0
- synth_ai/environments/examples/minigrid/engine.py +589 -0
- synth_ai/environments/examples/minigrid/environment.py +274 -0
- synth_ai/environments/examples/minigrid/environment_mapping.py +242 -0
- synth_ai/environments/examples/minigrid/puzzle_loader.py +416 -0
- synth_ai/environments/examples/minigrid/taskset.py +583 -0
- synth_ai/environments/examples/minigrid/units/test_action_behavior.py +226 -0
- synth_ai/environments/examples/minigrid/units/test_debug_messages.py +83 -0
- synth_ai/environments/examples/minigrid/units/test_exploration.py +120 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_engine.py +214 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_environment.py +238 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_environment_mapping.py +301 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_taskset.py +210 -0
- synth_ai/environments/examples/nethack/__init__.py +7 -0
- synth_ai/environments/examples/nethack/achievements.py +337 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_evaluation_framework.py +981 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_quick_evaluation.py +74 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_react_agent.py +832 -0
- synth_ai/environments/examples/nethack/agent_demos/test_nethack_react_agent.py +1112 -0
- synth_ai/environments/examples/nethack/engine.py +738 -0
- synth_ai/environments/examples/nethack/environment.py +255 -0
- synth_ai/environments/examples/nethack/helpers/__init__.py +42 -0
- synth_ai/environments/examples/nethack/helpers/action_mapping.py +301 -0
- synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +401 -0
- synth_ai/environments/examples/nethack/helpers/observation_utils.py +433 -0
- synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +201 -0
- synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +268 -0
- synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +308 -0
- synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +430 -0
- synth_ai/environments/examples/nethack/taskset.py +323 -0
- synth_ai/environments/examples/nethack/units/test_nethack_engine.py +277 -0
- synth_ai/environments/examples/nethack/units/test_nethack_environment.py +281 -0
- synth_ai/environments/examples/nethack/units/test_nethack_taskset.py +213 -0
- synth_ai/environments/examples/nethack/units/test_recording.py +307 -0
- synth_ai/environments/examples/red/__init__.py +7 -0
- synth_ai/environments/examples/red/agent_demos/__init__.py +1 -0
- synth_ai/environments/examples/red/agent_demos/test_synth_react.py +1471 -0
- synth_ai/environments/examples/red/config_logging.py +110 -0
- synth_ai/environments/examples/red/engine.py +693 -0
- synth_ai/environments/examples/red/engine_helpers/__init__.py +1 -0
- synth_ai/environments/examples/red/engine_helpers/memory_map.py +28 -0
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +275 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +142 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +56 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +283 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +149 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +137 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +56 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +330 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +120 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +558 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +312 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +147 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +246 -0
- synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +367 -0
- synth_ai/environments/examples/red/engine_helpers/state_extraction.py +139 -0
- synth_ai/environments/examples/red/environment.py +235 -0
- synth_ai/environments/examples/red/taskset.py +77 -0
- synth_ai/environments/examples/red/test_fixes.py +125 -0
- synth_ai/environments/examples/red/test_fixes_mock.py +148 -0
- synth_ai/environments/examples/red/units/__init__.py +1 -0
- synth_ai/environments/examples/red/units/test_basic_functionality.py +97 -0
- synth_ai/environments/examples/red/units/test_button_press_requirements.py +217 -0
- synth_ai/environments/examples/red/units/test_engine.py +192 -0
- synth_ai/environments/examples/red/units/test_environment.py +455 -0
- synth_ai/environments/examples/red/units/test_exploration_strategy.py +227 -0
- synth_ai/environments/examples/red/units/test_integration.py +217 -0
- synth_ai/environments/examples/red/units/test_memory_extraction.py +111 -0
- synth_ai/environments/examples/red/units/test_menu_bug_reproduction.py +1100 -0
- synth_ai/environments/examples/red/units/test_movement_debug.py +255 -0
- synth_ai/environments/examples/red/units/test_pokemon_mcts_debug.py +163 -0
- synth_ai/environments/examples/red/units/test_pokemon_mcts_verbose.py +117 -0
- synth_ai/environments/examples/red/units/test_red_basic.py +145 -0
- synth_ai/environments/examples/red/units/test_red_comprehensive.py +323 -0
- synth_ai/environments/examples/red/units/test_retry_movement.py +195 -0
- synth_ai/environments/examples/red/units/test_reward_components.py +186 -0
- synth_ai/environments/examples/red/units/test_rom_integration.py +260 -0
- synth_ai/environments/examples/red/units/test_taskset.py +116 -0
- synth_ai/environments/examples/red/units/test_tree.py +448 -0
- synth_ai/environments/examples/sokoban/__init__.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/sokoban_full_eval.py +900 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_dspy_react.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_sokoban_react_agent.py +498 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_lats.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_locally.py +748 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_service.py +296 -0
- synth_ai/environments/examples/sokoban/engine.py +675 -0
- synth_ai/environments/examples/sokoban/engine_helpers/__init__.py +1 -0
- synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +656 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +17 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +3 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +129 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +370 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +331 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +305 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +66 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +114 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +122 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +394 -0
- synth_ai/environments/examples/sokoban/environment.py +228 -0
- synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +438 -0
- synth_ai/environments/examples/sokoban/puzzle_loader.py +311 -0
- synth_ai/environments/examples/sokoban/taskset.py +425 -0
- synth_ai/environments/examples/sokoban/units/astar_common.py +94 -0
- synth_ai/environments/examples/sokoban/units/test_building_task_set.py +49 -0
- synth_ai/environments/examples/sokoban/units/test_false_positive.py +120 -0
- synth_ai/environments/examples/sokoban/units/test_simple_run_through_environment.py +119 -0
- synth_ai/environments/examples/sokoban/units/test_sokoban_environment.py +98 -0
- synth_ai/environments/examples/sokoban/units/test_tree.py +364 -0
- synth_ai/environments/examples/tictactoe/__init__.py +1 -0
- synth_ai/environments/examples/tictactoe/agent_demos/test_synth_react.py +266 -0
- synth_ai/environments/examples/tictactoe/agent_demos/test_tictactoe_react_agent.py +470 -0
- synth_ai/environments/examples/tictactoe/engine.py +368 -0
- synth_ai/environments/examples/tictactoe/environment.py +239 -0
- synth_ai/environments/examples/tictactoe/taskset.py +214 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_engine.py +393 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_environment.py +493 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_taskset.py +191 -0
- synth_ai/environments/examples/verilog/__init__.py +10 -0
- synth_ai/environments/examples/verilog/agent_demos/test_synth_react.py +520 -0
- synth_ai/environments/examples/verilog/engine.py +328 -0
- synth_ai/environments/examples/verilog/environment.py +349 -0
- synth_ai/environments/examples/verilog/taskset.py +418 -0
- synth_ai/environments/examples/verilog/units/test_verilog_engine.py +466 -0
- synth_ai/environments/examples/verilog/units/test_verilog_environment.py +585 -0
- synth_ai/environments/examples/verilog/units/test_verilog_integration.py +383 -0
- synth_ai/environments/examples/verilog/units/test_verilog_taskset.py +457 -0
- synth_ai/environments/reproducibility/core.py +42 -0
- synth_ai/environments/reproducibility/tree.py +364 -0
- synth_ai/environments/service/app.py +78 -0
- synth_ai/environments/service/core_routes.py +775 -0
- synth_ai/environments/service/external_registry.py +57 -0
- synth_ai/environments/service/registry.py +9 -0
- synth_ai/environments/stateful/__init__.py +1 -0
- synth_ai/environments/stateful/core.py +28 -0
- synth_ai/environments/stateful/engine.py +21 -0
- synth_ai/environments/stateful/state.py +7 -0
- synth_ai/environments/tasks/api.py +19 -0
- synth_ai/environments/tasks/core.py +78 -0
- synth_ai/environments/tasks/filters.py +39 -0
- synth_ai/environments/tasks/utils.py +89 -0
- synth_ai/environments/v0_observability/history.py +3 -0
- synth_ai/environments/v0_observability/log.py +2 -0
- synth_ai/lm/caching/constants.py +1 -0
- synth_ai/{zyk/lms → lm}/caching/ephemeral.py +4 -8
- synth_ai/{zyk/lms → lm}/caching/handler.py +15 -15
- synth_ai/{zyk/lms → lm}/caching/initialize.py +2 -4
- synth_ai/{zyk/lms → lm}/caching/persistent.py +4 -10
- synth_ai/{zyk/lms → lm}/config.py +2 -1
- synth_ai/{zyk/lms → lm}/constants.py +2 -2
- synth_ai/{zyk/lms → lm}/core/all.py +10 -10
- synth_ai/{zyk/lms → lm}/core/main.py +57 -33
- synth_ai/{zyk/lms → lm}/core/vendor_clients.py +12 -10
- synth_ai/lm/cost/monitor.py +1 -0
- synth_ai/lm/cost/statefulness.py +1 -0
- synth_ai/lm/provider_support/__init__.py +8 -0
- synth_ai/lm/provider_support/anthropic.py +945 -0
- synth_ai/lm/provider_support/openai.py +1115 -0
- synth_ai/lm/provider_support/suppress_logging.py +31 -0
- synth_ai/{zyk/lms → lm}/structured_outputs/handler.py +58 -80
- synth_ai/{zyk/lms → lm}/structured_outputs/inject.py +6 -20
- synth_ai/{zyk/lms → lm}/structured_outputs/rehabilitate.py +6 -12
- synth_ai/{zyk/lms → lm}/vendors/core/anthropic_api.py +21 -30
- synth_ai/{zyk/lms → lm}/vendors/core/gemini_api.py +35 -32
- synth_ai/{zyk/lms → lm}/vendors/core/mistral_api.py +19 -28
- synth_ai/{zyk/lms → lm}/vendors/core/openai_api.py +26 -36
- synth_ai/{zyk/lms → lm}/vendors/openai_standard.py +29 -33
- synth_ai/{zyk/lms → lm}/vendors/retries.py +1 -1
- synth_ai/lm/vendors/supported/__init__.py +0 -0
- synth_ai/{zyk/lms → lm}/vendors/supported/custom_endpoint.py +131 -118
- synth_ai/{zyk/lms → lm}/vendors/supported/deepseek.py +4 -8
- synth_ai/{zyk/lms → lm}/vendors/supported/grok.py +6 -8
- synth_ai/{zyk/lms → lm}/vendors/supported/groq.py +1 -1
- synth_ai/{zyk/lms → lm}/vendors/supported/ollama.py +2 -2
- synth_ai/{zyk/lms → lm}/vendors/supported/openrouter.py +18 -16
- synth_ai/{zyk/lms → lm}/vendors/supported/together.py +1 -1
- synth_ai/tracing/__init__.py +0 -0
- synth_ai/tracing/abstractions.py +224 -0
- synth_ai/tracing/base_client.py +91 -0
- synth_ai/tracing/client_manager.py +131 -0
- synth_ai/tracing/config.py +140 -0
- synth_ai/tracing/context.py +146 -0
- synth_ai/tracing/decorators.py +679 -0
- synth_ai/tracing/events/__init__.py +0 -0
- synth_ai/tracing/events/manage.py +147 -0
- synth_ai/tracing/events/scope.py +86 -0
- synth_ai/tracing/events/store.py +227 -0
- synth_ai/tracing/immediate_client.py +152 -0
- synth_ai/tracing/local.py +18 -0
- synth_ai/tracing/log_client_base.py +74 -0
- synth_ai/tracing/retry_queue.py +187 -0
- synth_ai/tracing/trackers.py +515 -0
- synth_ai/tracing/upload.py +504 -0
- synth_ai/tracing/utils.py +9 -0
- synth_ai/zyk/__init__.py +28 -2
- synth_ai-0.2.1.dev0.dist-info/METADATA +349 -0
- synth_ai-0.2.1.dev0.dist-info/RECORD +261 -0
- {synth_ai-0.2.0.dist-info → synth_ai-0.2.1.dev0.dist-info}/WHEEL +1 -1
- synth_ai/zyk/lms/caching/constants.py +0 -1
- synth_ai/zyk/lms/cost/monitor.py +0 -1
- synth_ai/zyk/lms/cost/statefulness.py +0 -1
- synth_ai-0.2.0.dist-info/METADATA +0 -36
- synth_ai-0.2.0.dist-info/RECORD +0 -50
- /synth_ai/{zyk/lms/__init__.py → environments/reproducibility/helpers.py} +0 -0
- /synth_ai/{zyk/lms/caching → lm}/__init__.py +0 -0
- /synth_ai/{zyk/lms/core → lm/caching}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/caching/dbs.py +0 -0
- /synth_ai/{zyk/lms/cost → lm/core}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/core/exceptions.py +0 -0
- /synth_ai/{zyk/lms/structured_outputs → lm/cost}/__init__.py +0 -0
- /synth_ai/{zyk/lms/vendors → lm/structured_outputs}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/tools/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/tools/base.py +0 -0
- /synth_ai/{zyk/lms/vendors/core → lm/vendors}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/vendors/base.py +0 -0
- /synth_ai/{zyk/lms/vendors/local → lm/vendors/core}/__init__.py +0 -0
- /synth_ai/{zyk/lms/vendors/supported → lm/vendors/local}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/vendors/local/ollama.py +0 -0
- {synth_ai-0.2.0.dist-info → synth_ai-0.2.1.dev0.dist-info/licenses}/LICENSE +0 -0
- {synth_ai-0.2.0.dist-info → synth_ai-0.2.1.dev0.dist-info}/top_level.txt +0 -0
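Taken together, the renames above amount to a module move: everything under synth_ai/zyk/lms/ now lives at synth_ai/lm/, with synth_ai/zyk/__init__.py kept (+28 -2), presumably as a compatibility shim, alongside two large additions (the synth_ai/environments tree and the synth_ai/tracing package). A minimal sketch of the resulting import change, using an import that appears verbatim in the vendor hunks below; whether the old synth_ai.zyk.lms path still resolves in 0.2.1.dev0 is not shown in this diff:

    # 0.2.0 layout (per the {zyk/lms → lm} renames above):
    #   from synth_ai.zyk.lms.caching.initialize import get_cache_handler

    # 0.2.1.dev0 layout, as imported throughout the hunks below:
    from synth_ai.lm.caching.initialize import get_cache_handler

    # Keyword form matches the call sites in the vendor hunks.
    handler = get_cache_handler(use_ephemeral_cache_only=True)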
synth_ai/{zyk/lms → lm}/vendors/core/gemini_api.py

@@ -4,18 +4,18 @@ import os
 import warnings
 from typing import Any, Dict, List, Optional, Tuple, Type
 
-
+import google.genai as genai
 from google.api_core.exceptions import ResourceExhausted
 from google.genai import types
-from synth_ai.zyk.lms.caching.initialize import get_cache_handler
-from synth_ai.zyk.lms.tools.base import BaseTool
-from synth_ai.zyk.lms.vendors.base import BaseLMResponse, VendorBase
-from synth_ai.zyk.lms.constants import (
+from synth_ai.lm.caching.initialize import get_cache_handler
+from synth_ai.lm.tools.base import BaseTool
+from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
+from synth_ai.lm.constants import (
     SPECIAL_BASE_TEMPS,
     GEMINI_REASONING_MODELS,
     GEMINI_THINKING_BUDGETS,
 )
-from synth_ai.zyk.lms.vendors.retries import BACKOFF_TOLERANCE, MAX_BACKOFF, backoff
+from synth_ai.lm.vendors.retries import BACKOFF_TOLERANCE, MAX_BACKOFF, backoff
 import logging
 
 
@@ -59,7 +59,6 @@ class GeminiAPI(VendorBase):
         self.used_for_structured_outputs = used_for_structured_outputs
         self.exceptions_to_retry = exceptions_to_retry
 
-
     def get_aliased_model_name(self, model_name: str) -> str:
         if model_name in ALIASES:
             return ALIASES[model_name]
@@ -77,7 +76,9 @@ class GeminiAPI(VendorBase):
             if m["role"].lower() not in ["user", "assistant"]:
                 continue
             role = "user" if m["role"] == "user" else "assistant"
-            contents.append(types.Content(role=role, parts=[types.Part.from_text(text=m["content"])]))
+            contents.append(
+                types.Content(role=role, parts=[types.Part.from_text(text=m["content"])])
+            )
         return contents
 
     @staticmethod
@@ -86,19 +87,19 @@ class GeminiAPI(VendorBase):
         out: List[types.Tool] = []
         for t in tools:
             # Assume t.to_gemini_tool() now correctly returns a FunctionDeclaration
-            #func_decl = t.to_gemini_tool()
+            # func_decl = t.to_gemini_tool()
             if isinstance(t, dict):
                 func_decl = t
             else:
                 func_decl = t.to_gemini_tool()
             if not isinstance(func_decl, types.FunctionDeclaration):
-
-
-                tool_dict = func_decl
+                # Or fetch schema parts if to_gemini_tool still returns dict
+                # This depends on BaseTool.to_gemini_tool implementation
+                tool_dict = func_decl  # Assuming it's a dict for now
                 func_decl = types.FunctionDeclaration(
-                    name=tool_dict['name'],
-                    description=tool_dict['description'],
-                    parameters=tool_dict['parameters'],
+                    name=tool_dict["name"],
+                    description=tool_dict["description"],
+                    parameters=tool_dict["parameters"],  # Expects OpenAPI-style dict
                 )
             out.append(types.Tool(function_declarations=[func_decl]))
         return out
@@ -118,21 +119,23 @@ class GeminiAPI(VendorBase):
             cfg_kwargs["thinking_config"] = types.ThinkingConfig(
                 thinking_budget=GEMINI_THINKING_BUDGETS[reasoning_effort]
             )
-
+
         if any(m["role"] == "system" for m in messages):
-            cfg_kwargs["system_instruction"] = next(m["content"] for m in messages if m["role"] == "system")
-
+            cfg_kwargs["system_instruction"] = next(
+                m["content"] for m in messages if m["role"] == "system"
+            )
+
         generation_config = types.GenerateContentConfig(
             **cfg_kwargs,
             tool_config=lm_config.get("tool_config") if lm_config else None,
-            tools=self._tools_to_genai(tools) if tools else None
+            tools=self._tools_to_genai(tools) if tools else None,
         )
         client = _get_client()
         resp = await client.aio.models.generate_content(
             model=model_name,
             contents=self._msg_to_contents(messages),
             config=generation_config,
-            #safety_settings=SAFETY_SETTINGS,
+            # safety_settings=SAFETY_SETTINGS,
         )
         return self._extract(resp)
 
@@ -152,11 +155,13 @@ class GeminiAPI(VendorBase):
                 thinking_budget=GEMINI_THINKING_BUDGETS[reasoning_effort]
             )
         if any(m["role"] == "system" for m in messages):
-            cfg_kwargs["system_instruction"] = next(m["content"] for m in messages if m["role"] == "system")
+            cfg_kwargs["system_instruction"] = next(
+                m["content"] for m in messages if m["role"] == "system"
+            )
         generation_config = types.GenerateContentConfig(
             **cfg_kwargs,
             tool_config=lm_config.get("tool_config") if lm_config else None,
-            tools=self._tools_to_genai(tools) if tools else None
+            tools=self._tools_to_genai(tools) if tools else None,
         )
 
         client = _get_client()
@@ -173,8 +178,8 @@ class GeminiAPI(VendorBase):
         # Extract text, handling cases where it might be missing
         try:
             text = response.text
-        except ValueError:
-            text = ""
+        except ValueError:  # Handle cases where only non-text parts exist
+            text = ""
 
         calls = []
         # Access parts through candidates[0].content
@@ -209,9 +214,9 @@ class GeminiAPI(VendorBase):
         reasoning_effort: str = "high",
         tools: Optional[List[BaseTool]] = None,
     ) -> BaseLMResponse:
-        assert (
-            lm_config.get("response_model", None) is None
-        ), "response_model is not supported for standard calls"
+        assert lm_config.get("response_model", None) is None, (
+            "response_model is not supported for standard calls"
+        )
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         lm_config["reasoning_effort"] = reasoning_effort
         cache_result = used_cache_handler.hit_managed_cache(
@@ -258,12 +263,10 @@ class GeminiAPI(VendorBase):
         reasoning_effort: str = "high",
         tools: Optional[List[BaseTool]] = None,
     ) -> BaseLMResponse:
-        assert (
-            lm_config.get("response_model", None) is None
-        ), "response_model is not supported for standard calls"
-        used_cache_handler = get_cache_handler(
-            use_ephemeral_cache_only=use_ephemeral_cache_only
+        assert lm_config.get("response_model", None) is None, (
+            "response_model is not supported for standard calls"
         )
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only=use_ephemeral_cache_only)
         lm_config["reasoning_effort"] = reasoning_effort
         cache_result = used_cache_handler.hit_managed_cache(
             model, messages, lm_config=lm_config, tools=tools
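For orientation on the reflowed _msg_to_contents lines above: the helper converts OpenAI-style chat messages into google-genai types.Content objects, while system messages are routed separately through system_instruction. A standalone sketch under the same assumptions as the hunks (messages are {"role": ..., "content": ...} dicts; the function name here is illustrative):

    from google.genai import types

    def msgs_to_contents(messages: list[dict]) -> list[types.Content]:
        # Mirrors the reflowed lines above: system messages are skipped here and
        # are instead passed via types.GenerateContentConfig(system_instruction=...).
        contents = []
        for m in messages:
            if m["role"].lower() not in ["user", "assistant"]:
                continue
            role = "user" if m["role"] == "user" else "assistant"
            contents.append(
                types.Content(role=role, parts=[types.Part.from_text(text=m["content"])])
            )
        return contents

    contents = msgs_to_contents([{"role": "user", "content": "Hello"}])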
synth_ai/{zyk/lms → lm}/vendors/core/mistral_api.py

@@ -6,11 +6,11 @@ import pydantic
 from mistralai import Mistral  # use Mistral as both sync and async client
 from pydantic import BaseModel
 
-from synth_ai.zyk.lms.caching.initialize import get_cache_handler
-from synth_ai.zyk.lms.tools.base import BaseTool
-from synth_ai.zyk.lms.vendors.base import BaseLMResponse, VendorBase
-from synth_ai.zyk.lms.constants import SPECIAL_BASE_TEMPS
-from synth_ai.zyk.lms.vendors.core.openai_api import OpenAIStructuredOutputClient
+from synth_ai.lm.caching.initialize import get_cache_handler
+from synth_ai.lm.tools.base import BaseTool
+from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
+from synth_ai.lm.constants import SPECIAL_BASE_TEMPS
+from synth_ai.lm.vendors.core.openai_api import OpenAIStructuredOutputClient
 
 # Since the mistralai package doesn't expose an exceptions module,
 # we fallback to catching all Exceptions for retry.
@@ -47,9 +47,9 @@ class MistralAPI(VendorBase):
         reasoning_effort: str = "high",
         tools: Optional[List[BaseTool]] = None,
     ) -> BaseLMResponse:
-        assert (
-            lm_config.get("response_model", None) is None
-        ), "response_model is not supported for standard calls"
+        assert lm_config.get("response_model", None) is None, (
+            "response_model is not supported for standard calls"
+        )
         assert not (response_model and tools), "Cannot provide both response_model and tools"
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         lm_config["reasoning_effort"] = reasoning_effort
@@ -69,20 +69,15 @@ class MistralAPI(VendorBase):
             )
         )
 
-        mistral_messages = [
-            {"role": msg["role"], "content": msg["content"]} for msg in messages
-        ]
+        mistral_messages = [{"role": msg["role"], "content": msg["content"]} for msg in messages]
         functions = [tool.to_mistral_tool() for tool in tools] if tools else None
         params = {
             "model": model,
             "messages": mistral_messages,
             "max_tokens": lm_config.get("max_tokens", 4096),
-            "temperature": lm_config.get(
-                "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
-            ),
+            "temperature": lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
             "stream": False,
             "tool_choice": "auto" if functions else None,
-
         }
         if response_model:
             params["response_format"] = response_model
@@ -142,11 +137,11 @@ class MistralAPI(VendorBase):
         reasoning_effort: str = "high",
         tools: Optional[List[BaseTool]] = None,
     ) -> BaseLMResponse:
-        assert (
-            lm_config.get("response_model", None) is None
-        ), "response_model is not supported for standard calls"
+        assert lm_config.get("response_model", None) is None, (
+            "response_model is not supported for standard calls"
+        )
         assert not (response_model and tools), "Cannot provide both response_model and tools"
-
+
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         lm_config["reasoning_effort"] = reasoning_effort
         cache_result = used_cache_handler.hit_managed_cache(
@@ -165,21 +160,17 @@ class MistralAPI(VendorBase):
             )
         )
 
-        mistral_messages = [
-            {"role": msg["role"], "content": msg["content"]} for msg in messages
-        ]
+        mistral_messages = [{"role": msg["role"], "content": msg["content"]} for msg in messages]
         functions = [tool.to_mistral_tool() for tool in tools] if tools else None
-
+
         params = {
             "model": model,
             "messages": mistral_messages,
             "max_tokens": lm_config.get("max_tokens", 4096),
-            "temperature": lm_config.get(
-                "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
-            ),
+            "temperature": lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
             "stream": False,
             "tool_choice": "auto" if functions else None,
-            #"tools": functions,
+            # "tools": functions,
         }
         if response_model:
             params["response_format"] = response_model
@@ -328,4 +319,4 @@ if __name__ == "__main__":
 
     response = asyncio.run(run_async())
     t2 = time.time()
-    print(f"Got {len(response.name)} chars in {t2-t} seconds")
+    print(f"Got {len(response.name)} chars in {t2 - t} seconds")
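The "temperature" line collapsed in the hunks above encodes a two-level default: the caller's lm_config wins, then a per-model base temperature from SPECIAL_BASE_TEMPS, then 0. A tiny worked example (the dictionary contents here are hypothetical; SPECIAL_BASE_TEMPS itself is imported from synth_ai.lm.constants in the hunk above):

    SPECIAL_BASE_TEMPS = {"o1-mini": 1.0}  # hypothetical entry, for illustration only

    def resolve_temperature(lm_config: dict, model: str) -> float:
        # Same expression as the collapsed "temperature" line in the hunks above.
        return lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0))

    assert resolve_temperature({}, "o1-mini") == 1.0                    # per-model default
    assert resolve_temperature({}, "mistral-small-latest") == 0         # global fallback
    assert resolve_temperature({"temperature": 0.7}, "o1-mini") == 0.7  # caller wins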
synth_ai/{zyk/lms → lm}/vendors/core/openai_api.py

@@ -7,11 +7,11 @@ import pydantic_core
 # from openai import AsyncOpenAI, OpenAI
 from pydantic import BaseModel
 
-from synth_ai.zyk.lms.caching.initialize import get_cache_handler
-from synth_ai.zyk.lms.tools.base import BaseTool
-from synth_ai.zyk.lms.vendors.base import BaseLMResponse
-from synth_ai.zyk.lms.constants import SPECIAL_BASE_TEMPS, OPENAI_REASONING_MODELS
-from synth_ai.zyk.lms.vendors.openai_standard import OpenAIStandard
+from synth_ai.lm.caching.initialize import get_cache_handler
+from synth_ai.lm.tools.base import BaseTool
+from synth_ai.lm.vendors.base import BaseLMResponse
+from synth_ai.lm.constants import SPECIAL_BASE_TEMPS, OPENAI_REASONING_MODELS
+from synth_ai.lm.vendors.openai_standard import OpenAIStandard
 
 OPENAI_EXCEPTIONS_TO_RETRY: Tuple[Type[Exception], ...] = (
     pydantic_core._pydantic_core.ValidationError,
@@ -29,7 +29,7 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
     def __init__(self, synth_logging: bool = True):
         if synth_logging:
             # print("Using synth logging - OpenAIStructuredOutputClient")
-            from
+            from synth_ai.lm.provider_support.openai import AsyncOpenAI, OpenAI
         else:
             # print("Not using synth logging - OpenAIStructuredOutputClient")
             from openai import AsyncOpenAI, OpenAI
@@ -54,29 +54,25 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
         if tools:
             raise ValueError("Tools are not supported for async structured output")
         # "Hit client")
-        lm_config = {"temperature": temperature, "response_model": response_model, "reasoning_effort": reasoning_effort}
-        used_cache_handler = get_cache_handler(
-            use_ephemeral_cache_only=use_ephemeral_cache_only
-        )
-        cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config
-        )
+        lm_config = {
+            "temperature": temperature,
+            "response_model": response_model,
+            "reasoning_effort": reasoning_effort,
+        }
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only=use_ephemeral_cache_only)
+        cache_result = used_cache_handler.hit_managed_cache(model, messages, lm_config=lm_config)
         if cache_result:
             # print("Hit cache")
             assert type(cache_result) in [
                 dict,
                 BaseLMResponse,
             ], f"Expected dict or BaseLMResponse, got {type(cache_result)}"
-            return (
-                cache_result["response"] if type(cache_result) == dict else cache_result
-            )
+            return cache_result["response"] if type(cache_result) == dict else cache_result
         if model in OPENAI_REASONING_MODELS:
             output = await self.async_client.beta.chat.completions.parse(
                 model=model,
                 messages=messages,
-                temperature=lm_config.get(
-                    "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
-                ),
+                temperature=lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
                 response_format=response_model,
                 reasoning_effort=reasoning_effort,
             )
@@ -93,9 +89,7 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
             structured_output=api_result,
             tool_calls=None,
         )
-        used_cache_handler.add_to_managed_cache(
-            model, messages, lm_config, output=lm_response
-        )
+        used_cache_handler.add_to_managed_cache(model, messages, lm_config, output=lm_response)
         return lm_response
 
     def _hit_api_sync_structured_output(
@@ -110,28 +104,24 @@ class OpenAIStructuredOutputClient(OpenAIStandard):
     ) -> str:
         if tools:
             raise ValueError("Tools are not supported for sync structured output")
-        lm_config = {"temperature": temperature, "response_model": response_model, "reasoning_effort": reasoning_effort}
-        used_cache_handler = get_cache_handler(
-            use_ephemeral_cache_only=use_ephemeral_cache_only
-        )
-        cache_result = used_cache_handler.hit_managed_cache(
-            model, messages, lm_config=lm_config
-        )
+        lm_config = {
+            "temperature": temperature,
+            "response_model": response_model,
+            "reasoning_effort": reasoning_effort,
+        }
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only=use_ephemeral_cache_only)
+        cache_result = used_cache_handler.hit_managed_cache(model, messages, lm_config=lm_config)
         if cache_result:
             assert type(cache_result) in [
                 dict,
                 BaseLMResponse,
             ], f"Expected dict or BaseLMResponse, got {type(cache_result)}"
-            return (
-                cache_result["response"] if type(cache_result) == dict else cache_result
-            )
+            return cache_result["response"] if type(cache_result) == dict else cache_result
         if model in OPENAI_REASONING_MODELS:
             output = self.sync_client.beta.chat.completions.parse(
                 model=model,
                 messages=messages,
-                temperature=lm_config.get(
-                    "temperature", SPECIAL_BASE_TEMPS.get(model, 0)
-                ),
+                temperature=lm_config.get("temperature", SPECIAL_BASE_TEMPS.get(model, 0)),
                 response_format=response_model,
                 reasoning_effort=reasoning_effort,
             )
@@ -158,7 +148,7 @@ class OpenAIPrivate(OpenAIStandard):
     def __init__(self, synth_logging: bool = True):
         if synth_logging:
             # print("Using synth logging - OpenAIPrivate")
-            from
+            from synth_ai.lm.provider_support.openai import AsyncOpenAI, OpenAI
         else:
             # print("Not using synth logging - OpenAIPrivate")
             from openai import AsyncOpenAI, OpenAI
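The structured-output paths reformatted above delegate to the OpenAI SDK's parse helper, which accepts a Pydantic class as response_format and returns a parsed instance. A minimal standalone sketch of that underlying call (model name and schema are placeholders; the client class above layers caching and retries on top):

    from openai import OpenAI
    from pydantic import BaseModel

    class Verdict(BaseModel):  # placeholder response_model
        summary: str
        score: int

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    completion = client.beta.chat.completions.parse(
        model="gpt-4o-2024-08-06",
        messages=[{"role": "user", "content": "Summarize and score this change."}],
        response_format=Verdict,
    )
    verdict = completion.choices[0].message.parsed  # a Verdict instance (or None on refusal)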
synth_ai/{zyk/lms → lm}/vendors/openai_standard.py

@@ -5,13 +5,13 @@ import openai
 import pydantic_core
 from pydantic import BaseModel
 
-from synth_ai.zyk.lms.caching.initialize import (
+from synth_ai.lm.caching.initialize import (
     get_cache_handler,
 )
-from synth_ai.zyk.lms.tools.base import BaseTool
-from synth_ai.zyk.lms.vendors.base import BaseLMResponse, VendorBase
-from synth_ai.zyk.lms.constants import SPECIAL_BASE_TEMPS
-from synth_ai.zyk.lms.vendors.retries import MAX_BACKOFF
+from synth_ai.lm.tools.base import BaseTool
+from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
+from synth_ai.lm.constants import SPECIAL_BASE_TEMPS
+from synth_ai.lm.vendors.retries import MAX_BACKOFF
 import backoff
 
 DEFAULT_EXCEPTIONS_TO_RETRY = (
@@ -24,9 +24,7 @@ DEFAULT_EXCEPTIONS_TO_RETRY = (
 )
 
 
-def special_orion_transform(
-    model: str, messages: List[Dict[str, Any]]
-) -> List[Dict[str, Any]]:
+def special_orion_transform(model: str, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
     if "o1-" in model:
         messages = [
             {
@@ -76,9 +74,9 @@ class OpenAIStandard(VendorBase):
         reasoning_effort: str = "high",
         tools: Optional[List[BaseTool]] = None,
     ) -> BaseLMResponse:
-        assert (
-            lm_config.get("response_model", None) is None
-        ), "response_model is not supported for standard calls"
+        assert lm_config.get("response_model", None) is None, (
+            "response_model is not supported for standard calls"
+        )
         messages = special_orion_transform(model, messages)
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
         lm_config["reasoning_effort"] = reasoning_effort
@@ -156,13 +154,11 @@ class OpenAIStandard(VendorBase):
         reasoning_effort: str = "high",
         tools: Optional[List[BaseTool]] = None,
     ) -> BaseLMResponse:
-        assert (
-            lm_config.get("response_model", None) is None
-        ), "response_model is not supported for standard calls"
-        messages = special_orion_transform(model, messages)
-        used_cache_handler = get_cache_handler(
-            use_ephemeral_cache_only=use_ephemeral_cache_only
+        assert lm_config.get("response_model", None) is None, (
+            "response_model is not supported for standard calls"
         )
+        messages = special_orion_transform(model, messages)
+        used_cache_handler = get_cache_handler(use_ephemeral_cache_only=use_ephemeral_cache_only)
         lm_config["reasoning_effort"] = reasoning_effort
         cache_result = used_cache_handler.hit_managed_cache(
             model, messages, lm_config=lm_config, tools=tools
@@ -231,12 +227,14 @@ class OpenAIStandard(VendorBase):
         reasoning_effort: str = "high",
         tools: Optional[List[BaseTool]] = None,
     ) -> BaseLMResponse:
-        lm_config = {"temperature": temperature, "response_model": response_model, "reasoning_effort": reasoning_effort}
+        lm_config = {
+            "temperature": temperature,
+            "response_model": response_model,
+            "reasoning_effort": reasoning_effort,
+        }
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
-        cache_result: Union[BaseLMResponse, None] = (
-            used_cache_handler.hit_managed_cache(
-                model, messages, lm_config=lm_config, tools=tools
-            )
+        cache_result: Union[BaseLMResponse, None] = used_cache_handler.hit_managed_cache(
+            model, messages, lm_config=lm_config, tools=tools
         )
         if cache_result is not None:
             return cache_result
@@ -265,9 +263,7 @@ class OpenAIStandard(VendorBase):
 
         output = await self.async_client.chat.completions.create(**api_params)
 
-        structured_output_api_result = response_model(
-            **output.choices[0].message.content
-        )
+        structured_output_api_result = response_model(**output.choices[0].message.content)
         tool_calls = output.choices[0].message.tool_calls
         lm_response = BaseLMResponse(
             raw_response=output.choices[0].message.content,
@@ -290,12 +286,14 @@ class OpenAIStandard(VendorBase):
         reasoning_effort: str = "high",
         tools: Optional[List[BaseTool]] = None,
     ) -> BaseLMResponse:
-        lm_config = {"temperature": temperature, "response_model": response_model, "reasoning_effort": reasoning_effort}
+        lm_config = {
+            "temperature": temperature,
+            "response_model": response_model,
+            "reasoning_effort": reasoning_effort,
+        }
         used_cache_handler = get_cache_handler(use_ephemeral_cache_only)
-        cache_result: Union[BaseLMResponse, None] = (
-            used_cache_handler.hit_managed_cache(
-                model, messages, lm_config=lm_config, tools=tools
-            )
+        cache_result: Union[BaseLMResponse, None] = used_cache_handler.hit_managed_cache(
+            model, messages, lm_config=lm_config, tools=tools
         )
         if cache_result is not None:
             return cache_result
@@ -324,9 +322,7 @@ class OpenAIStandard(VendorBase):
 
         output = self.sync_client.chat.completions.create(**api_params)
 
-        structured_output_api_result = response_model(
-            **output.choices[0].message.content
-        )
+        structured_output_api_result = response_model(**output.choices[0].message.content)
         tool_calls = output.choices[0].message.tool_calls
         lm_response = BaseLMResponse(
             raw_response=output.choices[0].message.content,
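All four vendor files above share the read-through cache shape these hunks reformat: look up (model, messages, lm_config, tools), return on a hit, otherwise call the vendor and write the response back. A minimal sketch of that flow with a hypothetical in-memory handler standing in for whatever get_cache_handler returns:

    import json

    class EphemeralCache:
        """Hypothetical stand-in for the handler returned by get_cache_handler()."""

        def __init__(self):
            self._store = {}

        def _key(self, model, messages, lm_config):
            return json.dumps([model, messages, lm_config], sort_keys=True, default=str)

        def hit_managed_cache(self, model, messages, lm_config):
            return self._store.get(self._key(model, messages, lm_config))

        def add_to_managed_cache(self, model, messages, lm_config, output):
            self._store[self._key(model, messages, lm_config)] = output

    def call_with_cache(cache, model, messages, lm_config, call_api):
        # Same shape as the reformatted vendor methods: hit -> return, miss -> call and store.
        cached = cache.hit_managed_cache(model, messages, lm_config)
        if cached is not None:
            return cached
        response = call_api(model, messages, lm_config)
        cache.add_to_managed_cache(model, messages, lm_config, output=response)
        return response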