synth-ai 0.2.4.dev6__py3-none-any.whl → 0.2.4.dev8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- synth_ai/__init__.py +18 -9
- synth_ai/cli/__init__.py +10 -5
- synth_ai/cli/balance.py +25 -32
- synth_ai/cli/calc.py +2 -3
- synth_ai/cli/demo.py +3 -5
- synth_ai/cli/legacy_root_backup.py +58 -32
- synth_ai/cli/man.py +22 -19
- synth_ai/cli/recent.py +9 -8
- synth_ai/cli/root.py +58 -13
- synth_ai/cli/status.py +13 -6
- synth_ai/cli/traces.py +45 -21
- synth_ai/cli/watch.py +40 -37
- synth_ai/config/base_url.py +47 -2
- synth_ai/core/experiment.py +1 -2
- synth_ai/environments/__init__.py +2 -6
- synth_ai/environments/environment/artifacts/base.py +3 -1
- synth_ai/environments/environment/db/sqlite.py +1 -1
- synth_ai/environments/environment/registry.py +19 -20
- synth_ai/environments/environment/resources/sqlite.py +2 -3
- synth_ai/environments/environment/rewards/core.py +3 -2
- synth_ai/environments/environment/tools/__init__.py +6 -4
- synth_ai/environments/examples/crafter_classic/__init__.py +1 -1
- synth_ai/environments/examples/crafter_classic/engine.py +13 -13
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +1 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +2 -1
- synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +2 -1
- synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +3 -2
- synth_ai/environments/examples/crafter_classic/environment.py +16 -15
- synth_ai/environments/examples/crafter_classic/taskset.py +2 -2
- synth_ai/environments/examples/crafter_classic/trace_hooks_v3.py +2 -3
- synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +2 -1
- synth_ai/environments/examples/crafter_custom/crafter/__init__.py +2 -2
- synth_ai/environments/examples/crafter_custom/crafter/config.py +2 -2
- synth_ai/environments/examples/crafter_custom/crafter/env.py +1 -5
- synth_ai/environments/examples/crafter_custom/crafter/objects.py +1 -2
- synth_ai/environments/examples/crafter_custom/crafter/worldgen.py +1 -2
- synth_ai/environments/examples/crafter_custom/dataset_builder.py +5 -5
- synth_ai/environments/examples/crafter_custom/environment.py +13 -13
- synth_ai/environments/examples/crafter_custom/run_dataset.py +5 -5
- synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +2 -2
- synth_ai/environments/examples/enron/art_helpers/local_email_db.py +5 -4
- synth_ai/environments/examples/enron/art_helpers/types_enron.py +2 -1
- synth_ai/environments/examples/enron/engine.py +18 -14
- synth_ai/environments/examples/enron/environment.py +12 -11
- synth_ai/environments/examples/enron/taskset.py +7 -7
- synth_ai/environments/examples/minigrid/__init__.py +6 -6
- synth_ai/environments/examples/minigrid/engine.py +6 -6
- synth_ai/environments/examples/minigrid/environment.py +6 -6
- synth_ai/environments/examples/minigrid/puzzle_loader.py +3 -2
- synth_ai/environments/examples/minigrid/taskset.py +13 -13
- synth_ai/environments/examples/nethack/achievements.py +1 -1
- synth_ai/environments/examples/nethack/engine.py +8 -7
- synth_ai/environments/examples/nethack/environment.py +10 -9
- synth_ai/environments/examples/nethack/helpers/__init__.py +8 -9
- synth_ai/environments/examples/nethack/helpers/action_mapping.py +1 -1
- synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +2 -1
- synth_ai/environments/examples/nethack/helpers/observation_utils.py +1 -1
- synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +3 -4
- synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +6 -5
- synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +5 -5
- synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +7 -6
- synth_ai/environments/examples/nethack/taskset.py +5 -5
- synth_ai/environments/examples/red/engine.py +9 -8
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +7 -7
- synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +2 -1
- synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +3 -2
- synth_ai/environments/examples/red/engine_helpers/state_extraction.py +2 -1
- synth_ai/environments/examples/red/environment.py +18 -15
- synth_ai/environments/examples/red/taskset.py +5 -3
- synth_ai/environments/examples/sokoban/engine.py +16 -13
- synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +3 -2
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +2 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +1 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +7 -5
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +1 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +2 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +5 -4
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +3 -2
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +2 -1
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +5 -4
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +1 -1
- synth_ai/environments/examples/sokoban/environment.py +15 -14
- synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +5 -3
- synth_ai/environments/examples/sokoban/puzzle_loader.py +3 -2
- synth_ai/environments/examples/sokoban/taskset.py +13 -10
- synth_ai/environments/examples/tictactoe/engine.py +6 -6
- synth_ai/environments/examples/tictactoe/environment.py +8 -7
- synth_ai/environments/examples/tictactoe/taskset.py +6 -5
- synth_ai/environments/examples/verilog/engine.py +4 -3
- synth_ai/environments/examples/verilog/environment.py +11 -10
- synth_ai/environments/examples/verilog/taskset.py +14 -12
- synth_ai/environments/examples/wordle/__init__.py +5 -5
- synth_ai/environments/examples/wordle/engine.py +32 -25
- synth_ai/environments/examples/wordle/environment.py +21 -16
- synth_ai/environments/examples/wordle/helpers/generate_instances_wordfreq.py +6 -6
- synth_ai/environments/examples/wordle/taskset.py +20 -12
- synth_ai/environments/reproducibility/core.py +1 -1
- synth_ai/environments/reproducibility/tree.py +21 -21
- synth_ai/environments/service/app.py +3 -2
- synth_ai/environments/service/core_routes.py +104 -110
- synth_ai/environments/service/external_registry.py +1 -2
- synth_ai/environments/service/registry.py +1 -1
- synth_ai/environments/stateful/core.py +1 -2
- synth_ai/environments/stateful/engine.py +1 -1
- synth_ai/environments/tasks/api.py +4 -4
- synth_ai/environments/tasks/core.py +14 -12
- synth_ai/environments/tasks/filters.py +6 -4
- synth_ai/environments/tasks/utils.py +13 -11
- synth_ai/evals/base.py +2 -3
- synth_ai/experimental/synth_oss.py +4 -4
- synth_ai/http.py +102 -0
- synth_ai/inference/__init__.py +7 -0
- synth_ai/inference/client.py +20 -0
- synth_ai/jobs/client.py +246 -0
- synth_ai/learning/__init__.py +24 -0
- synth_ai/learning/client.py +149 -0
- synth_ai/learning/config.py +43 -0
- synth_ai/learning/constants.py +29 -0
- synth_ai/learning/ft_client.py +59 -0
- synth_ai/learning/gateway.py +1 -3
- synth_ai/learning/health.py +43 -0
- synth_ai/learning/jobs.py +205 -0
- synth_ai/learning/prompts/banking77_injection_eval.py +15 -10
- synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +26 -14
- synth_ai/learning/prompts/mipro.py +61 -52
- synth_ai/learning/prompts/random_search.py +42 -43
- synth_ai/learning/prompts/run_mipro_banking77.py +32 -20
- synth_ai/learning/prompts/run_random_search_banking77.py +71 -52
- synth_ai/learning/rl_client.py +256 -0
- synth_ai/learning/sse.py +58 -0
- synth_ai/learning/validators.py +48 -0
- synth_ai/lm/__init__.py +5 -5
- synth_ai/lm/caching/ephemeral.py +9 -9
- synth_ai/lm/caching/handler.py +20 -20
- synth_ai/lm/caching/persistent.py +10 -10
- synth_ai/lm/config.py +3 -3
- synth_ai/lm/constants.py +7 -7
- synth_ai/lm/core/all.py +17 -3
- synth_ai/lm/core/exceptions.py +0 -2
- synth_ai/lm/core/main.py +26 -41
- synth_ai/lm/core/main_v3.py +33 -10
- synth_ai/lm/core/synth_models.py +48 -0
- synth_ai/lm/core/vendor_clients.py +26 -22
- synth_ai/lm/injection.py +7 -8
- synth_ai/lm/overrides.py +21 -19
- synth_ai/lm/provider_support/__init__.py +1 -1
- synth_ai/lm/provider_support/anthropic.py +15 -15
- synth_ai/lm/provider_support/openai.py +23 -21
- synth_ai/lm/structured_outputs/handler.py +34 -32
- synth_ai/lm/structured_outputs/inject.py +24 -27
- synth_ai/lm/structured_outputs/rehabilitate.py +19 -15
- synth_ai/lm/tools/base.py +17 -16
- synth_ai/lm/unified_interface.py +17 -18
- synth_ai/lm/vendors/base.py +20 -18
- synth_ai/lm/vendors/core/anthropic_api.py +36 -27
- synth_ai/lm/vendors/core/gemini_api.py +31 -36
- synth_ai/lm/vendors/core/mistral_api.py +19 -19
- synth_ai/lm/vendors/core/openai_api.py +42 -13
- synth_ai/lm/vendors/openai_standard.py +158 -101
- synth_ai/lm/vendors/openai_standard_responses.py +74 -61
- synth_ai/lm/vendors/retries.py +9 -1
- synth_ai/lm/vendors/supported/custom_endpoint.py +38 -28
- synth_ai/lm/vendors/supported/deepseek.py +10 -10
- synth_ai/lm/vendors/supported/grok.py +8 -8
- synth_ai/lm/vendors/supported/ollama.py +2 -1
- synth_ai/lm/vendors/supported/openrouter.py +11 -9
- synth_ai/lm/vendors/synth_client.py +425 -75
- synth_ai/lm/warmup.py +8 -7
- synth_ai/rl/__init__.py +30 -0
- synth_ai/rl/contracts.py +32 -0
- synth_ai/rl/env_keys.py +137 -0
- synth_ai/rl/secrets.py +19 -0
- synth_ai/scripts/verify_rewards.py +100 -0
- synth_ai/task/__init__.py +10 -0
- synth_ai/task/contracts.py +120 -0
- synth_ai/task/health.py +28 -0
- synth_ai/task/validators.py +12 -0
- synth_ai/tracing/__init__.py +22 -10
- synth_ai/tracing_v1/__init__.py +22 -20
- synth_ai/tracing_v3/__init__.py +7 -7
- synth_ai/tracing_v3/abstractions.py +56 -52
- synth_ai/tracing_v3/config.py +4 -2
- synth_ai/tracing_v3/db_config.py +6 -8
- synth_ai/tracing_v3/decorators.py +29 -30
- synth_ai/tracing_v3/examples/basic_usage.py +12 -12
- synth_ai/tracing_v3/hooks.py +24 -22
- synth_ai/tracing_v3/llm_call_record_helpers.py +85 -98
- synth_ai/tracing_v3/lm_call_record_abstractions.py +2 -4
- synth_ai/tracing_v3/migration_helper.py +3 -5
- synth_ai/tracing_v3/replica_sync.py +30 -32
- synth_ai/tracing_v3/session_tracer.py +158 -31
- synth_ai/tracing_v3/storage/__init__.py +1 -1
- synth_ai/tracing_v3/storage/base.py +8 -7
- synth_ai/tracing_v3/storage/config.py +4 -4
- synth_ai/tracing_v3/storage/factory.py +4 -4
- synth_ai/tracing_v3/storage/utils.py +9 -9
- synth_ai/tracing_v3/turso/__init__.py +3 -3
- synth_ai/tracing_v3/turso/daemon.py +9 -9
- synth_ai/tracing_v3/turso/manager.py +278 -48
- synth_ai/tracing_v3/turso/models.py +77 -19
- synth_ai/tracing_v3/utils.py +5 -5
- synth_ai/v0/tracing/abstractions.py +28 -28
- synth_ai/v0/tracing/base_client.py +9 -9
- synth_ai/v0/tracing/client_manager.py +7 -7
- synth_ai/v0/tracing/config.py +7 -7
- synth_ai/v0/tracing/context.py +6 -6
- synth_ai/v0/tracing/decorators.py +6 -5
- synth_ai/v0/tracing/events/manage.py +1 -1
- synth_ai/v0/tracing/events/store.py +5 -4
- synth_ai/v0/tracing/immediate_client.py +4 -5
- synth_ai/v0/tracing/local.py +3 -3
- synth_ai/v0/tracing/log_client_base.py +4 -5
- synth_ai/v0/tracing/retry_queue.py +5 -6
- synth_ai/v0/tracing/trackers.py +25 -25
- synth_ai/v0/tracing/upload.py +6 -0
- synth_ai/v0/tracing_v1/__init__.py +1 -1
- synth_ai/v0/tracing_v1/abstractions.py +28 -28
- synth_ai/v0/tracing_v1/base_client.py +9 -9
- synth_ai/v0/tracing_v1/client_manager.py +7 -7
- synth_ai/v0/tracing_v1/config.py +7 -7
- synth_ai/v0/tracing_v1/context.py +6 -6
- synth_ai/v0/tracing_v1/decorators.py +7 -6
- synth_ai/v0/tracing_v1/events/manage.py +1 -1
- synth_ai/v0/tracing_v1/events/store.py +5 -4
- synth_ai/v0/tracing_v1/immediate_client.py +4 -5
- synth_ai/v0/tracing_v1/local.py +3 -3
- synth_ai/v0/tracing_v1/log_client_base.py +4 -5
- synth_ai/v0/tracing_v1/retry_queue.py +5 -6
- synth_ai/v0/tracing_v1/trackers.py +25 -25
- synth_ai/v0/tracing_v1/upload.py +25 -24
- synth_ai/zyk/__init__.py +1 -0
- synth_ai-0.2.4.dev8.dist-info/METADATA +635 -0
- synth_ai-0.2.4.dev8.dist-info/RECORD +317 -0
- synth_ai/tui/__init__.py +0 -1
- synth_ai/tui/__main__.py +0 -13
- synth_ai/tui/cli/__init__.py +0 -1
- synth_ai/tui/cli/query_experiments.py +0 -165
- synth_ai/tui/cli/query_experiments_v3.py +0 -165
- synth_ai/tui/dashboard.py +0 -329
- synth_ai-0.2.4.dev6.dist-info/METADATA +0 -203
- synth_ai-0.2.4.dev6.dist-info/RECORD +0 -299
- {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev8.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev8.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev8.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.2.4.dev6.dist-info → synth_ai-0.2.4.dev8.dist-info}/top_level.txt +0 -0
synth_ai/lm/vendors/synth_client.py (0.2.4.dev6 → 0.2.4.dev8)

@@ -4,24 +4,164 @@ Provides async and sync interfaces matching OpenAI's API.
 """
 
 import asyncio
-import httpx
 import json
 import logging
-
+import os
+from typing import Any, Optional
+
+import httpx
 
 from ..config import SynthConfig
 
 logger = logging.getLogger(__name__)
 
 
+class ChatInterface:
+    """Nested interface to match OpenAI client structure."""
+
+    def __init__(self, client):
+        self._client = client
+        self.completions = self
+
+    async def create(self, **kwargs):
+        """Create chat completion - matches OpenAI interface."""
+        result = await self._client.chat_completions_create(**kwargs)
+        # If streaming was requested and the result is an async-iterable, return it directly
+        if kwargs.get("stream") and hasattr(result, "__aiter__"):
+            return result
+        # Convert dict response to object-like structure for OpenAI compatibility
+        return OpenAIResponse(result)
+
+
+class OpenAIResponse:
+    """Wrapper to make dict response behave like OpenAI response object."""
+
+    def __init__(self, data: dict):
+        self._data = data
+
+    @property
+    def choices(self):
+        return [OpenAIChoice(choice) for choice in self._data.get("choices", [])]
+
+    @property
+    def usage(self):
+        return self._data.get("usage")
+
+    @property
+    def id(self):
+        return self._data.get("id")
+
+    @property
+    def model(self):
+        return self._data.get("model")
+
+    @property
+    def object(self):
+        return self._data.get("object")
+
+
+class OpenAIChoice:
+    """Wrapper for choice objects."""
+
+    def __init__(self, data: dict):
+        self._data = data
+
+    @property
+    def message(self):
+        return OpenAIMessage(self._data.get("message", {}))
+
+    @property
+    def finish_reason(self):
+        return self._data.get("finish_reason")
+
+
+class OpenAIMessage:
+    """Wrapper for message objects."""
+
+    def __init__(self, data: dict):
+        self._data = data
+
+    @property
+    def role(self):
+        return self._data.get("role")
+
+    @property
+    def content(self):
+        return self._data.get("content")
+
+    @property
+    def tool_calls(self):
+        return self._data.get("tool_calls")
+
+
+class StreamDelta:
+    """Wrapper for stream delta objects."""
+
+    def __init__(self, data: dict):
+        self._data = data or {}
+
+    @property
+    def content(self) -> Optional[str]:
+        return self._data.get("content")
+
+
+class StreamChoice:
+    """Wrapper for stream choice objects."""
+
+    def __init__(self, data: dict):
+        self._data = data or {}
+
+    @property
+    def delta(self) -> StreamDelta:
+        return StreamDelta(self._data.get("delta", {}))
+
+
+class StreamChunk:
+    """Wrapper for stream chunk to expose .choices[0].delta.content."""
+
+    def __init__(self, data: dict):
+        self._data = data or {}
+
+    @property
+    def choices(self):
+        return [StreamChoice(c) for c in self._data.get("choices", [])]
+
+
+def _wrap_stream_chunk(data: dict) -> StreamChunk:
+    return StreamChunk(data)
+
+
 class AsyncSynthClient:
     """Async client with OpenAI-compatible interface."""
 
-    def __init__(
-
+    def __init__(
+        self,
+        config: SynthConfig | None = None,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
+        **_: Any,
+    ):
+        """Initialize with config or OpenAI-style parameters/env.
+
+        Precedence: explicit args -> OPENAI_* env -> SYNTH_* env -> SynthConfig.from_env().
+        """
+        if config is None and (api_key or base_url):
+            config = SynthConfig(
+                base_url=base_url or os.getenv("OPENAI_API_BASE") or os.getenv("SYNTH_BASE_URL"),
+                api_key=api_key or os.getenv("OPENAI_API_KEY") or os.getenv("SYNTH_API_KEY"),
+            )
+        elif config is None and (os.getenv("OPENAI_API_BASE") and os.getenv("OPENAI_API_KEY")):
+            config = SynthConfig(
+                base_url=os.getenv("OPENAI_API_BASE"),
+                api_key=os.getenv("OPENAI_API_KEY"),
+            )
         self.config = config or SynthConfig.from_env()
         self._client = None
 
+        # Create nested OpenAI-style interface
+        self.chat = ChatInterface(self)
+        self.completions = self.chat # Alias for backward compatibility
+
     async def __aenter__(self):
         self._client = httpx.AsyncClient(
             timeout=self.config.timeout,
@@ -50,15 +190,15 @@ class AsyncSynthClient:
     async def responses_create(
         self,
         model: str,
-        messages:
-        previous_response_id:
-        tools:
-        tool_choice:
+        messages: list[dict[str, Any]],
+        previous_response_id: str | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        tool_choice: str | dict[str, Any] | None = "auto",
         **kwargs,
-    ) ->
+    ) -> dict[str, Any]:
         """
         Create response using Synth Responses API.
-
+
         Args:
             model: Model identifier
             messages: List of message dicts with 'role' and 'content'
@@ -66,71 +206,97 @@ class AsyncSynthClient:
             tools: List of available tools
             tool_choice: How to choose tools
             **kwargs: Additional parameters
-
+
         Returns:
             Responses API-compatible response dict
         """
         await self._ensure_client()
-
+
         # Build payload for Responses API
         payload = {
             "model": model,
             "messages": messages,
         }
-
+
         # Add optional parameters
         if previous_response_id is not None:
             payload["previous_response_id"] = previous_response_id
         if tools is not None:
             payload["tools"] = tools
             payload["tool_choice"] = tool_choice
-
+
         # Add any additional kwargs
         payload.update(kwargs)
-
+
         # Retry logic
         for attempt in range(self.config.max_retries):
             try:
                 url = f"{self.config.get_base_url_without_v1()}/v1/responses"
                 response = await self._client.post(url, json=payload)
-
+
                 if response.status_code == 200:
                     return response.json()
-
+
                 # Handle rate limits with exponential backoff
                 if response.status_code == 429:
                     wait_time = 2**attempt
                     await asyncio.sleep(wait_time)
                     continue
-
+
                 # Other errors
                 response.raise_for_status()
-
+
             except Exception as e:
                 if attempt == self.config.max_retries - 1:
                     logger.error(f"Failed after {self.config.max_retries} attempts: {e}")
                     raise
                 await asyncio.sleep(2**attempt)
-
+
         raise Exception(f"Failed to create response after {self.config.max_retries} attempts")
 
     async def chat_completions_create(
         self,
         model: str,
-        messages:
+        messages: list[dict[str, Any]],
+        temperature: float = 0.7,
+        max_tokens: int | None = None,
+        top_p: float = 1.0,
+        frequency_penalty: float = 0.0,
+        presence_penalty: float = 0.0,
+        stop: str | list[str] | None = None,
+        stream: bool = False,
+        tools: list[dict[str, Any]] | None = None,
+        tool_choice: str | dict[str, Any] | None = "auto",
+        response_format: dict[str, Any] | None = None,
+        seed: int | None = None,
+        **kwargs,
+    ) -> dict[str, Any]:
+        """
+        Create chat completion with OpenAI-compatible API.
+        This method provides the OpenAI client interface structure.
+        """
+        return await self._chat_completions_create(
+            model, messages, temperature, max_tokens, top_p, frequency_penalty,
+            presence_penalty, stop, stream, tools, tool_choice, response_format, seed, **kwargs
+        )
+
+    async def _chat_completions_create(
+        self,
+        model: str,
+        messages: list[dict[str, Any]],
         temperature: float = 0.7,
-        max_tokens:
+        max_tokens: int | None = None,
         top_p: float = 1.0,
         frequency_penalty: float = 0.0,
         presence_penalty: float = 0.0,
-        stop:
+        stop: str | list[str] | None = None,
         stream: bool = False,
-        tools:
-        tool_choice:
-        response_format:
-        seed:
+        tools: list[dict[str, Any]] | None = None,
+        tool_choice: str | dict[str, Any] | None = "auto",
+        response_format: dict[str, Any] | None = None,
+        seed: int | None = None,
         **kwargs,
-    ) ->
+    ) -> dict[str, Any]:
         """
         Create chat completion with OpenAI-compatible API.
 
@@ -179,37 +345,92 @@ class AsyncSynthClient:
         if seed is not None:
             payload["seed"] = seed
 
-        # Add any additional kwargs
+        # Add any additional kwargs (including thinking_mode and thinking_budget)
         payload.update(kwargs)
 
+        # Apply env defaults for thinking if not set explicitly
+        try:
+            if "thinking_mode" not in payload:
+                env_mode = os.getenv("SYNTH_THINKING_MODE")
+                if env_mode in ("think", "no_think"):
+                    payload["thinking_mode"] = env_mode
+            if "thinking_budget" not in payload:
+                env_budget = os.getenv("SYNTH_THINKING_BUDGET")
+                if env_budget and str(env_budget).strip().isdigit():
+                    payload["thinking_budget"] = int(env_budget)
+        except Exception:
+            pass
+
+        # Local warn if budget exceeds max_tokens (do not mutate payload)
+        try:
+            bt = payload.get("thinking_budget")
+            mt = payload.get("max_tokens")
+            if isinstance(bt, int) and isinstance(mt, int) and bt > mt:
+                logger.warning(
+                    "thinking_budget (%s) exceeds max_tokens (%s) – forwarding as-is",
+                    str(bt), str(mt)
+                )
+        except Exception:
+            pass
+
         # Retry logic
         for attempt in range(self.config.max_retries):
             try:
                 url = f"{self.config.get_base_url_without_v1()}/v1/chat/completions"
-
-
-
-                print(f"🔍 SYNTH DEBUG:
-
-
+                _debug_client = os.getenv("SYNTH_CLIENT_DEBUG") == "1"
+                if _debug_client:
+                    print(f"🔍 SYNTH DEBUG: Making request to URL: {url}")
+                    print(f"🔍 SYNTH DEBUG: Payload keys: {list(payload.keys())}")
+                    if "tools" in payload:
+                        # Only print counts, avoid dumping tool schemas unless explicitly enabled
+                        print(f"🔍 SYNTH DEBUG: Tools in payload: {len(payload['tools'])} tools")
+
+                # If streaming requested, return an async stream adapter
+                if stream:
+                    async def _astream():
+                        await self._ensure_client()
+                        async with self._client.stream("POST", url, json=payload) as r: # type: ignore
+                            r.raise_for_status()
+                            async for line in r.aiter_lines():
+                                if not line:
+                                    continue
+                                if line.startswith("data:"):
+                                    data_line = line[len("data:") :].strip()
+                                    if data_line == "[DONE]":
+                                        return
+                                    try:
+                                        chunk = json.loads(data_line)
+                                        yield _wrap_stream_chunk(chunk)
+                                    except json.JSONDecodeError:
+                                        logger.debug("Non-JSON stream line: %s", data_line)
+
+                    class _AsyncStream:
+                        def __aiter__(self):
+                            return _astream()
+
+                        async def __aenter__(self):
+                            return self
+
+                        async def __aexit__(self, *exc):
+                            return False
+
+                    return _AsyncStream()
+
                 response = await self._client.post(url, json=payload)
-
-
-
+
+                if _debug_client:
+                    print(f"🔍 SYNTH DEBUG: Response status: {response.status_code}")
+
                 if response.status_code == 200:
                     result = response.json()
-
-
-
-
-
-                    message
-
-
-                            print(f"🔍 SYNTH DEBUG: Tool calls: {message['tool_calls']}")
-                        else:
-                            print(f"🔍 SYNTH DEBUG: No tool_calls in message")
-                            print(f"🔍 SYNTH DEBUG: Message content: {message.get('content', 'N/A')[:200]}...")
+                    if _debug_client:
+                        print(f"🔍 SYNTH DEBUG: Response keys: {list(result.keys())}")
+                        if "choices" in result and result["choices"]:
+                            choice = result["choices"][0]
+                            print(f"🔍 SYNTH DEBUG: Choice keys: {list(choice.keys())}")
+                            if "message" in choice:
+                                message = choice["message"]
+                                print(f"🔍 SYNTH DEBUG: Message keys: {list(message.keys())}")
                     return result
 
                 # Handle rate limits with exponential backoff
@@ -245,14 +466,48 @@ class AsyncSynthClient:
             await self._client.aclose()
 
 
+class SyncChatInterface:
+    """Nested interface to match OpenAI client structure (sync version)."""
+
+    def __init__(self, client):
+        self._client = client
+        self.completions = self
+
+    def create(self, **kwargs):
+        """Create chat completion - matches OpenAI interface."""
+        result = self._client.chat_completions_create(**kwargs)
+        # Convert dict response to object-like structure for OpenAI compatibility
+        return OpenAIResponse(result)
+
+
 class SyncSynthClient:
     """Sync client with OpenAI-compatible interface."""
 
-    def __init__(
-
+    def __init__(
+        self,
+        config: SynthConfig | None = None,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
+        **_: Any,
+    ):
+        """Initialize with config or OpenAI-style parameters/env."""
+        if config is None and (api_key or base_url):
+            config = SynthConfig(
+                base_url=base_url or os.getenv("OPENAI_API_BASE") or os.getenv("SYNTH_BASE_URL"),
+                api_key=api_key or os.getenv("OPENAI_API_KEY") or os.getenv("SYNTH_API_KEY"),
+            )
+        elif config is None and (os.getenv("OPENAI_API_BASE") and os.getenv("OPENAI_API_KEY")):
+            config = SynthConfig(
+                base_url=os.getenv("OPENAI_API_BASE"),
+                api_key=os.getenv("OPENAI_API_KEY"),
+            )
         self.config = config or SynthConfig.from_env()
         self._client = None
 
+        # Create nested OpenAI-style interface
+        self.chat = SyncChatInterface(self)
+        self.completions = self.chat # Alias for backward compatibility
+
     def __enter__(self):
         self._client = httpx.Client(
             timeout=self.config.timeout,
@@ -281,69 +536,70 @@ class SyncSynthClient:
     def responses_create(
         self,
         model: str,
-        messages:
-        previous_response_id:
-        tools:
-        tool_choice:
+        messages: list[dict[str, Any]],
+        previous_response_id: str | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        tool_choice: str | dict[str, Any] | None = "auto",
         **kwargs,
-    ) ->
+    ) -> dict[str, Any]:
         """
         Create response using Synth Responses API (sync version).
-
+
         See AsyncSynthClient.responses_create for full parameter documentation.
         """
         self._ensure_client()
-
+
         # Build payload for Responses API
         payload = {
             "model": model,
             "messages": messages,
         }
-
+
         # Add optional parameters
         if previous_response_id is not None:
             payload["previous_response_id"] = previous_response_id
         if tools is not None:
             payload["tools"] = tools
             payload["tool_choice"] = tool_choice
-
+
         # Add any additional kwargs
         payload.update(kwargs)
-
+
         # Retry logic
         for attempt in range(self.config.max_retries):
             try:
                 response = self._client.post(
                     f"{self.config.get_base_url_without_v1()}/v1/responses", json=payload
                 )
-
+
                 if response.status_code == 200:
                     return response.json()
-
+
                 # Handle rate limits
                 if response.status_code == 429:
                     wait_time = 2**attempt
                     logger.warning(f"Rate limited, waiting {wait_time}s...")
                     import time
+
                     time.sleep(wait_time)
                     continue
-
+
                 # Other errors
                 error_msg = f"API error {response.status_code}: {response.text}"
                 logger.error(error_msg)
                 raise Exception(error_msg)
-
+
             except httpx.TimeoutException:
                 if attempt < self.config.max_retries - 1:
                     logger.warning(f"Timeout on attempt {attempt + 1}, retrying...")
                     continue
                 raise
-
+
         raise Exception(f"Failed after {self.config.max_retries} attempts")
 
     def chat_completions_create(
-        self, model: str, messages:
-    ) ->
+        self, model: str, messages: list[dict[str, Any]], **kwargs
+    ) -> dict[str, Any]:
         """
         Create chat completion with OpenAI-compatible API (sync version).
 
@@ -393,7 +649,7 @@ class SyncSynthClient:
 
 
 # Factory functions for easy instantiation
-def create_async_client(config:
+def create_async_client(config: SynthConfig | None = None) -> AsyncSynthClient:
     """
     Create async Synth client.
 
@@ -406,7 +662,7 @@ def create_async_client(config: Optional[SynthConfig] = None) -> AsyncSynthClien
     return AsyncSynthClient(config)
 
 
-def create_sync_client(config:
+def create_sync_client(config: SynthConfig | None = None) -> SyncSynthClient:
    """
     Create sync Synth client.
 
@@ -419,10 +675,104 @@ def create_sync_client(config: Optional[SynthConfig] = None) -> SyncSynthClient:
     return SyncSynthClient(config)
 
 
+# Drop-in replacements for OpenAI clients
+# These allow Synth to be used as a complete replacement for OpenAI
+
+class AsyncOpenAI(AsyncSynthClient):
+    """
+    Drop-in replacement for openai.AsyncOpenAI.
+
+    Use Synth backend instead of OpenAI while maintaining the same API.
+
+    Example:
+        from synth_ai.lm.vendors.synth_client import AsyncOpenAI
+
+        client = AsyncOpenAI(
+            api_key="sk_live_...",
+            base_url="https://synth-backend-dev-docker.onrender.com/api"
+        )
+
+        # Works exactly like openai.AsyncOpenAI!
+        response = await client.chat.completions.create(
+            model="Qwen/Qwen3-0.6B",
+            messages=[{"role": "user", "content": "Hello"}]
+        )
+    """
+
+    def __init__(self, api_key: str | None = None, base_url: str | None = None, **kwargs):
+        """
+        Initialize AsyncOpenAI-compatible Synth client.
+
+        Args:
+            api_key: Synth API key (if not provided, uses SYNTH_API_KEY env var)
+            base_url: Synth base URL (if not provided, uses OPENAI_API_BASE env var)
+            **kwargs: Additional arguments passed to AsyncSynthClient
+        """
+        # Handle OpenAI-style initialization
+        from ..config import SynthConfig
+        if api_key or base_url:
+            config = SynthConfig(
+                base_url=base_url or os.getenv("OPENAI_API_BASE", "https://synth-backend-dev-docker.onrender.com/api"),
+                api_key=api_key or os.getenv("OPENAI_API_KEY", "")
+            )
+        else:
+            # Fallback to environment variables (OPENAI_* first, then SYNTH_*)
+            env_base = os.getenv("OPENAI_API_BASE") or os.getenv("SYNTH_BASE_URL")
+            env_key = os.getenv("OPENAI_API_KEY") or os.getenv("SYNTH_API_KEY")
+            config = SynthConfig(base_url=env_base, api_key=env_key) if env_base and env_key else None
+
+        super().__init__(config, **kwargs)
+
+
+class OpenAI(SyncSynthClient):
+    """
+    Drop-in replacement for openai.OpenAI.
+
+    Synchronous version of AsyncOpenAI for Synth backend.
+    """
+
+    def __init__(self, api_key: str | None = None, base_url: str | None = None, **kwargs):
+        """
+        Initialize OpenAI-compatible Synth client.
+
+        Args:
+            api_key: Synth API key (if not provided, uses SYNTH_API_KEY env var)
+            base_url: Synth base URL (if not provided, uses OPENAI_API_BASE env var)
+            **kwargs: Additional arguments passed to SyncSynthClient
+        """
+        # Handle OpenAI-style initialization
+        from ..config import SynthConfig
+        if api_key or base_url:
+            config = SynthConfig(
+                base_url=base_url or os.getenv("OPENAI_API_BASE", "https://synth-backend-dev-docker.onrender.com/api"),
+                api_key=api_key or os.getenv("OPENAI_API_KEY", "")
+            )
+        else:
+            env_base = os.getenv("OPENAI_API_BASE") or os.getenv("SYNTH_BASE_URL")
+            env_key = os.getenv("OPENAI_API_KEY") or os.getenv("SYNTH_API_KEY")
+            config = SynthConfig(base_url=env_base, api_key=env_key) if env_base and env_key else None
+
+        super().__init__(config, **kwargs)
+
+
+# Convenience imports for easy usage
+__all__ = [
+    "AsyncSynthClient",
+    "SyncSynthClient",
+    "AsyncOpenAI", # Drop-in replacement for openai.AsyncOpenAI
+    "OpenAI", # Drop-in replacement for openai.OpenAI
+    "create_async_client",
+    "create_sync_client",
+    "create_chat_completion_async",
+    "create_chat_completion_sync",
+    "SynthConfig",
+]
+
+
 # Convenience functions for one-off requests
 async def create_chat_completion_async(
-    model: str, messages:
-) ->
+    model: str, messages: list[dict[str, Any]], config: SynthConfig | None = None, **kwargs
+) -> dict[str, Any]:
     """
     Create a chat completion with automatic client management.
 
@@ -440,8 +790,8 @@ async def create_chat_completion_async(
 
 
 def create_chat_completion_sync(
-    model: str, messages:
-) ->
+    model: str, messages: list[dict[str, Any]], config: SynthConfig | None = None, **kwargs
+) -> dict[str, Any]:
    """
     Create a chat completion with automatic client management (sync version).
 
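The additions above (the `AsyncOpenAI` drop-in class, the `StreamChunk`/`StreamChoice`/`StreamDelta` wrappers, and the `SYNTH_THINKING_MODE` / `SYNTH_THINKING_BUDGET` env defaults) could be exercised roughly as in the sketch below. This is a minimal, hedged example based only on the code shown in this diff; the model name, API key, and base URL are illustrative placeholders, not fixed values.

```python
import asyncio
import os

from synth_ai.lm.vendors.synth_client import AsyncOpenAI

# Optional: env defaults that _chat_completions_create picks up when the caller
# does not pass thinking_mode / thinking_budget explicitly (values are examples).
os.environ["SYNTH_THINKING_MODE"] = "no_think"
os.environ["SYNTH_THINKING_BUDGET"] = "256"


async def main() -> None:
    # Placeholder credentials/URL; mirrors the docstring example in the diff.
    client = AsyncOpenAI(
        api_key="sk_live_...",
        base_url="https://synth-backend-dev-docker.onrender.com/api",
    )

    # Non-streaming: ChatInterface.create wraps the response dict in OpenAIResponse.
    response = await client.chat.completions.create(
        model="Qwen/Qwen3-0.6B",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)

    # Streaming: the async stream adapter is returned directly and yields
    # StreamChunk objects exposing .choices[0].delta.content.
    stream = await client.chat.completions.create(
        model="Qwen/Qwen3-0.6B",
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    async for chunk in stream:
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")


asyncio.run(main())
```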