synth-ai 0.2.4.dev7__py3-none-any.whl → 0.2.4.dev8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. synth_ai/__init__.py +1 -1
  2. synth_ai/cli/balance.py +3 -15
  3. synth_ai/config/base_url.py +47 -0
  4. synth_ai/http.py +102 -0
  5. synth_ai/inference/__init__.py +7 -0
  6. synth_ai/inference/client.py +20 -0
  7. synth_ai/jobs/client.py +246 -0
  8. synth_ai/learning/__init__.py +24 -0
  9. synth_ai/learning/client.py +149 -0
  10. synth_ai/learning/config.py +43 -0
  11. synth_ai/learning/constants.py +29 -0
  12. synth_ai/learning/ft_client.py +59 -0
  13. synth_ai/learning/health.py +43 -0
  14. synth_ai/learning/jobs.py +205 -0
  15. synth_ai/learning/rl_client.py +256 -0
  16. synth_ai/learning/sse.py +58 -0
  17. synth_ai/learning/validators.py +48 -0
  18. synth_ai/lm/core/main_v3.py +13 -0
  19. synth_ai/lm/core/synth_models.py +48 -0
  20. synth_ai/lm/core/vendor_clients.py +9 -6
  21. synth_ai/lm/vendors/core/openai_api.py +31 -3
  22. synth_ai/lm/vendors/openai_standard.py +45 -14
  23. synth_ai/lm/vendors/supported/custom_endpoint.py +12 -2
  24. synth_ai/lm/vendors/synth_client.py +372 -28
  25. synth_ai/rl/__init__.py +30 -0
  26. synth_ai/rl/contracts.py +32 -0
  27. synth_ai/rl/env_keys.py +137 -0
  28. synth_ai/rl/secrets.py +19 -0
  29. synth_ai/scripts/verify_rewards.py +100 -0
  30. synth_ai/task/__init__.py +10 -0
  31. synth_ai/task/contracts.py +120 -0
  32. synth_ai/task/health.py +28 -0
  33. synth_ai/task/validators.py +12 -0
  34. synth_ai/tracing_v3/hooks.py +3 -1
  35. synth_ai/tracing_v3/session_tracer.py +123 -2
  36. synth_ai/tracing_v3/turso/manager.py +218 -0
  37. synth_ai/tracing_v3/turso/models.py +53 -0
  38. synth_ai-0.2.4.dev8.dist-info/METADATA +635 -0
  39. {synth_ai-0.2.4.dev7.dist-info → synth_ai-0.2.4.dev8.dist-info}/RECORD +43 -25
  40. synth_ai/tui/__init__.py +0 -1
  41. synth_ai/tui/__main__.py +0 -13
  42. synth_ai/tui/cli/__init__.py +0 -1
  43. synth_ai/tui/cli/query_experiments.py +0 -164
  44. synth_ai/tui/cli/query_experiments_v3.py +0 -164
  45. synth_ai/tui/dashboard.py +0 -340
  46. synth_ai-0.2.4.dev7.dist-info/METADATA +0 -193
  47. {synth_ai-0.2.4.dev7.dist-info → synth_ai-0.2.4.dev8.dist-info}/WHEEL +0 -0
  48. {synth_ai-0.2.4.dev7.dist-info → synth_ai-0.2.4.dev8.dist-info}/entry_points.txt +0 -0
  49. {synth_ai-0.2.4.dev7.dist-info → synth_ai-0.2.4.dev8.dist-info}/licenses/LICENSE +0 -0
  50. {synth_ai-0.2.4.dev7.dist-info → synth_ai-0.2.4.dev8.dist-info}/top_level.txt +0 -0
synth_ai/lm/vendors/synth_client.py CHANGED
@@ -6,7 +6,8 @@ Provides async and sync interfaces matching OpenAI's API.
 import asyncio
 import json
 import logging
-from typing import Any
+import os
+from typing import Any, Optional
 
 import httpx
 
@@ -15,14 +16,152 @@ from ..config import SynthConfig
 logger = logging.getLogger(__name__)
 
 
+class ChatInterface:
+    """Nested interface to match OpenAI client structure."""
+
+    def __init__(self, client):
+        self._client = client
+        self.completions = self
+
+    async def create(self, **kwargs):
+        """Create chat completion - matches OpenAI interface."""
+        result = await self._client.chat_completions_create(**kwargs)
+        # If streaming was requested and the result is an async-iterable, return it directly
+        if kwargs.get("stream") and hasattr(result, "__aiter__"):
+            return result
+        # Convert dict response to object-like structure for OpenAI compatibility
+        return OpenAIResponse(result)
+
+
+class OpenAIResponse:
+    """Wrapper to make dict response behave like OpenAI response object."""
+
+    def __init__(self, data: dict):
+        self._data = data
+
+    @property
+    def choices(self):
+        return [OpenAIChoice(choice) for choice in self._data.get("choices", [])]
+
+    @property
+    def usage(self):
+        return self._data.get("usage")
+
+    @property
+    def id(self):
+        return self._data.get("id")
+
+    @property
+    def model(self):
+        return self._data.get("model")
+
+    @property
+    def object(self):
+        return self._data.get("object")
+
+
+class OpenAIChoice:
+    """Wrapper for choice objects."""
+
+    def __init__(self, data: dict):
+        self._data = data
+
+    @property
+    def message(self):
+        return OpenAIMessage(self._data.get("message", {}))
+
+    @property
+    def finish_reason(self):
+        return self._data.get("finish_reason")
+
+
+class OpenAIMessage:
+    """Wrapper for message objects."""
+
+    def __init__(self, data: dict):
+        self._data = data
+
+    @property
+    def role(self):
+        return self._data.get("role")
+
+    @property
+    def content(self):
+        return self._data.get("content")
+
+    @property
+    def tool_calls(self):
+        return self._data.get("tool_calls")
+
+
+class StreamDelta:
+    """Wrapper for stream delta objects."""
+
+    def __init__(self, data: dict):
+        self._data = data or {}
+
+    @property
+    def content(self) -> Optional[str]:
+        return self._data.get("content")
+
+
+class StreamChoice:
+    """Wrapper for stream choice objects."""
+
+    def __init__(self, data: dict):
+        self._data = data or {}
+
+    @property
+    def delta(self) -> StreamDelta:
+        return StreamDelta(self._data.get("delta", {}))
+
+
+class StreamChunk:
+    """Wrapper for stream chunk to expose .choices[0].delta.content."""
+
+    def __init__(self, data: dict):
+        self._data = data or {}
+
+    @property
+    def choices(self):
+        return [StreamChoice(c) for c in self._data.get("choices", [])]
+
+
+def _wrap_stream_chunk(data: dict) -> StreamChunk:
+    return StreamChunk(data)
+
+
 class AsyncSynthClient:
     """Async client with OpenAI-compatible interface."""
 
-    def __init__(self, config: SynthConfig | None = None):
-        """Initialize with config from environment if not provided."""
+    def __init__(
+        self,
+        config: SynthConfig | None = None,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
+        **_: Any,
+    ):
+        """Initialize with config or OpenAI-style parameters/env.
+
+        Precedence: explicit args -> OPENAI_* env -> SYNTH_* env -> SynthConfig.from_env().
+        """
+        if config is None and (api_key or base_url):
+            config = SynthConfig(
+                base_url=base_url or os.getenv("OPENAI_API_BASE") or os.getenv("SYNTH_BASE_URL"),
+                api_key=api_key or os.getenv("OPENAI_API_KEY") or os.getenv("SYNTH_API_KEY"),
+            )
+        elif config is None and (os.getenv("OPENAI_API_BASE") and os.getenv("OPENAI_API_KEY")):
+            config = SynthConfig(
+                base_url=os.getenv("OPENAI_API_BASE"),
+                api_key=os.getenv("OPENAI_API_KEY"),
+            )
         self.config = config or SynthConfig.from_env()
         self._client = None
 
+        # Create nested OpenAI-style interface
+        self.chat = ChatInterface(self)
+        self.completions = self.chat  # Alias for backward compatibility
+
     async def __aenter__(self):
         self._client = httpx.AsyncClient(
             timeout=self.config.timeout,
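
The wrapper classes above give the Synth client the same object-shaped access path as OpenAI's SDK. A minimal usage sketch, assuming SYNTH_BASE_URL and SYNTH_API_KEY are set so SynthConfig.from_env() resolves, and assuming __aenter__ returns the client itself:

import asyncio

from synth_ai.lm.vendors.synth_client import AsyncSynthClient

async def main():
    async with AsyncSynthClient() as client:
        # client.chat.completions.create mirrors openai.AsyncOpenAI; the raw
        # dict comes back wrapped in OpenAIResponse/OpenAIChoice/OpenAIMessage.
        response = await client.chat.completions.create(
            model="Qwen/Qwen3-0.6B",
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(response.choices[0].message.content)

asyncio.run(main())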
@@ -134,6 +273,32 @@ class AsyncSynthClient:
     ) -> dict[str, Any]:
         """
         Create chat completion with OpenAI-compatible API.
+        This method provides the OpenAI client interface structure.
+        """
+        return await self._chat_completions_create(
+            model, messages, temperature, max_tokens, top_p, frequency_penalty,
+            presence_penalty, stop, stream, tools, tool_choice, response_format, seed, **kwargs
+        )
+
+    async def _chat_completions_create(
+        self,
+        model: str,
+        messages: list[dict[str, Any]],
+        temperature: float = 0.7,
+        max_tokens: int | None = None,
+        top_p: float = 1.0,
+        frequency_penalty: float = 0.0,
+        presence_penalty: float = 0.0,
+        stop: str | list[str] | None = None,
+        stream: bool = False,
+        tools: list[dict[str, Any]] | None = None,
+        tool_choice: str | dict[str, Any] | None = "auto",
+        response_format: dict[str, Any] | None = None,
+        seed: int | None = None,
+        **kwargs,
+    ) -> dict[str, Any]:
+        """
+        Create chat completion with OpenAI-compatible API.
 
         Args:
             model: Model identifier
@@ -180,41 +345,92 @@
         if seed is not None:
             payload["seed"] = seed
 
-        # Add any additional kwargs
+        # Add any additional kwargs (including thinking_mode and thinking_budget)
         payload.update(kwargs)
 
+        # Apply env defaults for thinking if not set explicitly
+        try:
+            if "thinking_mode" not in payload:
+                env_mode = os.getenv("SYNTH_THINKING_MODE")
+                if env_mode in ("think", "no_think"):
+                    payload["thinking_mode"] = env_mode
+            if "thinking_budget" not in payload:
+                env_budget = os.getenv("SYNTH_THINKING_BUDGET")
+                if env_budget and str(env_budget).strip().isdigit():
+                    payload["thinking_budget"] = int(env_budget)
+        except Exception:
+            pass
+
+        # Local warn if budget exceeds max_tokens (do not mutate payload)
+        try:
+            bt = payload.get("thinking_budget")
+            mt = payload.get("max_tokens")
+            if isinstance(bt, int) and isinstance(mt, int) and bt > mt:
+                logger.warning(
+                    "thinking_budget (%s) exceeds max_tokens (%s) – forwarding as-is",
+                    str(bt), str(mt)
+                )
+        except Exception:
+            pass
+
         # Retry logic
         for attempt in range(self.config.max_retries):
             try:
                 url = f"{self.config.get_base_url_without_v1()}/v1/chat/completions"
-                print(f"🔍 SYNTH DEBUG: Making request to URL: {url}")
-                print(f"🔍 SYNTH DEBUG: Payload keys: {list(payload.keys())}")
-                if "tools" in payload:
-                    print(f"🔍 SYNTH DEBUG: Tools in payload: {len(payload['tools'])} tools")
-                    print(
-                        f"🔍 SYNTH DEBUG: First tool: {json.dumps(payload['tools'][0], indent=2)}"
-                    )
+                _debug_client = os.getenv("SYNTH_CLIENT_DEBUG") == "1"
+                if _debug_client:
+                    print(f"🔍 SYNTH DEBUG: Making request to URL: {url}")
+                    print(f"🔍 SYNTH DEBUG: Payload keys: {list(payload.keys())}")
+                    if "tools" in payload:
+                        # Only print counts, avoid dumping tool schemas unless explicitly enabled
+                        print(f"🔍 SYNTH DEBUG: Tools in payload: {len(payload['tools'])} tools")
+
+                # If streaming requested, return an async stream adapter
+                if stream:
+                    async def _astream():
+                        await self._ensure_client()
+                        async with self._client.stream("POST", url, json=payload) as r:  # type: ignore
+                            r.raise_for_status()
+                            async for line in r.aiter_lines():
+                                if not line:
+                                    continue
+                                if line.startswith("data:"):
+                                    data_line = line[len("data:") :].strip()
+                                    if data_line == "[DONE]":
+                                        return
+                                    try:
+                                        chunk = json.loads(data_line)
+                                        yield _wrap_stream_chunk(chunk)
+                                    except json.JSONDecodeError:
+                                        logger.debug("Non-JSON stream line: %s", data_line)
+
+                    class _AsyncStream:
+                        def __aiter__(self):
+                            return _astream()
+
+                        async def __aenter__(self):
+                            return self
+
+                        async def __aexit__(self, *exc):
+                            return False
+
+                    return _AsyncStream()
 
                 response = await self._client.post(url, json=payload)
 
-                print(f"🔍 SYNTH DEBUG: Response status: {response.status_code}")
+                if _debug_client:
+                    print(f"🔍 SYNTH DEBUG: Response status: {response.status_code}")
 
                 if response.status_code == 200:
                     result = response.json()
-                    print(f"🔍 SYNTH DEBUG: Response keys: {list(result.keys())}")
-                    if "choices" in result and result["choices"]:
-                        choice = result["choices"][0]
-                        print(f"🔍 SYNTH DEBUG: Choice keys: {list(choice.keys())}")
-                        if "message" in choice:
-                            message = choice["message"]
-                            print(f"🔍 SYNTH DEBUG: Message keys: {list(message.keys())}")
-                            if "tool_calls" in message:
-                                print(f"🔍 SYNTH DEBUG: Tool calls: {message['tool_calls']}")
-                            else:
-                                print("🔍 SYNTH DEBUG: No tool_calls in message")
-                            print(
-                                f"🔍 SYNTH DEBUG: Message content: {message.get('content', 'N/A')[:200]}..."
-                            )
+                    if _debug_client:
+                        print(f"🔍 SYNTH DEBUG: Response keys: {list(result.keys())}")
+                        if "choices" in result and result["choices"]:
+                            choice = result["choices"][0]
+                            print(f"🔍 SYNTH DEBUG: Choice keys: {list(choice.keys())}")
+                            if "message" in choice:
+                                message = choice["message"]
+                                print(f"🔍 SYNTH DEBUG: Message keys: {list(message.keys())}")
                     return result
 
                 # Handle rate limits with exponential backoff
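
With stream=True the method now returns the _AsyncStream adapter, whose chunks expose .choices[0].delta.content, and the thinking knobs are read from the environment when not passed explicitly. A sketch of consuming it, under the same env assumptions as above (the env values here are illustrative):

import asyncio
import os

from synth_ai.lm.vendors.synth_client import AsyncSynthClient

os.environ.setdefault("SYNTH_THINKING_MODE", "no_think")  # or "think"
os.environ.setdefault("SYNTH_THINKING_BUDGET", "256")     # int-like string
os.environ.setdefault("SYNTH_CLIENT_DEBUG", "0")          # "1" re-enables the debug prints

async def main():
    async with AsyncSynthClient() as client:
        stream = await client.chat.completions.create(
            model="Qwen/Qwen3-0.6B",
            messages=[{"role": "user", "content": "Stream a haiku"}],
            stream=True,
        )
        # Each item is a StreamChunk wrapping one SSE "data:" payload.
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)

asyncio.run(main())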
@@ -250,14 +466,48 @@
             await self._client.aclose()
 
 
+class SyncChatInterface:
+    """Nested interface to match OpenAI client structure (sync version)."""
+
+    def __init__(self, client):
+        self._client = client
+        self.completions = self
+
+    def create(self, **kwargs):
+        """Create chat completion - matches OpenAI interface."""
+        result = self._client.chat_completions_create(**kwargs)
+        # Convert dict response to object-like structure for OpenAI compatibility
+        return OpenAIResponse(result)
+
+
 class SyncSynthClient:
     """Sync client with OpenAI-compatible interface."""
 
-    def __init__(self, config: SynthConfig | None = None):
-        """Initialize with config from environment if not provided."""
+    def __init__(
+        self,
+        config: SynthConfig | None = None,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
+        **_: Any,
+    ):
+        """Initialize with config or OpenAI-style parameters/env."""
+        if config is None and (api_key or base_url):
+            config = SynthConfig(
+                base_url=base_url or os.getenv("OPENAI_API_BASE") or os.getenv("SYNTH_BASE_URL"),
+                api_key=api_key or os.getenv("OPENAI_API_KEY") or os.getenv("SYNTH_API_KEY"),
+            )
+        elif config is None and (os.getenv("OPENAI_API_BASE") and os.getenv("OPENAI_API_KEY")):
+            config = SynthConfig(
+                base_url=os.getenv("OPENAI_API_BASE"),
+                api_key=os.getenv("OPENAI_API_KEY"),
+            )
         self.config = config or SynthConfig.from_env()
         self._client = None
 
+        # Create nested OpenAI-style interface
+        self.chat = SyncChatInterface(self)
+        self.completions = self.chat  # Alias for backward compatibility
+
     def __enter__(self):
         self._client = httpx.Client(
             timeout=self.config.timeout,
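
The sync client mirrors the same nested interface without the streaming branch. A minimal sketch under the same env assumptions, and assuming __enter__ returns the client as the async version does:

from synth_ai.lm.vendors.synth_client import SyncSynthClient

with SyncSynthClient() as client:
    response = client.chat.completions.create(
        model="Qwen/Qwen3-0.6B",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)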
@@ -425,6 +675,100 @@ def create_sync_client(config: SynthConfig | None = None) -> SyncSynthClient:
     return SyncSynthClient(config)
 
 
+# Drop-in replacements for OpenAI clients
+# These allow Synth to be used as a complete replacement for OpenAI
+
+class AsyncOpenAI(AsyncSynthClient):
+    """
+    Drop-in replacement for openai.AsyncOpenAI.
+
+    Use Synth backend instead of OpenAI while maintaining the same API.
+
+    Example:
+        from synth_ai.lm.vendors.synth_client import AsyncOpenAI
+
+        client = AsyncOpenAI(
+            api_key="sk_live_...",
+            base_url="https://synth-backend-dev-docker.onrender.com/api"
+        )
+
+        # Works exactly like openai.AsyncOpenAI!
+        response = await client.chat.completions.create(
+            model="Qwen/Qwen3-0.6B",
+            messages=[{"role": "user", "content": "Hello"}]
+        )
+    """
+
+    def __init__(self, api_key: str | None = None, base_url: str | None = None, **kwargs):
+        """
+        Initialize AsyncOpenAI-compatible Synth client.
+
+        Args:
+            api_key: Synth API key (if not provided, uses SYNTH_API_KEY env var)
+            base_url: Synth base URL (if not provided, uses OPENAI_API_BASE env var)
+            **kwargs: Additional arguments passed to AsyncSynthClient
+        """
+        # Handle OpenAI-style initialization
+        from ..config import SynthConfig
+        if api_key or base_url:
+            config = SynthConfig(
+                base_url=base_url or os.getenv("OPENAI_API_BASE", "https://synth-backend-dev-docker.onrender.com/api"),
+                api_key=api_key or os.getenv("OPENAI_API_KEY", "")
+            )
+        else:
+            # Fallback to environment variables (OPENAI_* first, then SYNTH_*)
+            env_base = os.getenv("OPENAI_API_BASE") or os.getenv("SYNTH_BASE_URL")
+            env_key = os.getenv("OPENAI_API_KEY") or os.getenv("SYNTH_API_KEY")
+            config = SynthConfig(base_url=env_base, api_key=env_key) if env_base and env_key else None
+
+        super().__init__(config, **kwargs)
+
+
+class OpenAI(SyncSynthClient):
+    """
+    Drop-in replacement for openai.OpenAI.
+
+    Synchronous version of AsyncOpenAI for Synth backend.
+    """
+
+    def __init__(self, api_key: str | None = None, base_url: str | None = None, **kwargs):
+        """
+        Initialize OpenAI-compatible Synth client.
+
+        Args:
+            api_key: Synth API key (if not provided, uses SYNTH_API_KEY env var)
+            base_url: Synth base URL (if not provided, uses OPENAI_API_BASE env var)
+            **kwargs: Additional arguments passed to SyncSynthClient
+        """
+        # Handle OpenAI-style initialization
+        from ..config import SynthConfig
+        if api_key or base_url:
+            config = SynthConfig(
+                base_url=base_url or os.getenv("OPENAI_API_BASE", "https://synth-backend-dev-docker.onrender.com/api"),
+                api_key=api_key or os.getenv("OPENAI_API_KEY", "")
+            )
+        else:
+            env_base = os.getenv("OPENAI_API_BASE") or os.getenv("SYNTH_BASE_URL")
+            env_key = os.getenv("OPENAI_API_KEY") or os.getenv("SYNTH_API_KEY")
+            config = SynthConfig(base_url=env_base, api_key=env_key) if env_base and env_key else None
+
+        super().__init__(config, **kwargs)
+
+
+# Convenience imports for easy usage
+__all__ = [
+    "AsyncSynthClient",
+    "SyncSynthClient",
+    "AsyncOpenAI",  # Drop-in replacement for openai.AsyncOpenAI
+    "OpenAI",  # Drop-in replacement for openai.OpenAI
+    "create_async_client",
+    "create_sync_client",
+    "create_chat_completion_async",
+    "create_chat_completion_sync",
+    "SynthConfig",
+]
+
+
 # Convenience functions for one-off requests
 async def create_chat_completion_async(
     model: str, messages: list[dict[str, Any]], config: SynthConfig | None = None, **kwargs
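
The drop-in constructors resolve configuration in a fixed order: explicit arguments, then OPENAI_* variables, then SYNTH_* variables. A sketch of the fallback path (the URL and key below are placeholders, not real endpoints):

import os

from synth_ai.lm.vendors.synth_client import OpenAI

os.environ.pop("OPENAI_API_BASE", None)                      # force the SYNTH_* fallback
os.environ["SYNTH_BASE_URL"] = "http://localhost:8000/api"   # placeholder URL
os.environ["SYNTH_API_KEY"] = "sk_live_placeholder"          # placeholder key

client = OpenAI()  # no args: config resolved from SYNTH_BASE_URL / SYNTH_API_KEY
print(client.config.base_url)  # -> http://localhost:8000/api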
synth_ai/rl/__init__.py ADDED
@@ -0,0 +1,30 @@
+from .contracts import (
+    RolloutEnvSpec,
+    RolloutPolicySpec,
+    RolloutRecordConfig,
+    RolloutSafetyConfig,
+    RolloutRequest,
+    RolloutStep,
+    RolloutTrajectory,
+    RolloutMetrics,
+    RolloutResponse,
+)
+from .env_keys import MAX_ENVIRONMENT_API_KEY_BYTES, encrypt_for_backend, setup_environment_api_key
+from .secrets import mint_environment_api_key
+
+__all__ = [
+    "RolloutEnvSpec",
+    "RolloutPolicySpec",
+    "RolloutRecordConfig",
+    "RolloutSafetyConfig",
+    "RolloutRequest",
+    "RolloutStep",
+    "RolloutTrajectory",
+    "RolloutMetrics",
+    "RolloutResponse",
+    "encrypt_for_backend",
+    "setup_environment_api_key",
+    "mint_environment_api_key",
+    "MAX_ENVIRONMENT_API_KEY_BYTES",
+]
+
synth_ai/rl/contracts.py ADDED
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+"""
+Compatibility layer: re-export Task App rollout contracts from synth_ai.task.contracts
+so existing imports continue to work while consolidating under synth_ai.task.
+"""
+
+from synth_ai.task.contracts import (
+    RolloutEnvSpec,
+    RolloutPolicySpec,
+    RolloutRecordConfig,
+    RolloutSafetyConfig,
+    RolloutRequest,
+    RolloutStep,
+    RolloutTrajectory,
+    RolloutMetrics,
+    RolloutResponse,
+)
+
+__all__ = [
+    "RolloutEnvSpec",
+    "RolloutPolicySpec",
+    "RolloutRecordConfig",
+    "RolloutSafetyConfig",
+    "RolloutRequest",
+    "RolloutStep",
+    "RolloutTrajectory",
+    "RolloutMetrics",
+    "RolloutResponse",
+]
+
+
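
Because synth_ai.rl.contracts only re-exports rather than redefining, both import paths yield the very same classes. A quick identity check:

from synth_ai.rl.contracts import RolloutRequest as LegacyRolloutRequest
from synth_ai.task.contracts import RolloutRequest

# The compatibility layer re-exports, so the two names are the same object.
assert RolloutRequest is LegacyRolloutRequest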
synth_ai/rl/env_keys.py ADDED
@@ -0,0 +1,137 @@
+from __future__ import annotations
+
+"""Helpers for uploading RL environment credentials to the backend."""
+
+import base64
+import binascii
+import json
+from typing import Any, Dict
+import os
+
+import requests
+from nacl.public import PublicKey, SealedBox
+
+__all__ = ["encrypt_for_backend", "setup_environment_api_key", "MAX_ENVIRONMENT_API_KEY_BYTES"]
+
+MAX_ENVIRONMENT_API_KEY_BYTES = 8 * 1024
+_ALGORITHM = "libsodium.sealedbox.v1"
+
+
+def encrypt_for_backend(pubkey_b64: str, secret: str | bytes) -> str:
+    """Encrypt ``secret`` for storage by the backend using libsodium sealed boxes."""
+
+    if not isinstance(pubkey_b64, str) or not pubkey_b64.strip():
+        raise ValueError("public key must be a non-empty base64 string")
+
+    try:
+        key_bytes = base64.b64decode(pubkey_b64, validate=True)
+    except binascii.Error as exc:  # pragma: no cover - defensive guard
+        raise ValueError("public key must be base64-encoded") from exc
+
+    if len(key_bytes) != 32:
+        raise ValueError("public key must be 32 bytes for X25519")
+
+    if isinstance(secret, str):
+        secret_bytes = secret.encode("utf-8")
+    elif isinstance(secret, bytes):
+        secret_bytes = secret
+    else:  # pragma: no cover - type guard
+        raise TypeError("secret must be str or bytes")
+
+    if not secret_bytes:
+        raise ValueError("secret must not be empty")
+
+    box = SealedBox(PublicKey(key_bytes))
+    ciphertext = box.encrypt(secret_bytes)
+    return base64.b64encode(ciphertext).decode("ascii")
+
+
+def setup_environment_api_key(
+    backend_base: str,
+    synth_api_key: str,
+    token: str | None = None,
+    *,
+    timeout: float = 15.0,
+) -> Dict[str, Any]:
+    """Upload an ENVIRONMENT_API_KEY to the backend."""
+
+    backend = backend_base.rstrip("/")
+    if not backend:
+        raise ValueError("backend_base must be provided")
+    if not synth_api_key:
+        raise ValueError("synth_api_key must be provided")
+
+    # Require caller-provided plaintext. If not provided, read from ENVIRONMENT_API_KEY.
+    plaintext = token if token is not None else os.getenv("ENVIRONMENT_API_KEY", "").strip()
+    if not plaintext:
+        raise ValueError("ENVIRONMENT_API_KEY must be set (or pass token=...) to upload")
+    if not isinstance(plaintext, str):  # pragma: no cover - defensive guard
+        raise TypeError("token must be a string")
+
+    token_bytes = plaintext.encode("utf-8")
+    if not token_bytes:
+        raise ValueError("ENVIRONMENT_API_KEY token must not be empty")
+    if len(token_bytes) > MAX_ENVIRONMENT_API_KEY_BYTES:
+        raise ValueError("ENVIRONMENT_API_KEY token exceeds 8 KiB limit")
+
+    headers = {"Authorization": f"Bearer {synth_api_key}"}
+    pub_url = f"{backend}/api/v1/crypto/public-key"
+    response = requests.get(pub_url, headers=headers, timeout=timeout)
+    _raise_with_detail(response)
+
+    try:
+        doc = response.json()
+    except ValueError as exc:  # pragma: no cover - backend invariant
+        raise RuntimeError("backend returned invalid JSON for public key") from exc
+
+    if not isinstance(doc, dict):
+        raise RuntimeError("backend public key response must be an object")
+
+    pubkey = doc.get("public_key")
+    if not isinstance(pubkey, str) or not pubkey:
+        raise RuntimeError("backend response missing public_key")
+
+    # The backend currently returns a single algorithm identifier; keep a guard in
+    # case future versions change the value and we need to surface that to callers.
+    alg = doc.get("alg")
+    if alg is not None and alg != _ALGORITHM:
+        raise RuntimeError(f"unsupported sealed box algorithm: {alg}")
+
+    ciphertext_b64 = encrypt_for_backend(pubkey, token_bytes)
+
+    body = {"name": "ENVIRONMENT_API_KEY", "ciphertext_b64": ciphertext_b64}
+    post_url = f"{backend}/api/v1/env-keys"
+    response2 = requests.post(post_url, headers={**headers, "Content-Type": "application/json"}, json=body, timeout=timeout)
+    _raise_with_detail(response2)
+
+    try:
+        upload_doc = response2.json()
+    except ValueError:
+        upload_doc = {}
+
+    if not isinstance(upload_doc, dict):
+        upload_doc = {}
+
+    return {
+        "stored": True,
+        "id": upload_doc.get("id"),
+        "name": upload_doc.get("name"),
+        "updated_at": upload_doc.get("updated_at"),
+    }
+
+
+def _raise_with_detail(response: requests.Response) -> None:
+    try:
+        response.raise_for_status()
+    except requests.HTTPError as exc:
+        detail_snippet: str | None = None
+        try:
+            detail = response.json()
+            detail_snippet = json.dumps(detail, separators=(",", ":"))[:200]
+        except Exception:
+            body = response.text if response.text is not None else ""
+            detail_snippet = body[:200] if body else None
+        message = str(exc)
+        if detail_snippet:
+            message = f"{message} | body={detail_snippet}"
+        raise requests.HTTPError(message, request=exc.request, response=exc.response) from None
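
Since encrypt_for_backend is a plain libsodium sealed box, it can be sanity-checked locally with a throwaway keypair, no backend involved. A sketch using PyNaCl directly:

import base64

from nacl.public import PrivateKey, SealedBox

from synth_ai.rl.env_keys import encrypt_for_backend

# Generate a throwaway X25519 keypair standing in for the backend's key.
private = PrivateKey.generate()
pubkey_b64 = base64.b64encode(bytes(private.public_key)).decode("ascii")

ciphertext_b64 = encrypt_for_backend(pubkey_b64, "my-environment-key")
plaintext = SealedBox(private).decrypt(base64.b64decode(ciphertext_b64))
assert plaintext == b"my-environment-key"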
synth_ai/rl/secrets.py ADDED
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+"""Helpers for generating RL environment credentials."""
+
+import secrets
+
+__all__ = ["mint_environment_api_key"]
+
+
+def mint_environment_api_key() -> str:
+    """Mint a random ENVIRONMENT_API_KEY value.
+
+    The current format is 64 hexadecimal characters (256 bits of entropy), which
+    matches the shell helpers used by the RL examples. This keeps the token easy
+    to copy while remaining suitably strong for authentication.
+    """
+
+    # secrets.token_hex(32) → 32 random bytes rendered as 64 hex characters.
+    return secrets.token_hex(32)
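
End to end, the new rl helpers compose: mint a token, then upload it. A hedged sketch; the backend URL is illustrative (taken from the docstrings earlier in this diff), and note that setup_environment_api_key appends the /api/... path itself, so pass the bare host:

import os

from synth_ai.rl import mint_environment_api_key, setup_environment_api_key

token = mint_environment_api_key()   # 64 hex chars = 256 bits of entropy
assert len(token) == 64

result = setup_environment_api_key(
    backend_base="https://synth-backend-dev-docker.onrender.com",  # illustrative host
    synth_api_key=os.environ["SYNTH_API_KEY"],
    token=token,
)
print(result)  # {"stored": True, "id": ..., "name": ..., "updated_at": ...}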