synth-ai 0.2.4.dev9__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.
Potentially problematic release.
This version of synth-ai might be problematic.
- synth_ai/__init__.py +1 -1
- synth_ai/demos/core/cli.py +71 -7
- {synth_ai-0.2.4.dev9.dist-info → synth_ai-0.2.5.dist-info}/METADATA +18 -3
- {synth_ai-0.2.4.dev9.dist-info → synth_ai-0.2.5.dist-info}/RECORD +8 -8
- {synth_ai-0.2.4.dev9.dist-info → synth_ai-0.2.5.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.4.dev9.dist-info → synth_ai-0.2.5.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.4.dev9.dist-info → synth_ai-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.2.4.dev9.dist-info → synth_ai-0.2.5.dist-info}/top_level.txt +0 -0
synth_ai/__init__.py CHANGED

```diff
@@ -23,7 +23,7 @@ from synth_ai.tracing_v1.abstractions import (
 from synth_ai.tracing_v1.decorators import trace_event_async, trace_event_sync
 from synth_ai.tracing_v1.upload import upload
 
-__version__ = "0.2.4.dev9"
+__version__ = "0.2.5"
 __all__ = [
     "LM",
     "tracing",
```
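
The only change here is the version bump. As a quick post-upgrade sanity check (a trivial sketch, not part of the diff), the new string is importable:

```python
# Verify the installed package reports the bumped version.
import synth_ai

print(synth_ai.__version__)  # expected: "0.2.5"
```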
synth_ai/demos/core/cli.py CHANGED

```diff
@@ -129,6 +129,43 @@ def _popen_capture(cmd: list[str], cwd: str | None = None, env: dict | None = No
         return 1, str(e)
 
 
+def _popen_stream(cmd: list[str], cwd: str | None = None, env: dict | None = None) -> int:
+    """Stream subprocess output line-by-line to stdout for real-time feedback."""
+
+    import subprocess
+    import threading
+
+    try:
+        proc = subprocess.Popen(
+            cmd,
+            cwd=cwd,
+            env=env,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            text=True,
+            bufsize=1,
+        )
+    except Exception as exc:
+        print(f"Failed to launch {' '.join(cmd)}: {exc}")
+        return 1
+
+    def _pump(stdout) -> None:
+        try:
+            for line in stdout:
+                print(line.rstrip())
+        except Exception:
+            pass
+
+    if proc.stdout is not None:
+        t = threading.Thread(target=_pump, args=(proc.stdout,), daemon=True)
+        t.start()
+        proc.wait()
+        t.join(timeout=1.0)
+    else:
+        proc.wait()
+    return int(proc.returncode or 0)
+
+
 def cmd_deploy(args: argparse.Namespace) -> int:
     env = demo_core.load_env()
     url = ""
```
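
The new `_popen_stream` helper follows the standard pattern of pumping a child process's merged stdout/stderr through a background thread so output appears in real time. A minimal standalone sketch of the same pattern, using an illustrative command that is not part of the package:

```python
# Illustrative only: stream a command's output line-by-line as it is produced.
import subprocess
import threading


def stream(cmd: list[str]) -> int:
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # merge stderr into stdout, like _popen_stream
        text=True,
        bufsize=1,  # line-buffered
    )

    def pump() -> None:
        assert proc.stdout is not None
        for line in proc.stdout:
            print(line.rstrip())

    t = threading.Thread(target=pump, daemon=True)
    t.start()
    proc.wait()
    t.join(timeout=1.0)
    return proc.returncode or 0


if __name__ == "__main__":
    # Hypothetical command; any long-running CLI works the same way.
    stream(["python", "-c", "import time\nfor i in range(3): print(i); time.sleep(0.2)"])
```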
```diff
@@ -322,11 +359,35 @@ def cmd_configure(args: argparse.Namespace) -> int:
     base_path = defaults[idx]
     with open(base_path, "r") as fh:
         text = fh.read()
-    gpu_type = input("GPU type (e.g., A100): ").strip() or "A100"
-    gpu_count = input("GPU count (e.g., 4): ").strip() or "4"
-    model = input("Model (e.g., Qwen/Qwen3-0.6B): ").strip() or "Qwen/Qwen3-0.6B"
-    tp = input("Tensor parallel (e.g., 2): ").strip() or "2"
     import re
+    # Extract current defaults from the selected TOML
+    def _extract(pattern: str, default: str) -> str:
+        m = re.search(pattern, text, flags=re.M)
+        if not m:
+            return default
+        val = (m.group(1) or "").strip()
+        return val if val else default
+    current_gpu_type = _extract(r"^gpu_type\s*=\s*\"([^\"]+)\"$", "A100")
+    # topology form gpu_type = "TYPE:COUNT" also supported for deriving defaults
+    topo_gpu = _extract(r"^gpu_type\s*=\s*\"([^\":]+):(\d+)\"$", current_gpu_type)
+    if ":" in topo_gpu:
+        current_gpu_type = topo_gpu.split(":", 1)[0]
+    current_gpu_count = _extract(r"^gpu_count\s*=\s*(\d+)$", "4")
+    if ":" in topo_gpu:
+        current_gpu_count = topo_gpu.split(":", 1)[1]
+    current_model = _extract(r"^name\s*=\s*\"([^\"]+)\"$", "Qwen/Qwen3-0.6B")
+    current_tp = _extract(r"^tensor_parallel_size\s*=\s*(\d+)$", "2")
+
+    # Prompts with defaults shown; Enter keeps current
+    def _prompt(label: str, default_val: str) -> str:
+        entered = input(f"{label} [{default_val}]: ").strip()
+        return entered or default_val
+
+    gpu_type = _prompt("GPU type", current_gpu_type)
+    gpu_count = _prompt("GPU count", current_gpu_count)
+    model = _prompt("Model", current_model)
+    tp = _prompt("Tensor parallel", current_tp)
+
     text = re.sub(r"(?m)^gpu_type\s*=\s*\".*?\"$", f"gpu_type = \"{gpu_type}\"", text)
     text = re.sub(r"(?m)^gpu_count\s*=\s*\d+$", f"gpu_count = {int(gpu_count)}", text)
     text = re.sub(r"(?m)^name\s*=\s*\".*?\"$", f"name = \"{model}\"", text)
```
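
With this hunk, `configure` derives its prompt defaults from the selected TOML via regexes instead of hard-coding them, so pressing Enter keeps whatever the file already contains. A simplified sketch of that extraction against an illustrative config fragment (the field values below are made up, and the logic is condensed relative to the shipped code):

```python
import re

# Illustrative TOML fragment; real demo configs ship with the package.
text = '''
gpu_type = "H100:8"
gpu_count = 8
name = "Qwen/Qwen3-0.6B"
tensor_parallel_size = 2
'''


def extract(pattern: str, default: str) -> str:
    # Return the first capture group if present and non-empty, else the default.
    m = re.search(pattern, text, flags=re.M)
    return m.group(1).strip() if m and m.group(1).strip() else default


gpu_type = extract(r"^gpu_type\s*=\s*\"([^\"]+)\"$", "A100")
if ":" in gpu_type:  # topology form "TYPE:COUNT"
    gpu_type, gpu_count = gpu_type.split(":", 1)
else:
    gpu_count = extract(r"^gpu_count\s*=\s*(\d+)$", "4")

print(gpu_type, gpu_count)  # -> H100 8
```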
```diff
@@ -427,9 +488,12 @@ def cmd_run(args: argparse.Namespace) -> int:
         run_env["RL_GROUP_SIZE"] = str(int(args.group_size))
         if args.model:
             run_env["RL_MODEL"] = args.model
-
-        print(
-
+        cmd = ["uv", "run", "python", launcher]
+        print(f"Launching monorepo clustered runner: {' '.join(cmd)}")
+        code = _popen_stream(cmd, env=run_env)
+        if code != 0:
+            print(f"Clustered runner exited with code {code}")
+        return code
 
     # Fallback: legacy jobs API flow
     import tomllib
```
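
Here `cmd_run` forwards run parameters to the launcher through environment variables and streams its output via `_popen_stream`. A standalone sketch of that env-forwarding pattern (the variable names come from the diff; the command itself is illustrative):

```python
import os
import subprocess
import sys

# Build the child environment from the current one plus run parameters,
# mirroring how cmd_run populates run_env before launching the runner.
run_env = dict(os.environ)
run_env["RL_GROUP_SIZE"] = "8"
run_env["RL_MODEL"] = "Qwen/Qwen3-0.6B"

# Illustrative command; the real flow launches a monorepo runner via `uv run python <launcher>`.
cmd = [sys.executable, "-c", "import os; print(os.environ['RL_MODEL'])"]
code = subprocess.run(cmd, env=run_env).returncode
print(f"runner exited with code {code}")
```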
{synth_ai-0.2.4.dev9.dist-info → synth_ai-0.2.5.dist-info}/METADATA CHANGED

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: synth-ai
-Version: 0.2.4.dev9
+Version: 0.2.5
 Summary: Software for aiding the best and multiplying the will - Core AI functionality and tracing
 Author-email: Synth AI <josh@usesynth.ai>
 License-Expression: MIT
@@ -75,7 +75,7 @@ Dynamic: license-file
 
 [](https://www.python.org/)
 [](LICENSE)
-[](https://pypi.org/project/synth-ai/)
 
 
 
@@ -83,9 +83,24 @@ Docs: [Synth‑AI Documentation](https://docs.usesynth.ai/synth-ai/introduction)
 
 Fast and effective reinforcement learning for agents, via an API
 
-
+## Highlights
 
 - Easily scale gpu topologies - train on 3 a10gs or 8 H100s (multi-node available upon request)
 - Requires only a thin fastapi wrapper to integrate with existing agent software.
 - Supports the best OSS models like Qwen3. (gpt-oss available upon request, GA soon)
 - Own your trained models
+
+## Getting Started
+
+synth-ai comes with a built-in RL example tailored for training Qwen/Qwen3-0.6B to succeed at Math.
+
+Please create an account at [Synth](https://usesynth.ai) and [Modal](https://modal.com) for the Math hello‑world test run. Then run:
+
+```bash
+uvx synth-ai rl_demo check
+uvx synth-ai rl_demo deploy
+uvx synth-ai rl_demo configure
+uvx synth-ai rl_demo run
+```
+
+To walk through kicking off your first RL run, see the [Synth‑AI Documentation](https://docs.usesynth.ai/synth-ai/introduction).
````
{synth_ai-0.2.4.dev9.dist-info → synth_ai-0.2.5.dist-info}/RECORD CHANGED

```diff
@@ -1,4 +1,4 @@
-synth_ai/__init__.py,sha256=
+synth_ai/__init__.py,sha256=7l4obY2aQlcZEIxfKJqXCMOhfgdRzoWsG4f5KxJcbSs,1349
 synth_ai/__main__.py,sha256=Kh1xBKkTE5Vs2qNMtDuuOXerHUptMcOiF3YziOpC6DA,146
 synth_ai/http.py,sha256=aKIGsGwMBi7S0Tg57Q1Nxdoxjh2sn9xzNziLYhfSA3c,4427
 synth_ai/install_sqld.sh,sha256=AMBhlfq661PxeTTc6D4K_Nei_qwMvA84ei4NhQzmUUk,928
@@ -19,7 +19,7 @@ synth_ai/config/base_url.py,sha256=Bk7Bd9jKJP-LF0SW--WE01JhMfvOB6NUkFMRgPMnJuQ,3
 synth_ai/core/experiment.py,sha256=hLkPtzUFA7iY3-QpeJ5K8YjvQeyfqnjab5P2CFaojys,236
 synth_ai/core/system.py,sha256=s-Z7np2ISYmYc1r9YN-y2yb3cgRlOalrh0iaqnxeo84,206
 synth_ai/demos/core/__init__.py,sha256=A2FjhY7KXGtyzdQXqeTPCkEhHfrH-eQg6bvP8HaYhZM,36
-synth_ai/demos/core/cli.py,sha256=
+synth_ai/demos/core/cli.py,sha256=MAZ_gFdJriYPp0v8jiNGHM13Pl4OkI4Ue5CVYYMz3Q8,28795
 synth_ai/demos/demo_task_apps/__init__.py,sha256=8aUGEGpWUw11GRb3wQXRAmQ99yjAt5qd5FCTDJpXWjI,44
 synth_ai/demos/demo_task_apps/core.py,sha256=3-C2dGdaqVqrVjnsxU2n6kGcuaprwuszBcTHePBypwo,13580
 synth_ai/demos/demo_task_apps/math/__init__.py,sha256=WBzpZwSn7pRarBmhopQi34i9bEm05-71eM3siboOavY,43
@@ -408,9 +408,9 @@ synth_ai/v0/tracing_v1/events/manage.py,sha256=ZDXXP-ZwLH9LCsmw7Ru9o55d7bl_diPtJ
 synth_ai/v0/tracing_v1/events/scope.py,sha256=BuBkhSpVHUJt8iGT9HJZF82rbb88mQcd2vM2shg-w2I,2550
 synth_ai/v0/tracing_v1/events/store.py,sha256=0342lvAcalyJbVEIzQFaPuMQGgwiFm7M5rE6gr-G0E8,9041
 synth_ai/zyk/__init__.py,sha256=htVLnzTYQ5rxzYpzSYBm7_o6uNKZ3pB_PrqkBrgTRS4,771
-synth_ai-0.2.
-synth_ai-0.2.
-synth_ai-0.2.
-synth_ai-0.2.
-synth_ai-0.2.
-synth_ai-0.2.
+synth_ai-0.2.5.dist-info/licenses/LICENSE,sha256=ynhjRQUfqA_RdGRATApfFA_fBAy9cno04sLtLUqxVFM,1069
+synth_ai-0.2.5.dist-info/METADATA,sha256=layjilLCIDa_4Rz2c1Y9jAXKcSpl_HbN6o0feGXfaD0,4009
+synth_ai-0.2.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+synth_ai-0.2.5.dist-info/entry_points.txt,sha256=Neq-3bT7TAijjgOIR77pKL-WYg6TWBDeO8pp_nL4vGY,91
+synth_ai-0.2.5.dist-info/top_level.txt,sha256=fBmtZyVHuKaGa29oHBaaUkrUIWTqSpoVMPiVdCDP3k8,9
+synth_ai-0.2.5.dist-info/RECORD,,
```
The remaining files (WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt) are unchanged apart from the dist-info rename.