synth-ai 0.2.17__py3-none-any.whl → 0.2.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of synth-ai might be problematic. Click here for more details.

Files changed (169) hide show
  1. examples/baseline/banking77_baseline.py +204 -0
  2. examples/baseline/crafter_baseline.py +407 -0
  3. examples/baseline/pokemon_red_baseline.py +326 -0
  4. examples/baseline/simple_baseline.py +56 -0
  5. examples/baseline/warming_up_to_rl_baseline.py +239 -0
  6. examples/blog_posts/gepa/README.md +355 -0
  7. examples/blog_posts/gepa/configs/banking77_gepa_local.toml +95 -0
  8. examples/blog_posts/gepa/configs/banking77_gepa_test.toml +82 -0
  9. examples/blog_posts/gepa/configs/banking77_mipro_local.toml +52 -0
  10. examples/blog_posts/gepa/configs/hotpotqa_gepa_local.toml +59 -0
  11. examples/blog_posts/gepa/configs/hotpotqa_gepa_qwen.toml +36 -0
  12. examples/blog_posts/gepa/configs/hotpotqa_mipro_local.toml +53 -0
  13. examples/blog_posts/gepa/configs/hover_gepa_local.toml +59 -0
  14. examples/blog_posts/gepa/configs/hover_gepa_qwen.toml +36 -0
  15. examples/blog_posts/gepa/configs/hover_mipro_local.toml +53 -0
  16. examples/blog_posts/gepa/configs/ifbench_gepa_local.toml +59 -0
  17. examples/blog_posts/gepa/configs/ifbench_gepa_qwen.toml +36 -0
  18. examples/blog_posts/gepa/configs/ifbench_mipro_local.toml +53 -0
  19. examples/blog_posts/gepa/configs/pupa_gepa_local.toml +60 -0
  20. examples/blog_posts/gepa/configs/pupa_mipro_local.toml +54 -0
  21. examples/blog_posts/gepa/deploy_banking77_task_app.sh +41 -0
  22. examples/blog_posts/gepa/gepa_baseline.py +204 -0
  23. examples/blog_posts/gepa/query_prompts_example.py +97 -0
  24. examples/blog_posts/gepa/run_gepa_banking77.sh +87 -0
  25. examples/blog_posts/gepa/task_apps.py +105 -0
  26. examples/blog_posts/gepa/test_gepa_local.sh +67 -0
  27. examples/blog_posts/gepa/verify_banking77_setup.sh +123 -0
  28. examples/blog_posts/pokemon_vl/configs/eval_gpt5nano.toml +26 -0
  29. examples/blog_posts/pokemon_vl/configs/eval_qwen3_vl.toml +12 -10
  30. examples/blog_posts/pokemon_vl/configs/train_rl_from_sft.toml +1 -0
  31. examples/blog_posts/pokemon_vl/extract_images.py +239 -0
  32. examples/blog_posts/pokemon_vl/pokemon_vl_baseline.py +326 -0
  33. examples/blog_posts/pokemon_vl/run_eval_extract_images.py +209 -0
  34. examples/blog_posts/pokemon_vl/run_qwen_eval_extract_images.py +212 -0
  35. examples/blog_posts/pokemon_vl/text_box_analysis.md +106 -0
  36. examples/blog_posts/warming_up_to_rl/ARCHITECTURE.md +195 -0
  37. examples/blog_posts/warming_up_to_rl/FINAL_TEST_RESULTS.md +127 -0
  38. examples/blog_posts/warming_up_to_rl/INFERENCE_SUCCESS.md +132 -0
  39. examples/blog_posts/warming_up_to_rl/SMOKE_TESTING.md +164 -0
  40. examples/blog_posts/warming_up_to_rl/SMOKE_TEST_COMPLETE.md +253 -0
  41. examples/blog_posts/warming_up_to_rl/configs/eval_baseline_qwen32b_10x20.toml +25 -0
  42. examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b_10x20.toml +26 -0
  43. examples/blog_posts/warming_up_to_rl/configs/filter_high_reward_dataset.toml +1 -1
  44. examples/blog_posts/warming_up_to_rl/configs/smoke_test.toml +75 -0
  45. examples/blog_posts/warming_up_to_rl/configs/train_rl_from_sft.toml +60 -10
  46. examples/blog_posts/warming_up_to_rl/configs/train_sft_qwen4b.toml +1 -1
  47. examples/blog_posts/warming_up_to_rl/warming_up_to_rl_baseline.py +187 -0
  48. examples/multi_step/configs/VERILOG_REWARDS.md +4 -0
  49. examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +4 -0
  50. examples/multi_step/configs/crafter_rl_outcome.toml +1 -0
  51. examples/multi_step/configs/crafter_rl_stepwise_shaped.toml +1 -0
  52. examples/multi_step/configs/crafter_rl_stepwise_simple.toml +1 -0
  53. examples/rl/configs/rl_from_base_qwen17.toml +1 -0
  54. examples/swe/task_app/hosted/inference/openai_client.py +0 -34
  55. examples/swe/task_app/hosted/policy_routes.py +17 -0
  56. examples/swe/task_app/hosted/rollout.py +4 -2
  57. examples/task_apps/banking77/__init__.py +6 -0
  58. examples/task_apps/banking77/banking77_task_app.py +841 -0
  59. examples/task_apps/banking77/deploy_wrapper.py +46 -0
  60. examples/task_apps/crafter/CREATE_SFT_DATASET.md +4 -0
  61. examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +4 -0
  62. examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +4 -0
  63. examples/task_apps/crafter/task_app/grpo_crafter.py +24 -2
  64. examples/task_apps/crafter/task_app/synth_envs_hosted/hosted_app.py +49 -0
  65. examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +355 -58
  66. examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +68 -7
  67. examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +78 -21
  68. examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +194 -1
  69. examples/task_apps/gepa_benchmarks/__init__.py +7 -0
  70. examples/task_apps/gepa_benchmarks/common.py +260 -0
  71. examples/task_apps/gepa_benchmarks/hotpotqa_task_app.py +507 -0
  72. examples/task_apps/gepa_benchmarks/hover_task_app.py +436 -0
  73. examples/task_apps/gepa_benchmarks/ifbench_task_app.py +563 -0
  74. examples/task_apps/gepa_benchmarks/pupa_task_app.py +460 -0
  75. examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +4 -0
  76. examples/task_apps/pokemon_red/task_app.py +254 -36
  77. examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +1 -0
  78. examples/warming_up_to_rl/task_app/grpo_crafter.py +53 -4
  79. examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +49 -0
  80. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +152 -41
  81. examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +31 -1
  82. examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +33 -3
  83. examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +67 -0
  84. examples/workflows/math_rl/configs/rl_from_base_qwen17.toml +1 -0
  85. synth_ai/api/train/builders.py +90 -1
  86. synth_ai/api/train/cli.py +396 -21
  87. synth_ai/api/train/config_finder.py +13 -2
  88. synth_ai/api/train/configs/__init__.py +15 -1
  89. synth_ai/api/train/configs/prompt_learning.py +442 -0
  90. synth_ai/api/train/configs/rl.py +29 -0
  91. synth_ai/api/train/task_app.py +1 -1
  92. synth_ai/api/train/validators.py +277 -0
  93. synth_ai/baseline/__init__.py +25 -0
  94. synth_ai/baseline/config.py +209 -0
  95. synth_ai/baseline/discovery.py +214 -0
  96. synth_ai/baseline/execution.py +146 -0
  97. synth_ai/cli/__init__.py +85 -17
  98. synth_ai/cli/__main__.py +0 -0
  99. synth_ai/cli/claude.py +70 -0
  100. synth_ai/cli/codex.py +84 -0
  101. synth_ai/cli/commands/__init__.py +1 -0
  102. synth_ai/cli/commands/baseline/__init__.py +12 -0
  103. synth_ai/cli/commands/baseline/core.py +637 -0
  104. synth_ai/cli/commands/baseline/list.py +93 -0
  105. synth_ai/cli/commands/eval/core.py +13 -10
  106. synth_ai/cli/commands/filter/core.py +53 -17
  107. synth_ai/cli/commands/help/core.py +0 -1
  108. synth_ai/cli/commands/smoke/__init__.py +7 -0
  109. synth_ai/cli/commands/smoke/core.py +1436 -0
  110. synth_ai/cli/commands/status/subcommands/pricing.py +22 -0
  111. synth_ai/cli/commands/status/subcommands/usage.py +203 -0
  112. synth_ai/cli/commands/train/judge_schemas.py +1 -0
  113. synth_ai/cli/commands/train/judge_validation.py +1 -0
  114. synth_ai/cli/commands/train/validation.py +0 -57
  115. synth_ai/cli/demo.py +35 -3
  116. synth_ai/cli/deploy/__init__.py +40 -25
  117. synth_ai/cli/deploy.py +162 -0
  118. synth_ai/cli/legacy_root_backup.py +14 -8
  119. synth_ai/cli/opencode.py +107 -0
  120. synth_ai/cli/root.py +9 -5
  121. synth_ai/cli/task_app_deploy.py +1 -1
  122. synth_ai/cli/task_apps.py +53 -53
  123. synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +7 -4
  124. synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +9 -5
  125. synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +4 -3
  126. synth_ai/judge_schemas.py +1 -0
  127. synth_ai/learning/__init__.py +10 -0
  128. synth_ai/learning/prompt_learning_client.py +276 -0
  129. synth_ai/learning/prompt_learning_types.py +184 -0
  130. synth_ai/pricing/__init__.py +2 -0
  131. synth_ai/pricing/model_pricing.py +57 -0
  132. synth_ai/streaming/handlers.py +53 -4
  133. synth_ai/streaming/streamer.py +19 -0
  134. synth_ai/task/apps/__init__.py +1 -0
  135. synth_ai/task/config.py +2 -0
  136. synth_ai/task/tracing_utils.py +25 -25
  137. synth_ai/task/validators.py +44 -8
  138. synth_ai/task_app_cfgs.py +21 -0
  139. synth_ai/tracing_v3/config.py +162 -19
  140. synth_ai/tracing_v3/constants.py +1 -1
  141. synth_ai/tracing_v3/db_config.py +24 -38
  142. synth_ai/tracing_v3/storage/config.py +47 -13
  143. synth_ai/tracing_v3/storage/factory.py +3 -3
  144. synth_ai/tracing_v3/turso/daemon.py +113 -11
  145. synth_ai/tracing_v3/turso/native_manager.py +92 -16
  146. synth_ai/types.py +8 -0
  147. synth_ai/urls.py +11 -0
  148. synth_ai/utils/__init__.py +30 -1
  149. synth_ai/utils/agents.py +74 -0
  150. synth_ai/utils/bin.py +39 -0
  151. synth_ai/utils/cli.py +149 -5
  152. synth_ai/utils/env.py +17 -17
  153. synth_ai/utils/json.py +72 -0
  154. synth_ai/utils/modal.py +283 -1
  155. synth_ai/utils/paths.py +48 -0
  156. synth_ai/utils/uvicorn.py +113 -0
  157. {synth_ai-0.2.17.dist-info → synth_ai-0.2.19.dist-info}/METADATA +102 -4
  158. {synth_ai-0.2.17.dist-info → synth_ai-0.2.19.dist-info}/RECORD +162 -88
  159. synth_ai/cli/commands/deploy/__init__.py +0 -23
  160. synth_ai/cli/commands/deploy/core.py +0 -614
  161. synth_ai/cli/commands/deploy/errors.py +0 -72
  162. synth_ai/cli/commands/deploy/validation.py +0 -11
  163. synth_ai/cli/deploy/core.py +0 -5
  164. synth_ai/cli/deploy/errors.py +0 -23
  165. synth_ai/cli/deploy/validation.py +0 -5
  166. {synth_ai-0.2.17.dist-info → synth_ai-0.2.19.dist-info}/WHEEL +0 -0
  167. {synth_ai-0.2.17.dist-info → synth_ai-0.2.19.dist-info}/entry_points.txt +0 -0
  168. {synth_ai-0.2.17.dist-info → synth_ai-0.2.19.dist-info}/licenses/LICENSE +0 -0
  169. {synth_ai-0.2.17.dist-info → synth_ai-0.2.19.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1436 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import contextlib
5
+ import logging
6
+ import os
7
+ import subprocess
8
+ import sys
9
+ import time
10
+ import uuid
11
+ from pathlib import Path
12
+ from typing import Any
13
+ from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
14
+
15
+ import click
16
+ import httpx
17
+ import toml
18
+ from synth_ai.task.client import TaskAppClient
19
+ from synth_ai.task.contracts import (
20
+ RolloutEnvSpec,
21
+ RolloutMode,
22
+ RolloutPolicySpec,
23
+ RolloutRecordConfig,
24
+ RolloutRequest,
25
+ RolloutSafetyConfig,
26
+ )
27
+ from synth_ai.task.validators import (
28
+ normalize_inference_url,
29
+ validate_rollout_response_for_rl,
30
+ validate_task_app_url,
31
+ )
32
+ from synth_ai.tracing_v3.config import resolve_trace_db_settings
33
+ from synth_ai.tracing_v3.turso.daemon import start_sqld
34
+
35
+
36
+ def _append_query_param(url: str, key: str, value: str) -> str:
37
+ parsed = urlparse(url)
38
+ params = dict(parse_qsl(parsed.query, keep_blank_values=True))
39
+ params[key] = value
40
+ new_query = urlencode(params)
41
+ return urlunparse(parsed._replace(query=new_query))
42
+
43
+
44
+ def _ensure_local_libsql() -> None:
45
+ """Start a local sqld/libSQL instance or abort the smoke test."""
46
+
47
+ traces_root = Path(os.getenv("SYNTH_TRACES_DIR", str((Path.cwd() / "traces" / "v3").resolve())))
48
+ traces_root.mkdir(parents=True, exist_ok=True)
49
+
50
+ local_db_path = Path(os.getenv("SQLD_DB_PATH", str(traces_root / "local.db"))).resolve()
51
+ local_db_path.parent.mkdir(parents=True, exist_ok=True)
52
+
53
+ hrana_port = int(os.getenv("SQLD_HTTP_PORT", "8080"))
54
+ http_port = hrana_port + 1
55
+ os.environ["SQLD_DB_PATH"] = str(local_db_path)
56
+ os.environ["SQLD_HTTP_PORT"] = str(hrana_port)
57
+
58
+ try:
59
+ start_sqld(db_path=str(local_db_path), hrana_port=hrana_port, http_port=http_port)
60
+ started_new = True
61
+ except Exception as exc:
62
+ # If address in use, assume an existing sqld instance; verify health below
63
+ if "Address already in use" in str(exc):
64
+ started_new = False
65
+ click.echo(
66
+ f"[libsql] sqld already running on 127.0.0.1:{hrana_port} (hrana) and 127.0.0.1:{http_port} (http); attempting to reuse", err=True
67
+ )
68
+ else:
69
+ raise click.ClickException(
70
+ f"Failed to start local sqld on 127.0.0.1:{hrana_port}: {exc}"
71
+ ) from exc
72
+
73
+ health_url = f"http://127.0.0.1:{http_port}/health"
74
+ deadline = time.time() + 5.0
75
+ healthy = False
76
+ while time.time() < deadline:
77
+ try:
78
+ resp = httpx.get(health_url, timeout=0.5)
79
+ if resp.status_code == 200:
80
+ healthy = True
81
+ break
82
+ except Exception:
83
+ pass
84
+ time.sleep(0.1)
85
+
86
+ if not healthy:
87
+ msg = (
88
+ f"Tracing backend not reachable at {health_url}. "
89
+ "Start sqld manually or disable tracing (TASKAPP_TRACING_ENABLED=0)."
90
+ )
91
+ raise click.ClickException(msg)
92
+
93
+ click.echo(
94
+ f"[libsql] sqld ready on libsql://127.0.0.1:{hrana_port} with HTTP API on :{http_port} (started_new={started_new})",
95
+ err=True,
96
+ )
97
+
98
+ # Python libsql client uses HTTP API port, not Hrana WebSocket port
99
+ local_dsn = f"http://127.0.0.1:{http_port}"
100
+ os.environ["LIBSQL_URL"] = local_dsn
101
+ os.environ["SYNTH_TRACES_DB"] = local_dsn
102
+ os.environ.pop("LIBSQL_AUTH_TOKEN", None)
103
+ os.environ.pop("TURSO_AUTH_TOKEN", None)
104
+
105
+
106
+ def _refresh_tracing_config() -> None:
107
+ """Rebuild global tracing configuration so new env vars take effect."""
108
+
109
+ from synth_ai.tracing_v3 import config as tracing_config_module
110
+ from synth_ai.tracing_v3.storage import config as storage_config_module
111
+
112
+ tracing_config_module.CONFIG = tracing_config_module.TursoConfig() # type: ignore[assignment]
113
+ storage_config_module.STORAGE_CONFIG = storage_config_module.StorageConfig( # type: ignore[assignment]
114
+ connection_string=os.environ["SYNTH_TRACES_DB"],
115
+ backend=storage_config_module.StorageBackend.TURSO_NATIVE,
116
+ )
117
+
118
+
119
+ def _load_smoke_config(config_path: Path | None) -> dict[str, Any]:
120
+ """Load [smoke] section from TOML config file.
121
+
122
+ Returns an empty dict if no config file or no [smoke] section.
123
+ """
124
+ if not config_path:
125
+ return {}
126
+
127
+ try:
128
+ with open(config_path) as f:
129
+ full_config = toml.load(f)
130
+
131
+ smoke_config = full_config.get("smoke", {})
132
+
133
+ if smoke_config:
134
+ click.echo(f"[smoke] Loaded configuration from {config_path}", err=True)
135
+ click.echo(f"[smoke] Config keys: {', '.join(smoke_config.keys())}", err=True)
136
+
137
+ return smoke_config
138
+ except Exception as exc:
139
+ click.echo(f"[smoke] Warning: Failed to load config from {config_path}: {exc}", err=True)
140
+ return {}
141
+
142
+
143
+ def _kill_process_on_port(port: int) -> None:
144
+ """Kill any process listening on the given port."""
145
+ try:
146
+ # Use lsof to find and kill process on port
147
+ result = subprocess.run(
148
+ ["lsof", "-ti", f":{port}"],
149
+ capture_output=True,
150
+ text=True,
151
+ timeout=2,
152
+ )
153
+ if result.stdout.strip():
154
+ pids = result.stdout.strip().split('\n')
155
+ for pid in pids:
156
+ try:
157
+ subprocess.run(["kill", "-9", pid], timeout=2)
158
+ click.echo(f"[smoke] Killed existing process {pid} on port {port}", err=True)
159
+ except Exception:
160
+ pass
161
+ time.sleep(2.0) # Give OS time to release port
162
+ except Exception as exc:
163
+ click.echo(f"[smoke] Warning: Could not check/kill port {port}: {exc}", err=True)
164
+
165
+
166
+ def _start_task_app_server(
167
+ task_app_name: str,
168
+ port: int,
169
+ env_file: str | None,
170
+ force: bool
171
+ ) -> tuple[Any, str]:
172
+ """Start a task app server in the background using task-app serve.
173
+
174
+ Returns (process, url) tuple.
175
+ """
176
+ import subprocess
177
+ import time as time_module
178
+
179
+ # Build command using task-app serve (for TaskAppConfig-based apps)
180
+ cmd = [
181
+ "nohup",
182
+ "uvx", "synth-ai",
183
+ "task-app", "serve", task_app_name,
184
+ "--port", str(port),
185
+ ]
186
+
187
+ if env_file:
188
+ cmd.extend(["--env-file", env_file])
189
+
190
+ if force:
191
+ cmd.append("--force")
192
+
193
+ # Resolve the synth-ai root directory
194
+ import synth_ai
195
+ synth_ai_root = Path(synth_ai.__file__).resolve().parent.parent
196
+
197
+ click.echo(f"[smoke] Starting task app '{task_app_name}' on port {port}...", err=True)
198
+ click.echo(f"[smoke] Command: {' '.join(cmd)}", err=True)
199
+ click.echo(f"[smoke] Working directory: {synth_ai_root}", err=True)
200
+
201
+ # nohup requires output redirection to a file
202
+ # Open file, start process, then close file handle so process is fully detached
203
+ # Run from synth-ai root so task app discovery works
204
+ nohup_log = Path(synth_ai_root) / "nohup_task_app.out"
205
+
206
+ # Inherit SYNTH_QUIET environment variable to suppress patch messages
207
+ env = os.environ.copy()
208
+ if os.getenv("SYNTH_QUIET"):
209
+ env["SYNTH_QUIET"] = "1"
210
+
211
+ with open(nohup_log, "w") as log_file:
212
+ proc = subprocess.Popen(
213
+ cmd,
214
+ stdout=log_file,
215
+ stderr=subprocess.STDOUT,
216
+ text=True,
217
+ cwd=str(synth_ai_root),
218
+ env=env,
219
+ )
220
+ # File is closed immediately so process is detached
221
+
222
+ # Wait for server to be ready
223
+ url = f"http://localhost:{port}"
224
+ click.echo(f"[smoke] Waiting for task app to be ready at {url}...", err=True)
225
+
226
+ import httpx
227
+ deadline = time.time() + 120.0 # Give it 2 minutes for initial setup
228
+ attempt = 0
229
+ last_log_line = None
230
+ while time.time() < deadline:
231
+ attempt += 1
232
+ try:
233
+ resp = httpx.get(f"{url}/health", timeout=1.0)
234
+ # Accept both 200 and 400 - 400 means server is up but auth is failing (which is fine for smoke test)
235
+ if resp.status_code in (200, 400):
236
+ click.echo(f"[smoke] Task app ready at {url} (status={resp.status_code})", err=True)
237
+ return proc, url
238
+ except Exception:
239
+ pass
240
+
241
+ # Show polling progress every 5 seconds with last log line
242
+ if attempt % 10 == 0:
243
+ elapsed = int(time.time() - (deadline - 120.0))
244
+ # Try to read last line from nohup log
245
+ try:
246
+ if nohup_log.exists():
247
+ with open(nohup_log) as f:
248
+ lines = f.readlines()
249
+ if lines:
250
+ # Get last non-empty line
251
+ for line in reversed(lines[-10:]):
252
+ stripped = line.strip()
253
+ if stripped and stripped != last_log_line:
254
+ last_log_line = stripped
255
+ # Truncate if too long
256
+ if len(stripped) > 80:
257
+ stripped = stripped[:77] + "..."
258
+ click.echo(f"[smoke] Waiting ({elapsed}s): {stripped}", err=True)
259
+ break
260
+ else:
261
+ click.echo(f"[smoke] Still waiting for task app... ({elapsed}s elapsed)", err=True)
262
+ else:
263
+ click.echo(f"[smoke] Still waiting for task app... ({elapsed}s elapsed)", err=True)
264
+ except Exception:
265
+ click.echo(f"[smoke] Still waiting for task app... ({elapsed}s elapsed)", err=True)
266
+
267
+ # Check if process died
268
+ if proc.poll() is not None:
269
+ # Build a manual command that the user can copy-paste
270
+ manual_cmd_parts = ["uvx", "synth-ai", "task-app", "serve", task_app_name, "--port", str(port)]
271
+ if env_file:
272
+ manual_cmd_parts.extend(["--env-file", env_file])
273
+ if force:
274
+ manual_cmd_parts.append("--force")
275
+
276
+ raise click.ClickException(
277
+ f"Task app '{task_app_name}' process exited unexpectedly (code={proc.returncode}). "
278
+ f"Check that the task app name is correct and .env has required keys. "
279
+ f"Try running manually: {' '.join(manual_cmd_parts)}"
280
+ )
281
+
282
+ time_module.sleep(0.5)
283
+
284
+ proc.kill()
285
+ raise click.ClickException("Task app failed to start within 120 seconds")
286
+
287
+
288
+ def _start_sqld_server(
289
+ db_path: str,
290
+ hrana_port: int,
291
+ http_port: int
292
+ ) -> Any:
293
+ """Start sqld server in the background.
294
+
295
+ Returns the process handle.
296
+ """
297
+ import shutil
298
+ import subprocess
299
+
300
+ # Check if sqld is available
301
+ sqld_bin = shutil.which("sqld")
302
+ if not sqld_bin:
303
+ click.echo("[smoke] Warning: sqld not found in PATH, skipping auto-start", err=True)
304
+ click.echo("[smoke] Install sqld: brew install sqld", err=True)
305
+ return None
306
+
307
+ # Ensure db directory exists
308
+ db_path_obj = Path(db_path).expanduser().resolve()
309
+ db_path_obj.parent.mkdir(parents=True, exist_ok=True)
310
+
311
+ # Kill any existing processes on these ports
312
+ for port in [hrana_port, http_port]:
313
+ _kill_process_on_port(port)
314
+
315
+ cmd = [
316
+ sqld_bin,
317
+ "--db-path", str(db_path_obj),
318
+ "--hrana-listen-addr", f"127.0.0.1:{hrana_port}",
319
+ "--http-listen-addr", f"127.0.0.1:{http_port}",
320
+ ]
321
+
322
+ click.echo("[smoke] Starting sqld server...", err=True)
323
+ click.echo(f"[smoke] DB path: {db_path_obj}", err=True)
324
+ click.echo(f"[smoke] Hrana port: {hrana_port}, HTTP port: {http_port}", err=True)
325
+ click.echo(f"[smoke] Command: {' '.join(cmd)}", err=True)
326
+
327
+ # Redirect to devnull to avoid process dying from pipe buffer issues
328
+ proc = subprocess.Popen(
329
+ cmd,
330
+ stdout=subprocess.DEVNULL,
331
+ stderr=subprocess.DEVNULL,
332
+ text=True,
333
+ )
334
+
335
+ # Wait for server to be ready
336
+ health_url = f"http://127.0.0.1:{http_port}/health"
337
+ click.echo(f"[smoke] Waiting for sqld to be ready at {health_url}...", err=True)
338
+
339
+ deadline = time.time() + 10.0
340
+ while time.time() < deadline:
341
+ try:
342
+ resp = httpx.get(health_url, timeout=0.5)
343
+ if resp.status_code == 200:
344
+ click.echo("[smoke] sqld ready", err=True)
345
+ # Set environment variables for tracing
346
+ os.environ["SQLD_DB_PATH"] = str(db_path_obj)
347
+ os.environ["SQLD_HTTP_PORT"] = str(hrana_port)
348
+ os.environ["LIBSQL_URL"] = f"http://127.0.0.1:{http_port}"
349
+ os.environ["SYNTH_TRACES_DB"] = f"http://127.0.0.1:{http_port}"
350
+ return proc
351
+ except Exception:
352
+ pass
353
+
354
+ # Check if process died
355
+ if proc.poll() is not None:
356
+ click.echo(f"[smoke] Warning: sqld process exited with code {proc.returncode}", err=True)
357
+ return None
358
+
359
+ time.sleep(0.2)
360
+
361
+ click.echo("[smoke] Warning: sqld health check timed out, continuing anyway...", err=True)
362
+ return proc
363
+
364
+ class MockRLTrainer:
365
+ """Minimal trainer emulator with a local FastAPI mock for GPT-5-Nano.
366
+
367
+ In ``synthetic`` mode it emits deterministic tool calls so the rollout can
368
+ progress without relying on external inference. In ``openai`` mode it acts
369
+ as a thin proxy around the real OpenAI chat completions endpoint (useful to
370
+ reproduce production behaviour locally).
371
+ """
372
+
373
+ def __init__(self, *, port: int = 0, backend: str = "synthetic") -> None:
374
+ self.port = port
375
+ self.backend = backend.lower().strip() or "synthetic"
376
+ self._server = None
377
+ self._task: asyncio.Task | None = None
378
+ self._openai_endpoint = os.getenv(
379
+ "SMOKE_OPENAI_ENDPOINT", "https://api.openai.com/v1/chat/completions"
380
+ )
381
+ self._openai_api_key = (
382
+ os.getenv("SMOKE_OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY") or ""
383
+ )
384
+
385
+ def _build_app(self):
386
+ import json
387
+
388
+ from fastapi import Body, FastAPI
389
+ from fastapi.responses import JSONResponse
390
+
391
+ try:
392
+ logger = logging.getLogger(__name__)
393
+ except Exception: # pragma: no cover - logging failures should not crash
394
+ logger = None
395
+
396
+ app = FastAPI()
397
+ backend = self.backend
398
+
399
+ @app.post("/v1/chat/completions")
400
+ async def chat_completions(body: dict = Body(...), cid: str | None = None):
401
+ log = logger or logging.getLogger("MockRLTrainer")
402
+ try:
403
+ msg_count = len(body.get("messages") or [])
404
+ except Exception:
405
+ msg_count = -1
406
+ click.echo(
407
+ f"[mock-rl] ← request backend={backend} model={body.get('model')} messages={msg_count} cid={cid}",
408
+ err=True,
409
+ )
410
+
411
+ # Explicit Body(...) avoids FastAPI interpreting parameters as query args
412
+ model = (body.get("model") or "gpt-5-nano")
413
+ messages = body.get("messages") or []
414
+ tools = body.get("tools") or []
415
+
416
+ # Decide whether to emit a tool call (to drive env steps) or plain text
417
+ emit_tool = False
418
+ tool_name = ""
419
+ for t in tools:
420
+ try:
421
+ if (t or {}).get("type") == "function":
422
+ fn = (t or {}).get("function") or {}
423
+ name = (fn or {}).get("name") or ""
424
+ if name:
425
+ tool_name = name
426
+ emit_tool = True
427
+ break
428
+ except Exception:
429
+ continue
430
+
431
+ # Simple heuristic actions to move/explore then interact
432
+ actions = ["move_right", "move_right", "move_down", "move_left", "do"]
433
+
434
+ correlation = cid
435
+
436
+ if backend == "openai":
437
+ if not self._openai_api_key:
438
+ return JSONResponse(
439
+ {
440
+ "error": "OPENAI_API_KEY (or SMOKE_OPENAI_API_KEY) is required for mock backend 'openai'"
441
+ },
442
+ status_code=500,
443
+ )
444
+ try:
445
+ from examples.task_apps.crafter.task_app.synth_envs_hosted.inference.openai_client import (
446
+ OpenAIClient as _HostedOpenAIClient,
447
+ )
448
+
449
+ hosted_client = _HostedOpenAIClient(
450
+ base_url=self._openai_endpoint,
451
+ api_key=self._openai_api_key,
452
+ )
453
+ except Exception as exc:
454
+ if logger is not None:
455
+ logger.error("MockRLTrainer failed to import HostedOpenAIClient: %s", exc)
456
+ return JSONResponse(
457
+ {"error": f"OpenAI proxy unavailable: {exc}"},
458
+ status_code=500,
459
+ )
460
+
461
+ try:
462
+ result = await hosted_client.generate_with_retries( # type: ignore[attr-defined]
463
+ request=body,
464
+ base_url=self._openai_endpoint,
465
+ max_retries=0,
466
+ )
467
+ except Exception as exc:
468
+ if logger is not None:
469
+ logger.error("MockRLTrainer OpenAI generate failed: %s", exc)
470
+ return JSONResponse(
471
+ {"error": f"OpenAI proxy request failed: {exc}"},
472
+ status_code=502,
473
+ )
474
+
475
+ if isinstance(result, dict):
476
+ data_typed = dict(result)
477
+ synth_meta = data_typed.get("synth")
478
+ if not isinstance(synth_meta, dict):
479
+ synth_meta = {}
480
+ data_typed["synth"] = synth_meta
481
+ if correlation:
482
+ synth_meta.setdefault("cid", correlation)
483
+
484
+ # Fallback: if the upstream response failed to emit tool calls,
485
+ # synthesize a deterministic action plan so the rollout can proceed.
486
+ try:
487
+ choices = data_typed.get("choices") or []
488
+ first = choices[0] if choices else {}
489
+ message = first.get("message") if isinstance(first, dict) else {}
490
+ tc = message.get("tool_calls") if isinstance(message, dict) else None
491
+ if not tc:
492
+ if logger is not None:
493
+ logger.warning(
494
+ "MockRLTrainer fallback: OpenAI returned no tool calls; injecting deterministic actions."
495
+ )
496
+ fallback_message = dict(message or {})
497
+ fallback_message.setdefault("role", "assistant")
498
+ fallback_message["content"] = ""
499
+ fallback_message["tool_calls"] = [
500
+ {
501
+ "id": f"call_{uuid.uuid4().hex[:8]}",
502
+ "type": "function",
503
+ "function": {
504
+ "name": tool_name or "interact_many",
505
+ "arguments": json.dumps({"actions": actions}),
506
+ },
507
+ }
508
+ ]
509
+ fallback_message["function_call"] = {
510
+ "name": tool_name or "interact_many",
511
+ "arguments": json.dumps({"actions": actions}),
512
+ }
513
+ if choices:
514
+ choices[0]["message"] = fallback_message
515
+ else:
516
+ data_typed["choices"] = [
517
+ {
518
+ "index": 0,
519
+ "message": fallback_message,
520
+ "finish_reason": "tool_calls",
521
+ }
522
+ ]
523
+ except Exception as exc:
524
+ if logger is not None:
525
+ logger.debug("MockRLTrainer fallback injection failed: %s", exc)
526
+
527
+ tool_call_count = 0
528
+ try:
529
+ choices = data_typed.get("choices") or []
530
+ first = choices[0] if choices else {}
531
+ message = first.get("message") if isinstance(first, dict) else {}
532
+ if isinstance(message, dict):
533
+ tool_call_count = len(message.get("tool_calls") or [])
534
+ except Exception:
535
+ tool_call_count = 0
536
+
537
+ log.info(
538
+ "MockRLTrainer proxy returning response with %s tool calls (cid=%s)",
539
+ tool_call_count,
540
+ cid,
541
+ )
542
+ if tool_call_count == 0:
543
+ log.error(
544
+ "MockRLTrainer proxy still missing tool_calls after fallback injection (cid=%s)",
545
+ cid,
546
+ )
547
+ click.echo(
548
+ "[mock-rl] ✗ proxy response missing tool_calls; failing request", err=True
549
+ )
550
+ return JSONResponse(data_typed)
551
+ return JSONResponse(result)
552
+
553
+ if emit_tool:
554
+ # Emit BOTH legacy function_call and modern tool_calls for broad compatibility
555
+ message_payload = {
556
+ "role": "assistant",
557
+ "content": "",
558
+ "function_call": {
559
+ "name": tool_name,
560
+ "arguments": json.dumps({"actions": actions}),
561
+ },
562
+ "tool_calls": [
563
+ {
564
+ "id": f"call_{uuid.uuid4().hex[:8]}",
565
+ "type": "function",
566
+ "function": {
567
+ "name": tool_name,
568
+ "arguments": json.dumps({"actions": actions}),
569
+ },
570
+ }
571
+ ],
572
+ }
573
+ finish_reason = "tool_calls"
574
+ else:
575
+ # Fallback: echo last user content as plain text
576
+ click.echo(
577
+ f"[mock-rl] ! no tool schema supplied; returning text response (cid={cid})",
578
+ err=True,
579
+ )
580
+ log.warning(
581
+ "MockRLTrainer received request without tool schema; responding with text content (cid=%s)",
582
+ cid,
583
+ )
584
+ last_user = next((m.get("content", "") for m in reversed(messages) if m.get("role") == "user"), "")
585
+ text = (last_user or "").strip()
586
+ if len(text) > 160:
587
+ text = text[:160] + "..."
588
+ message_payload = {"role": "assistant", "content": f"MOCK(gpt-5-nano): {text or 'ack'}"}
589
+ finish_reason = "stop"
590
+
591
+ response = {
592
+ "id": f"cmpl_{uuid.uuid4().hex[:12]}",
593
+ "object": "chat.completion",
594
+ "created": int(asyncio.get_event_loop().time()),
595
+ "model": model,
596
+ "choices": [{"index": 0, "message": message_payload, "finish_reason": finish_reason}],
597
+ "usage": {"prompt_tokens": 32, "completion_tokens": 16, "total_tokens": 48},
598
+ "synth": {"cid": correlation},
599
+ }
600
+ if finish_reason == "tool_calls":
601
+ # Type-safe extraction of tool call count
602
+ tc = 0
603
+ try:
604
+ choices = response.get("choices")
605
+ if isinstance(choices, list) and choices:
606
+ first_choice = choices[0]
607
+ if isinstance(first_choice, dict):
608
+ msg = first_choice.get("message")
609
+ if isinstance(msg, dict):
610
+ tool_calls = msg.get("tool_calls")
611
+ if isinstance(tool_calls, list):
612
+ tc = len(tool_calls)
613
+ except Exception:
614
+ pass
615
+ log.debug(
616
+ "MockRLTrainer synthetic response emitting %s tool calls (cid=%s)",
617
+ tc,
618
+ cid,
619
+ )
620
+ assert tc > 0, "MockRLTrainer synthetic response missing tool_calls"
621
+ click.echo(
622
+ f"[mock-rl] → response tool_calls={tc} backend={backend} cid={cid}",
623
+ err=True,
624
+ )
625
+ else:
626
+ click.echo(
627
+ f"[mock-rl] → response finish_reason={finish_reason} backend={backend} cid={cid}",
628
+ err=True,
629
+ )
630
+ return JSONResponse(response)
631
+
632
+ return app
633
+
634
+ async def start(self) -> None:
635
+ import socket
636
+
637
+ import uvicorn
638
+
639
+ def _allocate_port() -> int:
640
+ nonlocal socket
641
+ if self.port:
642
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
643
+ probe.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
644
+ try:
645
+ probe.bind(("127.0.0.1", self.port))
646
+ return self.port
647
+ except OSError:
648
+ pass
649
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
650
+ probe.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
651
+ probe.bind(("127.0.0.1", 0))
652
+ self.port = probe.getsockname()[1]
653
+ return self.port
654
+
655
+ retries = 0
656
+ while True:
657
+ selected_port = _allocate_port()
658
+ config = uvicorn.Config(
659
+ self._build_app(),
660
+ host="127.0.0.1",
661
+ port=selected_port,
662
+ log_level="warning",
663
+ )
664
+ self._server = uvicorn.Server(config)
665
+ self._task = asyncio.create_task(self._server.serve())
666
+
667
+ for _ in range(100):
668
+ if getattr(self._server, "started", False):
669
+ break
670
+ if self._task.done():
671
+ break
672
+ await asyncio.sleep(0.05)
673
+
674
+ if getattr(self._server, "started", False):
675
+ try:
676
+ logging.getLogger(__name__).info(
677
+ "MockRLTrainer started on http://127.0.0.1:%s (backend=%s)",
678
+ self.port,
679
+ self.backend,
680
+ )
681
+ click.echo(
682
+ f"[mock-rl] server ready http://127.0.0.1:{self.port} backend={self.backend}",
683
+ err=True,
684
+ )
685
+ except Exception:
686
+ pass
687
+ return
688
+
689
+ # Startup failed; stop server and retry on a new port if possible
690
+ await self.stop()
691
+ if retries >= 5:
692
+ raise RuntimeError("MockRLTrainer failed to start after multiple attempts")
693
+ self.port = 0
694
+ retries += 1
695
+
696
+ async def stop(self) -> None:
697
+ if self._server is not None:
698
+ self._server.should_exit = True
699
+ if self._task is not None:
700
+ with contextlib.suppress(Exception):
701
+ await asyncio.wait_for(self._task, timeout=2.0)
702
+ self._task = None
703
+ self._server = None
704
+ click.echo("[mock-rl] server stopped", err=True)
705
+
706
async def _run_smoke_async(
    *,
    task_app_url: str,
    api_key: str | None,
    env_name_opt: str | None,
    policy_name: str,
    model: str,
    inference_url_opt: str | None,
    inference_policy: str | None,
    max_steps: int,
    return_trace: bool,
    use_mock: bool,
    mock_port: int,
    mock_backend: str,
    config_path: Path | None,
    rollouts: int = 1,
    group_size: int = 1,
    batch_size: int | None = None,
) -> int:
    """Run one or more smoke rollouts against a task app and report results.

    Derives missing settings (task URL, env name, model) from an optional RL
    TOML config, resolves the inference route (preset / explicit URL / local
    mock / Synth default), executes ``rollouts * group_size`` rollouts, and
    prints per-rollout metrics, rewards, achievements, and tool calls.

    Returns:
        0 on success, 2 for configuration errors, 3 when every rollout failed.
    """
    # If config is provided, derive defaults (URL/env/model)
    cfg: Any | None = None
    if config_path is not None:
        try:
            from synth_ai.api.train.configs.rl import (
                RLConfig as _RLConfig, # lazy import to avoid heavy deps when unused
            )
            cfg = _RLConfig.from_path(config_path)
        except Exception as exc:
            click.echo(f"Failed to load RL config {config_path}: {exc}", err=True)
            return 2

    # Prefer explicit CLI --url; only use config services.task_url if URL not provided
    # NOTE(review): when cfg is None, the attribute access raises and the
    # broad except below intentionally leaves the CLI value untouched.
    try:
        if not task_app_url and cfg.services and getattr(cfg.services, "task_url", None):
            task_app_url = cfg.services.task_url
    except Exception:
        pass
    # Fill env and model if not explicitly set
    try:
        if not env_name_opt and cfg.rollout and getattr(cfg.rollout, "env_name", None):
            env_name_opt = cfg.rollout.env_name
    except Exception:
        pass
    # Model resolution order (only when the CLI default is still in place):
    # [smoke].model > policy.model_name > policy.source > model.source > model.base
    try:
        if model == "gpt-5-nano":
            # Prefer smoke config model over policy model for smoke tests
            smoke_cfg = getattr(cfg, "smoke", None)
            smoke_model = None
            if smoke_cfg and hasattr(smoke_cfg, "model"):
                smoke_model = smoke_cfg.model
            if smoke_model:
                model = str(smoke_model).strip()
            elif cfg.policy:
                if getattr(cfg.policy, "model_name", None):
                    model = str(cfg.policy.model_name).strip()
                elif getattr(cfg.policy, "source", None):
                    model = str(cfg.policy.source).strip()
            elif cfg.model and getattr(cfg.model, "source", None):
                model = str(cfg.model.source).strip()
            elif cfg.model and getattr(cfg.model, "base", None):
                model = str(cfg.model.base).strip()
    except Exception:
        pass

    base = validate_task_app_url(task_app_url)
    mock_backend = (mock_backend or "synthetic").strip().lower()

    # Discover environment if not provided
    async with TaskAppClient(base_url=base, api_key=api_key) as client:
        # Probe basic info quickly
        try:
            _ = await client.health()
        except Exception:
            click.echo("Auth or connectivity check failed on /health. If this endpoint requires a key, pass --api-key or set ENVIRONMENT_API_KEY.", err=True)
            # Continue; rollout may still clarify the error

        # Fetch a sample task instance to infer environment name if not provided
        env_name = env_name_opt
        if not env_name:
            try:
                ti = await client.task_info(seeds=[0])
                # task_info returns TaskInfo or list[TaskInfo]; normalize
                info: Any = ti[0] if isinstance(ti, list) else ti
                env_name = getattr(info, "environment", None) or getattr(info, "task", {}).get("name") # type: ignore[attr-defined]
            except Exception:
                env_name = None
        if not env_name:
            click.echo("Could not infer environment name; pass --env-name.", err=True)
            return 2

        # Build ops: alternating agent/env for max_steps
        ops: list[str] = []
        for _ in range(max_steps):
            ops.append("agent")
            ops.append("env")

        # Inference URL: user override > preset > local mock > Synth API default
        synth_base = (os.getenv("SYNTH_API_BASE") or os.getenv("SYNTH_BASE_URL") or "https://api.synth.run").rstrip("/")
        # Avoid double '/api' if base already includes it
        if synth_base.endswith("/api"):
            default_infer = f"{synth_base}/inference/v1/chat/completions"
        else:
            default_infer = f"{synth_base}/api/inference/v1/chat/completions"

        # Helper to execute one or more rollouts and return exit code
        async def __do_rollouts(inference_url_raw: str) -> int:
            successes = 0
            total_steps = 0
            nonzero_returns = 0
            v3_traces = 0

            # Derive sampling params from config if present
            sampling: dict[str, Any] = {}
            try:
                if cfg and cfg.policy:
                    if getattr(cfg.policy, "temperature", None) is not None:
                        sampling["temperature"] = cfg.policy.temperature
                    if getattr(cfg.policy, "top_p", None) is not None:
                        sampling["top_p"] = cfg.policy.top_p
                    if getattr(cfg.policy, "max_tokens", None) is not None:
                        sampling["max_tokens"] = cfg.policy.max_tokens
            except Exception:
                pass

            # batch_size, when positive, overrides rollouts as the seed count.
            num_outer = batch_size if (batch_size is not None and batch_size > 0) else max(1, int(rollouts))
            for i in range(num_outer):
                # group_size emulates GRPO-style multiple completions per seed.
                for g in range(max(1, int(group_size))):
                    # A relative inference path is resolved against the task app base URL.
                    if inference_url_raw.startswith("/"):
                        inference_url_abs = f"{base}{inference_url_raw}"
                    else:
                        inference_url_abs = inference_url_raw
                    inference_url_norm = normalize_inference_url(inference_url_abs)
                    # Fresh correlation id per rollout; doubles as the run_id.
                    correlation_id = f"smoke-{uuid.uuid4()}"
                    inference_url_with_cid = _append_query_param(inference_url_norm, "cid", correlation_id)

                    run_id = correlation_id
                    policy_cfg: dict[str, Any] = {
                        "model": model,
                        "inference_url": inference_url_with_cid,
                    }
                    if sampling:
                        policy_cfg.update(sampling)

                    request = RolloutRequest(
                        run_id=run_id,
                        env=RolloutEnvSpec(env_name=env_name, config={}, seed=i),
                        policy=RolloutPolicySpec(policy_name=policy_name, config=policy_cfg),
                        ops=ops,
                        record=RolloutRecordConfig(
                            trajectories=True,
                            logprobs=False,
                            value=False,
                            return_trace=return_trace,
                            trace_format=("structured" if return_trace else "compact"),
                        ),
                        on_done="reset",
                        safety=RolloutSafetyConfig(max_ops=max_steps * 4, max_time_s=900.0),
                        training_session_id=None,
                        synth_base_url=synth_base,
                        mode=RolloutMode.RL,
                    )

                    try:
                        click.echo(f">> POST /rollout run_id={run_id} env={env_name} policy={policy_name} url={inference_url_with_cid}")
                        click.echo(f" ops={ops[:10]}{'...' if len(ops) > 10 else ''}")
                        response = await client.rollout(request)
                    except Exception as exc:
                        # A single failed rollout is not fatal; continue the batch.
                        click.echo(f"Rollout[{i}:{g}] failed: {type(exc).__name__}: {exc}", err=True)
                        import traceback
                        click.echo(f"Traceback: {traceback.format_exc()}", err=True)
                        continue

                    successes += 1
                    try:
                        validate_rollout_response_for_rl(response.model_dump())
                    except Exception as vexc:
                        # Validation failures are surfaced as warnings, not errors.
                        click.echo(f" ⚠ RL response validation warning: {vexc}", err=True)

                    pm = response.pipeline_metadata or {}
                    inferred_url = pm.get("inference_url") if isinstance(pm, dict) else None
                    metrics = response.metrics
                    if inferred_url:
                        click.echo(f" rollout[{i}:{g}] inference_url: {inferred_url}")
                    click.echo(f" rollout[{i}:{g}] episodes={metrics.num_episodes} steps={metrics.num_steps} mean_return={metrics.mean_return:.4f}")

                    total_steps += int(metrics.num_steps)
                    if (metrics.mean_return or 0.0) != 0.0:
                        nonzero_returns += 1
                    if response.trace is not None and isinstance(response.trace, dict):
                        v3_traces += 1

                    # Only the very first rollout gets the detailed step-meta probe.
                    if i == 0 and g == 0:
                        try:
                            traj0 = response.trajectories[0]
                            step_meta_url = None
                            for step in traj0.steps:
                                info = getattr(step, "info", None) or {}
                                meta = info.get("meta") if isinstance(info, dict) else None
                                if isinstance(meta, dict) and meta.get("inference_url"):
                                    step_meta_url = meta.get("inference_url")
                                    break
                            if step_meta_url:
                                click.echo(f" step.meta.inference_url: {str(step_meta_url)[:120]}...")
                        except Exception:
                            pass

                    try:
                        try:
                            metrics_dump = response.metrics.model_dump()
                        except Exception:
                            # Fallback for non-pydantic metrics objects.
                            metrics_dump = {
                                "episode_returns": getattr(response.metrics, "episode_returns", None),
                                "mean_return": getattr(response.metrics, "mean_return", None),
                                "num_steps": getattr(response.metrics, "num_steps", None),
                                "num_episodes": getattr(response.metrics, "num_episodes", None),
                                "outcome_score": getattr(response.metrics, "outcome_score", None),
                                "events_score": getattr(response.metrics, "events_score", None),
                            }
                        click.echo(" reward.info (metrics): " + str(metrics_dump))

                        try:
                            traj = response.trajectories[0]
                            step_rewards = []
                            all_achievements = set()
                            for st in getattr(traj, "steps", []) or []:
                                try:
                                    step_rewards.append(getattr(st, "reward", None))
                                except Exception:
                                    step_rewards.append(None)
                                # Extract achievements from step info
                                try:
                                    step_info = getattr(st, "info", None)
                                    if isinstance(step_info, dict):
                                        achievements_status = step_info.get("achievements_status")
                                        if isinstance(achievements_status, dict):
                                            for ach_name, ach_val in achievements_status.items():
                                                if ach_val:
                                                    all_achievements.add(str(ach_name))
                                except Exception:
                                    pass
                            click.echo(" reward.per_step: " + str(step_rewards))
                            if all_achievements:
                                click.echo(f" achievements: {sorted(all_achievements)}")
                            else:
                                click.echo(" achievements: none")
                        except Exception:
                            pass

                        # Extract and display tool calls from v3 trace
                        #
                        # IMPORTANT: Tool calls are extracted from the structured v3 trace format.
                        # The trace must be requested with return_trace=True for this to work.
                        #
                        # Trace structure:
                        # trace.event_history[] - list of events (policy calls, env steps)
                        # ├─ event.call_records[] - LLM calls made during this event
                        # ├─ call_record.output_tool_calls[] - tool calls from LLM response
                        # ├─ tool_call.name - function name (e.g., "interact_many")
                        # └─ tool_call.arguments_json - JSON string of arguments
                        #
                        # This provides visibility into what actions the policy is taking,
                        # which is critical for debugging RL training issues.
                        tr = response.trace if isinstance(response.trace, dict) else None
                        if tr:
                            event_history = tr.get("event_history", [])
                            tool_call_count = 0

                            # Extract tool calls from event_history call_records
                            if event_history and isinstance(event_history, list):
                                for event in event_history:
                                    if not isinstance(event, dict):
                                        continue
                                    # Policy events contain call_records with LLM interactions
                                    call_records = event.get("call_records")
                                    if call_records and isinstance(call_records, list):
                                        for call_record in call_records:
                                            if isinstance(call_record, dict):
                                                # Extract tool calls from this LLM call
                                                output_tool_calls = call_record.get("output_tool_calls", [])
                                                if output_tool_calls and isinstance(output_tool_calls, list):
                                                    for tc in output_tool_calls:
                                                        if isinstance(tc, dict):
                                                            fn_name = tc.get("name", "unknown")
                                                            fn_args = tc.get("arguments_json", "{}")
                                                            # Display tool call with truncated args for readability
                                                            click.echo(f" TOOL_CALL[{tool_call_count}]: {fn_name}({fn_args[:100]}{'...' if len(fn_args) > 100 else ''})")
                                                            tool_call_count += 1

                            if tool_call_count > 0:
                                click.echo(f" ✓ {tool_call_count} tool calls executed")
                            else:
                                # No tool calls found - might indicate:
                                # 1. return_trace=False (trace not requested)
                                # 2. Policy didn't make tool calls (unlikely for most RL tasks)
                                # 3. Trace format mismatch (structure changed)
                                click.echo(" ⚠ No tool calls found in trace")
                        else:
                            click.echo(" ⚠ Trace not available")
                    except Exception as e:
                        click.echo(f" trace error: {e}", err=True)

            click.echo("✓ Smoke rollouts complete")
            denom = num_outer * max(1, int(group_size))
            click.echo(f" successes={successes}/{denom} total_steps={total_steps} v3_traces={v3_traces}/{denom} nonzero_returns={nonzero_returns}/{denom}")

            if successes == 0:
                click.echo(" ⚠ All rollouts failed", err=True)
                return 3
            if v3_traces < successes:
                click.echo(" ⚠ Some rollouts missing v3 traces (trace field)", err=True)
            if total_steps == 0:
                click.echo(" ⚠ No steps executed; check ops/policy config", err=True)

            return 0

        # Initialize to default; policy/flags may override below
        inference_url_raw = inference_url_opt or default_infer
        mock: MockRLTrainer | None = None
        preset = (inference_policy or "").strip().lower()

        # Respect explicit preset overrides
        if preset == "mock":
            use_mock = True
        elif preset == "gpt-5-nano":
            if not inference_url_opt:
                inference_url_raw = default_infer
            if not model:
                model = "gpt-5-nano"
        elif preset == "openai":
            inference_url_raw = "https://api.openai.com/v1/chat/completions"
        elif preset == "groq":
            inference_url_raw = "https://api.groq.com/openai/v1/chat/completions"

        # Start mock proxy only when explicitly requested
        if use_mock:
            backend_choice = mock_backend
            # OpenAI passthrough needs a key; degrade to the synthetic backend otherwise.
            if backend_choice == "openai" and not (
                os.getenv("SMOKE_OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY")
            ):
                click.echo(
                    " ⚠ OPENAI_API_KEY not configured; falling back to synthetic mock.",
                    err=True,
                )
                backend_choice = "synthetic"
            mock = MockRLTrainer(port=mock_port, backend=backend_choice)
            await mock.start()
            inference_url_raw = f"http://127.0.0.1:{mock.port}"

        # Always stop the mock server, even if the rollouts raise.
        try:
            result = await __do_rollouts(inference_url_raw)
        finally:
            if mock is not None:
                with contextlib.suppress(Exception):
                    await mock.stop()
        return result
1061
async def _run_train_step(
    *,
    task_app_url: str,
    api_key: str | None,
    env_name_opt: str | None,
    policy_name: str,
    model: str,
    inference_policy: str | None,
    inference_url_opt: str | None,
    max_steps: int,
    return_trace: bool,
    use_mock: bool,
    mock_backend: str,
    mock_port: int,
    config_path: Path | None,
    parallel: int,
) -> int:
    """Emulate one RL train step by running ``parallel`` smoke rollouts concurrently.

    Each concurrent task invokes :func:`_run_smoke_async` with a single
    rollout and records its exit code and wall time; a summary (success count,
    timings, failure-code histogram) is printed at the end.

    Returns:
        0 when every rollout succeeded, otherwise 3.
    """
    import time
    start = time.perf_counter()

    async def one(seed_idx: int) -> dict[str, Any]:
        # NOTE(review): seed_idx is currently unused — every parallel rollout
        # runs with the smoke default (seed 0); confirm whether seeds should vary.
        t0 = time.perf_counter()
        try:
            code = await _run_smoke_async(
                task_app_url=task_app_url,
                api_key=api_key,
                env_name_opt=env_name_opt,
                policy_name=policy_name,
                model=model,
                inference_policy=inference_policy,
                inference_url_opt=inference_url_opt,
                max_steps=max_steps,
                return_trace=return_trace,
                use_mock=use_mock,
                mock_backend=mock_backend,
                mock_port=mock_port,
                config_path=config_path,
                rollouts=1,
                group_size=1,
                batch_size=None,
            )
            wall_ms = (time.perf_counter() - t0) * 1000.0
            return {"exit": int(code), "wall_ms": wall_ms}
        except Exception as e:
            # Exceptions are mapped to exit code 99 so gather() never raises.
            wall_ms = (time.perf_counter() - t0) * 1000.0
            return {"exit": 99, "wall_ms": wall_ms, "error": f"{type(e).__name__}: {e}"}

    # Launch N rollouts concurrently
    tasks = [one(i) for i in range(max(1, int(parallel)))]
    results = await asyncio.gather(*tasks, return_exceptions=False)
    total_wall_ms = (time.perf_counter() - start) * 1000.0

    # Print summary
    def _exit_code(result: dict[str, Any]) -> int:
        # Coerce the recorded exit value to int defensively; anything
        # unparseable counts as a generic failure (1).
        value = result.get("exit")
        if isinstance(value, (int, float)):
            return int(value)
        if isinstance(value, str) and value.strip():
            try:
                return int(value.strip())
            except ValueError:
                return 1
        return 1

    successes = sum(1 for r in results if _exit_code(r) == 0)
    avg_wall = sum(float(r.get("wall_ms", 0.0)) for r in results) / max(len(results), 1)
    click.echo("✓ Train-step emulation complete")
    click.echo(f" parallel={parallel} successes={successes}/{len(results)} total_wall_ms={total_wall_ms:.1f} avg_rollout_wall_ms={avg_wall:.1f}")

    # Show brief failure codes to aid diagnosis
    if successes < len(results):
        codes: dict[int, int] = {}
        for r in results:
            if not isinstance(r, dict):
                continue
            c = _exit_code(r)
            codes[c] = codes.get(c, 0) + 1
        click.echo(f" failure_codes={codes}")

    return 0 if successes == len(results) else 3
1141
+
1142
+
1143
+ @click.command("smoke")
1144
+ @click.option("--url", "task_app_url", type=str, default=lambda: os.getenv("TASK_APP_URL", "http://localhost:8765"), help="Task app base URL.")
1145
+ @click.option(
1146
+ "--api-key",
1147
+ type=str,
1148
+ default=lambda: os.getenv("ENVIRONMENT_API_KEY", ""),
1149
+ envvar="ENVIRONMENT_API_KEY",
1150
+ help="Environment API key (X-API-Key).",
1151
+ )
1152
+ @click.option("--env-name", type=str, default=None, help="Environment name to roll out (auto-detected if possible).")
1153
+ @click.option("--policy-name", type=str, default="react", help="Policy name to pass to task app.")
1154
+ @click.option("--model", type=str, default="gpt-5-nano", help="Model id to route in inference payload.")
1155
+ @click.option(
1156
+ "--policy",
1157
+ "inference_policy",
1158
+ type=click.Choice(["mock", "gpt-5-nano", "openai", "groq"], case_sensitive=False),
1159
+ default=None,
1160
+ help="Inference route preset (mock, gpt-5-nano via Synth, OpenAI or Groq).",
1161
+ )
1162
+ @click.option("--inference-url", type=str, default=None, help="Override inference URL (default: Synth API chat completions).")
1163
+ @click.option("--max-steps", type=int, default=3, show_default=True, help="Number of agent/env step pairs.")
1164
+ @click.option("--return-trace", is_flag=True, help="Request v3 trace in response if supported.")
1165
+ @click.option("--use-mock/--no-mock", default=True, show_default=True, help="Use local mock inference server (GPT-5-Nano emulation).")
1166
+ @click.option(
1167
+ "--mock-backend",
1168
+ type=click.Choice(["synthetic", "openai"], case_sensitive=False),
1169
+ default="synthetic",
1170
+ show_default=True,
1171
+ help="Mock inference backend: synthetic deterministic tooling or OpenAI passthrough.",
1172
+ )
1173
+ @click.option("--mock-port", type=int, default=0, show_default=True, help="Port for local mock inference server (0 = auto).")
1174
+ @click.option("--config", type=click.Path(exists=True, dir_okay=False, path_type=Path), default=None, help="RL TOML config to derive URL/env/model.")
1175
+ @click.option("--env-file", type=click.Path(exists=True, dir_okay=False, path_type=Path), default=None, help="Path to .env to load before running.")
1176
+ @click.option("--rollouts", type=int, default=1, show_default=True, help="Number of rollouts (seeds 0..N-1).")
1177
+ @click.option("--group-size", type=int, default=1, show_default=True, help="Completions per seed to emulate GRPO grouping.")
1178
+ @click.option("--batch-size", type=int, default=None, help="Alias for rollouts; when set, overrides --rollouts.")
1179
+ @click.option(
1180
+ "--parallel",
1181
+ type=int,
1182
+ default=0,
1183
+ show_default=True,
1184
+ help="Emulate a train step by running this many rollouts concurrently (0 = sequential).",
1185
+ )
1186
+ def command(
1187
+ task_app_url: str,
1188
+ api_key: str,
1189
+ env_name: str | None,
1190
+ policy_name: str,
1191
+ model: str,
1192
+ inference_policy: str | None,
1193
+ inference_url: str | None,
1194
+ max_steps: int,
1195
+ return_trace: bool,
1196
+ use_mock: bool,
1197
+ mock_backend: str,
1198
+ mock_port: int,
1199
+ config: Path | None,
1200
+ env_file: Path | None,
1201
+ rollouts: int,
1202
+ group_size: int,
1203
+ batch_size: int | None,
1204
+ parallel: int,
1205
+ ) -> None:
1206
+ """Smoke-test a Task App by emulating a trainer rollout using GPT-5-Nano.
1207
+
1208
+ This command posts a minimal RL rollout to the task app, with a valid
1209
+ OpenAI-compatible inference URL including a trace correlation id, and
1210
+ validates that the response contains the fields required by the RL trainer
1211
+ (e.g. pipeline_metadata.inference_url and per-step info.meta.inference_url).
1212
+
1213
+ If --config is provided, loads settings from the [smoke] section in the TOML file.
1214
+ CLI arguments override TOML values.
1215
+ """
1216
+
1217
+ # Load [smoke] section from TOML if config is provided
1218
+ smoke_config = _load_smoke_config(config)
1219
+
1220
+ # Track background processes for cleanup
1221
+ background_procs: list[Any] = []
1222
+
1223
+ try:
1224
+ # Auto-start sqld if configured
1225
+ if smoke_config.get("sqld_auto_start"):
1226
+ sqld_db_path = smoke_config.get("sqld_db_path", "./traces/local.db")
1227
+ sqld_hrana_port = smoke_config.get("sqld_hrana_port", 8080)
1228
+ sqld_http_port = smoke_config.get("sqld_http_port", 8081)
1229
+
1230
+ sqld_proc = _start_sqld_server(
1231
+ db_path=sqld_db_path,
1232
+ hrana_port=sqld_hrana_port,
1233
+ http_port=sqld_http_port,
1234
+ )
1235
+ if sqld_proc:
1236
+ background_procs.append(("sqld", sqld_proc))
1237
+
1238
+ # Auto-start task app if configured
1239
+ task_app_override_url = None
1240
+ if smoke_config.get("task_app_name"):
1241
+ task_app_name = smoke_config["task_app_name"]
1242
+ task_app_port = smoke_config.get("task_app_port", 8765)
1243
+ task_app_env_file = smoke_config.get("task_app_env_file")
1244
+ task_app_force = smoke_config.get("task_app_force", True)
1245
+
1246
+ task_app_proc, task_app_url = _start_task_app_server(
1247
+ task_app_name=task_app_name,
1248
+ port=task_app_port,
1249
+ env_file=task_app_env_file,
1250
+ force=task_app_force,
1251
+ )
1252
+ background_procs.append(("task_app", task_app_proc))
1253
+ task_app_override_url = task_app_url
1254
+ click.echo(f"[smoke] Task app started, will use URL: {task_app_url}", err=True)
1255
+ except Exception as exc:
1256
+ # Cleanup any processes that did start
1257
+ for proc_name, proc in background_procs:
1258
+ if proc and proc.poll() is None:
1259
+ click.echo(f"[smoke] Cleaning up {proc_name}...", err=True)
1260
+ proc.terminate()
1261
+ try:
1262
+ proc.wait(timeout=3)
1263
+ except Exception:
1264
+ proc.kill()
1265
+
1266
+ click.echo(f"[smoke] ERROR: Auto-start failed: {exc}", err=True)
1267
+ raise click.ClickException(f"Auto-start failed: {exc}") from exc
1268
+
1269
+ # Apply TOML defaults (CLI args take precedence)
1270
+ # Override task_url with auto-started task app URL if applicable
1271
+ if task_app_override_url:
1272
+ task_app_url = task_app_override_url
1273
+ # For string/int args: use TOML value if CLI value matches the default
1274
+ ctx = click.get_current_context()
1275
+
1276
+ # Helper to check if a CLI param was explicitly provided or is using default
1277
+ def use_toml_default(param_name: str, cli_value: Any, toml_key: str) -> Any:
1278
+ """Use TOML value if CLI param is at its default, otherwise use CLI value."""
1279
+ if not smoke_config or toml_key not in smoke_config:
1280
+ return cli_value
1281
+
1282
+ param = next((p for p in ctx.command.params if p.name == param_name), None)
1283
+ if not param:
1284
+ return cli_value
1285
+
1286
+ # Check if value was explicitly provided (not default)
1287
+ # If it matches the default, use TOML value
1288
+ param_default = param.default() if callable(param.default) else param.default
1289
+ if cli_value == param_default:
1290
+ toml_value = smoke_config[toml_key]
1291
+ click.echo(f"[smoke] Using {toml_key}={toml_value} from config", err=True)
1292
+ return toml_value
1293
+
1294
+ return cli_value
1295
+
1296
+ # Apply TOML defaults
1297
+ task_app_url = use_toml_default("task_app_url", task_app_url, "task_url")
1298
+ env_name = use_toml_default("env_name", env_name, "env_name")
1299
+ policy_name = use_toml_default("policy_name", policy_name, "policy_name")
1300
+ model = use_toml_default("model", model, "model")
1301
+ inference_policy = use_toml_default("inference_policy", inference_policy, "policy")
1302
+ inference_url = use_toml_default("inference_url", inference_url, "inference_url")
1303
+ max_steps = use_toml_default("max_steps", max_steps, "max_steps")
1304
+ return_trace = use_toml_default("return_trace", return_trace, "return_trace")
1305
+ use_mock = use_toml_default("use_mock", use_mock, "use_mock")
1306
+ mock_backend = use_toml_default("mock_backend", mock_backend, "mock_backend")
1307
+ mock_port = use_toml_default("mock_port", mock_port, "mock_port")
1308
+ api_key = use_toml_default("api_key", api_key, "api_key")
1309
+
1310
+ # Auto-configure tracing to avoid interactive prompts
1311
+ try:
1312
+ os.environ.setdefault("CI", "true")
1313
+ os.environ.setdefault("SYNTH_TRACING_AUTO_YES", "1")
1314
+ # Derive a default traces directory relative to CWD
1315
+ traces_dir = os.environ.get("SYNTH_TRACES_DIR")
1316
+ if not traces_dir:
1317
+ traces_dir = str((Path.cwd() / "traces" / "v3").resolve())
1318
+ os.environ["SYNTH_TRACES_DIR"] = traces_dir
1319
+ with contextlib.suppress(Exception):
1320
+ Path(traces_dir).mkdir(parents=True, exist_ok=True)
1321
+ _ensure_local_libsql()
1322
+ # Prefer a libsql/turso/sqld URL when provided to enable concurrent writes
1323
+ libsql_url = (
1324
+ os.getenv("TRACING_DB_URL")
1325
+ or os.getenv("LIBSQL_URL")
1326
+ or os.getenv("TURSO_DATABASE_URL")
1327
+ or os.getenv("LIBSQL_HTTP_URL")
1328
+ )
1329
+ if libsql_url:
1330
+ os.environ.setdefault("LIBSQL_URL", libsql_url)
1331
+
1332
+ auth_hint = (
1333
+ os.getenv("TRACING_DB_AUTH_TOKEN")
1334
+ or os.getenv("LIBSQL_AUTH_TOKEN")
1335
+ or os.getenv("TURSO_AUTH_TOKEN")
1336
+ )
1337
+ if auth_hint:
1338
+ os.environ.setdefault("LIBSQL_AUTH_TOKEN", auth_hint)
1339
+
1340
+ resolved_url, resolved_token = resolve_trace_db_settings()
1341
+ os.environ.setdefault("SYNTH_TRACES_DB", resolved_url)
1342
+ if resolved_token and not (
1343
+ os.getenv("LIBSQL_AUTH_TOKEN") or os.getenv("TURSO_AUTH_TOKEN")
1344
+ ):
1345
+ os.environ["LIBSQL_AUTH_TOKEN"] = resolved_token
1346
+
1347
+ _refresh_tracing_config()
1348
+ except Exception:
1349
+ pass
1350
+
1351
+ # Load env file(s) before resolving API key
1352
+ try:
1353
+ # Explicit --env-file takes precedence
1354
+ if env_file is not None:
1355
+ try:
1356
+ from dotenv import load_dotenv as _ld
1357
+ _ld(env_file, override=False)
1358
+ except Exception:
1359
+ pass
1360
+ else:
1361
+ # Best-effort auto-discovery from CWD
1362
+ try:
1363
+ from dotenv import find_dotenv as _fd
1364
+ from dotenv import load_dotenv as _ld
1365
+ _ld(_fd(usecwd=True), override=False)
1366
+ except Exception:
1367
+ pass
1368
+
1369
+ # If api_key not passed, try to read from env now
1370
+ if not api_key:
1371
+ api_key = os.getenv("ENVIRONMENT_API_KEY", "")
1372
+ except Exception:
1373
+ pass
1374
+
1375
+ try:
1376
+ if parallel and parallel > 0:
1377
+ exit_code = asyncio.run(
1378
+ _run_train_step(
1379
+ task_app_url=task_app_url,
1380
+ api_key=(api_key or None),
1381
+ env_name_opt=env_name,
1382
+ policy_name=policy_name,
1383
+ model=model,
1384
+ inference_policy=inference_policy,
1385
+ inference_url_opt=inference_url,
1386
+ max_steps=max_steps,
1387
+ return_trace=return_trace,
1388
+ use_mock=use_mock,
1389
+ mock_backend=mock_backend,
1390
+ mock_port=mock_port,
1391
+ config_path=config,
1392
+ parallel=parallel,
1393
+ )
1394
+ )
1395
+ else:
1396
+ exit_code = asyncio.run(
1397
+ _run_smoke_async(
1398
+ task_app_url=task_app_url,
1399
+ api_key=(api_key or None),
1400
+ env_name_opt=env_name,
1401
+ policy_name=policy_name,
1402
+ model=model,
1403
+ inference_policy=inference_policy,
1404
+ inference_url_opt=inference_url,
1405
+ max_steps=max_steps,
1406
+ return_trace=return_trace,
1407
+ use_mock=use_mock,
1408
+ mock_backend=mock_backend,
1409
+ mock_port=mock_port,
1410
+ config_path=config,
1411
+ rollouts=rollouts,
1412
+ group_size=group_size,
1413
+ batch_size=batch_size,
1414
+ )
1415
+ )
1416
+ except KeyboardInterrupt:
1417
+ click.echo("Interrupted", err=True)
1418
+ sys.exit(130)
1419
+ finally:
1420
+ # Cleanup background processes
1421
+ for proc_name, proc in background_procs:
1422
+ if proc and proc.poll() is None:
1423
+ click.echo(f"[smoke] Stopping {proc_name}...", err=True)
1424
+ proc.terminate()
1425
+ try:
1426
+ proc.wait(timeout=5)
1427
+ except Exception:
1428
+ proc.kill()
1429
+ if background_procs:
1430
+ click.echo("[smoke] Background services stopped", err=True)
1431
+
1432
+ sys.exit(exit_code)
1433
+
1434
+
1435
def register(cli: click.Group) -> None:
    """Attach the ``smoke`` command to the given click CLI group."""
    cli.add_command(command)