synth-ai 0.2.14-py3-none-any.whl → 0.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as published.
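For readers who want to reproduce a comparison like the file listing below, here is a minimal sketch using only the Python standard library. It assumes both wheels have already been downloaded locally (for example with pip download synth-ai==0.2.14 --no-deps and pip download synth-ai==0.4.1 --no-deps); the wheel paths are illustrative, and the sketch reports only which files were added or removed, not the per-file line counts shown in this diff.

    # Minimal sketch: compare the file lists of two locally downloaded wheels.
    # A wheel is a zip archive, so the standard-library zipfile module is enough.
    # The wheel paths below are assumptions; adjust them to wherever pip
    # downloaded the files.
    from zipfile import ZipFile

    OLD_WHEEL = "synth_ai-0.2.14-py3-none-any.whl"  # assumed local path
    NEW_WHEEL = "synth_ai-0.4.1-py3-none-any.whl"   # assumed local path

    def wheel_files(path: str) -> set[str]:
        """Return the set of file names contained in a wheel archive."""
        with ZipFile(path) as wheel:
            return set(wheel.namelist())

    old_files = wheel_files(OLD_WHEEL)
    new_files = wheel_files(NEW_WHEEL)

    print("Added files:  ", len(new_files - old_files))
    print("Removed files:", len(old_files - new_files))
    print("Files in both:", len(old_files & new_files))

Per-file line counts like those in the listing would additionally require extracting each file present in both archives and diffing its text.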

Potentially problematic release: this version of synth-ai might be problematic.

Files changed (1091)
  1. synth_ai/__init__.py +19 -40
  2. synth_ai/__main__.py +30 -3
  3. synth_ai/cli/__init__.py +105 -70
  4. synth_ai/cli/__main__.py +42 -0
  5. synth_ai/cli/_internal/__init__.py +5 -0
  6. synth_ai/cli/_internal/modal_wrapper.py +31 -0
  7. synth_ai/cli/_internal/storage.py +20 -0
  8. synth_ai/cli/_internal/typer_patch.py +47 -0
  9. synth_ai/cli/_internal/validate_task_app.py +29 -0
  10. synth_ai/cli/agents/__init__.py +17 -0
  11. synth_ai/cli/agents/claude.py +77 -0
  12. synth_ai/cli/agents/codex.py +265 -0
  13. synth_ai/cli/agents/opencode.py +253 -0
  14. synth_ai/cli/commands/__init__.py +18 -0
  15. synth_ai/cli/commands/artifacts/__init__.py +13 -0
  16. synth_ai/cli/commands/artifacts/client.py +119 -0
  17. synth_ai/cli/commands/artifacts/config.py +57 -0
  18. synth_ai/cli/commands/artifacts/core.py +24 -0
  19. synth_ai/cli/commands/artifacts/download.py +188 -0
  20. synth_ai/cli/commands/artifacts/export.py +186 -0
  21. synth_ai/cli/commands/artifacts/list.py +156 -0
  22. synth_ai/cli/commands/artifacts/parsing.py +250 -0
  23. synth_ai/cli/commands/artifacts/show.py +336 -0
  24. synth_ai/cli/commands/baseline/__init__.py +12 -0
  25. synth_ai/cli/commands/baseline/core.py +636 -0
  26. synth_ai/cli/commands/baseline/list.py +94 -0
  27. synth_ai/cli/commands/demo/__init__.py +3 -0
  28. synth_ai/cli/commands/demo/core.py +153 -0
  29. synth_ai/cli/commands/eval/__init__.py +19 -0
  30. synth_ai/cli/commands/eval/core.py +1113 -0
  31. synth_ai/cli/commands/eval/errors.py +81 -0
  32. synth_ai/cli/commands/eval/validation.py +133 -0
  33. synth_ai/cli/commands/filter/__init__.py +12 -0
  34. synth_ai/cli/commands/filter/core.py +424 -0
  35. synth_ai/cli/commands/filter/errors.py +55 -0
  36. synth_ai/cli/commands/filter/validation.py +77 -0
  37. synth_ai/cli/commands/help/__init__.py +185 -0
  38. synth_ai/cli/commands/help/core.py +72 -0
  39. synth_ai/cli/commands/scan/__init__.py +19 -0
  40. synth_ai/cli/commands/scan/cloudflare_scanner.py +403 -0
  41. synth_ai/cli/commands/scan/core.py +344 -0
  42. synth_ai/cli/commands/scan/health_checker.py +242 -0
  43. synth_ai/cli/commands/scan/local_scanner.py +278 -0
  44. synth_ai/cli/commands/scan/models.py +83 -0
  45. synth_ai/cli/commands/smoke/__init__.py +7 -0
  46. synth_ai/cli/commands/smoke/core.py +1438 -0
  47. synth_ai/cli/commands/status/__init__.py +66 -0
  48. synth_ai/cli/commands/status/client.py +192 -0
  49. synth_ai/cli/commands/status/config.py +92 -0
  50. synth_ai/cli/commands/status/errors.py +20 -0
  51. synth_ai/cli/commands/status/formatters.py +164 -0
  52. synth_ai/cli/commands/status/subcommands/__init__.py +9 -0
  53. synth_ai/cli/commands/status/subcommands/files.py +79 -0
  54. synth_ai/cli/commands/status/subcommands/jobs.py +334 -0
  55. synth_ai/cli/commands/status/subcommands/models.py +79 -0
  56. synth_ai/cli/commands/status/subcommands/pricing.py +23 -0
  57. synth_ai/cli/commands/status/subcommands/runs.py +81 -0
  58. synth_ai/cli/commands/status/subcommands/session.py +182 -0
  59. synth_ai/cli/commands/status/subcommands/summary.py +47 -0
  60. synth_ai/cli/commands/status/subcommands/usage.py +203 -0
  61. synth_ai/cli/commands/status/utils.py +114 -0
  62. synth_ai/cli/commands/train/__init__.py +53 -0
  63. synth_ai/cli/commands/train/core.py +22 -0
  64. synth_ai/cli/commands/train/errors.py +117 -0
  65. synth_ai/cli/commands/train/judge_schemas.py +201 -0
  66. synth_ai/cli/commands/train/judge_validation.py +305 -0
  67. synth_ai/cli/commands/train/prompt_learning_validation.py +633 -0
  68. synth_ai/cli/commands/train/validation.py +392 -0
  69. synth_ai/cli/demo_apps/__init__.py +10 -0
  70. synth_ai/cli/demo_apps/core/__init__.py +28 -0
  71. synth_ai/cli/demo_apps/core/cli.py +1735 -0
  72. synth_ai/cli/demo_apps/crafter/crafter_fft_4b.toml +55 -0
  73. synth_ai/cli/demo_apps/crafter/grpo_crafter_task_app.py +186 -0
  74. synth_ai/cli/demo_apps/crafter/rl_from_base_qwen4b.toml +74 -0
  75. synth_ai/cli/demo_apps/demo_registry.py +176 -0
  76. synth_ai/cli/demo_apps/demo_task_apps/core.py +440 -0
  77. synth_ai/cli/demo_apps/demo_task_apps/crafter/__init__.py +1 -0
  78. synth_ai/cli/demo_apps/demo_task_apps/crafter/grpo_crafter_task_app.py +185 -0
  79. synth_ai/cli/demo_apps/demo_task_apps/math/modal_task_app.py +742 -0
  80. synth_ai/cli/demo_apps/demo_task_apps/math/task_app_entry.py +39 -0
  81. synth_ai/cli/demo_apps/math/__init__.py +1 -0
  82. synth_ai/cli/demo_apps/math/_common.py +16 -0
  83. synth_ai/cli/demo_apps/math/app.py +38 -0
  84. synth_ai/cli/demo_apps/math/config.toml +76 -0
  85. synth_ai/cli/demo_apps/math/deploy_modal.py +54 -0
  86. synth_ai/cli/demo_apps/math/modal_task_app.py +702 -0
  87. synth_ai/cli/demo_apps/math/task_app_entry.py +53 -0
  88. synth_ai/cli/demo_apps/mipro/main.py +271 -0
  89. synth_ai/cli/demo_apps/mipro/task_app.py +933 -0
  90. synth_ai/cli/demo_apps/mipro/train_cfg.toml +92 -0
  91. synth_ai/cli/demos/__init__.py +12 -0
  92. synth_ai/cli/demos/demo.py +32 -0
  93. synth_ai/cli/demos/rl_demo.py +254 -0
  94. synth_ai/cli/deploy.py +216 -0
  95. synth_ai/cli/infra/__init__.py +14 -0
  96. synth_ai/cli/infra/balance.py +216 -0
  97. synth_ai/cli/infra/mcp.py +35 -0
  98. synth_ai/cli/infra/modal_app.py +36 -0
  99. synth_ai/cli/infra/setup.py +69 -0
  100. synth_ai/cli/infra/status.py +16 -0
  101. synth_ai/cli/infra/turso.py +77 -0
  102. synth_ai/cli/lib/__init__.py +10 -0
  103. synth_ai/cli/lib/agents.py +76 -0
  104. synth_ai/cli/lib/apps/modal_app.py +101 -0
  105. synth_ai/cli/lib/apps/task_app.py +643 -0
  106. synth_ai/cli/lib/bin.py +39 -0
  107. synth_ai/cli/lib/env.py +375 -0
  108. synth_ai/cli/lib/errors.py +85 -0
  109. synth_ai/cli/lib/modal.py +315 -0
  110. synth_ai/cli/lib/plotting.py +126 -0
  111. synth_ai/cli/lib/prompt_args.py +39 -0
  112. synth_ai/cli/lib/prompts.py +284 -0
  113. synth_ai/cli/lib/sqld.py +122 -0
  114. synth_ai/cli/lib/task_app_discovery.py +884 -0
  115. synth_ai/cli/lib/task_app_env.py +295 -0
  116. synth_ai/cli/lib/train_cfgs.py +300 -0
  117. synth_ai/cli/lib/tunnel_records.py +207 -0
  118. synth_ai/cli/local/__init__.py +14 -0
  119. synth_ai/cli/local/experiment_queue/__init__.py +72 -0
  120. synth_ai/cli/local/experiment_queue/api_schemas.py +221 -0
  121. synth_ai/cli/local/experiment_queue/celery_app.py +208 -0
  122. synth_ai/cli/local/experiment_queue/config.py +128 -0
  123. synth_ai/cli/local/experiment_queue/config_utils.py +272 -0
  124. synth_ai/cli/local/experiment_queue/database.py +175 -0
  125. synth_ai/cli/local/experiment_queue/dispatcher.py +119 -0
  126. synth_ai/cli/local/experiment_queue/models.py +231 -0
  127. synth_ai/cli/local/experiment_queue/progress_info.py +160 -0
  128. synth_ai/cli/local/experiment_queue/results.py +373 -0
  129. synth_ai/cli/local/experiment_queue/schemas.py +131 -0
  130. synth_ai/cli/local/experiment_queue/service.py +344 -0
  131. synth_ai/cli/local/experiment_queue/status.py +372 -0
  132. synth_ai/cli/local/experiment_queue/status_tracker.py +360 -0
  133. synth_ai/cli/local/experiment_queue/tasks.py +1984 -0
  134. synth_ai/cli/local/experiment_queue/trace_storage.py +65 -0
  135. synth_ai/cli/local/experiment_queue/validation.py +157 -0
  136. synth_ai/cli/local/session/__init__.py +92 -0
  137. synth_ai/cli/local/session/client.py +383 -0
  138. synth_ai/cli/local/session/constants.py +63 -0
  139. synth_ai/cli/local/session/exceptions.py +105 -0
  140. synth_ai/cli/local/session/manager.py +139 -0
  141. synth_ai/cli/local/session/models.py +89 -0
  142. synth_ai/cli/local/session/query.py +110 -0
  143. synth_ai/cli/root.py +30 -6
  144. synth_ai/cli/task_apps/__init__.py +26 -0
  145. synth_ai/cli/task_apps/commands.py +3153 -0
  146. synth_ai/cli/task_apps/deploy.py +7 -0
  147. synth_ai/cli/task_apps/list.py +26 -0
  148. synth_ai/cli/task_apps/main.py +36 -0
  149. synth_ai/cli/task_apps/modal_serve.py +11 -0
  150. synth_ai/cli/task_apps/serve.py +11 -0
  151. synth_ai/cli/training/__init__.py +8 -0
  152. synth_ai/cli/training/train.py +5 -0
  153. synth_ai/cli/training/train_cfg.py +34 -0
  154. synth_ai/cli/training/watch.py +506 -0
  155. synth_ai/cli/turso.py +34 -55
  156. synth_ai/cli/usage.py +159 -0
  157. synth_ai/cli/utils/__init__.py +8 -0
  158. synth_ai/cli/utils/experiments.py +235 -0
  159. synth_ai/cli/utils/queue.py +504 -0
  160. synth_ai/cli/utils/recent.py +133 -0
  161. synth_ai/cli/utils/traces.py +164 -0
  162. synth_ai/contracts/__init__.py +67 -0
  163. synth_ai/core/__init__.py +100 -0
  164. synth_ai/core/_utils/__init__.py +54 -0
  165. synth_ai/core/_utils/base_url.py +10 -0
  166. synth_ai/core/_utils/http.py +10 -0
  167. synth_ai/core/_utils/prompts.py +14 -0
  168. synth_ai/core/_utils/task_app_state.py +12 -0
  169. synth_ai/core/_utils/user_config.py +10 -0
  170. synth_ai/core/apps/common.py +116 -0
  171. synth_ai/core/auth.py +95 -0
  172. synth_ai/core/cfgs.py +240 -0
  173. synth_ai/core/config/__init__.py +16 -0
  174. synth_ai/core/config/base.py +168 -0
  175. synth_ai/core/config/resolver.py +89 -0
  176. synth_ai/core/env.py +220 -0
  177. synth_ai/core/errors.py +126 -0
  178. synth_ai/core/http.py +230 -0
  179. synth_ai/core/integrations/__init__.py +11 -0
  180. synth_ai/core/integrations/cloudflare.py +1710 -0
  181. synth_ai/core/integrations/mcp/__init__.py +6 -0
  182. synth_ai/core/integrations/mcp/__main__.py +8 -0
  183. synth_ai/core/integrations/mcp/claude.py +36 -0
  184. synth_ai/core/integrations/mcp/main.py +254 -0
  185. synth_ai/core/integrations/mcp/setup.py +100 -0
  186. synth_ai/core/integrations/modal.py +277 -0
  187. synth_ai/core/json.py +72 -0
  188. synth_ai/core/log_filter.py +99 -0
  189. synth_ai/core/logging.py +82 -0
  190. synth_ai/core/paths.py +107 -0
  191. synth_ai/core/pricing.py +109 -0
  192. synth_ai/core/process.py +233 -0
  193. synth_ai/core/ssl.py +25 -0
  194. synth_ai/core/storage/__init__.py +71 -0
  195. synth_ai/core/task_app_state.py +318 -0
  196. synth_ai/core/telemetry.py +282 -0
  197. synth_ai/core/tracing_v3/__init__.py +99 -0
  198. synth_ai/core/tracing_v3/abstractions.py +302 -0
  199. synth_ai/core/tracing_v3/config.py +229 -0
  200. synth_ai/core/tracing_v3/constants.py +21 -0
  201. synth_ai/core/tracing_v3/db_config.py +182 -0
  202. synth_ai/core/tracing_v3/decorators.py +401 -0
  203. synth_ai/core/tracing_v3/llm_call_record_helpers.py +437 -0
  204. synth_ai/core/tracing_v3/migration_helper.py +119 -0
  205. synth_ai/core/tracing_v3/session_tracer.py +542 -0
  206. synth_ai/core/tracing_v3/storage/base.py +211 -0
  207. synth_ai/core/tracing_v3/storage/config.py +109 -0
  208. synth_ai/core/tracing_v3/storage/factory.py +39 -0
  209. synth_ai/core/tracing_v3/trace_utils.py +326 -0
  210. synth_ai/core/tracing_v3/turso/daemon.py +278 -0
  211. synth_ai/core/tracing_v3/turso/models.py +470 -0
  212. synth_ai/core/tracing_v3/turso/native_manager.py +1385 -0
  213. synth_ai/core/tracing_v3/utils.py +108 -0
  214. synth_ai/core/urls.py +18 -0
  215. synth_ai/core/user_config.py +137 -0
  216. synth_ai/core/uvicorn.py +222 -0
  217. synth_ai/data/__init__.py +110 -0
  218. synth_ai/data/enums.py +141 -0
  219. synth_ai/data/rewards.py +152 -0
  220. synth_ai/data/specs.py +36 -0
  221. synth_ai/data/traces.py +35 -0
  222. synth_ai/products/__init__.py +6 -0
  223. synth_ai/products/graph_evolve/__init__.py +46 -0
  224. synth_ai/products/graph_evolve/client.py +226 -0
  225. synth_ai/products/graph_evolve/config.py +591 -0
  226. synth_ai/products/graph_evolve/converters/__init__.py +42 -0
  227. synth_ai/products/graph_evolve/converters/openai_sft.py +484 -0
  228. synth_ai/products/graph_evolve/examples/hotpotqa/config.toml +109 -0
  229. synth_ai/products/graph_evolve/run.py +222 -0
  230. synth_ai/sdk/__init__.py +119 -0
  231. synth_ai/sdk/api/__init__.py +1 -0
  232. synth_ai/sdk/api/models/supported.py +514 -0
  233. synth_ai/sdk/api/research_agent/__init__.py +86 -0
  234. synth_ai/sdk/api/research_agent/cli.py +428 -0
  235. synth_ai/sdk/api/research_agent/config.py +357 -0
  236. synth_ai/sdk/api/research_agent/job.py +717 -0
  237. synth_ai/sdk/api/train/__init__.py +85 -0
  238. synth_ai/sdk/api/train/builders.py +895 -0
  239. synth_ai/sdk/api/train/cli.py +2188 -0
  240. synth_ai/sdk/api/train/config_finder.py +267 -0
  241. synth_ai/sdk/api/train/configs/__init__.py +65 -0
  242. synth_ai/sdk/api/train/configs/prompt_learning.py +1706 -0
  243. synth_ai/sdk/api/train/configs/rl.py +188 -0
  244. synth_ai/sdk/api/train/configs/sft.py +99 -0
  245. synth_ai/sdk/api/train/configs/shared.py +81 -0
  246. synth_ai/sdk/api/train/context_learning.py +312 -0
  247. synth_ai/sdk/api/train/env_resolver.py +418 -0
  248. synth_ai/sdk/api/train/graph_validators.py +216 -0
  249. synth_ai/sdk/api/train/graphgen.py +984 -0
  250. synth_ai/sdk/api/train/graphgen_models.py +823 -0
  251. synth_ai/sdk/api/train/graphgen_validators.py +109 -0
  252. synth_ai/sdk/api/train/pollers.py +124 -0
  253. synth_ai/sdk/api/train/progress/__init__.py +97 -0
  254. synth_ai/sdk/api/train/progress/dataclasses.py +569 -0
  255. synth_ai/sdk/api/train/progress/events.py +326 -0
  256. synth_ai/sdk/api/train/progress/results.py +428 -0
  257. synth_ai/sdk/api/train/progress/tracker.py +641 -0
  258. synth_ai/sdk/api/train/prompt_learning.py +470 -0
  259. synth_ai/sdk/api/train/rl.py +442 -0
  260. synth_ai/sdk/api/train/sft.py +396 -0
  261. synth_ai/sdk/api/train/summary.py +522 -0
  262. synth_ai/sdk/api/train/supported_algos.py +147 -0
  263. synth_ai/sdk/api/train/task_app.py +331 -0
  264. synth_ai/sdk/api/train/utils.py +279 -0
  265. synth_ai/sdk/api/train/validators.py +2424 -0
  266. synth_ai/sdk/baseline/__init__.py +25 -0
  267. synth_ai/sdk/baseline/config.py +209 -0
  268. synth_ai/sdk/baseline/discovery.py +216 -0
  269. synth_ai/sdk/baseline/execution.py +154 -0
  270. synth_ai/sdk/graphs/__init__.py +15 -0
  271. synth_ai/sdk/graphs/completions.py +570 -0
  272. synth_ai/sdk/inference/__init__.py +6 -0
  273. synth_ai/sdk/inference/client.py +128 -0
  274. synth_ai/sdk/jobs/__init__.py +16 -0
  275. synth_ai/sdk/jobs/client.py +371 -0
  276. synth_ai/sdk/judging/__init__.py +15 -0
  277. synth_ai/sdk/judging/base.py +24 -0
  278. synth_ai/sdk/judging/client.py +191 -0
  279. synth_ai/sdk/judging/schemas.py +222 -0
  280. synth_ai/sdk/learning/__init__.py +69 -0
  281. synth_ai/sdk/learning/client.py +240 -0
  282. synth_ai/sdk/learning/ft_client.py +7 -0
  283. synth_ai/sdk/learning/health.py +49 -0
  284. synth_ai/sdk/learning/jobs.py +202 -0
  285. synth_ai/sdk/learning/prompt_extraction.py +334 -0
  286. synth_ai/sdk/learning/prompt_learning_client.py +455 -0
  287. synth_ai/sdk/learning/prompt_learning_types.py +185 -0
  288. synth_ai/sdk/learning/rl/client.py +268 -0
  289. synth_ai/sdk/learning/rl/contracts.py +27 -0
  290. synth_ai/sdk/learning/rl/env_keys.py +166 -0
  291. synth_ai/sdk/learning/rl/secrets.py +13 -0
  292. synth_ai/sdk/learning/sft/client.py +95 -0
  293. synth_ai/sdk/learning/sft/config.py +270 -0
  294. synth_ai/sdk/learning/sft/data.py +698 -0
  295. synth_ai/sdk/learning/validators.py +52 -0
  296. synth_ai/sdk/research_agent/__init__.py +34 -0
  297. synth_ai/sdk/research_agent/container_builder.py +328 -0
  298. synth_ai/sdk/research_agent/container_spec.py +198 -0
  299. synth_ai/sdk/research_agent/defaults.py +34 -0
  300. synth_ai/sdk/research_agent/results_collector.py +69 -0
  301. synth_ai/sdk/specs/__init__.py +46 -0
  302. synth_ai/sdk/specs/dataclasses.py +149 -0
  303. synth_ai/sdk/specs/loader.py +144 -0
  304. synth_ai/sdk/specs/serializer.py +199 -0
  305. synth_ai/sdk/specs/validation.py +250 -0
  306. synth_ai/sdk/streaming/__init__.py +35 -0
  307. synth_ai/sdk/streaming/config.py +94 -0
  308. synth_ai/sdk/streaming/handlers.py +1997 -0
  309. synth_ai/sdk/streaming/streamer.py +704 -0
  310. synth_ai/sdk/streaming/types.py +112 -0
  311. synth_ai/sdk/task/__init__.py +151 -0
  312. synth_ai/sdk/task/apps/__init__.py +133 -0
  313. synth_ai/sdk/task/config.py +261 -0
  314. synth_ai/sdk/task/contracts.py +298 -0
  315. synth_ai/sdk/task/datasets.py +108 -0
  316. synth_ai/sdk/task/in_process.py +1190 -0
  317. synth_ai/sdk/task/in_process_runner.py +309 -0
  318. synth_ai/sdk/task/inference_api.py +299 -0
  319. synth_ai/sdk/task/proxy.py +287 -0
  320. synth_ai/sdk/task/rubrics/__init__.py +55 -0
  321. synth_ai/sdk/task/rubrics/loaders.py +156 -0
  322. synth_ai/sdk/task/rubrics.py +219 -0
  323. synth_ai/sdk/task/server.py +580 -0
  324. synth_ai/sdk/task/trace_correlation_helpers.py +506 -0
  325. synth_ai/sdk/task/tracing_utils.py +95 -0
  326. synth_ai/sdk/task/validators.py +456 -0
  327. synth_ai/sdk/tracing/__init__.py +39 -0
  328. synth_ai/sdk/training/__init__.py +102 -0
  329. synth_ai/sdk/usage/__init__.py +37 -0
  330. synth_ai/sdk/usage/client.py +171 -0
  331. synth_ai/sdk/usage/models.py +261 -0
  332. synth_ai/utils/__init__.py +213 -0
  333. synth_ai-0.4.1.dist-info/METADATA +195 -0
  334. synth_ai-0.4.1.dist-info/RECORD +379 -0
  335. synth_ai-0.4.1.dist-info/top_level.txt +1 -0
  336. examples/__init__.py +0 -16
  337. examples/analyze_semantic_words.sh +0 -17
  338. examples/crafter_debug_render.py +0 -186
  339. examples/dev/qwen3_32b_qlora_4xh100.toml +0 -40
  340. examples/multi_step/configs/README_verilog_rl.md +0 -77
  341. examples/multi_step/configs/VERILOG_REWARDS.md +0 -90
  342. examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +0 -183
  343. examples/multi_step/configs/crafter_eval_synth_qwen4b.toml +0 -35
  344. examples/multi_step/configs/crafter_eval_text_only_groq_qwen32b.toml +0 -36
  345. examples/multi_step/configs/crafter_rl_outcome.toml +0 -74
  346. examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +0 -187
  347. examples/multi_step/configs/crafter_rl_stepwise_shaped.toml +0 -83
  348. examples/multi_step/configs/crafter_rl_stepwise_simple.toml +0 -78
  349. examples/multi_step/configs/crafter_synth_backend.md +0 -40
  350. examples/multi_step/configs/verilog_eval_groq_qwen32b.toml +0 -31
  351. examples/multi_step/configs/verilog_eval_synth_qwen8b.toml +0 -33
  352. examples/multi_step/configs/verilog_rl_lora.toml +0 -190
  353. examples/multi_step/crafter_rl_lora.md +0 -70
  354. examples/multi_step/judges/crafter_backend_judge.py +0 -220
  355. examples/multi_step/judges/verilog_backend_judge.py +0 -234
  356. examples/multi_step/readme.md +0 -48
  357. examples/multi_step/sse_metrics_streaming_notes.md +0 -357
  358. examples/multi_step/task_app_config_notes.md +0 -494
  359. examples/multi_step/verilog_rl_lora.md +0 -218
  360. examples/qwen_coder/README.md +0 -102
  361. examples/qwen_coder/_shared.py +0 -113
  362. examples/qwen_coder/configs/coder_lora_30b.toml +0 -61
  363. examples/qwen_coder/configs/coder_lora_4b.toml +0 -57
  364. examples/qwen_coder/configs/coder_lora_small.toml +0 -58
  365. examples/qwen_coder/generate_dataset.py +0 -98
  366. examples/qwen_coder/infer_ft_smoke.py +0 -65
  367. examples/qwen_coder/infer_prod_proxy.py +0 -73
  368. examples/qwen_coder/infer_via_synth.py +0 -87
  369. examples/qwen_coder/scripts/infer_coder.sh +0 -19
  370. examples/qwen_coder/scripts/train_coder_30b.sh +0 -22
  371. examples/qwen_coder/sft_full_17b.py +0 -103
  372. examples/qwen_coder/sft_lora_30b.py +0 -110
  373. examples/qwen_coder/subset_jsonl.py +0 -39
  374. examples/qwen_coder/todos.md +0 -38
  375. examples/qwen_coder/validate_jsonl.py +0 -60
  376. examples/rl/README.md +0 -169
  377. examples/rl/download_dataset.py +0 -80
  378. examples/run_crafter_demo.sh +0 -10
  379. examples/sft/README.md +0 -139
  380. examples/sft/configs/crafter_fft_qwen0p6b.toml +0 -44
  381. examples/sft/configs/crafter_lora_qwen0p6b.toml +0 -45
  382. examples/sft/evaluate.py +0 -119
  383. examples/sft/export_dataset.py +0 -117
  384. examples/sft/generate_traces.py +0 -164
  385. examples/swe/__init__.py +0 -12
  386. examples/swe/task_app/README.md +0 -105
  387. examples/swe/task_app/__init__.py +0 -2
  388. examples/swe/task_app/grpo_swe_mini.py +0 -601
  389. examples/swe/task_app/grpo_swe_mini_task_app.py +0 -136
  390. examples/swe/task_app/hosted/README.md +0 -173
  391. examples/swe/task_app/hosted/__init__.py +0 -5
  392. examples/swe/task_app/hosted/branching.py +0 -143
  393. examples/swe/task_app/hosted/environment_routes.py +0 -1289
  394. examples/swe/task_app/hosted/envs/__init__.py +0 -1
  395. examples/swe/task_app/hosted/envs/crafter/__init__.py +0 -6
  396. examples/swe/task_app/hosted/envs/crafter/app.py +0 -1
  397. examples/swe/task_app/hosted/envs/crafter/environment.py +0 -522
  398. examples/swe/task_app/hosted/envs/crafter/policy.py +0 -478
  399. examples/swe/task_app/hosted/envs/crafter/react_agent.py +0 -108
  400. examples/swe/task_app/hosted/envs/crafter/shared.py +0 -305
  401. examples/swe/task_app/hosted/envs/crafter/tools.py +0 -47
  402. examples/swe/task_app/hosted/envs/mini_swe/__init__.py +0 -8
  403. examples/swe/task_app/hosted/envs/mini_swe/environment.py +0 -1164
  404. examples/swe/task_app/hosted/envs/mini_swe/policy.py +0 -355
  405. examples/swe/task_app/hosted/envs/mini_swe/shared.py +0 -83
  406. examples/swe/task_app/hosted/envs/mini_swe/tools.py +0 -96
  407. examples/swe/task_app/hosted/hosted_app.py +0 -204
  408. examples/swe/task_app/hosted/inference/__init__.py +0 -5
  409. examples/swe/task_app/hosted/inference/openai_client.py +0 -618
  410. examples/swe/task_app/hosted/main.py +0 -100
  411. examples/swe/task_app/hosted/policy_routes.py +0 -1079
  412. examples/swe/task_app/hosted/registry.py +0 -195
  413. examples/swe/task_app/hosted/rollout.py +0 -1911
  414. examples/swe/task_app/hosted/storage/__init__.py +0 -5
  415. examples/swe/task_app/hosted/storage/volume.py +0 -211
  416. examples/swe/task_app/hosted/test_agents.py +0 -161
  417. examples/swe/task_app/hosted/test_service.py +0 -136
  418. examples/swe/task_app/hosted/utils.py +0 -62
  419. examples/task_apps/IMAGE_ONLY_EVAL_QUICKSTART.md +0 -258
  420. examples/task_apps/TESTING.md +0 -275
  421. examples/task_apps/crafter/CREATE_SFT_DATASET.md +0 -273
  422. examples/task_apps/crafter/EVAL_IMAGE_ONLY_RESULTS.md +0 -152
  423. examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +0 -174
  424. examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +0 -268
  425. examples/task_apps/crafter/QUERY_EXAMPLES.md +0 -203
  426. examples/task_apps/crafter/README_IMAGE_ONLY_EVAL.md +0 -316
  427. examples/task_apps/crafter/__init__.py +0 -0
  428. examples/task_apps/crafter/eval_image_only_gpt4o.toml +0 -28
  429. examples/task_apps/crafter/eval_text_only_groq_llama.toml +0 -36
  430. examples/task_apps/crafter/filter_sft_dataset.toml +0 -16
  431. examples/task_apps/crafter/task_app/README.md +0 -42
  432. examples/task_apps/crafter/task_app/__init__.py +0 -5
  433. examples/task_apps/crafter/task_app/grpo_crafter.py +0 -973
  434. examples/task_apps/crafter/task_app/grpo_crafter_task_app.py +0 -146
  435. examples/task_apps/crafter/task_app/synth_envs_hosted/README.md +0 -173
  436. examples/task_apps/crafter/task_app/synth_envs_hosted/__init__.py +0 -5
  437. examples/task_apps/crafter/task_app/synth_envs_hosted/branching.py +0 -143
  438. examples/task_apps/crafter/task_app/synth_envs_hosted/environment_routes.py +0 -1226
  439. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/__init__.py +0 -1
  440. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/__init__.py +0 -6
  441. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/app.py +0 -1
  442. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/environment.py +0 -532
  443. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +0 -547
  444. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +0 -123
  445. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/shared.py +0 -305
  446. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/tools.py +0 -47
  447. examples/task_apps/crafter/task_app/synth_envs_hosted/hosted_app.py +0 -204
  448. examples/task_apps/crafter/task_app/synth_envs_hosted/inference/__init__.py +0 -5
  449. examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +0 -704
  450. examples/task_apps/crafter/task_app/synth_envs_hosted/main.py +0 -100
  451. examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +0 -1152
  452. examples/task_apps/crafter/task_app/synth_envs_hosted/registry.py +0 -195
  453. examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +0 -2160
  454. examples/task_apps/crafter/task_app/synth_envs_hosted/storage/__init__.py +0 -5
  455. examples/task_apps/crafter/task_app/synth_envs_hosted/storage/volume.py +0 -211
  456. examples/task_apps/crafter/task_app/synth_envs_hosted/test_agents.py +0 -161
  457. examples/task_apps/crafter/task_app/synth_envs_hosted/test_service.py +0 -136
  458. examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +0 -218
  459. examples/task_apps/dev/pokemon_emerald/__init__.py +0 -2
  460. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/README.md +0 -811
  461. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/__init__.py +0 -120
  462. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/action.py +0 -160
  463. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/memory.py +0 -155
  464. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/perception.py +0 -69
  465. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/planning.py +0 -96
  466. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/simple.py +0 -1502
  467. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/system_prompt.py +0 -4
  468. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/grab_map.py +0 -68
  469. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/manual.py +0 -216
  470. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/__init__.py +0 -35
  471. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/emerald_utils.py +0 -631
  472. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/emulator.py +0 -1544
  473. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/enums.py +0 -1428
  474. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/memory_reader.py +0 -4848
  475. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/types.py +0 -41
  476. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/utils.py +0 -298
  477. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pyproject.toml +0 -95
  478. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/run.py +0 -204
  479. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/__init__.py +0 -0
  480. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/app.py +0 -2152
  481. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/client.py +0 -429
  482. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/frame_server.py +0 -155
  483. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/README.md +0 -78
  484. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/__init__.py +0 -0
  485. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/run_tests.py +0 -122
  486. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_agent_direct.py +0 -76
  487. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_agent_prompts.py +0 -413
  488. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_battle_state_formatting.py +0 -204
  489. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_dialogue_detection.py +0 -133
  490. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_dialogue_detection_comprehensive.py +0 -229
  491. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_direct_agent_emulator.py +0 -300
  492. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_fps_adjustment_pytest.py +0 -205
  493. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_house_to_outside_direct.py +0 -200
  494. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_house_to_outside_transition.py +0 -284
  495. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_map_ground_truth_comparison.py +0 -468
  496. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_memory_map.py +0 -575
  497. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_server_map_validation.py +0 -311
  498. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_torchic_state.py +0 -259
  499. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/__init__.py +0 -0
  500. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/anticheat.py +0 -372
  501. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/checkpoint.py +0 -296
  502. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/error_handler.py +0 -275
  503. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/get_local_ip.py +0 -22
  504. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/helpers.py +0 -44
  505. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/llm_logger.py +0 -514
  506. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_formatter.py +0 -415
  507. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_stitcher.py +0 -1763
  508. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_stitcher_singleton.py +0 -33
  509. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_trimmer.py +0 -106
  510. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_visualizer.py +0 -334
  511. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/ocr_dialogue.py +0 -1020
  512. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/recording.py +0 -188
  513. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/state_formatter.py +0 -1481
  514. examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/vlm.py +0 -862
  515. examples/task_apps/dev/pokemon_emerald/modal_app.py +0 -114
  516. examples/task_apps/dev/pokemon_emerald/task_app/README.md +0 -81
  517. examples/task_apps/dev/pokemon_emerald/task_app/__init__.py +0 -6
  518. examples/task_apps/dev/pokemon_emerald/task_app/pokemon_emerald.py +0 -685
  519. examples/task_apps/enron/__init__.py +0 -1
  520. examples/task_apps/enron/eval_groq_qwen32.toml +0 -16
  521. examples/task_apps/enron/filter_sft.toml +0 -5
  522. examples/task_apps/enron/task_app/README.md +0 -14
  523. examples/task_apps/enron/task_app/__init__.py +0 -1
  524. examples/task_apps/enron/task_app/grpo_enron.py +0 -906
  525. examples/task_apps/enron/task_app/grpo_enron_task_app.py +0 -146
  526. examples/task_apps/enron/tests/__init__.py +0 -4
  527. examples/task_apps/enron/tests/conftest.py +0 -115
  528. examples/task_apps/enron/tests/integration/__init__.py +0 -4
  529. examples/task_apps/enron/tests/integration/test_enron_eval.py +0 -179
  530. examples/task_apps/enron/tests/integration/test_enron_rollout.py +0 -135
  531. examples/task_apps/enron/tests/unit/__init__.py +0 -4
  532. examples/task_apps/enron/tests/unit/test_enron_environment.py +0 -126
  533. examples/task_apps/math/README.md +0 -22
  534. examples/task_apps/math/__init__.py +0 -0
  535. examples/task_apps/math/math_single_step.py +0 -1000
  536. examples/task_apps/math/math_task_app.py +0 -115
  537. examples/task_apps/pokemon_battle/__init__.py +0 -2
  538. examples/task_apps/pokemon_battle/modal_app.py +0 -104
  539. examples/task_apps/pokemon_battle/task_app/README.md +0 -68
  540. examples/task_apps/pokemon_battle/task_app/__init__.py +0 -6
  541. examples/task_apps/pokemon_battle/task_app/pokemon_showdown.py +0 -932
  542. examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_COMPLETE.md +0 -283
  543. examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_STATUS.md +0 -155
  544. examples/task_apps/pokemon_red/README.md +0 -357
  545. examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +0 -415
  546. examples/task_apps/pokemon_red/__init__.py +0 -3
  547. examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +0 -29
  548. examples/task_apps/pokemon_red/eval_pokemon_red_policy.py +0 -225
  549. examples/task_apps/pokemon_red/pallet_town_rl_config.toml +0 -75
  550. examples/task_apps/pokemon_red/task_app.py +0 -799
  551. examples/task_apps/pokemon_red/test_pallet_town_rewards.py +0 -193
  552. examples/task_apps/sokoban/README.md +0 -307
  553. examples/task_apps/sokoban/__init__.py +0 -3
  554. examples/task_apps/sokoban/eval_groq_qwen32.toml +0 -16
  555. examples/task_apps/sokoban/eval_openai_gpt5.toml +0 -16
  556. examples/task_apps/sokoban/filter_sft.toml +0 -5
  557. examples/task_apps/sokoban/task_app.py +0 -1058
  558. examples/task_apps/sokoban/tests/__init__.py +0 -4
  559. examples/task_apps/sokoban/tests/conftest.py +0 -113
  560. examples/task_apps/sokoban/tests/integration/__init__.py +0 -4
  561. examples/task_apps/sokoban/tests/integration/test_sokoban_eval.py +0 -57
  562. examples/task_apps/sokoban/tests/integration/test_sokoban_rollout.py +0 -198
  563. examples/task_apps/sokoban/tests/unit/__init__.py +0 -4
  564. examples/task_apps/sokoban/tests/unit/test_sokoban_environment.py +0 -114
  565. examples/task_apps/verilog/__init__.py +0 -1
  566. examples/task_apps/verilog/eval_groq_qwen32b.toml +0 -24
  567. examples/task_apps/verilog/filter_sft.toml +0 -5
  568. examples/task_apps/verilog/task_app/README.md +0 -12
  569. examples/task_apps/verilog/task_app/__init__.py +0 -1
  570. examples/task_apps/verilog/task_app/grpo_verilog.py +0 -1166
  571. examples/task_apps/verilog/task_app/grpo_verilog_task_app.py +0 -145
  572. examples/task_apps/verilog/tests/__init__.py +0 -4
  573. examples/task_apps/verilog/tests/conftest.py +0 -115
  574. examples/task_apps/verilog/tests/integration/__init__.py +0 -4
  575. examples/task_apps/verilog/tests/integration/test_verilog_eval.py +0 -181
  576. examples/task_apps/verilog/tests/integration/test_verilog_rollout.py +0 -55
  577. examples/task_apps/verilog/tests/unit/__init__.py +0 -4
  578. examples/task_apps/verilog/tests/unit/test_verilog_scoring.py +0 -118
  579. examples/vlm/PROPOSAL.md +0 -53
  580. examples/vlm/README.md +0 -68
  581. examples/vlm/configs/crafter_vlm_gpt4o.toml +0 -44
  582. examples/vlm/crafter_image_only_agent.py +0 -207
  583. examples/vlm/crafter_openai_vlm_agent.py +0 -277
  584. examples/vlm/filter_image_rows.py +0 -63
  585. examples/vlm/run_crafter_vlm_benchmark.py +0 -316
  586. examples/warming_up_to_rl/analyze_trace_db.py +0 -422
  587. examples/warming_up_to_rl/configs/crafter_fft.toml +0 -48
  588. examples/warming_up_to_rl/configs/crafter_fft_4b.toml +0 -54
  589. examples/warming_up_to_rl/configs/eval_fft_qwen4b.toml +0 -20
  590. examples/warming_up_to_rl/configs/eval_groq_qwen32b.toml +0 -13
  591. examples/warming_up_to_rl/configs/eval_modal_qwen4b.toml +0 -23
  592. examples/warming_up_to_rl/configs/eval_stepwise_complex.toml +0 -35
  593. examples/warming_up_to_rl/configs/eval_stepwise_consistent.toml +0 -26
  594. examples/warming_up_to_rl/configs/eval_stepwise_per_achievement.toml +0 -36
  595. examples/warming_up_to_rl/configs/eval_stepwise_simple.toml +0 -32
  596. examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +0 -83
  597. examples/warming_up_to_rl/configs/rl_from_ft.toml +0 -56
  598. examples/warming_up_to_rl/export_trace_sft.py +0 -723
  599. examples/warming_up_to_rl/groq_test.py +0 -97
  600. examples/warming_up_to_rl/manage_secrets.py +0 -131
  601. examples/warming_up_to_rl/old/event_rewards.md +0 -234
  602. examples/warming_up_to_rl/old/notes.md +0 -73
  603. examples/warming_up_to_rl/readme.md +0 -179
  604. examples/warming_up_to_rl/run_eval.py +0 -736
  605. examples/warming_up_to_rl/run_fft_and_save.py +0 -380
  606. examples/warming_up_to_rl/run_local_rollout.py +0 -239
  607. examples/warming_up_to_rl/run_local_rollout_modal.py +0 -248
  608. examples/warming_up_to_rl/run_local_rollout_parallel.py +0 -405
  609. examples/warming_up_to_rl/run_local_rollout_traced.py +0 -477
  610. examples/warming_up_to_rl/run_rl_and_save.py +0 -124
  611. examples/warming_up_to_rl/run_rollout_remote.py +0 -156
  612. examples/workflows/__init__.py +0 -0
  613. examples/workflows/math_rl/__init__.py +0 -0
  614. examples/workflows/math_rl/configs/eval_base_qwen.toml +0 -15
  615. examples/workflows/math_rl/configs/eval_rl_qwen.toml +0 -11
  616. examples/workflows/math_rl/configs/rl_from_base_qwen.toml +0 -35
  617. examples/workflows/math_rl/configs/rl_from_base_qwen17.toml +0 -74
  618. examples/workflows/math_rl/configs/rl_from_ft_qwen.toml +0 -35
  619. examples/workflows/math_rl/download_dataset.py +0 -80
  620. examples/workflows/math_rl/run_eval.py +0 -436
  621. examples/workflows/math_rl/run_rl_and_save.py +0 -111
  622. synth_ai/api/models/supported.py +0 -377
  623. synth_ai/api/train/__init__.py +0 -5
  624. synth_ai/api/train/builders.py +0 -351
  625. synth_ai/api/train/cli.py +0 -635
  626. synth_ai/api/train/config_finder.py +0 -228
  627. synth_ai/api/train/configs/__init__.py +0 -44
  628. synth_ai/api/train/configs/rl.py +0 -134
  629. synth_ai/api/train/configs/sft.py +0 -95
  630. synth_ai/api/train/configs/shared.py +0 -24
  631. synth_ai/api/train/env_resolver.py +0 -349
  632. synth_ai/api/train/pollers.py +0 -75
  633. synth_ai/api/train/supported_algos.py +0 -147
  634. synth_ai/api/train/task_app.py +0 -195
  635. synth_ai/api/train/utils.py +0 -225
  636. synth_ai/cli/_modal_wrapper.py +0 -29
  637. synth_ai/cli/_storage.py +0 -20
  638. synth_ai/cli/_typer_patch.py +0 -49
  639. synth_ai/cli/_validate_task_app.py +0 -11
  640. synth_ai/cli/balance.py +0 -216
  641. synth_ai/cli/calc.py +0 -84
  642. synth_ai/cli/demo.py +0 -165
  643. synth_ai/cli/legacy_root_backup.py +0 -468
  644. synth_ai/cli/man.py +0 -106
  645. synth_ai/cli/recent.py +0 -132
  646. synth_ai/cli/rl_demo.py +0 -254
  647. synth_ai/cli/status.py +0 -134
  648. synth_ai/cli/task_apps.py +0 -4523
  649. synth_ai/cli/traces.py +0 -164
  650. synth_ai/cli/tui.py +0 -57
  651. synth_ai/cli/watch.py +0 -506
  652. synth_ai/compound/cais.py +0 -0
  653. synth_ai/config/base_url.py +0 -107
  654. synth_ai/core/experiment.py +0 -13
  655. synth_ai/core/system.py +0 -15
  656. synth_ai/demo_registry.py +0 -295
  657. synth_ai/demos/core/__init__.py +0 -1
  658. synth_ai/demos/core/cli.py +0 -1718
  659. synth_ai/demos/demo_task_apps/core.py +0 -440
  660. synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +0 -184
  661. synth_ai/demos/demo_task_apps/math/deploy_task_app.sh +0 -22
  662. synth_ai/demos/demo_task_apps/math/modal_task_app.py +0 -739
  663. synth_ai/demos/demo_task_apps/math/task_app_entry.py +0 -37
  664. synth_ai/environments/__init__.py +0 -31
  665. synth_ai/environments/environment/__init__.py +0 -1
  666. synth_ai/environments/environment/artifacts/__init__.py +0 -1
  667. synth_ai/environments/environment/artifacts/base.py +0 -52
  668. synth_ai/environments/environment/core.py +0 -67
  669. synth_ai/environments/environment/db/__init__.py +0 -1
  670. synth_ai/environments/environment/db/sqlite.py +0 -45
  671. synth_ai/environments/environment/registry.py +0 -233
  672. synth_ai/environments/environment/resources/sqlite.py +0 -45
  673. synth_ai/environments/environment/results.py +0 -1
  674. synth_ai/environments/environment/rewards/__init__.py +0 -1
  675. synth_ai/environments/environment/rewards/core.py +0 -29
  676. synth_ai/environments/environment/shared_engine.py +0 -26
  677. synth_ai/environments/environment/tools/__init__.py +0 -200
  678. synth_ai/environments/examples/__init__.py +0 -1
  679. synth_ai/environments/examples/bandit/__init__.py +0 -33
  680. synth_ai/environments/examples/bandit/engine.py +0 -302
  681. synth_ai/environments/examples/bandit/environment.py +0 -194
  682. synth_ai/environments/examples/bandit/taskset.py +0 -200
  683. synth_ai/environments/examples/crafter_classic/__init__.py +0 -8
  684. synth_ai/environments/examples/crafter_classic/agent_demos/analyze_semantic_words_markdown.py +0 -250
  685. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_comprehensive_evaluation.py +0 -59
  686. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_browser.py +0 -152
  687. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_config.toml +0 -24
  688. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_framework.py +0 -1194
  689. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/crafter_synth_config.toml +0 -56
  690. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_config_modal.toml +0 -32
  691. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_traces_sft_turso.py +0 -738
  692. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/kick_off_ft_modal.py +0 -384
  693. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_action_results.py +0 -53
  694. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_agent_actions.py +0 -178
  695. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_latest_run.py +0 -222
  696. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_lm_traces.py +0 -183
  697. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_no_rewards.py +0 -210
  698. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/analyze_trace_issue.py +0 -206
  699. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/check_db_schema.py +0 -49
  700. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/check_latest_results.py +0 -64
  701. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/debug_agent_responses.py +0 -88
  702. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/old/quick_trace_check.py +0 -77
  703. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/compare_experiments.py +0 -324
  704. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/filter_traces_sft_turso.py +0 -580
  705. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/kick_off_ft_oai.py +0 -362
  706. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/multi_model_config.toml +0 -49
  707. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_enhanced_hooks.py +0 -332
  708. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_hook_events.py +0 -97
  709. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/analyze_hook_results.py +0 -217
  710. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/check_hook_storage.py +0 -87
  711. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/check_seeds.py +0 -88
  712. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/compare_seed_performance.py +0 -195
  713. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/custom_eval_pipelines.py +0 -400
  714. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/plot_hook_frequency.py +0 -195
  715. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/old/seed_analysis_summary.py +0 -56
  716. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/run_rollouts_for_models_and_compare_v3.py +0 -858
  717. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_quick_evaluation.py +0 -52
  718. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_react_agent.py +0 -874
  719. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_trace_evaluation.py +0 -1412
  720. synth_ai/environments/examples/crafter_classic/agent_demos/example_v3_usage.py +0 -216
  721. synth_ai/environments/examples/crafter_classic/agent_demos/old/compare_traces.py +0 -296
  722. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_comprehensive_evaluation.py +0 -58
  723. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_env_serialization.py +0 -464
  724. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_evaluation_browser.py +0 -152
  725. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_quick_evaluation.py +0 -51
  726. synth_ai/environments/examples/crafter_classic/agent_demos/old/crafter_trace_evaluation.py +0 -1412
  727. synth_ai/environments/examples/crafter_classic/agent_demos/old/debug_player_loss.py +0 -112
  728. synth_ai/environments/examples/crafter_classic/agent_demos/old/diagnose_service.py +0 -203
  729. synth_ai/environments/examples/crafter_classic/agent_demos/old/diagnose_slowness.py +0 -305
  730. synth_ai/environments/examples/crafter_classic/agent_demos/old/eval_by_difficulty.py +0 -126
  731. synth_ai/environments/examples/crafter_classic/agent_demos/old/eval_example.py +0 -94
  732. synth_ai/environments/examples/crafter_classic/agent_demos/old/explore_saved_states.py +0 -142
  733. synth_ai/environments/examples/crafter_classic/agent_demos/old/filter_traces_sft.py +0 -26
  734. synth_ai/environments/examples/crafter_classic/agent_demos/old/filter_traces_sft_OLD.py +0 -984
  735. synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_data_gemini.py +0 -724
  736. synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_data_modal.py +0 -386
  737. synth_ai/environments/examples/crafter_classic/agent_demos/old/generate_ft_metadata.py +0 -205
  738. synth_ai/environments/examples/crafter_classic/agent_demos/old/kick_off_ft_gemini.py +0 -150
  739. synth_ai/environments/examples/crafter_classic/agent_demos/old/kick_off_ft_modal.py +0 -283
  740. synth_ai/environments/examples/crafter_classic/agent_demos/old/prepare_vertex_ft.py +0 -280
  741. synth_ai/environments/examples/crafter_classic/agent_demos/old/profile_env_slowness.py +0 -456
  742. synth_ai/environments/examples/crafter_classic/agent_demos/old/replicate_issue.py +0 -166
  743. synth_ai/environments/examples/crafter_classic/agent_demos/old/run_and_eval.py +0 -102
  744. synth_ai/environments/examples/crafter_classic/agent_demos/old/run_comparison.py +0 -128
  745. synth_ai/environments/examples/crafter_classic/agent_demos/old/run_qwen_rollouts.py +0 -655
  746. synth_ai/environments/examples/crafter_classic/agent_demos/old/trace_eval_OLD.py +0 -202
  747. synth_ai/environments/examples/crafter_classic/agent_demos/old/validate_openai_format.py +0 -166
  748. synth_ai/environments/examples/crafter_classic/config_logging.py +0 -111
  749. synth_ai/environments/examples/crafter_classic/debug_translation.py +0 -0
  750. synth_ai/environments/examples/crafter_classic/engine.py +0 -579
  751. synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +0 -64
  752. synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +0 -6
  753. synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +0 -75
  754. synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +0 -267
  755. synth_ai/environments/examples/crafter_classic/environment.py +0 -495
  756. synth_ai/environments/examples/crafter_classic/taskset.py +0 -233
  757. synth_ai/environments/examples/crafter_classic/trace_hooks_v3.py +0 -228
  758. synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +0 -299
  759. synth_ai/environments/examples/crafter_custom/__init__.py +0 -4
  760. synth_ai/environments/examples/crafter_custom/agent_demos/__init__.py +0 -1
  761. synth_ai/environments/examples/crafter_custom/agent_demos/trace_eval.py +0 -202
  762. synth_ai/environments/examples/crafter_custom/crafter/__init__.py +0 -7
  763. synth_ai/environments/examples/crafter_custom/crafter/config.py +0 -182
  764. synth_ai/environments/examples/crafter_custom/crafter/constants.py +0 -8
  765. synth_ai/environments/examples/crafter_custom/crafter/engine.py +0 -269
  766. synth_ai/environments/examples/crafter_custom/crafter/env.py +0 -262
  767. synth_ai/environments/examples/crafter_custom/crafter/objects.py +0 -417
  768. synth_ai/environments/examples/crafter_custom/crafter/recorder.py +0 -187
  769. synth_ai/environments/examples/crafter_custom/crafter/worldgen.py +0 -118
  770. synth_ai/environments/examples/crafter_custom/dataset_builder.py +0 -373
  771. synth_ai/environments/examples/crafter_custom/environment.py +0 -312
  772. synth_ai/environments/examples/crafter_custom/old/analyze_diamond_issue.py +0 -159
  773. synth_ai/environments/examples/crafter_custom/old/analyze_diamond_spawning.py +0 -158
  774. synth_ai/environments/examples/crafter_custom/old/compare_worlds.py +0 -71
  775. synth_ai/environments/examples/crafter_custom/old/dataset_stats.py +0 -105
  776. synth_ai/environments/examples/crafter_custom/old/diamond_spawning_summary.py +0 -119
  777. synth_ai/environments/examples/crafter_custom/old/example_dataset_usage.py +0 -52
  778. synth_ai/environments/examples/crafter_custom/run_dataset.py +0 -305
  779. synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +0 -156
  780. synth_ai/environments/examples/enron/art_helpers/local_email_db.py +0 -281
  781. synth_ai/environments/examples/enron/art_helpers/types_enron.py +0 -25
  782. synth_ai/environments/examples/enron/engine.py +0 -300
  783. synth_ai/environments/examples/enron/environment.py +0 -234
  784. synth_ai/environments/examples/enron/taskset.py +0 -112
  785. synth_ai/environments/examples/enron/units/keyword_stats.py +0 -112
  786. synth_ai/environments/examples/minigrid/__init__.py +0 -48
  787. synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py +0 -1188
  788. synth_ai/environments/examples/minigrid/agent_demos/minigrid_quick_evaluation.py +0 -48
  789. synth_ai/environments/examples/minigrid/agent_demos/minigrid_react_agent.py +0 -562
  790. synth_ai/environments/examples/minigrid/agent_demos/minigrid_trace_evaluation.py +0 -221
  791. synth_ai/environments/examples/minigrid/engine.py +0 -589
  792. synth_ai/environments/examples/minigrid/environment.py +0 -274
  793. synth_ai/environments/examples/minigrid/environment_mapping.py +0 -242
  794. synth_ai/environments/examples/minigrid/puzzle_loader.py +0 -417
  795. synth_ai/environments/examples/minigrid/taskset.py +0 -583
  796. synth_ai/environments/examples/nethack/__init__.py +0 -7
  797. synth_ai/environments/examples/nethack/achievements.py +0 -337
  798. synth_ai/environments/examples/nethack/agent_demos/nethack_evaluation_framework.py +0 -981
  799. synth_ai/environments/examples/nethack/agent_demos/nethack_quick_evaluation.py +0 -74
  800. synth_ai/environments/examples/nethack/agent_demos/nethack_react_agent.py +0 -831
  801. synth_ai/environments/examples/nethack/engine.py +0 -739
  802. synth_ai/environments/examples/nethack/environment.py +0 -256
  803. synth_ai/environments/examples/nethack/helpers/__init__.py +0 -41
  804. synth_ai/environments/examples/nethack/helpers/action_mapping.py +0 -301
  805. synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +0 -402
  806. synth_ai/environments/examples/nethack/helpers/observation_utils.py +0 -433
  807. synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +0 -200
  808. synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +0 -269
  809. synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +0 -308
  810. synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +0 -431
  811. synth_ai/environments/examples/nethack/taskset.py +0 -323
  812. synth_ai/environments/examples/red/__init__.py +0 -7
  813. synth_ai/environments/examples/red/agent_demos/__init__.py +0 -1
  814. synth_ai/environments/examples/red/config_logging.py +0 -110
  815. synth_ai/environments/examples/red/engine.py +0 -721
  816. synth_ai/environments/examples/red/engine_helpers/__init__.py +0 -1
  817. synth_ai/environments/examples/red/engine_helpers/memory_map.py +0 -35
  818. synth_ai/environments/examples/red/engine_helpers/reward_components.py +0 -276
  819. synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +0 -142
  820. synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +0 -57
  821. synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +0 -284
  822. synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +0 -150
  823. synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +0 -138
  824. synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +0 -57
  825. synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +0 -331
  826. synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +0 -121
  827. synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_progression.py +0 -477
  828. synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +0 -559
  829. synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +0 -313
  830. synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +0 -148
  831. synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +0 -247
  832. synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +0 -368
  833. synth_ai/environments/examples/red/engine_helpers/state_extraction.py +0 -172
  834. synth_ai/environments/examples/red/environment.py +0 -298
  835. synth_ai/environments/examples/red/taskset.py +0 -79
  836. synth_ai/environments/examples/red/units/__init__.py +0 -1
  837. synth_ai/environments/examples/sokoban/__init__.py +0 -1
  838. synth_ai/environments/examples/sokoban/agent_demos/sokoban_full_eval.py +0 -899
  839. synth_ai/environments/examples/sokoban/engine.py +0 -678
  840. synth_ai/environments/examples/sokoban/engine_helpers/__init__.py +0 -1
  841. synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +0 -657
  842. synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +0 -18
  843. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +0 -3
  844. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +0 -131
  845. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +0 -370
  846. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +0 -332
  847. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +0 -306
  848. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +0 -67
  849. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +0 -115
  850. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +0 -123
  851. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +0 -394
  852. synth_ai/environments/examples/sokoban/environment.py +0 -229
  853. synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +0 -440
  854. synth_ai/environments/examples/sokoban/puzzle_loader.py +0 -312
  855. synth_ai/environments/examples/sokoban/taskset.py +0 -544
  856. synth_ai/environments/examples/tictactoe/__init__.py +0 -1
  857. synth_ai/environments/examples/tictactoe/engine.py +0 -368
  858. synth_ai/environments/examples/tictactoe/environment.py +0 -240
  859. synth_ai/environments/examples/tictactoe/taskset.py +0 -215
  860. synth_ai/environments/examples/verilog/__init__.py +0 -10
  861. synth_ai/environments/examples/verilog/engine.py +0 -421
  862. synth_ai/environments/examples/verilog/environment.py +0 -350
  863. synth_ai/environments/examples/verilog/taskset.py +0 -420
  864. synth_ai/environments/examples/wordle/__init__.py +0 -29
  865. synth_ai/environments/examples/wordle/engine.py +0 -398
  866. synth_ai/environments/examples/wordle/environment.py +0 -159
  867. synth_ai/environments/examples/wordle/helpers/generate_instances_wordfreq.py +0 -75
  868. synth_ai/environments/examples/wordle/taskset.py +0 -230
  869. synth_ai/environments/reproducibility/core.py +0 -42
  870. synth_ai/environments/reproducibility/helpers.py +0 -0
  871. synth_ai/environments/reproducibility/tree.py +0 -363
  872. synth_ai/environments/service/app.py +0 -97
  873. synth_ai/environments/service/core_routes.py +0 -1021
  874. synth_ai/environments/service/external_registry.py +0 -56
  875. synth_ai/environments/service/registry.py +0 -9
  876. synth_ai/environments/stateful/__init__.py +0 -1
  877. synth_ai/environments/stateful/core.py +0 -163
  878. synth_ai/environments/stateful/engine.py +0 -21
  879. synth_ai/environments/stateful/state.py +0 -7
  880. synth_ai/environments/tasks/api.py +0 -19
  881. synth_ai/environments/tasks/core.py +0 -81
  882. synth_ai/environments/tasks/filters.py +0 -40
  883. synth_ai/environments/tasks/utils.py +0 -90
  884. synth_ai/environments/v0_observability/history.py +0 -3
  885. synth_ai/environments/v0_observability/log.py +0 -2
  886. synth_ai/evals/__init__.py +0 -15
  887. synth_ai/evals/base.py +0 -13
  888. synth_ai/evals/client.py +0 -82
  889. synth_ai/handshake.py +0 -109
  890. synth_ai/http.py +0 -26
  891. synth_ai/http_client.py +0 -136
  892. synth_ai/inference/__init__.py +0 -5
  893. synth_ai/inference/client.py +0 -34
  894. synth_ai/jobs/client.py +0 -295
  895. synth_ai/judge_schemas.py +0 -127
  896. synth_ai/learning/__init__.py +0 -59
  897. synth_ai/learning/client.py +0 -241
  898. synth_ai/learning/ft_client.py +0 -7
  899. synth_ai/learning/health.py +0 -49
  900. synth_ai/learning/jobs.py +0 -201
  901. synth_ai/learning/rl/client.py +0 -267
  902. synth_ai/learning/rl/contracts.py +0 -27
  903. synth_ai/learning/rl/env_keys.py +0 -166
  904. synth_ai/learning/rl/secrets.py +0 -13
  905. synth_ai/learning/sft/client.py +0 -68
  906. synth_ai/learning/sft/config.py +0 -270
  907. synth_ai/learning/sft/data.py +0 -295
  908. synth_ai/learning/validators.py +0 -49
  909. synth_ai/lm/__init__.py +0 -25
  910. synth_ai/task/__init__.py +0 -121
  911. synth_ai/task/apps/__init__.py +0 -129
  912. synth_ai/task/config.py +0 -257
  913. synth_ai/task/contracts.py +0 -236
  914. synth_ai/task/datasets.py +0 -108
  915. synth_ai/task/proxy.py +0 -251
  916. synth_ai/task/rubrics/__init__.py +0 -56
  917. synth_ai/task/rubrics/loaders.py +0 -152
  918. synth_ai/task/server.py +0 -432
  919. synth_ai/task/trace_correlation_helpers.py +0 -315
  920. synth_ai/task/tracing_utils.py +0 -84
  921. synth_ai/task/validators.py +0 -418
  922. synth_ai/tracing_v3/__init__.py +0 -97
  923. synth_ai/tracing_v3/abstractions.py +0 -302
  924. synth_ai/tracing_v3/config.py +0 -84
  925. synth_ai/tracing_v3/db_config.py +0 -194
  926. synth_ai/tracing_v3/decorators.py +0 -398
  927. synth_ai/tracing_v3/llm_call_record_helpers.py +0 -391
  928. synth_ai/tracing_v3/migration_helper.py +0 -120
  929. synth_ai/tracing_v3/session_tracer.py +0 -540
  930. synth_ai/tracing_v3/storage/base.py +0 -210
  931. synth_ai/tracing_v3/storage/config.py +0 -75
  932. synth_ai/tracing_v3/storage/factory.py +0 -39
  933. synth_ai/tracing_v3/trace_utils.py +0 -317
  934. synth_ai/tracing_v3/turso/daemon.py +0 -151
  935. synth_ai/tracing_v3/turso/models.py +0 -469
  936. synth_ai/tracing_v3/turso/native_manager.py +0 -1209
  937. synth_ai/tracing_v3/utils.py +0 -108
  938. synth_ai/tui/__init__.py +0 -5
  939. synth_ai/tui/__main__.py +0 -13
  940. synth_ai/tui/cli/__init__.py +0 -1
  941. synth_ai/tui/cli/query_experiments.py +0 -164
  942. synth_ai/tui/cli/query_experiments_v3.py +0 -164
  943. synth_ai/tui/dashboard.py +0 -906
  944. synth_ai/v0/api/__init__.py +0 -8
  945. synth_ai/v0/api/models/__init__.py +0 -8
  946. synth_ai/v0/api/models/supported.py +0 -8
  947. synth_ai/v0/config/__init__.py +0 -15
  948. synth_ai/v0/config/base_url.py +0 -12
  949. synth_ai/v0/lm/__init__.py +0 -51
  950. synth_ai/v0/lm/caching/__init__.py +0 -0
  951. synth_ai/v0/lm/caching/constants.py +0 -6
  952. synth_ai/v0/lm/caching/dbs.py +0 -0
  953. synth_ai/v0/lm/caching/ephemeral.py +0 -100
  954. synth_ai/v0/lm/caching/handler.py +0 -137
  955. synth_ai/v0/lm/caching/initialize.py +0 -11
  956. synth_ai/v0/lm/caching/persistent.py +0 -114
  957. synth_ai/v0/lm/config.py +0 -115
  958. synth_ai/v0/lm/constants.py +0 -32
  959. synth_ai/v0/lm/core/__init__.py +0 -8
  960. synth_ai/v0/lm/core/all.py +0 -73
  961. synth_ai/v0/lm/core/exceptions.py +0 -5
  962. synth_ai/v0/lm/core/main.py +0 -331
  963. synth_ai/v0/lm/core/main_v3.py +0 -594
  964. synth_ai/v0/lm/core/synth_models.py +0 -35
  965. synth_ai/v0/lm/core/vendor_clients.py +0 -190
  966. synth_ai/v0/lm/cost/__init__.py +0 -0
  967. synth_ai/v0/lm/cost/monitor.py +0 -1
  968. synth_ai/v0/lm/cost/statefulness.py +0 -1
  969. synth_ai/v0/lm/injection.py +0 -80
  970. synth_ai/v0/lm/overrides.py +0 -206
  971. synth_ai/v0/lm/provider_support/__init__.py +0 -8
  972. synth_ai/v0/lm/provider_support/anthropic.py +0 -972
  973. synth_ai/v0/lm/provider_support/openai.py +0 -1139
  974. synth_ai/v0/lm/provider_support/suppress_logging.py +0 -31
  975. synth_ai/v0/lm/structured_outputs/__init__.py +0 -0
  976. synth_ai/v0/lm/structured_outputs/handler.py +0 -440
  977. synth_ai/v0/lm/structured_outputs/inject.py +0 -297
  978. synth_ai/v0/lm/structured_outputs/rehabilitate.py +0 -185
  979. synth_ai/v0/lm/tools/__init__.py +0 -3
  980. synth_ai/v0/lm/tools/base.py +0 -172
  981. synth_ai/v0/lm/unified_interface.py +0 -202
  982. synth_ai/v0/lm/vendors/__init__.py +0 -0
  983. synth_ai/v0/lm/vendors/base.py +0 -81
  984. synth_ai/v0/lm/vendors/core/__init__.py +0 -0
  985. synth_ai/v0/lm/vendors/core/anthropic_api.py +0 -387
  986. synth_ai/v0/lm/vendors/core/gemini_api.py +0 -292
  987. synth_ai/v0/lm/vendors/core/mistral_api.py +0 -322
  988. synth_ai/v0/lm/vendors/core/openai_api.py +0 -227
  989. synth_ai/v0/lm/vendors/core/synth_dev_api.py +0 -0
  990. synth_ai/v0/lm/vendors/local/__init__.py +0 -0
  991. synth_ai/v0/lm/vendors/local/ollama.py +0 -0
  992. synth_ai/v0/lm/vendors/openai_standard.py +0 -782
  993. synth_ai/v0/lm/vendors/openai_standard_responses.py +0 -259
  994. synth_ai/v0/lm/vendors/retries.py +0 -22
  995. synth_ai/v0/lm/vendors/supported/__init__.py +0 -0
  996. synth_ai/v0/lm/vendors/supported/custom_endpoint.py +0 -415
  997. synth_ai/v0/lm/vendors/supported/deepseek.py +0 -69
  998. synth_ai/v0/lm/vendors/supported/grok.py +0 -75
  999. synth_ai/v0/lm/vendors/supported/groq.py +0 -16
  1000. synth_ai/v0/lm/vendors/supported/ollama.py +0 -15
  1001. synth_ai/v0/lm/vendors/supported/openrouter.py +0 -74
  1002. synth_ai/v0/lm/vendors/supported/together.py +0 -11
  1003. synth_ai/v0/lm/vendors/synth_client.py +0 -835
  1004. synth_ai/v0/lm/warmup.py +0 -186
  1005. synth_ai/v0/tracing/__init__.py +0 -0
  1006. synth_ai/v0/tracing/abstractions.py +0 -224
  1007. synth_ai/v0/tracing/base_client.py +0 -91
  1008. synth_ai/v0/tracing/client_manager.py +0 -131
  1009. synth_ai/v0/tracing/config.py +0 -142
  1010. synth_ai/v0/tracing/context.py +0 -146
  1011. synth_ai/v0/tracing/decorators.py +0 -682
  1012. synth_ai/v0/tracing/events/__init__.py +0 -0
  1013. synth_ai/v0/tracing/events/manage.py +0 -147
  1014. synth_ai/v0/tracing/events/scope.py +0 -86
  1015. synth_ai/v0/tracing/events/store.py +0 -228
  1016. synth_ai/v0/tracing/immediate_client.py +0 -151
  1017. synth_ai/v0/tracing/local.py +0 -18
  1018. synth_ai/v0/tracing/log_client_base.py +0 -73
  1019. synth_ai/v0/tracing/retry_queue.py +0 -186
  1020. synth_ai/v0/tracing/trackers.py +0 -515
  1021. synth_ai/v0/tracing/upload.py +0 -409
  1022. synth_ai/v0/tracing/utils.py +0 -9
  1023. synth_ai/v0/tracing_v1/__init__.py +0 -16
  1024. synth_ai/v0/tracing_v1/abstractions.py +0 -224
  1025. synth_ai/v0/tracing_v1/base_client.py +0 -91
  1026. synth_ai/v0/tracing_v1/client_manager.py +0 -131
  1027. synth_ai/v0/tracing_v1/config.py +0 -142
  1028. synth_ai/v0/tracing_v1/context.py +0 -146
  1029. synth_ai/v0/tracing_v1/decorators.py +0 -703
  1030. synth_ai/v0/tracing_v1/events/__init__.py +0 -0
  1031. synth_ai/v0/tracing_v1/events/manage.py +0 -147
  1032. synth_ai/v0/tracing_v1/events/scope.py +0 -86
  1033. synth_ai/v0/tracing_v1/events/store.py +0 -228
  1034. synth_ai/v0/tracing_v1/immediate_client.py +0 -151
  1035. synth_ai/v0/tracing_v1/local.py +0 -18
  1036. synth_ai/v0/tracing_v1/log_client_base.py +0 -73
  1037. synth_ai/v0/tracing_v1/retry_queue.py +0 -186
  1038. synth_ai/v0/tracing_v1/trackers.py +0 -515
  1039. synth_ai/v0/tracing_v1/upload.py +0 -527
  1040. synth_ai/v0/tracing_v1/utils.py +0 -9
  1041. synth_ai/v0/tracing_v3/__init__.py +0 -10
  1042. synth_ai/v0/tracing_v3/abstractions.py +0 -3
  1043. synth_ai/v0/tracing_v3/decorators.py +0 -3
  1044. synth_ai/v0/tracing_v3/llm_call_record_helpers.py +0 -3
  1045. synth_ai/v0/tracing_v3/session_tracer.py +0 -3
  1046. synth_ai-0.2.14.dist-info/METADATA +0 -139
  1047. synth_ai-0.2.14.dist-info/RECORD +0 -762
  1048. synth_ai-0.2.14.dist-info/top_level.txt +0 -2
  1049. /synth_ai/{demos/demo_task_apps → cli/demo_apps}/crafter/__init__.py +0 -0
  1050. /synth_ai/{demos → cli/demo_apps}/demo_task_apps/__init__.py +0 -0
  1051. /synth_ai/{demos → cli/demo_apps}/demo_task_apps/crafter/configs/crafter_fft_4b.toml +0 -0
  1052. /synth_ai/{demos → cli/demo_apps}/demo_task_apps/crafter/configs/rl_from_base_qwen4b.toml +0 -0
  1053. /synth_ai/{demos → cli/demo_apps}/demo_task_apps/math/__init__.py +0 -0
  1054. /synth_ai/{demos → cli/demo_apps}/demo_task_apps/math/_common.py +0 -0
  1055. /synth_ai/{demos → cli/demo_apps}/demo_task_apps/math/app.py +0 -0
  1056. /synth_ai/{demos → cli/demo_apps}/demo_task_apps/math/config.toml +0 -0
  1057. /synth_ai/{demos → cli/demo_apps}/demo_task_apps/math/deploy_modal.py +0 -0
  1058. {examples/task_apps → synth_ai/core/apps}/__init__.py +0 -0
  1059. /synth_ai/{tracing_v3 → core/tracing_v3}/examples/basic_usage.py +0 -0
  1060. /synth_ai/{tracing_v3 → core/tracing_v3}/hooks.py +0 -0
  1061. /synth_ai/{tracing_v3 → core/tracing_v3}/lm_call_record_abstractions.py +0 -0
  1062. /synth_ai/{tracing_v3 → core/tracing_v3}/replica_sync.py +0 -0
  1063. /synth_ai/{tracing_v3 → core/tracing_v3}/serialization.py +0 -0
  1064. /synth_ai/{tracing_v3 → core/tracing_v3}/storage/__init__.py +0 -0
  1065. /synth_ai/{tracing_v3 → core/tracing_v3}/storage/exceptions.py +0 -0
  1066. /synth_ai/{tracing_v3 → core/tracing_v3}/storage/types.py +0 -0
  1067. /synth_ai/{tracing_v3 → core/tracing_v3}/storage/utils.py +0 -0
  1068. /synth_ai/{tracing_v3 → core/tracing_v3}/turso/__init__.py +0 -0
  1069. /synth_ai/{evals → sdk/judging}/types.py +0 -0
  1070. /synth_ai/{learning → sdk/learning}/algorithms.py +0 -0
  1071. /synth_ai/{learning → sdk/learning}/config.py +0 -0
  1072. /synth_ai/{learning → sdk/learning}/constants.py +0 -0
  1073. /synth_ai/{learning → sdk/learning}/core.py +0 -0
  1074. /synth_ai/{learning → sdk/learning}/gateway.py +0 -0
  1075. /synth_ai/{learning → sdk/learning}/rl/__init__.py +0 -0
  1076. /synth_ai/{learning → sdk/learning}/rl/config.py +0 -0
  1077. /synth_ai/{learning → sdk/learning}/rl_client.py +0 -0
  1078. /synth_ai/{learning → sdk/learning}/sft/__init__.py +0 -0
  1079. /synth_ai/{learning → sdk/learning}/sse.py +0 -0
  1080. /synth_ai/{task → sdk/task}/auth.py +0 -0
  1081. /synth_ai/{task → sdk/task}/client.py +0 -0
  1082. /synth_ai/{task → sdk/task}/errors.py +0 -0
  1083. /synth_ai/{task → sdk/task}/health.py +0 -0
  1084. /synth_ai/{task → sdk/task}/json.py +0 -0
  1085. /synth_ai/{task → sdk/task}/rubrics/models.py +0 -0
  1086. /synth_ai/{task → sdk/task}/rubrics/scoring.py +0 -0
  1087. /synth_ai/{task → sdk/task}/rubrics/strict.py +0 -0
  1088. /synth_ai/{task → sdk/task}/vendors.py +0 -0
  1089. {synth_ai-0.2.14.dist-info → synth_ai-0.4.1.dist-info}/WHEEL +0 -0
  1090. {synth_ai-0.2.14.dist-info → synth_ai-0.4.1.dist-info}/entry_points.txt +0 -0
  1091. {synth_ai-0.2.14.dist-info → synth_ai-0.4.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,2188 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import contextlib
5
+ import importlib
6
+ import json
7
+ import os
8
+ from collections.abc import Callable, Mapping
9
+ from pathlib import Path
10
+ from typing import Any, NoReturn, cast
11
+
12
+ import click
13
+
14
+ from synth_ai.cli.lib.env import get_synth_and_env_keys, mask_str
15
+ from synth_ai.cli.lib.train_cfgs import find_train_cfgs_in_cwd, validate_train_cfg
16
+ from synth_ai.core.paths import print_paths_formatted
17
+
18
+ try:
19
+ _config_module = cast(
20
+ Any, importlib.import_module("synth_ai.core.env")
21
+ )
22
+ get_backend_from_env = cast(Callable[[], str], _config_module.get_backend_from_env)
23
+ except Exception as exc: # pragma: no cover - critical dependency
24
+ raise RuntimeError("Unable to load backend configuration helpers") from exc
25
+
26
+ from synth_ai.cli.lib.env import load_env_file
27
+ from synth_ai.cli.lib.errors import format_error_message, get_required_value
28
+ from synth_ai.core.telemetry import flush_logger, log_error, log_info
29
+ from synth_ai.sdk.streaming import (
30
+ GraphGenHandler,
31
+ CLIHandler,
32
+ JobStreamer,
33
+ LossCurveHandler,
34
+ PromptLearningHandler,
35
+ StreamConfig,
36
+ StreamEndpoints,
37
+ StreamType,
38
+ )
39
+
40
+ from .builders import build_prompt_learning_payload, build_rl_payload, build_sft_payload
41
+ from .task_app import check_task_app_health
42
+ from .graphgen import GraphGenJob
43
+ from .graphgen_models import load_graphgen_taskset
44
+ from .context_learning import ContextLearningJob
45
+ from .utils import (
46
+ TrainError,
47
+ ensure_api_base,
48
+ http_get,
49
+ http_post,
50
+ limit_jsonl_examples,
51
+ mask_value,
52
+ post_multipart,
53
+ preview_json,
54
+ sleep,
55
+ validate_sft_jsonl,
56
+ )
57
+
58
+ # Constants for prompt learning event types
59
+ _PROMPT_LEARNING_EVENT_BEST_PROMPT = "prompt.learning.best.prompt"
60
+ _PROMPT_LEARNING_EVENT_FINAL_RESULTS = "prompt.learning.final.results"
61
+ _PROMPT_LEARNING_EVENT_VALIDATION_SCORED = "prompt.learning.validation.scored"
62
+ _PROMPT_LEARNING_EVENT_GEPA_COMPLETE = "prompt.learning.gepa.complete"
63
+ _PROMPT_LEARNING_EVENT_MIPRO_COMPLETE = "prompt.learning.mipro.complete"
64
+ _PROMPT_LEARNING_EVENT_GEPA_NEW_BEST = "prompt.learning.gepa.new_best"
65
+ _PROMPT_LEARNING_EVENT_PHASE_CHANGED = "prompt.learning.phase.changed"
66
+ _PROMPT_LEARNING_EVENT_PROGRESS = "prompt.learning.progress"
67
+ _PROMPT_LEARNING_EVENT_STREAM_CONNECTED = "prompt.learning.stream.connected"
68
+
69
+ # Constants for formatting
70
+ _MAX_TEXT_REPLACEMENTS_DISPLAY = 3 # Max number of text replacements to show in output
71
+ _RESULTS_FILE_MAX_EVENTS = 10000 # Max events to fetch for results file generation
72
+
73
+
74
+ def _format_text_replacements(obj: dict[str, Any] | None, max_display: int = _MAX_TEXT_REPLACEMENTS_DISPLAY) -> list[str]:
75
+ """Extract and format text replacements from a candidate object.
76
+
77
+ Args:
78
+ obj: Candidate object dictionary containing text_replacements
79
+ max_display: Maximum number of replacements to display
80
+
81
+ Returns:
82
+ List of formatted lines showing role and replacement text
83
+ """
84
+ lines = []
85
+ if not obj or not isinstance(obj, dict):
86
+ return lines
87
+
88
+ text_replacements = obj.get("text_replacements", [])
89
+ if not text_replacements or not isinstance(text_replacements, list):
90
+ return lines
91
+
92
+ for replacement in text_replacements[:max_display]:
93
+ if isinstance(replacement, dict):
94
+ new_text = replacement.get("new_text", "")
95
+ role = replacement.get("apply_to_role", "system")
96
+ if new_text:
97
+ lines.append(f" [{role.upper()}]: {new_text}")
98
+ lines.append("")
99
+
100
+ return lines
101
+
102
+
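# Illustrative usage sketch (not part of the released module): the candidate
# shape below is inferred from the accessors above; real candidates may carry
# additional fields.
_example_candidate = {
    "text_replacements": [
        {"apply_to_role": "system", "new_text": "Answer concisely."},
        {"apply_to_role": "user", "new_text": "Show your reasoning."},
    ]
}
for _line in _format_text_replacements(_example_candidate, max_display=2):
    print(_line)  # prints " [SYSTEM]: Answer concisely." style lines, each followed by a blank line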
103
+ def _default_backend() -> str:
104
+ """Resolve backend URL with proper production default.
105
+
106
+ Priority order:
107
+ 1. BACKEND_BASE_URL env var (highest priority) - checked FIRST before any .env loading
108
+ 2. BACKEND_OVERRIDE env var
109
+ 3. get_backend_from_env() standard resolution (which may use SYNTH_BASE_URL from .env)
110
+
111
+ CRITICAL: This function MUST check BACKEND_BASE_URL directly from os.environ
112
+ to ensure it's not overridden by .env file loading.
113
+ """
114
+ # Check explicit override first (BACKEND_BASE_URL takes absolute precedence)
115
+ # Read directly from os.environ to avoid any dotenv interference
116
+ explicit = os.environ.get("BACKEND_BASE_URL", "").strip()
117
+ if explicit:
118
+ # Return as-is, ensure_api_base() will normalize it
119
+ return explicit
120
+
121
+ # Fallback to BACKEND_OVERRIDE (also read directly from environ)
122
+ override = os.environ.get("BACKEND_OVERRIDE", "").strip()
123
+ if override:
124
+ return override
125
+
126
+ # Use standard resolution logic (may use SYNTH_BASE_URL from .env)
127
+ base, _ = get_backend_from_env()
128
+ return f"{base}/api" if not base.endswith("/api") else base
129
+
130
+
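# Minimal sketch of the resolution order documented above, assuming no .env
# side effects: BACKEND_BASE_URL wins over BACKEND_OVERRIDE, which wins over
# the standard get_backend_from_env() resolution. URLs here are placeholders.
os.environ["BACKEND_BASE_URL"] = "http://localhost:8000"
assert _default_backend() == "http://localhost:8000"  # returned as-is; ensure_api_base() normalizes later

os.environ.pop("BACKEND_BASE_URL", None)
os.environ["BACKEND_OVERRIDE"] = "https://staging.example.invalid"
assert _default_backend() == "https://staging.example.invalid"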
131
+ _DEFAULT_SFT_HIDDEN_EVENTS = {
132
+ "sft.created",
133
+ "sft.pricing.check.requested",
134
+ "sft.pricing.check.allowed",
135
+ "sft.stage",
136
+ "snapshot.fetch",
137
+ "hatchet.preflight",
138
+ "hatchet.submission.attempt",
139
+ "hatchet.submission.result",
140
+ "sft.running",
141
+ "sft.status",
142
+ "sft.worker.alive",
143
+ "sft.dispatch.selected",
144
+ "sft.config.prepared",
145
+ "sft.strategy.selected",
146
+ "sft.training.args",
147
+ }
148
+
149
+ _DEFAULT_RL_HIDDEN_SUBSTRINGS = {"modal", "hatchet"}
150
+
151
+ _DEFAULT_PROMPT_LEARNING_HIDDEN_EVENTS = {
152
+ "prompt.learning.policy.tokens",
153
+ "mipro.bootstrap.progress", # Hide individual bootstrap seed scores
154
+ "mipro.tpe.rankings", # Hide verbose TPE rankings
155
+ "mipro.tpe.selected", # Hide TPE selection details
156
+ "mipro.tpe.update", # Hide TPE density updates
157
+ "mipro.trial.duplicate", # Hide duplicate trial messages
158
+ "mipro.trial.started", # Hide individual trial start messages (too verbose with instructions)
159
+ "mipro.trial.minibatch", # Hide minibatch completion (only show full eval)
160
+ "mipro.trial.complete", # Hide individual trial completion
161
+ "mipro.iteration.skip_generation", # Hide skip generation messages
162
+ "mipro.budget.update", # Hide verbose budget updates (progress handler shows summary)
163
+ "mipro.instruction.proposed", # Hide proposed instructions (shown in results/logs only)
164
+ "gepa.transformation.proposed", # Hide proposed transformations (shown in results/logs only)
165
+ # Note: mipro.stage_proposer.called is shown so users know instruction generation is happening
166
+ }
167
+
168
+
169
+ def _load_toml_config(config_path: Path) -> dict[str, Any]:
170
+ """Load TOML config file."""
171
+ try:
172
+ import tomli # type: ignore[import-untyped]
173
+ except ImportError:
174
+ # Fallback to tomllib for Python 3.11+
175
+ try:
176
+ import tomllib as tomli
177
+ except ImportError:
178
+ return {}
179
+
180
+ try:
181
+ with open(config_path, "rb") as f:
182
+ return tomli.load(f)
183
+ except Exception:
184
+ return {}
185
+
186
+
187
+ def parse_env_file_path_from_config(config_path: Path) -> str | None:
188
+ """Parse env_file_path from TOML config.
189
+
190
+ Checks both [prompt_learning] and top-level sections.
191
+ """
192
+ config = _load_toml_config(config_path)
193
+
194
+ # Check prompt_learning section first
195
+ pl_section = config.get("prompt_learning", {})
196
+ if isinstance(pl_section, dict):
197
+ env_file_path = pl_section.get("env_file_path")
198
+ if env_file_path:
199
+ return str(env_file_path)
200
+
201
+ # Check top-level
202
+ env_file_path = config.get("env_file_path")
203
+ if env_file_path:
204
+ return str(env_file_path)
205
+
206
+ return None
207
+
208
+
209
+ def parse_results_folder(config_path: Path) -> Path:
210
+ """Parse results_folder from TOML config and validate it exists.
211
+
212
+ Checks both [prompt_learning] and top-level sections.
213
+ Raises ClickException if missing or invalid.
214
+ """
215
+ config = _load_toml_config(config_path)
216
+
217
+ # Check prompt_learning section first
218
+ pl_section = config.get("prompt_learning", {})
219
+ if isinstance(pl_section, dict):
220
+ results_folder = pl_section.get("results_folder")
221
+ if results_folder:
222
+ results_folder_str = str(results_folder).strip()
223
+ # Resolve relative to config file's directory if path is relative
224
+ if not Path(results_folder_str).is_absolute():
225
+ config_dir = config_path.parent.resolve()
226
+ results_path = (config_dir / results_folder_str).resolve()
227
+ else:
228
+ results_path = Path(results_folder_str).expanduser().resolve()
229
+
230
+ # Validate that the folder exists or can be created
231
+ try:
232
+ results_path.mkdir(parents=True, exist_ok=True)
233
+ except (OSError, PermissionError) as e:
234
+ raise click.ClickException(
235
+ f"Could not create results folder: {results_path}\n"
236
+ f" Error: {e}\n"
237
+ f" Config: {config_path}\n"
238
+ f" TOML results_folder: {results_folder}"
239
+ ) from e
240
+
241
+ return results_path
242
+
243
+ # Check top-level section
244
+ results_folder = config.get("results_folder")
245
+ if results_folder:
246
+ results_folder_str = str(results_folder).strip()
247
+ # Resolve relative to config file's directory if path is relative
248
+ if not Path(results_folder_str).is_absolute():
249
+ config_dir = config_path.parent.resolve()
250
+ results_path = (config_dir / results_folder_str).resolve()
251
+ else:
252
+ results_path = Path(results_folder_str).expanduser().resolve()
253
+
254
+ # Validate that the folder exists or can be created
255
+ try:
256
+ results_path.mkdir(parents=True, exist_ok=True)
257
+ except (OSError, PermissionError) as e:
258
+ raise click.ClickException(
259
+ f"Could not create results folder: {results_path}\n"
260
+ f" Error: {e}\n"
261
+ f" Config: {config_path}\n"
262
+ f" TOML results_folder: {results_folder}"
263
+ ) from e
264
+
265
+ return results_path
266
+
267
+ # Missing - raise error
268
+ raise click.ClickException(
269
+ f"Missing required 'results_folder' field in TOML config: {config_path}\n"
270
+ f" Please add 'results_folder = \"path/to/results\"' to [prompt_learning] section or top-level.\n"
271
+ f" Paths can be relative (to config file directory) or absolute."
272
+ )
273
+
274
+
275
+ def parse_display_config(config_path: Path) -> dict[str, Any]:
276
+ """Parse [display] section from TOML config."""
277
+ config = _load_toml_config(config_path)
278
+ display_section = config.get("display", {})
279
+
280
+ # Also extract termination_config for max limits
281
+ termination_section = config.get("termination_config", {})
282
+ # Also check prompt_learning.termination_config
283
+ pl_section = config.get("prompt_learning", {})
284
+ if isinstance(pl_section, dict):
285
+ pl_termination = pl_section.get("termination_config", {})
286
+ if isinstance(pl_termination, dict):
287
+ # Merge with top-level termination_config (top-level takes precedence)
288
+ termination_section = {**pl_termination, **termination_section}
289
+
290
+ return {
291
+ "local_backend": display_section.get("local_backend", False),
292
+ "tui": display_section.get("tui", False),
293
+ "show_curve": display_section.get("show_curve", True),
294
+ "verbose_summary": display_section.get("verbose_summary", True),
295
+ "show_trial_results": display_section.get("show_trial_results", True),
296
+ "show_transformations": display_section.get("show_transformations", False),
297
+ "show_validation": display_section.get("show_validation", True),
298
+ "max_tokens": termination_section.get("max_tokens"),
299
+ "max_time_seconds": termination_section.get("max_time_seconds"),
300
+ "max_rollouts": termination_section.get("max_rollouts"),
301
+ }
302
+
303
+
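# Hypothetical config sketch showing the fields the three parsers above look
# for ([prompt_learning].env_file_path and results_folder, a nested
# termination_config, and the [display] section). Field names come from the
# code; the concrete values and file name are made up for illustration.
_example_toml = """
[prompt_learning]
env_file_path = ".env"
results_folder = "results/gepa_run"

[prompt_learning.termination_config]
max_rollouts = 200

[display]
tui = true
show_curve = true
"""
_cfg = Path("example_prompt_learning.toml")  # hypothetical path
_cfg.write_text(_example_toml)
print(parse_env_file_path_from_config(_cfg))       # -> ".env"
print(parse_results_folder(_cfg))                  # resolved relative to the config directory
print(parse_display_config(_cfg)["max_rollouts"])  # -> 200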
304
+ def _build_stream_components(
305
+ stream_format: str,
306
+ *,
307
+ hidden_event_types: set[str] | None = None,
308
+ hidden_event_substrings: set[str] | None = None,
309
+ ) -> tuple[StreamConfig, list]:
310
+ """Return stream configuration and handlers for the requested format."""
311
+ if stream_format == "chart":
312
+ config = StreamConfig(
313
+ enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
314
+ event_types={
315
+ "sft.progress",
316
+ "sft.training.started",
317
+ "sft.training.finish",
318
+ "sft.validation.summary",
319
+ "rl.train.step",
320
+ "rl.train.started",
321
+ "rl.train.completed",
322
+ "workflow.completed",
323
+ "workflow.failed",
324
+ },
325
+ metric_names={"train.loss"},
326
+ )
327
+ handlers = [LossCurveHandler()]
328
+ else:
329
+ config = StreamConfig.default()
330
+ handlers = [
331
+ CLIHandler(
332
+ hidden_event_types=hidden_event_types or set(),
333
+ hidden_event_substrings=hidden_event_substrings or set(),
334
+ )
335
+ ]
336
+ return config, handlers
337
+
338
+
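# Sketch of how the two stream formats differ, using the helper above:
# "chart" narrows events/metrics and renders a live loss panel, while the
# default "cli" format prints line updates filtered by the hidden-event sets.
_chart_cfg, _chart_handlers = _build_stream_components("chart")
assert isinstance(_chart_handlers[0], LossCurveHandler)

_cli_cfg, _cli_handlers = _build_stream_components(
    "cli", hidden_event_types=_DEFAULT_SFT_HIDDEN_EVENTS
)
assert isinstance(_cli_handlers[0], CLIHandler)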
339
+ def _validate_openai_key_if_provider_is_openai(cfg_path: Path) -> None:
340
+ """Validate that OPENAI_API_KEY is set if the provider is OpenAI.
341
+
342
+ For prompt learning jobs, checks if policy.provider is 'openai' and raises
343
+ a ClickException if OPENAI_API_KEY is not set in the environment.
344
+ """
345
+ cfg = _load_toml_config(cfg_path)
346
+
347
+ # Check prompt_learning section
348
+ pl_section = cfg.get("prompt_learning", {})
349
+ if not isinstance(pl_section, dict):
350
+ return
351
+
352
+ policy = pl_section.get("policy", {})
353
+ if not isinstance(policy, dict):
354
+ return
355
+
356
+ provider = policy.get("provider", "").lower()
357
+
358
+ if provider == "openai":
359
+ openai_key = os.environ.get("OPENAI_API_KEY", "").strip()
360
+ if not openai_key:
361
+ raise click.ClickException(
362
+ "OPENAI_API_KEY is required when using provider='openai'.\n"
363
+ "Please set OPENAI_API_KEY in your .env file or environment."
364
+ )
365
+
366
+
367
+ # Module-level logging to track import and registration
368
+ import logging as _logging # noqa: E402
369
+ import sys # noqa: E402
370
+
371
+ _logger = _logging.getLogger(__name__)
372
+ _logger.debug("[TRAIN_MODULE] Module synth_ai.sdk.api.train.cli imported")
373
+
374
+ @click.command("train")
375
+ @click.argument(
376
+ "cfg_path",
377
+ required=False,
378
+ type=click.Path(exists=True, path_type=Path)
379
+ )
380
+ @click.option(
381
+ "--env",
382
+ "env_file",
383
+ type=click.Path(exists=True, path_type=Path),
384
+ help=".env file(s) to preload (skips selection prompt)",
385
+ )
386
+ @click.option(
387
+ "--task-url",
388
+ default=None,
389
+ help="Override task app base URL (RL only)"
390
+ )
391
+ @click.option(
392
+ "--dataset",
393
+ "dataset_path",
394
+ type=click.Path(),
395
+ default=None,
396
+ help="Override dataset JSONL path (SFT)",
397
+ )
398
+ @click.option("--model", default=None, help="Override model identifier")
399
+ @click.option(
400
+ "--allow-experimental",
401
+ "allow_experimental",
402
+ is_flag=True,
403
+ flag_value=True,
404
+ default=None,
405
+ help="Allow experimental models (overrides SDK_EXPERIMENTAL env)",
406
+ )
407
+ @click.option(
408
+ "--no-allow-experimental",
409
+ "allow_experimental",
410
+ is_flag=True,
411
+ flag_value=False,
412
+ help="Disallow experimental models (overrides SDK_EXPERIMENTAL env)",
413
+ )
414
+ @click.option("--idempotency", default=None, help="Idempotency-Key header for job creation")
415
+ @click.option("--dry-run", is_flag=True, hidden=True, help="Deprecated: no-op")
416
+ @click.option("--poll/--no-poll", default=True, help="Poll job status until terminal state")
417
+ @click.option(
418
+ "--poll-timeout", default=3600.0, type=float, help="Maximum seconds to poll before timing out"
419
+ )
420
+ @click.option("--poll-interval", default=5.0, type=float, help="Seconds between poll attempts")
421
+ @click.option(
422
+ "--stream-format",
423
+ type=click.Choice(["cli", "chart"]),
424
+ default="cli",
425
+ show_default=True,
426
+ help="Streaming output style (cli = line updates, chart = live loss panel)",
427
+ )
428
+ @click.option(
429
+ "--examples",
430
+ "examples_limit",
431
+ type=int,
432
+ default=None,
433
+ help="Limit SFT training to the first N examples",
434
+ )
435
+ @click.option(
436
+ "--backend",
437
+ "backend_override",
438
+ default=None,
439
+ help="Backend base URL (e.g., http://localhost:8000). Overrides BACKEND_BASE_URL env var.",
440
+ )
441
+ @click.option(
442
+ "--local-backend",
443
+ is_flag=True,
444
+ default=None,
445
+ help="Use local backend (localhost:8000). Overrides TOML [display].local_backend",
446
+ )
447
+ @click.option(
448
+ "--tui",
449
+ is_flag=True,
450
+ default=None,
451
+ help="Enable live TUI dashboard. Overrides TOML [display].tui",
452
+ )
453
+ @click.option(
454
+ "--show-curve",
455
+ is_flag=True,
456
+ default=None,
457
+ help="Show optimization curve at end. Overrides TOML [display].show_curve",
458
+ )
459
+ @click.option(
460
+ "--verbose-summary",
461
+ is_flag=True,
462
+ default=None,
463
+ help="Show detailed final summary. Overrides TOML [display].verbose_summary",
464
+ )
465
+ @click.option(
466
+ "--type",
467
+ "train_type_override",
468
+ type=click.Choice(["prompt", "rl", "sft", "adas", "context_learning"]),
469
+ default=None,
470
+ help="Explicitly set training type. Required for ADAS (uses JSON datasets).",
471
+ )
472
+ @click.option(
473
+ "--rollout-budget",
474
+ "rollout_budget",
475
+ type=int,
476
+ default=None,
477
+ help="Rollout budget for ADAS optimization (default: 100)",
478
+ )
479
+ @click.option(
480
+ "--proposer-effort",
481
+ "proposer_effort",
482
+ type=click.Choice(["low", "medium", "high"]),
483
+ default=None,
484
+ help="Proposer effort level for ADAS (default: medium)",
485
+ )
486
+ def train_command(
487
+ cfg_path: Path | None,
488
+ env_file: Path | None,
489
+ task_url: str | None,
490
+ dataset_path: str | None,
491
+ model: str | None,
492
+ allow_experimental: bool | None,
493
+ idempotency: str | None,
494
+ dry_run: bool,
495
+ poll: bool,
496
+ poll_timeout: float,
497
+ poll_interval: float,
498
+ stream_format: str,
499
+ examples_limit: int | None,
500
+ backend_override: str | None,
501
+ local_backend: bool | None,
502
+ tui: bool | None,
503
+ show_curve: bool | None,
504
+ verbose_summary: bool | None,
505
+ train_type_override: str | None,
506
+ rollout_budget: int | None,
507
+ proposer_effort: str | None,
508
+ ) -> None:
509
+
510
+ """Interactive launcher for RL / SFT / Prompt Learning / ADAS / Context Learning jobs."""
511
+ import traceback
512
+
513
+ ctx: dict[str, Any] = {
514
+ "cfg_path": str(cfg_path) if cfg_path else None,
515
+ "poll": poll,
516
+ "poll_timeout": poll_timeout,
517
+ "poll_interval": poll_interval,
518
+ "stream_format": stream_format,
519
+ "backend_override": backend_override,
520
+ }
521
+ log_info("train_command invoked", ctx=ctx)
522
+
523
+ # Wrap entire function in try-except to catch ALL exceptions
524
+ try:
525
+ # Log entry point IMMEDIATELY - this should always appear
526
+ sys.stderr.write("[TRAIN_CMD] Starting train command\n")
527
+ sys.stderr.flush()
528
+ click.echo(f"[TRAIN_CMD] Args: cfg_path={cfg_path}, poll={poll}", err=True)
529
+ click.echo(f"[TRAIN_CMD] Python executable: {sys.executable}", err=True)
530
+ click.echo(f"[TRAIN_CMD] Working directory: {os.getcwd()}", err=True)
531
+
532
+ try:
533
+ load_env_file()
534
+ click.echo("[TRAIN_CMD] Environment file loaded", err=True)
535
+ except Exception as e:
536
+ click.echo(f"[TRAIN_CMD] ERROR loading env file: {e}", err=True)
537
+ traceback.print_exc(file=sys.stderr)
538
+ raise
539
+
540
+ # CRITICAL: Load explicit .env file BEFORE config validation to ensure BACKEND_BASE_URL is available
541
+ if env_file and Path(env_file).exists():
542
+ from dotenv import load_dotenv
543
+ # Load with override=True to ensure BACKEND_BASE_URL from .env takes precedence
544
+ load_dotenv(Path(env_file), override=True)
545
+ click.echo(f"[TRAIN_CMD] Loaded explicit .env: {env_file}", err=True)
546
+
547
+ # Handle ADAS specially - it uses JSON datasets, not TOML configs
548
+ if train_type_override == "adas":
549
+ # For ADAS, dataset_path is required and cfg_path is ignored
550
+ if not dataset_path:
551
+ raise click.ClickException(
552
+ "ADAS requires --dataset flag with path to JSON dataset file.\n"
553
+ "Usage: synth-ai train --type adas --dataset my_tasks.json"
554
+ )
555
+ train_type = "adas"
556
+ click.echo(f"[TRAIN_CMD] ADAS mode: using dataset {dataset_path}", err=True)
557
+ else:
558
+ # Non-ADAS: use TOML config
559
+ if not cfg_path:
560
+ available_cfgs = find_train_cfgs_in_cwd()
561
+ if len(available_cfgs) == 1:
562
+ train_type, cfg_path_str, _ = available_cfgs[0]
563
+ cfg_path = Path(cfg_path_str)
564
+ print(f"Automatically selected {train_type} training config at", cfg_path)
565
+ else:
566
+ if len(available_cfgs) == 0:
567
+ print("No training config found in cwd.")
568
+ print("Validate your training config: synth-ai train-cfg check [CFG_PATH]")
569
+ else:
570
+ print("Multiple training configs found. Please specify which one to use:")
571
+ print_paths_formatted(available_cfgs)
572
+ print("Usage: synth-ai train --config [CFG_PATH]")
573
+ return None
574
+
575
+ train_type = train_type_override or validate_train_cfg(cfg_path)
576
+
577
+ synth_api_key, _ = get_synth_and_env_keys(env_file)
578
+
579
+ # Resolve backend URL with priority: --backend flag > BACKEND_BASE_URL env > default
580
+ if backend_override:
581
+ # CLI flag takes highest precedence
582
+ backend_base = ensure_api_base(backend_override.strip())
583
+ click.echo(f"Backend base: {backend_base} (from --backend flag)")
584
+ else:
585
+ # Check BACKEND_BASE_URL AFTER loading env file
586
+ backend_base_url_env = os.environ.get("BACKEND_BASE_URL", "").strip()
587
+ backend_override_env = os.environ.get("BACKEND_OVERRIDE", "").strip()
588
+
589
+ # Debug: Show what env vars are set
590
+ click.echo(f"🔍 DEBUG: BACKEND_BASE_URL={backend_base_url_env or '(not set)'}", err=True)
591
+ click.echo(f"🔍 DEBUG: BACKEND_OVERRIDE={backend_override_env or '(not set)'}", err=True)
592
+
593
+ # Use _default_backend() to respect BACKEND_BASE_URL env var
594
+ backend_raw = _default_backend()
595
+ click.echo(f"🔍 DEBUG: _default_backend() returned: {backend_raw}", err=True)
596
+ backend_base = ensure_api_base(backend_raw)
597
+
598
+ # Assertion: Validate backend URL is what we expect
599
+ if backend_base_url_env:
600
+ expected_backend = ensure_api_base(backend_base_url_env)
601
+ if backend_base != expected_backend:
602
+ raise click.ClickException(
603
+ f"Backend URL mismatch! Expected: {expected_backend}, Got: {backend_base}. "
604
+ f"BACKEND_BASE_URL={backend_base_url_env} but resolved to {backend_base}. "
605
+ f"This indicates BACKEND_BASE_URL is not being respected.\n"
606
+ f"💡 Solutions:\n"
607
+ f" 1. Add BACKEND_BASE_URL=http://localhost:8000 to your .env file\n"
608
+ f" 2. Use --backend http://localhost:8000 flag (requires package rebuild)\n"
609
+ f" 3. Set BACKEND_OVERRIDE=http://localhost:8000 in your shell\n"
610
+ f" 4. Set SYNTH_BACKEND_URL_OVERRIDE=local and LOCAL_BACKEND_URL=http://localhost:8000"
611
+ )
612
+
613
+ click.echo(f"Backend base: {backend_base} (key {mask_str(synth_api_key)})")
614
+ if backend_base_url_env:
615
+ click.echo(f" (from BACKEND_BASE_URL={backend_base_url_env})")
616
+
617
+ # Skip TOML-based validation for ADAS (uses JSON datasets)
618
+ if train_type != "adas" and cfg_path:
619
+ _validate_openai_key_if_provider_is_openai(cfg_path)
620
+
621
+ match train_type:
622
+ case "prompt":
623
+ if not cfg_path:
624
+ raise click.ClickException("Prompt Learning requires a TOML config file.")
625
+ handle_prompt_learning(
626
+ cfg_path=cfg_path,
627
+ backend_base=backend_base,
628
+ synth_key=synth_api_key,
629
+ task_url_override=task_url,
630
+ allow_experimental=allow_experimental,
631
+ dry_run=dry_run,
632
+ poll=poll,
633
+ poll_timeout=poll_timeout,
634
+ poll_interval=poll_interval,
635
+ stream_format=stream_format,
636
+ )
637
+ case "context_learning":
638
+ if not cfg_path:
639
+ raise click.ClickException(
640
+ "Context Learning requires a TOML config file.\n"
641
+ "Usage: synth-ai train --type context_learning --config my_context.toml"
642
+ )
643
+ handle_context_learning(
644
+ cfg_path=cfg_path,
645
+ backend_base=backend_base,
646
+ synth_key=synth_api_key,
647
+ poll=poll,
648
+ stream_format=stream_format,
649
+ )
650
+ case "rl":
651
+ if not cfg_path:
652
+ raise click.ClickException("RL requires a TOML config file.")
653
+ handle_rl(
654
+ cfg_path=cfg_path,
655
+ backend_base=backend_base,
656
+ synth_key=synth_api_key,
657
+ task_url_override=task_url,
658
+ model_override=model,
659
+ idempotency=idempotency,
660
+ allow_experimental=allow_experimental,
661
+ dry_run=dry_run,
662
+ poll=poll,
663
+ poll_timeout=poll_timeout,
664
+ poll_interval=poll_interval,
665
+ stream_format=stream_format,
666
+ )
667
+ case "sft":
668
+ if not cfg_path:
669
+ raise click.ClickException("SFT requires a TOML config file.")
670
+ dataset_override_path = Path(dataset_path).expanduser().resolve() if dataset_path else None
671
+ handle_sft(
672
+ cfg_path=cfg_path,
673
+ backend_base=backend_base,
674
+ synth_key=synth_api_key,
675
+ dataset_override=dataset_override_path,
676
+ allow_experimental=allow_experimental,
677
+ dry_run=dry_run,
678
+ poll=poll,
679
+ poll_timeout=poll_timeout,
680
+ poll_interval=poll_interval,
681
+ stream_format=stream_format,
682
+ examples_limit=examples_limit,
683
+ )
684
+ case "adas":
685
+ if not dataset_path:
686
+ raise click.ClickException("ADAS requires a dataset path.")
687
+ adas_dataset_path = Path(dataset_path).expanduser().resolve()
688
+ handle_adas(
689
+ dataset_path=adas_dataset_path,
690
+ backend_base=backend_base,
691
+ synth_key=synth_api_key,
692
+ policy_model=model,
693
+ rollout_budget=rollout_budget,
694
+ proposer_effort=proposer_effort,
695
+ poll=poll,
696
+ poll_timeout=poll_timeout,
697
+ poll_interval=poll_interval,
698
+ stream_format=stream_format,
699
+ )
700
+ except Exception as e:
701
+ ctx["error"] = type(e).__name__
702
+ log_error("train_command failed", ctx=ctx)
703
+ click.echo(f"[TRAIN_CMD] FATAL ERROR: {e}", err=True)
704
+ traceback.print_exc(file=sys.stderr)
705
+ raise
706
+ finally:
707
+ flush_logger()
708
+
709
+
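# Illustrative invocation sketch using Click's test runner; the flags mirror
# the options declared on train_command above. The config path and backend
# URL are placeholders, not values shipped with the package.
from click.testing import CliRunner

_runner = CliRunner()
_result = _runner.invoke(
    train_command,
    [
        "configs/prompt_learning.toml",  # hypothetical TOML config (must exist on disk)
        "--type", "prompt",
        "--backend", "http://localhost:8000",
        "--no-poll",
        "--stream-format", "cli",
    ],
)
print(_result.output)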
710
+ def handle_context_learning(
711
+ *,
712
+ cfg_path: Path,
713
+ backend_base: str,
714
+ synth_key: str,
715
+ poll: bool,
716
+ stream_format: str,
717
+ ) -> None:
718
+ """Submit and stream a Context Learning job.
719
+
720
+ Context Learning is SSE-first; polling flags are ignored.
721
+ """
722
+ if not poll:
723
+ click.echo("Note: --no-poll is ignored for context learning (SSE streaming only).")
724
+
725
+ click.echo("\n=== Submitting Context Learning Job ===")
726
+ try:
727
+ job = ContextLearningJob.from_config(
728
+ cfg_path,
729
+ backend_url=backend_base,
730
+ api_key=synth_key,
731
+ )
732
+ result = job.submit()
733
+ except Exception as e:
734
+ raise click.ClickException(str(e))
735
+
736
+ click.echo("\n✓ Job created:")
737
+ click.echo(f" Context Learning Job ID: {result.job_id}")
738
+ click.echo(f" Status: {result.status}")
739
+
740
+ click.echo("\n=== Streaming Job Progress ===")
741
+ if stream_format == "chart":
742
+ click.echo("Chart stream format is not supported for context learning; using CLI output.")
743
+
744
+ try:
745
+ final_status = job.stream_until_complete()
746
+ except Exception as e:
747
+ raise click.ClickException(str(e))
748
+
749
+ status = final_status.get("status") if isinstance(final_status, dict) else "unknown"
750
+ click.echo(f"\nFinal status: {status}")
751
+ click.echo(preview_json(final_status, limit=600))
752
+
753
+ if status in {"succeeded", "completed"}:
754
+ click.echo("\n=== Best Preflight Script ===")
755
+ try:
756
+ best = job.download_best_script()
757
+ if best.preflight_script:
758
+ click.echo(best.preflight_script[:2000])
759
+ if len(best.preflight_script) > 2000:
760
+ click.echo(
761
+ f"\n... (truncated, {len(best.preflight_script)} chars total)"
762
+ )
763
+ except Exception as e:
764
+ click.echo(f"⚠️ Could not download best script: {e}")
765
+
766
+
767
+ def _wait_for_training_file(
768
+ backend_base: str, api_key: str, file_id: str, *, timeout: float = 10.0
769
+ ) -> None:
770
+ """Wait for training file to be visible after upload.
771
+
772
+ Reduced from 120s to 10s because:
773
+ - POST response already confirms file is uploaded
774
+ - Backend now forces read-your-writes consistency
775
+ - By job creation time, replica lag has resolved
776
+ - Quick sanity check only, not critical path
777
+ """
778
+ url = f"{backend_base.rstrip('/')}/files/{file_id}"
779
+ headers = {"Authorization": f"Bearer {api_key}"}
780
+ elapsed = 0.0
781
+ interval = 2.0
782
+ first_check = True
783
+ while True:
784
+ resp = http_get(url, headers=headers, timeout=30.0)
785
+ if resp.status_code == 200:
786
+ try:
787
+ data = resp.json()
788
+ except json.JSONDecodeError:
789
+ data = {}
790
+ status = str(
791
+ data.get("status") or data.get("state") or data.get("storage_state") or "ready"
792
+ ).lower()
793
+ if first_check:
794
+ click.echo(f"File uploaded successfully (id={file_id}, status={status})")
795
+ first_check = False
796
+ if status in {"ready", "uploaded", "stored", "complete"}:
797
+ click.echo(f"✓ Training file ready (status={status})")
798
+ return
799
+ # Show progress for processing states
800
+ if status in {"processing", "pending", "validating"}:
801
+ click.echo(
802
+ f" Waiting for file processing... (status={status}, {elapsed:.0f}s elapsed)"
803
+ )
804
+ elif resp.status_code == 404:
805
+ # Keep polling; object may not be visible yet
806
+ if first_check:
807
+ click.echo(f"Waiting for file {file_id} to become visible...")
808
+ first_check = False
809
+ elif resp.status_code in {401, 403}:
810
+ # Auth errors won't resolve by polling - fail immediately
811
+ try:
812
+ error_body = resp.json()
813
+ except json.JSONDecodeError:
814
+ error_body = resp.text[:400]
815
+ click.echo("\n[ERROR] Authentication failed when checking training file:")
816
+ click.echo(f" URL: {url}")
817
+ click.echo(f" Status: {resp.status_code}")
818
+ click.echo(f" Response: {error_body}")
819
+ click.echo(f" API key: {mask_value(api_key)}")
820
+ raise click.ClickException(
821
+ f"Authentication error ({resp.status_code}). "
822
+ "Check that your SYNTH_API_KEY is valid and has permission to access this organization's files."
823
+ )
824
+ else:
825
+ # Other errors - show details but keep polling
826
+ try:
827
+ error_body = resp.json()
828
+ except json.JSONDecodeError:
829
+ error_body = resp.text[:400]
830
+ click.echo(f"[WARN] Unexpected response checking file {file_id}:")
831
+ click.echo(f" URL: {url}")
832
+ click.echo(f" Status: {resp.status_code}")
833
+ click.echo(f" Response: {error_body}")
834
+
835
+ if elapsed >= timeout:
836
+ raise click.ClickException(
837
+ f"Training file {file_id} not ready after {timeout:.0f}s (last status: {resp.status_code})"
838
+ )
839
+ sleep(interval)
840
+ elapsed += interval
841
+
842
+
843
+ def handle_rl(
844
+ *,
845
+ cfg_path: Path,
846
+ backend_base: str,
847
+ synth_key: str,
848
+ task_url_override: str | None,
849
+ model_override: str | None,
850
+ idempotency: str | None,
851
+ allow_experimental: bool | None,
852
+ dry_run: bool,
853
+ poll: bool,
854
+ poll_timeout: float,
855
+ poll_interval: float,
856
+ stream_format: str,
857
+ ) -> None:
858
+ ctx: dict[str, Any] = {
859
+ "cfg_path": str(cfg_path),
860
+ "backend_base": backend_base,
861
+ "task_url_override": task_url_override,
862
+ "poll": poll,
863
+ }
864
+ log_info("handle_rl invoked", ctx=ctx)
865
+ overrides: dict[str, Any] = {
866
+ "backend": backend_base,
867
+ "task_url": task_url_override,
868
+ "model": model_override,
869
+ }
870
+ build = build_rl_payload(
871
+ config_path=cfg_path,
872
+ task_url=task_url_override or os.environ.get("TASK_APP_URL", ""),
873
+ overrides=overrides,
874
+ idempotency=idempotency,
875
+ allow_experimental=allow_experimental,
876
+ )
877
+
878
+ # Backend-side verification: try ALL org environment keys against /health and /task_info
879
+ verify_url = f"{backend_base}/rl/verify_task_app"
880
+ verify_headers = {"Authorization": f"Bearer {synth_key}", "Content-Type": "application/json"}
881
+ try:
882
+ vresp = http_post(
883
+ verify_url, headers=verify_headers, json_body={"endpoint_base_url": build.task_url}
884
+ )
885
+ try:
886
+ parsed_json = vresp.json()
887
+ except json.JSONDecodeError:
888
+ parsed_json = None
889
+
890
+ if isinstance(parsed_json, Mapping):
891
+ vjs: dict[str, Any] = dict(parsed_json)
892
+ else:
893
+ vjs = {
894
+ "status": vresp.status_code,
895
+ "text": (vresp.text or "")[:400],
896
+ }
897
+ if parsed_json is not None:
898
+ vjs["body"] = parsed_json
899
+ except Exception as _ve:
900
+ raise click.ClickException(
901
+ f"Task app verification call failed: {type(_ve).__name__}: {_ve}"
902
+ ) from _ve
903
+ if vresp.status_code is not None and vresp.status_code >= 400:
904
+ click.echo("Task app verification error:\n" + preview_json(vjs, limit=800))
905
+ raise click.ClickException(f"Verification failed with status {vresp.status_code}")
906
+ if not bool(vjs.get("any_ok")):
907
+ click.echo("Task app verification failed; no auth combination succeeded. Full report:")
908
+ click.echo(preview_json(vjs, limit=1200))
909
+ raise click.ClickException("Task app verification failed (auth)")
910
+ else:
911
+ # Print concise summary
912
+ try:
913
+ cands = vjs.get("candidates_first15") or []
914
+ attempts_raw = vjs.get("attempts")
915
+ attempts: list[Mapping[str, Any]] = (
916
+ [a for a in attempts_raw if isinstance(a, Mapping)]
917
+ if isinstance(attempts_raw, list)
918
+ else []
919
+ )
920
+ statuses = [attempt.get("status") for attempt in attempts]
921
+ click.echo(f"Verification OK (candidates={cands}, statuses={statuses})")
922
+ except (KeyError, ValueError, AttributeError):
923
+ # Parsing verification summary failed, but verification itself succeeded
924
+ click.echo("Verification OK")
925
+
926
+ env_key = get_required_value(
927
+ "environment_api_key",
928
+ env_value=os.environ.get("ENVIRONMENT_API_KEY"),
929
+ )
930
+ os.environ["ENVIRONMENT_API_KEY"] = env_key
931
+
932
+ click.echo("Performing task app health check…")
933
+ health = check_task_app_health(build.task_url, env_key)
934
+ if not health.ok:
935
+ click.echo(f"Task app health check failed: {health.detail}")
936
+ raise click.ClickException("Aborting due to failing health check")
937
+ else:
938
+ click.echo("Task app healthy")
939
+
940
+ create_url = f"{backend_base}/rl/jobs"
941
+ headers = {"Authorization": f"Bearer {synth_key}", "Content-Type": "application/json"}
942
+ if build.idempotency:
943
+ headers["Idempotency-Key"] = build.idempotency
944
+
945
+ click.echo(f"POST {create_url}")
946
+ click.echo("Payload preview:\n" + preview_json(build.payload, limit=800))
947
+
948
+ resp = http_post(create_url, headers=headers, json_body=build.payload)
949
+ try:
950
+ js = resp.json()
951
+ except json.JSONDecodeError as e:
952
+ click.echo(f"⚠️ Failed to parse JSON response: {e}")
953
+ js = {"status": resp.status_code, "text": resp.text[:400]}
954
+ click.echo(f"Response {resp.status_code}: {preview_json(js, limit=400)}")
955
+ if resp.status_code not in (200, 201):
956
+ raise click.ClickException("Job creation failed")
957
+ job_id = js.get("job_id") or js.get("id")
958
+ if not job_id:
959
+ raise click.ClickException("Response missing job id")
960
+
961
+ if not poll:
962
+ click.echo(f"Created job {job_id} (polling disabled)")
963
+ return
964
+
965
+ click.echo("\n=== Streaming Job Progress ===")
966
+
967
+ # Enable metrics for prompt learning
968
+ if stream_format == "chart":
969
+ config = StreamConfig(
970
+ enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
971
+ event_types={
972
+ "prompt.learning.progress",
973
+ "prompt.learning.gepa.start",
974
+ "prompt.learning.gepa.complete",
975
+ },
976
+ metric_names={"gepa.transformation.mean_score"},
977
+ )
978
+ handlers = [LossCurveHandler()]
979
+ click.echo("Using live chart (metric=gepa.transformation.mean_score)")
980
+ else:
981
+ config = StreamConfig(
982
+ enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
983
+ metric_names={"gepa.transformation.mean_score"},
984
+ )
985
+ handlers = [CLIHandler(hidden_event_substrings=_DEFAULT_RL_HIDDEN_SUBSTRINGS)]
986
+
987
+ streamer = JobStreamer(
988
+ base_url=backend_base,
989
+ api_key=synth_key,
990
+ job_id=job_id,
991
+ endpoints=StreamEndpoints.rl(job_id),
992
+ config=config,
993
+ handlers=handlers,
994
+ interval_seconds=poll_interval,
995
+ timeout_seconds=poll_timeout,
996
+ )
997
+ final_status = asyncio.run(streamer.stream_until_terminal())
998
+ click.echo(f"Final status: {final_status.get('status', 'unknown')}")
999
+ click.echo(preview_json(final_status, limit=600))
1000
+
1001
+
1002
+ def handle_sft(
1003
+ *,
1004
+ cfg_path: Path,
1005
+ backend_base: str,
1006
+ synth_key: str,
1007
+ dataset_override: Path | None,
1008
+ allow_experimental: bool | None,
1009
+ dry_run: bool,
1010
+ poll: bool,
1011
+ poll_timeout: float,
1012
+ poll_interval: float,
1013
+ stream_format: str,
1014
+ examples_limit: int | None,
1015
+ ) -> None:
1016
+ ctx: dict[str, Any] = {
1017
+ "cfg_path": str(cfg_path),
1018
+ "backend_base": backend_base,
1019
+ "dataset_override": str(dataset_override) if dataset_override else None,
1020
+ "poll": poll,
1021
+ }
1022
+ log_info("handle_sft invoked", ctx=ctx)
1023
+ try:
1024
+ build = build_sft_payload(
1025
+ config_path=cfg_path,
1026
+ dataset_override=dataset_override,
1027
+ allow_experimental=allow_experimental,
1028
+ )
1029
+ except TrainError as exc:
1030
+ _raise_sft_usage_error(exc)
1031
+
1032
+ limited_path: Path | None = None
1033
+
1034
+ try:
1035
+ if examples_limit is not None:
1036
+ limited_path = limit_jsonl_examples(build.train_file, examples_limit)
1037
+ click.echo(
1038
+ f"Using first {examples_limit} examples from {build.train_file} -> {limited_path}"
1039
+ )
1040
+ build.train_file = limited_path
1041
+
1042
+ click.echo("Validating training dataset…")
1043
+ validate_sft_jsonl(build.train_file)
1044
+ if build.validation_file and build.validation_file.suffix == ".jsonl":
1045
+ click.echo("Validating validation dataset…")
1046
+ validate_sft_jsonl(build.validation_file)
1047
+
1048
+ upload_url = f"{backend_base.rstrip('/')}/files"
1049
+ click.echo("\n=== Uploading Training Data ===")
1050
+ click.echo(f"Dataset: {build.train_file}")
1051
+ click.echo(f"Destination: {upload_url}")
1052
+ resp = post_multipart(
1053
+ upload_url, api_key=synth_key, file_field="file", file_path=build.train_file
1054
+ )
1055
+ js = (
1056
+ resp.json()
1057
+ if resp.headers.get("content-type", "").startswith("application/json")
1058
+ else {}
1059
+ )
1060
+ if (resp.status_code is not None and resp.status_code >= 400) or "id" not in js:
1061
+ click.echo("\n[ERROR] Training file upload failed:")
1062
+ click.echo(f" URL: {upload_url}")
1063
+ click.echo(f" Status: {resp.status_code}")
1064
+ click.echo(f" Response: {js or resp.text[:400]}")
1065
+ click.echo(f" File: {build.train_file}")
1066
+ raise click.ClickException(
1067
+ f"Training file upload failed with status {resp.status_code}"
1068
+ )
1069
+ train_file_id = js["id"]
1070
+ click.echo(f"✓ Training file uploaded (id={train_file_id})")
1071
+ val_file_id = None
1072
+ if build.validation_file:
1073
+ click.echo(f"Uploading validation dataset: {build.validation_file}")
1074
+ vresp = post_multipart(
1075
+ upload_url,
1076
+ api_key=synth_key,
1077
+ file_field="file",
1078
+ file_path=build.validation_file,
1079
+ )
1080
+ vjs = (
1081
+ vresp.json()
1082
+ if vresp.headers.get("content-type", "").startswith("application/json")
1083
+ else {}
1084
+ )
1085
+ if vresp.status_code is not None and vresp.status_code < 400 and "id" in vjs:
1086
+ val_file_id = vjs["id"]
1087
+ click.echo(f"✓ Validation file uploaded (id={val_file_id})")
1088
+ else:
1089
+ click.echo(
1090
+ f"[WARN] Validation upload failed ({vresp.status_code}): {vjs or vresp.text[:200]}"
1091
+ )
1092
+ payload = dict(build.payload)
1093
+ payload["training_file_id"] = train_file_id
1094
+ if val_file_id:
1095
+ payload.setdefault("metadata", {}).setdefault("effective_config", {}).setdefault(
1096
+ "data", {}
1097
+ )["validation_files"] = [val_file_id]
1098
+
1099
+ click.echo("\n=== Checking File Processing Status ===")
1100
+ try:
1101
+ _wait_for_training_file(backend_base, synth_key, train_file_id)
1102
+ except click.ClickException as exc:
1103
+ click.echo(f"[WARN] File readiness check failed: {exc}")
1104
+ click.echo("Proceeding anyway - backend will validate file during job creation...")
1105
+
1106
+ click.echo("\n=== Creating Training Job ===")
1107
+ click.echo("Job payload preview:")
1108
+ click.echo(preview_json(payload, limit=800))
1109
+
1110
+ create_url = f"{backend_base}/learning/jobs"
1111
+ headers = {"Authorization": f"Bearer {synth_key}", "Content-Type": "application/json"}
1112
+ click.echo(f"\nPOST {create_url}")
1113
+ resp = http_post(create_url, headers=headers, json_body=payload)
1114
+ js = (
1115
+ resp.json()
1116
+ if resp.headers.get("content-type", "").startswith("application/json")
1117
+ else {}
1118
+ )
1119
+ if resp.status_code not in (200, 201):
1120
+ click.echo("\n[ERROR] Job creation failed:")
1121
+ click.echo(f" URL: {create_url}")
1122
+ click.echo(f" Status: {resp.status_code}")
1123
+ click.echo(f" Response: {preview_json(js, limit=600)}")
1124
+ raise click.ClickException(f"Job creation failed with status {resp.status_code}")
1125
+ job_id = js.get("job_id") or js.get("id")
1126
+ if not job_id:
1127
+ raise click.ClickException("Response missing job id")
1128
+ click.echo(f"✓ Job created (id={job_id})")
1129
+
1130
+ click.echo("\n=== Starting Training Job ===")
1131
+ start_url = f"{backend_base}/learning/jobs/{job_id}/start"
1132
+ click.echo(f"POST {start_url}")
1133
+ start_resp = http_post(start_url, headers=headers, json_body={})
1134
+ if start_resp.status_code not in (200, 201):
1135
+ click.echo(f"[WARN] Job start returned status {start_resp.status_code}")
1136
+ else:
1137
+ click.echo("✓ Job started")
1138
+
1139
+ if not poll:
1140
+ click.echo(f"Started job {job_id} (polling disabled)")
1141
+ return
1142
+
1143
+ click.echo("\n=== Streaming Job Progress ===")
1144
+ config, handlers = _build_stream_components(
1145
+ stream_format, hidden_event_types=_DEFAULT_SFT_HIDDEN_EVENTS
1146
+ )
1147
+ if stream_format == "chart":
1148
+ click.echo("Using live loss chart (metric=train.loss)")
1149
+ streamer = JobStreamer(
1150
+ base_url=backend_base,
1151
+ api_key=synth_key,
1152
+ job_id=job_id,
1153
+ endpoints=StreamEndpoints.learning(job_id),
1154
+ config=config,
1155
+ handlers=handlers,
1156
+ interval_seconds=poll_interval,
1157
+ timeout_seconds=poll_timeout,
1158
+ )
1159
+ final_status = asyncio.run(streamer.stream_until_terminal())
1160
+ status = final_status.get('status') if isinstance(final_status, dict) else 'unknown'
1161
+ click.echo(f"Final status: {status}")
1162
+ click.echo(preview_json(final_status, limit=600))
1163
+ finally:
1164
+ if limited_path is not None:
1165
+ with contextlib.suppress(OSError):
1166
+ limited_path.unlink(missing_ok=True)
1167
+ # Clean up empty parent directory if possible
1168
+ with contextlib.suppress(OSError):
1169
+ limited_path.parent.rmdir()
1170
+
1171
+
1172
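For orientation, the sketch below replays the create-then-start sequence above using plain httpx instead of the module's http_post helper. The backend URL, API key, and file id are placeholders, and the real command also merges the rest of build.payload and then streams progress with JobStreamer.

    import httpx

    BACKEND_BASE = "https://backend.example.com/api"  # placeholder; must already include /api
    SYNTH_API_KEY = "sk-..."                          # placeholder API key
    TRAIN_FILE_ID = "file-123"                        # id returned by the earlier upload step

    headers = {
        "Authorization": f"Bearer {SYNTH_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {"training_file_id": TRAIN_FILE_ID}     # the CLI merges this into build.payload

    with httpx.Client(timeout=60.0) as client:
        # 1. Create the learning job.
        created = client.post(f"{BACKEND_BASE}/learning/jobs", headers=headers, json=payload)
        created.raise_for_status()
        js = created.json()
        job_id = js.get("job_id") or js.get("id")

        # 2. Start it; the CLI then polls status/events until a terminal state.
        started = client.post(f"{BACKEND_BASE}/learning/jobs/{job_id}/start", headers=headers, json={})
        started.raise_for_status()
        print(f"started job {job_id}")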
+ def handle_adas(
1173
+ *,
1174
+ dataset_path: Path,
1175
+ backend_base: str,
1176
+ synth_key: str,
1177
+ policy_model: str | None,
1178
+ rollout_budget: int | None,
1179
+ proposer_effort: str | None,
1180
+ poll: bool,
1181
+ poll_timeout: float,
1182
+ poll_interval: float,
1183
+ stream_format: str,
1184
+ ) -> None:
1185
+ """Handle ADAS workflow optimization job creation and streaming.
1186
+
1187
+ ADAS uses JSON dataset files and auto-generates task apps.
1188
+ """
1189
+ ctx: dict[str, Any] = {
1190
+ "dataset_path": str(dataset_path),
1191
+ "backend_base": backend_base,
1192
+ "poll": poll,
1193
+ }
1194
+ log_info("handle_adas invoked", ctx=ctx)
1195
+
1196
+ # Load dataset
1197
+ click.echo(f"Loading ADAS dataset from: {dataset_path}")
1198
+ try:
1199
+ dataset = load_graphgen_taskset(dataset_path)
1200
+ except FileNotFoundError:
1201
+ raise click.ClickException(f"Dataset file not found: {dataset_path}")
1202
+ except ValueError as e:
1203
+ raise click.ClickException(f"Invalid ADAS dataset format: {e}")
1204
+
1205
+ click.echo(f"Dataset loaded: {dataset.metadata.name}")
1206
+ click.echo(f" Tasks: {len(dataset.tasks)}")
1207
+ click.echo(f" Gold outputs: {len(dataset.gold_outputs)}")
1208
+ click.echo(f" Judge mode: {dataset.judge_config.mode}")
1209
+
1210
+ # Create ADAS job
1211
+ job = GraphGenJob.from_dataset(
1212
+ dataset=dataset,
1213
+ policy_model=policy_model or "gpt-4o-mini",
1214
+ rollout_budget=rollout_budget or 100,
1215
+ proposer_effort=proposer_effort or "medium", # type: ignore
1216
+ backend_url=backend_base,
1217
+ api_key=synth_key,
1218
+ auto_start=True,
1219
+ )
1220
+
1221
+ click.echo("\n=== Submitting ADAS Job ===")
1222
+ click.echo(f"Policy model: {job.config.policy_model}")
1223
+ click.echo(f"Rollout budget: {job.config.rollout_budget}")
1224
+ click.echo(f"Proposer effort: {job.config.proposer_effort}")
1225
+
1226
+ try:
1227
+ result = job.submit()
1228
+ except RuntimeError as e:
1229
+ raise click.ClickException(str(e))
1230
+
1231
+ click.echo(f"\n✓ Job created:")
1232
+ click.echo(f" ADAS Job ID: {result.graphgen_job_id}")
1233
+ click.echo(f" Status: {result.status}")
1234
+
1235
+ if not poll:
1236
+ click.echo(f"\nCreated job {result.graphgen_job_id} (polling disabled)")
1237
+ return
1238
+
1239
+ click.echo("\n=== Streaming Job Progress ===")
1240
+
1241
+ # Build stream handlers
1242
+ if stream_format == "chart":
1243
+ config = StreamConfig(
1244
+ enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
1245
+ metric_names={"gepa.transformation.mean_score"},
1246
+ )
1247
+ handlers = [LossCurveHandler()]
1248
+ click.echo("Using live loss chart (metric=gepa.transformation.mean_score)")
1249
+ else:
1250
+ config = StreamConfig(
1251
+ enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
1252
+ max_events_per_poll=500,
1253
+ deduplicate=True,
1254
+ )
1255
+ handlers = [GraphGenHandler()]
1256
+
1257
+ # Stream until complete
1258
+ try:
1259
+ final_status = job.stream_until_complete(
1260
+ timeout=poll_timeout,
1261
+ interval=poll_interval,
1262
+ handlers=handlers,
1263
+ )
1264
+ except TimeoutError as e:
1265
+ raise click.ClickException(str(e))
1266
+
1267
+ status = final_status.get('status') if isinstance(final_status, dict) else 'unknown'
1268
+ click.echo(f"\nFinal status: {status}")
1269
+ click.echo(preview_json(final_status, limit=600))
1270
+
1271
+ # Download and display best prompt if succeeded
1272
+ if status == "succeeded" or status == "completed":
1273
+ click.echo("\n=== Best Optimized Prompt ===")
1274
+ try:
1275
+ prompt = job.download_prompt()
1276
+ if prompt:
1277
+ click.echo(prompt[:2000])
1278
+ if len(prompt) > 2000:
1279
+ click.echo(f"\n... (truncated, {len(prompt)} chars total)")
1280
+ except Exception as e:
1281
+ click.echo(f"⚠️ Could not download prompt: {e}")
1282
+
1283
+
1284
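A rough sketch of the ADAS flow handled above, assuming the module-level imports this file already uses (load_graphgen_taskset, GraphGenJob, GraphGenHandler are imported outside this hunk); the dataset path, backend URL, and key are placeholders.

    from pathlib import Path

    # load_graphgen_taskset, GraphGenJob, GraphGenHandler come from this module's
    # imports, which sit outside this hunk.
    dataset = load_graphgen_taskset(Path("datasets/my_taskset.json"))  # placeholder path

    job = GraphGenJob.from_dataset(
        dataset=dataset,
        policy_model="gpt-4o-mini",   # same default the command falls back to
        rollout_budget=100,
        proposer_effort="medium",
        backend_url="https://backend.example.com/api",  # placeholder
        api_key="sk-...",                               # placeholder
        auto_start=True,
    )
    result = job.submit()
    final = job.stream_until_complete(timeout=3600.0, interval=5.0, handlers=[GraphGenHandler()])
    status = final.get("status") if isinstance(final, dict) else "unknown"
    print(result.graphgen_job_id, status)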
+ def _raise_sft_usage_error(exc: TrainError) -> NoReturn:
1285
+ message = str(exc).strip()
1286
+ lower_msg = message.lower()
1287
+ context = "Preparing SFT training job payload"
1288
+ impact = "Cannot submit training job without a valid dataset path"
1289
+
1290
+ if "dataset not specified" in lower_msg:
1291
+ raise click.UsageError(
1292
+ format_error_message(
1293
+ summary="Dataset path required",
1294
+ context=context,
1295
+ problem="No dataset path was provided via config or CLI",
1296
+ impact=impact,
1297
+ solutions=[
1298
+ ("Add [job].data = \"/path/to/data.jsonl\" to the config", "Persist the dataset path in the TOML file"),
1299
+ ("Re-run with --dataset /path/to/data.jsonl", "Override the dataset path from the CLI"),
1300
+ ("Use an absolute path accessible from the current working directory", "Relative paths are resolved from the shell cwd"),
1301
+ ],
1302
+ )
1303
+ ) from exc
1304
+
1305
+ if "dataset not found" in lower_msg:
1306
+ raise click.UsageError(
1307
+ format_error_message(
1308
+ summary="Dataset path not found",
1309
+ context=context,
1310
+ problem=message,
1311
+ impact=impact,
1312
+ solutions=[
1313
+ ("Verify the dataset path exists on disk", "Double-check spelling and that the file hasn't moved"),
1314
+ ("Provide an absolute path to the dataset file", "Avoid relying on relative paths that resolve incorrectly"),
1315
+ ("Sync the dataset to this machine before running the CLI", "Remote paths must be accessible locally"),
1316
+ ],
1317
+ )
1318
+ ) from exc
1319
+
1320
+ raise click.ClickException(message) from exc
1321
+
1322
+
1323
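A hypothetical caller sketch for the helper above: prepare_payload is a stand-in (not a real function in this module) that raises TrainError with one of the messages the helper matches, assuming TrainError accepts a plain message string.

    def prepare_payload(dataset: str | None) -> dict:
        # Stand-in for whatever builds the SFT payload in the real command.
        if not dataset:
            raise TrainError("Dataset not specified")  # lowercases to "dataset not specified"
        return {"training_file": dataset}

    try:
        payload = prepare_payload(None)
    except TrainError as exc:
        _raise_sft_usage_error(exc)  # raises click.UsageError with the solutions listed above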
+ def _save_verbose_log_file(
1324
+ events: list[dict[str, Any]],
1325
+ log_file: Path,
1326
+ algorithm_name: str,
1327
+ job_id: str,
1328
+ append_summary: bool = False,
1329
+ ) -> None:
1330
+ """Save a verbose log file with all events in chronological order, including summary.
1331
+
1332
+ If append_summary is True, only append the summary section (events were already streamed live).
1333
+ """
1334
+ import json
1335
+ from datetime import datetime
1336
+
1337
+ try:
1338
+ lines = []
1339
+ if not append_summary:
1340
+ # Full log file with header and all events
1341
+ lines.append("=" * 80)
1342
+ lines.append(f"{algorithm_name} PROMPT LEARNING VERBOSE LOG")
1343
+ lines.append("=" * 80)
1344
+ lines.append(f"Job ID: {job_id}")
1345
+ lines.append(f"Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
1346
+ lines.append(f"Total Events: {len(events)}")
1347
+ lines.append("=" * 80)
1348
+ lines.append("")
1349
+
1350
+ # Sort events by timestamp if available
1351
+ def get_timestamp(event: dict[str, Any]) -> str:
1352
+ return event.get("timestamp", event.get("created_at", ""))
1353
+
1354
+ sorted_events = sorted(events, key=get_timestamp)
1355
+
1356
+ # Only include events if not appending summary (events were already streamed live)
1357
+ if not append_summary:
1358
+ for idx, event in enumerate(sorted_events, 1):
1359
+ if not isinstance(event, dict):
1360
+ continue
1361
+
1362
+ event_type = event.get("type", "unknown")
1363
+ timestamp = event.get("timestamp") or event.get("created_at", "")
1364
+ level = event.get("level", "info")
1365
+ message = event.get("message", "")
1366
+ data = event.get("data", {})
1367
+
1368
+ lines.append(f"[{idx}] {timestamp} [{level.upper()}] {event_type}")
1369
+ if message:
1370
+ lines.append(f" Message: {message}")
1371
+ if data:
1372
+ # Format data nicely (truncate very long values)
1373
+ formatted_data = {}
1374
+ for key, value in data.items():
1375
+ if isinstance(value, dict | list):
1376
+ # Convert to JSON string, truncate if too long
1377
+ json_str = json.dumps(value, indent=2)
1378
+ if len(json_str) > 1000:
1379
+ json_str = json_str[:1000] + "... (truncated)"
1380
+ formatted_data[key] = json_str
1381
+ elif isinstance(value, str) and len(value) > 500:
1382
+ formatted_data[key] = value[:500] + "... (truncated)"
1383
+ else:
1384
+ formatted_data[key] = value
1385
+
1386
+ if formatted_data:
1387
+ lines.append(f" Data: {json.dumps(formatted_data, indent=2)}")
1388
+ lines.append("")
1389
+
1390
+ # Add summary table and chart at the end (always included)
1391
+ if append_summary:
1392
+ lines.append("\n\n")
1393
+ lines.append("=" * 80)
1394
+ lines.append("FINAL SUMMARY")
1395
+ lines.append("=" * 80)
1396
+
1397
+ try:
1398
+ from .summary import _generate_summary_text
1399
+ # Extract optimization curve from events
1400
+ optimization_curve = None
1401
+ trial_scores = []
1402
+ for event in sorted_events:
1403
+ if isinstance(event, dict):
1404
+ event_type = event.get("type", "")
1405
+ if event_type in ("prompt.learning.trial.complete", "mipro.new_incumbent"):
1406
+ data = event.get("data", {})
1407
+ trial_num = data.get("trial") or data.get("trial_num")
1408
+ score = data.get("score") or data.get("minibatch_score")
1409
+ if trial_num is not None and score is not None:
1410
+ trial_scores.append((trial_num, score))
1411
+
1412
+ if trial_scores:
1413
+ best_so_far = {}
1414
+ for trial_num, score in sorted(trial_scores):
1415
+ if trial_num not in best_so_far or score > best_so_far[trial_num]:
1416
+ best_so_far[trial_num] = score
1417
+ optimization_curve = sorted(best_so_far.items())
1418
+
1419
+ summary_text, curve_text = _generate_summary_text(
1420
+ events=sorted_events,
1421
+ algorithm=algorithm_name.lower() if algorithm_name else None,
1422
+ optimization_curve=optimization_curve,
1423
+ )
1424
+ if summary_text:
1425
+ lines.append(summary_text)
1426
+ if curve_text:
1427
+ lines.append("")
1428
+ lines.append(curve_text)
1429
+ except Exception as e:
1430
+ lines.append(f"⚠️ Could not generate summary: {e}")
1431
+
1432
+ lines.append("=" * 80)
1433
+ lines.append("END OF LOG")
1434
+ lines.append("=" * 80)
1435
+
1436
+ # Write to file (append if summary-only mode)
1437
+ mode = "a" if append_summary else "w"
1438
+ with open(log_file, mode, encoding="utf-8") as f:
1439
+ if append_summary:
1440
+ f.write("\n")
1441
+ f.write("\n".join(lines))
1442
+
1443
+ except Exception as e:
1444
+ click.echo(f"⚠️ Could not save verbose log file: {e}")
1445
+
1446
+
1447
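A minimal usage sketch for the helper above, with two synthetic events that mirror the keys it reads (type, timestamp, level, message, data); the output path and job id are placeholders.

    from pathlib import Path

    events = [
        {"type": "prompt.learning.gepa.start", "timestamp": "2025-01-01T00:00:00Z",
         "level": "info", "message": "starting", "data": {"generation": 0}},
        {"type": "prompt.learning.trial.complete", "timestamp": "2025-01-01T00:05:00Z",
         "level": "info", "message": "trial done", "data": {"trial": 1, "score": 0.42}},
    ]
    _save_verbose_log_file(
        events,
        Path("results/gepa_log_job-123_manual.log"),  # placeholder path
        algorithm_name="GEPA",
        job_id="job-123",
        append_summary=False,  # full log: header plus every event
    )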
+ def _save_prompt_learning_results_locally(
1448
+ *,
1449
+ backend_base: str,
1450
+ api_key: str,
1451
+ job_id: str,
1452
+ config_path: Path,
1453
+ results_folder: Path,
1454
+ ) -> None:
1455
+ """Fetch events and generate results file locally after prompt learning completes."""
1456
+ from datetime import datetime
1457
+
1458
+ try:
1459
+ # Fetch all events
1460
+ url = f"{backend_base}/prompt-learning/online/jobs/{job_id}/events?limit={_RESULTS_FILE_MAX_EVENTS}"
1461
+ headers = {"Authorization": f"Bearer {api_key}"}
1462
+ resp = http_get(url, headers=headers, timeout=30.0)
1463
+
1464
+ if resp.status_code != 200:
1465
+ click.echo(f"⚠️ Could not fetch events to generate results file (status={resp.status_code})")
1466
+ return
1467
+
1468
+ data = resp.json()
1469
+ # Handle both list response (backend) and dict response (legacy compatibility)
1470
+ if isinstance(data, list):
1471
+ events = data
1472
+ elif isinstance(data, dict):
1473
+ events = data.get("events", [])
1474
+ if not isinstance(events, list):
1475
+ click.echo(f"⚠️ Events field is not a list: {type(events).__name__}")
1476
+ return
1477
+ else:
1478
+ click.echo(f"⚠️ Unexpected response type: {type(data).__name__}")
1479
+ return
1480
+
1481
+ if not events:
1482
+ return
1483
+
1484
+ # Extract key data from events
1485
+ best_score = None
1486
+ best_prompt = None
1487
+ baseline_score = None
1488
+ attempted_candidates = []
1489
+ optimized_candidates = []
1490
+ mipro_topk_candidates = [] # Collect MIPRO top-K candidates
1491
+ proposed_instructions = [] # Collect proposed instructions from MIPRO
1492
+ proposed_transformations = [] # Collect proposed transformations from GEPA
1493
+
1494
+ for event in events:
1495
+ if not isinstance(event, dict):
1496
+ continue # Skip malformed events
1497
+
1498
+ event_type = event.get("type", "")
1499
+ event_data = event.get("data", {})
1500
+ if not isinstance(event_data, dict):
1501
+ event_data = {} # Fallback to empty dict for safety
1502
+
1503
+ if event_type == _PROMPT_LEARNING_EVENT_BEST_PROMPT:
1504
+ best_score = event_data.get("best_score")
1505
+ best_prompt = event_data.get("best_prompt")
1506
+ elif event_type == _PROMPT_LEARNING_EVENT_FINAL_RESULTS:
1507
+ attempted_candidates = event_data.get("attempted_candidates", [])
1508
+ optimized_candidates = event_data.get("optimized_candidates", [])
1509
+ elif event_type == _PROMPT_LEARNING_EVENT_VALIDATION_SCORED:
1510
+ # Check if this is the baseline by checking for is_baseline flag or baseline in message
1511
+ is_baseline = event_data.get("is_baseline", False)
1512
+ if not is_baseline:
1513
+ msg = event.get("message", "")
1514
+ is_baseline = "baseline" in msg.lower()
1515
+ if is_baseline:
1516
+ baseline_score = event_data.get("accuracy")
1517
+ elif event_type == _PROMPT_LEARNING_EVENT_GEPA_COMPLETE and best_score is None:
1518
+ best_score = event_data.get("best_score")
1519
+ elif event_type == _PROMPT_LEARNING_EVENT_MIPRO_COMPLETE:
1520
+ # MIPRO completion event includes best_prompt and best_score
1521
+ if best_score is None:
1522
+ best_score = event_data.get("best_score")
1523
+ if best_prompt is None:
1524
+ best_prompt = event_data.get("best_prompt")
1525
+ elif event_type == "mipro.topk.evaluated":
1526
+ # Extract MIPRO top-K candidate data with full details
1527
+ rank = event_data.get("rank")
1528
+ train_score = event_data.get("train_score")
1529
+ test_score = event_data.get("test_score")
1530
+ if rank is not None and train_score is not None and test_score is not None:
1531
+ # Extract full instruction text (may be multi-line)
1532
+ instruction_text = event_data.get("instruction_text", "")
1533
+ if not instruction_text:
1534
+ # Try to get from instruction_lines if available
1535
+ instruction_lines = event_data.get("instruction_lines", [])
1536
+ if instruction_lines:
1537
+ instruction_text = "\n".join(str(line) for line in instruction_lines)
1538
+
1539
+ mipro_topk_candidates.append({
1540
+ "rank": rank,
1541
+ "train_score": train_score,
1542
+ "test_score": test_score,
1543
+ "lift_absolute": event_data.get("lift_absolute"),
1544
+ "lift_percent": event_data.get("lift_percent"),
1545
+ "instruction_text": instruction_text,
1546
+ "instruction_lines": event_data.get("instruction_lines", []),
1547
+ "demo_indices": event_data.get("demo_indices", []),
1548
+ "stage_payloads": event_data.get("stage_payloads", {}),
1549
+ "instruction_indices": event_data.get("instruction_indices", []),
1550
+ "test_per_seed": event_data.get("test_per_seed", {}),
1551
+ })
1552
+ elif event_type == "mipro.baseline.test":
1553
+ # Extract baseline test score
1554
+ if baseline_score is None:
1555
+ baseline_score = event_data.get("test_score")
1556
+ elif event_type == "mipro.instruction.proposed":
1557
+ # Collect proposed instructions
1558
+ proposed_instructions.append({
1559
+ "iteration": event_data.get("iteration"),
1560
+ "stage_id": event_data.get("stage_id"),
1561
+ "module_id": event_data.get("module_id"),
1562
+ "instruction_id": event_data.get("instruction_id"),
1563
+ "instruction_text": event_data.get("instruction_text", ""),
1564
+ "instruction_lines": event_data.get("instruction_lines", []),
1565
+ "demo_indices": event_data.get("demo_indices", []),
1566
+ "proposal_id": event_data.get("proposal_id"),
1567
+ "timestamp": event.get("created_at"),
1568
+ })
1569
+ elif event_type == "gepa.transformation.proposed":
1570
+ # Collect proposed transformations
1571
+ proposed_transformations.append({
1572
+ "generation": event_data.get("generation"),
1573
+ "mutation_type": event_data.get("mutation_type"),
1574
+ "operator": event_data.get("operator"),
1575
+ "transformation_id": event_data.get("transformation_id"),
1576
+ "parent_id": event_data.get("parent_id"),
1577
+ "transformation_text": event_data.get("transformation_text", ""),
1578
+ "transformation_dict": event_data.get("transformation_dict", {}),
1579
+ "mutation_params": event_data.get("mutation_params", {}),
1580
+ "timestamp": event.get("created_at"),
1581
+ })
1582
+
1583
+ # Check if we have any results to display (best_prompt, best_score, or candidates)
1584
+ has_results = bool(attempted_candidates or optimized_candidates or best_prompt or best_score is not None)
1585
+ if not has_results:
1586
+ return
1587
+
1588
+ # Determine algorithm name from events
1589
+ algorithm_name = "PROMPT LEARNING"
1590
+ for event in events:
1591
+ if isinstance(event, dict):
1592
+ event_type = event.get("type", "")
1593
+ if "gepa" in event_type.lower():
1594
+ algorithm_name = "GEPA"
1595
+ break
1596
+ elif "mipro" in event_type.lower():
1597
+ algorithm_name = "MIPRO"
1598
+ break
1599
+
1600
+ # Generate formatted report
1601
+ lines = []
1602
+ lines.append("=" * 80)
1603
+ lines.append(f"{algorithm_name} PROMPT LEARNING RESULTS")
1604
+ lines.append("=" * 80)
1605
+ lines.append(f"Job ID: {job_id}")
1606
+ lines.append(f"Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
1607
+ lines.append("")
1608
+ if baseline_score is not None:
1609
+ lines.append(f"📊 Baseline Score: {baseline_score:.4f} ({baseline_score*100:.1f}%)")
1610
+ if best_score is not None:
1611
+ lines.append(f"🏆 Best Score: {best_score:.4f} ({best_score*100:.1f}%)")
1612
+ if baseline_score is not None and best_score is not None:
1613
+ improvement = ((best_score - baseline_score) / baseline_score) * 100 if baseline_score > 0 else 0
1614
+ lines.append(f"📈 Improvement: {improvement:+.1f}% relative ({(best_score - baseline_score)*100:+.1f} pp absolute)")
1615
+ lines.append("=" * 80)
1616
+ lines.append("")
1617
+
1618
+ # Add best prompt if available
1619
+ if best_prompt and isinstance(best_prompt, dict):
1620
+ lines.append("🏆 BEST PROMPT")
1621
+ lines.append("-" * 80)
1622
+ sections = best_prompt.get("sections", [])
1623
+ if not isinstance(sections, list):
1624
+ sections = []
1625
+ for sec in sections:
1626
+ if not isinstance(sec, dict):
1627
+ continue
1628
+ role = sec.get("role", "unknown")
1629
+ content = sec.get("content", "")
1630
+ lines.append(f"\n[{role.upper()}]:")
1631
+ lines.append(content)
1632
+ lines.append("")
1633
+
1634
+ # Add optimized candidates
1635
+ if optimized_candidates and isinstance(optimized_candidates, list):
1636
+ lines.append("=" * 80)
1637
+ lines.append(f"✨ TOP OPTIMIZED CANDIDATES ({len(optimized_candidates)})")
1638
+ lines.append("=" * 80)
1639
+ lines.append("")
1640
+
1641
+ for idx, cand in enumerate(optimized_candidates):
1642
+ if not isinstance(cand, dict):
1643
+ continue
1644
+ candidate_score = cand.get("score") or {}
1645
+ accuracy = candidate_score.get("accuracy", 0.0)
1646
+ prompt_length = candidate_score.get("prompt_length", 0)
1647
+ payload_kind = cand.get("payload_kind", "unknown")
1648
+
1649
+ # Try score.instance_scores first, then cand.instance_scores (explicit check)
1650
+ instance_scores = (
1651
+ candidate_score.get('instance_scores')
1652
+ if 'instance_scores' in candidate_score
1653
+ else cand.get('instance_scores')
1654
+ )
1655
+ n_eval = len(instance_scores) if instance_scores and isinstance(instance_scores, list) else 0
1656
+
1657
+ lines.append(f"[{idx+1}] Accuracy: {accuracy:.4f} | Length: {prompt_length} | Type: {payload_kind} | N: {n_eval}")
1658
+ lines.append("-" * 80)
1659
+
1660
+ obj = cand.get("object")
1661
+ if obj and isinstance(obj, dict) and payload_kind == "transformation":
1662
+ # For transformations, text_replacements are nested in data
1663
+ data_obj = obj.get("data", {})
1664
+ replacement_lines = _format_text_replacements(data_obj)
1665
+ lines.extend(replacement_lines)
1666
+ lines.append("")
1667
+
1668
+ # Add MIPRO top-K candidates
1669
+ if mipro_topk_candidates and isinstance(mipro_topk_candidates, list):
1670
+ # Sort by rank
1671
+ mipro_topk_candidates.sort(key=lambda x: x.get("rank", 999))
1672
+ lines.append("=" * 80)
1673
+ lines.append(f"🎯 TOP-K CANDIDATES ({len(mipro_topk_candidates)})")
1674
+ lines.append("=" * 80)
1675
+ lines.append("")
1676
+
1677
+ for cand in mipro_topk_candidates:
1678
+ rank = cand.get("rank", 0)
1679
+ train_score = cand.get("train_score", 0.0)
1680
+ test_score = cand.get("test_score", 0.0)
1681
+ lift_abs = cand.get("lift_absolute")
1682
+ lift_pct = cand.get("lift_percent")
1683
+ instruction_text = cand.get("instruction_text", "")
1684
+ instruction_lines = cand.get("instruction_lines", [])
1685
+ demo_indices = cand.get("demo_indices", [])
1686
+ instruction_indices = cand.get("instruction_indices", [])
1687
+ stage_payloads = cand.get("stage_payloads", {})
1688
+ test_per_seed = cand.get("test_per_seed", {})
1689
+
1690
+ lift_str = ""
1691
+ if lift_abs is not None and lift_pct is not None:
1692
+ lift_str = f" | Lift: {lift_abs:+.3f} ({lift_pct:+.1f}%)"
1693
+
1694
+ lines.append(f"[Rank {rank}] Train: {train_score:.4f} ({train_score*100:.1f}%) | Test: {test_score:.4f} ({test_score*100:.1f}%){lift_str}")
1695
+ lines.append("-" * 80)
1696
+
1697
+ # Show full instruction text (use instruction_lines if available, otherwise instruction_text)
1698
+ if instruction_lines:
1699
+ lines.append("Instructions:")
1700
+ for idx, instr_line in enumerate(instruction_lines, 1):
1701
+ lines.append(f" {idx}. {instr_line}")
1702
+ elif instruction_text:
1703
+ # Split multi-line instructions
1704
+ instr_parts = instruction_text.split("\n")
1705
+ if len(instr_parts) > 1:
1706
+ lines.append("Instructions:")
1707
+ for idx, part in enumerate(instr_parts, 1):
1708
+ if part.strip():
1709
+ lines.append(f" {idx}. {part.strip()}")
1710
+ else:
1711
+ lines.append(f"Instruction: {instruction_text}")
1712
+
1713
+ if instruction_indices:
1714
+ lines.append(f"Instruction Indices: {instruction_indices}")
1715
+ if demo_indices:
1716
+ lines.append(f"Demo Indices: {demo_indices}")
1717
+
1718
+ # Show per-stage breakdown if available
1719
+ if stage_payloads:
1720
+ lines.append("Per-stage breakdown:")
1721
+ for stage_id, payload in stage_payloads.items():
1722
+ if isinstance(payload, dict):
1723
+ instr_ids = payload.get("instruction_indices", [])
1724
+ demo_ids = payload.get("demo_indices", [])
1725
+ module_id = payload.get("module_id", "unknown")
1726
+ lines.append(f" [{module_id}/{stage_id}] instr_ids={instr_ids} demo_ids={demo_ids}")
1727
+
1728
+ # Show test per-seed scores if available
1729
+ if test_per_seed:
1730
+ seed_scores = []
1731
+ for seed, score in sorted(test_per_seed.items()):
1732
+ seed_scores.append(f"{seed}: {score:.2f}")
1733
+ if seed_scores:
1734
+ lines.append(f"Test per-seed: {', '.join(seed_scores)}")
1735
+
1736
+ lines.append("")
1737
+
1738
+ # Add all proposal candidates
1739
+ if attempted_candidates and isinstance(attempted_candidates, list):
1740
+ lines.append("=" * 80)
1741
+ lines.append(f"💡 ALL PROPOSAL CANDIDATES ({len(attempted_candidates)})")
1742
+ lines.append("=" * 80)
1743
+ lines.append("")
1744
+
1745
+ for idx, cand in enumerate(attempted_candidates):
1746
+ if not isinstance(cand, dict):
1747
+ continue
1748
+ accuracy = cand.get('accuracy', 0.0)
1749
+ prompt_length = cand.get('prompt_length', 0)
1750
+ tool_rate = cand.get('tool_call_rate', 0.0)
1751
+ instance_scores = cand.get('instance_scores', [])
1752
+ n_eval = len(instance_scores) if instance_scores else 0
1753
+
1754
+ lines.append(f"[{idx+1}] Accuracy: {accuracy:.4f} | Length: {prompt_length} | Tool Rate: {tool_rate:.2f} | N: {n_eval}")
1755
+ lines.append("-" * 80)
1756
+
1757
+ obj = cand.get("object")
1758
+ if obj and isinstance(obj, dict):
1759
+ # For proposals, text_replacements are at top level of object
1760
+ replacement_lines = _format_text_replacements(obj)
1761
+ lines.extend(replacement_lines)
1762
+ lines.append("")
1763
+
1764
+ # Add proposed instructions section (MIPRO)
1765
+ if proposed_instructions and isinstance(proposed_instructions, list):
1766
+ lines.append("=" * 80)
1767
+ lines.append(f"💡 PROPOSED INSTRUCTIONS ({len(proposed_instructions)})")
1768
+ lines.append("=" * 80)
1769
+ lines.append("")
1770
+
1771
+ for idx, instr in enumerate(proposed_instructions):
1772
+ if not isinstance(instr, dict):
1773
+ continue
1774
+ iteration = instr.get("iteration", "?")
1775
+ stage_id = instr.get("stage_id", "?")
1776
+ module_id = instr.get("module_id", "?")
1777
+ instruction_id = instr.get("instruction_id", "?")
1778
+ instruction_text = instr.get("instruction_text", "")
1779
+ instruction_lines = instr.get("instruction_lines", [])
1780
+ demo_indices = instr.get("demo_indices", [])
1781
+
1782
+ lines.append(f"[{idx+1}] Iteration {iteration} | Stage: {stage_id} | Module: {module_id} | ID: {instruction_id}")
1783
+ if demo_indices:
1784
+ lines.append(f"Demo Indices: {demo_indices}")
1785
+ lines.append("-" * 80)
1786
+
1787
+ # Show instruction text (use instruction_lines if available, otherwise instruction_text)
1788
+ if instruction_lines:
1789
+ for line_idx, line in enumerate(instruction_lines, 1):
1790
+ if line.strip():
1791
+ lines.append(f" {line_idx}. {line.strip()}")
1792
+ elif instruction_text:
1793
+ # Split multi-line instructions
1794
+ instr_parts = instruction_text.split("\n")
1795
+ if len(instr_parts) > 1:
1796
+ for line_idx, part in enumerate(instr_parts, 1):
1797
+ if part.strip():
1798
+ lines.append(f" {line_idx}. {part.strip()}")
1799
+ else:
1800
+ lines.append(f" {instruction_text}")
1801
+
1802
+ lines.append("")
1803
+
1804
+ # Add proposed transformations section (GEPA)
1805
+ if proposed_transformations and isinstance(proposed_transformations, list):
1806
+ lines.append("=" * 80)
1807
+ lines.append(f"🧬 PROPOSED TRANSFORMATIONS ({len(proposed_transformations)})")
1808
+ lines.append("=" * 80)
1809
+ lines.append("")
1810
+
1811
+ for idx, trans in enumerate(proposed_transformations):
1812
+ if not isinstance(trans, dict):
1813
+ continue
1814
+ generation = trans.get("generation", "?")
1815
+ mutation_type = trans.get("mutation_type", "?")
1816
+ operator = trans.get("operator", "?")
1817
+ transformation_id = trans.get("transformation_id", "?")
1818
+ parent_id = trans.get("parent_id", "?")
1819
+ transformation_text = trans.get("transformation_text", "")
1820
+ transformation_dict = trans.get("transformation_dict", {})
1821
+
1822
+ lines.append(f"[{idx+1}] Generation {generation} | Type: {mutation_type} | Operator: {operator}")
1823
+ lines.append(f"Transformation ID: {transformation_id} | Parent ID: {parent_id}")
1824
+ lines.append("-" * 80)
1825
+
1826
+ # Show transformation text
1827
+ if transformation_text:
1828
+ lines.append("Transformation Text:")
1829
+ lines.append(f" {transformation_text}")
1830
+
1831
+ # Show transformation dict details if available
1832
+ if transformation_dict:
1833
+ text_replacements = transformation_dict.get("text_replacements", [])
1834
+ if text_replacements:
1835
+ lines.append("Text Replacements:")
1836
+ for repl_idx, repl in enumerate(text_replacements, 1):
1837
+ if isinstance(repl, dict):
1838
+ apply_to = repl.get("apply_to_role", "unknown")
1839
+ old_text = repl.get("old_text", "")[:100]
1840
+ new_text = repl.get("new_text", "")[:200]
1841
+ lines.append(f" {repl_idx}. [{apply_to}]")
1842
+ if old_text:
1843
+ lines.append(f" Old: {old_text}...")
1844
+ if new_text:
1845
+ lines.append(f" New: {new_text}...")
1846
+
1847
+ lines.append("")
1848
+
1849
+ # Add summary table and chart before END OF REPORT
1850
+ lines.append("")
1851
+ lines.append("=" * 80)
1852
+ lines.append("FINAL SUMMARY")
1853
+ lines.append("=" * 80)
1854
+
1855
+ # Generate summary table text (reuse summary.py logic)
1856
+ try:
1857
+ from .summary import _generate_summary_text
1858
+ # Extract optimization curve from events if available
1859
+ optimization_curve = None
1860
+ # Try to extract curve from trial events
1861
+ trial_scores = []
1862
+ for event in events:
1863
+ if isinstance(event, dict):
1864
+ event_type = event.get("type", "")
1865
+ if event_type in ("prompt.learning.trial.complete", "mipro.new_incumbent"):
1866
+ data = event.get("data", {})
1867
+ trial_num = data.get("trial") or data.get("trial_num")
1868
+ score = data.get("score") or data.get("minibatch_score")
1869
+ if trial_num is not None and score is not None:
1870
+ trial_scores.append((trial_num, score))
1871
+
1872
+ if trial_scores:
1873
+ # Build optimization curve (best score so far at each trial)
1874
+ best_so_far = {}
1875
+ for trial_num, score in sorted(trial_scores):
1876
+ if trial_num not in best_so_far or score > best_so_far[trial_num]:
1877
+ best_so_far[trial_num] = score
1878
+ optimization_curve = sorted(best_so_far.items())
1879
+
1880
+ summary_text, curve_text = _generate_summary_text(
1881
+ events=events,
1882
+ algorithm=algorithm_name.lower() if algorithm_name else None,
1883
+ optimization_curve=optimization_curve,
1884
+ )
1885
+ if summary_text:
1886
+ lines.append(summary_text)
1887
+ if curve_text:
1888
+ lines.append("")
1889
+ lines.append(curve_text)
1890
+ except Exception as e:
1891
+ lines.append(f"⚠️ Could not generate summary: {e}")
1892
+
1893
+ lines.append("=" * 80)
1894
+ lines.append("END OF REPORT")
1895
+ lines.append("=" * 80)
1896
+
1897
+ # Determine save location
1898
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
1899
+
1900
+ # Use results_folder from config (create if it doesn't exist)
1901
+ output_dir = results_folder
1902
+ output_dir.mkdir(parents=True, exist_ok=True)
1903
+
1904
+ # Use algorithm-specific filename
1905
+ algorithm_prefix = algorithm_name.lower() if algorithm_name else "prompt_learning"
1906
+ output_file = output_dir / f"{algorithm_prefix}_results_{job_id}_{timestamp}.txt"
1907
+
1908
+ with open(output_file, "w", encoding="utf-8") as f:
1909
+ f.write("\n".join(lines))
1910
+
1911
+ click.echo(f"\n📄 Results saved locally to: {output_file}")
1912
+
1913
+ # Also save verbose log file with all events (append summary if log was streamed live)
1914
+ log_file = output_dir / f"{algorithm_prefix}_log_{job_id}_{timestamp}.log"
1915
+ append_summary = log_file.exists() # If log file exists, it was streamed live, so just append summary
1916
+ _save_verbose_log_file(events, log_file, algorithm_name, job_id, append_summary=append_summary)
1917
+ click.echo(f"📋 Verbose log saved locally to: {log_file}")
1918
+
1919
+ except (PermissionError, OSError) as e:
1920
+ click.echo(f"⚠️ Could not save results file locally: {e}")
1921
+ except Exception as e:
1922
+ click.echo(f"⚠️ Unexpected error saving results file: {e}")
1923
+
1924
+
1925
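For reference, a minimal sketch of the events fetch performed above, including the list-vs-dict response handling, written with httpx directly; the URL pieces and key are placeholders, and 5000 stands in for _RESULTS_FILE_MAX_EVENTS.

    import httpx

    backend_base = "https://backend.example.com/api"  # placeholder
    job_id = "pl_job_123"                             # placeholder
    url = f"{backend_base}/prompt-learning/online/jobs/{job_id}/events?limit=5000"

    resp = httpx.get(url, headers={"Authorization": "Bearer sk-..."}, timeout=30.0)
    resp.raise_for_status()
    data = resp.json()
    # The backend returns a bare list; older responses wrapped events in a dict.
    events = data if isinstance(data, list) else data.get("events", [])
    types = sorted({e.get("type", "") for e in events if isinstance(e, dict)})
    print(f"fetched {len(events)} events; event types: {types}")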
+ def handle_prompt_learning(
1926
+ *,
1927
+ cfg_path: Path,
1928
+ backend_base: str,
1929
+ synth_key: str,
1930
+ task_url_override: str | None,
1931
+ allow_experimental: bool | None,
1932
+ dry_run: bool,
1933
+ poll: bool,
1934
+ poll_timeout: float,
1935
+ poll_interval: float,
1936
+ stream_format: str,
1937
+ display_config: dict[str, Any] | None = None,
1938
+ tui: bool = False,
1939
+ show_curve: bool = True,
1940
+ verbose_summary: bool = True,
1941
+ ) -> None:
1942
+ """Handle prompt learning job creation (MIPRO or GEPA)."""
1943
+ ctx: dict[str, Any] = {
1944
+ "cfg_path": str(cfg_path),
1945
+ "backend_base": backend_base,
1946
+ "task_url_override": task_url_override,
1947
+ "poll": poll,
1948
+ }
1949
+ log_info("handle_prompt_learning invoked", ctx=ctx)
1950
+ env_key = get_required_value(
1951
+ "environment_api_key",
1952
+ env_value=os.environ.get("ENVIRONMENT_API_KEY"),
1953
+ )
1954
+ os.environ["ENVIRONMENT_API_KEY"] = env_key
1955
+
1956
+ overrides: dict[str, Any] = {
1957
+ "backend": backend_base,
1958
+ "task_url": task_url_override,
1959
+ }
1960
+
1961
+ build = build_prompt_learning_payload(
1962
+ config_path=cfg_path,
1963
+ task_url=task_url_override,
1964
+ overrides=overrides,
1965
+ allow_experimental=allow_experimental,
1966
+ )
1967
+
1968
+ # Assertion: Validate task app URL is reachable from backend perspective
1969
+ # If backend is localhost and task app is localhost, they should be able to communicate
1970
+ task_app_url = build.task_url or ""
1971
+ if backend_base.startswith("http://localhost") or backend_base.startswith("http://127.0.0.1"):
1972
+ if task_app_url.startswith("http://localhost") or task_app_url.startswith("http://127.0.0.1"):
1973
+ # Both are local - this should work
1974
+ pass
1975
+ else:
1976
+ click.echo(f"⚠️ WARNING: Backend is local ({backend_base}) but task app is remote ({task_app_url})")
1977
+ click.echo(" The backend may not be able to reach the task app. Consider using a tunnel or local task app.")
1978
+
1979
+ click.echo("Performing task app health check…")
1980
+ click.echo(f"Task app URL: {build.task_url}")
1981
+ click.echo("⏳ Checking /health endpoint (timeout: 10s)...")
1982
+ health = check_task_app_health(build.task_url, env_key, timeout=10.0)
1983
+ if not health.ok:
1984
+ click.echo(f"❌ Task app health check failed: {health.detail}")
1985
+ click.echo(f" Health status: {health.health_status}")
1986
+ click.echo(f" Task info status: {health.task_info_status}")
1987
+ click.echo("💡 Troubleshooting:")
1988
+ click.echo(" 1. Ensure the task app is running: lsof -i :8102")
1989
+ click.echo(" 2. Test manually: curl -v http://127.0.0.1:8102/health")
1990
+ click.echo(" 3. Check task app logs for errors")
1991
+ click.echo(" 4. Restart the task app if it's hung")
1992
+ raise click.ClickException("Aborting due to failing health check")
1993
+ else:
1994
+ click.echo("Task app healthy")
1995
+
1996
+ # Ensure backend_base has /api prefix
1997
+ if not backend_base.endswith("/api"):
1998
+ backend_base = ensure_api_base(backend_base)
1999
+
2000
+ # Assertion: Validate backend URL before making request
2001
+ if not backend_base.startswith("http"):
2002
+ raise click.ClickException(
2003
+ f"Invalid backend URL: {backend_base}. Must start with http:// or https://"
2004
+ )
2005
+
2006
+ create_url = f"{backend_base}/prompt-learning/online/jobs"
2007
+ headers = {"Authorization": f"Bearer {synth_key}", "Content-Type": "application/json"}
2008
+
2009
+ click.echo(f"POST {create_url}")
2010
+ click.echo("Payload preview:\n" + preview_json(build.payload, limit=800))
2011
+
2012
+ # Assertion: If using local backend, verify it's actually localhost
2013
+ if (
2014
+ os.getenv("BACKEND_BASE_URL")
2015
+ and "localhost" in os.getenv("BACKEND_BASE_URL", "").lower()
2016
+ and "localhost" not in backend_base.lower()
2017
+ and "127.0.0.1" not in backend_base
2018
+ ):
2019
+ raise click.ClickException(
2020
+ f"BACKEND_BASE_URL was set to localhost but backend_base resolved to {backend_base}. "
2021
+ f"This indicates the environment variable is not being respected."
2022
+ )
2023
+
2024
+ # Increase timeout for job creation (can take longer due to validation checks)
2025
+ resp = http_post(create_url, headers=headers, json_body=build.payload, timeout=180.0)
2026
+ try:
2027
+ js = resp.json()
2028
+ except json.JSONDecodeError as e:
2029
+ click.echo(f"⚠️ Failed to parse JSON response: {e}")
2030
+ js = {"status": resp.status_code, "text": resp.text[:400]}
2031
+ click.echo(f"Response {resp.status_code}: {preview_json(js, limit=400)}")
2032
+ if resp.status_code not in (200, 201):
2033
+ raise click.ClickException("Job creation failed")
2034
+ job_id = js.get("job_id") or js.get("id")
2035
+ if not job_id:
2036
+ raise click.ClickException("Response missing job id")
2037
+
2038
+ if not poll:
2039
+ click.echo(f"Created job {job_id} (polling disabled)")
2040
+ return
2041
+
2042
+ algorithm = str(build.payload.get("algorithm") or "").lower()
2043
+ metric_names: set[str] | None = None
2044
+ if algorithm == "gepa":
2045
+ metric_names = {"gepa.transformation.mean_score"}
2046
+
2047
+ chart_mode = stream_format == "chart" and algorithm == "gepa"
2048
+ if stream_format == "chart" and not chart_mode:
2049
+ click.echo("Chart streaming is only available for GEPA jobs; showing textual updates instead.")
2050
+
2051
+ # Prepare log file path for real-time streaming
2052
+ results_folder = parse_results_folder(cfg_path)
2053
+ from datetime import datetime
2054
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
2055
+ algorithm_prefix = algorithm.lower() if algorithm else "prompt_learning"
2056
+ log_file = results_folder / f"{algorithm_prefix}_log_{job_id}_{timestamp}.log"
2057
+
2058
+ # Write initial streaming message to log file if handler will be created
2059
+ if not chart_mode:
2060
+ try:
2061
+ log_file.parent.mkdir(parents=True, exist_ok=True)
2062
+ with open(log_file, "a", encoding="utf-8") as f:
2063
+ f.write("\n=== Streaming Job Progress ===\n")
2064
+ except Exception:
2065
+ pass # Continue even if log file can't be written
2066
+
2067
+ click.echo("\n=== Streaming Job Progress ===")
2068
+
2069
+ # Create appropriate handler based on algorithm
2070
+ if algorithm == "gepa":
2071
+ if chart_mode:
2072
+ config = StreamConfig(
2073
+ enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS},
2074
+ event_types={
2075
+ "prompt.learning.progress",
2076
+ "prompt.learning.gepa.start",
2077
+ "prompt.learning.gepa.complete",
2078
+ },
2079
+ metric_names=metric_names,
2080
+ )
2081
+ handlers = [LossCurveHandler()]
2082
+ click.echo("Using live loss chart (metric=gepa.transformation.mean_score)")
2083
+ else:
2084
+ config = StreamConfig(
2085
+ enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS, StreamType.TIMELINE},
2086
+ metric_names=metric_names,
2087
+ max_events_per_poll=500, # Capture more events per poll
2088
+ deduplicate=True, # Still deduplicate but capture more
2089
+ # Don't filter events - show all of them
2090
+ event_types=None, # No whitelist - show all event types
2091
+ event_types_exclude=None, # No blacklist - show all events
2092
+ event_levels=None, # Show all levels
2093
+ )
2094
+ # Use PromptLearningHandler for enhanced event handling
2095
+ handler = PromptLearningHandler(
2096
+ show_trial_results=display_config.get("show_trial_results", True) if display_config else True,
2097
+ show_transformations=display_config.get("show_transformations", False) if display_config else False,
2098
+ show_validation=display_config.get("show_validation", True) if display_config else True,
2099
+ max_tokens=display_config.get("max_tokens") if display_config else None,
2100
+ max_time_seconds=display_config.get("max_time_seconds") if display_config else None,
2101
+ max_rollouts=display_config.get("max_rollouts") if display_config else None,
2102
+ log_file=log_file,
2103
+ )
2104
+ handlers = [handler]
2105
+ else:
2106
+ # Use PromptLearningHandler for MIPRO (same as GEPA)
2107
+ config = StreamConfig(
2108
+ enabled_streams={StreamType.STATUS, StreamType.EVENTS, StreamType.METRICS, StreamType.TIMELINE},
2109
+ metric_names=metric_names,
2110
+ max_events_per_poll=500, # Capture more events per poll
2111
+ deduplicate=True, # Still deduplicate but capture more
2112
+ # Don't filter events - show all of them
2113
+ event_types=None, # No whitelist - show all event types
2114
+ event_types_exclude=None, # No blacklist - show all events
2115
+ event_levels=None, # Show all levels
2116
+ )
2117
+ handler = PromptLearningHandler(
2118
+ show_trial_results=display_config.get("show_trial_results", True) if display_config else True,
2119
+ show_transformations=display_config.get("show_transformations", False) if display_config else False,
2120
+ show_validation=display_config.get("show_validation", True) if display_config else True,
2121
+ max_tokens=display_config.get("max_tokens") if display_config else None,
2122
+ max_time_seconds=display_config.get("max_time_seconds") if display_config else None,
2123
+ max_rollouts=display_config.get("max_rollouts") if display_config else None,
2124
+ log_file=log_file,
2125
+ )
2126
+ handlers = [handler]
2127
+
2128
+ streamer = JobStreamer(
2129
+ base_url=backend_base,
2130
+ api_key=synth_key,
2131
+ job_id=job_id,
2132
+ endpoints=StreamEndpoints.prompt_learning(job_id),
2133
+ config=config,
2134
+ handlers=handlers,
2135
+ interval_seconds=poll_interval,
2136
+ timeout_seconds=poll_timeout,
2137
+ )
2138
+ final_status = asyncio.run(streamer.stream_until_terminal())
2139
+
2140
+ # Write final status to log file if handler has one
2141
+ if isinstance(handlers[0], PromptLearningHandler) and handlers[0]._log_file_handle:
2142
+ handlers[0]._write_log(f"Final status: {final_status.get('status', 'unknown')}")
2143
+ handlers[0]._write_log(preview_json(final_status, limit=600))
2144
+
2145
+ click.echo(f"Final status: {final_status.get('status', 'unknown')}")
2146
+ click.echo(preview_json(final_status, limit=600))
2147
+
2148
+ # Display final summary for GEPA/MIPRO jobs if requested
2149
+ if verbose_summary and algorithm in ("gepa", "mipro"):
2150
+ optimization_curve = None
2151
+ if isinstance(handlers[0], PromptLearningHandler):
2152
+ optimization_curve = handlers[0].optimization_curve
2153
+
2154
+ from .summary import display_prompt_learning_summary
2155
+ # Pass log_writer if handler has one
2156
+ log_writer = None
2157
+ if isinstance(handlers[0], PromptLearningHandler) and handlers[0]._log_file_handle:
2158
+ log_writer = handlers[0]._write_log
2159
+ display_prompt_learning_summary(
2160
+ job_id=job_id,
2161
+ backend_base=backend_base,
2162
+ api_key=synth_key,
2163
+ optimization_curve=optimization_curve,
2164
+ show_curve=show_curve,
2165
+ algorithm=algorithm,
2166
+ log_writer=log_writer,
2167
+ )
2168
+
2169
+ # Save results file locally
2170
+ # Parse and validate results_folder from config (already done above, but ensure it's available)
2171
+ if 'results_folder' not in locals():
2172
+ results_folder = parse_results_folder(cfg_path)
2173
+
2174
+ # Close log file if handler has one (flush is already called by streamer, but ensure it's closed)
2175
+ if isinstance(handlers[0], PromptLearningHandler) and handlers[0]._log_file_handle:
2176
+ handlers[0].flush()
2177
+
2178
+ _save_prompt_learning_results_locally(
2179
+ backend_base=backend_base,
2180
+ api_key=synth_key,
2181
+ job_id=job_id,
2182
+ config_path=cfg_path,
2183
+ results_folder=results_folder,
2184
+ )
2185
+
2186
+
2187
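A hypothetical direct call to the handler above (normally reached through the train click command). The config path, backend URL, and key are placeholders, and ENVIRONMENT_API_KEY must be resolvable for get_required_value to succeed.

    import os
    from pathlib import Path

    os.environ.setdefault("ENVIRONMENT_API_KEY", "env-key-...")  # placeholder

    handle_prompt_learning(
        cfg_path=Path("configs/prompt_learning_gepa.toml"),  # placeholder config
        backend_base="https://backend.example.com",          # /api is appended if missing
        synth_key="sk-...",                                   # placeholder API key
        task_url_override=None,
        allow_experimental=None,
        dry_run=False,
        poll=True,
        poll_timeout=3600.0,
        poll_interval=5.0,
        stream_format="text",   # any non-"chart" value streams textual updates
        display_config={"show_trial_results": True, "show_validation": True},
    )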
+ def register(cli: click.Group) -> None:
2188
+ cli.add_command(train_command)