synth-ai 0.2.9.dev4__py3-none-any.whl → 0.2.9.dev6__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of synth-ai might be problematic.

Files changed (353)
  1. examples/__init__.py +16 -0
  2. examples/crafter_debug_render.py +23 -17
  3. examples/qwen_coder/README.md +102 -0
  4. examples/qwen_coder/_shared.py +113 -0
  5. examples/qwen_coder/configs/coder_lora_30b.toml +61 -0
  6. examples/qwen_coder/configs/coder_lora_4b.toml +57 -0
  7. examples/qwen_coder/configs/coder_lora_small.toml +58 -0
  8. examples/qwen_coder/generate_dataset.py +98 -0
  9. examples/qwen_coder/infer_ft_smoke.py +64 -0
  10. examples/qwen_coder/infer_prod_proxy.py +73 -0
  11. examples/qwen_coder/infer_via_synth.py +87 -0
  12. examples/qwen_coder/scripts/infer_coder.sh +18 -0
  13. examples/qwen_coder/scripts/train_coder_30b.sh +21 -0
  14. examples/qwen_coder/sft_full_17b.py +103 -0
  15. examples/qwen_coder/sft_lora_30b.py +110 -0
  16. examples/qwen_coder/subset_jsonl.py +38 -0
  17. examples/qwen_coder/validate_jsonl.py +59 -0
  18. examples/rl/configs/eval_base_qwen.toml +1 -1
  19. examples/rl/configs/rl_from_base_qwen17.toml +1 -1
  20. examples/rl/download_dataset.py +26 -10
  21. examples/rl/run_eval.py +53 -52
  22. examples/rl/run_rl_and_save.py +29 -12
  23. examples/rl/task_app/math_single_step.py +180 -41
  24. examples/rl/task_app/math_task_app.py +14 -6
  25. examples/sft/README.md +139 -0
  26. examples/sft/configs/crafter_fft_qwen0p6b.toml +44 -0
  27. examples/sft/configs/crafter_lora_qwen0p6b.toml +45 -0
  28. examples/sft/evaluate.py +117 -0
  29. examples/sft/export_dataset.py +117 -0
  30. examples/sft/generate_traces.py +162 -0
  31. examples/swe/__init__.py +12 -0
  32. examples/swe/task_app/README.md +105 -0
  33. examples/swe/task_app/__init__.py +2 -0
  34. examples/swe/task_app/grpo_swe_mini.py +571 -0
  35. examples/swe/task_app/grpo_swe_mini_task_app.py +136 -0
  36. examples/swe/task_app/hosted/README.md +173 -0
  37. examples/swe/task_app/hosted/__init__.py +5 -0
  38. examples/swe/task_app/hosted/branching.py +143 -0
  39. examples/swe/task_app/hosted/environment_routes.py +1289 -0
  40. examples/swe/task_app/hosted/envs/__init__.py +1 -0
  41. examples/swe/task_app/hosted/envs/crafter/__init__.py +6 -0
  42. examples/swe/task_app/hosted/envs/crafter/app.py +1 -0
  43. examples/swe/task_app/hosted/envs/crafter/environment.py +522 -0
  44. examples/swe/task_app/hosted/envs/crafter/policy.py +478 -0
  45. examples/swe/task_app/hosted/envs/crafter/react_agent.py +108 -0
  46. examples/swe/task_app/hosted/envs/crafter/shared.py +305 -0
  47. examples/swe/task_app/hosted/envs/crafter/tools.py +47 -0
  48. examples/swe/task_app/hosted/envs/mini_swe/__init__.py +8 -0
  49. examples/swe/task_app/hosted/envs/mini_swe/environment.py +1164 -0
  50. examples/swe/task_app/hosted/envs/mini_swe/policy.py +355 -0
  51. examples/swe/task_app/hosted/envs/mini_swe/shared.py +83 -0
  52. examples/swe/task_app/hosted/envs/mini_swe/tools.py +96 -0
  53. examples/swe/task_app/hosted/hosted_app.py +204 -0
  54. examples/swe/task_app/hosted/inference/__init__.py +5 -0
  55. examples/swe/task_app/hosted/inference/openai_client.py +618 -0
  56. examples/swe/task_app/hosted/main.py +100 -0
  57. examples/swe/task_app/hosted/policy_routes.py +1079 -0
  58. examples/swe/task_app/hosted/registry.py +195 -0
  59. examples/swe/task_app/hosted/rollout.py +1869 -0
  60. examples/swe/task_app/hosted/storage/__init__.py +5 -0
  61. examples/swe/task_app/hosted/storage/volume.py +211 -0
  62. examples/swe/task_app/hosted/test_agents.py +161 -0
  63. examples/swe/task_app/hosted/test_service.py +137 -0
  64. examples/swe/task_app/hosted/utils.py +62 -0
  65. examples/vlm/README.md +68 -0
  66. examples/vlm/configs/crafter_vlm_gpt4o.toml +44 -0
  67. examples/vlm/crafter_image_only_agent.py +207 -0
  68. examples/vlm/crafter_openai_vlm_agent.py +277 -0
  69. examples/vlm/filter_image_rows.py +63 -0
  70. examples/vlm/run_crafter_vlm_benchmark.py +316 -0
  71. examples/warming_up_to_rl/analyze_trace_db.py +12 -10
  72. examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +11 -1
  73. examples/warming_up_to_rl/export_trace_sft.py +218 -36
  74. examples/warming_up_to_rl/groq_test.py +15 -8
  75. examples/warming_up_to_rl/manage_secrets.py +29 -25
  76. examples/warming_up_to_rl/readme.md +9 -2
  77. examples/warming_up_to_rl/run_eval.py +137 -61
  78. examples/warming_up_to_rl/run_fft_and_save.py +131 -60
  79. examples/warming_up_to_rl/run_local_rollout.py +88 -39
  80. examples/warming_up_to_rl/run_local_rollout_modal.py +114 -28
  81. examples/warming_up_to_rl/run_local_rollout_parallel.py +81 -20
  82. examples/warming_up_to_rl/run_local_rollout_traced.py +126 -23
  83. examples/warming_up_to_rl/run_rl_and_save.py +35 -12
  84. examples/warming_up_to_rl/run_rollout_remote.py +44 -19
  85. examples/warming_up_to_rl/task_app/README.md +6 -2
  86. examples/warming_up_to_rl/task_app/grpo_crafter.py +319 -57
  87. examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +11 -30
  88. examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +1 -1
  89. examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +9 -11
  90. examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +137 -182
  91. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/__init__.py +1 -1
  92. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/__init__.py +1 -1
  93. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/app.py +1 -1
  94. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +150 -57
  95. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +105 -69
  96. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +19 -7
  97. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +45 -42
  98. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/tools.py +1 -1
  99. examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +47 -45
  100. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/__init__.py +1 -1
  101. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +198 -92
  102. examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +0 -2
  103. examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +361 -263
  104. examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +21 -23
  105. examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +394 -274
  106. examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/__init__.py +1 -1
  107. examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +56 -62
  108. examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +1 -0
  109. examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +6 -15
  110. examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +4 -3
  111. synth/__init__.py +14 -0
  112. synth_ai/__init__.py +20 -4
  113. synth_ai/api/models/supported.py +376 -0
  114. synth_ai/api/train/builders.py +157 -26
  115. synth_ai/api/train/cli.py +213 -57
  116. synth_ai/api/train/config_finder.py +65 -5
  117. synth_ai/api/train/env_resolver.py +33 -15
  118. synth_ai/api/train/pollers.py +13 -4
  119. synth_ai/api/train/supported_algos.py +139 -0
  120. synth_ai/api/train/task_app.py +5 -3
  121. synth_ai/api/train/utils.py +33 -48
  122. synth_ai/cli/__init__.py +19 -4
  123. synth_ai/cli/_modal_wrapper.py +28 -0
  124. synth_ai/cli/_typer_patch.py +49 -0
  125. synth_ai/cli/balance.py +2 -3
  126. synth_ai/cli/calc.py +1 -1
  127. synth_ai/cli/demo.py +21 -6
  128. synth_ai/cli/recent.py +2 -2
  129. synth_ai/cli/rl_demo.py +77 -17
  130. synth_ai/cli/root.py +116 -39
  131. synth_ai/cli/status.py +2 -2
  132. synth_ai/cli/task_apps.py +1709 -243
  133. synth_ai/cli/traces.py +7 -4
  134. synth_ai/cli/turso.py +73 -0
  135. synth_ai/cli/watch.py +12 -18
  136. synth_ai/core/experiment.py +0 -2
  137. synth_ai/demo_registry.py +68 -31
  138. synth_ai/demos/core/cli.py +516 -194
  139. synth_ai/demos/demo_task_apps/__init__.py +3 -3
  140. synth_ai/demos/demo_task_apps/core.py +64 -28
  141. synth_ai/demos/demo_task_apps/crafter/configs/crafter_fft_4b.toml +2 -3
  142. synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +37 -30
  143. synth_ai/demos/demo_task_apps/math/_common.py +1 -2
  144. synth_ai/demos/demo_task_apps/math/app.py +2 -1
  145. synth_ai/demos/demo_task_apps/math/deploy_modal.py +3 -6
  146. synth_ai/demos/demo_task_apps/math/modal_task_app.py +183 -82
  147. synth_ai/demos/demo_task_apps/math/task_app_entry.py +0 -2
  148. synth_ai/environments/examples/bandit/engine.py +12 -4
  149. synth_ai/environments/examples/bandit/taskset.py +4 -4
  150. synth_ai/environments/examples/crafter_classic/environment.py +76 -1
  151. synth_ai/environments/reproducibility/tree.py +5 -6
  152. synth_ai/environments/service/app.py +11 -12
  153. synth_ai/environments/service/core_routes.py +10 -9
  154. synth_ai/environments/stateful/engine.py +1 -1
  155. synth_ai/environments/tasks/core.py +1 -0
  156. synth_ai/environments/tasks/filters.py +5 -6
  157. synth_ai/environments/tasks/utils.py +4 -5
  158. synth_ai/evals/base.py +0 -2
  159. synth_ai/handshake.py +11 -9
  160. synth_ai/http.py +1 -1
  161. synth_ai/http_client.py +43 -11
  162. synth_ai/inference/__init__.py +0 -2
  163. synth_ai/inference/client.py +20 -6
  164. synth_ai/jobs/client.py +103 -78
  165. synth_ai/learning/__init__.py +41 -6
  166. synth_ai/learning/algorithms.py +14 -0
  167. synth_ai/learning/client.py +121 -29
  168. synth_ai/learning/config.py +2 -40
  169. synth_ai/learning/constants.py +0 -2
  170. synth_ai/learning/ft_client.py +4 -56
  171. synth_ai/learning/health.py +13 -7
  172. synth_ai/learning/jobs.py +43 -47
  173. synth_ai/{rl → learning/rl}/__init__.py +14 -5
  174. synth_ai/learning/rl/client.py +267 -0
  175. synth_ai/learning/rl/config.py +31 -0
  176. synth_ai/{rl → learning/rl}/contracts.py +5 -10
  177. synth_ai/{rl → learning/rl}/env_keys.py +45 -16
  178. synth_ai/learning/rl/secrets.py +13 -0
  179. synth_ai/learning/rl_client.py +2 -253
  180. synth_ai/learning/sft/__init__.py +29 -0
  181. synth_ai/learning/sft/client.py +68 -0
  182. synth_ai/learning/sft/config.py +270 -0
  183. synth_ai/learning/sft/data.py +295 -0
  184. synth_ai/learning/sse.py +25 -26
  185. synth_ai/learning/validators.py +25 -24
  186. synth_ai/lm/__init__.py +21 -47
  187. synth_ai/task/__init__.py +26 -27
  188. synth_ai/task/apps/__init__.py +18 -19
  189. synth_ai/task/auth.py +35 -23
  190. synth_ai/task/client.py +15 -13
  191. synth_ai/task/contracts.py +37 -35
  192. synth_ai/task/datasets.py +9 -6
  193. synth_ai/task/errors.py +11 -10
  194. synth_ai/task/health.py +17 -11
  195. synth_ai/task/json.py +58 -24
  196. synth_ai/task/proxy.py +15 -14
  197. synth_ai/task/rubrics.py +22 -15
  198. synth_ai/task/server.py +43 -17
  199. synth_ai/task/tracing_utils.py +12 -7
  200. synth_ai/task/validators.py +0 -1
  201. synth_ai/task/vendors.py +5 -7
  202. synth_ai/tracing_v3/__init__.py +2 -0
  203. synth_ai/tracing_v3/abstractions.py +21 -4
  204. synth_ai/tracing_v3/db_config.py +26 -1
  205. synth_ai/tracing_v3/decorators.py +18 -15
  206. synth_ai/tracing_v3/examples/basic_usage.py +3 -2
  207. synth_ai/tracing_v3/hooks.py +6 -4
  208. synth_ai/tracing_v3/llm_call_record_helpers.py +6 -6
  209. synth_ai/tracing_v3/replica_sync.py +1 -0
  210. synth_ai/tracing_v3/session_tracer.py +63 -16
  211. synth_ai/tracing_v3/storage/base.py +89 -1
  212. synth_ai/tracing_v3/storage/config.py +21 -8
  213. synth_ai/tracing_v3/storage/factory.py +10 -8
  214. synth_ai/tracing_v3/storage/utils.py +4 -2
  215. synth_ai/tracing_v3/turso/daemon.py +7 -2
  216. synth_ai/tracing_v3/turso/models.py +5 -2
  217. synth_ai/tracing_v3/turso/native_manager.py +1173 -0
  218. synth_ai/tracing_v3/utils.py +4 -3
  219. synth_ai/v0/api/__init__.py +8 -0
  220. synth_ai/v0/api/models/__init__.py +8 -0
  221. synth_ai/v0/api/models/supported.py +8 -0
  222. synth_ai/v0/config/__init__.py +15 -0
  223. synth_ai/v0/config/base_url.py +12 -0
  224. synth_ai/v0/lm/__init__.py +51 -0
  225. synth_ai/{lm → v0/lm}/caching/ephemeral.py +3 -5
  226. synth_ai/{lm → v0/lm}/caching/handler.py +4 -4
  227. synth_ai/{lm → v0/lm}/caching/initialize.py +1 -1
  228. synth_ai/{lm → v0/lm}/caching/persistent.py +1 -1
  229. synth_ai/{lm → v0/lm}/config.py +6 -1
  230. synth_ai/{lm → v0/lm}/core/all.py +9 -9
  231. synth_ai/{lm → v0/lm}/core/exceptions.py +0 -2
  232. synth_ai/{lm → v0/lm}/core/main.py +19 -7
  233. synth_ai/{lm → v0/lm}/core/main_v3.py +10 -10
  234. synth_ai/{lm → v0/lm}/core/synth_models.py +2 -15
  235. synth_ai/{lm → v0/lm}/core/vendor_clients.py +6 -4
  236. synth_ai/{lm → v0/lm}/overrides.py +4 -4
  237. synth_ai/{lm → v0/lm}/provider_support/anthropic.py +4 -4
  238. synth_ai/{lm → v0/lm}/provider_support/openai.py +5 -5
  239. synth_ai/{lm → v0/lm}/structured_outputs/handler.py +5 -5
  240. synth_ai/{lm → v0/lm}/structured_outputs/rehabilitate.py +1 -1
  241. synth_ai/{lm → v0/lm}/vendors/core/anthropic_api.py +16 -16
  242. synth_ai/{lm → v0/lm}/vendors/core/gemini_api.py +5 -5
  243. synth_ai/{lm → v0/lm}/vendors/core/mistral_api.py +5 -5
  244. synth_ai/{lm → v0/lm}/vendors/core/openai_api.py +12 -10
  245. synth_ai/{lm → v0/lm}/vendors/openai_standard.py +11 -9
  246. synth_ai/{lm → v0/lm}/vendors/openai_standard_responses.py +8 -5
  247. synth_ai/{lm → v0/lm}/vendors/supported/custom_endpoint.py +4 -6
  248. synth_ai/{lm → v0/lm}/vendors/supported/deepseek.py +2 -2
  249. synth_ai/{lm → v0/lm}/vendors/supported/grok.py +2 -2
  250. synth_ai/{lm → v0/lm}/vendors/supported/groq.py +1 -1
  251. synth_ai/{lm → v0/lm}/vendors/supported/ollama.py +1 -1
  252. synth_ai/{lm → v0/lm}/vendors/supported/openrouter.py +3 -3
  253. synth_ai/{lm → v0/lm}/vendors/supported/together.py +1 -1
  254. synth_ai/{lm → v0/lm}/vendors/synth_client.py +38 -11
  255. synth_ai/v0/tracing/upload.py +32 -135
  256. synth_ai/v0/tracing_v3/__init__.py +10 -0
  257. synth_ai/v0/tracing_v3/abstractions.py +3 -0
  258. synth_ai/v0/tracing_v3/decorators.py +3 -0
  259. synth_ai/v0/tracing_v3/llm_call_record_helpers.py +3 -0
  260. synth_ai/v0/tracing_v3/session_tracer.py +3 -0
  261. synth_ai-0.2.9.dev6.dist-info/METADATA +191 -0
  262. {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/RECORD +291 -264
  263. {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/top_level.txt +1 -0
  264. examples/common_old/backend.py +0 -21
  265. examples/evals_old/README.md +0 -98
  266. examples/evals_old/__init__.py +0 -6
  267. examples/evals_old/compare_models.py +0 -1037
  268. examples/evals_old/example_log.md +0 -145
  269. examples/evals_old/run_demo.sh +0 -126
  270. examples/evals_old/trace_analysis.py +0 -270
  271. examples/finetuning_old/_backup_synth_qwen/config.toml +0 -29
  272. examples/finetuning_old/_backup_synth_qwen/example_log.md +0 -324
  273. examples/finetuning_old/_backup_synth_qwen/filter_traces.py +0 -60
  274. examples/finetuning_old/_backup_synth_qwen/filter_traces_achievements.py +0 -239
  275. examples/finetuning_old/_backup_synth_qwen/purge_v3_traces.py +0 -109
  276. examples/finetuning_old/_backup_synth_qwen/react_agent_lm.py +0 -1924
  277. examples/finetuning_old/_backup_synth_qwen/readme.md +0 -49
  278. examples/finetuning_old/_backup_synth_qwen/run_crafter_qwen4b.py +0 -114
  279. examples/finetuning_old/_backup_synth_qwen/run_demo.sh +0 -195
  280. examples/finetuning_old/_backup_synth_qwen/sft_kickoff.py +0 -118
  281. examples/finetuning_old/synth_qwen_v1/README.md +0 -68
  282. examples/finetuning_old/synth_qwen_v1/filter_traces.py +0 -60
  283. examples/finetuning_old/synth_qwen_v1/filter_traces_achievements.py +0 -239
  284. examples/finetuning_old/synth_qwen_v1/finetune.py +0 -46
  285. examples/finetuning_old/synth_qwen_v1/hello_ft_model.py +0 -71
  286. examples/finetuning_old/synth_qwen_v1/infer.py +0 -37
  287. examples/finetuning_old/synth_qwen_v1/poll.py +0 -44
  288. examples/finetuning_old/synth_qwen_v1/prepare_data.py +0 -35
  289. examples/finetuning_old/synth_qwen_v1/purge_v3_traces.py +0 -109
  290. examples/finetuning_old/synth_qwen_v1/react_agent_lm.py +0 -1932
  291. examples/finetuning_old/synth_qwen_v1/run_crafter_sft_job.py +0 -207
  292. examples/finetuning_old/synth_qwen_v1/run_ft_job.py +0 -232
  293. examples/finetuning_old/synth_qwen_v1/upload_data.py +0 -34
  294. examples/finetuning_old/synth_qwen_v1/util.py +0 -147
  295. examples/rl_old/task_app.py +0 -962
  296. examples/warming_up_to_rl/old/event_rewards.md +0 -234
  297. examples/warming_up_to_rl/old/notes.md +0 -73
  298. examples/warming_up_to_rl/task_app/synth_envs_hosted/test_stepwise_rewards.py +0 -58
  299. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_traces_sft_turso.py +0 -738
  300. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/filter_traces_sft_turso.py +0 -580
  301. synth_ai/environments/examples/sokoban/units/astar_common.py +0 -95
  302. synth_ai/experimental/synth_oss.py +0 -446
  303. synth_ai/install_sqld.sh +0 -40
  304. synth_ai/learning/filtering.py +0 -0
  305. synth_ai/learning/offline/dpo.py +0 -0
  306. synth_ai/learning/offline/providers.py +0 -7
  307. synth_ai/learning/offline/sft.py +0 -0
  308. synth_ai/learning/offline/shared.py +0 -0
  309. synth_ai/learning/online/grpo.py +0 -0
  310. synth_ai/learning/online/irft.py +0 -0
  311. synth_ai/learning/prompts/banking77_injection_eval.py +0 -168
  312. synth_ai/learning/prompts/gepa.py +0 -0
  313. synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +0 -213
  314. synth_ai/learning/prompts/mipro.py +0 -289
  315. synth_ai/learning/prompts/random_search.py +0 -246
  316. synth_ai/learning/prompts/run_mipro_banking77.py +0 -172
  317. synth_ai/learning/prompts/run_random_search_banking77.py +0 -324
  318. synth_ai/rl/secrets.py +0 -19
  319. synth_ai/scripts/verify_rewards.py +0 -100
  320. synth_ai/tracing/__init__.py +0 -30
  321. synth_ai/tracing_v1/__init__.py +0 -33
  322. synth_ai/tracing_v3/turso/__init__.py +0 -25
  323. synth_ai/tracing_v3/turso/manager.py +0 -774
  324. synth_ai/zyk/__init__.py +0 -30
  325. synth_ai-0.2.9.dev4.dist-info/METADATA +0 -131
  326. /synth_ai/{lm → v0/lm}/caching/__init__.py +0 -0
  327. /synth_ai/{lm → v0/lm}/caching/constants.py +0 -0
  328. /synth_ai/{lm → v0/lm}/caching/dbs.py +0 -0
  329. /synth_ai/{lm → v0/lm}/constants.py +0 -0
  330. /synth_ai/{lm → v0/lm}/core/__init__.py +0 -0
  331. /synth_ai/{lm → v0/lm}/cost/__init__.py +0 -0
  332. /synth_ai/{lm → v0/lm}/cost/monitor.py +0 -0
  333. /synth_ai/{lm → v0/lm}/cost/statefulness.py +0 -0
  334. /synth_ai/{lm → v0/lm}/injection.py +0 -0
  335. /synth_ai/{lm → v0/lm}/provider_support/__init__.py +0 -0
  336. /synth_ai/{lm → v0/lm}/provider_support/suppress_logging.py +0 -0
  337. /synth_ai/{lm → v0/lm}/structured_outputs/__init__.py +0 -0
  338. /synth_ai/{lm → v0/lm}/structured_outputs/inject.py +0 -0
  339. /synth_ai/{lm → v0/lm}/tools/__init__.py +0 -0
  340. /synth_ai/{lm → v0/lm}/tools/base.py +0 -0
  341. /synth_ai/{lm → v0/lm}/unified_interface.py +0 -0
  342. /synth_ai/{lm → v0/lm}/vendors/__init__.py +0 -0
  343. /synth_ai/{lm → v0/lm}/vendors/base.py +0 -0
  344. /synth_ai/{lm → v0/lm}/vendors/core/__init__.py +0 -0
  345. /synth_ai/{lm → v0/lm}/vendors/core/synth_dev_api.py +0 -0
  346. /synth_ai/{lm → v0/lm}/vendors/local/__init__.py +0 -0
  347. /synth_ai/{lm → v0/lm}/vendors/local/ollama.py +0 -0
  348. /synth_ai/{lm → v0/lm}/vendors/retries.py +0 -0
  349. /synth_ai/{lm → v0/lm}/vendors/supported/__init__.py +0 -0
  350. /synth_ai/{lm → v0/lm}/warmup.py +0 -0
  351. {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/WHEEL +0 -0
  352. {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/entry_points.txt +0 -0
  353. {synth_ai-0.2.9.dev4.dist-info → synth_ai-0.2.9.dev6.dist-info}/licenses/LICENSE +0 -0
examples/swe/task_app/hosted/policy_routes.py (new file)
@@ -0,0 +1,1079 @@
+from __future__ import annotations
+
+import contextlib
+import logging
+import os
+from datetime import datetime
+from typing import Any
+
+from fastapi import APIRouter, HTTPException, Request
+from pydantic import BaseModel
+
+from .envs.crafter.policy import CrafterPolicy
+from .envs.mini_swe.policy import MiniSwePolicy
+from .inference.openai_client import create_inference_client
+from .registry import registry
+from .storage.volume import storage
+
+# Token budgeting (shared logic with inference server)
+try:
+    from ..core.algorithms.gspo.inference.token_limits import (
+        clamp_effective_max_ctx,
+    )
+except Exception:  # pragma: no cover - defensive import path fallback
+    clamp_effective_max_ctx = None  # type: ignore
+
+try:
+    import tiktoken  # type: ignore
+except Exception:  # pragma: no cover
+    tiktoken = None  # type: ignore
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+
+class PolicyCreateRequest(BaseModel):
+    policy_name: str
+    config: dict[str, Any] = {}
+    parent_policy_id: str | None = None
+    rl_run_id: str
+    bound_env_id: str | None = None
+
+
+class PolicyCreateResponse(BaseModel):
+    policy_id: str
+
+
+class PolicyStepRequest(BaseModel):
+    policy_id: str
+    observation: dict[str, Any]
+    state: dict[str, Any] | None = None
+    metadata: dict[str, Any] | None = None
+    dry_run: bool = False
+
+
+class PolicyStepResponse(BaseModel):
+    tool_calls: list[dict[str, Any]]
+    meta: dict[str, Any]
+
+
+class PolicySnapshotRequest(BaseModel):
+    policy_id: str
+
+
+class PolicySnapshotResponse(BaseModel):
+    snapshot_id: str
+    path: str
+    rl_run_id: str
+    size: int
+
+
+class PolicyRestoreRequest(BaseModel):
+    snapshot_id: str
+
+
+class PolicyRestoreResponse(BaseModel):
+    policy_id: str
+
+
+class PolicyTerminateRequest(BaseModel):
+    policy_id: str
+
+
+class PolicyTerminateResponse(BaseModel):
+    ok: bool
+
+
+@router.post("/create", response_model=PolicyCreateResponse)
+async def create_policy(
+    request: PolicyCreateRequest,
+    req: Request,
+) -> PolicyCreateResponse:
+    """Create a new policy instance."""
+    try:
+        task_app = getattr(req.app.state, "task_app", None)
+
+        # Set defaults from TaskApp / environment if not provided
+        config = dict(request.config or {})
+        inference_url_env = os.getenv("SWE_MINI_INFERENCE_URL") or os.getenv(
+            "SWE_MINI_VLLM_BASE_URL"
+        )
+        if "inference_url" not in config:
+            if inference_url_env:
+                config["inference_url"] = inference_url_env
+            elif task_app is not None and getattr(task_app, "vllm_base_url", None):
+                config["inference_url"] = task_app.vllm_base_url
+        if "model" not in config:
+            default_model = (
+                config.get("model")
+                or os.getenv("SWE_MINI_DEFAULT_MODEL")
+                or (task_app.default_model if task_app is not None else None)
+            )
+            if default_model:
+                config["model"] = default_model
+        if request.policy_name.lower() in {"swe-mini-react", "swe-mini", "swe_mini"}:
+            # Ensure tool-using policies surface run_command/submit_patch to the model.
+            # Compatibility hacks for legacy configs sometimes toggle use_tools off; for Groq
+            # and other OpenAI-compatible endpoints we hard-enable structured tool calls.
+            config["use_tools"] = True
+
+        # Create policy instance based on name
+        pname = request.policy_name.lower()
+        if pname in ["crafter-react", "crafter"]:
+            policy = CrafterPolicy(
+                inference_url=config["inference_url"],
+                model=config["model"],
+            )
+            await policy.initialize(config)
+        elif pname in ["swe-mini-react", "swe-mini", "swe_mini"]:
+            policy = MiniSwePolicy(
+                inference_url=config.get("inference_url"),
+                model=config.get("model"),
+            )
+            await policy.initialize(config)
+        elif pname in ["wordle-react", "wordle"]:
+            try:
+                from .envs.wordle.policy import WordlePolicy
+            except Exception as e:
+                raise HTTPException(
+                    status_code=500, detail=f"Wordle policy unavailable: {e}"
+                ) from e
+
+            policy = WordlePolicy(
+                inference_url=config["inference_url"],
+                model=config["model"],
+                word_length=int(config["word_length"]),
+                max_guesses=int(config["max_guesses"]),
+            )
+            await policy.initialize(config)
+        elif pname in ["sokoban-react", "sokoban"]:
+            try:
+                from .envs.sokoban.policy import SokobanPolicy
+            except Exception as e:
+                raise HTTPException(
+                    status_code=500, detail=f"Sokoban policy unavailable: {e}"
+                ) from e
+
+            policy = SokobanPolicy(
+                inference_url=config["inference_url"],
+                model=config["model"],
+            )
+            await policy.initialize(config)
+        elif pname in ["math-react", "math"]:
+            try:
+                from .envs.math.policy import MathPolicy
+            except Exception as e:
+                raise HTTPException(status_code=500, detail=f"Math policy unavailable: {e}") from e
+
+            policy = MathPolicy(
+                inference_url=config["inference_url"],
+                model=config["model"],
+            )
+            await policy.initialize(config)
+        else:
+            raise HTTPException(
+                status_code=422,
+                detail=f"Unknown policy name: {request.policy_name}",
+            )
+
+        # Register in memory
+        policy_id = registry.register_policy(
+            policy=policy,
+            rl_run_id=request.rl_run_id,
+            bound_env_id=request.bound_env_id,
+        )
+
+        return PolicyCreateResponse(policy_id=policy_id)
+
+    except Exception as e:
+        logger.error(f"Failed to create policy: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@router.post("/step", response_model=PolicyStepResponse)
+async def step_policy(
+    request: PolicyStepRequest,
+    req: Request,
+) -> PolicyStepResponse:
+    """Execute a policy step to generate actions."""
+    handle = registry.get_policy(request.policy_id)
+    if not handle:
+        raise HTTPException(status_code=404, detail=f"Policy {request.policy_id} not found")
+
+    try:
+        task_app = req.app.state.task_app
+        policy = handle.policy
+        tracing_context = getattr(req.state, "rollout_tracing", None)
+
+        # Format observation text conditionally for each env
+        if isinstance(request.observation, dict):
+            if isinstance(policy, CrafterPolicy):
+                from .envs.crafter.shared import format_observation as format_crafter
+
+                obs_text = format_crafter(request.observation)
+            elif isinstance(policy, MiniSwePolicy):
+                from .envs.mini_swe.shared import format_observation as format_mini_swe
+
+                obs_text = format_mini_swe(request.observation)
+            elif True:
+                try:
+                    from .envs.wordle.policy import WordlePolicy
+                except Exception:
+                    wordle_policy_cls = None  # type: ignore[assignment]
+                else:
+                    wordle_policy_cls = WordlePolicy
+
+                if wordle_policy_cls is not None and isinstance(policy, wordle_policy_cls):
+                    from .envs.wordle.shared import format_observation_wordle
+
+                    # ASSERTION: Validate observation structure
+                    assert request.observation is not None, "request.observation cannot be None"
+                    assert isinstance(request.observation, dict), (
+                        f"request.observation must be dict, got {type(request.observation)}"
+                    )
+
+                    # Required keys for Wordle observation
+                    required_keys = {
+                        "text",
+                        "status",
+                        "remaining_guesses",
+                        "guesses",
+                        "feedback",
+                        "reward_last",
+                        "total_reward",
+                        "terminated",
+                    }
+                    missing_keys = required_keys - set(request.observation.keys())
+                    assert not missing_keys, f"Wordle observation missing required keys: {missing_keys}"
+
+                    print("DEBUG POLICY_ROUTES: About to format Wordle observation")
+                    print(f"DEBUG POLICY_ROUTES: Observation type: {type(request.observation)}")
+                    print(f"DEBUG POLICY_ROUTES: Observation keys: {list(request.observation.keys())}")
+                    feedback_val = request.observation["feedback"]
+                    print(f"DEBUG POLICY_ROUTES: Observation feedback: {feedback_val}")
+                    print(f"DEBUG POLICY_ROUTES: Observation guesses: {request.observation['guesses']}")
+                    print(
+                        f"DEBUG POLICY_ROUTES: Observation text length: {len(request.observation['text'])}"
+                    )
+
+                    # ASSERTION: Validate feedback data
+                    guesses = request.observation["guesses"]
+                    feedback = request.observation["feedback"]
+                    assert isinstance(guesses, list), f"guesses must be list, got {type(guesses)}"
+                    assert isinstance(feedback, list), f"feedback must be list, got {type(feedback)}"
+                    # Note: We don't assert equal lengths here since the environment is broken
+
+                    obs_text = format_observation_wordle(request.observation)
+
+                    # ASSERTION: Validate formatted output
+                    assert isinstance(obs_text, str), f"obs_text must be string, got {type(obs_text)}"
+                    assert len(obs_text) > 0, "obs_text cannot be empty"
+                    assert "WORDLE" in obs_text, "obs_text must contain 'WORDLE' header"
+                    assert "Respond with a single tool call" in obs_text, (
+                        "obs_text must contain instruction text"
+                    )
+
+                    print(f"DEBUG POLICY_ROUTES: Formatted obs_text length: {len(obs_text)}")
+                    print(f"DEBUG POLICY_ROUTES: Formatted obs_text contains 🟩: {'🟩' in obs_text}")
+                    print(f"DEBUG POLICY_ROUTES: Formatted obs_text contains 🟨: {'🟨' in obs_text}")
+                    print(f"DEBUG POLICY_ROUTES: Formatted obs_text contains ⬛: {'⬛' in obs_text}")
+                    print(f"DEBUG POLICY_ROUTES: Formatted obs_text first 200 chars: {obs_text[:200]}")
+                elif True:
+                    try:
+                        from .envs.sokoban.policy import SokobanPolicy
+                    except Exception:
+                        sokoban_policy_cls = None  # type: ignore[assignment]
+                    else:
+                        sokoban_policy_cls = SokobanPolicy
+
+                    if sokoban_policy_cls is not None and isinstance(policy, sokoban_policy_cls):
+                        from .envs.sokoban.shared import format_observation_sokoban
+
+                        obs_text = format_observation_sokoban(request.observation)
+                    elif True:
+                        try:
+                            from .envs.math.policy import MathPolicy
+                        except Exception:
+                            math_policy_cls = None  # type: ignore[assignment]
+                        else:
+                            math_policy_cls = MathPolicy
+                        if math_policy_cls is not None and isinstance(policy, math_policy_cls):
+                            # Simple extraction of problem text
+                            try:
+                                obs_text = str(
+                                    request.observation.get("problem_text") or request.observation
+                                )
+                            except Exception:
+                                obs_text = str(request.observation)
+                        else:
+                            obs_text = str(request.observation)
+        else:
+            obs_text = request.observation
+
+        # Merge metadata with raw observation for multimodal policies
+        step_metadata: dict[str, Any] = dict(request.metadata or {})
+        step_metadata["raw_observation"] = request.observation
+
+        # Execute policy step to get inference request
+        tool_calls, meta = await policy.step(
+            observation_text=obs_text,
+            state=request.state,
+            metadata=step_metadata,
+        )
+        # Compact tool call summary
+        with contextlib.suppress(Exception):
+            _summary: list[dict[str, Any]] = []
+            _tc = tool_calls or []
+            for _item in (_tc if isinstance(_tc, list) else []):
+                if isinstance(_item, dict):
+                    _tool = _item.get("tool")
+                    _args = _item.get("args")
+                    _keys = list(_args.keys()) if isinstance(_args, dict) else []
+                    _summary.append({"tool": _tool, "args_keys": _keys})
+            logger.info(
+                "POLICY_STEP: tool_calls=%d summary=%s",
+                len(_tc),
+                _summary,
+            )
+
+        # If not dry run, perform inference
+        if not request.dry_run and "inference_request" in meta:
+            # CRITICAL: Validate that the inference request contains the correct prompts for the policy
+            inf_req = meta["inference_request"]
+            msgs = inf_req["messages"]
+            model_name = inf_req.get("model") or getattr(policy, "model", None) or ""
+            system_messages: list[str] = []
+            user_messages: list[str] = []
+            if msgs and len(msgs) > 0 and msgs[0]["role"] == "system":
+                sys_text = msgs[0]["content"]
+                policy_name = getattr(policy, "name", "") or type(policy).__name__.lower()
+
+                # Assert environment-specific prompts match the policy
+                if policy_name in ("wordle-react", "wordle"):
+                    if "Wordle" not in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Wordle policy {policy_name} received system prompt without 'Wordle' keyword: {sys_text[:200]}..."
+                        )
+                    if "Crafter" in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Wordle policy {policy_name} received Crafter system prompt: {sys_text[:200]}..."
+                        )
+
+                elif policy_name in ("crafter-react", "crafter") or isinstance(
+                    policy, CrafterPolicy
+                ):
+                    if "Crafter" not in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Crafter policy {policy_name} received system prompt without 'Crafter' keyword: {sys_text[:200]}..."
+                        )
+                    if "Wordle" in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Crafter policy {policy_name} received Wordle system prompt: {sys_text[:200]}..."
+                        )
+                elif policy_name in ("swe-mini-react", "swe-mini", "swe_mini") or isinstance(
+                    policy, MiniSwePolicy
+                ):
+                    if "bash" not in sys_text.lower() and "shell" not in sys_text.lower():
+                        raise ValueError(
+                            f"PROMPT MISMATCH: mini-swe policy {policy_name} missing shell instructions: {sys_text[:200]}..."
+                        )
+
+                elif policy_name in ("sokoban-react", "sokoban"):
+                    if "Sokoban" not in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Sokoban policy {policy_name} received system prompt without 'Sokoban' keyword: {sys_text[:200]}..."
+                        )
+                    if "Crafter" in sys_text or "Wordle" in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Sokoban policy {policy_name} received wrong environment system prompt: {sys_text[:200]}..."
+                        )
+
+                logger.info(
+                    f"✅ PROMPT VALIDATION: {policy_name} policy has correct system prompt containing expected environment keywords"
+                )
+            else:
+                logger.warning(
+                    f"⚠️ PROMPT VALIDATION: No system message found in inference request for policy {getattr(policy, 'name', type(policy).__name__)}"
+                )
+
+            # Emit full system/user prompts for observability (no secrets included)
+            try:
+
+                def _as_text(content: object) -> str:
+                    if isinstance(content, str):
+                        return content
+                    if isinstance(content, list):
+                        # Concatenate any dict segments that resemble OpenAI content parts
+                        parts: list[str] = []
+                        for seg in content:
+                            try:
+                                if isinstance(seg, dict):
+                                    txt = seg.get("text") or seg.get("content") or ""
+                                    if isinstance(txt, str):
+                                        parts.append(txt)
+                            except Exception:
+                                continue
+                        return "".join(parts)
+                    return str(content)
+
+                system_prompt_records: list[dict[str, Any]] = []
+                user_prompt_records: list[dict[str, Any]] = []
+                for message in msgs:
+                    role = message.get("role")
+                    raw_content = message.get("content")
+                    content = _as_text(raw_content)
+                    record = {"role": role, "text": content, "content": raw_content}
+                    if role == "system":
+                        system_prompt_records.append(record)
+                    elif role == "user":
+                        user_prompt_records.append(record)
+
+                logger.info(
+                    "PROMPTS: system_msgs=%d user_msgs=%d last_user_chars=%d",
+                    len(system_prompt_records),
+                    len(user_prompt_records),
+                    len(user_prompt_records[-1].get("text", "")) if user_prompt_records else 0,
+                )
+
+                if system_prompt_records:
+                    logger.info("PROMPT_DUMP_SYSTEM_BEGIN")
+                    for idx, rec in enumerate(system_prompt_records):
+                        smsg = rec.get("text", "")
+                        logger.info(f"SYSTEM[{idx}]\n{smsg}")
+                    logger.info("PROMPT_DUMP_SYSTEM_END")
+
+                if user_prompt_records:
+                    logger.info("PROMPT_DUMP_USER_BEGIN")
+                    for idx, rec in enumerate(user_prompt_records):
+                        umsg = rec.get("text", "")
+                        logger.info(f"USER[{idx}]\n{umsg}")
+                    logger.info("PROMPT_DUMP_USER_END")
+                # Print concise preview for visibility in standard logs
+                with contextlib.suppress(Exception):
+                    last_user = (
+                        user_prompt_records[-1].get("text", "")
+                        if user_prompt_records
+                        else ""
+                    )
+                    print(f"[task:crafter] user prompt: {last_user}", flush=True)
+            except Exception as e:
+                logger.warning(f"PROMPT_DUMP_FAILED: {e}")
+
+            if tracing_context is not None:
+                try:
+                    await tracing_context.record_policy_prompts(
+                        system_prompt_records, user_prompt_records
+                    )
+                except Exception as exc:
+                    logger.debug(f"TRACING_PROMPTS_FAIL: {exc}")
+
+            # Create inference client (choose API key by target provider)
+            # Require inference_url to be set explicitly by the rollout policy config.
+            target_url = (
+                meta.get("inference_url")
+                or getattr(policy, "inference_url", None)
+                or getattr(task_app, "vllm_base_url", None)
+            )
+
+            # Ensure meta carries the final target URL for downstream logging/clients
+            with contextlib.suppress(Exception):
+                meta["inference_url"] = target_url
+
+            # Select API key based on resolved target URL
+            api_key_override = None
+            try:
+                import os as _os
+
+                if isinstance(target_url, str):
+                    low_url = target_url.lower()
+                    # Proxy endpoints should not receive a bearer; the server-side proxy holds the vendor key
+                    if "/proxy/groq" in low_url or "/proxy/openai" in low_url:
+                        api_key_override = None
+                    elif "openai.com" in low_url:
+                        api_key_override = _os.getenv("OPENAI_API_KEY") or getattr(
+                            task_app, "openai_api_key", None
+                        )
+                    elif "groq.com" in low_url or "/proxy/groq" in low_url:
+                        api_key_override = _os.getenv("GROQ_API_KEY")
+                    else:
+                        api_key_override = (
+                            _os.getenv("SYNTH_API_KEY")
+                            or _os.getenv("OPENAI_API_KEY")
+                            or getattr(task_app, "openai_api_key", None)
+                        )
+                else:
+                    api_key_override = (
+                        _os.getenv("SYNTH_API_KEY")
+                        or _os.getenv("OPENAI_API_KEY")
+                        or getattr(task_app, "openai_api_key", None)
+                    )
+            except Exception:
+                api_key_override = None
+
+            if api_key_override:
+                try:
+                    masked = f"{api_key_override[:6]}…{api_key_override[-4:]}"
+                except Exception:
+                    masked = "<masked>"
+                logger.debug(f"INFERENCE_AUTH: Using bearer key {masked}")
+            else:
+                logger.warning(
+                    "INFERENCE_AUTH: No API key resolved for inference request; downstream may 401"
+                )
+
+            client = create_inference_client(task_app, api_key=api_key_override)
+
+            # Add policy identification header for observability
+            policy_name = getattr(policy, "name", "") or type(policy).__name__.lower()
+            extra_headers = {"X-Policy-Name": policy_name}
+
+            # Apply input truncation to avoid 422 from inference server
+            try:
+                model_name = inf_req.get("model") or getattr(policy, "model", None) or ""
+                env_max_ctx = None
+                try:
+                    _env_max = int(os.getenv("CHAT_MAX_MODEL_LEN", "0") or 0)
+                    env_max_ctx = _env_max if _env_max > 0 else None
+                except Exception:
+                    env_max_ctx = None
+                # Compute effective max context and safety margin
+                eff_ctx = None
+                if clamp_effective_max_ctx is not None:
+                    eff_ctx = clamp_effective_max_ctx(
+                        model_name=model_name,
+                        configured_max_model_len=None,
+                        env_max_model_len=env_max_ctx,
+                    )
+                # Hard lower-only chat input cap if provided
+                try:
+                    hard_input_cap = int(os.getenv("CHAT_MAX_INPUT_TOKENS", "0") or 0)
+                    hard_input_cap = hard_input_cap if hard_input_cap > 0 else None
+                except Exception:
+                    hard_input_cap = None
+                try:
+                    safety_margin = int(os.getenv("CHAT_BUDGET_SAFETY", "64").strip() or 64)
+                except Exception:
+                    safety_margin = 64
+
+                # Determine budget
+                budget = None
+                if isinstance(eff_ctx, int) and eff_ctx > 0:
+                    budget = max(256, eff_ctx - safety_margin)
+                if isinstance(hard_input_cap, int) and hard_input_cap > 0:
+                    budget = min(budget, hard_input_cap) if budget is not None else hard_input_cap
+
+                if budget is not None and budget > 0 and isinstance(msgs, list):
+                    # Choose tokenizer
+                    enc = None
+                    if tiktoken is not None:
+                        try:
+                            if model_name:
+                                enc = tiktoken.encoding_for_model(model_name)
+                            else:
+                                enc = tiktoken.get_encoding("cl100k_base")
+                        except Exception:
+                            try:
+                                enc = tiktoken.get_encoding("cl100k_base")
+                            except Exception:
+                                enc = None
+
+                    def _content_to_text(content: object) -> str:
+                        if isinstance(content, str):
+                            return content
+                        if isinstance(content, list):
+                            parts: list[str] = []
+                            for seg in content:
+                                try:
+                                    if isinstance(seg, dict):
+                                        txt = seg.get("text") or seg.get("content") or ""
+                                        if isinstance(txt, str):
+                                            parts.append(txt)
+                                except Exception:
+                                    continue
+                            return "".join(parts)
+                        try:
+                            return str(content)
+                        except Exception:
+                            return ""
+
+                    def _count_tokens(text: str) -> int:
+                        if enc is None:
+                            # Fall back to character count heuristic (~4 chars per token)
+                            try:
+                                return max(1, int(len(text) / 4))
+                            except Exception:
+                                return len(text)
+                        try:
+                            return len(enc.encode(text))
+                        except Exception:
+                            return max(1, int(len(text) / 4))
+
+                    def _count_messages_tokens(messages: list[dict[str, Any]]) -> int:
+                        total = 0
+                        for m in messages:
+                            total += _count_tokens(_content_to_text(m.get("content")))
+                        return total
+
+                    def _truncate_messages_to_budget(
+                        messages: list[dict[str, Any]],
+                        max_tokens: int,
+                    ) -> tuple[list[dict[str, Any]], int, int, int]:
+                        before = _count_messages_tokens(messages)
+                        if before <= max_tokens:
+                            return messages, before, before, len(messages)
+                        # Always try to preserve the first system message if present
+                        system_msg = None
+                        start_idx = 0
+                        if messages and messages[0].get("role") == "system":
+                            system_msg = messages[0]
+                            start_idx = 1
+                        kept_rev: list[dict[str, Any]] = []
+                        total = _count_messages_tokens([system_msg] if system_msg else [])
+                        # Walk from the end keeping most recent messages
+                        for m in reversed(messages[start_idx:]):
+                            t = _count_tokens(_content_to_text(m.get("content")))
+                            if total + t <= max_tokens:
+                                kept_rev.append(m)
+                                total += t
+                            else:
+                                # Try to keep a truncated version of this message if we have some budget left
+                                remaining = max_tokens - total
+                                if remaining > 16:  # keep at least a little context
+                                    txt = _content_to_text(m.get("content"))
+                                    # Binary search-ish trim by tokens
+                                    low, high = 0, len(txt)
+                                    best = None
+                                    while low <= high:
+                                        mid = (low + high) // 2
+                                        candidate = txt[-mid:]
+                                        if _count_tokens(candidate) <= remaining:
+                                            best = candidate
+                                            low = mid + 1
+                                        else:
+                                            high = mid - 1
+                                    if best is not None and best:
+                                        m2 = dict(m)
+                                        m2["content"] = best
+                                        kept_rev.append(m2)
+                                        total += _count_tokens(best)
+                                break
+                        kept = list(reversed(kept_rev))
+                        if system_msg is not None:
+                            kept = [system_msg] + kept
+                        after = _count_messages_tokens(kept)
+                        return kept, before, after, len(kept)
+
+                    new_msgs, before_toks, after_toks, kept_count = _truncate_messages_to_budget(
+                        msgs, int(budget)
+                    )
+                    if new_msgs is not msgs:
+                        inf_req["messages"] = new_msgs
+                        with contextlib.suppress(Exception):
+                            logger.info(
+                                {
+                                    "chat_truncated": True,
+                                    "token_budget": int(budget),
+                                    "before_tokens": int(before_toks),
+                                    "after_tokens": int(after_toks),
+                                    "kept_msgs": int(kept_count),
+                                }
+                            )
+            except Exception as _trunc_e:
+                logger.warning(f"CHAT_TRUNCATION_FAILED: {type(_trunc_e).__name__}: {_trunc_e}")
+
+            # Formal assertion: If tools are expected, ensure tool_choice and tools are set
+            if policy_name in (
+                "wordle-react",
+                "sokoban-react",
+                "crafter-react",
+            ) and getattr(policy, "use_tools", True):
+                req_tools = meta["inference_request"]["tools"]
+                req_tool_choice = meta["inference_request"]["tool_choice"]
+                req_stop_after = meta["inference_request"]["stop_after_tool_calls"]
+                logger.info(
+                    f"TOOLCALL_CONFIG: policy={policy_name} tools_present={bool(req_tools)} tool_choice={req_tool_choice} stop_after={req_stop_after}"
+                )
+                if not req_tools or req_tool_choice != "required":
+                    raise HTTPException(
+                        status_code=500,
+                        detail=f"TOOLCALL_ASSERTION_FAIL: Missing tools or tool_choice!=required for policy {policy_name}",
+                    )
+
+            # Call inference service with retries for Flash cold-start (503)
+            import time as _t
+
+            # Prompt diagnostics before sending to inference: build chat template locally,
+            # count tokens, and log the first 10k tokens if oversized. Also stash a
+            # compact preview in meta so the trainer can surface it.
+            with contextlib.suppress(Exception):
+                req_for_diag = meta.get("inference_request", {})
+                model_for_diag = req_for_diag.get("model") or getattr(policy, "model", None) or ""
+                messages_for_diag = req_for_diag.get("messages") or []
+                if model_for_diag and messages_for_diag:
+                    from transformers import AutoTokenizer
+
+                    tok = AutoTokenizer.from_pretrained(model_for_diag)
+                    prompt_preview = tok.apply_chat_template(
+                        messages_for_diag,
+                        add_generation_prompt=True,
+                        tokenize=False,
+                    )
+                    ids = tok.encode(prompt_preview, add_special_tokens=False)
+                    max_len = getattr(tok, "model_max_length", None)
+                    over_limit = False
+                    with contextlib.suppress(Exception):
+                        over_limit = (
+                            isinstance(max_len, int) and max_len > 0 and len(ids) > int(max_len)
+                        )
+                    if over_limit or len(ids) > 10000:
+                        preview_ids = ids[:10000]
+                        preview_text = tok.decode(
+                            preview_ids,
+                            skip_special_tokens=False,
+                        )
+                        with contextlib.suppress(Exception):
+                            logger.warning(
+                                {
+                                    "prompt_token_overflow_local": True,
+                                    "model": str(model_for_diag),
+                                    "token_count": int(len(ids)),
+                                    "model_max_length": int(max_len)
+                                    if isinstance(max_len, int)
+                                    else None,
+                                    "preview_tokens_logged": int(len(preview_ids)),
+                                    "prompt_preview_first_10k_tokens": preview_text,
+                                }
+                            )
+                        with contextlib.suppress(Exception):
+                            meta["prompt_debug"] = {
+                                "token_count": int(len(ids)),
+                                "model_max_length": int(max_len)
+                                if isinstance(max_len, int)
+                                else None,
+                                "preview_first_10k_tokens": preview_text,
+                            }
+
+            # Emit the exact prompt/messages and tools before calling the LLM (bounded preview)
+            with contextlib.suppress(Exception):
+                req_dump = meta.get("inference_request", {})
+                msgs = req_dump.get("messages")
+                tools_dump = req_dump.get("tools")
+                if isinstance(msgs, list):
+                    # Print compact messages structure and tool schema with bounded length
+                    import json as _json
+
+                    msgs_compact = _json.dumps(msgs)[:20000]
+                    tools_compact = (
+                        _json.dumps(tools_dump)[:8000] if tools_dump is not None else None
+                    )
+                    print(
+                        {
+                            "llm.call": True,
+                            "policy": str(policy_name),
+                            "messages_preview": msgs_compact,
+                            "tools_preview": tools_compact,
+                        }
+                    )
+
+            # Normalize request for non-OpenAI endpoints (strict schemas)
+            with contextlib.suppress(Exception):
+                base = str(target_url or "")
+                is_openai_dotcom = "openai.com" in base.lower()
+                if not is_openai_dotcom:
+                    req_body = meta.get("inference_request", {})
+                    if isinstance(req_body, dict):
+                        # Force structured tool_choice if a bare "required" is present
+                        if req_body.get("tool_choice") == "required":
+                            func_name = "interact_many"
+                            with contextlib.suppress(Exception):
+                                tools_arr = req_body.get("tools") or []
+                                if isinstance(tools_arr, list) and tools_arr:
+                                    f = (
+                                        tools_arr[0].get("function")
+                                        if isinstance(tools_arr[0], dict)
+                                        else None
+                                    )
+                                    cand = (f or {}).get("name") if isinstance(f, dict) else None
+                                    if isinstance(cand, str) and cand:
+                                        func_name = cand
+                            req_body["tool_choice"] = {
+                                "type": "function",
+                                "function": {"name": func_name},
+                            }
+                            req_body["parallel_tool_calls"] = False
+                            req_body.setdefault("function_call", {"name": func_name})
+                        # Inject extra_body for thinking controls expected by Modal service
+                        with contextlib.suppress(Exception):
+                            tb = req_body.get("thinking_budget")
+                            tm = str(req_body.get("thinking_mode") or "").lower()
+                            enable_thinking = bool(tb) or tm == "think"
+                            extra = dict(req_body.get("extra_body") or {})
+                            chat_kwargs = dict(extra.get("chat_template_kwargs") or {})
+                            if enable_thinking:
+                                chat_kwargs["enable_thinking"] = True
+                            if isinstance(tb, int | float | str) and str(tb).strip():
+                                with contextlib.suppress(Exception):
+                                    chat_kwargs["thinking_budget"] = int(tb)
+                            if chat_kwargs:
+                                extra["chat_template_kwargs"] = chat_kwargs
+                            # Ensure stop_after_tool_calls honored via extra_body for stricter servers
+                            extra.setdefault("stop_after_tool_calls", 1)
+                            if extra:
+                                req_body["extra_body"] = extra
+                        # Provide a conservative default temperature if missing
+                        if "temperature" not in req_body:
+                            req_body["temperature"] = 0.1
+                        meta["inference_request"] = req_body
+
+            # Strip image parts: Crafter policy currently only uses text prompts.
+            # Some providers reject image_url payloads entirely, so always flatten to plain text.
+            req_body2 = meta.get("inference_request", {})
+            if isinstance(req_body2, dict):
+                msgs = req_body2.get("messages")
+                if isinstance(msgs, list):
+                    new_msgs = []
+                    changed = False
+                    for m in msgs:
+                        try:
+                            if isinstance(m, dict):
+                                content = m.get("content")
+                                if isinstance(content, list):
+                                    parts: list[str] = []
+                                    for seg in content:
+                                        if isinstance(seg, dict):
+                                            txt = seg.get("text") or seg.get("content")
+                                            if isinstance(txt, str) and txt:
+                                                parts.append(txt)
+                                    m2 = dict(m)
+                                    m2["content"] = "\n".join(parts)
+                                    new_msgs.append(m2)
+                                    changed = True
+                                else:
+                                    new_msgs.append(m)
+                            else:
+                                new_msgs.append(m)
+                        except Exception:
+                            new_msgs.append(m)
+                    if changed:
+                        req_body2["messages"] = new_msgs
+                        meta["inference_request"] = req_body2
+
+            _t_start = _t.time()
+            call_started_at = datetime.utcnow()
+            inference_response = await client.generate_with_retries(
+                request=meta["inference_request"],
+                base_url=meta["inference_url"],
+                max_retries=12,
+                backoff_factor=2.0,
+                extra_headers=extra_headers,
+            )
+            meta["inference_ms"] = int((_t.time() - _t_start) * 1000)
+            call_completed_at = datetime.utcnow()
+
+            provider_url = str(meta.get("inference_url") or "")
+            low_url = provider_url.lower()
+            if "groq" in low_url:
+                provider_name = "groq"
+            elif "openai" in low_url:
+                provider_name = "openai"
+            else:
+                provider_name = "custom"
+
+            # Parse response to tool calls
+            tool_calls = policy.parse_response_to_tool_calls(
+                response=inference_response,
+                use_tools=getattr(policy, "use_tools", True),
+            )
+
+            # Debug logging (echo tool calls)
+            if not tool_calls:
+                # Structured error log with small preview; avoid dumping full response repeatedly
+                preview = str(inference_response)[:400]
+                logger.error(
+                    f"TOOLCALL_PARSE_FAIL: policy={policy_name} parsed=0 preview={preview}"
+                )
+            else:
+                try:
+                    import json as _json
+
+                    print(
+                        {
+                            "tool_calls_parsed": int(len(tool_calls)),
+                            "tool_calls_preview": _json.dumps(tool_calls)[:20000],
+                        }
+                    )
+                except Exception:
+                    logger.info(f"Parsed {len(tool_calls)} tool calls: {tool_calls}")
+
+            # Add response to metadata
+            # Parse tool calls from model response using policy-specific parser
+            try:
+                if hasattr(policy, "parse_response_to_tool_calls"):
+                    parsed = policy.parse_response_to_tool_calls(
+                        inference_response, getattr(policy, "use_tools", True)
+                    )
+                else:
+                    parsed = policy.parse_model_response(inference_response, request.observation)
+                # Replace tool_calls with parsed result
+                if isinstance(parsed, list):
+                    tool_calls = parsed
+                with contextlib.suppress(Exception):
+                    logger.info(
+                        "TOOLCALL_PARSE: parsed=%d has_tools=%s example=%r",
+                        len(tool_calls) if isinstance(tool_calls, list) else -1,
+                        bool(getattr(policy, "use_tools", True)),
+                        (tool_calls[0] if isinstance(tool_calls, list) and tool_calls else None),
+                    )
+            except Exception as _pe:
+                logger.warning(f"Failed to parse tool calls: {str(_pe)}")
+            # Attach raw response + usage for observability
+            meta["raw_response"] = inference_response
+            if "usage" in inference_response:
+                meta["usage"] = inference_response["usage"]
+
+            if tracing_context is not None:
+                try:
+                    await tracing_context.record_llm_call(
+                        inference_request=meta["inference_request"],
+                        inference_response=inference_response,
+                        tool_calls=tool_calls,
+                        provider=provider_name,
+                        model_name=model_name,
+                        started_at=call_started_at,
+                        completed_at=call_completed_at,
+                        latency_ms=meta.get("inference_ms"),
+                    )
+                except Exception as exc:
+                    logger.debug(f"TRACING_LLM_FAIL: {exc}")
+
+        return PolicyStepResponse(
+            tool_calls=tool_calls,
+            meta=meta,
+        )
+
+    except Exception as e:
+        logger.error(f"Failed to step policy {request.policy_id}: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@router.post("/snapshot", response_model=PolicySnapshotResponse)
+async def snapshot_policy(request: PolicySnapshotRequest) -> PolicySnapshotResponse:
+    """Create a snapshot of the policy state."""
+    handle = registry.get_policy(request.policy_id)
+    if not handle:
+        raise HTTPException(status_code=404, detail=f"Policy {request.policy_id} not found")
+
+    try:
+        # Serialize policy state
+        state_dict = await handle.policy.serialize()
+
+        # Save to volume
+        snapshot_id, path, size = storage.save_snapshot(
+            rl_run_id=handle.rl_run_id,
+            kind="policy",
+            state_dict=state_dict,
+        )
+
+        # Register snapshot
+        registry.register_snapshot(
+            kind="policy",
+            rl_run_id=handle.rl_run_id,
+            size=size,
+            path=path,
+        )
+
+        return PolicySnapshotResponse(
+            snapshot_id=snapshot_id,
+            path=path,
+            rl_run_id=handle.rl_run_id,
+            size=size,
+        )
+
+    except Exception as e:
+        logger.error(f"Failed to snapshot policy {request.policy_id}: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@router.post("/restore", response_model=PolicyRestoreResponse)
+async def restore_policy(request: PolicyRestoreRequest) -> PolicyRestoreResponse:
+    """Restore a policy from a snapshot."""
+    snapshot = registry.get_snapshot(request.snapshot_id)
+    if not snapshot:
+        raise HTTPException(status_code=404, detail=f"Snapshot {request.snapshot_id} not found")
+
+    if snapshot.kind != "policy":
+        raise HTTPException(
+            status_code=422,
+            detail=f"Snapshot {request.snapshot_id} is not a policy snapshot",
+        )
+
+    try:
+        # Load snapshot from volume
+        state_dict, meta = storage.load_snapshot(
+            rl_run_id=snapshot.rl_run_id,
+            kind="policy",
+            snapshot_id=request.snapshot_id,
+        )
+
+        # Recreate policy
+        policy_name = state_dict["name"]
+        low = policy_name.lower()
+        if low in ["crafter-react", "crafter"]:
+            policy = await CrafterPolicy.deserialize(state_dict)
+        elif low in ["swe-mini-react", "swe-mini", "swe_mini"]:
+            policy = await MiniSwePolicy.deserialize(state_dict)
+        elif low in ["wordle-react", "wordle"]:
+            try:
+                from .envs.wordle.policy import WordlePolicy
+            except Exception as e:
+                raise HTTPException(
+                    status_code=500, detail=f"Wordle policy unavailable: {e}"
+                ) from e
+            policy = await WordlePolicy.deserialize(state_dict)
+        elif low in ["sokoban-react", "sokoban"]:
+            try:
+                from .envs.sokoban.policy import SokobanPolicy
+            except Exception as e:
+                raise HTTPException(
+                    status_code=500, detail=f"Sokoban policy unavailable: {e}"
+                ) from e
+            policy = await SokobanPolicy.deserialize(state_dict)
+        else:
+            raise HTTPException(
+                status_code=422,
+                detail=f"Unknown policy name in snapshot: {policy_name}",
+            )
+
+        # Register new instance
+        policy_id = registry.register_policy(
+            policy=policy,
+            rl_run_id=snapshot.rl_run_id,
+        )
+
+        return PolicyRestoreResponse(policy_id=policy_id)
+
+    except Exception as e:
+        logger.error(f"Failed to restore policy from snapshot {request.snapshot_id}: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@router.post("/terminate", response_model=PolicyTerminateResponse)
+async def terminate_policy(request: PolicyTerminateRequest) -> PolicyTerminateResponse:
+    """Terminate a policy and clean up resources."""
+    handle = registry.get_policy(request.policy_id)
+    if not handle:
+        raise HTTPException(status_code=404, detail=f"Policy {request.policy_id} not found")
+
+    try:
+        # Call terminate on the policy
+        await handle.policy.terminate()
+
+        # Remove from registry
+        registry.remove_policy(request.policy_id)
+
+        return PolicyTerminateResponse(ok=True)
+
+    except Exception as e:
+        logger.error(f"Failed to terminate policy {request.policy_id}: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
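
For orientation, here is a minimal client-side sketch of the lifecycle these routes implement (create → step → snapshot → restore → terminate). It assumes the router above is mounted under a /policy prefix on a locally running task app; the mount point, host, and the example config values are illustrative assumptions, not shown in this diff.

    # Hypothetical client for the policy lifecycle routes above.
    # ASSUMPTIONS: router mounted at /policy; app reachable on localhost:8000;
    # model and inference_url values are placeholders.
    import httpx

    BASE = "http://localhost:8000/policy"  # assumed mount point

    with httpx.Client(timeout=60.0) as client:
        # 1) Create a policy bound to an RL run
        created = client.post(f"{BASE}/create", json={
            "policy_name": "crafter-react",
            "config": {
                "inference_url": "http://localhost:8001",
                "model": "Qwen/Qwen2.5-7B-Instruct",
            },
            "rl_run_id": "run-123",
        }).json()
        policy_id = created["policy_id"]

        # 2) Step: dry_run=True returns the prepared inference request
        #    in meta without actually calling the inference service
        step = client.post(f"{BASE}/step", json={
            "policy_id": policy_id,
            "observation": {"position": [0, 0]},  # shape depends on the env
            "dry_run": True,
        }).json()
        print(step["meta"].get("inference_request", {}).get("model"))

        # 3) Snapshot, restore into a fresh instance, then terminate the original
        snap = client.post(f"{BASE}/snapshot", json={"policy_id": policy_id}).json()
        restored = client.post(f"{BASE}/restore", json={"snapshot_id": snap["snapshot_id"]}).json()
        client.post(f"{BASE}/terminate", json={"policy_id": policy_id})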
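
The truncation helper inside /step keeps the first system message, then walks backward from the newest message, keeping whatever fits the token budget. Below is a simplified, self-contained sketch of that behavior using the same ~4-characters-per-token fallback heuristic the route uses when tiktoken is unavailable; the real helper additionally binary-searches a partial tail of the first message that overflows.

    # Simplified sketch of the /step message-truncation strategy.
    # ASSUMPTION: no tiktoken, so tokens are estimated at ~4 chars each,
    # matching the route's fallback heuristic.
    from typing import Any

    def estimate_tokens(text: str) -> int:
        return max(1, len(text) // 4)

    def truncate(messages: list[dict[str, Any]], max_tokens: int) -> list[dict[str, Any]]:
        # Preserve the first system message, as the route does
        system = messages[0] if messages and messages[0].get("role") == "system" else None
        rest = messages[1:] if system else messages
        total = estimate_tokens(system["content"]) if system else 0
        kept: list[dict[str, Any]] = []
        for m in reversed(rest):  # newest messages first
            t = estimate_tokens(m["content"])
            if total + t > max_tokens:
                break  # the real helper also tries a partial tail of this message
            kept.append(m)
            total += t
        return ([system] if system else []) + list(reversed(kept))

    msgs = [
        {"role": "system", "content": "You are a Crafter agent." * 4},
        {"role": "user", "content": "old turn " * 200},
        {"role": "user", "content": "most recent observation " * 10},
    ]
    print([m["content"][:12] for m in truncate(msgs, max_tokens=120)])
    # The oversized old turn is dropped; the system message and newest turn survive.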