synth-ai 0.2.13.dev2__py3-none-any.whl → 0.2.16__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.

Potentially problematic release: this version of synth-ai might be problematic.

Files changed (293)
  1. examples/README.md +1 -0
  2. examples/multi_step/SFT_README.md +147 -0
  3. examples/multi_step/configs/README_verilog_rl.md +77 -0
  4. examples/multi_step/configs/VERILOG_REWARDS.md +90 -0
  5. examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +183 -0
  6. examples/multi_step/configs/crafter_eval_synth_qwen4b.toml +35 -0
  7. examples/multi_step/configs/crafter_eval_text_only_groq_qwen32b.toml +36 -0
  8. examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +12 -11
  9. examples/multi_step/configs/crafter_sft_qwen30b_lora.toml +62 -0
  10. examples/multi_step/configs/crafter_synth_backend.md +40 -0
  11. examples/multi_step/configs/verilog_eval_groq_qwen32b.toml +31 -0
  12. examples/multi_step/configs/verilog_eval_synth_qwen8b.toml +33 -0
  13. examples/multi_step/configs/verilog_rl_lora.toml +190 -0
  14. examples/multi_step/convert_traces_to_sft.py +84 -0
  15. examples/multi_step/judges/crafter_backend_judge.py +220 -0
  16. examples/multi_step/judges/verilog_backend_judge.py +234 -0
  17. examples/multi_step/readme.md +48 -0
  18. examples/multi_step/run_sft_qwen30b.sh +45 -0
  19. examples/multi_step/verilog_rl_lora.md +218 -0
  20. examples/qwen_coder/configs/coder_lora_30b.toml +3 -2
  21. examples/qwen_coder/configs/coder_lora_4b.toml +2 -1
  22. examples/qwen_coder/configs/coder_lora_small.toml +2 -1
  23. examples/qwen_vl/BUGS_AND_FIXES.md +232 -0
  24. examples/qwen_vl/IMAGE_VALIDATION_COMPLETE.md +271 -0
  25. examples/qwen_vl/IMAGE_VALIDATION_SUMMARY.md +260 -0
  26. examples/qwen_vl/INFERENCE_SFT_TESTS.md +412 -0
  27. examples/qwen_vl/NEXT_STEPS_2B.md +325 -0
  28. examples/qwen_vl/QUICKSTART.md +327 -0
  29. examples/qwen_vl/QUICKSTART_RL_VISION.md +110 -0
  30. examples/qwen_vl/README.md +154 -0
  31. examples/qwen_vl/RL_VISION_COMPLETE.md +475 -0
  32. examples/qwen_vl/RL_VISION_TESTING.md +333 -0
  33. examples/qwen_vl/SDK_VISION_INTEGRATION.md +328 -0
  34. examples/qwen_vl/SETUP_COMPLETE.md +275 -0
  35. examples/qwen_vl/VISION_TESTS_COMPLETE.md +490 -0
  36. examples/qwen_vl/VLM_PIPELINE_COMPLETE.md +242 -0
  37. examples/qwen_vl/__init__.py +2 -0
  38. examples/qwen_vl/collect_data_via_cli.md +423 -0
  39. examples/qwen_vl/collect_vision_traces.py +368 -0
  40. examples/qwen_vl/configs/crafter_rl_vision_qwen3vl4b.toml +127 -0
  41. examples/qwen_vl/configs/crafter_vlm_sft_example.toml +60 -0
  42. examples/qwen_vl/configs/eval_gpt4o_mini_vision.toml +43 -0
  43. examples/qwen_vl/configs/eval_gpt4o_vision_proper.toml +29 -0
  44. examples/qwen_vl/configs/eval_gpt5nano_vision.toml +45 -0
  45. examples/qwen_vl/configs/eval_qwen2vl_vision.toml +44 -0
  46. examples/qwen_vl/configs/filter_qwen2vl_sft.toml +50 -0
  47. examples/qwen_vl/configs/filter_vision_sft.toml +53 -0
  48. examples/qwen_vl/configs/filter_vision_test.toml +8 -0
  49. examples/qwen_vl/configs/sft_qwen3_vl_2b_test.toml +54 -0
  50. examples/qwen_vl/crafter_gpt5nano_agent.py +308 -0
  51. examples/qwen_vl/crafter_qwen_vl_agent.py +300 -0
  52. examples/qwen_vl/run_vision_comparison.sh +62 -0
  53. examples/qwen_vl/run_vision_sft_pipeline.sh +175 -0
  54. examples/qwen_vl/test_image_validation.py +201 -0
  55. examples/qwen_vl/test_sft_vision_data.py +110 -0
  56. examples/rl/README.md +1 -1
  57. examples/rl/configs/eval_base_qwen.toml +17 -0
  58. examples/rl/configs/eval_rl_qwen.toml +13 -0
  59. examples/rl/configs/rl_from_base_qwen.toml +37 -0
  60. examples/rl/configs/rl_from_base_qwen17.toml +76 -0
  61. examples/rl/configs/rl_from_ft_qwen.toml +37 -0
  62. examples/rl/run_eval.py +436 -0
  63. examples/rl/run_rl_and_save.py +111 -0
  64. examples/rl/task_app/README.md +22 -0
  65. examples/rl/task_app/math_single_step.py +990 -0
  66. examples/rl/task_app/math_task_app.py +111 -0
  67. examples/sft/README.md +5 -5
  68. examples/sft/configs/crafter_fft_qwen0p6b.toml +4 -2
  69. examples/sft/configs/crafter_lora_qwen0p6b.toml +4 -3
  70. examples/sft/evaluate.py +4 -4
  71. examples/sft/export_dataset.py +7 -4
  72. examples/sft/generate_traces.py +2 -0
  73. examples/swe/task_app/README.md +1 -1
  74. examples/swe/task_app/grpo_swe_mini.py +1 -1
  75. examples/swe/task_app/grpo_swe_mini_task_app.py +0 -12
  76. examples/swe/task_app/hosted/envs/mini_swe/environment.py +13 -13
  77. examples/swe/task_app/hosted/policy_routes.py +0 -2
  78. examples/swe/task_app/hosted/rollout.py +2 -8
  79. examples/task_apps/IMAGE_ONLY_EVAL_QUICKSTART.md +258 -0
  80. examples/task_apps/crafter/CREATE_SFT_DATASET.md +273 -0
  81. examples/task_apps/crafter/EVAL_IMAGE_ONLY_RESULTS.md +152 -0
  82. examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +174 -0
  83. examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +268 -0
  84. examples/task_apps/crafter/QUERY_EXAMPLES.md +203 -0
  85. examples/task_apps/crafter/README_IMAGE_ONLY_EVAL.md +316 -0
  86. examples/task_apps/crafter/eval_image_only_gpt4o.toml +28 -0
  87. examples/task_apps/crafter/eval_text_only_groq_llama.toml +36 -0
  88. examples/task_apps/crafter/filter_sft_dataset.toml +16 -0
  89. examples/task_apps/crafter/task_app/__init__.py +3 -0
  90. examples/task_apps/crafter/task_app/grpo_crafter.py +309 -14
  91. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/environment.py +10 -0
  92. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +75 -4
  93. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +17 -2
  94. examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +55 -3
  95. examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +114 -32
  96. examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +127 -27
  97. examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +156 -0
  98. examples/task_apps/enron/__init__.py +1 -0
  99. examples/task_apps/enron/filter_sft.toml +5 -0
  100. examples/task_apps/enron/tests/__init__.py +2 -0
  101. examples/task_apps/enron/tests/integration/__init__.py +2 -0
  102. examples/task_apps/enron/tests/integration/test_enron_eval.py +2 -0
  103. examples/task_apps/enron/tests/unit/__init__.py +2 -0
  104. examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_COMPLETE.md +283 -0
  105. examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_STATUS.md +155 -0
  106. examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +415 -0
  107. examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +29 -0
  108. examples/task_apps/pokemon_red/pallet_town_rl_config.toml +2 -0
  109. examples/task_apps/pokemon_red/task_app.py +199 -6
  110. examples/task_apps/pokemon_red/test_pallet_town_rewards.py +2 -0
  111. examples/task_apps/sokoban/filter_sft.toml +5 -0
  112. examples/task_apps/sokoban/tests/__init__.py +2 -0
  113. examples/task_apps/sokoban/tests/integration/__init__.py +2 -0
  114. examples/task_apps/sokoban/tests/unit/__init__.py +2 -0
  115. examples/task_apps/verilog/eval_groq_qwen32b.toml +8 -4
  116. examples/task_apps/verilog/filter_sft.toml +5 -0
  117. examples/task_apps/verilog/task_app/grpo_verilog.py +258 -23
  118. examples/task_apps/verilog/tests/__init__.py +2 -0
  119. examples/task_apps/verilog/tests/integration/__init__.py +2 -0
  120. examples/task_apps/verilog/tests/integration/test_verilog_eval.py +2 -0
  121. examples/task_apps/verilog/tests/unit/__init__.py +2 -0
  122. examples/vlm/README.md +3 -3
  123. examples/vlm/configs/crafter_vlm_gpt4o.toml +2 -0
  124. examples/vlm/crafter_openai_vlm_agent.py +3 -5
  125. examples/vlm/filter_image_rows.py +1 -1
  126. examples/vlm/run_crafter_vlm_benchmark.py +2 -2
  127. examples/warming_up_to_rl/_utils.py +92 -0
  128. examples/warming_up_to_rl/analyze_trace_db.py +1 -1
  129. examples/warming_up_to_rl/configs/crafter_fft.toml +2 -0
  130. examples/warming_up_to_rl/configs/crafter_fft_4b.toml +2 -0
  131. examples/warming_up_to_rl/configs/eval_fft_qwen4b.toml +2 -0
  132. examples/warming_up_to_rl/configs/eval_groq_qwen32b.toml +2 -0
  133. examples/warming_up_to_rl/configs/eval_modal_qwen4b.toml +2 -1
  134. examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +2 -1
  135. examples/warming_up_to_rl/configs/rl_from_ft.toml +2 -0
  136. examples/warming_up_to_rl/export_trace_sft.py +174 -60
  137. examples/warming_up_to_rl/groq_test.py +2 -0
  138. examples/warming_up_to_rl/readme.md +63 -132
  139. examples/warming_up_to_rl/run_fft_and_save.py +1 -1
  140. examples/warming_up_to_rl/run_local_rollout.py +2 -0
  141. examples/warming_up_to_rl/run_local_rollout_modal.py +2 -0
  142. examples/warming_up_to_rl/run_local_rollout_parallel.py +2 -0
  143. examples/warming_up_to_rl/run_local_rollout_traced.py +2 -0
  144. examples/warming_up_to_rl/run_rl_and_save.py +1 -1
  145. examples/warming_up_to_rl/run_rollout_remote.py +2 -0
  146. examples/warming_up_to_rl/task_app/README.md +42 -0
  147. examples/warming_up_to_rl/task_app/grpo_crafter.py +696 -0
  148. examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +135 -0
  149. examples/warming_up_to_rl/task_app/synth_envs_hosted/README.md +173 -0
  150. examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +5 -0
  151. examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +143 -0
  152. examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +1226 -0
  153. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/__init__.py +1 -0
  154. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/__init__.py +6 -0
  155. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/app.py +1 -0
  156. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +522 -0
  157. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +478 -0
  158. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +108 -0
  159. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +305 -0
  160. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/tools.py +47 -0
  161. examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +204 -0
  162. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/__init__.py +5 -0
  163. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +618 -0
  164. examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +100 -0
  165. examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +1081 -0
  166. examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +195 -0
  167. examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +1861 -0
  168. examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/__init__.py +5 -0
  169. examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +211 -0
  170. examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +161 -0
  171. examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +137 -0
  172. examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +62 -0
  173. synth_ai/__init__.py +44 -30
  174. synth_ai/_utils/__init__.py +47 -0
  175. synth_ai/_utils/base_url.py +10 -0
  176. synth_ai/_utils/http.py +10 -0
  177. synth_ai/_utils/prompts.py +10 -0
  178. synth_ai/_utils/task_app_state.py +12 -0
  179. synth_ai/_utils/user_config.py +10 -0
  180. synth_ai/api/models/supported.py +145 -7
  181. synth_ai/api/train/__init__.py +13 -1
  182. synth_ai/api/train/cli.py +30 -7
  183. synth_ai/api/train/config_finder.py +18 -11
  184. synth_ai/api/train/env_resolver.py +13 -10
  185. synth_ai/cli/__init__.py +66 -49
  186. synth_ai/cli/_modal_wrapper.py +9 -6
  187. synth_ai/cli/_typer_patch.py +0 -2
  188. synth_ai/cli/_validate_task_app.py +22 -4
  189. synth_ai/cli/legacy_root_backup.py +3 -1
  190. synth_ai/cli/lib/__init__.py +10 -0
  191. synth_ai/cli/lib/task_app_discovery.py +7 -0
  192. synth_ai/cli/lib/task_app_env.py +518 -0
  193. synth_ai/cli/recent.py +1 -0
  194. synth_ai/cli/setup.py +266 -0
  195. synth_ai/cli/task_app_deploy.py +16 -0
  196. synth_ai/cli/task_app_list.py +25 -0
  197. synth_ai/cli/task_app_modal_serve.py +16 -0
  198. synth_ai/cli/task_app_serve.py +18 -0
  199. synth_ai/cli/task_apps.py +392 -141
  200. synth_ai/cli/train.py +18 -0
  201. synth_ai/cli/tui.py +62 -0
  202. synth_ai/demos/__init__.py +10 -0
  203. synth_ai/demos/core/__init__.py +28 -1
  204. synth_ai/demos/crafter/__init__.py +1 -0
  205. synth_ai/demos/crafter/crafter_fft_4b.toml +55 -0
  206. synth_ai/demos/crafter/grpo_crafter_task_app.py +185 -0
  207. synth_ai/demos/crafter/rl_from_base_qwen4b.toml +74 -0
  208. synth_ai/demos/demo_registry.py +176 -0
  209. synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +1 -1
  210. synth_ai/demos/math/__init__.py +1 -0
  211. synth_ai/demos/math/_common.py +16 -0
  212. synth_ai/demos/math/app.py +38 -0
  213. synth_ai/demos/math/config.toml +76 -0
  214. synth_ai/demos/math/deploy_modal.py +54 -0
  215. synth_ai/demos/math/modal_task_app.py +702 -0
  216. synth_ai/demos/math/task_app_entry.py +51 -0
  217. synth_ai/environments/environment/core.py +7 -1
  218. synth_ai/environments/examples/bandit/engine.py +0 -1
  219. synth_ai/environments/examples/bandit/environment.py +0 -1
  220. synth_ai/environments/examples/crafter_classic/environment.py +1 -1
  221. synth_ai/environments/examples/verilog/engine.py +76 -10
  222. synth_ai/environments/examples/wordle/environment.py +0 -1
  223. synth_ai/evals/base.py +16 -5
  224. synth_ai/evals/client.py +1 -1
  225. synth_ai/inference/client.py +1 -1
  226. synth_ai/learning/client.py +1 -1
  227. synth_ai/learning/health.py +1 -1
  228. synth_ai/learning/jobs.py +1 -1
  229. synth_ai/learning/rl/client.py +1 -1
  230. synth_ai/learning/rl/env_keys.py +1 -1
  231. synth_ai/learning/rl/secrets.py +1 -1
  232. synth_ai/learning/sft/client.py +1 -1
  233. synth_ai/learning/sft/data.py +407 -4
  234. synth_ai/learning/validators.py +4 -1
  235. synth_ai/task/__init__.py +11 -1
  236. synth_ai/task/apps/__init__.py +5 -2
  237. synth_ai/task/config.py +259 -0
  238. synth_ai/task/contracts.py +15 -2
  239. synth_ai/task/rubrics/__init__.py +4 -2
  240. synth_ai/task/rubrics/loaders.py +27 -4
  241. synth_ai/task/rubrics/scoring.py +3 -0
  242. synth_ai/task/rubrics.py +219 -0
  243. synth_ai/task/trace_correlation_helpers.py +328 -0
  244. synth_ai/task/tracing_utils.py +14 -3
  245. synth_ai/task/validators.py +145 -2
  246. synth_ai/tracing_v3/config.py +15 -13
  247. synth_ai/tracing_v3/constants.py +21 -0
  248. synth_ai/tracing_v3/db_config.py +3 -1
  249. synth_ai/tracing_v3/decorators.py +10 -7
  250. synth_ai/tracing_v3/session_tracer.py +10 -0
  251. synth_ai/tracing_v3/turso/daemon.py +2 -2
  252. synth_ai/tracing_v3/turso/native_manager.py +108 -77
  253. synth_ai/tracing_v3/utils.py +1 -1
  254. synth_ai/tui/__init__.py +5 -0
  255. synth_ai/tui/__main__.py +13 -0
  256. synth_ai/tui/cli/__init__.py +1 -0
  257. synth_ai/tui/cli/query_experiments.py +164 -0
  258. synth_ai/tui/cli/query_experiments_v3.py +164 -0
  259. synth_ai/tui/dashboard.py +911 -0
  260. synth_ai/utils/__init__.py +101 -0
  261. synth_ai/utils/base_url.py +94 -0
  262. synth_ai/utils/cli.py +131 -0
  263. synth_ai/utils/env.py +287 -0
  264. synth_ai/utils/http.py +169 -0
  265. synth_ai/utils/modal.py +308 -0
  266. synth_ai/utils/process.py +212 -0
  267. synth_ai/utils/prompts.py +39 -0
  268. synth_ai/utils/sqld.py +122 -0
  269. synth_ai/utils/task_app_discovery.py +882 -0
  270. synth_ai/utils/task_app_env.py +186 -0
  271. synth_ai/utils/task_app_state.py +318 -0
  272. synth_ai/utils/user_config.py +137 -0
  273. synth_ai/v0/config/__init__.py +1 -5
  274. synth_ai/v0/config/base_url.py +1 -7
  275. synth_ai/v0/tracing/config.py +1 -1
  276. synth_ai/v0/tracing/decorators.py +1 -1
  277. synth_ai/v0/tracing/upload.py +1 -1
  278. synth_ai/v0/tracing_v1/config.py +1 -1
  279. synth_ai/v0/tracing_v1/decorators.py +1 -1
  280. synth_ai/v0/tracing_v1/upload.py +1 -1
  281. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/METADATA +85 -31
  282. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/RECORD +286 -135
  283. synth_ai/cli/man.py +0 -106
  284. synth_ai/compound/cais.py +0 -0
  285. synth_ai/core/experiment.py +0 -13
  286. synth_ai/core/system.py +0 -15
  287. synth_ai/demo_registry.py +0 -295
  288. synth_ai/handshake.py +0 -109
  289. synth_ai/http.py +0 -26
  290. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/WHEEL +0 -0
  291. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/entry_points.txt +0 -0
  292. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/licenses/LICENSE +0 -0
  293. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/top_level.txt +0 -0
examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py (new file)
@@ -0,0 +1,1081 @@
+from __future__ import annotations
+
+import contextlib
+import logging
+import os
+from datetime import datetime
+from typing import Any
+
+from fastapi import APIRouter, HTTPException, Request
+from pydantic import BaseModel
+
+from .envs.crafter.policy import CrafterPolicy
+from .inference.openai_client import create_inference_client
+from .registry import registry
+from .storage.volume import storage
+
+# Token budgeting (shared logic with inference server)
+try:
+    from ..core.algorithms.gspo.inference.token_limits import (
+        clamp_effective_max_ctx,
+    )
+except Exception:  # pragma: no cover - defensive import path fallback
+    clamp_effective_max_ctx = None  # type: ignore
+
+try:
+    import tiktoken  # type: ignore
+except Exception:  # pragma: no cover
+    tiktoken = None  # type: ignore
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+
+class PolicyCreateRequest(BaseModel):
+    policy_name: str
+    config: dict[str, Any] = {}
+    parent_policy_id: str | None = None
+    rl_run_id: str
+    bound_env_id: str | None = None
+
+
+class PolicyCreateResponse(BaseModel):
+    policy_id: str
+
+
+class PolicyStepRequest(BaseModel):
+    policy_id: str
+    observation: dict[str, Any]
+    state: dict[str, Any] | None = None
+    metadata: dict[str, Any] | None = None
+    dry_run: bool = False
+
+
+class PolicyStepResponse(BaseModel):
+    tool_calls: list[dict[str, Any]]
+    meta: dict[str, Any]
+
+
+class PolicySnapshotRequest(BaseModel):
+    policy_id: str
+
+
+class PolicySnapshotResponse(BaseModel):
+    snapshot_id: str
+    path: str
+    rl_run_id: str
+    size: int
+
+
+class PolicyRestoreRequest(BaseModel):
+    snapshot_id: str
+
+
+class PolicyRestoreResponse(BaseModel):
+    policy_id: str
+
+
+class PolicyTerminateRequest(BaseModel):
+    policy_id: str
+
+
+class PolicyTerminateResponse(BaseModel):
+    ok: bool
+
+
+@router.post("/create", response_model=PolicyCreateResponse)
+async def create_policy(
+    request: PolicyCreateRequest,
+    req: Request,
+) -> PolicyCreateResponse:
+    """Create a new policy instance."""
+    try:
+        task_app = getattr(req.app.state, "task_app", None)
+
+        # Set defaults from TaskApp / environment if not provided
+        config = dict(request.config or {})
+        if "inference_url" not in config and task_app is not None:
+            base_url = getattr(task_app, "vllm_base_url", None)
+            if base_url:
+                config["inference_url"] = base_url
+        if "model" not in config and task_app is not None:
+            default_model = getattr(task_app, "default_model", None)
+            if default_model:
+                config["model"] = default_model
+        if "inference_url" not in config or "model" not in config:
+            raise HTTPException(
+                status_code=422,
+                detail="Policy configuration must include 'inference_url' and 'model'.",
+            )
+
+        # Create policy instance based on name
+        pname = request.policy_name.lower()
+        if pname in ["crafter-react", "crafter"]:
+            policy = CrafterPolicy(
+                inference_url=config["inference_url"],
+                model=config["model"],
+            )
+            await policy.initialize(config)
+        elif pname in ["wordle-react", "wordle"]:
+            try:
+                from .envs.wordle.policy import WordlePolicy
+            except Exception as e:
+                raise HTTPException(
+                    status_code=500, detail=f"Wordle policy unavailable: {e}"
+                ) from e
+
+            policy = WordlePolicy(
+                inference_url=config["inference_url"],
+                model=config["model"],
+                word_length=int(config["word_length"]),
+                max_guesses=int(config["max_guesses"]),
+            )
+            await policy.initialize(config)
+        elif pname in ["sokoban-react", "sokoban"]:
+            try:
+                from .envs.sokoban.policy import SokobanPolicy
+            except Exception as e:
+                raise HTTPException(
+                    status_code=500, detail=f"Sokoban policy unavailable: {e}"
+                ) from e
+
+            policy = SokobanPolicy(
+                inference_url=config["inference_url"],
+                model=config["model"],
+            )
+            await policy.initialize(config)
+        elif pname in ["math-react", "math"]:
+            try:
+                from .envs.math.policy import MathPolicy
+            except Exception as e:
+                raise HTTPException(status_code=500, detail=f"Math policy unavailable: {e}") from e
+
+            policy = MathPolicy(
+                inference_url=config["inference_url"],
+                model=config["model"],
+            )
+            await policy.initialize(config)
+        else:
+            raise HTTPException(
+                status_code=422,
+                detail=f"Unknown policy name: {request.policy_name}",
+            )
+
+        # Register in memory
+        policy_id = registry.register_policy(
+            policy=policy,
+            rl_run_id=request.rl_run_id,
+            bound_env_id=request.bound_env_id,
+        )
+
+        return PolicyCreateResponse(policy_id=policy_id)
+
+    except Exception as e:
+        logger.error(f"Failed to create policy: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@router.post("/step", response_model=PolicyStepResponse)
+async def step_policy(
+    request: PolicyStepRequest,
+    req: Request,
+) -> PolicyStepResponse:
+    """Execute a policy step to generate actions."""
+    handle = registry.get_policy(request.policy_id)
+    if not handle:
+        raise HTTPException(status_code=404, detail=f"Policy {request.policy_id} not found")
+
+    try:
+        task_app = req.app.state.task_app
+        policy = handle.policy
+        tracing_context = getattr(req.state, "rollout_tracing", None)
+
+        obs_text = request.observation
+        if isinstance(request.observation, dict):
+            if isinstance(policy, CrafterPolicy):
+                from .envs.crafter.shared import format_observation as format_crafter
+
+                obs_text = format_crafter(request.observation)
+            else:
+                formatted: str | None = None
+
+                # Wordle formatting
+                try:
+                    from .envs.wordle.policy import WordlePolicy
+                except Exception:
+                    wordle_policy_cls = None  # type: ignore[assignment]
+                else:
+                    wordle_policy_cls = WordlePolicy
+
+                if formatted is None and wordle_policy_cls is not None and isinstance(
+                    policy, wordle_policy_cls
+                ):
+                    from .envs.wordle.shared import format_observation_wordle
+
+                    # ASSERTION: Validate observation structure
+                    assert request.observation is not None, "request.observation cannot be None"
+                    assert isinstance(request.observation, dict), (
+                        f"request.observation must be dict, got {type(request.observation)}"
+                    )
+
+                    required_keys = {
+                        "text",
+                        "status",
+                        "remaining_guesses",
+                        "guesses",
+                        "feedback",
+                        "reward_last",
+                        "total_reward",
+                        "terminated",
+                    }
+                    missing_keys = required_keys - set(request.observation.keys())
+                    assert (
+                        not missing_keys
+                    ), f"Wordle observation missing required keys: {missing_keys}"
+
+                    print("DEBUG POLICY_ROUTES: About to format Wordle observation")
+                    print(f"DEBUG POLICY_ROUTES: Observation type: {type(request.observation)}")
+                    print(
+                        f"DEBUG POLICY_ROUTES: Observation keys: {list(request.observation.keys())}"
+                    )
+                    feedback_val = request.observation["feedback"]
+                    print(f"DEBUG POLICY_ROUTES: Observation feedback: {feedback_val}")
+                    print(
+                        f"DEBUG POLICY_ROUTES: Observation guesses: {request.observation['guesses']}"
+                    )
+                    print(
+                        "DEBUG POLICY_ROUTES: Observation text length: "
+                        f"{len(request.observation['text'])}"
+                    )
+
+                    guesses = request.observation["guesses"]
+                    feedback = request.observation["feedback"]
+                    assert isinstance(guesses, list), f"guesses must be list, got {type(guesses)}"
+                    assert isinstance(
+                        feedback, list
+                    ), f"feedback must be list, got {type(feedback)}"
+
+                    formatted = format_observation_wordle(request.observation)
+
+                    assert isinstance(formatted, str), (
+                        f"obs_text must be string, got {type(formatted)}"
+                    )
+                    assert len(formatted) > 0, "obs_text cannot be empty"
+                    assert "WORDLE" in formatted, "obs_text must contain 'WORDLE' header"
+                    assert "Respond with a single tool call" in formatted, (
+                        "obs_text must contain instruction text"
+                    )
+
+                    print(
+                        f"DEBUG POLICY_ROUTES: Formatted obs_text length: {len(formatted)}"
+                    )
+                    print(
+                        "DEBUG POLICY_ROUTES: Formatted obs_text contains 🟩: "
+                        f"{'🟩' in formatted}"
+                    )
+                    print(
+                        "DEBUG POLICY_ROUTES: Formatted obs_text contains 🟨: "
+                        f"{'🟨' in formatted}"
+                    )
+                    print(
+                        "DEBUG POLICY_ROUTES: Formatted obs_text contains ⬛: "
+                        f"{'⬛' in formatted}"
+                    )
+                    print(
+                        "DEBUG POLICY_ROUTES: Formatted obs_text first 200 chars: "
+                        f"{formatted[:200]}"
+                    )
+
+                # Sokoban formatting
+                try:
+                    from .envs.sokoban.policy import SokobanPolicy
+                except Exception:
+                    sokoban_policy_cls = None  # type: ignore[assignment]
+                else:
+                    sokoban_policy_cls = SokobanPolicy
+
+                if formatted is None and sokoban_policy_cls is not None and isinstance(
+                    policy, sokoban_policy_cls
+                ):
+                    from .envs.sokoban.shared import format_observation_sokoban
+
+                    formatted = format_observation_sokoban(request.observation)
+
+                # Math formatting
+                try:
+                    from .envs.math.policy import MathPolicy
+                except Exception:
+                    math_policy_cls = None  # type: ignore[assignment]
+                else:
+                    math_policy_cls = MathPolicy
+
+                if formatted is None and math_policy_cls is not None and isinstance(
+                    policy, math_policy_cls
+                ):
+                    try:
+                        formatted = str(
+                            request.observation.get("problem_text") or request.observation
+                        )
+                    except Exception:
+                        formatted = str(request.observation)
+
+                if formatted is None:
+                    formatted = str(request.observation)
+
+                obs_text = formatted
+
+        # Merge metadata with raw observation for multimodal policies
+        step_metadata: dict[str, Any] = dict(request.metadata or {})
+        step_metadata["raw_observation"] = request.observation
+
+        # Execute policy step to get inference request
+        tool_calls, meta = await policy.step(
+            observation_text=obs_text,
+            state=request.state,
+            metadata=step_metadata,
+        )
+        # Compact tool call summary
+        with contextlib.suppress(Exception):
+            _summary: list[dict[str, Any]] = []
+            _tc = tool_calls or []
+            for _item in (_tc if isinstance(_tc, list) else []):
+                if isinstance(_item, dict):
+                    _tool = _item.get("tool")
+                    _args = _item.get("args")
+                    _keys = list(_args.keys()) if isinstance(_args, dict) else []
+                    _summary.append({"tool": _tool, "args_keys": _keys})
+            logger.info(
+                "POLICY_STEP: tool_calls=%d summary=%s",
+                len(_tc),
+                _summary,
+            )
+
+        # If not dry run, perform inference
+        if not request.dry_run and "inference_request" in meta:
+            # CRITICAL: Validate that the inference request contains the correct prompts for the policy
+            inf_req = meta["inference_request"]
+            msgs = inf_req["messages"]
+            model_name = inf_req.get("model") or getattr(policy, "model", None) or ""
+            if msgs and len(msgs) > 0 and msgs[0]["role"] == "system":
+                sys_text = msgs[0]["content"]
+                policy_name = getattr(policy, "name", "") or type(policy).__name__.lower()
+
+                # Assert environment-specific prompts match the policy
+                if policy_name in ("wordle-react", "wordle"):
+                    if "Wordle" not in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Wordle policy {policy_name} received system prompt without 'Wordle' keyword: {sys_text[:200]}..."
+                        )
+                    if "Crafter" in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Wordle policy {policy_name} received Crafter system prompt: {sys_text[:200]}..."
+                        )
+
+                elif policy_name in ("crafter-react", "crafter") or isinstance(
+                    policy, CrafterPolicy
+                ):
+                    if "Crafter" not in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Crafter policy {policy_name} received system prompt without 'Crafter' keyword: {sys_text[:200]}..."
+                        )
+                    if "Wordle" in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Crafter policy {policy_name} received Wordle system prompt: {sys_text[:200]}..."
+                        )
+                elif policy_name in ("sokoban-react", "sokoban"):
+                    if "Sokoban" not in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Sokoban policy {policy_name} received system prompt without 'Sokoban' keyword: {sys_text[:200]}..."
+                        )
+                    if "Crafter" in sys_text or "Wordle" in sys_text:
+                        raise ValueError(
+                            f"PROMPT MISMATCH: Sokoban policy {policy_name} received wrong environment system prompt: {sys_text[:200]}..."
+                        )
+
+                logger.info(
+                    f"✅ PROMPT VALIDATION: {policy_name} policy has correct system prompt containing expected environment keywords"
+                )
+            else:
+                logger.warning(
+                    f"⚠️ PROMPT VALIDATION: No system message found in inference request for policy {getattr(policy, 'name', type(policy).__name__)}"
+                )
+
+            # Emit full system/user prompts for observability (no secrets included)
+            try:
+
+                def _as_text(content: object) -> str:
+                    if isinstance(content, str):
+                        return content
+                    if isinstance(content, list):
+                        # Concatenate any dict segments that resemble OpenAI content parts
+                        parts: list[str] = []
+                        for seg in content:
+                            try:
+                                if isinstance(seg, dict):
+                                    txt = seg.get("text") or seg.get("content") or ""
+                                    if isinstance(txt, str):
+                                        parts.append(txt)
+                            except Exception:
+                                continue
+                        return "".join(parts)
+                    return str(content)
+
+                system_prompt_records: list[dict[str, Any]] = []
+                user_prompt_records: list[dict[str, Any]] = []
+                for message in msgs:
+                    role = message.get("role")
+                    raw_content = message.get("content")
+                    content = _as_text(raw_content)
+                    record = {"role": role, "text": content, "content": raw_content}
+                    if role == "system":
+                        system_prompt_records.append(record)
+                    elif role == "user":
+                        user_prompt_records.append(record)
+
+                logger.info(
+                    "PROMPTS: system_msgs=%d user_msgs=%d last_user_chars=%d",
+                    len(system_prompt_records),
+                    len(user_prompt_records),
+                    len(user_prompt_records[-1].get("text", "")) if user_prompt_records else 0,
+                )
+
+                if system_prompt_records:
+                    logger.info("PROMPT_DUMP_SYSTEM_BEGIN")
+                    for idx, rec in enumerate(system_prompt_records):
+                        smsg = rec.get("text", "")
+                        logger.info(f"SYSTEM[{idx}]\n{smsg}")
+                    logger.info("PROMPT_DUMP_SYSTEM_END")
+
+                if user_prompt_records:
+                    logger.info("PROMPT_DUMP_USER_BEGIN")
+                    for idx, rec in enumerate(user_prompt_records):
+                        umsg = rec.get("text", "")
+                        logger.info(f"USER[{idx}]\n{umsg}")
+                    logger.info("PROMPT_DUMP_USER_END")
+                    # Print concise preview for visibility in standard logs
+                    with contextlib.suppress(Exception):
+                        last_user = (
+                            user_prompt_records[-1].get("text", "")
+                            if user_prompt_records
+                            else ""
+                        )
+                        print(f"[task:crafter] user prompt: {last_user}", flush=True)
+            except Exception as e:
+                logger.warning(f"PROMPT_DUMP_FAILED: {e}")
+
+            if tracing_context is not None:
+                try:
+                    await tracing_context.record_policy_prompts(
+                        system_prompt_records, user_prompt_records
+                    )
+                except Exception as exc:
+                    logger.debug(f"TRACING_PROMPTS_FAIL: {exc}")
+
+            # Create inference client (choose API key by target provider)
+            # Require inference_url to be set explicitly by the rollout policy config.
+            target_url = (
+                meta.get("inference_url")
+                or getattr(policy, "inference_url", None)
+                or getattr(task_app, "vllm_base_url", None)
+            )
+
+            # Ensure meta carries the final target URL for downstream logging/clients
+            with contextlib.suppress(Exception):
+                meta["inference_url"] = target_url
+
+            # Select API key based on resolved target URL
+            api_key_override = None
+            try:
+                import os as _os
+
+                if isinstance(target_url, str):
+                    low_url = target_url.lower()
+                    # Proxy endpoints should not receive a bearer; the server-side proxy holds the vendor key
+                    if "/proxy/groq" in low_url or "/proxy/openai" in low_url:
+                        api_key_override = None
+                    elif "openai.com" in low_url:
+                        api_key_override = _os.getenv("OPENAI_API_KEY") or getattr(
+                            task_app, "openai_api_key", None
+                        )
+                    elif "groq.com" in low_url or "/proxy/groq" in low_url:
+                        api_key_override = _os.getenv("GROQ_API_KEY")
+                    else:
+                        api_key_override = (
+                            _os.getenv("SYNTH_API_KEY")
+                            or _os.getenv("OPENAI_API_KEY")
+                            or getattr(task_app, "openai_api_key", None)
+                        )
+                else:
+                    api_key_override = (
+                        _os.getenv("SYNTH_API_KEY")
+                        or _os.getenv("OPENAI_API_KEY")
+                        or getattr(task_app, "openai_api_key", None)
+                    )
+            except Exception:
+                api_key_override = None
+
+            if api_key_override:
+                try:
+                    masked = f"{api_key_override[:6]}…{api_key_override[-4:]}"
+                except Exception:
+                    masked = "<masked>"
+                logger.debug(f"INFERENCE_AUTH: Using bearer key {masked}")
+            else:
+                logger.warning(
+                    "INFERENCE_AUTH: No API key resolved for inference request; downstream may 401"
+                )
+
+            client = create_inference_client(task_app, api_key=api_key_override)
+
+            # Add policy identification header for observability
+            policy_name = getattr(policy, "name", "") or type(policy).__name__.lower()
+            extra_headers = {"X-Policy-Name": policy_name}
+
+            # Apply input truncation to avoid 422 from inference server
+            try:
+                model_name = inf_req.get("model") or getattr(policy, "model", None) or ""
+                env_max_ctx = None
+                try:
+                    _env_max = int(os.getenv("CHAT_MAX_MODEL_LEN", "0") or 0)
+                    env_max_ctx = _env_max if _env_max > 0 else None
+                except Exception:
+                    env_max_ctx = None
+                # Compute effective max context and safety margin
+                eff_ctx = None
+                if clamp_effective_max_ctx is not None:
+                    eff_ctx = clamp_effective_max_ctx(
+                        model_name=model_name,
+                        configured_max_model_len=None,
+                        env_max_model_len=env_max_ctx,
+                    )
+                # Hard lower-only chat input cap if provided
+                try:
+                    hard_input_cap = int(os.getenv("CHAT_MAX_INPUT_TOKENS", "0") or 0)
+                    hard_input_cap = hard_input_cap if hard_input_cap > 0 else None
+                except Exception:
+                    hard_input_cap = None
+                try:
+                    safety_margin = int(os.getenv("CHAT_BUDGET_SAFETY", "64").strip() or 64)
+                except Exception:
+                    safety_margin = 64
+
+                # Determine budget
+                budget = None
+                if isinstance(eff_ctx, int) and eff_ctx > 0:
+                    budget = max(256, eff_ctx - safety_margin)
+                if isinstance(hard_input_cap, int) and hard_input_cap > 0:
+                    budget = min(budget, hard_input_cap) if budget is not None else hard_input_cap
+
+                if budget is not None and budget > 0 and isinstance(msgs, list):
+                    # Choose tokenizer
+                    enc = None
+                    if tiktoken is not None:
+                        try:
+                            if model_name:
+                                enc = tiktoken.encoding_for_model(model_name)
+                            else:
+                                enc = tiktoken.get_encoding("cl100k_base")
+                        except Exception:
+                            try:
+                                enc = tiktoken.get_encoding("cl100k_base")
+                            except Exception:
+                                enc = None
+
+                    def _content_to_text(content: object) -> str:
+                        if isinstance(content, str):
+                            return content
+                        if isinstance(content, list):
+                            parts: list[str] = []
+                            for seg in content:
+                                try:
+                                    if isinstance(seg, dict):
+                                        txt = seg.get("text") or seg.get("content") or ""
+                                        if isinstance(txt, str):
+                                            parts.append(txt)
+                                except Exception:
+                                    continue
+                            return "".join(parts)
+                        try:
+                            return str(content)
+                        except Exception:
+                            return ""
+
+                    def _count_tokens(text: str) -> int:
+                        if enc is None:
+                            # Fall back to character count heuristic (~4 chars per token)
+                            try:
+                                return max(1, int(len(text) / 4))
+                            except Exception:
+                                return len(text)
+                        try:
+                            return len(enc.encode(text))
+                        except Exception:
+                            return max(1, int(len(text) / 4))
+
+                    def _count_messages_tokens(messages: list[dict[str, Any]]) -> int:
+                        total = 0
+                        for m in messages:
+                            total += _count_tokens(_content_to_text(m.get("content")))
+                        return total
+
+                    def _truncate_messages_to_budget(
+                        messages: list[dict[str, Any]],
+                        max_tokens: int,
+                    ) -> tuple[list[dict[str, Any]], int, int, int]:
+                        before = _count_messages_tokens(messages)
+                        if before <= max_tokens:
+                            return messages, before, before, len(messages)
+                        # Always try to preserve the first system message if present
+                        system_msg = None
+                        start_idx = 0
+                        if messages and messages[0].get("role") == "system":
+                            system_msg = messages[0]
+                            start_idx = 1
+                        kept_rev: list[dict[str, Any]] = []
+                        total = _count_messages_tokens([system_msg] if system_msg else [])
+                        # Walk from the end keeping most recent messages
+                        for m in reversed(messages[start_idx:]):
+                            t = _count_tokens(_content_to_text(m.get("content")))
+                            if total + t <= max_tokens:
+                                kept_rev.append(m)
+                                total += t
+                            else:
+                                # Try to keep a truncated version of this message if we have some budget left
+                                remaining = max_tokens - total
+                                if remaining > 16:  # keep at least a little context
+                                    txt = _content_to_text(m.get("content"))
+                                    # Binary search-ish trim by tokens
+                                    low, high = 0, len(txt)
+                                    best = None
+                                    while low <= high:
+                                        mid = (low + high) // 2
+                                        candidate = txt[-mid:]
+                                        if _count_tokens(candidate) <= remaining:
+                                            best = candidate
+                                            low = mid + 1
+                                        else:
+                                            high = mid - 1
+                                    if best is not None and best:
+                                        m2 = dict(m)
+                                        m2["content"] = best
+                                        kept_rev.append(m2)
+                                        total += _count_tokens(best)
+                                break
+                        kept = list(reversed(kept_rev))
+                        if system_msg is not None:
+                            kept = [system_msg] + kept
+                        after = _count_messages_tokens(kept)
+                        return kept, before, after, len(kept)
+
+                    new_msgs, before_toks, after_toks, kept_count = _truncate_messages_to_budget(
+                        msgs, int(budget)
+                    )
+                    if new_msgs is not msgs:
+                        inf_req["messages"] = new_msgs
+                        with contextlib.suppress(Exception):
+                            logger.info(
+                                {
+                                    "chat_truncated": True,
+                                    "token_budget": int(budget),
+                                    "before_tokens": int(before_toks),
+                                    "after_tokens": int(after_toks),
+                                    "kept_msgs": int(kept_count),
+                                }
+                            )
+            except Exception as _trunc_e:
+                logger.warning(f"CHAT_TRUNCATION_FAILED: {type(_trunc_e).__name__}: {_trunc_e}")
+
+            # Formal assertion: If tools are expected, ensure tool_choice and tools are set
+            if policy_name in (
+                "wordle-react",
+                "sokoban-react",
+                "crafter-react",
+            ) and getattr(policy, "use_tools", True):
+                req_tools = meta["inference_request"]["tools"]
+                req_tool_choice = meta["inference_request"]["tool_choice"]
+                req_stop_after = meta["inference_request"]["stop_after_tool_calls"]
+                logger.info(
+                    f"TOOLCALL_CONFIG: policy={policy_name} tools_present={bool(req_tools)} tool_choice={req_tool_choice} stop_after={req_stop_after}"
+                )
+                if not req_tools or req_tool_choice != "required":
+                    raise HTTPException(
+                        status_code=500,
+                        detail=f"TOOLCALL_ASSERTION_FAIL: Missing tools or tool_choice!=required for policy {policy_name}",
+                    )
+
+            # Call inference service with retries for Flash cold-start (503)
+            import time as _t
+
+            # Prompt diagnostics before sending to inference: build chat template locally,
+            # count tokens, and log the first 10k tokens if oversized. Also stash a
+            # compact preview in meta so the trainer can surface it.
+            with contextlib.suppress(Exception):
+                req_for_diag = meta.get("inference_request", {})
+                model_for_diag = req_for_diag.get("model") or getattr(policy, "model", None) or ""
+                messages_for_diag = req_for_diag.get("messages") or []
+                if model_for_diag and messages_for_diag:
+                    from transformers import AutoTokenizer
+
+                    tok = AutoTokenizer.from_pretrained(model_for_diag)
+                    prompt_preview = tok.apply_chat_template(
+                        messages_for_diag,
+                        add_generation_prompt=True,
+                        tokenize=False,
+                    )
+                    ids = tok.encode(prompt_preview, add_special_tokens=False)
+                    max_len = getattr(tok, "model_max_length", None)
+                    over_limit = False
+                    with contextlib.suppress(Exception):
+                        over_limit = (
+                            isinstance(max_len, int) and max_len > 0 and len(ids) > int(max_len)
+                        )
+                    if over_limit or len(ids) > 10000:
+                        preview_ids = ids[:10000]
+                        preview_text = tok.decode(
+                            preview_ids,
+                            skip_special_tokens=False,
+                        )
+                        with contextlib.suppress(Exception):
+                            logger.warning(
+                                {
+                                    "prompt_token_overflow_local": True,
+                                    "model": str(model_for_diag),
+                                    "token_count": int(len(ids)),
+                                    "model_max_length": int(max_len)
+                                    if isinstance(max_len, int)
+                                    else None,
+                                    "preview_tokens_logged": int(len(preview_ids)),
+                                    "prompt_preview_first_10k_tokens": preview_text,
+                                }
+                            )
+                        with contextlib.suppress(Exception):
+                            meta["prompt_debug"] = {
+                                "token_count": int(len(ids)),
+                                "model_max_length": int(max_len)
+                                if isinstance(max_len, int)
+                                else None,
+                                "preview_first_10k_tokens": preview_text,
+                            }
+
+            # Emit the exact prompt/messages and tools before calling the LLM (bounded preview)
+            with contextlib.suppress(Exception):
+                req_dump = meta.get("inference_request", {})
+                msgs = req_dump.get("messages")
+                tools_dump = req_dump.get("tools")
+                if isinstance(msgs, list):
+                    # Print compact messages structure and tool schema with bounded length
+                    import json as _json
+
+                    msgs_compact = _json.dumps(msgs)[:20000]
+                    tools_compact = (
+                        _json.dumps(tools_dump)[:8000] if tools_dump is not None else None
+                    )
+                    print(
+                        {
+                            "llm.call": True,
+                            "policy": str(policy_name),
+                            "messages_preview": msgs_compact,
+                            "tools_preview": tools_compact,
+                        }
+                    )
+
+            # Normalize request for non-OpenAI endpoints (strict schemas)
+            with contextlib.suppress(Exception):
+                base = str(target_url or "")
+                is_openai_dotcom = "openai.com" in base.lower()
+                if not is_openai_dotcom:
+                    req_body = meta.get("inference_request", {})
+                    if isinstance(req_body, dict):
+                        # Force structured tool_choice if a bare "required" is present
+                        if req_body.get("tool_choice") == "required":
+                            func_name = "interact_many"
+                            with contextlib.suppress(Exception):
+                                tools_arr = req_body.get("tools") or []
+                                if isinstance(tools_arr, list) and tools_arr:
+                                    f = (
+                                        tools_arr[0].get("function")
+                                        if isinstance(tools_arr[0], dict)
+                                        else None
+                                    )
+                                    cand = (f or {}).get("name") if isinstance(f, dict) else None
+                                    if isinstance(cand, str) and cand:
+                                        func_name = cand
+                            req_body["tool_choice"] = {
+                                "type": "function",
+                                "function": {"name": func_name},
+                            }
+                            req_body["parallel_tool_calls"] = False
+                            req_body.setdefault("function_call", {"name": func_name})
+                        # Inject extra_body for thinking controls expected by Modal service
+                        with contextlib.suppress(Exception):
+                            tb = req_body.get("thinking_budget")
+                            tm = str(req_body.get("thinking_mode") or "").lower()
+                            enable_thinking = bool(tb) or tm == "think"
+                            extra = dict(req_body.get("extra_body") or {})
+                            chat_kwargs = dict(extra.get("chat_template_kwargs") or {})
+                            if enable_thinking:
+                                chat_kwargs["enable_thinking"] = True
+                            if isinstance(tb, int | float | str) and str(tb).strip():
+                                with contextlib.suppress(Exception):
+                                    chat_kwargs["thinking_budget"] = int(tb)
+                            if chat_kwargs:
+                                extra["chat_template_kwargs"] = chat_kwargs
+                            # Ensure stop_after_tool_calls honored via extra_body for stricter servers
+                            extra.setdefault("stop_after_tool_calls", 1)
+                            if extra:
+                                req_body["extra_body"] = extra
+                        # Provide a conservative default temperature if missing
+                        if "temperature" not in req_body:
+                            req_body["temperature"] = 0.1
+                        meta["inference_request"] = req_body
+
+            # Strip image parts: Crafter policy currently only uses text prompts.
+            # Some providers reject image_url payloads entirely, so always flatten to plain text.
+            req_body2 = meta.get("inference_request", {})
+            if isinstance(req_body2, dict):
+                msgs = req_body2.get("messages")
+                if isinstance(msgs, list):
+                    new_msgs = []
+                    changed = False
+                    for m in msgs:
+                        try:
+                            if isinstance(m, dict):
+                                content = m.get("content")
+                                if isinstance(content, list):
+                                    parts: list[str] = []
+                                    for seg in content:
+                                        if isinstance(seg, dict):
+                                            txt = seg.get("text") or seg.get("content")
+                                            if isinstance(txt, str) and txt:
+                                                parts.append(txt)
+                                    m2 = dict(m)
+                                    m2["content"] = "\n".join(parts)
+                                    new_msgs.append(m2)
+                                    changed = True
+                                else:
+                                    new_msgs.append(m)
+                            else:
+                                new_msgs.append(m)
+                        except Exception:
+                            new_msgs.append(m)
+                    if changed:
+                        req_body2["messages"] = new_msgs
+                        meta["inference_request"] = req_body2
+
+            _t_start = _t.time()
+            call_started_at = datetime.utcnow()
+            inference_response = await client.generate_with_retries(
+                request=meta["inference_request"],
+                base_url=meta["inference_url"],
+                max_retries=12,
+                backoff_factor=2.0,
+                extra_headers=extra_headers,
+            )
+            meta["inference_ms"] = int((_t.time() - _t_start) * 1000)
+            call_completed_at = datetime.utcnow()
+
+            provider_url = str(meta.get("inference_url") or "")
+            low_url = provider_url.lower()
+            if "groq" in low_url:
+                provider_name = "groq"
+            elif "openai" in low_url:
+                provider_name = "openai"
+            else:
+                provider_name = "custom"
+
+            # Parse response to tool calls
+            tool_calls = policy.parse_response_to_tool_calls(
+                response=inference_response,
+                use_tools=getattr(policy, "use_tools", True),
+            )
+
+            # Debug logging (echo tool calls)
+            if not tool_calls:
+                # Structured error log with small preview; avoid dumping full response repeatedly
+                preview = str(inference_response)[:400]
+                logger.error(
+                    f"TOOLCALL_PARSE_FAIL: policy={policy_name} parsed=0 preview={preview}"
+                )
+            else:
+                try:
+                    import json as _json
+
+                    print(
+                        {
+                            "tool_calls_parsed": int(len(tool_calls)),
+                            "tool_calls_preview": _json.dumps(tool_calls)[:20000],
+                        }
+                    )
+                except Exception:
+                    logger.info(f"Parsed {len(tool_calls)} tool calls: {tool_calls}")
+
+            # Add response to metadata
+            # Parse tool calls from model response using policy-specific parser
+            try:
+                if hasattr(policy, "parse_response_to_tool_calls"):
+                    parsed = policy.parse_response_to_tool_calls(
+                        inference_response, getattr(policy, "use_tools", True)
+                    )
+                else:
+                    parsed = policy.parse_model_response(inference_response, request.observation)
+                # Replace tool_calls with parsed result
+                if isinstance(parsed, list):
+                    tool_calls = parsed
+                with contextlib.suppress(Exception):
+                    logger.info(
+                        "TOOLCALL_PARSE: parsed=%d has_tools=%s example=%r",
+                        len(tool_calls) if isinstance(tool_calls, list) else -1,
+                        bool(getattr(policy, "use_tools", True)),
+                        (tool_calls[0] if isinstance(tool_calls, list) and tool_calls else None),
+                    )
+            except Exception as _pe:
+                logger.warning(f"Failed to parse tool calls: {str(_pe)}")
+            # Attach raw response + usage for observability
+            meta["raw_response"] = inference_response
+            if "usage" in inference_response:
+                meta["usage"] = inference_response["usage"]
+
+            if tracing_context is not None:
+                try:
+                    await tracing_context.record_llm_call(
+                        inference_request=meta["inference_request"],
+                        inference_response=inference_response,
+                        tool_calls=tool_calls,
+                        provider=provider_name,
+                        model_name=model_name,
+                        started_at=call_started_at,
+                        completed_at=call_completed_at,
+                        latency_ms=meta.get("inference_ms"),
+                    )
+                except Exception as exc:
+                    logger.debug(f"TRACING_LLM_FAIL: {exc}")
+
+        return PolicyStepResponse(
+            tool_calls=tool_calls,
+            meta=meta,
+        )
+
+    except Exception as e:
+        logger.error(f"Failed to step policy {request.policy_id}: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@router.post("/snapshot", response_model=PolicySnapshotResponse)
+async def snapshot_policy(request: PolicySnapshotRequest) -> PolicySnapshotResponse:
+    """Create a snapshot of the policy state."""
+    handle = registry.get_policy(request.policy_id)
+    if not handle:
+        raise HTTPException(status_code=404, detail=f"Policy {request.policy_id} not found")
+
+    try:
+        # Serialize policy state
+        state_dict = await handle.policy.serialize()
+
+        # Save to volume
+        snapshot_id, path, size = storage.save_snapshot(
+            rl_run_id=handle.rl_run_id,
+            kind="policy",
+            state_dict=state_dict,
+        )
+
+        # Register snapshot
+        registry.register_snapshot(
+            kind="policy",
+            rl_run_id=handle.rl_run_id,
+            size=size,
+            path=path,
+        )
+
+        return PolicySnapshotResponse(
+            snapshot_id=snapshot_id,
+            path=path,
+            rl_run_id=handle.rl_run_id,
+            size=size,
+        )
+
+    except Exception as e:
+        logger.error(f"Failed to snapshot policy {request.policy_id}: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@router.post("/restore", response_model=PolicyRestoreResponse)
+async def restore_policy(request: PolicyRestoreRequest) -> PolicyRestoreResponse:
+    """Restore a policy from a snapshot."""
+    snapshot = registry.get_snapshot(request.snapshot_id)
+    if not snapshot:
+        raise HTTPException(status_code=404, detail=f"Snapshot {request.snapshot_id} not found")
+
+    if snapshot.kind != "policy":
+        raise HTTPException(
+            status_code=422,
+            detail=f"Snapshot {request.snapshot_id} is not a policy snapshot",
+        )
+
+    try:
+        # Load snapshot from volume
+        state_dict, meta = storage.load_snapshot(
+            rl_run_id=snapshot.rl_run_id,
+            kind="policy",
+            snapshot_id=request.snapshot_id,
+        )
+
+        # Recreate policy
+        policy_name = state_dict["name"]
+        low = policy_name.lower()
+        if low in ["crafter-react", "crafter"]:
+            policy = await CrafterPolicy.deserialize(state_dict)
+        elif low in ["wordle-react", "wordle"]:
+            try:
+                from .envs.wordle.policy import WordlePolicy
+            except Exception as e:
+                raise HTTPException(
+                    status_code=500, detail=f"Wordle policy unavailable: {e}"
+                ) from e
+            policy = await WordlePolicy.deserialize(state_dict)
+        elif low in ["sokoban-react", "sokoban"]:
+            try:
+                from .envs.sokoban.policy import SokobanPolicy
+            except Exception as e:
+                raise HTTPException(
+                    status_code=500, detail=f"Sokoban policy unavailable: {e}"
+                ) from e
+            policy = await SokobanPolicy.deserialize(state_dict)
+        else:
+            raise HTTPException(
+                status_code=422,
+                detail=f"Unknown policy name in snapshot: {policy_name}",
+            )
+
+        # Register new instance
+        policy_id = registry.register_policy(
+            policy=policy,
+            rl_run_id=snapshot.rl_run_id,
+        )
+
+        return PolicyRestoreResponse(policy_id=policy_id)
+
+    except Exception as e:
+        logger.error(f"Failed to restore policy from snapshot {request.snapshot_id}: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
+
+
+@router.post("/terminate", response_model=PolicyTerminateResponse)
+async def terminate_policy(request: PolicyTerminateRequest) -> PolicyTerminateResponse:
+    """Terminate a policy and clean up resources."""
+    handle = registry.get_policy(request.policy_id)
+    if not handle:
+        raise HTTPException(status_code=404, detail=f"Policy {request.policy_id} not found")
+
+    try:
+        # Call terminate on the policy
+        await handle.policy.terminate()
+
+        # Remove from registry
+        registry.remove_policy(request.policy_id)
+
+        return PolicyTerminateResponse(ok=True)
+
+    except Exception as e:
+        logger.error(f"Failed to terminate policy {request.policy_id}: {e}")
+        raise HTTPException(status_code=500, detail=str(e)) from e
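
Usage note (not part of the diff): the hunk above defines the task app's policy lifecycle endpoints (/create, /step, /snapshot, /restore, /terminate). As a minimal orientation sketch, a rollout client could exercise them roughly as follows, assuming the router is mounted under a /policy prefix on a locally served task app and using httpx as the HTTP client; the host, port, prefix, model name, and payload values here are illustrative assumptions, not values taken from the release.

    # Hypothetical client sketch; endpoint prefix, host, and payloads are assumptions.
    import asyncio

    import httpx

    BASE = "http://localhost:8001/policy"  # assumed mount point of this router

    async def main() -> None:
        async with httpx.AsyncClient(timeout=60.0) as client:
            # POST /create: PolicyCreateRequest -> PolicyCreateResponse
            created = await client.post(f"{BASE}/create", json={
                "policy_name": "crafter-react",
                "config": {
                    "inference_url": "http://localhost:8000",  # illustrative
                    "model": "Qwen/Qwen3-4B",                  # illustrative
                },
                "rl_run_id": "run-123",
            })
            policy_id = created.json()["policy_id"]

            # POST /step with dry_run=True: returns the prepared inference
            # request in meta without calling the inference service.
            stepped = await client.post(f"{BASE}/step", json={
                "policy_id": policy_id,
                "observation": {"text": "example observation"},
                "dry_run": True,
            })
            print(stepped.json()["meta"].get("inference_request", {}).get("model"))

            # POST /terminate: release the in-memory policy handle.
            await client.post(f"{BASE}/terminate", json={"policy_id": policy_id})

    asyncio.run(main())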