synth-ai 0.2.13.dev2__py3-none-any.whl → 0.2.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of synth-ai might be problematic. Click here for more details.

Files changed (293)
  1. examples/README.md +1 -0
  2. examples/multi_step/SFT_README.md +147 -0
  3. examples/multi_step/configs/README_verilog_rl.md +77 -0
  4. examples/multi_step/configs/VERILOG_REWARDS.md +90 -0
  5. examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +183 -0
  6. examples/multi_step/configs/crafter_eval_synth_qwen4b.toml +35 -0
  7. examples/multi_step/configs/crafter_eval_text_only_groq_qwen32b.toml +36 -0
  8. examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +12 -11
  9. examples/multi_step/configs/crafter_sft_qwen30b_lora.toml +62 -0
  10. examples/multi_step/configs/crafter_synth_backend.md +40 -0
  11. examples/multi_step/configs/verilog_eval_groq_qwen32b.toml +31 -0
  12. examples/multi_step/configs/verilog_eval_synth_qwen8b.toml +33 -0
  13. examples/multi_step/configs/verilog_rl_lora.toml +190 -0
  14. examples/multi_step/convert_traces_to_sft.py +84 -0
  15. examples/multi_step/judges/crafter_backend_judge.py +220 -0
  16. examples/multi_step/judges/verilog_backend_judge.py +234 -0
  17. examples/multi_step/readme.md +48 -0
  18. examples/multi_step/run_sft_qwen30b.sh +45 -0
  19. examples/multi_step/verilog_rl_lora.md +218 -0
  20. examples/qwen_coder/configs/coder_lora_30b.toml +3 -2
  21. examples/qwen_coder/configs/coder_lora_4b.toml +2 -1
  22. examples/qwen_coder/configs/coder_lora_small.toml +2 -1
  23. examples/qwen_vl/BUGS_AND_FIXES.md +232 -0
  24. examples/qwen_vl/IMAGE_VALIDATION_COMPLETE.md +271 -0
  25. examples/qwen_vl/IMAGE_VALIDATION_SUMMARY.md +260 -0
  26. examples/qwen_vl/INFERENCE_SFT_TESTS.md +412 -0
  27. examples/qwen_vl/NEXT_STEPS_2B.md +325 -0
  28. examples/qwen_vl/QUICKSTART.md +327 -0
  29. examples/qwen_vl/QUICKSTART_RL_VISION.md +110 -0
  30. examples/qwen_vl/README.md +154 -0
  31. examples/qwen_vl/RL_VISION_COMPLETE.md +475 -0
  32. examples/qwen_vl/RL_VISION_TESTING.md +333 -0
  33. examples/qwen_vl/SDK_VISION_INTEGRATION.md +328 -0
  34. examples/qwen_vl/SETUP_COMPLETE.md +275 -0
  35. examples/qwen_vl/VISION_TESTS_COMPLETE.md +490 -0
  36. examples/qwen_vl/VLM_PIPELINE_COMPLETE.md +242 -0
  37. examples/qwen_vl/__init__.py +2 -0
  38. examples/qwen_vl/collect_data_via_cli.md +423 -0
  39. examples/qwen_vl/collect_vision_traces.py +368 -0
  40. examples/qwen_vl/configs/crafter_rl_vision_qwen3vl4b.toml +127 -0
  41. examples/qwen_vl/configs/crafter_vlm_sft_example.toml +60 -0
  42. examples/qwen_vl/configs/eval_gpt4o_mini_vision.toml +43 -0
  43. examples/qwen_vl/configs/eval_gpt4o_vision_proper.toml +29 -0
  44. examples/qwen_vl/configs/eval_gpt5nano_vision.toml +45 -0
  45. examples/qwen_vl/configs/eval_qwen2vl_vision.toml +44 -0
  46. examples/qwen_vl/configs/filter_qwen2vl_sft.toml +50 -0
  47. examples/qwen_vl/configs/filter_vision_sft.toml +53 -0
  48. examples/qwen_vl/configs/filter_vision_test.toml +8 -0
  49. examples/qwen_vl/configs/sft_qwen3_vl_2b_test.toml +54 -0
  50. examples/qwen_vl/crafter_gpt5nano_agent.py +308 -0
  51. examples/qwen_vl/crafter_qwen_vl_agent.py +300 -0
  52. examples/qwen_vl/run_vision_comparison.sh +62 -0
  53. examples/qwen_vl/run_vision_sft_pipeline.sh +175 -0
  54. examples/qwen_vl/test_image_validation.py +201 -0
  55. examples/qwen_vl/test_sft_vision_data.py +110 -0
  56. examples/rl/README.md +1 -1
  57. examples/rl/configs/eval_base_qwen.toml +17 -0
  58. examples/rl/configs/eval_rl_qwen.toml +13 -0
  59. examples/rl/configs/rl_from_base_qwen.toml +37 -0
  60. examples/rl/configs/rl_from_base_qwen17.toml +76 -0
  61. examples/rl/configs/rl_from_ft_qwen.toml +37 -0
  62. examples/rl/run_eval.py +436 -0
  63. examples/rl/run_rl_and_save.py +111 -0
  64. examples/rl/task_app/README.md +22 -0
  65. examples/rl/task_app/math_single_step.py +990 -0
  66. examples/rl/task_app/math_task_app.py +111 -0
  67. examples/sft/README.md +5 -5
  68. examples/sft/configs/crafter_fft_qwen0p6b.toml +4 -2
  69. examples/sft/configs/crafter_lora_qwen0p6b.toml +4 -3
  70. examples/sft/evaluate.py +4 -4
  71. examples/sft/export_dataset.py +7 -4
  72. examples/sft/generate_traces.py +2 -0
  73. examples/swe/task_app/README.md +1 -1
  74. examples/swe/task_app/grpo_swe_mini.py +1 -1
  75. examples/swe/task_app/grpo_swe_mini_task_app.py +0 -12
  76. examples/swe/task_app/hosted/envs/mini_swe/environment.py +13 -13
  77. examples/swe/task_app/hosted/policy_routes.py +0 -2
  78. examples/swe/task_app/hosted/rollout.py +2 -8
  79. examples/task_apps/IMAGE_ONLY_EVAL_QUICKSTART.md +258 -0
  80. examples/task_apps/crafter/CREATE_SFT_DATASET.md +273 -0
  81. examples/task_apps/crafter/EVAL_IMAGE_ONLY_RESULTS.md +152 -0
  82. examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +174 -0
  83. examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +268 -0
  84. examples/task_apps/crafter/QUERY_EXAMPLES.md +203 -0
  85. examples/task_apps/crafter/README_IMAGE_ONLY_EVAL.md +316 -0
  86. examples/task_apps/crafter/eval_image_only_gpt4o.toml +28 -0
  87. examples/task_apps/crafter/eval_text_only_groq_llama.toml +36 -0
  88. examples/task_apps/crafter/filter_sft_dataset.toml +16 -0
  89. examples/task_apps/crafter/task_app/__init__.py +3 -0
  90. examples/task_apps/crafter/task_app/grpo_crafter.py +309 -14
  91. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/environment.py +10 -0
  92. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +75 -4
  93. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +17 -2
  94. examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +55 -3
  95. examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +114 -32
  96. examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +127 -27
  97. examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +156 -0
  98. examples/task_apps/enron/__init__.py +1 -0
  99. examples/task_apps/enron/filter_sft.toml +5 -0
  100. examples/task_apps/enron/tests/__init__.py +2 -0
  101. examples/task_apps/enron/tests/integration/__init__.py +2 -0
  102. examples/task_apps/enron/tests/integration/test_enron_eval.py +2 -0
  103. examples/task_apps/enron/tests/unit/__init__.py +2 -0
  104. examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_COMPLETE.md +283 -0
  105. examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_STATUS.md +155 -0
  106. examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +415 -0
  107. examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +29 -0
  108. examples/task_apps/pokemon_red/pallet_town_rl_config.toml +2 -0
  109. examples/task_apps/pokemon_red/task_app.py +199 -6
  110. examples/task_apps/pokemon_red/test_pallet_town_rewards.py +2 -0
  111. examples/task_apps/sokoban/filter_sft.toml +5 -0
  112. examples/task_apps/sokoban/tests/__init__.py +2 -0
  113. examples/task_apps/sokoban/tests/integration/__init__.py +2 -0
  114. examples/task_apps/sokoban/tests/unit/__init__.py +2 -0
  115. examples/task_apps/verilog/eval_groq_qwen32b.toml +8 -4
  116. examples/task_apps/verilog/filter_sft.toml +5 -0
  117. examples/task_apps/verilog/task_app/grpo_verilog.py +258 -23
  118. examples/task_apps/verilog/tests/__init__.py +2 -0
  119. examples/task_apps/verilog/tests/integration/__init__.py +2 -0
  120. examples/task_apps/verilog/tests/integration/test_verilog_eval.py +2 -0
  121. examples/task_apps/verilog/tests/unit/__init__.py +2 -0
  122. examples/vlm/README.md +3 -3
  123. examples/vlm/configs/crafter_vlm_gpt4o.toml +2 -0
  124. examples/vlm/crafter_openai_vlm_agent.py +3 -5
  125. examples/vlm/filter_image_rows.py +1 -1
  126. examples/vlm/run_crafter_vlm_benchmark.py +2 -2
  127. examples/warming_up_to_rl/_utils.py +92 -0
  128. examples/warming_up_to_rl/analyze_trace_db.py +1 -1
  129. examples/warming_up_to_rl/configs/crafter_fft.toml +2 -0
  130. examples/warming_up_to_rl/configs/crafter_fft_4b.toml +2 -0
  131. examples/warming_up_to_rl/configs/eval_fft_qwen4b.toml +2 -0
  132. examples/warming_up_to_rl/configs/eval_groq_qwen32b.toml +2 -0
  133. examples/warming_up_to_rl/configs/eval_modal_qwen4b.toml +2 -1
  134. examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +2 -1
  135. examples/warming_up_to_rl/configs/rl_from_ft.toml +2 -0
  136. examples/warming_up_to_rl/export_trace_sft.py +174 -60
  137. examples/warming_up_to_rl/groq_test.py +2 -0
  138. examples/warming_up_to_rl/readme.md +63 -132
  139. examples/warming_up_to_rl/run_fft_and_save.py +1 -1
  140. examples/warming_up_to_rl/run_local_rollout.py +2 -0
  141. examples/warming_up_to_rl/run_local_rollout_modal.py +2 -0
  142. examples/warming_up_to_rl/run_local_rollout_parallel.py +2 -0
  143. examples/warming_up_to_rl/run_local_rollout_traced.py +2 -0
  144. examples/warming_up_to_rl/run_rl_and_save.py +1 -1
  145. examples/warming_up_to_rl/run_rollout_remote.py +2 -0
  146. examples/warming_up_to_rl/task_app/README.md +42 -0
  147. examples/warming_up_to_rl/task_app/grpo_crafter.py +696 -0
  148. examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +135 -0
  149. examples/warming_up_to_rl/task_app/synth_envs_hosted/README.md +173 -0
  150. examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +5 -0
  151. examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +143 -0
  152. examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +1226 -0
  153. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/__init__.py +1 -0
  154. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/__init__.py +6 -0
  155. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/app.py +1 -0
  156. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +522 -0
  157. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +478 -0
  158. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +108 -0
  159. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +305 -0
  160. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/tools.py +47 -0
  161. examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +204 -0
  162. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/__init__.py +5 -0
  163. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +618 -0
  164. examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +100 -0
  165. examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +1081 -0
  166. examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +195 -0
  167. examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +1861 -0
  168. examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/__init__.py +5 -0
  169. examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +211 -0
  170. examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +161 -0
  171. examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +137 -0
  172. examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +62 -0
  173. synth_ai/__init__.py +44 -30
  174. synth_ai/_utils/__init__.py +47 -0
  175. synth_ai/_utils/base_url.py +10 -0
  176. synth_ai/_utils/http.py +10 -0
  177. synth_ai/_utils/prompts.py +10 -0
  178. synth_ai/_utils/task_app_state.py +12 -0
  179. synth_ai/_utils/user_config.py +10 -0
  180. synth_ai/api/models/supported.py +145 -7
  181. synth_ai/api/train/__init__.py +13 -1
  182. synth_ai/api/train/cli.py +30 -7
  183. synth_ai/api/train/config_finder.py +18 -11
  184. synth_ai/api/train/env_resolver.py +13 -10
  185. synth_ai/cli/__init__.py +66 -49
  186. synth_ai/cli/_modal_wrapper.py +9 -6
  187. synth_ai/cli/_typer_patch.py +0 -2
  188. synth_ai/cli/_validate_task_app.py +22 -4
  189. synth_ai/cli/legacy_root_backup.py +3 -1
  190. synth_ai/cli/lib/__init__.py +10 -0
  191. synth_ai/cli/lib/task_app_discovery.py +7 -0
  192. synth_ai/cli/lib/task_app_env.py +518 -0
  193. synth_ai/cli/recent.py +1 -0
  194. synth_ai/cli/setup.py +266 -0
  195. synth_ai/cli/task_app_deploy.py +16 -0
  196. synth_ai/cli/task_app_list.py +25 -0
  197. synth_ai/cli/task_app_modal_serve.py +16 -0
  198. synth_ai/cli/task_app_serve.py +18 -0
  199. synth_ai/cli/task_apps.py +392 -141
  200. synth_ai/cli/train.py +18 -0
  201. synth_ai/cli/tui.py +62 -0
  202. synth_ai/demos/__init__.py +10 -0
  203. synth_ai/demos/core/__init__.py +28 -1
  204. synth_ai/demos/crafter/__init__.py +1 -0
  205. synth_ai/demos/crafter/crafter_fft_4b.toml +55 -0
  206. synth_ai/demos/crafter/grpo_crafter_task_app.py +185 -0
  207. synth_ai/demos/crafter/rl_from_base_qwen4b.toml +74 -0
  208. synth_ai/demos/demo_registry.py +176 -0
  209. synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +1 -1
  210. synth_ai/demos/math/__init__.py +1 -0
  211. synth_ai/demos/math/_common.py +16 -0
  212. synth_ai/demos/math/app.py +38 -0
  213. synth_ai/demos/math/config.toml +76 -0
  214. synth_ai/demos/math/deploy_modal.py +54 -0
  215. synth_ai/demos/math/modal_task_app.py +702 -0
  216. synth_ai/demos/math/task_app_entry.py +51 -0
  217. synth_ai/environments/environment/core.py +7 -1
  218. synth_ai/environments/examples/bandit/engine.py +0 -1
  219. synth_ai/environments/examples/bandit/environment.py +0 -1
  220. synth_ai/environments/examples/crafter_classic/environment.py +1 -1
  221. synth_ai/environments/examples/verilog/engine.py +76 -10
  222. synth_ai/environments/examples/wordle/environment.py +0 -1
  223. synth_ai/evals/base.py +16 -5
  224. synth_ai/evals/client.py +1 -1
  225. synth_ai/inference/client.py +1 -1
  226. synth_ai/learning/client.py +1 -1
  227. synth_ai/learning/health.py +1 -1
  228. synth_ai/learning/jobs.py +1 -1
  229. synth_ai/learning/rl/client.py +1 -1
  230. synth_ai/learning/rl/env_keys.py +1 -1
  231. synth_ai/learning/rl/secrets.py +1 -1
  232. synth_ai/learning/sft/client.py +1 -1
  233. synth_ai/learning/sft/data.py +407 -4
  234. synth_ai/learning/validators.py +4 -1
  235. synth_ai/task/__init__.py +11 -1
  236. synth_ai/task/apps/__init__.py +5 -2
  237. synth_ai/task/config.py +259 -0
  238. synth_ai/task/contracts.py +15 -2
  239. synth_ai/task/rubrics/__init__.py +4 -2
  240. synth_ai/task/rubrics/loaders.py +27 -4
  241. synth_ai/task/rubrics/scoring.py +3 -0
  242. synth_ai/task/rubrics.py +219 -0
  243. synth_ai/task/trace_correlation_helpers.py +328 -0
  244. synth_ai/task/tracing_utils.py +14 -3
  245. synth_ai/task/validators.py +145 -2
  246. synth_ai/tracing_v3/config.py +15 -13
  247. synth_ai/tracing_v3/constants.py +21 -0
  248. synth_ai/tracing_v3/db_config.py +3 -1
  249. synth_ai/tracing_v3/decorators.py +10 -7
  250. synth_ai/tracing_v3/session_tracer.py +10 -0
  251. synth_ai/tracing_v3/turso/daemon.py +2 -2
  252. synth_ai/tracing_v3/turso/native_manager.py +108 -77
  253. synth_ai/tracing_v3/utils.py +1 -1
  254. synth_ai/tui/__init__.py +5 -0
  255. synth_ai/tui/__main__.py +13 -0
  256. synth_ai/tui/cli/__init__.py +1 -0
  257. synth_ai/tui/cli/query_experiments.py +164 -0
  258. synth_ai/tui/cli/query_experiments_v3.py +164 -0
  259. synth_ai/tui/dashboard.py +911 -0
  260. synth_ai/utils/__init__.py +101 -0
  261. synth_ai/utils/base_url.py +94 -0
  262. synth_ai/utils/cli.py +131 -0
  263. synth_ai/utils/env.py +287 -0
  264. synth_ai/utils/http.py +169 -0
  265. synth_ai/utils/modal.py +308 -0
  266. synth_ai/utils/process.py +212 -0
  267. synth_ai/utils/prompts.py +39 -0
  268. synth_ai/utils/sqld.py +122 -0
  269. synth_ai/utils/task_app_discovery.py +882 -0
  270. synth_ai/utils/task_app_env.py +186 -0
  271. synth_ai/utils/task_app_state.py +318 -0
  272. synth_ai/utils/user_config.py +137 -0
  273. synth_ai/v0/config/__init__.py +1 -5
  274. synth_ai/v0/config/base_url.py +1 -7
  275. synth_ai/v0/tracing/config.py +1 -1
  276. synth_ai/v0/tracing/decorators.py +1 -1
  277. synth_ai/v0/tracing/upload.py +1 -1
  278. synth_ai/v0/tracing_v1/config.py +1 -1
  279. synth_ai/v0/tracing_v1/decorators.py +1 -1
  280. synth_ai/v0/tracing_v1/upload.py +1 -1
  281. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/METADATA +85 -31
  282. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/RECORD +286 -135
  283. synth_ai/cli/man.py +0 -106
  284. synth_ai/compound/cais.py +0 -0
  285. synth_ai/core/experiment.py +0 -13
  286. synth_ai/core/system.py +0 -15
  287. synth_ai/demo_registry.py +0 -295
  288. synth_ai/handshake.py +0 -109
  289. synth_ai/http.py +0 -26
  290. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/WHEEL +0 -0
  291. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/entry_points.txt +0 -0
  292. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/licenses/LICENSE +0 -0
  293. {synth_ai-0.2.13.dev2.dist-info → synth_ai-0.2.16.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,990 @@
1
+ """Task app configuration for a single-step math reasoning environment."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import contextlib
6
+ import os
7
+ import random
8
+ import re
9
+ import uuid
10
+ from collections.abc import Iterable, Mapping, MutableMapping, Sequence
11
+ from dataclasses import dataclass
12
+ from pathlib import Path
13
+ from typing import Any, cast
14
+
15
+ import httpx
16
+ from datasets import load_dataset
17
+ from fastapi import APIRouter, HTTPException, Request
18
+ from pydantic import BaseModel, Field
19
+ from synth_ai.task.apps import ModalDeploymentConfig, TaskAppEntry, register_task_app
20
+ from synth_ai.task.contracts import (
21
+ RolloutMetrics,
22
+ RolloutRequest,
23
+ RolloutResponse,
24
+ RolloutStep,
25
+ RolloutTrajectory,
26
+ TaskInfo,
27
+ )
28
+ from synth_ai.task.datasets import TaskDatasetRegistry, TaskDatasetSpec
29
+ from synth_ai.task.errors import http_exception
30
+ from synth_ai.task.rubrics import Rubric, load_rubric
31
+ from synth_ai.task.server import ProxyConfig, RubricBundle, TaskAppConfig
32
+ from synth_ai.task.tracing_utils import (
33
+ build_tracer_factory,
34
+ resolve_sft_output_dir,
35
+ resolve_tracing_db_url,
36
+ tracing_env_enabled,
37
+ )
38
+ from synth_ai.task.vendors import normalize_vendor_keys
39
+ from synth_ai.tracing_v3.session_tracer import SessionTracer
40
+
41
# Repository root, assuming this file sits three levels below it
# (examples/rl/task_app/math_single_step.py).
REPO_ROOT = Path(__file__).resolve().parents[3]

# Prefer a Modal volume for dataset storage when it is mountable/writable;
# otherwise fall back to a repo-local Hugging Face cache directory.
_modal_volume_candidate = Path(
    os.getenv("MATH_MODAL_DATASET_DIR", "/modal_volumes/math_dataset")
).expanduser()
_modal_volume_root: Path | None = None
try:
    _modal_volume_candidate.mkdir(parents=True, exist_ok=True)
    _modal_volume_root = _modal_volume_candidate
except Exception:
    # mkdir failed (e.g. read-only mount): still use the path if it already exists.
    if _modal_volume_candidate.exists():
        _modal_volume_root = _modal_volume_candidate

if _modal_volume_root is not None:
    hf_cache_path = _modal_volume_root / "hf_cache"
    local_dataset_dir = _modal_volume_root / "jsonl"
    local_dataset_dir.mkdir(parents=True, exist_ok=True)
    os.environ.setdefault("MATH_DATASET_LOCAL_DIR", str(local_dataset_dir))
else:
    hf_cache_path = Path(
        os.getenv("MATH_DATASET_CACHE_DIR", str(REPO_ROOT / ".cache" / "hf-datasets"))
    ).expanduser()

hf_cache_path.mkdir(parents=True, exist_ok=True)
# Point every Hugging Face cache knob at the chosen directory, without
# overriding values the operator has already set.
os.environ.setdefault("MATH_DATASET_CACHE_DIR", str(hf_cache_path))
os.environ.setdefault("HF_HOME", str(hf_cache_path))
os.environ.setdefault("HF_DATASETS_CACHE", str(hf_cache_path))
os.environ.setdefault("HUGGINGFACE_HUB_CACHE", str(hf_cache_path))

HF_DATASETS_CACHE = hf_cache_path
# Dataset identity and split names, all overridable via environment variables.
DATASET_NAME = os.getenv("MATH_DATASET_NAME", "nlile/hendrycks-MATH-benchmark")
DATASET_CONFIG = os.getenv("MATH_DATASET_CONFIG", "")
DEFAULT_SPLIT = os.getenv("MATH_DATASET_DEFAULT_SPLIT", "train")
AVAILABLE_SPLITS: tuple[str, ...] = (
    DEFAULT_SPLIT,
    os.getenv("MATH_DATASET_VALIDATION_SPLIT", "test"),
    os.getenv("MATH_DATASET_TEST_SPLIT", "test"),
)
TOOL_NAME = "math_submit"  # the single tool the policy must call to submit an answer
# Candidate row fields for the problem statement / gold solution, in priority order.
PROBLEM_KEYS: tuple[str, ...] = ("problem", "question", "prompt", "query")
SOLUTION_KEYS: tuple[str, ...] = ("solution", "answer", "final_answer", "solution_text")

# Scalar rewards for the single-step episode (overridable via environment).
REWARD_POSITIVE = float(os.getenv("MATH_REWARD_POSITIVE", "1.0"))
REWARD_NEGATIVE_NO_TOOL = float(os.getenv("MATH_REWARD_NEGATIVE_NO_TOOL", "-1.0"))
REWARD_NEGATIVE_NO_ANSWER = float(os.getenv("MATH_REWARD_NEGATIVE_NO_ANSWER", "-0.5"))

# Env vars probed (in order) for a Hugging Face auth token.
HF_TOKEN_ENV_KEYS: tuple[str, ...] = (
    "HF_DATASETS_TOKEN",
    "HUGGINGFACEHUB_API_TOKEN",
    "HUGGINGFACE_TOKEN",
)

## Single-source dataset policy: use a single known-good HF dataset id by default.

MATH_DATASET_SPEC = TaskDatasetSpec(
    id="math_single_step",
    name="MATH Single Step",
    version="1.0.0",
    # De-duplicate while preserving order; drop blank split names.
    splits=list(dict.fromkeys(split for split in AVAILABLE_SPLITS if split)),
    default_split=DEFAULT_SPLIT,
    description="Single-step math reasoning problems sourced from the Hendrycks MATH dataset.",
)
103
+
104
+
105
+ _BOXED_MARKERS: tuple[str, ...] = ("\\boxed", "boxed")
106
+
107
+
108
+ def _extract_boxed(text: str) -> str | None:
109
+ if not text:
110
+ return None
111
+ for marker in _BOXED_MARKERS:
112
+ start = text.find(marker)
113
+ if start == -1:
114
+ continue
115
+ brace_start = text.find("{", start)
116
+ if brace_start == -1:
117
+ continue
118
+ depth = 1
119
+ idx = brace_start + 1
120
+ while idx < len(text) and depth > 0:
121
+ ch = text[idx]
122
+ if ch == "{":
123
+ depth += 1
124
+ elif ch == "}":
125
+ depth -= 1
126
+ idx += 1
127
+ if depth == 0:
128
+ return text[brace_start + 1 : idx - 1].strip()
129
+ return None
130
+
131
+
132
+ _FRAC_PATTERN = re.compile(r"\\?frac\{([^{}]+)\}\{([^{}]+)\}")
133
+ _SQRT_PATTERN = re.compile(r"\\?sqrt\{([^{}]+)\}")
134
+
135
+
136
+ def _normalize_final_answer(text: str) -> str:
137
+ raw = str(text or "").strip()
138
+ if not raw:
139
+ return ""
140
+ boxed = _extract_boxed(raw)
141
+ if boxed:
142
+ raw = boxed
143
+ raw = raw.strip().strip("$")
144
+ raw = raw.replace("\\left", "").replace("\\right", "")
145
+ raw = raw.replace("\\!", "").replace("\\,", " ").replace("\\;", " ")
146
+ raw = raw.replace("left", "").replace("right", "")
147
+ raw = raw.replace("\\times", "*").replace("\\cdot", "*")
148
+ raw = raw.replace("\\pi", "pi").replace("\\theta", "theta").replace("\\phi", "phi")
149
+ raw = raw.replace("\\pm", "+/-").replace("\\mp", "-/+")
150
+ raw = raw.replace("^{\\circ}", "deg").replace("^\\circ", "deg").replace("\\circ", "deg")
151
+
152
+ def _frac_sub(match: re.Match[str]) -> str:
153
+ num = match.group(1).strip()
154
+ den = match.group(2).strip()
155
+ return f"({num})/({den})"
156
+
157
+ def _sqrt_sub(match: re.Match[str]) -> str:
158
+ inner = match.group(1).strip()
159
+ return f"sqrt({inner})"
160
+
161
+ raw = _FRAC_PATTERN.sub(_frac_sub, raw)
162
+ raw = _SQRT_PATTERN.sub(_sqrt_sub, raw)
163
+ raw = raw.replace("\\", "")
164
+ raw = raw.replace("{", "").replace("}", "")
165
+ raw = raw.replace(" ", "")
166
+ raw = raw.rstrip(".")
167
+ return raw
168
+
169
+
170
class MathDataset:
    """Lazy loader for the configured MATH dataset splits.

    Each split is loaded on first access — from a local JSONL mirror when one
    exists, otherwise downloaded via ``datasets.load_dataset`` — and cached
    in-memory for the life of the process. A downloaded split is also mirrored
    back to ``MATH_DATASET_LOCAL_DIR`` (best-effort) so later runs can skip
    the network.
    """

    def __init__(self, *, name: str, config: str, splits: Sequence[str]) -> None:
        # HF dataset id (e.g. "nlile/hendrycks-MATH-benchmark") and optional config name.
        self.name = name
        self.config = config
        # Drop blank split names coming from unset env vars.
        self.splits = [split for split in splits if split]
        self._cache: dict[str, Any] = {}  # split name -> loaded dataset object
        self._local_dir = os.getenv("MATH_DATASET_LOCAL_DIR")
        # First non-empty HF auth token found among HF_TOKEN_ENV_KEYS, if any.
        self._hf_token: str | None = None
        for key in HF_TOKEN_ENV_KEYS:
            value = os.getenv(key)
            if value:
                trimmed = value.strip()
                if trimmed:
                    self._hf_token = trimmed
                    break
        # No multi-candidate fallback: enforce explicit dataset id

    def _local_file_for_split(self, split: str) -> Path | None:
        """Return the local JSONL file for *split* if one exists, else ``None``.

        A split-specific MATH_DATASET_LOCAL_<SPLIT>_FILE env var takes
        precedence over the shared MATH_DATASET_LOCAL_DIR directory.
        """
        specific = os.getenv(f"MATH_DATASET_LOCAL_{split.upper()}_FILE")
        if specific:
            path = Path(specific).expanduser()
            if path.exists():
                return path
        if self._local_dir:
            candidate = Path(self._local_dir).expanduser() / f"{split}.jsonl"
            if candidate.exists():
                return candidate
        return None

    def _load_split(self, split: str):
        """Load (and memoize) one split, preferring local JSONL over HF download.

        Raises ``ValueError`` for an unknown split name and ``RuntimeError``
        (with remediation hints) when the HF download fails.
        """
        # Treat 'validation' as an alias for 'test' for datasets without a separate validation split
        if split not in self.splits and split.lower() == "validation":
            split = "test"
        if split not in self.splits:
            raise ValueError(f"Unknown split '{split}'. Available: {self.splits}")
        if split not in self._cache:
            local_file = self._local_file_for_split(split)
            if local_file is not None:
                # load_dataset("json", ...) wraps the file in a "train" split.
                dataset = load_dataset(
                    "json", data_files=str(local_file), cache_dir=str(HF_DATASETS_CACHE)
                )
                self._cache[split] = dataset["train"]
            else:
                try:
                    load_kwargs: dict[str, Any] = {"split": split}
                    if self.config:
                        load_kwargs["name"] = self.config
                    if self._hf_token:
                        load_kwargs["use_auth_token"] = self._hf_token
                    ds = load_dataset(self.name, cache_dir=str(HF_DATASETS_CACHE), **load_kwargs)
                    self._cache[split] = ds
                    # Best-effort: mirror the split to local JSONL for future runs,
                    # writing via a temp file so a partial write never looks valid.
                    if self._local_dir:
                        local_dir = Path(self._local_dir).expanduser()
                        target = local_dir / f"{split}.jsonl"
                        if not target.exists() and hasattr(ds, "to_json"):
                            tmp_path = target.with_name(target.name + ".tmp")
                            try:
                                local_dir.mkdir(parents=True, exist_ok=True)
                                ds.to_json(str(tmp_path))
                                tmp_path.replace(target)
                            except Exception:
                                with contextlib.suppress(FileNotFoundError):
                                    tmp_path.unlink()
                except Exception as exc:
                    hints = [
                        "Failed to download MATH dataset from Hugging Face.",
                        f"Dataset: {self.name} | Config: {self.config or 'none'} | Split: {split}",
                        "If this persists, verify MATH_DATASET_NAME/MATH_DATASET_CONFIG or set MATH_DATASET_LOCAL_DIR to pre-downloaded JSONL files.",
                    ]
                    raise RuntimeError(" ".join(hints)) from exc
        return self._cache[split]

    def sample(self, *, split: str, index: int | None = None) -> dict[str, Any]:
        """Return one problem from *split* as a plain dict.

        ``index`` is wrapped modulo the split size; a random row is chosen when
        it is ``None``. The returned dict carries the resolved ``index``,
        ``split``, ``problem``, the normalized final ``answer`` used for
        grading, and the unmodified ``raw_solution`` text. Raises
        ``RuntimeError`` when the split is empty or a row lacks a recognizable
        problem/solution field.
        """
        dataset = self._load_split(split)
        if len(dataset) == 0:
            raise RuntimeError(f"Dataset split '{split}' is empty")
        if index is None:
            index = random.randint(0, len(dataset) - 1)
        idx = int(index) % len(dataset)
        item = dataset[int(idx)]

        # First matching key wins (schemas vary across MATH mirrors).
        raw_problem = ""
        for key in PROBLEM_KEYS:
            value = item.get(key)
            if isinstance(value, str) and value.strip():
                raw_problem = value.strip()
                break
        if not raw_problem:
            raise RuntimeError(f"Sample missing problem field for split '{split}' index {idx}")

        solution_value: Any = None
        for key in SOLUTION_KEYS:
            if key in item:
                solution_value = item[key]
                break
        if solution_value is None:
            raise RuntimeError(f"Sample missing solution field for split '{split}' index {idx}")

        # Solutions can contain reasoning and final answer; take final line by convention
        if isinstance(solution_value, list):
            solution_text = "\n".join(str(part) for part in solution_value)
        else:
            solution_text = str(solution_value)
        lines = [line.strip() for line in solution_text.strip().splitlines() if line.strip()]
        final_line = ""
        for line in reversed(lines):
            lowered = line.lower()
            if "boxed" in lowered or "answer" in lowered:
                final_line = line
                break
        if not final_line and lines:
            final_line = lines[-1]
        candidate_answer = final_line or solution_text.strip()
        normalized_answer = _normalize_final_answer(candidate_answer)
        if not normalized_answer:
            # Fall back to normalizing the whole solution if the chosen line was empty.
            normalized_answer = _normalize_final_answer(solution_text)
        return {
            "index": idx,
            "split": split,
            "problem": raw_problem,
            "answer": normalized_answer,
            "raw_solution": solution_text,
        }

    def size(self, split: str) -> int:
        """Return the number of rows in *split* (loading it if necessary)."""
        dataset = self._load_split(split)
        return len(dataset)

    def ensure_ready(self, required_splits: Sequence[str]) -> None:
        """Eagerly load *required_splits*, aggregating failures into one error.

        Raises ``RuntimeError`` listing every split that failed to load.
        """
        errors: list[str] = []
        for split in required_splits:
            if not split:
                continue
            try:
                self._load_split(split)
            except Exception as exc:
                errors.append(f"{split}: {exc}")
        if errors:
            raise RuntimeError("Dataset preparation failed:\n" + "\n".join(errors))
311
+
312
+
313
@dataclass
class MathEnvState:
    """Per-rollout environment state for one sampled MATH problem."""

    env_id: str  # unique id handed back to the client for step/terminate calls
    split: str  # dataset split the sample came from
    index: int  # resolved row index within the split
    problem: str  # problem statement shown to the policy
    answer: str  # normalized gold final answer used for grading
    raw_solution: str  # original (unnormalized) solution text
    done: bool = False  # episode-completion flag
322
+
323
+
324
class MathEnvironmentManager:
    """Stores in-flight environment state keyed by env_id."""

    def __init__(self, dataset: MathDataset) -> None:
        self.dataset = dataset
        self._states: dict[str, MathEnvState] = {}

    def create(self, *, split: str, index: int | None, seed: int | None) -> MathEnvState:
        """Sample a problem and register a fresh environment for it."""
        # The seed doubles as the row index when no explicit index is given.
        chosen_index = seed if index is None else index
        sample = self.dataset.sample(split=split, index=chosen_index)
        new_id = str(uuid.uuid4())
        state = MathEnvState(
            env_id=new_id,
            split=split,
            index=int(sample["index"]),
            problem=sample["problem"],
            answer=sample["answer"],
            raw_solution=sample["raw_solution"],
        )
        self._states[new_id] = state
        return state

    def get(self, env_id: str) -> MathEnvState:
        """Return the live state for *env_id*; raise ``KeyError`` if absent."""
        state = self._states.get(env_id)
        if state is None:
            raise KeyError(f"Unknown env_id: {env_id}")
        return state

    def terminate(self, env_id: str) -> None:
        """Forget *env_id*; a no-op when it is already gone."""
        self._states.pop(env_id, None)
354
+
355
+
356
class InitializePayload(BaseModel):
    """Request body for the environment initialize endpoint.

    ``seed`` optionally selects the dataset row when ``config`` carries no
    explicit ``index``; ``config`` may also provide a ``split`` override.
    """

    seed: int | None = None
    config: dict[str, Any] = Field(default_factory=dict)
359
+
360
+
361
+ def _observation_from_state(state: MathEnvState) -> dict[str, Any]:
362
+ return {
363
+ "problem": state.problem,
364
+ "split": state.split,
365
+ "index": state.index,
366
+ }
367
+
368
+
369
def _score_submission(
    state: MathEnvState, tool_calls: Sequence[Mapping[str, Any]]
) -> tuple[float, str, bool]:
    """Grade the first tool call against the gold answer.

    Returns ``(reward, status, is_correct)`` where status is one of
    ``missing_tool_call``, ``wrong_tool``, ``blank_answer``, ``correct``, or
    ``incorrect``. Only the first tool call is considered.
    """
    if not tool_calls:
        return REWARD_NEGATIVE_NO_TOOL, "missing_tool_call", False
    first_call = tool_calls[0]
    if str(first_call.get("tool") or "").strip() != TOOL_NAME:
        return REWARD_NEGATIVE_NO_TOOL, "wrong_tool", False
    submitted_args = first_call.get("args") or {}
    submitted = _normalize_final_answer(str(submitted_args.get("answer") or ""))
    if not submitted:
        return REWARD_NEGATIVE_NO_ANSWER, "blank_answer", False
    # Both sides are normalized, so plain string equality suffices.
    if submitted == state.answer:
        return REWARD_POSITIVE, "correct", True
    return 0.0, "incorrect", False
388
+
389
+
390
# Router hosting the math environment endpoints; mounted by the task app.
math_router = APIRouter()
391
+
392
+
393
+ def _preview_tool_calls(tool_calls: Sequence[Mapping[str, Any]]) -> list[dict[str, Any]]:
394
+ """Return a compact, log-friendly preview of tool calls.
395
+
396
+ Truncates long fields to avoid noisy logs and leaking excessive content.
397
+ """
398
+ preview: list[dict[str, Any]] = []
399
+ for call in list(tool_calls or [])[:3]:
400
+ args = dict(call.get("args") or {})
401
+ answer = str(args.get("answer") or "")
402
+ # Hard truncate to keep logs compact
403
+ answer_short = answer[:120] + ("…" if len(answer) > 120 else "")
404
+ preview.append(
405
+ {
406
+ "tool": call.get("tool"),
407
+ "answer": answer_short,
408
+ }
409
+ )
410
+ return preview
411
+
412
+
413
def _event_and_outcome_components(
    tool_calls: Sequence[Mapping[str, Any]], *, correct: bool, reward: float
) -> dict[str, float]:
    """Approximate component-wise scores for RL-style logs.

    - ``env``: task-level scalar reward (the single-step outcome)
    - ``rubric_event``: 1.0 when a well-formed tool call with a non-empty answer was made
    - ``rubric_outcome``: 1.0 when the final answer was correct
    """

    def _has_valid_submission() -> bool:
        if not tool_calls:
            return False
        first = tool_calls[0] or {}
        if str(first.get("tool") or "") != TOOL_NAME:
            return False
        answer = str((first.get("args") or {}).get("answer") or "").strip()
        return bool(answer)

    return {
        "env": float(reward),
        "rubric_event": float(_has_valid_submission()),
        "rubric_outcome": float(bool(correct)),
    }
434
+
435
+
436
@math_router.post("/env/math/initialize")
async def initialize_env(request: Request, payload: InitializePayload) -> dict[str, Any]:
    """Create a fresh math environment instance and return its first observation."""
    manager: MathEnvironmentManager = request.app.state.math_env_manager
    cfg = payload.config
    split = str(cfg.get("split") or DEFAULT_SPLIT)
    index: int | None = None
    raw_index = cfg.get("index")
    if raw_index is not None:
        index = int(raw_index)
    state = manager.create(split=split, index=index, seed=payload.seed)
    # The raw solution is surfaced via `info` only, never in the observation.
    return {
        "env_id": state.env_id,
        "observation": _observation_from_state(state),
        "info": {"raw_solution": state.raw_solution},
    }
450
+
451
+
452
@math_router.post("/env/math/step")
async def step_env(request: Request, payload: dict[str, Any]) -> dict[str, Any]:
    """Score the single step of an existing math environment.

    Expects ``env_id`` in *payload*; tool calls may arrive either nested under
    ``action.tool_calls`` or at the payload top level. The episode always
    terminates after this call (the response carries ``done=True``).

    Raises:
        HTTPException: 400 when ``env_id`` is missing, 404 when unknown.
    """
    manager: MathEnvironmentManager = request.app.state.math_env_manager
    env_id = str(payload.get("env_id") or "")
    if not env_id:
        raise HTTPException(status_code=400, detail="env_id required")
    try:
        state = manager.get(env_id)
    except KeyError as exc:  # pragma: no cover - defensive
        raise HTTPException(status_code=404, detail=str(exc)) from exc

    action = payload.get("action") or {}
    # Accept tool calls either under "action" or at the payload root.
    tool_calls = action.get("tool_calls") or payload.get("tool_calls") or []
    reward, status, correct = _score_submission(state, tool_calls)
    # Best-effort debug logging; a logging failure must never break the step.
    with contextlib.suppress(Exception):
        print(
            "[MATH_STEP] env_id=",
            state.env_id,
            " split=",
            state.split,
            " index=",
            state.index,
            " calls=",
            _preview_tool_calls(tool_calls),
            " reward=",
            reward,
            " status=",
            status,
            " correct=",
            correct,
            " components=",
            _event_and_outcome_components(tool_calls, correct=correct, reward=reward),
            flush=True,
        )
    # Single-step environment: the episode is over after one submission.
    state.done = True

    observation = _observation_from_state(state)
    observation["status"] = status
    return {
        "observation": observation,
        "done": True,
        "reward": reward,
        "info": {
            "correct": correct,
            "expected_answer": state.answer,
            "raw_solution": state.raw_solution,
        },
    }
500
+
501
+
502
@math_router.post("/env/math/terminate")
async def terminate_env(request: Request, payload: dict[str, Any]) -> dict[str, Any]:
    """Tear down an environment instance; a missing/blank env_id is a no-op."""
    manager: MathEnvironmentManager = request.app.state.math_env_manager
    requested_id = str(payload.get("env_id") or "")
    if requested_id:
        manager.terminate(requested_id)
    return {"ok": True}
509
+
510
+
511
+ def _resolve_inference_url(base_url: str) -> str:
512
+ normalized = (base_url or "").rstrip("/")
513
+ if not normalized:
514
+ raise RuntimeError("policy.config.inference_url required")
515
+ if normalized.endswith("/v1/chat/completions"):
516
+ return normalized
517
+ if normalized.endswith("/chat/completions"):
518
+ return normalized
519
+ if normalized.endswith("/v1"):
520
+ return f"{normalized}/chat/completions"
521
+ return f"{normalized}/v1/chat/completions"
522
+
523
+
524
async def _call_inference(
    policy_config: Mapping[str, Any], observation: Mapping[str, Any]
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
    """Call the configured chat-completions endpoint and parse tool calls.

    Builds a forced-tool-choice request around the observation's problem text,
    posts it to ``policy_config["inference_url"]``, and returns
    ``(tool_calls, raw_response_json)`` where each tool call is normalized to
    ``{"tool": name, "args": dict}``.

    Raises:
        RuntimeError: when ``inference_url`` is missing.
        HTTPException: 502 on invalid JSON or upstream 5xx, 400 on upstream 4xx
            (via ``http_exception``).
    """
    inference_url = str(policy_config.get("inference_url") or "").rstrip("/")
    if not inference_url:
        raise RuntimeError("policy.config.inference_url required for rollout")
    model = policy_config.get("model")
    # Sampling defaults favour deterministic, short answers.
    max_tokens = policy_config.get("max_tokens", 512)
    temperature = policy_config.get("temperature", 0.0)
    top_p = policy_config.get("top_p", 1.0)

    messages = [
        {
            "role": "system",
            "content": (
                "You are a math solver. Read the problem carefully and respond with a single"
                f" tool call using the function `{TOOL_NAME}`."
                "\nRules:\n"
                "- Do all reasoning internally.\n"
                "- The tool call must include ONLY the final numeric or simplified answer in the"
                " `answer` field.\n"
                "- DO NOT include explanations, units, or extra text in the answer."
            ),
        },
        {
            "role": "user",
            "content": (
                "Problem:\n"
                + str(observation.get("problem") or "")
                + "\nSubmit the final answer via the tool call."
            ),
        },
    ]

    payload: dict[str, Any] = {
        "model": model,
        "messages": messages,
        "tools": [
            {
                "type": "function",
                "function": {
                    "name": TOOL_NAME,
                    "description": "Submit the final answer for the math problem.",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "answer": {
                                "type": "string",
                                "description": "Final answer in simplest form",
                            },
                            "explanation": {
                                "type": "string",
                                "description": "Optional explanation of reasoning",
                            },
                        },
                        "required": ["answer"],
                        "additionalProperties": False,
                    },
                },
            }
        ],
        # Force the model to answer via the submission tool.
        "tool_choice": {"type": "function", "function": {"name": TOOL_NAME}},
        "temperature": temperature,
        "top_p": top_p,
        "max_tokens": max_tokens,
    }

    final_url = _resolve_inference_url(inference_url)
    async with httpx.AsyncClient(timeout=httpx.Timeout(60.0)) as client:
        response = await client.post(final_url, json=payload)
        # Parse the body before checking status so error payloads can be surfaced.
        try:
            data = response.json()
        except Exception as exc:
            raise http_exception(
                502,
                "inference_invalid_response",
                "Inference provider returned invalid JSON",
                extra={"body": response.text[:800]},
            ) from exc
        if response.status_code >= 500:
            raise http_exception(
                502,
                "inference_upstream_error",
                "Inference provider returned an error",
                extra={"status": response.status_code, "body": data},
            )
        if response.status_code >= 400:
            raise http_exception(
                400,
                "inference_request_invalid",
                "Invalid inference request",
                extra={"status": response.status_code, "body": data},
            )

        # Normalize tool calls from the first choice into {"tool", "args"} dicts.
        tool_calls = []
        choices = data.get("choices") or []
        if choices:
            message = choices[0].get("message") or {}
            raw_calls = message.get("tool_calls") or []
            for call in raw_calls:
                function = call.get("function") or {}
                name = function.get("name")
                arguments = function.get("arguments")
                parsed_args: dict[str, Any]
                # Arguments may arrive as a JSON string or as a mapping.
                if isinstance(arguments, str):
                    try:
                        import json

                        parsed_args = json.loads(arguments)
                    except Exception:
                        parsed_args = {}
                elif isinstance(arguments, MutableMapping):
                    parsed_args = dict(arguments)
                else:
                    parsed_args = {}
                tool_calls.append({"tool": name, "args": parsed_args})
        # Lightweight provider-side logging
        with contextlib.suppress(Exception):
            print(
                "[MATH_INFER] model=",
                model,
                " calls=",
                _preview_tool_calls(tool_calls),
                flush=True,
            )
        return tool_calls, data
650
+
651
+
652
async def rollout_executor(request: RolloutRequest, fastapi_request: Request) -> RolloutResponse:
    """Run one full single-step rollout: sample a problem, infer, score, report.

    Unlike the /env/math/* endpoints, this path does not use the environment
    manager — it builds a throwaway ``MathEnvState`` purely for scoring.
    Inference failures are captured into the step's ``info`` rather than
    propagated, so a rollout always produces a (possibly zero-reward) response.
    """
    dataset: MathDataset = fastapi_request.app.state.math_dataset
    split = str(((request.env.config or {}).get("split")) or DEFAULT_SPLIT)
    # The rollout seed doubles as the dataset index.
    sample = dataset.sample(split=split, index=request.env.seed)

    observation = {
        "problem": sample["problem"],
        "split": sample["split"],
        "index": sample["index"],
    }

    tool_calls: list[dict[str, Any]] = []
    inference_payload: dict[str, Any] | None = None
    error_info: dict[str, Any] = {}
    # Swallow inference errors: a failed call scores as "missing_tool_call".
    try:
        tool_calls, inference_payload = await _call_inference(
            request.policy.config or {}, observation
        )
    except HTTPException as http_err:
        tool_calls = []
        error_info = {"error": http_err.detail, "code": http_err.status_code}
    except Exception as exc:
        tool_calls = []
        error_info = {"error": str(exc)}

    reward, status, correct = _score_submission(
        MathEnvState(
            env_id="rollout",
            split=sample["split"],
            index=sample["index"],
            problem=sample["problem"],
            answer=sample["answer"],
            raw_solution=sample["raw_solution"],
        ),
        tool_calls,
    )

    # Log a concise summary so we can debug reward=0 issues in production
    with contextlib.suppress(Exception):
        print(
            "[MATH_ROLLOUT] run=",
            request.run_id,
            " split=",
            sample["split"],
            " index=",
            sample["index"],
            " calls=",
            _preview_tool_calls(tool_calls),
            " reward=",
            reward,
            " status=",
            status,
            " correct=",
            correct,
            " components=",
            _event_and_outcome_components(tool_calls, correct=correct, reward=reward),
            flush=True,
        )

    step = RolloutStep(
        obs=observation,
        tool_calls=tool_calls,
        reward=reward,
        done=True,
        info={
            "expected_answer": sample["answer"],
            "status": status,
            "correct": correct,
            "raw_solution": sample["raw_solution"],
            "tool_call_preview": _preview_tool_calls(tool_calls),
            # Any captured inference error is merged into the step info.
            **error_info,
        },
    )

    trajectory = RolloutTrajectory(
        env_id=f"math::{sample['split']}::{sample['index']}",
        policy_id=request.policy.policy_id or "policy",
        steps=[step],
        final={
            "observation": {**observation, "status": status},
            "reward": reward,
        },
        length=1,
    )
    # Single-episode, single-step metrics: every score collapses to `reward`.
    metrics = RolloutMetrics(
        episode_returns=[reward],
        mean_return=reward,
        num_steps=1,
        num_episodes=1,
        outcome_score=reward,
        events_score=reward,
        details={"status": status, "correct": correct},
    )

    # Include a minimal trace when requested or tracing is enabled via env
    include_trace = bool(
        (request.record and getattr(request.record, "return_trace", False))
        or os.getenv("TASKAPP_TRACING_ENABLED")
    )
    trace_payload = None
    if include_trace:
        try:
            # Minimal structured trace for assertions
            trace_payload = {
                "session_id": str(uuid.uuid4()),
                "events_count": 1,
                "decision_rewards": [reward],
                "lm_calls": (
                    [{"prompt": str(observation.get("problem", "")), "response": str(tool_calls)}]
                    if tool_calls
                    else []
                ),
                "metadata": {
                    "env": "math_single_step",
                    "split": sample["split"],
                    "index": sample["index"],
                    "status": status,
                },
            }
        except Exception:
            trace_payload = None

    return RolloutResponse(
        run_id=request.run_id,
        trajectories=[trajectory],
        branches={},
        metrics=metrics,
        aborted=False,
        ops_executed=2,
        trace=trace_payload,
    )
783
+
784
+
785
def build_dataset() -> tuple[TaskDatasetRegistry, MathDataset]:
    """Instantiate the MATH dataset and register it with a fresh registry.

    Raises:
        RuntimeError: when the default split cannot be prepared (e.g. no
            network access and no local dataset directory configured).
    """
    registry = TaskDatasetRegistry()
    dataset = MathDataset(name=DATASET_NAME, config=DATASET_CONFIG, splits=AVAILABLE_SPLITS)
    # Ensure default split is available when the task app boots
    try:
        dataset.ensure_ready([DEFAULT_SPLIT])
    except Exception as exc:
        message = (
            "Failed to initialise math dataset. Set MATH_DATASET_LOCAL_DIR or ensure network access.\n"
            f"Underlying error: {exc}"
        )
        raise RuntimeError(message) from exc
    registry.register(MATH_DATASET_SPEC, lambda _spec: dataset, cache=True)
    return registry, dataset
798
+
799
+
800
def _base_task_info() -> TaskInfo:
    """Build the static TaskInfo template shared by all instances of this app.

    Callers (e.g. ``provide_task_instances``) overlay per-sample fields such
    as the dataset split/index on top of this template.
    """
    return TaskInfo(
        task={"id": "math_single_step", "name": "Math Single Step", "version": "1.0.0"},
        environments=["math"],
        # Exactly one tool call per episode: submit the final answer.
        action_space={
            "type": "tool_call",
            "tools": [
                {
                    "name": TOOL_NAME,
                    "description": "Submit the final answer for the math problem.",
                    "schema": {"answer": "string"},
                }
            ],
            "max_calls": 1,
        },
        observation={
            "summary": "Single math word problem presented as plain text.",
            "keys": ["problem"],
        },
        dataset={
            **MATH_DATASET_SPEC.model_dump(),
            "hf_dataset": DATASET_NAME,
            "hf_config": DATASET_CONFIG,
        },
        rubric={
            "version": "1",
            "criteria_count": 1,
            "source": "inline",
        },
        inference={
            "supports_proxy": True,
            "tool": {"name": TOOL_NAME, "parallel_tool_calls": False},
        },
        capabilities={
            "supports_rollout": True,
            "supports_env_lifecycle": True,
            "requires_api_key_header": True,
        },
        limits={"max_turns": 1},
    )
840
+
841
+
842
# Outcome rubric: a single weighted criterion rewarding a correct final answer.
# The cast is needed because load_rubric's return type is wider than Rubric.
OUTCOME_RUBRIC: Rubric = cast(
    Rubric,
    load_rubric(
        {
            "version": "1",
            "goal_text": "Encourage correct single-step math answers via tool calls.",
            "aggregation": "weighted_sum",
            "criteria": [
                {
                    "id": "correct_answer",
                    "description": "Submit the correct final answer using the math_submit tool.",
                    "weight": 1.0,
                }
            ],
        }
    ),
)
859
+
860
# Events rubric: a single criterion checking that exactly one well-formed
# tool call (with a non-empty answer) was produced during the episode.
EVENTS_RUBRIC: Rubric = cast(
    Rubric,
    load_rubric(
        {
            "version": "1",
            "goal_text": "Penalize missing or malformed tool calls.",
            "aggregation": "weighted_sum",
            "criteria": [
                {
                    "id": "tool_usage",
                    "description": "Make exactly one tool call with an answer string.",
                    "weight": 1.0,
                }
            ],
        }
    ),
)
877
+
878
+
879
def describe_taskset(dataset: MathDataset) -> dict[str, Any]:
    """Summarise the dataset spec plus per-split sizes for taskset metadata."""
    sizes = {split: dataset.size(split) for split in dataset.splits}
    summary: dict[str, Any] = dict(MATH_DATASET_SPEC.model_dump())
    summary["hf_dataset"] = DATASET_NAME
    summary["hf_config"] = DATASET_CONFIG
    summary["sizes"] = sizes
    return summary
886
+
887
+
888
def provide_task_instances(dataset: MathDataset, seeds: Sequence[int]) -> Iterable[TaskInfo]:
    """Yield one TaskInfo per seed, annotated with the sampled problem's coordinates."""
    template = _base_task_info()
    for seed in seeds:
        sample = dataset.sample(split=DEFAULT_SPLIT, index=seed)
        observation_meta = {**template.observation, "sample_index": sample["index"]}
        dataset_meta = {
            **template.dataset,
            "split": sample["split"],
            "index": sample["index"],
        }
        yield TaskInfo(
            task=template.task,
            environments=template.environments,
            action_space=template.action_space,
            observation=observation_meta,
            dataset=dataset_meta,
            rubric=template.rubric,
            inference=template.inference,
            capabilities=template.capabilities,
            limits=template.limits,
        )
907
+
908
+
909
def build_config() -> TaskAppConfig:
    """Assemble the TaskAppConfig for the math single-step task app.

    Wires together the dataset, the environment manager, optional session
    tracing, optional SFT output capture, and vendor proxy support
    (OpenAI/Groq keys enable the respective proxies).
    """
    registry, dataset = build_dataset()
    base_info = _base_task_info()

    # Tracing is opt-in via environment; the factory is only installed when enabled.
    tracing_enabled = tracing_env_enabled()
    tracing_db_url = resolve_tracing_db_url()
    tracer_factory = build_tracer_factory(
        SessionTracer, enabled=tracing_enabled, db_url=tracing_db_url
    )
    sft_output_dir = resolve_sft_output_dir()

    app_state: dict[str, Any] = {
        "math_dataset": dataset,
        "math_env_manager": MathEnvironmentManager(dataset),
        "tracing_enabled": tracing_enabled,
    }
    if tracer_factory is not None:
        app_state["session_tracer_factory"] = tracer_factory
    if sft_output_dir:
        app_state["sft_output_dir"] = sft_output_dir

    # Enable vendor proxies only for providers whose API keys are configured.
    proxy_keys = normalize_vendor_keys()
    openai_key = proxy_keys.get("OPENAI_API_KEY")
    groq_key = proxy_keys.get("GROQ_API_KEY")
    proxy_config = ProxyConfig(
        enable_openai=openai_key is not None,
        enable_groq=groq_key is not None,
        system_hint=(
            "You must respond with a single math_submit tool call containing only the final answer."
        ),
    )

    config = TaskAppConfig(
        app_id="math-single-step",
        name="Math Single Step Task",
        description="Single-step math reasoning environment built on the MATH dataset.",
        base_task_info=base_info,
        describe_taskset=lambda: describe_taskset(dataset),
        provide_task_instances=lambda seeds: provide_task_instances(dataset, seeds),
        rollout=rollout_executor,
        dataset_registry=registry,
        rubrics=RubricBundle(outcome=OUTCOME_RUBRIC, events=EVENTS_RUBRIC),
        proxy=proxy_config,
        routers=(math_router,),
        app_state=app_state,
        cors_origins=["*"],
    )
    return config
957
+
958
+
959
# Register this task app (and its Modal deployment recipe) at import time so
# the CLI can discover it by id ("math-single-step") or alias ("math-rl").
register_task_app(
    entry=TaskAppEntry(
        app_id="math-single-step",
        description="Single-step math reasoning task app using EleutherAI/math dataset.",
        config_factory=build_config,
        aliases=("math-rl",),
        modal=ModalDeploymentConfig(
            app_name="synth-math-single-step",
            # Runtime dependencies installed into the Modal image.
            pip_packages=(
                "datasets>=4.0.0",
                "fastapi>=0.115.0",
                "pydantic>=2.0.0",
                "httpx>=0.26.0",
                "requests>=2.32.0",
                "python-dotenv>=1.0.0",
                "diskcache>=5.6.3",
                "duckdb>=1.0.0",
                "ty>=0.0.1a5",
                "toml>=0.10.2",
                "aiosqlite>=0.21.0",
                "libsql>=0.1.8",
                "pynacl>=1.5.0",
                "sqlalchemy>=2.0.42",
            ),
            # Local source trees mounted into the container image.
            extra_local_dirs=(
                (str(REPO_ROOT / "synth_ai"), "/opt/synth_ai_repo/synth_ai"),
                (str(REPO_ROOT / "examples" / "rl"), "/opt/synth_ai_repo/examples/rl"),
            ),
            # Persistent volume so the HF dataset is downloaded only once.
            volume_mounts=(("math-dataset-cache", "/modal_volumes/math_dataset"),),
        ),
    )
)