synth-ai 0.2.9.dev7__py3-none-any.whl → 0.2.9.dev9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (327)
  1. examples/__init__.py +16 -0
  2. examples/crafter_debug_render.py +8 -11
  3. examples/qwen_coder/README.md +102 -0
  4. examples/qwen_coder/_shared.py +113 -0
  5. examples/qwen_coder/configs/coder_lora_30b.toml +61 -0
  6. examples/qwen_coder/configs/coder_lora_4b.toml +57 -0
  7. examples/qwen_coder/configs/coder_lora_small.toml +58 -0
  8. examples/qwen_coder/generate_dataset.py +98 -0
  9. examples/qwen_coder/infer_ft_smoke.py +64 -0
  10. examples/qwen_coder/infer_prod_proxy.py +73 -0
  11. examples/qwen_coder/infer_via_synth.py +87 -0
  12. examples/qwen_coder/scripts/infer_coder.sh +18 -0
  13. examples/qwen_coder/scripts/train_coder_30b.sh +21 -0
  14. examples/qwen_coder/sft_full_17b.py +103 -0
  15. examples/qwen_coder/sft_lora_30b.py +110 -0
  16. examples/qwen_coder/subset_jsonl.py +38 -0
  17. examples/qwen_coder/validate_jsonl.py +59 -0
  18. examples/rl/run_eval.py +36 -37
  19. examples/rl/run_rl_and_save.py +5 -5
  20. examples/rl/task_app/math_single_step.py +65 -43
  21. examples/rl/task_app/math_task_app.py +3 -3
  22. examples/sft/README.md +139 -0
  23. examples/sft/configs/crafter_fft_qwen0p6b.toml +44 -0
  24. examples/sft/configs/crafter_lora_qwen0p6b.toml +45 -0
  25. examples/sft/evaluate.py +117 -0
  26. examples/sft/export_dataset.py +117 -0
  27. examples/sft/generate_traces.py +162 -0
  28. examples/swe/__init__.py +12 -0
  29. examples/swe/task_app/README.md +105 -0
  30. examples/swe/task_app/__init__.py +2 -0
  31. examples/swe/task_app/grpo_swe_mini.py +571 -0
  32. examples/swe/task_app/grpo_swe_mini_task_app.py +136 -0
  33. examples/swe/task_app/hosted/README.md +173 -0
  34. examples/swe/task_app/hosted/__init__.py +5 -0
  35. examples/swe/task_app/hosted/branching.py +143 -0
  36. examples/swe/task_app/hosted/environment_routes.py +1289 -0
  37. examples/swe/task_app/hosted/envs/__init__.py +1 -0
  38. examples/swe/task_app/hosted/envs/crafter/__init__.py +6 -0
  39. examples/swe/task_app/hosted/envs/crafter/app.py +1 -0
  40. examples/swe/task_app/hosted/envs/crafter/environment.py +522 -0
  41. examples/swe/task_app/hosted/envs/crafter/policy.py +478 -0
  42. examples/swe/task_app/hosted/envs/crafter/react_agent.py +108 -0
  43. examples/swe/task_app/hosted/envs/crafter/shared.py +305 -0
  44. examples/swe/task_app/hosted/envs/crafter/tools.py +47 -0
  45. examples/swe/task_app/hosted/envs/mini_swe/__init__.py +8 -0
  46. examples/swe/task_app/hosted/envs/mini_swe/environment.py +1164 -0
  47. examples/swe/task_app/hosted/envs/mini_swe/policy.py +355 -0
  48. examples/swe/task_app/hosted/envs/mini_swe/shared.py +83 -0
  49. examples/swe/task_app/hosted/envs/mini_swe/tools.py +96 -0
  50. examples/swe/task_app/hosted/hosted_app.py +204 -0
  51. examples/swe/task_app/hosted/inference/__init__.py +5 -0
  52. examples/swe/task_app/hosted/inference/openai_client.py +618 -0
  53. examples/swe/task_app/hosted/main.py +100 -0
  54. examples/swe/task_app/hosted/policy_routes.py +1079 -0
  55. examples/swe/task_app/hosted/registry.py +195 -0
  56. examples/swe/task_app/hosted/rollout.py +1869 -0
  57. examples/swe/task_app/hosted/storage/__init__.py +5 -0
  58. examples/swe/task_app/hosted/storage/volume.py +211 -0
  59. examples/swe/task_app/hosted/test_agents.py +161 -0
  60. examples/swe/task_app/hosted/test_service.py +137 -0
  61. examples/swe/task_app/hosted/utils.py +62 -0
  62. examples/vlm/README.md +68 -0
  63. examples/vlm/configs/crafter_vlm_gpt4o.toml +44 -0
  64. examples/vlm/crafter_image_only_agent.py +207 -0
  65. examples/vlm/crafter_openai_vlm_agent.py +277 -0
  66. examples/vlm/filter_image_rows.py +63 -0
  67. examples/vlm/run_crafter_vlm_benchmark.py +316 -0
  68. examples/warming_up_to_rl/analyze_trace_db.py +5 -5
  69. examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +11 -1
  70. examples/warming_up_to_rl/export_trace_sft.py +78 -21
  71. examples/warming_up_to_rl/groq_test.py +4 -4
  72. examples/warming_up_to_rl/manage_secrets.py +13 -18
  73. examples/warming_up_to_rl/run_eval.py +42 -44
  74. examples/warming_up_to_rl/run_fft_and_save.py +11 -16
  75. examples/warming_up_to_rl/run_local_rollout.py +1 -3
  76. examples/warming_up_to_rl/run_local_rollout_modal.py +2 -4
  77. examples/warming_up_to_rl/run_local_rollout_parallel.py +1 -4
  78. examples/warming_up_to_rl/run_local_rollout_traced.py +3 -5
  79. examples/warming_up_to_rl/run_rl_and_save.py +5 -6
  80. examples/warming_up_to_rl/run_rollout_remote.py +8 -10
  81. examples/warming_up_to_rl/task_app/README.md +6 -2
  82. examples/warming_up_to_rl/task_app/grpo_crafter.py +234 -35
  83. examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +2 -3
  84. examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +1 -1
  85. examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +9 -11
  86. examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +131 -114
  87. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +101 -41
  88. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +73 -51
  89. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +14 -6
  90. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +16 -16
  91. examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +32 -34
  92. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +94 -31
  93. examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +0 -2
  94. examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +303 -203
  95. examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +21 -23
  96. examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +328 -225
  97. examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +13 -13
  98. examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +1 -0
  99. examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +1 -0
  100. examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +4 -3
  101. synth/__init__.py +14 -0
  102. synth_ai/__init__.py +26 -4
  103. synth_ai/api/models/supported.py +376 -0
  104. synth_ai/api/train/builders.py +128 -21
  105. synth_ai/api/train/cli.py +80 -64
  106. synth_ai/api/train/config_finder.py +7 -2
  107. synth_ai/api/train/env_resolver.py +1 -1
  108. synth_ai/api/train/pollers.py +2 -1
  109. synth_ai/api/train/supported_algos.py +139 -0
  110. synth_ai/api/train/task_app.py +1 -2
  111. synth_ai/api/train/utils.py +13 -44
  112. synth_ai/cli/__init__.py +8 -0
  113. synth_ai/cli/_modal_wrapper.py +28 -0
  114. synth_ai/cli/_typer_patch.py +49 -0
  115. synth_ai/cli/balance.py +1 -2
  116. synth_ai/cli/calc.py +1 -1
  117. synth_ai/cli/demo.py +2 -1
  118. synth_ai/cli/recent.py +2 -2
  119. synth_ai/cli/rl_demo.py +2 -1
  120. synth_ai/cli/root.py +11 -13
  121. synth_ai/cli/status.py +2 -2
  122. synth_ai/cli/task_apps.py +529 -179
  123. synth_ai/cli/traces.py +6 -4
  124. synth_ai/cli/watch.py +12 -18
  125. synth_ai/demo_registry.py +1 -1
  126. synth_ai/demos/core/cli.py +36 -43
  127. synth_ai/demos/demo_task_apps/__init__.py +3 -3
  128. synth_ai/demos/demo_task_apps/core.py +17 -25
  129. synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +3 -4
  130. synth_ai/demos/demo_task_apps/math/app.py +2 -1
  131. synth_ai/demos/demo_task_apps/math/deploy_modal.py +3 -4
  132. synth_ai/demos/demo_task_apps/math/modal_task_app.py +16 -18
  133. synth_ai/demos/demo_task_apps/math/task_app_entry.py +0 -1
  134. synth_ai/environments/examples/crafter_classic/environment.py +76 -1
  135. synth_ai/environments/reproducibility/tree.py +2 -5
  136. synth_ai/environments/service/app.py +11 -12
  137. synth_ai/environments/service/core_routes.py +4 -7
  138. synth_ai/environments/stateful/engine.py +1 -1
  139. synth_ai/environments/tasks/core.py +1 -0
  140. synth_ai/environments/tasks/filters.py +5 -6
  141. synth_ai/environments/tasks/utils.py +4 -5
  142. synth_ai/handshake.py +9 -9
  143. synth_ai/http.py +1 -1
  144. synth_ai/http_client.py +18 -10
  145. synth_ai/inference/client.py +15 -5
  146. synth_ai/jobs/client.py +78 -83
  147. synth_ai/learning/__init__.py +41 -6
  148. synth_ai/learning/algorithms.py +14 -0
  149. synth_ai/learning/client.py +91 -24
  150. synth_ai/learning/config.py +2 -38
  151. synth_ai/learning/ft_client.py +4 -59
  152. synth_ai/learning/health.py +5 -6
  153. synth_ai/learning/jobs.py +31 -47
  154. synth_ai/{rl → learning/rl}/__init__.py +14 -4
  155. synth_ai/learning/rl/client.py +267 -0
  156. synth_ai/learning/rl/config.py +31 -0
  157. synth_ai/{rl → learning/rl}/contracts.py +5 -8
  158. synth_ai/{rl → learning/rl}/env_keys.py +39 -15
  159. synth_ai/learning/rl/secrets.py +13 -0
  160. synth_ai/learning/rl_client.py +2 -281
  161. synth_ai/learning/sft/__init__.py +29 -0
  162. synth_ai/learning/sft/client.py +68 -0
  163. synth_ai/learning/sft/config.py +270 -0
  164. synth_ai/learning/sft/data.py +295 -0
  165. synth_ai/learning/sse.py +25 -24
  166. synth_ai/learning/validators.py +25 -28
  167. synth_ai/lm/__init__.py +21 -47
  168. synth_ai/main.py +6 -0
  169. synth_ai/task/__init__.py +25 -27
  170. synth_ai/task/apps/__init__.py +7 -8
  171. synth_ai/task/auth.py +8 -8
  172. synth_ai/task/client.py +14 -14
  173. synth_ai/task/contracts.py +36 -35
  174. synth_ai/task/datasets.py +6 -5
  175. synth_ai/task/errors.py +10 -10
  176. synth_ai/task/health.py +17 -9
  177. synth_ai/task/json.py +58 -23
  178. synth_ai/task/proxy.py +13 -9
  179. synth_ai/task/rubrics.py +16 -15
  180. synth_ai/task/server.py +12 -12
  181. synth_ai/task/tracing_utils.py +4 -4
  182. synth_ai/task/vendors.py +5 -6
  183. synth_ai/tracing_v3/__init__.py +2 -0
  184. synth_ai/tracing_v3/abstractions.py +21 -4
  185. synth_ai/tracing_v3/decorators.py +18 -16
  186. synth_ai/tracing_v3/hooks.py +5 -5
  187. synth_ai/tracing_v3/llm_call_record_helpers.py +6 -6
  188. synth_ai/tracing_v3/session_tracer.py +40 -14
  189. synth_ai/tracing_v3/storage/base.py +85 -0
  190. synth_ai/tracing_v3/storage/config.py +21 -8
  191. synth_ai/tracing_v3/storage/factory.py +10 -7
  192. synth_ai/tracing_v3/storage/utils.py +4 -2
  193. synth_ai/tracing_v3/turso/daemon.py +7 -2
  194. synth_ai/tracing_v3/turso/models.py +2 -2
  195. synth_ai/tracing_v3/turso/native_manager.py +1173 -0
  196. synth_ai/tracing_v3/utils.py +4 -4
  197. synth_ai/v0/api/__init__.py +8 -0
  198. synth_ai/v0/api/models/__init__.py +8 -0
  199. synth_ai/v0/api/models/supported.py +8 -0
  200. synth_ai/v0/config/__init__.py +15 -0
  201. synth_ai/v0/config/base_url.py +12 -0
  202. synth_ai/v0/lm/__init__.py +51 -0
  203. synth_ai/{lm → v0/lm}/caching/ephemeral.py +2 -2
  204. synth_ai/{lm → v0/lm}/caching/handler.py +4 -4
  205. synth_ai/{lm → v0/lm}/caching/initialize.py +1 -1
  206. synth_ai/{lm → v0/lm}/caching/persistent.py +1 -1
  207. synth_ai/{lm → v0/lm}/config.py +6 -1
  208. synth_ai/{lm → v0/lm}/core/all.py +9 -9
  209. synth_ai/{lm → v0/lm}/core/main.py +6 -6
  210. synth_ai/{lm → v0/lm}/core/main_v3.py +10 -10
  211. synth_ai/{lm → v0/lm}/core/synth_models.py +2 -14
  212. synth_ai/{lm → v0/lm}/core/vendor_clients.py +2 -2
  213. synth_ai/{lm → v0/lm}/overrides.py +2 -2
  214. synth_ai/{lm → v0/lm}/provider_support/anthropic.py +4 -4
  215. synth_ai/{lm → v0/lm}/provider_support/openai.py +5 -5
  216. synth_ai/{lm → v0/lm}/structured_outputs/handler.py +5 -5
  217. synth_ai/{lm → v0/lm}/structured_outputs/rehabilitate.py +1 -1
  218. synth_ai/{lm → v0/lm}/vendors/core/anthropic_api.py +9 -9
  219. synth_ai/{lm → v0/lm}/vendors/core/gemini_api.py +5 -5
  220. synth_ai/{lm → v0/lm}/vendors/core/mistral_api.py +5 -5
  221. synth_ai/{lm → v0/lm}/vendors/core/openai_api.py +10 -10
  222. synth_ai/{lm → v0/lm}/vendors/openai_standard.py +8 -8
  223. synth_ai/{lm → v0/lm}/vendors/openai_standard_responses.py +2 -2
  224. synth_ai/{lm → v0/lm}/vendors/supported/custom_endpoint.py +3 -3
  225. synth_ai/{lm → v0/lm}/vendors/supported/deepseek.py +2 -2
  226. synth_ai/{lm → v0/lm}/vendors/supported/grok.py +2 -2
  227. synth_ai/{lm → v0/lm}/vendors/supported/groq.py +1 -1
  228. synth_ai/{lm → v0/lm}/vendors/supported/ollama.py +1 -1
  229. synth_ai/{lm → v0/lm}/vendors/supported/openrouter.py +3 -3
  230. synth_ai/{lm → v0/lm}/vendors/supported/together.py +1 -1
  231. synth_ai/{lm → v0/lm}/vendors/synth_client.py +1 -1
  232. synth_ai/v0/tracing_v3/__init__.py +10 -0
  233. synth_ai/v0/tracing_v3/abstractions.py +3 -0
  234. synth_ai/v0/tracing_v3/decorators.py +3 -0
  235. synth_ai/v0/tracing_v3/llm_call_record_helpers.py +3 -0
  236. synth_ai/v0/tracing_v3/session_tracer.py +3 -0
  237. synth_ai-0.2.9.dev9.dist-info/METADATA +191 -0
  238. {synth_ai-0.2.9.dev7.dist-info → synth_ai-0.2.9.dev9.dist-info}/RECORD +268 -238
  239. {synth_ai-0.2.9.dev7.dist-info → synth_ai-0.2.9.dev9.dist-info}/top_level.txt +1 -0
  240. examples/common_old/backend.py +0 -20
  241. examples/evals_old/README.md +0 -98
  242. examples/evals_old/__init__.py +0 -6
  243. examples/evals_old/compare_models.py +0 -1038
  244. examples/evals_old/example_log.md +0 -145
  245. examples/evals_old/run_demo.sh +0 -126
  246. examples/evals_old/trace_analysis.py +0 -270
  247. examples/finetuning_old/_backup_synth_qwen/config.toml +0 -29
  248. examples/finetuning_old/_backup_synth_qwen/example_log.md +0 -324
  249. examples/finetuning_old/_backup_synth_qwen/filter_traces.py +0 -60
  250. examples/finetuning_old/_backup_synth_qwen/filter_traces_achievements.py +0 -243
  251. examples/finetuning_old/_backup_synth_qwen/purge_v3_traces.py +0 -109
  252. examples/finetuning_old/_backup_synth_qwen/react_agent_lm.py +0 -1924
  253. examples/finetuning_old/_backup_synth_qwen/readme.md +0 -49
  254. examples/finetuning_old/_backup_synth_qwen/run_crafter_qwen4b.py +0 -114
  255. examples/finetuning_old/_backup_synth_qwen/run_demo.sh +0 -195
  256. examples/finetuning_old/_backup_synth_qwen/sft_kickoff.py +0 -119
  257. examples/finetuning_old/synth_qwen_v1/README.md +0 -68
  258. examples/finetuning_old/synth_qwen_v1/filter_traces.py +0 -60
  259. examples/finetuning_old/synth_qwen_v1/filter_traces_achievements.py +0 -243
  260. examples/finetuning_old/synth_qwen_v1/finetune.py +0 -46
  261. examples/finetuning_old/synth_qwen_v1/hello_ft_model.py +0 -71
  262. examples/finetuning_old/synth_qwen_v1/infer.py +0 -36
  263. examples/finetuning_old/synth_qwen_v1/poll.py +0 -46
  264. examples/finetuning_old/synth_qwen_v1/prepare_data.py +0 -35
  265. examples/finetuning_old/synth_qwen_v1/purge_v3_traces.py +0 -109
  266. examples/finetuning_old/synth_qwen_v1/react_agent_lm.py +0 -1933
  267. examples/finetuning_old/synth_qwen_v1/run_crafter_sft_job.py +0 -210
  268. examples/finetuning_old/synth_qwen_v1/run_ft_job.py +0 -237
  269. examples/finetuning_old/synth_qwen_v1/upload_data.py +0 -34
  270. examples/finetuning_old/synth_qwen_v1/util.py +0 -152
  271. examples/rl_old/task_app.py +0 -1131
  272. examples/warming_up_to_rl/old/event_rewards.md +0 -234
  273. examples/warming_up_to_rl/old/notes.md +0 -73
  274. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_traces_sft_turso.py +0 -738
  275. synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/filter_traces_sft_turso.py +0 -580
  276. synth_ai/experimental/synth_oss.py +0 -445
  277. synth_ai/learning/filtering.py +0 -0
  278. synth_ai/learning/offline/dpo.py +0 -0
  279. synth_ai/learning/offline/providers.py +0 -7
  280. synth_ai/learning/offline/sft.py +0 -0
  281. synth_ai/learning/offline/shared.py +0 -0
  282. synth_ai/learning/online/grpo.py +0 -0
  283. synth_ai/learning/online/irft.py +0 -0
  284. synth_ai/learning/prompts/banking77_injection_eval.py +0 -168
  285. synth_ai/learning/prompts/gepa.py +0 -0
  286. synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +0 -211
  287. synth_ai/learning/prompts/mipro.py +0 -289
  288. synth_ai/learning/prompts/random_search.py +0 -249
  289. synth_ai/learning/prompts/run_mipro_banking77.py +0 -172
  290. synth_ai/learning/prompts/run_random_search_banking77.py +0 -329
  291. synth_ai/rl/secrets.py +0 -19
  292. synth_ai/scripts/verify_rewards.py +0 -100
  293. synth_ai/tracing/__init__.py +0 -30
  294. synth_ai/tracing_v1/__init__.py +0 -33
  295. synth_ai/tracing_v3/turso/__init__.py +0 -25
  296. synth_ai/tracing_v3/turso/manager.py +0 -838
  297. synth_ai/zyk/__init__.py +0 -30
  298. synth_ai-0.2.9.dev7.dist-info/METADATA +0 -131
  299. /synth_ai/{lm → v0/lm}/caching/__init__.py +0 -0
  300. /synth_ai/{lm → v0/lm}/caching/constants.py +0 -0
  301. /synth_ai/{lm → v0/lm}/caching/dbs.py +0 -0
  302. /synth_ai/{lm → v0/lm}/constants.py +0 -0
  303. /synth_ai/{lm → v0/lm}/core/__init__.py +0 -0
  304. /synth_ai/{lm → v0/lm}/core/exceptions.py +0 -0
  305. /synth_ai/{lm → v0/lm}/cost/__init__.py +0 -0
  306. /synth_ai/{lm → v0/lm}/cost/monitor.py +0 -0
  307. /synth_ai/{lm → v0/lm}/cost/statefulness.py +0 -0
  308. /synth_ai/{lm → v0/lm}/injection.py +0 -0
  309. /synth_ai/{lm → v0/lm}/provider_support/__init__.py +0 -0
  310. /synth_ai/{lm → v0/lm}/provider_support/suppress_logging.py +0 -0
  311. /synth_ai/{lm → v0/lm}/structured_outputs/__init__.py +0 -0
  312. /synth_ai/{lm → v0/lm}/structured_outputs/inject.py +0 -0
  313. /synth_ai/{lm → v0/lm}/tools/__init__.py +0 -0
  314. /synth_ai/{lm → v0/lm}/tools/base.py +0 -0
  315. /synth_ai/{lm → v0/lm}/unified_interface.py +0 -0
  316. /synth_ai/{lm → v0/lm}/vendors/__init__.py +0 -0
  317. /synth_ai/{lm → v0/lm}/vendors/base.py +0 -0
  318. /synth_ai/{lm → v0/lm}/vendors/core/__init__.py +0 -0
  319. /synth_ai/{lm → v0/lm}/vendors/core/synth_dev_api.py +0 -0
  320. /synth_ai/{lm → v0/lm}/vendors/local/__init__.py +0 -0
  321. /synth_ai/{lm → v0/lm}/vendors/local/ollama.py +0 -0
  322. /synth_ai/{lm → v0/lm}/vendors/retries.py +0 -0
  323. /synth_ai/{lm → v0/lm}/vendors/supported/__init__.py +0 -0
  324. /synth_ai/{lm → v0/lm}/warmup.py +0 -0
  325. {synth_ai-0.2.9.dev7.dist-info → synth_ai-0.2.9.dev9.dist-info}/WHEEL +0 -0
  326. {synth_ai-0.2.9.dev7.dist-info → synth_ai-0.2.9.dev9.dist-info}/entry_points.txt +0 -0
  327. {synth_ai-0.2.9.dev7.dist-info → synth_ai-0.2.9.dev9.dist-info}/licenses/LICENSE +0 -0
examples/__init__.py ADDED
@@ -0,0 +1,16 @@
+"""Top-level package for Synth AI example environments and utilities."""
+
+from importlib import resources as _resources
+
+__all__ = ["path_for"]
+
+
+def path_for(package: str, resource: str) -> str:
+    """Return absolute path for a packaged resource inside ``examples``.
+
+    This helper mirrors the one under ``synth_ai`` so hosted apps can access
+    bundled assets without needing to install the repo in editable mode.
+    """
+
+    with _resources.as_file(_resources.files(f"examples.{package}") / resource) as path:
+        return str(path)
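For reference, a minimal usage sketch of the new `path_for` helper; it assumes `examples.swe.task_app` (added in this release with an `__init__.py` and a `README.md`) is installed and importable:

```python
# Sketch only: resolve a bundled asset via the new examples.path_for helper.
# Assumes the examples.swe.task_app package from this release is importable.
from examples import path_for

readme = path_for("swe.task_app", "README.md")
print(readme)  # absolute filesystem path to the packaged README, usable by hosted apps
```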
examples/crafter_debug_render.py CHANGED
@@ -12,9 +12,10 @@ Run:
 """
 
 import argparse
+import contextlib
 import math
 import os
-from typing import Any, Dict, List
+from typing import Any
 
 import httpx
 
@@ -37,21 +38,19 @@ def try_import_crafter_mapping():
                 id_to_item[ind] = label.lower()
             return id_to_item
         finally:
-            try:
+            with contextlib.suppress(Exception):
                 env.close()
-            except Exception:
-                pass
     except Exception:
         return None
 
 
-def format_semantic_map_view(obs: Dict[str, Any], view_size: int = 7) -> str:
+def format_semantic_map_view(obs: dict[str, Any], view_size: int = 7) -> str:
     sem = obs.get("semantic_map") or obs.get("sem_map") or obs.get("map")
     if sem is None:
         return "No semantic map available"
 
     # Normalize to 2D grid
-    grid: List[List[int]]
+    grid: list[list[int]]
     if isinstance(sem, list) and sem and isinstance(sem[0], list):
         grid = sem
     elif isinstance(sem, list):
@@ -82,10 +81,10 @@ def format_semantic_map_view(obs: Dict[str, Any], view_size: int = 7) -> str:
     px, py = rows // 2, cols // 2
 
     half = max(1, view_size // 2)
-    lines: List[str] = []
+    lines: list[str] = []
     visible: set[str] = set()
     for dy in range(-half, half + 1):
-        row_cells: List[str] = []
+        row_cells: list[str] = []
         for dx in range(-half, half + 1):
             x = px + dx
             y = py + dy
@@ -175,12 +174,10 @@ async def main():
     print(format_semantic_map_view(sobs, view_size=7))
 
     # Cleanup
-    try:
+    with contextlib.suppress(Exception):
         await client.post(
             f"{args.base_url}/env/CrafterClassic/terminate", json={"env_id": env_id}
         )
-    except Exception:
-        pass
 
 
 if __name__ == "__main__":
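The hunks above replace try/except/pass cleanup with `contextlib.suppress`; a standalone sketch of the equivalent pattern:

```python
import contextlib


def cleanup() -> None:
    # Stand-in for env.close() or the HTTP terminate call in the diff above.
    raise RuntimeError("cleanup failed")


# Equivalent to try: cleanup() / except Exception: pass, but in one statement.
with contextlib.suppress(Exception):
    cleanup()
print("execution continues even though cleanup raised")
```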
examples/qwen_coder/README.md ADDED
@@ -0,0 +1,102 @@
+Qwen3 Coder – SFT with LoRA (all linear)
+
+This example mirrors the SFT LoRA flow under `examples/sft/` but targets the smallest Qwen3 Coder family model supported downstream. It configures LoRA on all linear projections ("all-linear") to match our RL LoRA recipes.
+
+Quick start
+
+1) Generate a tiny synthetic dataset (or export your own)
+
+```
+uv run python examples/qwen_coder/generate_dataset.py \
+  --output examples/qwen_coder/ft_data/coder_sft.small.jsonl \
+  --n 50 --seed 42 --lang python
+```
+
+2) Run training via the CLI:
+
+```
+uvx synth-ai train \
+  --type sft \
+  --config examples/qwen_coder/configs/coder_lora_small.toml \
+  --dataset examples/qwen_coder/ft_data/coder_sft.small.jsonl \
+  --env-file /path/to/.env
+```
+
+3) Inference via Synth API (pre/post SFT)
+
+Use the SDK's OpenAI-compatible chat client routed through Synth. Export SYNTH_API_KEY (and optionally BACKEND_BASE_URL) in your environment, or pass an env file to the CLI helpers.
+
+Minimal one-shot inference:
+
+```bash
+python - <<'PY'
+import os, asyncio
+from synth_ai.v0.lm.core import main_v3 as lm
+
+async def run():
+    model = os.getenv("MODEL", "Qwen/Qwen3-Coder-30B-A3B-Instruct")
+    resp = await lm.chat_async(
+        model,
+        messages=[{"role":"user","content":"Write a Python function to reverse a string."}],
+        max_tokens=128,
+        temperature=0.2,
+    )
+    print(resp["choices"][0]["message"]["content"])
+asyncio.run(run())
+PY
+```
+
+After training, set `MODEL=ft:...` to query the finetuned adapter.
+
+4) 30B LoRA variant
+
+```bash
+uvx synth-ai train \
+  --type sft \
+  --config examples/qwen_coder/configs/coder_lora_30b.toml \
+  --dataset examples/qwen_coder/ft_data/coder_sft.small.jsonl \
+  --env-file /path/to/.env
+```
+
+5) Faster iteration: 4B LoRA config
+
+```bash
+uvx synth-ai train \
+  --type sft \
+  --config examples/qwen_coder/configs/coder_lora_4b.toml \
+  --dataset examples/qwen_coder/ft_data/coder_sft.small.jsonl \
+  --env-file /path/to/.env
+```
+
+Environment variables
+
+- `SYNTH_API_KEY`: required for training/inference through the hosted backend
+- `BACKEND_BASE_URL`: defaults to `https://agent-learning.onrender.com/api`
+
+Post-SFT smoke
+
+- The training helper `sft_lora_30b.py` writes the resulting `ft:<id>` to `examples/qwen_coder/ft_data/ft_model_id.txt`.
+- Validate inference with your finetuned adapter:
+
+```bash
+uv run python examples/qwen_coder/infer_ft_smoke.py
+```
+
+Dataset utilities
+
+- `examples/qwen_coder/validate_jsonl.py`: sanity-check the first N lines for chat structure
+- `examples/qwen_coder/subset_jsonl.py`: create a capped subset for quick tests
+
+Optional CLI wrappers
+
+- `examples/qwen_coder/scripts/train_coder_30b.sh [/path/to/.env]`
+- `examples/qwen_coder/scripts/infer_coder.sh [/path/to/.env]`
+
+Notes
+
+- LoRA is enabled with `training.mode = "lora"` and `hyperparameters.train_kind = "peft"`.
+- The config sets an `all-linear` target to apply adapters broadly across attention and MLP projections.
+- Adjust `gradient_accumulation_steps`, `per_device_batch`, and `sequence_length` based on available GPU memory.
+- Use the Synth API client (above) for inference to ensure requests route via the hosted backend.
examples/qwen_coder/_shared.py ADDED
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+"""Shared helpers for Qwen coder SFT examples."""
+
+from __future__ import annotations
+
+import json
+import os
+from pathlib import Path
+
+TRAIN_DATA_PATH = Path("examples/qwen_coder/ft_data/coder_sft.small.jsonl")
+VAL_DATA_PATH = Path("examples/qwen_coder/ft_data/coder_sft.small.val.jsonl")
+DATA_DIR = TRAIN_DATA_PATH.parent
+
+_FALLBACK_RECORDS: list[dict[str, object]] = [
+    {
+        "messages": [
+            {"role": "user", "content": "Write a Python function `add(a, b)` that returns the sum of two numbers."}
+        ],
+        "response": "def add(a, b):\n return a + b\n",
+    },
+    {
+        "messages": [
+            {
+                "role": "user",
+                "content": "Implement a Python function `reverse_string(s)` that returns the reversed string.",
+            }
+        ],
+        "response": "def reverse_string(s: str) -> str:\n return s[::-1]\n",
+    },
+    {
+        "messages": [
+            {
+                "role": "user",
+                "content": "Write a Python function `count_words(text)` returning a dict mapping words to counts.",
+            }
+        ],
+        "response": "from collections import Counter\n\ndef count_words(text: str) -> dict[str, int]:\n words = [w for w in text.split() if w]\n return dict(Counter(words))\n",
+    },
+]
+
+
+def ensure_tiny_dataset() -> Path:
+    """Ensure the tiny coder dataset exists, generating or writing a fallback if needed."""
+    if TRAIN_DATA_PATH.exists():
+        return TRAIN_DATA_PATH
+
+    try:
+        from examples.qwen_coder.generate_dataset import main as gen_main  # type: ignore
+
+        gen_main()
+        if TRAIN_DATA_PATH.exists():
+            return TRAIN_DATA_PATH
+    except Exception:
+        # Fall back to inline dataset below.
+        pass
+
+    DATA_DIR.mkdir(parents=True, exist_ok=True)
+    with TRAIN_DATA_PATH.open("w", encoding="utf-8") as fh:
+        for record in _FALLBACK_RECORDS:
+            fh.write(json.dumps(record, separators=(",", ":")))
+            fh.write("\n")
+    return TRAIN_DATA_PATH
+
+
+def optional_validation_dataset() -> Path | None:
+    """Return validation dataset path if present."""
+    if VAL_DATA_PATH.exists():
+        return VAL_DATA_PATH
+    return None
+
+
+def _ensure_parent(path: Path) -> Path:
+    path.parent.mkdir(parents=True, exist_ok=True)
+    return path
+
+
+def resolve_output_path(default_filename: str) -> Path:
+    """Resolve output path for storing fine-tuned model ids."""
+    override = os.getenv("QWEN_CODER_FT_OUTPUT")
+    if override:
+        return _ensure_parent(Path(override).expanduser())
+    return _ensure_parent(DATA_DIR / default_filename)
+
+
+def resolve_model_id_path(default_filename: str) -> Path:
+    """Resolve path to read a stored fine-tuned model id."""
+    override = os.getenv("QWEN_CODER_FT_MODEL_PATH")
+    if override:
+        candidate = Path(override).expanduser()
+        if candidate.is_dir():
+            return candidate / default_filename
+        return candidate
+    return DATA_DIR / default_filename
+
+
+def resolve_infer_output_path(default_filename: str) -> Path:
+    """Resolve path for writing inference outputs."""
+    override = os.getenv("QWEN_CODER_FT_INFER_OUTPUT")
+    if override:
+        return _ensure_parent(Path(override).expanduser())
+    return _ensure_parent(DATA_DIR / default_filename)
+
+
+__all__ = [
+    "DATA_DIR",
+    "TRAIN_DATA_PATH",
+    "VAL_DATA_PATH",
+    "ensure_tiny_dataset",
+    "optional_validation_dataset",
+    "resolve_output_path",
+    "resolve_model_id_path",
+    "resolve_infer_output_path",
+]
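A short sketch of how these helpers are meant to compose; the paths shown are the defaults declared in the module above:

```python
# Sketch: exercise the shared helpers with their default locations.
from examples.qwen_coder._shared import (
    ensure_tiny_dataset,
    optional_validation_dataset,
    resolve_model_id_path,
)

train_path = ensure_tiny_dataset()        # writes the fallback JSONL if nothing exists yet
val_path = optional_validation_dataset()  # None unless coder_sft.small.val.jsonl is present
ft_id_file = resolve_model_id_path("ft_model_id.txt")  # honors QWEN_CODER_FT_MODEL_PATH
print(train_path, val_path, ft_id_file)
```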
examples/qwen_coder/configs/coder_lora_30b.toml ADDED
@@ -0,0 +1,61 @@
+# Qwen3 Coder 30B LoRA SFT – all-linear adapters
+
+[algorithm]
+type = "offline"
+method = "sft"
+variety = "fft"
+
+[job]
+model = "Qwen/Qwen3-Coder-30B-A3B-Instruct"
+
+[compute]
+gpu_type = "H200"
+gpu_count = 1
+nodes = 1
+
+[data]
+topology = {}
+# Optional validation set
+# validation_path = "examples/qwen_coder/ft_data/coder_sft.small.val.jsonl"
+
+[metadata]
+# Effective config hints consumed by the backend
+effective_config = { compute = { gpu_type = "H200", gpu_count = 1, nodes = 1 } }
+
+[training]
+mode = "lora"
+use_qlora = true
+
+[training.validation]
+enabled = true
+evaluation_strategy = "steps"
+eval_steps = 100
+save_best_model_at_end = true
+metric_for_best_model = "val.loss"
+greater_is_better = false
+
+[hyperparameters]
+n_epochs = 1
+train_kind = "peft"
+per_device_batch = 1
+gradient_accumulation_steps = 64
+sequence_length = 4096
+learning_rate = 5e-6
+warmup_ratio = 0.03
+
+[hyperparameters.parallelism]
+use_deepspeed = true
+deepspeed_stage = 2
+fsdp = false
+bf16 = true
+fp16 = false
+activation_checkpointing = true
+
+# LoRA target selection for coder models: apply to all linear projections
+[lora]
+r = 16
+alpha = 32
+dropout = 0.05
+target_modules = ["all-linear"]
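For a quick sanity check of a config like the one above, the standard-library `tomllib` (Python 3.11+) can parse it; this is only an inspection sketch, not how the synth-ai backend consumes the file:

```python
# Sketch: parse the 30B LoRA config and confirm the knobs the README points at.
import tomllib
from pathlib import Path

cfg = tomllib.loads(Path("examples/qwen_coder/configs/coder_lora_30b.toml").read_text())
assert cfg["training"]["mode"] == "lora"
assert cfg["hyperparameters"]["train_kind"] == "peft"
print(cfg["lora"]["target_modules"])  # ['all-linear']
```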
examples/qwen_coder/configs/coder_lora_4b.toml ADDED
@@ -0,0 +1,57 @@
+# Qwen3 Coder 4B LoRA SFT – all-linear adapters
+
+[job]
+model = "Qwen/Qwen3-4B"
+
+[compute]
+gpu_type = "H100"
+gpu_count = 1
+nodes = 1
+
+[data]
+topology = {}
+# Optional validation set
+# validation_path = "examples/qwen_coder/ft_data/coder_sft.small.val.jsonl"
+
+[metadata]
+# Effective config hints consumed by the backend
+effective_config = { compute = { gpu_type = "H100", gpu_count = 1, nodes = 1 } }
+
+[training]
+mode = "lora"
+use_qlora = true
+
+[training.validation]
+enabled = true
+evaluation_strategy = "steps"
+eval_steps = 100
+save_best_model_at_end = true
+metric_for_best_model = "val.loss"
+greater_is_better = false
+
+[hyperparameters]
+n_epochs = 1
+train_kind = "peft"
+per_device_batch = 2
+gradient_accumulation_steps = 32
+sequence_length = 4096
+learning_rate = 5e-6
+warmup_ratio = 0.03
+
+[hyperparameters.parallelism]
+use_deepspeed = true
+deepspeed_stage = 2
+fsdp = false
+bf16 = true
+fp16 = false
+activation_checkpointing = true
+
+# LoRA target selection for coder models: apply to all linear projections
+[lora]
+r = 16
+alpha = 32
+dropout = 0.05
+target_modules = ["all-linear"]
examples/qwen_coder/configs/coder_lora_small.toml ADDED
@@ -0,0 +1,58 @@
+# Qwen3 Coder LoRA SFT – all-linear adapters
+
+[algorithm]
+type = "offline"
+method = "sft"
+variety = "fft"
+
+[job]
+# Smallest supported Qwen3 base; replace with the smallest Coder variant when available
+model = "Qwen/Qwen3-1.7B"
+
+[compute]
+gpu_type = "H100"
+gpu_count = 1
+nodes = 1
+
+[data]
+topology = {}
+# Optional validation set
+# validation_path = "examples/sft/ft_data/coder_traces.val.jsonl"
+
+[training]
+mode = "lora"
+use_qlora = true
+
+[training.validation]
+enabled = true
+evaluation_strategy = "steps"
+eval_steps = 100
+save_best_model_at_end = true
+metric_for_best_model = "val.loss"
+greater_is_better = false
+
+[hyperparameters]
+n_epochs = 1
+train_kind = "peft"
+per_device_batch = 2
+gradient_accumulation_steps = 32
+sequence_length = 4096
+learning_rate = 5e-6
+warmup_ratio = 0.03
+
+[hyperparameters.parallelism]
+use_deepspeed = true
+deepspeed_stage = 2
+fsdp = false
+bf16 = true
+fp16 = false
+activation_checkpointing = true
+
+# LoRA target selection for coder models: apply to all linear projections
+[lora]
+r = 16
+alpha = 32
+dropout = 0.05
+target_modules = ["all-linear"]
examples/qwen_coder/generate_dataset.py ADDED
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+"""
+Generate a small, synthetic SFT JSONL dataset for coder-style prompts.
+
+Each line uses the minimal schema consumed by the SFT CLI:
+  {"messages": [{"role": "user", "content": "..."}], "response": "..."}
+
+Example:
+  uv run python examples/qwen_coder/generate_dataset.py \
+    --output examples/qwen_coder/ft_data/coder_sft.small.jsonl \
+    --n 50 --seed 42 --lang python
+"""
+from __future__ import annotations
+
+import argparse
+import json
+import random
+from collections.abc import Iterable
+from pathlib import Path
+
+PROMPT_TEMPLATES: dict[str, list[str]] = {
+    "python": [
+        "Write a Python function `add(a, b)` that returns the sum of two numbers.",
+        "Write a Python function `reverse_string(s)` that returns the reversed string.",
+        "Implement a Python function `is_palindrome(s)` that returns True if s is a palindrome.",
+        "Write a Python function `fibonacci(n)` that returns a list of the first n Fibonacci numbers.",
+        "Write a Python function `count_words(text)` that returns a dict of word -> count.",
+    ],
+    "javascript": [
+        "Write a JavaScript function `add(a, b)` that returns the sum of two numbers.",
+        "Write a JavaScript function `reverseString(s)` that returns the reversed string.",
+        "Implement a JavaScript function `isPalindrome(s)` that returns true if s is a palindrome.",
+        "Write a JavaScript function `fibonacci(n)` that returns an array of the first n Fibonacci numbers.",
+        "Write a JavaScript function `countWords(text)` that returns an object mapping word -> count.",
+    ],
+}
+
+
+SOLUTIONS: dict[str, list[str]] = {
+    "python": [
+        """def add(a, b):\n return a + b\n""",
+        """def reverse_string(s: str) -> str:\n return s[::-1]\n""",
+        """def is_palindrome(s: str) -> bool:\n t = ''.join(ch.lower() for ch in s if ch.isalnum())\n return t == t[::-1]\n""",
+        """def fibonacci(n: int) -> list[int]:\n a, b = 0, 1\n out: list[int] = []\n for _ in range(max(0, n)):\n out.append(a)\n a, b = b, a + b\n return out\n""",
+        """from collections import Counter\n\n"""
+        """def count_words(text: str) -> dict[str, int]:\n words = [w for w in text.split() if w]\n return dict(Counter(words))\n""",
+    ],
+    "javascript": [
+        """function add(a, b) {\n return a + b;\n}\n""",
+        """function reverseString(s) {\n return s.split('').reverse().join('');\n}\n""",
+        """function isPalindrome(s) {\n const t = (s.match(/[a-z0-9]/gi) || []).join('').toLowerCase();\n return t === t.split('').reverse().join('');\n}\n""",
+        """function fibonacci(n) {\n const out = [];\n let a = 0, b = 1;\n for (let i = 0; i < Math.max(0, n); i++) {\n out.push(a);\n [a, b] = [b, a + b];\n }\n return out;\n}\n""",
+        """function countWords(text) {\n const words = text.split(/\s+/).filter(Boolean);\n return words.reduce((acc, w) => { acc[w] = (acc[w] || 0) + 1; return acc; }, {});\n}\n""",
+    ],
+}
+
+
+def _iter_examples(n: int, lang: str) -> Iterable[dict]:
+    prompts = PROMPT_TEMPLATES.get(lang, PROMPT_TEMPLATES["python"]).copy()
+    answers = SOLUTIONS.get(lang, SOLUTIONS["python"]).copy()
+    for _ in range(n):
+        i = random.randrange(0, len(prompts))
+        j = random.randrange(0, len(answers))
+        user = prompts[i]
+        assistant = answers[j]
+        yield {
+            "messages": [
+                {"role": "user", "content": user},
+            ],
+            "response": assistant,
+        }
+
+
+def main() -> None:
+    ap = argparse.ArgumentParser(description="Generate synthetic coder SFT JSONL dataset")
+    ap.add_argument("--output", required=True, help="Path to write JSONL (will create parent dir)")
+    ap.add_argument("--n", type=int, default=50, help="Number of examples to generate")
+    ap.add_argument("--seed", type=int, default=42, help="Random seed")
+    ap.add_argument("--lang", choices=["python", "javascript"], default="python")
+    args = ap.parse_args()
+
+    random.seed(args.seed)
+    out_path = Path(args.output).expanduser().resolve()
+    out_path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Write JSONL
+    with out_path.open("w", encoding="utf-8") as fh:
+        for rec in _iter_examples(max(1, int(args.n)), lang=args.lang):
+            fh.write(json.dumps(rec, ensure_ascii=False))
+            fh.write("\n")
+
+    print(f"Wrote {args.n} examples to {out_path}")
+
+
+if __name__ == "__main__":
+    main()
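To spot-check the generated file against the minimal schema described in the docstring above, a small sketch (this is not the bundled `validate_jsonl.py`, just an illustration):

```python
# Sketch: verify the first generated line matches {"messages": [...], "response": "..."}.
import json
from pathlib import Path

path = Path("examples/qwen_coder/ft_data/coder_sft.small.jsonl")
first = json.loads(path.read_text(encoding="utf-8").splitlines()[0])
assert isinstance(first["messages"], list) and first["messages"][0]["role"] == "user"
assert isinstance(first["response"], str) and first["response"].strip()
print("ok:", first["messages"][0]["content"][:60])
```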
examples/qwen_coder/infer_ft_smoke.py ADDED
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+"""Post-SFT smoke test: read ft_model_id.txt and run a short inference.
+
+Env:
+  SYNTH_API_KEY (required)
+  BACKEND_BASE_URL (defaults to https://agent-learning.onrender.com/api)
+
+Writes:
+  examples/qwen_coder/ft_data/ft_infer_smoke.txt
+"""
+
+from __future__ import annotations
+
+import asyncio
+import os
+from typing import Any
+
+from examples.qwen_coder._shared import resolve_infer_output_path, resolve_model_id_path
+from synth_ai.inference.client import InferenceClient
+
+
+def _backend() -> str:
+    raw = os.getenv("BACKEND_BASE_URL", "https://agent-learning.onrender.com/api").strip()
+    return raw if raw.endswith("/api") else (raw + "/api")
+
+
+async def main() -> None:
+    api_key = os.getenv("SYNTH_API_KEY", "").strip()
+    if not api_key:
+        raise SystemExit("SYNTH_API_KEY required in environment")
+
+    ft_id_path = resolve_model_id_path(os.getenv("QWEN_CODER_FT_FILENAME", "ft_model_id.txt"))
+    if not ft_id_path.exists():
+        raise SystemExit(f"Missing {ft_id_path}; run SFT first")
+    model_id = ft_id_path.read_text(encoding="utf-8").strip()
+    if not model_id:
+        raise SystemExit("ft_model_id.txt is empty")
+
+    client = InferenceClient(base_url=_backend(), api_key=api_key, timeout=60.0)
+
+    prompt = os.getenv(
+        "PROMPT",
+        "Write a Python function to check if a string is a palindrome, then test it.",
+    )
+    resp: dict[str, Any] = await client.create_chat_completion(
+        model=model_id,
+        messages=[{"role": "user", "content": prompt}],
+        temperature=0.2,
+        max_tokens=256,
+        thinking_budget=256,
+    )
+
+    # Extract assistant content
+    content: str = (
+        resp.get("choices", [{}])[0].get("message", {}).get("content") or str(resp)
+    )
+    out_path = resolve_infer_output_path(os.getenv("QWEN_CODER_FT_INFER_FILENAME", "ft_infer_smoke.txt"))
+    out_path.write_text(content + "\n", encoding="utf-8")
+    print(f"Wrote {out_path} (len={len(content)})")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())