synth-ai 0.2.9.dev5__py3-none-any.whl → 0.2.9.dev6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of synth-ai might be problematic; see the registry's release advisory for more details.
- examples/__init__.py +16 -0
- examples/crafter_debug_render.py +23 -17
- examples/qwen_coder/README.md +102 -0
- examples/qwen_coder/_shared.py +113 -0
- examples/qwen_coder/configs/coder_lora_30b.toml +61 -0
- examples/qwen_coder/configs/coder_lora_4b.toml +57 -0
- examples/qwen_coder/configs/coder_lora_small.toml +58 -0
- examples/qwen_coder/generate_dataset.py +98 -0
- examples/qwen_coder/infer_ft_smoke.py +64 -0
- examples/qwen_coder/infer_prod_proxy.py +73 -0
- examples/qwen_coder/infer_via_synth.py +87 -0
- examples/qwen_coder/scripts/infer_coder.sh +18 -0
- examples/qwen_coder/scripts/train_coder_30b.sh +21 -0
- examples/qwen_coder/sft_full_17b.py +103 -0
- examples/qwen_coder/sft_lora_30b.py +110 -0
- examples/qwen_coder/subset_jsonl.py +38 -0
- examples/qwen_coder/validate_jsonl.py +59 -0
- examples/rl/configs/eval_base_qwen.toml +1 -1
- examples/rl/configs/rl_from_base_qwen17.toml +1 -1
- examples/rl/download_dataset.py +26 -10
- examples/rl/run_eval.py +53 -52
- examples/rl/run_rl_and_save.py +29 -12
- examples/rl/task_app/math_single_step.py +180 -41
- examples/rl/task_app/math_task_app.py +14 -6
- examples/sft/README.md +139 -0
- examples/sft/configs/crafter_fft_qwen0p6b.toml +44 -0
- examples/sft/configs/crafter_lora_qwen0p6b.toml +45 -0
- examples/sft/evaluate.py +117 -0
- examples/sft/export_dataset.py +117 -0
- examples/sft/generate_traces.py +162 -0
- examples/swe/__init__.py +12 -0
- examples/swe/task_app/README.md +105 -0
- examples/swe/task_app/__init__.py +2 -0
- examples/swe/task_app/grpo_swe_mini.py +571 -0
- examples/swe/task_app/grpo_swe_mini_task_app.py +136 -0
- examples/swe/task_app/hosted/README.md +173 -0
- examples/swe/task_app/hosted/__init__.py +5 -0
- examples/swe/task_app/hosted/branching.py +143 -0
- examples/swe/task_app/hosted/environment_routes.py +1289 -0
- examples/swe/task_app/hosted/envs/__init__.py +1 -0
- examples/swe/task_app/hosted/envs/crafter/__init__.py +6 -0
- examples/swe/task_app/hosted/envs/crafter/app.py +1 -0
- examples/swe/task_app/hosted/envs/crafter/environment.py +522 -0
- examples/swe/task_app/hosted/envs/crafter/policy.py +478 -0
- examples/swe/task_app/hosted/envs/crafter/react_agent.py +108 -0
- examples/swe/task_app/hosted/envs/crafter/shared.py +305 -0
- examples/swe/task_app/hosted/envs/crafter/tools.py +47 -0
- examples/swe/task_app/hosted/envs/mini_swe/__init__.py +8 -0
- examples/swe/task_app/hosted/envs/mini_swe/environment.py +1164 -0
- examples/swe/task_app/hosted/envs/mini_swe/policy.py +355 -0
- examples/swe/task_app/hosted/envs/mini_swe/shared.py +83 -0
- examples/swe/task_app/hosted/envs/mini_swe/tools.py +96 -0
- examples/swe/task_app/hosted/hosted_app.py +204 -0
- examples/swe/task_app/hosted/inference/__init__.py +5 -0
- examples/swe/task_app/hosted/inference/openai_client.py +618 -0
- examples/swe/task_app/hosted/main.py +100 -0
- examples/swe/task_app/hosted/policy_routes.py +1079 -0
- examples/swe/task_app/hosted/registry.py +195 -0
- examples/swe/task_app/hosted/rollout.py +1869 -0
- examples/swe/task_app/hosted/storage/__init__.py +5 -0
- examples/swe/task_app/hosted/storage/volume.py +211 -0
- examples/swe/task_app/hosted/test_agents.py +161 -0
- examples/swe/task_app/hosted/test_service.py +137 -0
- examples/swe/task_app/hosted/utils.py +62 -0
- examples/vlm/README.md +68 -0
- examples/vlm/configs/crafter_vlm_gpt4o.toml +44 -0
- examples/vlm/crafter_image_only_agent.py +207 -0
- examples/vlm/crafter_openai_vlm_agent.py +277 -0
- examples/vlm/filter_image_rows.py +63 -0
- examples/vlm/run_crafter_vlm_benchmark.py +316 -0
- examples/warming_up_to_rl/analyze_trace_db.py +12 -10
- examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +11 -1
- examples/warming_up_to_rl/export_trace_sft.py +218 -36
- examples/warming_up_to_rl/groq_test.py +15 -8
- examples/warming_up_to_rl/manage_secrets.py +29 -25
- examples/warming_up_to_rl/readme.md +9 -2
- examples/warming_up_to_rl/run_eval.py +137 -61
- examples/warming_up_to_rl/run_fft_and_save.py +131 -60
- examples/warming_up_to_rl/run_local_rollout.py +88 -39
- examples/warming_up_to_rl/run_local_rollout_modal.py +114 -28
- examples/warming_up_to_rl/run_local_rollout_parallel.py +81 -20
- examples/warming_up_to_rl/run_local_rollout_traced.py +126 -23
- examples/warming_up_to_rl/run_rl_and_save.py +35 -12
- examples/warming_up_to_rl/run_rollout_remote.py +44 -19
- examples/warming_up_to_rl/task_app/README.md +6 -2
- examples/warming_up_to_rl/task_app/grpo_crafter.py +319 -57
- examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +11 -30
- examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +9 -11
- examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +137 -182
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/app.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +150 -57
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +105 -69
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +19 -7
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +45 -42
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/tools.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +47 -45
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +198 -92
- examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +0 -2
- examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +361 -263
- examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +21 -23
- examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +394 -274
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/__init__.py +1 -1
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +56 -62
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +6 -15
- examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +4 -3
- synth/__init__.py +14 -0
- synth_ai/__init__.py +20 -4
- synth_ai/api/models/supported.py +376 -0
- synth_ai/api/train/builders.py +157 -26
- synth_ai/api/train/cli.py +213 -57
- synth_ai/api/train/config_finder.py +65 -5
- synth_ai/api/train/env_resolver.py +33 -15
- synth_ai/api/train/pollers.py +13 -4
- synth_ai/api/train/supported_algos.py +139 -0
- synth_ai/api/train/task_app.py +5 -3
- synth_ai/api/train/utils.py +33 -48
- synth_ai/cli/__init__.py +19 -4
- synth_ai/cli/_modal_wrapper.py +28 -0
- synth_ai/cli/_typer_patch.py +49 -0
- synth_ai/cli/balance.py +2 -3
- synth_ai/cli/calc.py +1 -1
- synth_ai/cli/demo.py +21 -6
- synth_ai/cli/recent.py +2 -2
- synth_ai/cli/rl_demo.py +77 -17
- synth_ai/cli/root.py +116 -39
- synth_ai/cli/status.py +2 -2
- synth_ai/cli/task_apps.py +1699 -259
- synth_ai/cli/traces.py +7 -4
- synth_ai/cli/turso.py +73 -0
- synth_ai/cli/watch.py +12 -18
- synth_ai/core/experiment.py +0 -2
- synth_ai/demo_registry.py +68 -31
- synth_ai/demos/core/cli.py +516 -194
- synth_ai/demos/demo_task_apps/__init__.py +3 -3
- synth_ai/demos/demo_task_apps/core.py +64 -28
- synth_ai/demos/demo_task_apps/crafter/configs/crafter_fft_4b.toml +2 -3
- synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +37 -30
- synth_ai/demos/demo_task_apps/math/_common.py +1 -2
- synth_ai/demos/demo_task_apps/math/app.py +2 -1
- synth_ai/demos/demo_task_apps/math/deploy_modal.py +3 -6
- synth_ai/demos/demo_task_apps/math/modal_task_app.py +183 -82
- synth_ai/demos/demo_task_apps/math/task_app_entry.py +0 -2
- synth_ai/environments/examples/bandit/engine.py +12 -4
- synth_ai/environments/examples/bandit/taskset.py +4 -4
- synth_ai/environments/examples/crafter_classic/environment.py +76 -1
- synth_ai/environments/reproducibility/tree.py +5 -6
- synth_ai/environments/service/app.py +11 -12
- synth_ai/environments/service/core_routes.py +10 -9
- synth_ai/environments/stateful/engine.py +1 -1
- synth_ai/environments/tasks/core.py +1 -0
- synth_ai/environments/tasks/filters.py +5 -6
- synth_ai/environments/tasks/utils.py +4 -5
- synth_ai/evals/base.py +0 -2
- synth_ai/handshake.py +11 -9
- synth_ai/http.py +1 -1
- synth_ai/http_client.py +43 -11
- synth_ai/inference/__init__.py +0 -2
- synth_ai/inference/client.py +20 -6
- synth_ai/jobs/client.py +103 -78
- synth_ai/learning/__init__.py +41 -6
- synth_ai/learning/algorithms.py +14 -0
- synth_ai/learning/client.py +121 -29
- synth_ai/learning/config.py +2 -40
- synth_ai/learning/constants.py +0 -2
- synth_ai/learning/ft_client.py +4 -56
- synth_ai/learning/health.py +13 -7
- synth_ai/learning/jobs.py +43 -47
- synth_ai/{rl → learning/rl}/__init__.py +14 -5
- synth_ai/learning/rl/client.py +267 -0
- synth_ai/learning/rl/config.py +31 -0
- synth_ai/{rl → learning/rl}/contracts.py +5 -10
- synth_ai/{rl → learning/rl}/env_keys.py +45 -16
- synth_ai/learning/rl/secrets.py +13 -0
- synth_ai/learning/rl_client.py +2 -253
- synth_ai/learning/sft/__init__.py +29 -0
- synth_ai/learning/sft/client.py +68 -0
- synth_ai/learning/sft/config.py +270 -0
- synth_ai/learning/sft/data.py +295 -0
- synth_ai/learning/sse.py +25 -26
- synth_ai/learning/validators.py +25 -24
- synth_ai/lm/__init__.py +21 -47
- synth_ai/task/__init__.py +26 -27
- synth_ai/task/apps/__init__.py +18 -19
- synth_ai/task/auth.py +35 -23
- synth_ai/task/client.py +15 -13
- synth_ai/task/contracts.py +37 -35
- synth_ai/task/datasets.py +9 -6
- synth_ai/task/errors.py +11 -10
- synth_ai/task/health.py +17 -11
- synth_ai/task/json.py +58 -24
- synth_ai/task/proxy.py +15 -14
- synth_ai/task/rubrics.py +22 -15
- synth_ai/task/server.py +43 -17
- synth_ai/task/tracing_utils.py +12 -7
- synth_ai/task/validators.py +0 -1
- synth_ai/task/vendors.py +5 -7
- synth_ai/tracing_v3/__init__.py +2 -0
- synth_ai/tracing_v3/abstractions.py +21 -4
- synth_ai/tracing_v3/db_config.py +26 -1
- synth_ai/tracing_v3/decorators.py +18 -15
- synth_ai/tracing_v3/examples/basic_usage.py +3 -2
- synth_ai/tracing_v3/hooks.py +6 -4
- synth_ai/tracing_v3/llm_call_record_helpers.py +6 -6
- synth_ai/tracing_v3/replica_sync.py +1 -0
- synth_ai/tracing_v3/session_tracer.py +63 -16
- synth_ai/tracing_v3/storage/base.py +89 -1
- synth_ai/tracing_v3/storage/config.py +21 -8
- synth_ai/tracing_v3/storage/factory.py +10 -8
- synth_ai/tracing_v3/storage/utils.py +4 -2
- synth_ai/tracing_v3/turso/daemon.py +7 -2
- synth_ai/tracing_v3/turso/models.py +5 -2
- synth_ai/tracing_v3/turso/native_manager.py +1173 -0
- synth_ai/tracing_v3/utils.py +4 -3
- synth_ai/v0/api/__init__.py +8 -0
- synth_ai/v0/api/models/__init__.py +8 -0
- synth_ai/v0/api/models/supported.py +8 -0
- synth_ai/v0/config/__init__.py +15 -0
- synth_ai/v0/config/base_url.py +12 -0
- synth_ai/v0/lm/__init__.py +51 -0
- synth_ai/{lm → v0/lm}/caching/ephemeral.py +3 -5
- synth_ai/{lm → v0/lm}/caching/handler.py +4 -4
- synth_ai/{lm → v0/lm}/caching/initialize.py +1 -1
- synth_ai/{lm → v0/lm}/caching/persistent.py +1 -1
- synth_ai/{lm → v0/lm}/config.py +6 -1
- synth_ai/{lm → v0/lm}/core/all.py +9 -9
- synth_ai/{lm → v0/lm}/core/exceptions.py +0 -2
- synth_ai/{lm → v0/lm}/core/main.py +19 -7
- synth_ai/{lm → v0/lm}/core/main_v3.py +10 -10
- synth_ai/{lm → v0/lm}/core/synth_models.py +2 -15
- synth_ai/{lm → v0/lm}/core/vendor_clients.py +6 -4
- synth_ai/{lm → v0/lm}/overrides.py +4 -4
- synth_ai/{lm → v0/lm}/provider_support/anthropic.py +4 -4
- synth_ai/{lm → v0/lm}/provider_support/openai.py +5 -5
- synth_ai/{lm → v0/lm}/structured_outputs/handler.py +5 -5
- synth_ai/{lm → v0/lm}/structured_outputs/rehabilitate.py +1 -1
- synth_ai/{lm → v0/lm}/vendors/core/anthropic_api.py +16 -16
- synth_ai/{lm → v0/lm}/vendors/core/gemini_api.py +5 -5
- synth_ai/{lm → v0/lm}/vendors/core/mistral_api.py +5 -5
- synth_ai/{lm → v0/lm}/vendors/core/openai_api.py +12 -10
- synth_ai/{lm → v0/lm}/vendors/openai_standard.py +11 -9
- synth_ai/{lm → v0/lm}/vendors/openai_standard_responses.py +8 -5
- synth_ai/{lm → v0/lm}/vendors/supported/custom_endpoint.py +4 -6
- synth_ai/{lm → v0/lm}/vendors/supported/deepseek.py +2 -2
- synth_ai/{lm → v0/lm}/vendors/supported/grok.py +2 -2
- synth_ai/{lm → v0/lm}/vendors/supported/groq.py +1 -1
- synth_ai/{lm → v0/lm}/vendors/supported/ollama.py +1 -1
- synth_ai/{lm → v0/lm}/vendors/supported/openrouter.py +3 -3
- synth_ai/{lm → v0/lm}/vendors/supported/together.py +1 -1
- synth_ai/{lm → v0/lm}/vendors/synth_client.py +38 -11
- synth_ai/v0/tracing/upload.py +32 -135
- synth_ai/v0/tracing_v3/__init__.py +10 -0
- synth_ai/v0/tracing_v3/abstractions.py +3 -0
- synth_ai/v0/tracing_v3/decorators.py +3 -0
- synth_ai/v0/tracing_v3/llm_call_record_helpers.py +3 -0
- synth_ai/v0/tracing_v3/session_tracer.py +3 -0
- synth_ai-0.2.9.dev6.dist-info/METADATA +191 -0
- {synth_ai-0.2.9.dev5.dist-info → synth_ai-0.2.9.dev6.dist-info}/RECORD +291 -262
- {synth_ai-0.2.9.dev5.dist-info → synth_ai-0.2.9.dev6.dist-info}/top_level.txt +1 -0
- examples/common_old/backend.py +0 -21
- examples/evals_old/README.md +0 -98
- examples/evals_old/__init__.py +0 -6
- examples/evals_old/compare_models.py +0 -1037
- examples/evals_old/example_log.md +0 -145
- examples/evals_old/run_demo.sh +0 -126
- examples/evals_old/trace_analysis.py +0 -270
- examples/finetuning_old/_backup_synth_qwen/config.toml +0 -29
- examples/finetuning_old/_backup_synth_qwen/example_log.md +0 -324
- examples/finetuning_old/_backup_synth_qwen/filter_traces.py +0 -60
- examples/finetuning_old/_backup_synth_qwen/filter_traces_achievements.py +0 -239
- examples/finetuning_old/_backup_synth_qwen/purge_v3_traces.py +0 -109
- examples/finetuning_old/_backup_synth_qwen/react_agent_lm.py +0 -1924
- examples/finetuning_old/_backup_synth_qwen/readme.md +0 -49
- examples/finetuning_old/_backup_synth_qwen/run_crafter_qwen4b.py +0 -114
- examples/finetuning_old/_backup_synth_qwen/run_demo.sh +0 -195
- examples/finetuning_old/_backup_synth_qwen/sft_kickoff.py +0 -118
- examples/finetuning_old/synth_qwen_v1/README.md +0 -68
- examples/finetuning_old/synth_qwen_v1/filter_traces.py +0 -60
- examples/finetuning_old/synth_qwen_v1/filter_traces_achievements.py +0 -239
- examples/finetuning_old/synth_qwen_v1/finetune.py +0 -46
- examples/finetuning_old/synth_qwen_v1/hello_ft_model.py +0 -71
- examples/finetuning_old/synth_qwen_v1/infer.py +0 -37
- examples/finetuning_old/synth_qwen_v1/poll.py +0 -44
- examples/finetuning_old/synth_qwen_v1/prepare_data.py +0 -35
- examples/finetuning_old/synth_qwen_v1/purge_v3_traces.py +0 -109
- examples/finetuning_old/synth_qwen_v1/react_agent_lm.py +0 -1932
- examples/finetuning_old/synth_qwen_v1/run_crafter_sft_job.py +0 -207
- examples/finetuning_old/synth_qwen_v1/run_ft_job.py +0 -232
- examples/finetuning_old/synth_qwen_v1/upload_data.py +0 -34
- examples/finetuning_old/synth_qwen_v1/util.py +0 -147
- examples/rl_old/task_app.py +0 -962
- examples/warming_up_to_rl/old/event_rewards.md +0 -234
- examples/warming_up_to_rl/old/notes.md +0 -73
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_modal_ft/filter_traces_sft_turso.py +0 -738
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_openai_ft/filter_traces_sft_turso.py +0 -580
- synth_ai/experimental/synth_oss.py +0 -446
- synth_ai/install_sqld.sh +0 -40
- synth_ai/learning/filtering.py +0 -0
- synth_ai/learning/offline/dpo.py +0 -0
- synth_ai/learning/offline/providers.py +0 -7
- synth_ai/learning/offline/sft.py +0 -0
- synth_ai/learning/offline/shared.py +0 -0
- synth_ai/learning/online/grpo.py +0 -0
- synth_ai/learning/online/irft.py +0 -0
- synth_ai/learning/prompts/banking77_injection_eval.py +0 -168
- synth_ai/learning/prompts/gepa.py +0 -0
- synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +0 -213
- synth_ai/learning/prompts/mipro.py +0 -289
- synth_ai/learning/prompts/random_search.py +0 -246
- synth_ai/learning/prompts/run_mipro_banking77.py +0 -172
- synth_ai/learning/prompts/run_random_search_banking77.py +0 -324
- synth_ai/rl/secrets.py +0 -19
- synth_ai/scripts/verify_rewards.py +0 -100
- synth_ai/tracing/__init__.py +0 -30
- synth_ai/tracing_v1/__init__.py +0 -33
- synth_ai/tracing_v3/turso/__init__.py +0 -25
- synth_ai/tracing_v3/turso/manager.py +0 -774
- synth_ai/zyk/__init__.py +0 -30
- synth_ai-0.2.9.dev5.dist-info/METADATA +0 -131
- /synth_ai/{lm → v0/lm}/caching/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/caching/constants.py +0 -0
- /synth_ai/{lm → v0/lm}/caching/dbs.py +0 -0
- /synth_ai/{lm → v0/lm}/constants.py +0 -0
- /synth_ai/{lm → v0/lm}/core/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/cost/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/cost/monitor.py +0 -0
- /synth_ai/{lm → v0/lm}/cost/statefulness.py +0 -0
- /synth_ai/{lm → v0/lm}/injection.py +0 -0
- /synth_ai/{lm → v0/lm}/provider_support/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/provider_support/suppress_logging.py +0 -0
- /synth_ai/{lm → v0/lm}/structured_outputs/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/structured_outputs/inject.py +0 -0
- /synth_ai/{lm → v0/lm}/tools/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/tools/base.py +0 -0
- /synth_ai/{lm → v0/lm}/unified_interface.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/base.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/core/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/core/synth_dev_api.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/local/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/local/ollama.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/retries.py +0 -0
- /synth_ai/{lm → v0/lm}/vendors/supported/__init__.py +0 -0
- /synth_ai/{lm → v0/lm}/warmup.py +0 -0
- {synth_ai-0.2.9.dev5.dist-info → synth_ai-0.2.9.dev6.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.9.dev5.dist-info → synth_ai-0.2.9.dev6.dist-info}/entry_points.txt +0 -0
- {synth_ai-0.2.9.dev5.dist-info → synth_ai-0.2.9.dev6.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,478 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from .react_agent import CrafterReActAgent
|
|
7
|
+
from .tools import TOOLS_SCHEMA
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# Define Policy base class here to avoid circular import
class Policy(ABC):
    """Abstract interface for environment-specific policies.

    Concrete policies turn environment observations into inference requests
    and parse model output back into executable environment tool calls.
    """

    @abstractmethod
    def prepare_inference_request(
        self, observation: dict[str, Any], history: list[dict[str, Any]] | None = None
    ) -> tuple[list[dict[str, Any]], list[dict[str, Any]] | None]:
        """Prepare an inference request (messages, optional tools schema)."""

    @abstractmethod
    def parse_model_response(
        self, response: str, observation: dict[str, Any]
    ) -> list[dict[str, Any]]:
        """Parse a raw model response into environment tool calls."""
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
# (imports moved to top of file to satisfy linter)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class CrafterPolicy(Policy):
|
|
33
|
+
"""Thin policy scaffold for Crafter using the ReAct agent prompts.
|
|
34
|
+
|
|
35
|
+
This class does not run inference itself. It prepares an inference request
|
|
36
|
+
(messages and optional tools schema) that the Task App can send to the
|
|
37
|
+
inference service, and provides helpers to parse the model response into
|
|
38
|
+
environment tool calls.
|
|
39
|
+
"""
|
|
40
|
+
|
|
41
|
+
name: str = "crafter-react"
|
|
42
|
+
|
|
43
|
+
def __init__(self, inference_url: str, model: str | None = None) -> None:
    """Create a policy bound to an inference endpoint.

    Args:
        inference_url: Base URL of the inference service to call.
        model: Optional model identifier forwarded with each request.
    """
    self.inference_url = inference_url
    self.model = model
    self.use_tools = True
    # Sampling knobs; initialize(config) fills these in when provided.
    self.temperature: float | None = None
    self.top_p: float | None = None
    self.max_tokens: int | None = None
    # Reasoning ("thinking") controls; also adopted via initialize(config).
    self.thinking_mode: str | None = None
    self.thinking_budget: int | None = None
    # Rolling per-episode state so the policy can be non-Markov.
    self.history_messages: list[dict[str, str]] = []  # chat turns, system msg excluded
    self.turn_index: int = 0
    self.trajectory_history: list[dict[str, Any]] = []  # structured env/policy step records
|
|
58
|
+
|
|
59
|
+
async def initialize(self, config: dict[str, Any]) -> None:
    """Adopt endpoint, sampling, and thinking settings from *config*.

    Only known keys are read; numeric values are cast eagerly so a
    malformed config fails fast. Always resets the rolling
    conversation/trajectory state at the end.
    """
    if "inference_url" in config:
        self.inference_url = config["inference_url"]
    if "model" in config:
        self.model = config["model"]
    if "use_tools" in config:
        self.use_tools = bool(config["use_tools"])
    # Sampling parameters the trainer passes through.
    for key, cast in (("temperature", float), ("top_p", float), ("max_tokens", int)):
        if key in config:
            setattr(self, key, cast(config[key]))
    # Thinking mode/budget are forwarded into the vLLM request (mirrors Wordle policy).
    if "thinking_mode" in config:
        self.thinking_mode = str(config["thinking_mode"])  # expect "think" or "no_think"
    if "thinking_budget" in config and config["thinking_budget"] is not None:
        self.thinking_budget = int(config["thinking_budget"])  # tokens inside <think>
    if self.thinking_budget is None:
        # Non-OpenAI endpoints default to a 1028-token thinking budget.
        try:
            if "openai.com" not in (self.inference_url or "").lower():
                self.thinking_budget = 1028
        except Exception:
            self.thinking_budget = 1028
    # Fresh episode state on every (re)initialize.
    self.history_messages = []
    self.turn_index = 0
    self.trajectory_history = []
|
|
88
|
+
|
|
89
|
+
def _append_user_observation(self, observation_text: str) -> None:
    """Record the observation as a user chat turn and advance the turn counter."""
    turn_message = {"role": "user", "content": observation_text}
    self.history_messages.append(turn_message)
    self.turn_index += 1
|
|
92
|
+
|
|
93
|
+
def _append_assistant_turn(
    self,
    assistant_text: str | None,
    tool_calls: list[dict[str, Any]] | None,
    env_result: dict[str, Any] | None,
) -> None:
    """Log the prior assistant turn plus its structured outcome.

    Only the assistant text (when present) goes into the chat history;
    tool calls and environment results are kept in trajectory_history
    as a structured record for training/analysis.
    """
    if assistant_text is not None:
        self.history_messages.append({"role": "assistant", "content": assistant_text})
    step_record: dict[str, Any] = {"turn": self.turn_index}
    if tool_calls is not None:
        step_record["tool_calls"] = tool_calls
    if env_result is not None:
        step_record["env_result"] = env_result
    self.trajectory_history.append(step_record)
|
|
109
|
+
|
|
110
|
+
def build_inference_request(
    self,
    observation_text: str,
    history: list[dict[str, Any]] | None = None,
    turn: int | None = None,
    image_parts: list[dict[str, Any]] | None = None,
) -> dict[str, Any]:
    """Assemble the chat-completions payload for the current turn.

    Lazily fills in thinking defaults for non-OpenAI endpoints (mutating
    self.thinking_mode / self.thinking_budget as a side effect) and
    forwards any sampling parameters adopted in initialize().
    """
    payload: dict[str, Any] = {
        "messages": CrafterReActAgent.build_messages(
            observation=observation_text,
            history=history,
            turn=turn,
            image_parts=image_parts,
        )
    }
    if self.model is not None:
        payload["model"] = self.model
    # Thinking controls: defaults apply only to non-OpenAI endpoints.
    endpoint = (self.inference_url or "").lower()
    is_openai = "openai.com" in endpoint
    if self.thinking_mode is None and not is_openai:
        self.thinking_mode = "think"
    if self.thinking_mode is not None:
        payload["thinking_mode"] = self.thinking_mode
    if self.thinking_budget is None and not is_openai:
        self.thinking_budget = 1028
    if self.thinking_budget is not None:
        payload["thinking_budget"] = self.thinking_budget
    # Sampling parameters set via initialize(config).
    if self.max_tokens is not None:
        # gpt-5 style models expect max_completion_tokens rather than max_tokens.
        token_key = (
            "max_completion_tokens" if (self.model and "gpt-5" in self.model) else "max_tokens"
        )
        payload[token_key] = self.max_tokens
    if self.temperature is not None:
        payload["temperature"] = self.temperature
    if self.top_p is not None:
        payload["top_p"] = self.top_p
    if self.use_tools:
        payload["tools"] = TOOLS_SCHEMA
        payload["tool_choice"] = "required"
        # Have the inference server inject family-specific stop sequences so
        # generation ends right after the first tool call.
        payload["stop_after_tool_calls"] = 1
    return payload
|
|
155
|
+
|
|
156
|
+
@staticmethod
|
|
157
|
+
def parse_response_to_tool_calls(
|
|
158
|
+
response: dict[str, Any],
|
|
159
|
+
use_tools: bool = True,
|
|
160
|
+
) -> list[dict[str, Any]]:
|
|
161
|
+
"""Turn an inference response into environment tool calls.
|
|
162
|
+
|
|
163
|
+
- If tools were used, expect tool_calls-compatible output and forward as-is
|
|
164
|
+
in our simple JSON format: {"tool_name": str, "arguments": {...}}.
|
|
165
|
+
- If no tools, parse plain-text actions using CrafterReActAgent parser and
|
|
166
|
+
wrap them into a single interact_many tool call.
|
|
167
|
+
"""
|
|
168
|
+
# First check if we got actual tool calls
|
|
169
|
+
choices = response.get("choices", [])
|
|
170
|
+
tool_calls: list[dict[str, Any]] = []
|
|
171
|
+
|
|
172
|
+
for choice in choices:
|
|
173
|
+
msg = choice.get("message", {})
|
|
174
|
+
if "tool_calls" in msg and msg["tool_calls"] is not None:
|
|
175
|
+
for tc in msg["tool_calls"]:
|
|
176
|
+
if tc is None:
|
|
177
|
+
continue
|
|
178
|
+
# Handle both OpenAI format and simplified format
|
|
179
|
+
if "function" in tc:
|
|
180
|
+
# Standard OpenAI format
|
|
181
|
+
tool_calls.append(
|
|
182
|
+
{
|
|
183
|
+
"tool_name": tc["function"]["name"],
|
|
184
|
+
"arguments": tc["function"]["arguments"],
|
|
185
|
+
}
|
|
186
|
+
)
|
|
187
|
+
elif "name" in tc:
|
|
188
|
+
# Simplified format from our vLLM service
|
|
189
|
+
tool_calls.append(
|
|
190
|
+
{
|
|
191
|
+
"tool_name": tc["name"],
|
|
192
|
+
"arguments": tc["arguments"],
|
|
193
|
+
}
|
|
194
|
+
)
|
|
195
|
+
|
|
196
|
+
# If we got tool calls, return them
|
|
197
|
+
if tool_calls:
|
|
198
|
+
# Normalize common degenerate pattern ["move_right", "do"] when nothing is nearby.
|
|
199
|
+
# If previous env_result indicates no interaction target, drop trailing 'do'.
|
|
200
|
+
normalized: list[dict[str, Any]] = []
|
|
201
|
+
for tc in tool_calls:
|
|
202
|
+
if tc and isinstance(tc, dict) and tc.get("tool_name") == "interact_many":
|
|
203
|
+
args = tc.get("arguments")
|
|
204
|
+
if isinstance(args, str):
|
|
205
|
+
try:
|
|
206
|
+
import json
|
|
207
|
+
|
|
208
|
+
args = json.loads(args)
|
|
209
|
+
except (json.JSONDecodeError, ValueError):
|
|
210
|
+
args = {}
|
|
211
|
+
actions = []
|
|
212
|
+
if isinstance(args, dict):
|
|
213
|
+
maybe_actions = args.get("actions")
|
|
214
|
+
if isinstance(maybe_actions, list):
|
|
215
|
+
actions = maybe_actions
|
|
216
|
+
# Simple heuristic: avoid repeating same pair; avoid 'do' with no context
|
|
217
|
+
if len(actions) == 2 and actions[0] == "move_right" and actions[1] == "do":
|
|
218
|
+
actions = ["move_right"]
|
|
219
|
+
normalized.append(
|
|
220
|
+
{"tool_name": "interact_many", "arguments": {"actions": actions or []}}
|
|
221
|
+
)
|
|
222
|
+
else:
|
|
223
|
+
normalized.append(tc)
|
|
224
|
+
return normalized
|
|
225
|
+
|
|
226
|
+
# Otherwise, parse plain text content for actions
|
|
227
|
+
text = ""
|
|
228
|
+
for choice in choices:
|
|
229
|
+
msg = choice.get("message", {})
|
|
230
|
+
content = msg.get("content", "")
|
|
231
|
+
if content:
|
|
232
|
+
text = content
|
|
233
|
+
break
|
|
234
|
+
|
|
235
|
+
if text:
|
|
236
|
+
# Try to parse actions from the text
|
|
237
|
+
from .shared import parse_actions
|
|
238
|
+
|
|
239
|
+
actions = parse_actions(text)
|
|
240
|
+
if actions:
|
|
241
|
+
# Wrap actions in interact_many tool call
|
|
242
|
+
return [{"tool_name": "interact_many", "arguments": {"actions": actions}}]
|
|
243
|
+
|
|
244
|
+
# No actions found
|
|
245
|
+
return []
|
|
246
|
+
|
|
247
|
+
async def step(
|
|
248
|
+
self,
|
|
249
|
+
observation_text: str,
|
|
250
|
+
state: dict[str, Any] | None = None,
|
|
251
|
+
metadata: dict[str, Any] | None = None,
|
|
252
|
+
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
|
|
253
|
+
"""Stateful step: update policy history and prepare inference request.
|
|
254
|
+
|
|
255
|
+
Inputs (via metadata, optional):
|
|
256
|
+
- "prev_assistant_text": str — assistant text from prior turn
|
|
257
|
+
- "prev_tool_calls": List[Dict] — tool calls executed last turn
|
|
258
|
+
- "prev_env_result": Dict — env step result for prior tool calls
|
|
259
|
+
- "prev_inference_response": Dict — raw LLM response; if present and
|
|
260
|
+
use_tools=False, we record assistant_text parsed from content.
|
|
261
|
+
|
|
262
|
+
Returns (tool_calls, meta):
|
|
263
|
+
- tool_calls: empty list; coordinator should call inference and then use
|
|
264
|
+
parse_response_to_tool_calls() to derive tool_calls
|
|
265
|
+
- meta: { inference_url, inference_request, turn_index, history_len }
|
|
266
|
+
"""
|
|
267
|
+
# If caller provided results from previous cycle, record them first
|
|
268
|
+
if metadata is not None:
|
|
269
|
+
prev_assistant_text: str | None = None
|
|
270
|
+
prev_tool_calls: list[dict[str, Any]] | None = None
|
|
271
|
+
prev_env_result: dict[str, Any] | None = None
|
|
272
|
+
if "prev_assistant_text" in metadata:
|
|
273
|
+
prev_assistant_text = metadata["prev_assistant_text"]
|
|
274
|
+
if "prev_tool_calls" in metadata:
|
|
275
|
+
prev_tool_calls = metadata["prev_tool_calls"]
|
|
276
|
+
if "prev_env_result" in metadata:
|
|
277
|
+
prev_env_result = metadata["prev_env_result"]
|
|
278
|
+
if (
|
|
279
|
+
prev_assistant_text is not None
|
|
280
|
+
or prev_tool_calls is not None
|
|
281
|
+
or prev_env_result is not None
|
|
282
|
+
):
|
|
283
|
+
self._append_assistant_turn(prev_assistant_text, prev_tool_calls, prev_env_result)
|
|
284
|
+
|
|
285
|
+
# Append current observation as the next user message (internal history only)
|
|
286
|
+
self._append_user_observation(observation_text)
|
|
287
|
+
|
|
288
|
+
# Build user message by combining the current observation text
|
|
289
|
+
# (formatted surroundings/inventory) with the previous 3 tool calls as context.
|
|
290
|
+
# Most recent first.
|
|
291
|
+
lines: list[str] = []
|
|
292
|
+
|
|
293
|
+
def _format_tool_call_line_for_context(
|
|
294
|
+
tool_name: str, arguments: Any, max_chars: int = 500
|
|
295
|
+
) -> str:
|
|
296
|
+
import json as _json
|
|
297
|
+
|
|
298
|
+
# Render arguments compactly, then clip to max_chars
|
|
299
|
+
if isinstance(arguments, dict | list):
|
|
300
|
+
try:
|
|
301
|
+
rendered = _json.dumps(arguments, ensure_ascii=False, separators=(",", ":"))
|
|
302
|
+
except Exception:
|
|
303
|
+
rendered = str(arguments)
|
|
304
|
+
elif isinstance(arguments, str):
|
|
305
|
+
rendered = arguments
|
|
306
|
+
else:
|
|
307
|
+
rendered = str(arguments)
|
|
308
|
+
if isinstance(rendered, str) and len(rendered) > max_chars:
|
|
309
|
+
rendered = rendered[:max_chars]
|
|
310
|
+
return f"- {tool_name}: {rendered}"
|
|
311
|
+
|
|
312
|
+
# Prefer pulling from trajectory_history (accumulates over turns)
|
|
313
|
+
for record in reversed(self.trajectory_history):
|
|
314
|
+
if len(lines) >= 3:
|
|
315
|
+
break
|
|
316
|
+
tc_list = record.get("tool_calls")
|
|
317
|
+
if not tc_list:
|
|
318
|
+
continue
|
|
319
|
+
# Use the first tool call for that turn if multiple exist
|
|
320
|
+
tc = tc_list[0] if isinstance(tc_list, list) and tc_list else None
|
|
321
|
+
if not isinstance(tc, dict):
|
|
322
|
+
continue
|
|
323
|
+
name = tc.get("tool_name") or tc.get("name") or "unknown"
|
|
324
|
+
args = tc.get("arguments")
|
|
325
|
+
lines.append(_format_tool_call_line_for_context(name, args))
|
|
326
|
+
|
|
327
|
+
# If trajectory history is empty (first few turns), fall back to metadata once
|
|
328
|
+
if not lines and metadata is not None and metadata.get("prev_tool_calls"):
|
|
329
|
+
calls: list[dict[str, Any]] = metadata["prev_tool_calls"]
|
|
330
|
+
for call in reversed(calls):
|
|
331
|
+
if len(lines) >= 3:
|
|
332
|
+
break
|
|
333
|
+
if not isinstance(call, dict):
|
|
334
|
+
continue
|
|
335
|
+
name = call.get("tool_name") or call.get("name") or "unknown"
|
|
336
|
+
args = call.get("arguments")
|
|
337
|
+
lines.append(_format_tool_call_line_for_context(name, args))
|
|
338
|
+
|
|
339
|
+
context_text = "Previous tool calls (most recent first):\n" + (
|
|
340
|
+
"\n".join(lines) if lines else "- none"
|
|
341
|
+
)
|
|
342
|
+
|
|
343
|
+
# Combine observation with context so the model always sees surroundings/inventory
|
|
344
|
+
combined_text = f"{observation_text}\n\n{context_text}"
|
|
345
|
+
|
|
346
|
+
raw_observation: dict[str, Any] | None = None
|
|
347
|
+
if metadata is not None:
|
|
348
|
+
raw_candidate = metadata.get("raw_observation")
|
|
349
|
+
if isinstance(raw_candidate, dict):
|
|
350
|
+
raw_observation = raw_candidate
|
|
351
|
+
image_parts = self._extract_image_parts(raw_observation)
|
|
352
|
+
|
|
353
|
+
payload = self.build_inference_request(
|
|
354
|
+
combined_text,
|
|
355
|
+
history=[], # no prior user/assistant history
|
|
356
|
+
turn=self.turn_index,
|
|
357
|
+
image_parts=image_parts,
|
|
358
|
+
)
|
|
359
|
+
# print("Debugging only:; ", payload)
|
|
360
|
+
meta_out = {
|
|
361
|
+
"inference_url": self.inference_url,
|
|
362
|
+
"inference_request": payload,
|
|
363
|
+
"turn_index": self.turn_index,
|
|
364
|
+
"history_len": len(self.history_messages),
|
|
365
|
+
}
|
|
366
|
+
return [], meta_out
|
|
367
|
+
|
|
368
|
+
def state_dict(self) -> dict[str, Any]:
    """Snapshot the mutable policy state for checkpoint/restore."""
    fields = ("turn_index", "history_messages", "trajectory_history")
    return {field: getattr(self, field) for field in fields}
|
|
374
|
+
|
|
375
|
+
def load_state_dict(self, state: dict[str, Any]) -> None:
    """Restore fields previously captured by ``state_dict``."""
    # turn_index is coerced to int to tolerate JSON round-trips.
    self.turn_index = int(state["turn_index"])
    for field in ("history_messages", "trajectory_history"):
        setattr(self, field, state[field])
|
|
379
|
+
|
|
380
|
+
async def serialize(self) -> dict[str, Any]:
    """Produce a JSON-safe snapshot: identity, config, and mutable state."""
    config = {
        "inference_url": self.inference_url,
        "model": self.model,
        "use_tools": self.use_tools,
    }
    return {"name": self.name, "config": config, "state": self.state_dict()}
|
|
390
|
+
|
|
391
|
+
@classmethod
async def deserialize(cls, payload: dict[str, Any]) -> CrafterPolicy:
    """Reconstruct a policy from a ``serialize()`` payload."""
    config = payload["config"]
    state = payload["state"]
    # Construct with the persisted endpoint/model, then restore flags and state.
    policy = cls(inference_url=config["inference_url"], model=config.get("model"))
    policy.use_tools = bool(config["use_tools"])
    policy.load_state_dict(state)
    return policy
|
|
402
|
+
|
|
403
|
+
async def terminate(self) -> None:
    """No resources to release; termination is a no-op."""
|
|
405
|
+
|
|
406
|
+
def prepare_inference_request(
    self, observation: dict[str, Any], history: list[dict[str, Any]] | None = None
) -> tuple[list[dict[str, Any]], list[dict[str, Any]] | None]:
    """Prepare an inference request (implementing abstract method).

    Args:
        observation: Environment observation payload (may nest the real
            observation under an ``"observation"`` key; see
            ``_format_observation_for_llm``).
        history: Optional prior user/assistant messages to include.

    Returns:
        ``(messages, tools)``: an OpenAI-style message list, and the tool
        schema when tool calling is enabled, else ``None``.
    """
    # Format observation with rich contextual information
    observation_text = self._format_observation_for_llm(observation)
    image_parts = self._extract_image_parts(observation)

    # Build messages (observation_text already formatted; no raw matrices)
    messages = CrafterReActAgent.build_messages(
        observation=observation_text,
        history=history,
        turn=self.turn_index,
        image_parts=image_parts,
    )

    # Return messages and tools schema
    tools = TOOLS_SCHEMA if self.use_tools else None
    return messages, tools
|
|
425
|
+
|
|
426
|
+
def _format_observation_for_llm(self, observation: dict[str, Any]) -> str:
    """Format observation with rich contextual information for the LLM using the shared formatter."""
    from .shared import format_observation

    # The payload may wrap the real observation under an "observation" key.
    obs_data = observation.get("observation", observation)

    # Fall back to a plain string rendering for unexpected shapes.
    if not isinstance(obs_data, dict):
        return f"Observation: {str(observation)}"

    step_idx = observation.get("step_idx", 0)
    max_steps = 100  # Default max steps, could be made configurable

    # Merge health from the info wrapper when the observation lacks it,
    # without mutating the caller's dict.
    info = observation.get("info", {})
    if isinstance(info, dict) and "health" in info and "health" not in obs_data:
        obs_data = {**obs_data, "health": info["health"]}

    return format_observation(obs_data, step_count=step_idx, max_steps=max_steps)
|
|
448
|
+
|
|
449
|
+
def _extract_image_parts(
|
|
450
|
+
self, observation: dict[str, Any] | None
|
|
451
|
+
) -> list[dict[str, Any]]:
|
|
452
|
+
"""Crafter policy uses text-only prompts; do not attach image parts."""
|
|
453
|
+
|
|
454
|
+
return []
|
|
455
|
+
|
|
456
|
+
def parse_model_response(
    self, response: str | dict[str, Any], observation: dict[str, Any]
) -> list[dict[str, Any]]:
    """Parse model response into tool calls (implementing abstract method).

    Note: vLLM actually returns a dict response, not a string; the
    annotation reflects that both shapes are accepted. ``observation`` is
    accepted for interface compatibility but not consulted here.

    Returns:
        A list of tool-call dicts (``{"tool_name": ..., "arguments": ...}``),
        empty when nothing could be parsed.
    """
    # Handle dict response from vLLM (the actual case)
    if isinstance(response, dict):
        return self.parse_response_to_tool_calls(response, self.use_tools)

    # Handle string response (fallback case for raw text)
    if isinstance(response, str):
        actions = CrafterReActAgent.parse_actions_from_response(response)
        if actions:
            return [{"tool_name": "interact_many", "arguments": {"actions": actions}}]

    # Default empty response
    return []
|
|
476
|
+
|
|
477
|
+
|
|
478
|
+
# Explicit public API of this module.
__all__ = ["CrafterPolicy"]
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
"""Crafter ReAct agent: system prompt and message assembly.
|
|
2
|
+
|
|
3
|
+
This agent encapsulates the Crafter-specific system prompt and helpers to
|
|
4
|
+
construct OpenAI-style message lists. Response parsing delegates to shared
|
|
5
|
+
utilities to keep a single parser.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from .shared import parse_actions
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class CrafterReActAgent:
    """Lightweight ReAct-style agent surface for Crafter prompts.

    Holds the Crafter system prompts and assembles OpenAI-style message
    lists; action parsing is delegated to the shared parser.
    """

    @staticmethod
    def get_system_prompt() -> str:
        """Return the plain-text system prompt (actions written as text)."""
        return (
            "You are playing Crafter, a survival game by Danijar Hafner. Your goal is to collect resources, "
            "craft tools, survive, and unlock achievements.\n\n"
            "Core rules:\n"
            "- The world contains trees (wood), stone, coal, iron, plants, cows, zombies, and water.\n"
            "- Movement constraints: you cannot walk onto blocking tiles: tree, stone, water, lava, coal, iron. Navigate around obstacles.\n"
            "- You start with empty hands and low health/hunger.\n"
            "- Interact ('do') only when adjacent to a resource (tree, stone, cow, zombie, etc.).\n"
            "- Movement is essential: you can and should move multiple steps in one turn to explore effectively.\n"
            "- Achievements are unlocked by collecting resources, crafting tools, placing objects, fighting, and surviving longer.\n\n"
            "Key strategies:\n"
            "1. Begin by moving around to find trees. Use 'do' to collect wood when adjacent.\n"
            "2. Craft a wood pickaxe as soon as you have enough wood ('make_wood_pickaxe').\n"
            "3. Use the pickaxe to gather stone, then craft a stone pickaxe. Progress to iron tools as you find iron.\n"
            "4. Build a table ('place_table') to unlock more crafting options (furnace, sword, etc.).\n"
            "5. Manage hunger by collecting and eating plants or interacting with cows.\n"
            "6. Fight zombies with a sword for achievements and resources.\n"
            "7. Survive by balancing exploration, combat, and resource gathering.\n\n"
            "8. Keep moving to discover new resources and stay alive. If you're in the middle of nowhere, take 5-8 consecutive move-related actions to explore and see what's outside your field of view. Don't delay exploration when it's the right move.\n\n"
            "Achievements to aim for:\n"
            "- Collecting resources (wood, stone, coal, iron, plants).\n"
            "- Crafting tools (wood/stone/iron pickaxe, wood/stone/iron sword).\n"
            "- Placing structures (table, furnace, plant).\n"
            "- Combat (killing a cow or zombie).\n"
            "- Survival milestones (staying alive over time).\n\n"
            "Action policy:\n"
            "- Always return a single tool call: interact_many({actions: [...]})\n"
            "- Use 2–5 actions per call; prefer long movement sequences to explore.\n"
            "- Mix in 'do' only when it makes sense (tree, stone, animal, enemy nearby).\n"
            "- Do not spam the same exact sequence twice in a row—explore in varied directions.\n\n"
            "Available actions: noop, move_up, move_down, move_left, move_right, do (interact), sleep, "
            "place_stone, place_table, place_furnace, place_plant, make_wood_pickaxe, make_stone_pickaxe, "
            "make_iron_pickaxe, make_wood_sword, make_stone_sword, make_iron_sword\n"
        )

    @staticmethod
    def get_system_prompt_with_tools() -> str:
        """System prompt for tool-based interaction (e.g., Qwen3 models)."""
        return (
            "You are playing Crafter, a survival game by Danijar Hafner. Your goal is to collect resources, "
            "craft tools, survive, and unlock achievements.\n\n"
            "Rules & world:\n"
            "- Explore by chaining multiple movement actions in one turn.\n"
            "- You cannot walk onto blocking tiles: tree, stone, water, lava, coal, iron. Plan routes around obstacles.\n"
            "- Use 'do' intentionally when standing next to resources (trees, stone, cows, zombies, etc.).\n"
            "- Achievements come from collecting, crafting, building, fighting, and surviving.\n\n"
            "Strategy path:\n"
            "1. Move around to find trees → 'do' to collect wood.\n"
            "2. Craft a wood pickaxe.\n"
            "3. Gather stone → craft stone pickaxe.\n"
            "4. Place a table → unlock furnace and swords.\n"
            "5. Fight enemies (cow/zombie) with swords for achievements.\n"
            "6. Keep moving to discover new resources and stay alive. If you're in the middle of nowhere, take 5-8 consecutive move-related actions to explore and see what's outside your field of view. Don't delay exploration when it's the right move.\n\n"
            "You must use the 'interact_many' tool to perform actions in the game. "
            "This tool accepts an array of 1–5 actions to execute sequentially. Prefer sequences like "
            "[move_up, move_up, move_left, do] instead of single steps.\n\n"
            "Available actions: noop, move_up, move_down, move_left, move_right, do (interact), sleep, "
            "place_stone, place_table, place_furnace, place_plant, make_wood_pickaxe, make_stone_pickaxe, "
            "make_iron_pickaxe, make_wood_sword, make_stone_sword, make_iron_sword\n\n"
            "Always call the interact_many tool with your chosen actions. Do not write plain text actions.\n"
        )

    @staticmethod
    def build_messages(
        observation: str,
        history: list[dict[str, Any]] | None = None,
        turn: int | None = None,
        image_parts: list[dict[str, Any]] | None = None,
    ) -> list[dict[str, Any]]:
        """Construct an OpenAI-style messages list for vLLM generation.

        ``turn`` is accepted for interface compatibility but is not used
        when assembling the messages.
        """
        messages: list[dict[str, Any]] = [
            {"role": "system", "content": CrafterReActAgent.get_system_prompt()}
        ]
        messages.extend(history or [])
        # Multimodal content becomes a parts list; otherwise pass plain text.
        if image_parts:
            content: Any = [{"type": "text", "text": observation}, *image_parts]
        else:
            content = observation
        messages.append({"role": "user", "content": content})
        return messages

    @staticmethod
    def parse_actions_from_response(response_text: str) -> list[str]:
        """Delegate to the shared parser so there is exactly one action parser."""
        return parse_actions(response_text)
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
# Explicit public API of this module.
__all__ = ["CrafterReActAgent"]
|