synth-ai 0.2.8.dev4__py3-none-any.whl → 0.2.23.dev3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/README.md +1 -0
- examples/__init__.py +16 -0
- examples/analyze_semantic_words.sh +17 -0
- examples/baseline/banking77_baseline.py +243 -0
- examples/baseline/banking77_pipeline_baseline.py +294 -0
- examples/baseline/crafter_baseline.py +407 -0
- examples/baseline/pokemon_red_baseline.py +326 -0
- examples/baseline/simple_baseline.py +56 -0
- examples/baseline/warming_up_to_rl_baseline.py +239 -0
- examples/blog_posts/gepa/README.md +355 -0
- examples/blog_posts/gepa/configs/banking77_gepa_local.toml +95 -0
- examples/blog_posts/gepa/configs/banking77_gepa_test.toml +80 -0
- examples/blog_posts/gepa/configs/banking77_mipro_local.toml +50 -0
- examples/blog_posts/gepa/configs/banking77_pipeline_gepa_local.toml +101 -0
- examples/blog_posts/gepa/configs/banking77_pipeline_gepa_test.toml +96 -0
- examples/blog_posts/gepa/configs/hotpotqa_gepa_local.toml +57 -0
- examples/blog_posts/gepa/configs/hotpotqa_gepa_qwen.toml +35 -0
- examples/blog_posts/gepa/configs/hotpotqa_mipro_local.toml +51 -0
- examples/blog_posts/gepa/configs/hover_gepa_local.toml +57 -0
- examples/blog_posts/gepa/configs/hover_gepa_qwen.toml +35 -0
- examples/blog_posts/gepa/configs/hover_mipro_local.toml +51 -0
- examples/blog_posts/gepa/configs/ifbench_gepa_local.toml +57 -0
- examples/blog_posts/gepa/configs/ifbench_gepa_qwen.toml +35 -0
- examples/blog_posts/gepa/configs/ifbench_mipro_local.toml +51 -0
- examples/blog_posts/gepa/configs/pupa_gepa_local.toml +58 -0
- examples/blog_posts/gepa/configs/pupa_mipro_local.toml +52 -0
- examples/blog_posts/gepa/deploy_banking77_task_app.sh +54 -0
- examples/blog_posts/gepa/gepa_baseline.py +204 -0
- examples/blog_posts/gepa/query_prompts_example.py +97 -0
- examples/blog_posts/gepa/run_gepa_banking77.sh +112 -0
- examples/blog_posts/gepa/run_gepa_banking77_pipeline.sh +163 -0
- examples/blog_posts/gepa/task_apps.py +105 -0
- examples/blog_posts/gepa/test_gepa_local.sh +67 -0
- examples/blog_posts/gepa/verify_banking77_setup.sh +123 -0
- examples/blog_posts/mipro/README.md +415 -0
- examples/blog_posts/mipro/configs/banking77_mipro_local.toml +91 -0
- examples/blog_posts/mipro/configs/banking77_mipro_test.toml +87 -0
- examples/blog_posts/mipro/configs/banking77_pipeline_mipro_gemini_flash_lite_local.toml +98 -0
- examples/blog_posts/mipro/configs/banking77_pipeline_mipro_gpt41mini_local.toml +96 -0
- examples/blog_posts/mipro/configs/banking77_pipeline_mipro_local.toml +94 -0
- examples/blog_posts/mipro/configs/banking77_pipeline_mipro_test.toml +170 -0
- examples/blog_posts/mipro/deploy_banking77_pipeline_task_app.sh +59 -0
- examples/blog_posts/mipro/deploy_banking77_task_app.sh +41 -0
- examples/blog_posts/mipro/multi_step.md +79 -0
- examples/blog_posts/mipro/run_mipro_banking77.sh +191 -0
- examples/blog_posts/mipro/run_mipro_banking77_pipeline.sh +171 -0
- examples/blog_posts/mipro/run_mipro_banking77_pipeline_gemini_flash_lite.sh +177 -0
- examples/blog_posts/mipro/run_mipro_banking77_pipeline_gpt41mini.sh +173 -0
- examples/blog_posts/mipro/verify_banking77_setup.sh +117 -0
- examples/blog_posts/pokemon_vl/README.md +98 -0
- examples/blog_posts/pokemon_vl/configs/eval_gpt5nano.toml +26 -0
- examples/blog_posts/pokemon_vl/configs/eval_qwen3_vl.toml +27 -0
- examples/blog_posts/pokemon_vl/configs/eval_rl_final.toml +24 -0
- examples/blog_posts/pokemon_vl/configs/filter_high_reward.toml +10 -0
- examples/blog_posts/pokemon_vl/configs/train_rl_from_sft.toml +43 -0
- examples/blog_posts/pokemon_vl/configs/train_sft_qwen4b_vl.toml +40 -0
- examples/blog_posts/pokemon_vl/extract_images.py +239 -0
- examples/blog_posts/pokemon_vl/pokemon_vl_baseline.py +326 -0
- examples/blog_posts/pokemon_vl/run_eval_extract_images.py +209 -0
- examples/blog_posts/pokemon_vl/run_qwen_eval_extract_images.py +212 -0
- examples/blog_posts/pokemon_vl/text_box_analysis.md +106 -0
- examples/blog_posts/warming_up_to_rl/ARCHITECTURE.md +195 -0
- examples/blog_posts/warming_up_to_rl/FINAL_TEST_RESULTS.md +127 -0
- examples/blog_posts/warming_up_to_rl/INFERENCE_SUCCESS.md +132 -0
- examples/blog_posts/warming_up_to_rl/README.md +158 -0
- examples/blog_posts/warming_up_to_rl/SMOKE_TESTING.md +164 -0
- examples/blog_posts/warming_up_to_rl/SMOKE_TEST_COMPLETE.md +253 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_baseline_qwen32b_10x20.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b_10x20.toml +26 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_groq_qwen32b.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_openai_gpt_oss_120b.toml +29 -0
- examples/blog_posts/warming_up_to_rl/configs/filter_high_reward_dataset.toml +10 -0
- examples/blog_posts/warming_up_to_rl/configs/smoke_test.toml +75 -0
- examples/blog_posts/warming_up_to_rl/configs/train_rl_from_sft.toml +91 -0
- examples/blog_posts/warming_up_to_rl/configs/train_sft_qwen4b.toml +40 -0
- examples/blog_posts/warming_up_to_rl/warming_up_to_rl_baseline.py +187 -0
- examples/crafter_debug_render.py +186 -0
- examples/dev/qwen3_32b_qlora_4xh100.toml +45 -0
- examples/gepa/banking77_pipeline_gepa.toml +96 -0
- examples/gepa/multi_stage_gepa_example.toml +84 -0
- examples/gepa/run_gepa_banking77_pipeline.sh +157 -0
- examples/multi_step/SFT_README.md +147 -0
- examples/multi_step/configs/README_verilog_rl.md +77 -0
- examples/multi_step/configs/VERILOG_REWARDS.md +103 -0
- examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +196 -0
- examples/multi_step/configs/crafter_eval_synth_qwen4b.toml +35 -0
- examples/multi_step/configs/crafter_eval_text_only_groq_qwen32b.toml +36 -0
- examples/multi_step/configs/crafter_rl_outcome.toml +75 -0
- examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +145 -0
- examples/multi_step/configs/crafter_rl_stepwise_shaped.toml +84 -0
- examples/multi_step/configs/crafter_rl_stepwise_simple.toml +79 -0
- examples/multi_step/configs/crafter_rl_stepwise_simple_NEW_FORMAT.toml +105 -0
- examples/multi_step/configs/crafter_sft_qwen30b_lora.toml +62 -0
- examples/multi_step/configs/crafter_synth_backend.md +40 -0
- examples/multi_step/configs/verilog_eval_groq_qwen32b.toml +31 -0
- examples/multi_step/configs/verilog_eval_synth_qwen8b.toml +33 -0
- examples/multi_step/configs/verilog_rl_lora.toml +147 -0
- examples/multi_step/convert_traces_to_sft.py +84 -0
- examples/multi_step/crafter_rl_lora.md +70 -0
- examples/multi_step/judges/crafter_backend_judge.py +220 -0
- examples/multi_step/judges/verilog_backend_judge.py +234 -0
- examples/multi_step/readme.md +48 -0
- examples/multi_step/run_sft_qwen30b.sh +45 -0
- examples/multi_step/sse_metrics_streaming_notes.md +357 -0
- examples/multi_step/task_app_config_notes.md +494 -0
- examples/multi_step/verilog_rl_lora.md +218 -0
- examples/qwen_coder/README.md +102 -0
- examples/qwen_coder/_shared.py +113 -0
- examples/qwen_coder/configs/coder_lora_30b.toml +60 -0
- examples/qwen_coder/configs/coder_lora_4b.toml +61 -0
- examples/qwen_coder/configs/coder_lora_small.toml +57 -0
- examples/qwen_coder/generate_dataset.py +98 -0
- examples/qwen_coder/infer_ft_smoke.py +65 -0
- examples/qwen_coder/infer_prod_proxy.py +73 -0
- examples/qwen_coder/infer_via_synth.py +87 -0
- examples/qwen_coder/scripts/infer_coder.sh +19 -0
- examples/qwen_coder/scripts/train_coder_30b.sh +22 -0
- examples/qwen_coder/sft_full_17b.py +103 -0
- examples/qwen_coder/sft_lora_30b.py +110 -0
- examples/qwen_coder/subset_jsonl.py +39 -0
- examples/qwen_coder/todos.md +38 -0
- examples/qwen_coder/validate_jsonl.py +60 -0
- examples/qwen_vl/BUGS_AND_FIXES.md +232 -0
- examples/qwen_vl/IMAGE_VALIDATION_COMPLETE.md +271 -0
- examples/qwen_vl/IMAGE_VALIDATION_SUMMARY.md +260 -0
- examples/qwen_vl/INFERENCE_SFT_TESTS.md +412 -0
- examples/qwen_vl/NEXT_STEPS_2B.md +325 -0
- examples/qwen_vl/QUICKSTART.md +327 -0
- examples/qwen_vl/QUICKSTART_RL_VISION.md +110 -0
- examples/qwen_vl/README.md +152 -0
- examples/qwen_vl/RL_VISION_COMPLETE.md +475 -0
- examples/qwen_vl/RL_VISION_TESTING.md +333 -0
- examples/qwen_vl/SDK_VISION_INTEGRATION.md +328 -0
- examples/qwen_vl/SETUP_COMPLETE.md +274 -0
- examples/qwen_vl/VISION_TESTS_COMPLETE.md +489 -0
- examples/qwen_vl/VLM_PIPELINE_COMPLETE.md +242 -0
- examples/qwen_vl/__init__.py +2 -0
- examples/qwen_vl/collect_data_via_cli.md +415 -0
- examples/qwen_vl/collect_vision_traces.py +368 -0
- examples/qwen_vl/configs/crafter_rl_vision_qwen3vl4b.toml +110 -0
- examples/qwen_vl/configs/crafter_vlm_sft_example.toml +59 -0
- examples/qwen_vl/configs/eval_gpt4o_mini_vision.toml +26 -0
- examples/qwen_vl/configs/eval_gpt4o_vision_proper.toml +29 -0
- examples/qwen_vl/configs/eval_gpt5nano_vision.toml +26 -0
- examples/qwen_vl/configs/eval_qwen3vl_vision.toml +26 -0
- examples/qwen_vl/configs/filter_qwen3vl_sft.toml +49 -0
- examples/qwen_vl/configs/filter_vision_sft.toml +52 -0
- examples/qwen_vl/configs/filter_vision_test.toml +8 -0
- examples/qwen_vl/configs/sft_qwen3_vl_2b_test.toml +54 -0
- examples/qwen_vl/crafter_gpt5nano_agent.py +308 -0
- examples/qwen_vl/crafter_qwen_vl_agent.py +300 -0
- examples/qwen_vl/run_vision_comparison.sh +61 -0
- examples/qwen_vl/run_vision_sft_pipeline.sh +175 -0
- examples/qwen_vl/test_image_validation.py +201 -0
- examples/qwen_vl/test_sft_vision_data.py +110 -0
- examples/rl/README.md +169 -0
- examples/rl/configs/eval_base_qwen.toml +17 -0
- examples/rl/configs/eval_rl_qwen.toml +13 -0
- examples/rl/configs/rl_from_base_qwen.toml +62 -0
- examples/rl/configs/rl_from_base_qwen17.toml +80 -0
- examples/rl/configs/rl_from_ft_qwen.toml +37 -0
- examples/rl/download_dataset.py +80 -0
- examples/rl/run_eval.py +436 -0
- examples/rl/run_rl_and_save.py +111 -0
- examples/rl/task_app/README.md +21 -0
- examples/rl/task_app/math_single_step.py +990 -0
- examples/rl/task_app/math_task_app.py +111 -0
- examples/run_crafter_demo.sh +10 -0
- examples/sdk_prompt_learning_example.py +55 -0
- examples/sft/README.md +139 -0
- examples/sft/configs/crafter_fft_qwen0p6b.toml +49 -0
- examples/sft/configs/crafter_lora_qwen0p6b.toml +49 -0
- examples/sft/evaluate.py +117 -0
- examples/sft/export_dataset.py +120 -0
- examples/sft/generate_traces.py +164 -0
- examples/swe/__init__.py +12 -0
- examples/swe/task_app/README.md +135 -0
- examples/swe/task_app/__init__.py +2 -0
- examples/swe/task_app/grpo_swe_mini.py +604 -0
- examples/swe/task_app/grpo_swe_mini_task_app.py +124 -0
- examples/swe/task_app/hosted/README.md +173 -0
- examples/swe/task_app/hosted/__init__.py +5 -0
- examples/swe/task_app/hosted/branching.py +143 -0
- examples/swe/task_app/hosted/environment_routes.py +1289 -0
- examples/swe/task_app/hosted/envs/__init__.py +1 -0
- examples/swe/task_app/hosted/envs/crafter/__init__.py +6 -0
- examples/swe/task_app/hosted/envs/crafter/app.py +1 -0
- examples/swe/task_app/hosted/envs/crafter/environment.py +522 -0
- examples/swe/task_app/hosted/envs/crafter/policy.py +478 -0
- examples/swe/task_app/hosted/envs/crafter/react_agent.py +108 -0
- examples/swe/task_app/hosted/envs/crafter/shared.py +305 -0
- examples/swe/task_app/hosted/envs/crafter/tools.py +47 -0
- examples/swe/task_app/hosted/envs/mini_swe/__init__.py +8 -0
- examples/swe/task_app/hosted/envs/mini_swe/environment.py +1191 -0
- examples/swe/task_app/hosted/envs/mini_swe/policy.py +355 -0
- examples/swe/task_app/hosted/envs/mini_swe/shared.py +83 -0
- examples/swe/task_app/hosted/envs/mini_swe/tools.py +96 -0
- examples/swe/task_app/hosted/hosted_app.py +204 -0
- examples/swe/task_app/hosted/inference/__init__.py +5 -0
- examples/swe/task_app/hosted/inference/openai_client.py +584 -0
- examples/swe/task_app/hosted/main.py +100 -0
- examples/swe/task_app/hosted/policy_routes.py +1094 -0
- examples/swe/task_app/hosted/registry.py +195 -0
- examples/swe/task_app/hosted/rollout.py +1905 -0
- examples/swe/task_app/hosted/storage/__init__.py +5 -0
- examples/swe/task_app/hosted/storage/volume.py +211 -0
- examples/swe/task_app/hosted/test_agents.py +161 -0
- examples/swe/task_app/hosted/test_service.py +136 -0
- examples/swe/task_app/hosted/utils.py +62 -0
- examples/swe/task_app/morph_backend.py +178 -0
- examples/task_apps/IMAGE_ONLY_EVAL_QUICKSTART.md +258 -0
- examples/task_apps/TESTING.md +275 -0
- examples/task_apps/banking77/__init__.py +6 -0
- examples/task_apps/banking77/banking77_task_app.py +912 -0
- examples/task_apps/banking77/deploy_wrapper.py +46 -0
- examples/task_apps/banking77_pipeline/__init__.py +6 -0
- examples/task_apps/banking77_pipeline/banking77_pipeline_task_app.py +489 -0
- examples/task_apps/banking77_pipeline/deploy_wrapper.py +50 -0
- examples/task_apps/crafter/CREATE_SFT_DATASET.md +286 -0
- examples/task_apps/crafter/EVAL_IMAGE_ONLY_RESULTS.md +152 -0
- examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +187 -0
- examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +281 -0
- examples/task_apps/crafter/QUERY_EXAMPLES.md +203 -0
- examples/task_apps/crafter/README_IMAGE_ONLY_EVAL.md +316 -0
- examples/task_apps/crafter/eval_image_only_gpt4o.toml +28 -0
- examples/task_apps/crafter/eval_text_only_groq_llama.toml +36 -0
- examples/task_apps/crafter/filter_sft_dataset.toml +16 -0
- examples/task_apps/crafter/task_app/README.md +42 -0
- examples/task_apps/crafter/task_app/__init__.py +5 -0
- examples/task_apps/crafter/task_app/grpo_crafter.py +1055 -0
- examples/task_apps/crafter/task_app/grpo_crafter_task_app.py +146 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/README.md +173 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/__init__.py +5 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/branching.py +143 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/environment_routes.py +1226 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/__init__.py +1 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/__init__.py +6 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/app.py +1 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/environment.py +532 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +583 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +122 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/shared.py +305 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/tools.py +47 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/hosted_app.py +253 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/inference/__init__.py +5 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +999 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/main.py +100 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +1252 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/registry.py +195 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +2233 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/storage/__init__.py +5 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/storage/volume.py +211 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/test_agents.py +161 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/test_service.py +136 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +411 -0
- examples/task_apps/dev/pokemon_emerald/__init__.py +2 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/README.md +811 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/__init__.py +120 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/action.py +160 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/memory.py +155 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/perception.py +69 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/planning.py +96 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/simple.py +1502 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/system_prompt.py +4 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/grab_map.py +68 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/manual.py +216 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/__init__.py +35 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/emerald_utils.py +631 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/emulator.py +1544 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/enums.py +1428 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/memory_reader.py +4848 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/types.py +41 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/utils.py +298 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pyproject.toml +95 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/run.py +204 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/app.py +2152 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/client.py +429 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/frame_server.py +155 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/README.md +78 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/run_tests.py +122 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_agent_direct.py +76 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_agent_prompts.py +413 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_battle_state_formatting.py +204 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_dialogue_detection.py +133 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_dialogue_detection_comprehensive.py +229 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_direct_agent_emulator.py +300 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_fps_adjustment_pytest.py +205 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_house_to_outside_direct.py +200 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_house_to_outside_transition.py +284 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_map_ground_truth_comparison.py +468 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_memory_map.py +575 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_server_map_validation.py +311 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_torchic_state.py +259 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/anticheat.py +372 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/checkpoint.py +296 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/error_handler.py +275 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/get_local_ip.py +22 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/helpers.py +44 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/llm_logger.py +514 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_formatter.py +415 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_stitcher.py +1763 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_stitcher_singleton.py +33 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_trimmer.py +106 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_visualizer.py +334 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/ocr_dialogue.py +1020 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/recording.py +188 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/state_formatter.py +1481 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/vlm.py +862 -0
- examples/task_apps/dev/pokemon_emerald/modal_app.py +114 -0
- examples/task_apps/dev/pokemon_emerald/task_app/README.md +81 -0
- examples/task_apps/dev/pokemon_emerald/task_app/__init__.py +6 -0
- examples/task_apps/dev/pokemon_emerald/task_app/pokemon_emerald.py +685 -0
- examples/task_apps/enron/__init__.py +2 -0
- examples/task_apps/enron/eval_groq_qwen32.toml +16 -0
- examples/task_apps/enron/filter_sft.toml +5 -0
- examples/task_apps/enron/task_app/README.md +14 -0
- examples/task_apps/enron/task_app/__init__.py +1 -0
- examples/task_apps/enron/task_app/grpo_enron.py +906 -0
- examples/task_apps/enron/task_app/grpo_enron_task_app.py +146 -0
- examples/task_apps/enron/tests/__init__.py +4 -0
- examples/task_apps/enron/tests/conftest.py +115 -0
- examples/task_apps/enron/tests/integration/__init__.py +4 -0
- examples/task_apps/enron/tests/integration/test_enron_eval.py +179 -0
- examples/task_apps/enron/tests/integration/test_enron_rollout.py +135 -0
- examples/task_apps/enron/tests/unit/__init__.py +4 -0
- examples/task_apps/enron/tests/unit/test_enron_environment.py +126 -0
- examples/task_apps/gepa_benchmarks/__init__.py +7 -0
- examples/task_apps/gepa_benchmarks/common.py +260 -0
- examples/task_apps/gepa_benchmarks/hotpotqa_task_app.py +507 -0
- examples/task_apps/gepa_benchmarks/hover_task_app.py +436 -0
- examples/task_apps/gepa_benchmarks/ifbench_task_app.py +563 -0
- examples/task_apps/gepa_benchmarks/pupa_task_app.py +460 -0
- examples/task_apps/math/README.md +21 -0
- examples/task_apps/math/math_single_step.py +1000 -0
- examples/task_apps/math/math_task_app.py +115 -0
- examples/task_apps/pokemon_battle/__init__.py +2 -0
- examples/task_apps/pokemon_battle/modal_app.py +104 -0
- examples/task_apps/pokemon_battle/task_app/README.md +68 -0
- examples/task_apps/pokemon_battle/task_app/__init__.py +6 -0
- examples/task_apps/pokemon_battle/task_app/pokemon_showdown.py +932 -0
- examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_COMPLETE.md +283 -0
- examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_STATUS.md +155 -0
- examples/task_apps/pokemon_red/README.md +356 -0
- examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +428 -0
- examples/task_apps/pokemon_red/__init__.py +3 -0
- examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +30 -0
- examples/task_apps/pokemon_red/eval_pokemon_red_policy.py +224 -0
- examples/task_apps/pokemon_red/pallet_town_rl_config.toml +75 -0
- examples/task_apps/pokemon_red/task_app.py +1048 -0
- examples/task_apps/pokemon_red/test_pallet_town_rewards.py +193 -0
- examples/task_apps/sokoban/README.md +306 -0
- examples/task_apps/sokoban/__init__.py +3 -0
- examples/task_apps/sokoban/eval_groq_qwen32.toml +16 -0
- examples/task_apps/sokoban/eval_openai_gpt5.toml +16 -0
- examples/task_apps/sokoban/filter_sft.toml +5 -0
- examples/task_apps/sokoban/task_app.py +1058 -0
- examples/task_apps/sokoban/tests/__init__.py +4 -0
- examples/task_apps/sokoban/tests/conftest.py +113 -0
- examples/task_apps/sokoban/tests/integration/__init__.py +4 -0
- examples/task_apps/sokoban/tests/integration/test_sokoban_eval.py +57 -0
- examples/task_apps/sokoban/tests/integration/test_sokoban_rollout.py +198 -0
- examples/task_apps/sokoban/tests/unit/__init__.py +4 -0
- examples/task_apps/sokoban/tests/unit/test_sokoban_environment.py +114 -0
- examples/task_apps/verilog/__init__.py +1 -0
- examples/task_apps/verilog/eval_groq_qwen32b.toml +22 -0
- examples/task_apps/verilog/filter_sft.toml +5 -0
- examples/task_apps/verilog/task_app/README.md +12 -0
- examples/task_apps/verilog/task_app/__init__.py +1 -0
- examples/task_apps/verilog/task_app/grpo_verilog.py +1166 -0
- examples/task_apps/verilog/task_app/grpo_verilog_task_app.py +145 -0
- examples/task_apps/verilog/tests/__init__.py +4 -0
- examples/task_apps/verilog/tests/conftest.py +115 -0
- examples/task_apps/verilog/tests/integration/__init__.py +4 -0
- examples/task_apps/verilog/tests/integration/test_verilog_eval.py +181 -0
- examples/task_apps/verilog/tests/integration/test_verilog_rollout.py +55 -0
- examples/task_apps/verilog/tests/unit/__init__.py +4 -0
- examples/task_apps/verilog/tests/unit/test_verilog_scoring.py +118 -0
- examples/tunnel_gepa_banking77/README.md +106 -0
- examples/tunnel_gepa_banking77/banking77_gepa_tunnel.toml +95 -0
- examples/tunnel_gepa_banking77/keep_tunnel_running.py +60 -0
- examples/tunnel_gepa_banking77/run_gepa_with_tunnel.sh +226 -0
- examples/vlm/PROPOSAL.md +53 -0
- examples/vlm/README.md +68 -0
- examples/vlm/configs/crafter_vlm_gpt4o.toml +49 -0
- examples/vlm/crafter_image_only_agent.py +207 -0
- examples/vlm/crafter_openai_vlm_agent.py +275 -0
- examples/vlm/filter_image_rows.py +63 -0
- examples/vlm/run_crafter_vlm_benchmark.py +316 -0
- examples/warming_up_to_rl/_utils.py +92 -0
- examples/warming_up_to_rl/analyze_trace_db.py +422 -0
- examples/warming_up_to_rl/configs/crafter_fft.toml +53 -0
- examples/warming_up_to_rl/configs/crafter_fft_4b.toml +54 -0
- examples/warming_up_to_rl/configs/eval_fft_qwen4b.toml +22 -0
- examples/warming_up_to_rl/configs/eval_groq_qwen32b.toml +15 -0
- examples/warming_up_to_rl/configs/eval_modal_qwen4b.toml +24 -0
- examples/warming_up_to_rl/configs/eval_stepwise_complex.toml +35 -0
- examples/warming_up_to_rl/configs/eval_stepwise_consistent.toml +26 -0
- examples/warming_up_to_rl/configs/eval_stepwise_per_achievement.toml +36 -0
- examples/warming_up_to_rl/configs/eval_stepwise_simple.toml +32 -0
- examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +85 -0
- examples/warming_up_to_rl/configs/rl_from_ft.toml +58 -0
- examples/warming_up_to_rl/export_trace_sft.py +837 -0
- examples/warming_up_to_rl/groq_test.py +97 -0
- examples/warming_up_to_rl/manage_secrets.py +131 -0
- examples/warming_up_to_rl/old/event_rewards.md +234 -0
- examples/warming_up_to_rl/old/notes.md +73 -0
- examples/warming_up_to_rl/readme.md +110 -0
- examples/warming_up_to_rl/run_eval.py +736 -0
- examples/warming_up_to_rl/run_fft_and_save.py +380 -0
- examples/warming_up_to_rl/run_local_rollout.py +239 -0
- examples/warming_up_to_rl/run_local_rollout_modal.py +248 -0
- examples/warming_up_to_rl/run_local_rollout_parallel.py +405 -0
- examples/warming_up_to_rl/run_local_rollout_traced.py +477 -0
- examples/warming_up_to_rl/run_rl_and_save.py +124 -0
- examples/warming_up_to_rl/run_rollout_remote.py +156 -0
- examples/warming_up_to_rl/task_app/README.md +42 -0
- examples/warming_up_to_rl/task_app/grpo_crafter.py +876 -0
- examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +135 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/README.md +173 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +143 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +1226 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/__init__.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/__init__.py +6 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/app.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +522 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +454 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +108 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +305 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/tools.py +47 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +253 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +729 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +100 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +1114 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +195 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +1891 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +211 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +161 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +137 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +129 -0
- examples/workflows/math_rl/configs/eval_base_qwen.toml +15 -0
- examples/workflows/math_rl/configs/eval_rl_qwen.toml +11 -0
- examples/workflows/math_rl/configs/rl_from_base_qwen.toml +62 -0
- examples/workflows/math_rl/configs/rl_from_base_qwen17.toml +80 -0
- examples/workflows/math_rl/configs/rl_from_ft_qwen.toml +35 -0
- examples/workflows/math_rl/download_dataset.py +80 -0
- examples/workflows/math_rl/run_eval.py +436 -0
- examples/workflows/math_rl/run_rl_and_save.py +111 -0
- synth_ai/__init__.py +47 -23
- synth_ai/_utils/__init__.py +47 -0
- synth_ai/_utils/base_url.py +10 -0
- synth_ai/_utils/http.py +10 -0
- synth_ai/_utils/prompts.py +10 -0
- synth_ai/_utils/task_app_state.py +12 -0
- synth_ai/_utils/user_config.py +10 -0
- synth_ai/api/models/supported.py +514 -0
- synth_ai/api/train/__init__.py +63 -0
- synth_ai/api/train/builders.py +473 -0
- synth_ai/api/train/cli.py +1185 -0
- synth_ai/api/train/config_finder.py +246 -0
- synth_ai/api/train/configs/__init__.py +65 -0
- synth_ai/api/train/configs/prompt_learning.py +496 -0
- synth_ai/api/train/configs/rl.py +188 -0
- synth_ai/api/train/configs/sft.py +99 -0
- synth_ai/api/train/configs/shared.py +81 -0
- synth_ai/api/train/env_resolver.py +352 -0
- synth_ai/api/train/pollers.py +91 -0
- synth_ai/api/train/prompt_learning.py +425 -0
- synth_ai/api/train/sft.py +390 -0
- synth_ai/api/train/supported_algos.py +147 -0
- synth_ai/api/train/task_app.py +195 -0
- synth_ai/api/train/utils.py +244 -0
- synth_ai/api/train/validators.py +1117 -0
- synth_ai/api/tunnel.py +49 -0
- synth_ai/auth/credentials.py +94 -0
- synth_ai/baseline/__init__.py +25 -0
- synth_ai/baseline/config.py +209 -0
- synth_ai/baseline/discovery.py +214 -0
- synth_ai/baseline/execution.py +146 -0
- synth_ai/cfgs.py +227 -0
- synth_ai/cli/__init__.py +90 -45
- synth_ai/cli/_modal_wrapper.py +31 -0
- synth_ai/cli/_storage.py +20 -0
- synth_ai/cli/_typer_patch.py +47 -0
- synth_ai/cli/_validate_task_app.py +29 -0
- synth_ai/cli/balance.py +16 -4
- synth_ai/cli/calc.py +36 -21
- synth_ai/cli/claude.py +70 -0
- synth_ai/cli/codex.py +267 -0
- synth_ai/cli/commands/__init__.py +18 -0
- synth_ai/cli/commands/baseline/__init__.py +12 -0
- synth_ai/cli/commands/baseline/core.py +637 -0
- synth_ai/cli/commands/baseline/list.py +93 -0
- synth_ai/cli/commands/demo/__init__.py +6 -0
- synth_ai/cli/commands/demo/core.py +163 -0
- synth_ai/cli/commands/eval/__init__.py +19 -0
- synth_ai/cli/commands/eval/core.py +1112 -0
- synth_ai/cli/commands/eval/errors.py +81 -0
- synth_ai/cli/commands/eval/validation.py +133 -0
- synth_ai/cli/commands/filter/__init__.py +12 -0
- synth_ai/cli/commands/filter/core.py +424 -0
- synth_ai/cli/commands/filter/errors.py +55 -0
- synth_ai/cli/commands/filter/validation.py +77 -0
- synth_ai/cli/commands/help/__init__.py +185 -0
- synth_ai/cli/commands/help/core.py +72 -0
- synth_ai/cli/commands/smoke/__init__.py +7 -0
- synth_ai/cli/commands/smoke/core.py +1437 -0
- synth_ai/cli/commands/status/__init__.py +66 -0
- synth_ai/cli/commands/status/client.py +192 -0
- synth_ai/cli/commands/status/config.py +92 -0
- synth_ai/cli/commands/status/errors.py +20 -0
- synth_ai/cli/commands/status/formatters.py +164 -0
- synth_ai/cli/commands/status/subcommands/__init__.py +9 -0
- synth_ai/cli/commands/status/subcommands/files.py +79 -0
- synth_ai/cli/commands/status/subcommands/jobs.py +334 -0
- synth_ai/cli/commands/status/subcommands/models.py +79 -0
- synth_ai/cli/commands/status/subcommands/pricing.py +22 -0
- synth_ai/cli/commands/status/subcommands/runs.py +81 -0
- synth_ai/cli/commands/status/subcommands/session.py +183 -0
- synth_ai/cli/commands/status/subcommands/summary.py +47 -0
- synth_ai/cli/commands/status/subcommands/usage.py +203 -0
- synth_ai/cli/commands/status/utils.py +114 -0
- synth_ai/cli/commands/train/__init__.py +53 -0
- synth_ai/cli/commands/train/core.py +21 -0
- synth_ai/cli/commands/train/errors.py +117 -0
- synth_ai/cli/commands/train/judge_schemas.py +200 -0
- synth_ai/cli/commands/train/judge_validation.py +305 -0
- synth_ai/cli/commands/train/validation.py +386 -0
- synth_ai/cli/demo.py +32 -140
- synth_ai/cli/deploy.py +233 -0
- synth_ai/cli/eval/__init__.py +36 -0
- synth_ai/cli/eval/core.py +5 -0
- synth_ai/cli/eval/errors.py +31 -0
- synth_ai/cli/eval/validation.py +5 -0
- synth_ai/cli/filter/__init__.py +28 -0
- synth_ai/cli/filter/core.py +5 -0
- synth_ai/cli/filter/errors.py +23 -0
- synth_ai/cli/filter/validation.py +5 -0
- synth_ai/cli/legacy_root_backup.py +28 -22
- synth_ai/cli/lib/__init__.py +10 -0
- synth_ai/cli/lib/task_app_discovery.py +7 -0
- synth_ai/cli/lib/task_app_env.py +518 -0
- synth_ai/cli/mcp.py +34 -0
- synth_ai/cli/modal_serve/__init__.py +12 -0
- synth_ai/cli/modal_serve/core.py +14 -0
- synth_ai/cli/modal_serve/errors.py +8 -0
- synth_ai/cli/modal_serve/validation.py +11 -0
- synth_ai/cli/opencode.py +256 -0
- synth_ai/cli/recent.py +13 -7
- synth_ai/cli/rl_demo.py +166 -114
- synth_ai/cli/root.py +143 -112
- synth_ai/cli/serve/__init__.py +12 -0
- synth_ai/cli/serve/core.py +14 -0
- synth_ai/cli/serve/errors.py +8 -0
- synth_ai/cli/serve/validation.py +11 -0
- synth_ai/cli/setup.py +49 -0
- synth_ai/cli/status.py +7 -125
- synth_ai/cli/task_app_deploy.py +7 -0
- synth_ai/cli/task_app_list.py +25 -0
- synth_ai/cli/task_app_modal_serve.py +11 -0
- synth_ai/cli/task_app_serve.py +11 -0
- synth_ai/cli/task_apps.py +3134 -0
- synth_ai/cli/traces.py +9 -5
- synth_ai/cli/train/__init__.py +12 -0
- synth_ai/cli/train/core.py +21 -0
- synth_ai/cli/train/errors.py +8 -0
- synth_ai/cli/train/validation.py +24 -0
- synth_ai/cli/train.py +5 -0
- synth_ai/cli/turso.py +73 -0
- synth_ai/cli/watch.py +13 -18
- synth_ai/demos/__init__.py +10 -0
- synth_ai/demos/core/__init__.py +28 -1
- synth_ai/demos/core/cli.py +745 -416
- synth_ai/demos/crafter/__init__.py +1 -0
- synth_ai/demos/crafter/crafter_fft_4b.toml +55 -0
- synth_ai/demos/crafter/grpo_crafter_task_app.py +185 -0
- synth_ai/demos/crafter/rl_from_base_qwen4b.toml +74 -0
- synth_ai/demos/demo_registry.py +176 -0
- synth_ai/demos/demo_task_apps/__init__.py +7 -1
- synth_ai/demos/demo_task_apps/core.py +75 -37
- synth_ai/demos/demo_task_apps/crafter/__init__.py +1 -0
- synth_ai/demos/demo_task_apps/crafter/configs/crafter_fft_4b.toml +53 -0
- synth_ai/demos/demo_task_apps/crafter/configs/rl_from_base_qwen4b.toml +73 -0
- synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +184 -0
- synth_ai/demos/demo_task_apps/math/_common.py +1 -2
- synth_ai/demos/demo_task_apps/math/app.py +2 -1
- synth_ai/demos/demo_task_apps/math/config.toml +55 -110
- synth_ai/demos/demo_task_apps/math/deploy_modal.py +3 -6
- synth_ai/demos/demo_task_apps/math/modal_task_app.py +491 -166
- synth_ai/demos/demo_task_apps/math/task_app_entry.py +37 -0
- synth_ai/demos/math/__init__.py +1 -0
- synth_ai/demos/math/_common.py +16 -0
- synth_ai/demos/math/app.py +38 -0
- synth_ai/demos/math/config.toml +76 -0
- synth_ai/demos/math/deploy_modal.py +54 -0
- synth_ai/demos/math/modal_task_app.py +703 -0
- synth_ai/demos/math/task_app_entry.py +51 -0
- synth_ai/environments/environment/core.py +7 -1
- synth_ai/environments/examples/bandit/engine.py +12 -5
- synth_ai/environments/examples/bandit/environment.py +0 -1
- synth_ai/environments/examples/bandit/taskset.py +4 -4
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +7 -4
- synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +9 -5
- synth_ai/environments/examples/crafter_classic/environment.py +93 -2
- synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +4 -3
- synth_ai/environments/examples/enron/engine.py +7 -2
- synth_ai/environments/examples/enron/environment.py +68 -0
- synth_ai/environments/examples/red/engine.py +60 -12
- synth_ai/environments/examples/red/engine_helpers/memory_map.py +7 -0
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +151 -179
- synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_progression.py +477 -0
- synth_ai/environments/examples/red/engine_helpers/state_extraction.py +32 -0
- synth_ai/environments/examples/red/environment.py +86 -0
- synth_ai/environments/examples/red/trace_hooks_v3.py +168 -0
- synth_ai/environments/examples/sokoban/taskset.py +116 -0
- synth_ai/environments/examples/verilog/engine.py +104 -12
- synth_ai/environments/examples/wordle/environment.py +0 -1
- synth_ai/environments/reproducibility/tree.py +5 -6
- synth_ai/environments/service/app.py +11 -12
- synth_ai/environments/service/core_routes.py +10 -9
- synth_ai/environments/stateful/engine.py +1 -1
- synth_ai/environments/tasks/core.py +1 -0
- synth_ai/environments/tasks/filters.py +5 -6
- synth_ai/environments/tasks/utils.py +4 -5
- synth_ai/evals/__init__.py +15 -0
- synth_ai/evals/base.py +14 -5
- synth_ai/evals/client.py +82 -0
- synth_ai/evals/types.py +42 -0
- synth_ai/http.py +8 -22
- synth_ai/http_client.py +45 -12
- synth_ai/inference/__init__.py +0 -2
- synth_ai/inference/client.py +21 -7
- synth_ai/jobs/client.py +129 -80
- synth_ai/judge_schemas.py +127 -0
- synth_ai/learning/__init__.py +51 -6
- synth_ai/learning/algorithms.py +14 -0
- synth_ai/learning/client.py +122 -30
- synth_ai/learning/config.py +2 -40
- synth_ai/learning/constants.py +0 -2
- synth_ai/learning/ft_client.py +4 -56
- synth_ai/learning/health.py +14 -8
- synth_ai/learning/jobs.py +43 -47
- synth_ai/learning/prompt_learning_client.py +276 -0
- synth_ai/learning/prompt_learning_types.py +185 -0
- synth_ai/{rl → learning/rl}/__init__.py +14 -5
- synth_ai/learning/rl/client.py +269 -0
- synth_ai/learning/rl/config.py +31 -0
- synth_ai/{rl → learning/rl}/contracts.py +5 -10
- synth_ai/{rl → learning/rl}/env_keys.py +45 -16
- synth_ai/learning/rl/secrets.py +13 -0
- synth_ai/learning/rl_client.py +2 -253
- synth_ai/learning/sft/__init__.py +29 -0
- synth_ai/learning/sft/client.py +68 -0
- synth_ai/learning/sft/config.py +270 -0
- synth_ai/learning/sft/data.py +698 -0
- synth_ai/learning/sse.py +25 -26
- synth_ai/learning/validators.py +29 -25
- synth_ai/mcp/__init__.py +5 -0
- synth_ai/mcp/__main__.py +8 -0
- synth_ai/mcp/main.py +254 -0
- synth_ai/mcp/setup.py +100 -0
- synth_ai/modal.py +257 -0
- synth_ai/pricing/__init__.py +3 -0
- synth_ai/pricing/model_pricing.py +64 -0
- synth_ai/session/__init__.py +75 -0
- synth_ai/session/client.py +383 -0
- synth_ai/session/constants.py +63 -0
- synth_ai/session/exceptions.py +105 -0
- synth_ai/session/manager.py +139 -0
- synth_ai/session/models.py +89 -0
- synth_ai/session/query.py +110 -0
- synth_ai/spec/__init__.py +46 -0
- synth_ai/spec/dataclasses.py +149 -0
- synth_ai/spec/loader.py +144 -0
- synth_ai/spec/serializer.py +199 -0
- synth_ai/spec/validation.py +250 -0
- synth_ai/streaming/__init__.py +29 -0
- synth_ai/streaming/config.py +94 -0
- synth_ai/streaming/handlers.py +589 -0
- synth_ai/streaming/streamer.py +320 -0
- synth_ai/streaming/types.py +95 -0
- synth_ai/task/__init__.py +116 -3
- synth_ai/task/apps/__init__.py +132 -0
- synth_ai/task/auth.py +165 -0
- synth_ai/task/client.py +167 -0
- synth_ai/task/config.py +261 -0
- synth_ai/task/contracts.py +173 -57
- synth_ai/task/datasets.py +108 -0
- synth_ai/task/errors.py +50 -0
- synth_ai/task/health.py +17 -11
- synth_ai/task/inference_api.py +101 -0
- synth_ai/task/json.py +111 -0
- synth_ai/task/proxy.py +251 -0
- synth_ai/task/rubrics/__init__.py +55 -0
- synth_ai/task/rubrics/loaders.py +156 -0
- synth_ai/task/rubrics/models.py +57 -0
- synth_ai/task/rubrics/scoring.py +116 -0
- synth_ai/task/rubrics/strict.py +149 -0
- synth_ai/task/rubrics.py +219 -0
- synth_ai/task/server.py +432 -0
- synth_ai/task/trace_correlation_helpers.py +328 -0
- synth_ai/task/tracing_utils.py +95 -0
- synth_ai/task/validators.py +449 -6
- synth_ai/task/vendors.py +59 -0
- synth_ai/tracing_v3/__init__.py +4 -0
- synth_ai/tracing_v3/abstractions.py +21 -4
- synth_ai/tracing_v3/config.py +167 -22
- synth_ai/tracing_v3/constants.py +21 -0
- synth_ai/tracing_v3/db_config.py +42 -29
- synth_ai/tracing_v3/decorators.py +80 -45
- synth_ai/tracing_v3/examples/basic_usage.py +15 -9
- synth_ai/tracing_v3/hooks.py +6 -4
- synth_ai/tracing_v3/llm_call_record_helpers.py +161 -61
- synth_ai/tracing_v3/migration_helper.py +1 -2
- synth_ai/tracing_v3/replica_sync.py +12 -7
- synth_ai/tracing_v3/serialization.py +130 -0
- synth_ai/tracing_v3/session_tracer.py +86 -21
- synth_ai/tracing_v3/storage/base.py +98 -12
- synth_ai/tracing_v3/storage/config.py +63 -16
- synth_ai/tracing_v3/storage/factory.py +11 -9
- synth_ai/tracing_v3/storage/utils.py +15 -11
- synth_ai/tracing_v3/trace_utils.py +317 -0
- synth_ai/tracing_v3/turso/__init__.py +8 -21
- synth_ai/tracing_v3/turso/daemon.py +123 -15
- synth_ai/tracing_v3/turso/models.py +5 -2
- synth_ai/tracing_v3/turso/native_manager.py +1293 -0
- synth_ai/tracing_v3/utils.py +5 -4
- synth_ai/tunnel.py +143 -0
- synth_ai/tunnel_deploy.py +278 -0
- synth_ai/types.py +8 -0
- synth_ai/urls.py +11 -0
- synth_ai/utils/__init__.py +166 -0
- synth_ai/utils/agents.py +74 -0
- synth_ai/utils/apps.py +152 -0
- synth_ai/utils/base_url.py +94 -0
- synth_ai/utils/bin.py +39 -0
- synth_ai/utils/claude.py +36 -0
- synth_ai/utils/cli.py +284 -0
- synth_ai/utils/config.py +81 -0
- synth_ai/utils/env.py +346 -0
- synth_ai/utils/errors.py +85 -0
- synth_ai/utils/http.py +172 -0
- synth_ai/utils/json.py +72 -0
- synth_ai/utils/log_filter.py +99 -0
- synth_ai/utils/logging.py +198 -0
- synth_ai/utils/modal.py +299 -0
- synth_ai/utils/paths.py +95 -0
- synth_ai/utils/process.py +233 -0
- synth_ai/utils/prompts.py +39 -0
- synth_ai/utils/sqld.py +122 -0
- synth_ai/utils/ssl.py +25 -0
- synth_ai/utils/task_app_discovery.py +882 -0
- synth_ai/utils/task_app_env.py +186 -0
- synth_ai/utils/task_app_state.py +318 -0
- synth_ai/utils/tunnel/__init__.py +12 -0
- synth_ai/utils/tunnel/config.py +55 -0
- synth_ai/utils/user_config.py +137 -0
- synth_ai/uvicorn.py +77 -0
- synth_ai-0.2.23.dev3.dist-info/METADATA +357 -0
- synth_ai-0.2.23.dev3.dist-info/RECORD +983 -0
- {synth_ai-0.2.8.dev4.dist-info → synth_ai-0.2.23.dev3.dist-info}/entry_points.txt +0 -1
- {synth_ai-0.2.8.dev4.dist-info → synth_ai-0.2.23.dev3.dist-info}/top_level.txt +1 -0
- synth_ai/cli/man.py +0 -106
- synth_ai/core/experiment.py +0 -15
- synth_ai/core/system.py +0 -15
- synth_ai/environments/examples/sokoban/units/astar_common.py +0 -95
- synth_ai/experimental/synth_oss.py +0 -446
- synth_ai/handshake.py +0 -63
- synth_ai/install_sqld.sh +0 -40
- synth_ai/learning/offline/dpo.py +0 -0
- synth_ai/learning/offline/providers.py +0 -7
- synth_ai/learning/offline/sft.py +0 -0
- synth_ai/learning/offline/shared.py +0 -0
- synth_ai/learning/online/grpo.py +0 -0
- synth_ai/learning/online/irft.py +0 -0
- synth_ai/learning/prompts/banking77_injection_eval.py +0 -168
- synth_ai/learning/prompts/gepa.py +0 -0
- synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +0 -213
- synth_ai/learning/prompts/mipro.py +0 -289
- synth_ai/learning/prompts/random_search.py +0 -246
- synth_ai/learning/prompts/run_mipro_banking77.py +0 -172
- synth_ai/learning/prompts/run_random_search_banking77.py +0 -324
- synth_ai/lm/__init__.py +0 -51
- synth_ai/lm/caching/constants.py +0 -6
- synth_ai/lm/caching/dbs.py +0 -0
- synth_ai/lm/caching/ephemeral.py +0 -102
- synth_ai/lm/caching/handler.py +0 -137
- synth_ai/lm/caching/initialize.py +0 -11
- synth_ai/lm/caching/persistent.py +0 -114
- synth_ai/lm/config.py +0 -110
- synth_ai/lm/constants.py +0 -32
- synth_ai/lm/core/__init__.py +0 -8
- synth_ai/lm/core/all.py +0 -73
- synth_ai/lm/core/exceptions.py +0 -7
- synth_ai/lm/core/main.py +0 -319
- synth_ai/lm/core/main_v3.py +0 -594
- synth_ai/lm/core/synth_models.py +0 -48
- synth_ai/lm/core/vendor_clients.py +0 -188
- synth_ai/lm/cost/monitor.py +0 -1
- synth_ai/lm/cost/statefulness.py +0 -1
- synth_ai/lm/injection.py +0 -80
- synth_ai/lm/overrides.py +0 -206
- synth_ai/lm/provider_support/__init__.py +0 -8
- synth_ai/lm/provider_support/anthropic.py +0 -972
- synth_ai/lm/provider_support/openai.py +0 -1139
- synth_ai/lm/provider_support/suppress_logging.py +0 -31
- synth_ai/lm/structured_outputs/handler.py +0 -440
- synth_ai/lm/structured_outputs/inject.py +0 -297
- synth_ai/lm/structured_outputs/rehabilitate.py +0 -185
- synth_ai/lm/tools/__init__.py +0 -3
- synth_ai/lm/tools/base.py +0 -172
- synth_ai/lm/unified_interface.py +0 -202
- synth_ai/lm/vendors/base.py +0 -81
- synth_ai/lm/vendors/core/anthropic_api.py +0 -387
- synth_ai/lm/vendors/core/gemini_api.py +0 -292
- synth_ai/lm/vendors/core/mistral_api.py +0 -322
- synth_ai/lm/vendors/core/openai_api.py +0 -225
- synth_ai/lm/vendors/core/synth_dev_api.py +0 -0
- synth_ai/lm/vendors/local/ollama.py +0 -0
- synth_ai/lm/vendors/openai_standard.py +0 -780
- synth_ai/lm/vendors/openai_standard_responses.py +0 -256
- synth_ai/lm/vendors/retries.py +0 -22
- synth_ai/lm/vendors/supported/custom_endpoint.py +0 -417
- synth_ai/lm/vendors/supported/deepseek.py +0 -69
- synth_ai/lm/vendors/supported/grok.py +0 -75
- synth_ai/lm/vendors/supported/groq.py +0 -16
- synth_ai/lm/vendors/supported/ollama.py +0 -15
- synth_ai/lm/vendors/supported/openrouter.py +0 -74
- synth_ai/lm/vendors/supported/together.py +0 -11
- synth_ai/lm/vendors/synth_client.py +0 -808
- synth_ai/lm/warmup.py +0 -186
- synth_ai/rl/secrets.py +0 -19
- synth_ai/scripts/verify_rewards.py +0 -100
- synth_ai/tracing/__init__.py +0 -30
- synth_ai/tracing_v1/__init__.py +0 -33
- synth_ai/tracing_v3/turso/manager.py +0 -760
- synth_ai/v0/tracing/abstractions.py +0 -224
- synth_ai/v0/tracing/base_client.py +0 -91
- synth_ai/v0/tracing/client_manager.py +0 -131
- synth_ai/v0/tracing/config.py +0 -142
- synth_ai/v0/tracing/context.py +0 -146
- synth_ai/v0/tracing/decorators.py +0 -682
- synth_ai/v0/tracing/events/__init__.py +0 -0
- synth_ai/v0/tracing/events/manage.py +0 -147
- synth_ai/v0/tracing/events/scope.py +0 -86
- synth_ai/v0/tracing/events/store.py +0 -228
- synth_ai/v0/tracing/immediate_client.py +0 -151
- synth_ai/v0/tracing/local.py +0 -18
- synth_ai/v0/tracing/log_client_base.py +0 -73
- synth_ai/v0/tracing/retry_queue.py +0 -186
- synth_ai/v0/tracing/trackers.py +0 -515
- synth_ai/v0/tracing/upload.py +0 -512
- synth_ai/v0/tracing/utils.py +0 -9
- synth_ai/v0/tracing_v1/__init__.py +0 -16
- synth_ai/v0/tracing_v1/abstractions.py +0 -224
- synth_ai/v0/tracing_v1/base_client.py +0 -91
- synth_ai/v0/tracing_v1/client_manager.py +0 -131
- synth_ai/v0/tracing_v1/config.py +0 -142
- synth_ai/v0/tracing_v1/context.py +0 -146
- synth_ai/v0/tracing_v1/decorators.py +0 -703
- synth_ai/v0/tracing_v1/events/__init__.py +0 -0
- synth_ai/v0/tracing_v1/events/manage.py +0 -147
- synth_ai/v0/tracing_v1/events/scope.py +0 -86
- synth_ai/v0/tracing_v1/events/store.py +0 -228
- synth_ai/v0/tracing_v1/immediate_client.py +0 -151
- synth_ai/v0/tracing_v1/local.py +0 -18
- synth_ai/v0/tracing_v1/log_client_base.py +0 -73
- synth_ai/v0/tracing_v1/retry_queue.py +0 -186
- synth_ai/v0/tracing_v1/trackers.py +0 -515
- synth_ai/v0/tracing_v1/upload.py +0 -527
- synth_ai/v0/tracing_v1/utils.py +0 -9
- synth_ai/zyk/__init__.py +0 -30
- synth_ai-0.2.8.dev4.dist-info/METADATA +0 -129
- synth_ai-0.2.8.dev4.dist-info/RECORD +0 -420
- {synth_ai/lm/caching → examples/task_apps}/__init__.py +0 -0
- {synth_ai/lm/cost → examples/task_apps/crafter}/__init__.py +0 -0
- {synth_ai/lm/structured_outputs → examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server}/__init__.py +0 -0
- {synth_ai/lm/vendors → examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests}/__init__.py +0 -0
- {synth_ai/lm/vendors/core → examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils}/__init__.py +0 -0
- {synth_ai/lm/vendors/local → examples/task_apps/math}/__init__.py +0 -0
- {synth_ai/lm/vendors/supported → examples/workflows}/__init__.py +0 -0
- {synth_ai/v0/tracing → examples/workflows/math_rl}/__init__.py +0 -0
- /synth_ai/{compound/cais.py → cli/__main__.py} +0 -0
- /synth_ai/{learning/filtering.py → py.typed} +0 -0
- {synth_ai-0.2.8.dev4.dist-info → synth_ai-0.2.23.dev3.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.8.dev4.dist-info → synth_ai-0.2.23.dev3.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1020 @@
|
|
|
1
|
+
"""
|
|
2
|
+
OCR-based dialogue detection for Pokemon Emerald.
|
|
3
|
+
Provides fallback text detection when memory reading fails or returns stale data.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import cv2
|
|
7
|
+
import numpy as np
|
|
8
|
+
from PIL import Image
|
|
9
|
+
from typing import Optional, List, Tuple
|
|
10
|
+
import re
|
|
11
|
+
import logging
|
|
12
|
+
|
|
13
|
+
try:
|
|
14
|
+
import pytesseract
|
|
15
|
+
OCR_AVAILABLE = True
|
|
16
|
+
except ImportError:
|
|
17
|
+
OCR_AVAILABLE = False
|
|
18
|
+
logging.warning("pytesseract not available - OCR dialogue detection disabled")
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
class OCRDialogueDetector:
    """OCR-based dialogue detection for Pokemon Emerald.

    Class-level constants describe where dialogue text appears on the
    240x160 GBA screen and which pixel colours count as text / dialogue-box
    background.  All coordinate dicts use keys 'x', 'y', 'width', 'height'.
    """

    # Full dialogue box region: spans the whole screen width, bottom strip
    # starting at row 104 (160 - 104 = 56 rows tall).
    DIALOGUE_BOX_COORDS = {
        'x': 0,
        'y': 104,
        'width': 240,
        'height': 56
    }

    # Tighter region used for the actual OCR pass: the text area inside the
    # box border (8px side borders skipped, shifted down for alignment).
    OCR_TEXT_COORDS = {
        'x': 8,
        'y': 116,
        'width': 224,
        'height': 36
    }

    # RGB values treated as dialogue text.  The list covers the exact glyph
    # colour (99, 99, 99), anti-aliasing/rendering variants around it,
    # shadow greys, and tinted colours used for names/special text.
    DIALOGUE_TEXT_COLORS = [
        (99, 99, 99),     # exact glyph colour
        (95, 95, 95),     # slightly darker
        (103, 103, 103),  # slightly lighter
        (91, 91, 91),     # darker variant
        (107, 107, 107),  # lighter variant
        (99, 99, 95),     # slight colour shift
        (99, 95, 99),     # slight colour shift
        (95, 99, 99),     # slight colour shift
        (87, 87, 87),     # darker grey
        (111, 111, 111),  # lighter grey
        (79, 79, 79),     # much darker
        (119, 119, 119),  # much lighter
        (64, 64, 64),     # dark shadow
        (72, 72, 72),     # medium shadow
        (56, 56, 56),     # darker shadow
        (48, 48, 48),     # very dark shadow
        (99, 99, 128),    # blue-tinted (names)
        (128, 99, 99),    # red-tinted (special text)
        (99, 128, 99),    # green-tinted (special text)
    ]

    # Maximum RGB distance for a pixel to match one of the colours above.
    COLOR_TOLERANCE = 40

    # RGB values treated as dialogue-box background: the green border colour
    # (85, 204, 128) plus variants, and the near-white text background.
    DIALOGUE_BOX_BACKGROUND_COLORS = [
        (85, 204, 128),   # exact green border colour
        (80, 199, 123),   # slightly darker green
        (90, 209, 133),   # slightly lighter green
        (85, 204, 128),   # exact match (kept from original list)
        (75, 194, 118),   # darker green variant
        (95, 214, 138),   # lighter green variant
        (255, 255, 255),  # exact white text background
        (254, 254, 254),  # very close to white
        (253, 253, 253),  # slightly off white
        (252, 252, 252),  # light grey-white
        (248, 248, 248),  # near white
        (240, 240, 240),  # light off-white
        (255, 255, 254),  # slight yellow tint
        (254, 255, 255),  # slight cyan tint
    ]

    # Fraction of the dialogue region that must match a background colour
    # before the box is considered visible/active.
    DIALOGUE_BOX_BACKGROUND_THRESHOLD = 0.4

    # Battle text occupies a slightly different strip than field dialogue.
    BATTLE_TEXT_COORDS = {
        'x': 8,
        'y': 120,
        'width': 224,
        'height': 40
    }
def __init__(self):
|
|
106
|
+
self.last_detected_text = ""
|
|
107
|
+
self.text_stability_threshold = 2 # Frames text must be stable
|
|
108
|
+
self.stable_text_count = 0
|
|
109
|
+
self.debug_color_detection = False # Set to True for color debugging
|
|
110
|
+
self.use_full_frame_scan = False # Set to True to enable full-frame scanning (may pick up noise)
|
|
111
|
+
self.skip_dialogue_box_detection = False # Set to True to temporarily bypass dialogue box detection
|
|
112
|
+
|
|
113
|
+
def detect_dialogue_from_screenshot(self, screenshot: Image.Image) -> Optional[str]:
    """
    Detect dialogue text from Pokemon Emerald dialogue regions only.

    First verifies the dialogue box is visible (unless bypassed via
    ``skip_dialogue_box_detection``) to prevent false positives, then tries
    the field-dialogue region, the battle-text region, and finally an
    optional full-frame scan.

    Args:
        screenshot: PIL Image of the game screen

    Returns:
        Detected dialogue text or None if no text found
    """
    if not OCR_AVAILABLE:
        return None

    try:
        screenshot_np = np.array(screenshot)

        # STEP 1: Only OCR when a dialogue box is actually on screen
        # (unless the gate is explicitly bypassed).
        if not self.skip_dialogue_box_detection and not self.is_dialogue_box_visible(screenshot):
            logger.debug("No dialogue box detected - skipping OCR")
            return None

        # STEP 2: Try candidate regions in priority order: field dialogue
        # text area first, then the battle text area.  (The original code
        # repeated the extract/validate/return sequence verbatim for each
        # region; folded into one loop.)
        for coords in (self.OCR_TEXT_COORDS, self.BATTLE_TEXT_COORDS):
            raw = self._extract_text_from_region(screenshot_np, coords)
            if raw:
                validated = self._validate_and_clean_text(raw)
                if validated:
                    return validated

        # STEP 3: Full-frame scan, only if explicitly enabled (can pick up
        # noise from non-dialogue UI elements).
        if self.use_full_frame_scan:
            raw = self._extract_text_from_full_frame(screenshot)
            if raw:
                validated = self._validate_and_clean_text(raw)
                if validated:
                    return validated

        return None

    except Exception as e:
        # Best-effort fallback path: OCR failures must never crash the
        # caller, so log at debug and report "no text".
        logger.debug(f"OCR dialogue detection failed: {e}")
        return None
def _extract_text_from_full_frame(self, screenshot: Image.Image) -> Optional[str]:
    """
    Extract text from the entire screenshot using OCR.

    More comprehensive than region-specific detection: tries Tesseract's
    uniform-block mode (PSM 6) first, then falls back to sparse-text mode
    (PSM 11) when the first pass yields nothing usable.
    """
    try:
        # Preprocess the whole frame into an OCR-friendly binary image.
        frame = self._preprocess_full_frame_for_ocr(np.array(screenshot))

        # First pass: assume one uniform block of text.
        primary = pytesseract.image_to_string(frame, config=r'--oem 3 --psm 6')
        cleaned = self._clean_full_frame_text(primary)
        if cleaned:
            return cleaned

        # Fallback pass: sparse text, find as much as possible.
        fallback = pytesseract.image_to_string(frame, config=r'--oem 3 --psm 11')
        cleaned = self._clean_full_frame_text(fallback)
        return cleaned if cleaned else None

    except Exception as e:
        logger.debug(f"Full frame OCR failed: {e}")
        return None
def _preprocess_full_frame_for_ocr(self, image_np: np.ndarray) -> np.ndarray:
    """Binarise an entire frame via Pokemon dialogue-colour matching.

    Returns a uint8 image with text rendered black on white, cleaned up
    with morphological operations — the form Tesseract reads best.
    """
    # Colour matching needs 3 channels; expand grayscale input.
    if image_np.ndim != 3:
        image_np = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGB)

    # Upscale 3x so per-pixel colour matching has more precision to work with.
    scaled = cv2.resize(image_np, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)

    # Pixels matching dialogue text colours become black on a white
    # background (better for OCR than white-on-black).
    mask = self._create_dialogue_color_mask(scaled)
    binary = np.where(mask, 0, 255).astype(np.uint8)

    # Close small gaps inside glyphs, then a mild dilation to thicken them.
    closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, np.ones((2, 2), np.uint8))
    return cv2.dilate(closed, np.ones((1, 1), np.uint8), iterations=1)
def _clean_full_frame_text(self, raw_text: str) -> Optional[str]:
|
|
235
|
+
"""Clean and validate text extracted from full frame"""
|
|
236
|
+
if not raw_text:
|
|
237
|
+
return None
|
|
238
|
+
|
|
239
|
+
# Remove excessive whitespace and special characters
|
|
240
|
+
lines = []
|
|
241
|
+
for line in raw_text.split('\n'):
|
|
242
|
+
# Clean each line
|
|
243
|
+
cleaned_line = re.sub(r'\s+', ' ', line.strip())
|
|
244
|
+
|
|
245
|
+
# Filter out lines that are likely noise
|
|
246
|
+
if len(cleaned_line) >= 2: # Minimum meaningful length
|
|
247
|
+
# Check if line has reasonable character content
|
|
248
|
+
alpha_ratio = sum(c.isalpha() for c in cleaned_line) / len(cleaned_line)
|
|
249
|
+
if alpha_ratio >= 0.3: # At least 30% alphabetic characters
|
|
250
|
+
lines.append(cleaned_line)
|
|
251
|
+
|
|
252
|
+
if not lines:
|
|
253
|
+
return None
|
|
254
|
+
|
|
255
|
+
# Join lines and do final cleanup
|
|
256
|
+
full_text = ' '.join(lines)
|
|
257
|
+
|
|
258
|
+
# Remove common OCR artifacts for Pokemon games
|
|
259
|
+
# These are characters commonly misread by OCR
|
|
260
|
+
ocr_artifacts = [
|
|
261
|
+
r'[|\\/_]', # Common line artifacts
|
|
262
|
+
r'^\W+', # Leading non-word characters
|
|
263
|
+
r'\W+$', # Trailing non-word characters
|
|
264
|
+
]
|
|
265
|
+
|
|
266
|
+
for artifact in ocr_artifacts:
|
|
267
|
+
full_text = re.sub(artifact, ' ', full_text)
|
|
268
|
+
|
|
269
|
+
# Final cleanup
|
|
270
|
+
full_text = re.sub(r'\s+', ' ', full_text).strip()
|
|
271
|
+
|
|
272
|
+
# Validate final result
|
|
273
|
+
if len(full_text) < 3:
|
|
274
|
+
return None
|
|
275
|
+
|
|
276
|
+
# Check for reasonable content (not just numbers/symbols)
|
|
277
|
+
alpha_count = sum(c.isalpha() for c in full_text)
|
|
278
|
+
if alpha_count < 3: # Need at least 3 letters
|
|
279
|
+
return None
|
|
280
|
+
|
|
281
|
+
return full_text
|
|
282
|
+
|
|
283
|
+
def detect_all_text_regions(self, screenshot: Image.Image) -> List[dict]:
    """
    Detect all text regions in the screenshot with their locations.

    Useful for debugging and comprehensive text detection.  Each entry is a
    dict with 'text', 'confidence', 'bbox' (x, y, w, h in original screen
    coordinates), and 'area', sorted most-confident/largest first.
    """
    if not OCR_AVAILABLE:
        return []

    try:
        processed = self._preprocess_full_frame_for_ocr(np.array(screenshot))

        # Per-word text + bounding boxes from Tesseract.
        data = pytesseract.image_to_data(processed, output_type=pytesseract.Output.DICT)

        # BUG FIX: _preprocess_full_frame_for_ocr upscales the frame by 3x
        # (fx=3, fy=3), but coordinates were previously divided by 2,
        # misplacing every bounding box.  Divide by the actual scale factor.
        scale = 3

        text_regions = []
        for i in range(len(data['level'])):
            confidence = int(data['conf'][i])
            text = data['text'][i].strip()

            # Skip low-confidence hits and single characters.
            if confidence <= 30 or len(text) <= 1:
                continue

            # Require at least 30% alphabetic content to filter OCR noise.
            alpha_ratio = sum(c.isalpha() for c in text) / len(text)
            if alpha_ratio < 0.3:
                continue

            # Map the bbox back from the upscaled frame to screen space.
            x = data['left'][i] // scale
            y = data['top'][i] // scale
            w = data['width'][i] // scale
            h = data['height'][i] // scale

            text_regions.append({
                'text': text,
                'confidence': confidence,
                'bbox': (x, y, w, h),
                'area': w * h
            })

        # Larger, more confident regions first.
        text_regions.sort(key=lambda r: (r['confidence'], r['area']), reverse=True)
        return text_regions

    except Exception as e:
        logger.debug(f"Text region detection failed: {e}")
        return []
def _extract_text_from_region(self, image_np: np.ndarray, coords: dict) -> str:
    """Run OCR over one rectangular region of *image_np* and return stripped text.

    *coords* must supply 'x', 'y', 'width' and 'height' in image pixels.
    """
    # Crop the region of interest from the full frame.
    top, left = coords['y'], coords['x']
    crop = image_np[top:top + coords['height'], left:left + coords['width']]

    # Color-mask / binarize the crop for better OCR accuracy.
    prepared = self._preprocess_for_ocr(crop)

    # Tesseract settings tuned for Pokemon Emerald dialogue text.
    ocr_config = r'--oem 3 --psm 6'
    raw = pytesseract.image_to_string(prepared, config=ocr_config)
    return raw.strip()
def _preprocess_for_ocr(self, roi: np.ndarray) -> np.ndarray:
    """Preprocess an image region using Pokemon-specific dialogue color matching.

    Returns a pure two-level uint8 image (0/255) with black text on a white
    background, which is the form Tesseract reads best.
    """
    # Keep original color information for color matching
    if len(roi.shape) != 3:
        # Convert grayscale back to color for processing (duplicate channels)
        roi = cv2.cvtColor(roi, cv2.COLOR_GRAY2RGB)

    # Scale up first for better color detection precision
    roi = cv2.resize(roi, None, fx=4, fy=4, interpolation=cv2.INTER_CUBIC)

    # Create mask for dialogue text colors
    text_mask = self._create_dialogue_color_mask(roi)

    # Apply color mask: black text (0) on white background (255).
    # np.where with 0/255 already yields a pure binary image, so the old
    # follow-up re-threshold pass (> 127 -> 255 else 0) was a no-op and
    # has been removed.
    binary_roi = np.where(text_mask, 0, 255).astype(np.uint8)

    # Close gaps in letters
    kernel_close = np.ones((2, 2), np.uint8)
    binary_roi = cv2.morphologyEx(binary_roi, cv2.MORPH_CLOSE, kernel_close)

    # NOTE(review): with black text on a white background, cv2.dilate grows
    # the WHITE background and therefore thins the glyphs — the opposite of
    # the original "make text thicker" intent; cv2.erode would thicken them.
    # Kept as-is pending visual verification against real screenshots.
    kernel_dilate = np.ones((2, 2), np.uint8)
    binary_roi = cv2.dilate(binary_roi, kernel_dilate, iterations=2)

    # Remove small noise while preserving text (a 1x1 kernel is effectively
    # a no-op; kept for parity with the tuned pipeline).
    kernel_open = np.ones((1, 1), np.uint8)
    binary_roi = cv2.morphologyEx(binary_roi, cv2.MORPH_OPEN, kernel_open)

    return binary_roi
def _create_dialogue_color_mask(self, image: np.ndarray) -> np.ndarray:
    """Create binary mask for pixels matching Pokemon dialogue text colors"""
    # Non-color input cannot be matched; return an all-False mask.
    if len(image.shape) != 3:
        return np.zeros(image.shape[:2], dtype=bool)

    combined = np.zeros(image.shape[:2], dtype=bool)
    per_color_hits = []

    for target in self.DIALOGUE_TEXT_COLORS:
        # Euclidean RGB distance of every pixel to this reference color.
        distance = np.sqrt(np.sum((image - target) ** 2, axis=2))
        hits = distance <= self.COLOR_TOLERANCE
        combined |= hits

        if self.debug_color_detection:
            per_color_hits.append(np.sum(hits))

    # Optional diagnostics: report how many pixels each color matched.
    if self.debug_color_detection and any(per_color_hits):
        total_matched = np.sum(combined)
        logger.debug(f"Color matching: {total_matched} pixels matched dialogue colors")
        for ref, count in zip(self.DIALOGUE_TEXT_COLORS, per_color_hits):
            if count > 0:
                logger.debug(f"  Color {ref}: {count} pixels")

    return combined
def is_dialogue_box_visible(self, screenshot: Image.Image) -> bool:
    """
    Check if a dialogue box is actually visible by looking for green horizontal border lines.
    Searches for the characteristic green lines above and below the dialogue text.

    Args:
        screenshot: PIL Image of the game screen

    Returns:
        True if dialogue box is detected, False otherwise
    """
    if not screenshot:
        return False

    try:
        # Convert to numpy array; only color (H, W, 3) frames are supported.
        image_np = np.array(screenshot)
        if len(image_np.shape) != 3:
            return False

        # Extract extended dialogue region to catch border lines.
        coords = self.DIALOGUE_BOX_COORDS
        # Extend the search area 5px up and down to catch top and bottom borders.
        extended_region = image_np[
            max(0, coords['y'] - 5):min(image_np.shape[0], coords['y'] + coords['height'] + 5),
            coords['x']:coords['x'] + coords['width']
        ]

        if extended_region.size == 0:
            return False

        # Look for horizontal border lines using actual dialogue border colors.
        border_colors = [
            (66, 181, 132),   # Main teal border color from debug analysis
            (24, 165, 107),   # Secondary border color
            (57, 140, 49),    # Darker border variant
            (0, 255, 156),    # Bright border accent
            (115, 198, 165)   # Light border variant
        ]
        border_tolerance = 20  # Tolerance for color matching

        # Check each row for horizontal border lines.
        # NOTE(review): this is a per-pixel Python loop — O(rows*cols*colors);
        # could be vectorized with numpy if it shows up in profiles.
        border_line_rows = []
        height, width = extended_region.shape[:2]

        for row_idx in range(height):
            row_pixels = extended_region[row_idx]

            # Count border-colored pixels in this row.
            border_pixels_in_row = 0
            for pixel in row_pixels:
                # Check if pixel matches any of the border colors.
                for border_color in border_colors:
                    color_diff = np.sqrt(np.sum((pixel - np.array(border_color)) ** 2))
                    if color_diff <= border_tolerance:
                        border_pixels_in_row += 1
                        break  # Don't double-count pixels

            # If a significant portion of the row has border colors, treat it as a border line.
            border_percentage = border_pixels_in_row / width
            if border_percentage > 0.2:  # 20% of row width has border colors (lower threshold)
                border_line_rows.append(row_idx)

        # VERY strict detection to avoid false positives from environment colors.
        # NOTE(review): the four flags computed below are logged for debugging
        # but do NOT feed the final decision — only the "simplified detection"
        # further down determines is_visible. Confirm before removing.

        # Require many border lines for robust detection.
        has_sufficient_border_lines = len(border_line_rows) >= 5  # Need at least 5 border lines

        # MUST have top AND bottom border lines (no exceptions for false positive prevention).
        has_top_and_bottom_lines = False
        if len(border_line_rows) >= 3:
            # Check if we have lines at different heights (top and bottom).
            min_line = min(border_line_rows)
            max_line = max(border_line_rows)
            if max_line - min_line > 15:  # Lines must be at least 15 pixels apart (very strict)
                has_top_and_bottom_lines = True

        # Additional check: look for proper dialogue box pattern (rectangular border).
        has_rectangular_pattern = False
        if len(border_line_rows) >= 5:
            # Check if we have border lines spread across the dialogue region.
            height_quarter = height // 4
            top_lines = [r for r in border_line_rows if r < height_quarter]
            middle_lines = [r for r in border_line_rows if height_quarter <= r <= 3 * height_quarter]
            bottom_lines = [r for r in border_line_rows if r > 3 * height_quarter]

            # Must have lines in top AND bottom, and some in middle for a proper box.
            if len(top_lines) >= 2 and len(bottom_lines) >= 2 and len(middle_lines) >= 1:
                has_rectangular_pattern = True

        # Extra check: ensure lines are actually horizontal (consistent across width).
        has_proper_horizontal_lines = False
        if len(border_line_rows) >= 3:
            # Check that border lines extend across a significant width (not just scattered pixels).
            proper_lines = 0
            for row_idx in border_line_rows[:10]:  # Check first 10 lines
                row_pixels = extended_region[row_idx]
                border_pixels_in_row = 0
                for pixel in row_pixels:
                    for border_color in border_colors:
                        color_diff = np.sqrt(np.sum((pixel - np.array(border_color)) ** 2))
                        if color_diff <= border_tolerance:
                            border_pixels_in_row += 1
                            break

                # Line must span at least 50% of width to be considered a proper horizontal line.
                if border_pixels_in_row / width > 0.5:
                    proper_lines += 1

            if proper_lines >= 3:  # Need at least 3 proper horizontal lines
                has_proper_horizontal_lines = True

        # Log detection results.
        if self.debug_color_detection:
            logger.debug(f"Border line detection: Found {len(border_line_rows)} border horizontal lines")
            logger.debug(f"Line rows: {border_line_rows[:5]}")  # Show first 5
            logger.debug(f"Has sufficient lines (≥5): {has_sufficient_border_lines}")
            logger.debug(f"Has top+bottom lines (≥15px apart): {has_top_and_bottom_lines}")
            logger.debug(f"Has rectangular pattern: {has_rectangular_pattern}")
            logger.debug(f"Has proper horizontal lines (≥50% width): {has_proper_horizontal_lines}")

        # Final check: look for actual dialogue box background (light/white area inside borders).
        # NOTE(review): also diagnostic-only — not used in the final decision.
        has_dialogue_background = False
        if len(border_line_rows) >= 3:
            # Check middle area for dialogue background colors (light colors).
            middle_start = height // 4
            middle_end = 3 * height // 4
            middle_region = extended_region[middle_start:middle_end, width//4:3*width//4]

            if middle_region.size > 0:
                # Look for light background colors typical of dialogue boxes.
                light_pixels = 0
                total_pixels = middle_region.size // 3  # Divide by 3 for RGB

                for pixel in middle_region.reshape(-1, 3):
                    # Light colors: high brightness (sum of RGB > 400) or white-ish.
                    brightness = np.sum(pixel)
                    if brightness > 400 or (pixel[0] > 200 and pixel[1] > 200 and pixel[2] > 200):
                        light_pixels += 1

                light_percentage = light_pixels / total_pixels
                if light_percentage > 0.3:  # At least 30% of middle area should be light (dialogue background)
                    has_dialogue_background = True

        # Log all criteria.
        if self.debug_color_detection:
            logger.debug(f"Has dialogue background (light area): {has_dialogue_background}")

        # Use simplified detection method to avoid false positives:
        # this is the ONLY path that sets is_visible.
        # Check for white background in a small window around the region center.
        center_h = extended_region.shape[0] // 2
        center_w = extended_region.shape[1] // 2
        margin = 20

        center_area = extended_region[
            max(0, center_h - margin):min(extended_region.shape[0], center_h + margin),
            max(0, center_w - margin):min(extended_region.shape[1], center_w + margin)
        ]

        if center_area.size > 0:
            # Count white/light pixels (dialogue background).
            light_mask = (center_area[:,:,0] > 200) & (center_area[:,:,1] > 200) & (center_area[:,:,2] > 200)
            light_percentage = np.sum(light_mask) / light_mask.size

            # Count text-like colors (dark gray).
            text_mask = ((center_area[:,:,0] > 80) & (center_area[:,:,0] < 130) &
                         (center_area[:,:,1] > 80) & (center_area[:,:,1] < 130) &
                         (center_area[:,:,2] > 80) & (center_area[:,:,2] < 130))
            text_percentage = np.sum(text_mask) / text_mask.size

            # Simple, robust criteria: mostly light background plus some dark text.
            is_visible = light_percentage > 0.3 and text_percentage > 0.02

            if self.debug_color_detection:
                logger.debug(f"Simplified detection - Light bg: {light_percentage:.1%}, Text: {text_percentage:.1%}")
        else:
            is_visible = False

        if self.debug_color_detection:
            logger.debug(f"Dialogue box {'VISIBLE' if is_visible else 'NOT VISIBLE'} "
                         f"(found {len(border_line_rows)} border lines)")

        return is_visible

    except Exception as e:
        # Best-effort: detection failure is treated as "not visible".
        logger.debug(f"Dialogue box detection error: {e}")
        return False
def enable_color_debug(self, enabled: bool = True):
    """Enable/disable color detection debugging"""
    self.debug_color_detection = enabled
    # Announce the new state at info level.
    message = ("OCR color detection debugging enabled" if enabled
               else "OCR color detection debugging disabled")
    logger.info(message)
def analyze_dialogue_colors(self, screenshot: Image.Image) -> dict:
    """
    Analyze a screenshot to find the actual colors used in the dialogue box.
    This helps fine-tune the DIALOGUE_TEXT_COLORS list.

    Returns {} when the screenshot is missing, not RGB, or the dialogue
    region is empty; otherwise a dict with 'total_pixels', 'unique_colors'
    and up to 20 'top_colors' entries sorted by frequency.
    """
    if not screenshot:
        return {}

    frame = np.array(screenshot)
    if len(frame.shape) != 3:
        return {}

    # Crop the configured dialogue box rectangle.
    box = self.DIALOGUE_BOX_COORDS
    region = frame[
        box['y']:box['y'] + box['height'],
        box['x']:box['x'] + box['width']
    ]
    if region.size == 0:
        return {}

    # Tally every distinct RGB value in the region.
    flat = region.reshape(-1, 3)
    colors, freqs = np.unique(flat, axis=0, return_counts=True)

    # Indices of colors ordered most-frequent first.
    ranked = np.argsort(freqs)[::-1]

    top = []
    for idx in ranked[:min(20, len(colors))]:
        occurrences = freqs[idx]
        top.append({
            'rgb': tuple(colors[idx]),
            'count': int(occurrences),
            'percentage': round((occurrences / len(flat)) * 100, 2),
        })

    return {
        'total_pixels': len(flat),
        'unique_colors': len(colors),
        'top_colors': top,
    }
def print_color_analysis(self, screenshot: Image.Image):
    """Print color analysis in a readable format.

    Runs analyze_dialogue_colors() on *screenshot* and pretty-prints the
    top colors with a rough classification, then suggests candidate text
    colors worth adding to DIALOGUE_TEXT_COLORS. Console-only; returns None.
    """
    analysis = self.analyze_dialogue_colors(screenshot)

    if not analysis:
        print("❌ Could not analyze colors")
        return

    print(f"\n🎨 DIALOGUE COLOR ANALYSIS")
    print(f"={'='*50}")
    print(f"Total pixels: {analysis['total_pixels']:,}")
    print(f"Unique colors: {analysis['unique_colors']:,}")
    print(f"\nTop Colors (most frequent first):")
    print(f"{'Rank':<4} {'RGB Color':<20} {'Count':<8} {'%':<6} {'Color Type':<15}")
    print(f"{'-'*70}")

    # Show the 15 most frequent colors with a heuristic classification.
    for i, color_info in enumerate(analysis['top_colors'][:15], 1):
        rgb = color_info['rgb']
        count = color_info['count']
        pct = color_info['percentage']

        # Classify the color: near-white = background, dark = text/shadow,
        # near-equal channels = gray text, anything else = other.
        if rgb[0] > 240 and rgb[1] > 240 and rgb[2] > 240:
            color_type = "Background"
        elif rgb[0] < 120 and rgb[1] < 120 and rgb[2] < 120:
            color_type = "Text/Shadow"
        elif abs(rgb[0] - rgb[1]) < 10 and abs(rgb[1] - rgb[2]) < 10:
            color_type = "Gray text"
        else:
            color_type = "Other"

        print(f"{i:<4} {str(rgb):<20} {count:<8} {pct:<6.1f} {color_type:<15}")

    print(f"\n💡 Suggested dialogue colors to add:")
    suggested = []
    for color_info in analysis['top_colors'][:10]:
        rgb = color_info['rgb']
        # Suggest colors that look like text (not pure white background)
        # and occur often enough to matter.
        if rgb[0] < 200 and color_info['percentage'] > 0.5:
            suggested.append(rgb)

    for color in suggested[:5]:  # Show top 5 suggestions
        print(f"  {color},")

    print(f"{'='*50}")
def update_dialogue_colors_from_analysis(self, screenshot: Image.Image, threshold_percentage: float = 1.0):
    """
    Update DIALOGUE_TEXT_COLORS based on analysis of actual screenshot.
    Only adds colors that appear frequently enough (above threshold_percentage).

    At most 5 new colors are added per call; the log now reports the number
    actually added (previously it reported len(new_colors) even though only
    the first 5 were appended).

    NOTE(review): DIALOGUE_TEXT_COLORS looks like a class-level list — if so,
    extend() mutates it for ALL instances; confirm that is intended.
    """
    analysis = self.analyze_dialogue_colors(screenshot)

    if not analysis:
        logger.warning("Could not analyze colors to update dialogue colors")
        return

    # Find colors that appear frequently and look like text:
    # frequent enough, not pure white (background), and not already known.
    new_colors = []
    for color_info in analysis['top_colors']:
        rgb = color_info['rgb']
        pct = color_info['percentage']

        if (pct >= threshold_percentage and
                not (rgb[0] > 240 and rgb[1] > 240 and rgb[2] > 240) and
                rgb not in self.DIALOGUE_TEXT_COLORS):
            new_colors.append(rgb)

    if new_colors:
        # Cap at the top 5 candidates and log exactly what gets added.
        added = new_colors[:5]
        logger.info(f"Adding {len(added)} new dialogue colors from analysis")
        for color in added:
            logger.info(f"  Added color: {color}")

        self.DIALOGUE_TEXT_COLORS.extend(added)
    else:
        logger.info("No new dialogue colors found to add")
def analyze_dialogue_box_background(self, screenshot: Image.Image):
    """
    Analyze dialogue box region to find actual background colors.
    Useful for fine-tuning DIALOGUE_BOX_BACKGROUND_COLORS.

    Console-only diagnostic; prints a ranked color table and suggested
    background colors. Returns None.
    """
    analysis = self.analyze_dialogue_colors(screenshot)

    if not analysis:
        print("❌ Could not analyze dialogue box background")
        return

    print(f"\n📦 DIALOGUE BOX BACKGROUND ANALYSIS")
    print(f"{'='*50}")
    print(f"Total pixels: {analysis['total_pixels']:,}")
    print(f"Unique colors: {analysis['unique_colors']:,}")
    print(f"\nTop Background Colors (most frequent first):")
    print(f"{'Rank':<4} {'RGB Color':<20} {'Count':<8} {'%':<6} {'Type':<15}")
    print(f"{'-'*70}")

    for i, color_info in enumerate(analysis['top_colors'][:15], 1):
        rgb = color_info['rgb']
        count = color_info['count']
        pct = color_info['percentage']

        # Classify as likely background vs text:
        # very common colors are assumed to be background fill.
        if pct > 10:  # Very common = likely background
            color_type = "Background"
        elif rgb[0] < 150 and rgb[1] < 150 and rgb[2] < 150:
            color_type = "Text/Shadow"
        else:
            color_type = "Other"

        print(f"{i:<4} {str(rgb):<20} {count:<8} {pct:<6.1f} {color_type:<15}")

    print(f"\n💡 Suggested background colors (>5% pixels):")
    for color_info in analysis['top_colors'][:10]:
        rgb = color_info['rgb']
        pct = color_info['percentage']
        # Suggest colors that are common and not text-like (dark).
        if pct > 5.0 and not (rgb[0] < 150 and rgb[1] < 150 and rgb[2] < 150):
            print(f"  {rgb},")

    print(f"{'='*50}")
def test_dialogue_box_detection(self, screenshot: Image.Image):
    """Test dialogue box detection with detailed output for green line method.

    Runs is_dialogue_box_visible() with debug logging temporarily enabled,
    then independently scans the extended dialogue region for rows dominated
    by a single green border color and prints per-row statistics.

    Returns the boolean result of is_dialogue_box_visible().

    NOTE(review): the green color used here, (85, 204, 128), is NOT one of
    the border_colors used by is_dialogue_box_visible() — confirm which
    palette is current before trusting the per-row numbers.
    """
    print(f"\n🔍 DIALOGUE BOX DETECTION TEST (Green Line Method)")
    print(f"{'='*50}")

    # Enable debug mode for detailed output (restored at the end).
    old_debug = self.debug_color_detection
    self.debug_color_detection = True

    is_visible = self.is_dialogue_box_visible(screenshot)

    # Get detailed green line analysis.
    image_np = np.array(screenshot)
    coords = self.DIALOGUE_BOX_COORDS

    # Extended region for border detection (5px above and below the box).
    extended_region = image_np[
        max(0, coords['y'] - 5):min(image_np.shape[0], coords['y'] + coords['height'] + 5),
        coords['x']:coords['x'] + coords['width']
    ]

    height, width = extended_region.shape[:2]
    green_border_color = (85, 204, 128)
    green_tolerance = 15

    print(f"Search region: {coords['x']},{coords['y']-5} {coords['width']}x{height+10}")
    print(f"Green border color: {green_border_color}")
    print(f"Green tolerance: ±{green_tolerance}")

    # Analyze each row: count pixels within tolerance of the green color.
    green_line_rows = []
    for row_idx in range(height):
        row_pixels = extended_region[row_idx]

        green_pixels_in_row = 0
        for pixel in row_pixels:
            # Euclidean RGB distance to the reference green.
            color_diff = np.sqrt(np.sum((pixel - green_border_color) ** 2))
            if color_diff <= green_tolerance:
                green_pixels_in_row += 1

        green_percentage = green_pixels_in_row / width
        if green_percentage > 0.3:  # 30% threshold
            green_line_rows.append({
                'row': row_idx,
                'green_pixels': green_pixels_in_row,
                'percentage': green_percentage * 100
            })

    print(f"Found {len(green_line_rows)} green horizontal lines:")
    for line_info in green_line_rows[:5]:  # Show first 5
        row = line_info['row']
        pixels = line_info['green_pixels']
        pct = line_info['percentage']
        print(f"  Row {row}: {pixels}/{width} pixels ({pct:.1f}% green)")

    print(f"\nResult: {'✅ DIALOGUE BOX VISIBLE' if is_visible else '❌ NOT VISIBLE'}")
    print(f"{'='*50}")

    # Restore debug setting.
    self.debug_color_detection = old_debug

    return is_visible
def _validate_and_clean_text(self, text: str) -> Optional[str]:
    """Normalize OCR output and return it, or None when it fails validation."""
    if not text or len(text.strip()) < 3:
        return None

    # Collapse newlines, then all whitespace runs, into single spaces.
    cleaned = re.sub(r'\n+', ' ', text)
    cleaned = re.sub(r'\s+', ' ', cleaned)
    cleaned = cleaned.strip()

    # Reject implausible lengths for on-screen dialogue.
    if not 3 <= len(cleaned) <= 200:
        return None

    # Require at least half the characters to be letters
    # (filters detections of UI elements).
    letter_share = sum(ch.isalpha() for ch in cleaned) / len(cleaned)
    if letter_share < 0.5:
        return None

    # Final gate: comprehensive random-letter / nonsense filtering.
    if self._is_random_nonsense(cleaned):
        logger.debug(f"OCR validation: Rejected as random nonsense: '{cleaned[:50]}...'")
        return None

    return cleaned
def _is_random_nonsense(self, text: str) -> bool:
    """
    Comprehensive detection of random letter sequences and nonsense text.
    Catches any type of random letters that don't form meaningful dialogue.

    Returns True when *text* should be rejected as OCR noise.
    """
    if not text or len(text.strip()) < 3:
        return True

    text_lower = text.lower().strip()
    words = text_lower.split()
    # Original-case tokens, needed for the capitalization check (Pattern 5).
    original_words = text.strip().split()

    if len(words) == 0:
        return True

    # Pattern 1: Excessive single/double character "words"
    short_words = [w for w in words if len(w) <= 2]
    if len(short_words) > len(words) * 0.6:  # More than 60% are very short
        return True

    # Pattern 2: Repetitive patterns (like "a a a a a")
    word_counts = {}
    for word in words:
        word_counts[word] = word_counts.get(word, 0) + 1
    for word, count in word_counts.items():
        if len(word) <= 2 and count >= 3:  # Short word repeated 3+ times
            return True

    # Pattern 3: Too many words (dialogue is usually concise)
    if len(words) > 30:
        return True

    # Pattern 4: Check for valid English-like words
    valid_words = 0
    dialogue_words = {
        'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by',
        'you', 'i', 'we', 'they', 'he', 'she', 'it', 'this', 'that', 'these', 'those',
        'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'do', 'does', 'did',
        'will', 'would', 'could', 'should', 'can', 'may', 'might', 'must',
        'get', 'got', 'give', 'take', 'go', 'come', 'see', 'look', 'want', 'need', 'know', 'think',
        'pokemon', 'trainer', 'battle', 'items', 'store', 'pc', 'computer', 'use', 'hello', 'hi'
    }

    for word in words:
        clean_word = ''.join(c for c in word if c.isalnum()).lower()
        if len(clean_word) >= 2:
            # Known good word, or a plausible English letter pattern.
            if clean_word in dialogue_words:
                valid_words += 1
            elif self._has_valid_letter_pattern(clean_word):
                valid_words += 1

    # Need at least 30% valid words
    valid_ratio = valid_words / len(words) if len(words) > 0 else 0
    if valid_ratio < 0.3:
        return True

    # Pattern 5: Detect excessive mixed case (OCR noise pattern)
    # BUG FIX: this previously iterated the already-lowercased tokens, so
    # isupper() could never be True and the check was dead code. It now
    # inspects the original-case tokens as intended.
    mixed_case_words = 0
    for word in original_words:
        if len(word) >= 3:
            has_lower = any(c.islower() for c in word)
            has_upper = any(c.isupper() for c in word)
            if has_lower and has_upper and not word[0].isupper():  # Not normal capitalization
                mixed_case_words += 1

    if mixed_case_words > len(original_words) * 0.4:  # More than 40% have weird capitalization
        return True

    return False
def _has_valid_letter_pattern(self, word: str) -> bool:
|
|
957
|
+
"""Check if word has valid English-like letter patterns"""
|
|
958
|
+
if len(word) < 2:
|
|
959
|
+
return False
|
|
960
|
+
|
|
961
|
+
# Must have at least one vowel (unless very short)
|
|
962
|
+
vowels = 'aeiou'
|
|
963
|
+
has_vowel = any(c in vowels for c in word.lower())
|
|
964
|
+
if len(word) >= 3 and not has_vowel:
|
|
965
|
+
return False
|
|
966
|
+
|
|
967
|
+
# Check for reasonable consonant clusters
|
|
968
|
+
consonants = 'bcdfghjklmnpqrstvwxyz'
|
|
969
|
+
consonant_streak = 0
|
|
970
|
+
max_consonant_streak = 0
|
|
971
|
+
|
|
972
|
+
for char in word.lower():
|
|
973
|
+
if char in consonants:
|
|
974
|
+
consonant_streak += 1
|
|
975
|
+
max_consonant_streak = max(max_consonant_streak, consonant_streak)
|
|
976
|
+
else:
|
|
977
|
+
consonant_streak = 0
|
|
978
|
+
|
|
979
|
+
# Too many consonants in a row suggests OCR noise
|
|
980
|
+
if max_consonant_streak > 4:
|
|
981
|
+
return False
|
|
982
|
+
|
|
983
|
+
# Check for excessive repeated characters
|
|
984
|
+
repeated = 0
|
|
985
|
+
for i in range(len(word) - 1):
|
|
986
|
+
if word[i] == word[i + 1]:
|
|
987
|
+
repeated += 1
|
|
988
|
+
|
|
989
|
+
if repeated > len(word) * 0.4: # More than 40% repeated chars
|
|
990
|
+
return False
|
|
991
|
+
|
|
992
|
+
return True
|
|
993
|
+
|
|
994
|
+
def get_stable_dialogue_text(self, screenshot: Image.Image) -> Optional[str]:
    """
    Return dialogue text only once it has been identical across enough
    consecutive frames, filtering out transitional/partial text.
    """
    detected = self.detect_dialogue_from_screenshot(screenshot)

    # Track how many consecutive frames produced the same text.
    if detected == self.last_detected_text:
        self.stable_text_count += 1
    else:
        self.stable_text_count = 0
        self.last_detected_text = detected

    # Only surface non-empty text that has held steady long enough.
    stable_enough = self.stable_text_count >= self.text_stability_threshold
    return detected if (stable_enough and detected) else None
def create_ocr_detector() -> Optional[OCRDialogueDetector]:
    """Factory function to create OCR detector if available"""
    # Guard clause: without tesseract/pytesseract there is nothing to build.
    if not OCR_AVAILABLE:
        logger.warning("OCR not available - install pytesseract and tesseract-ocr system package")
        return None
    return OCRDialogueDetector()