synth-ai 0.2.8.dev4__py3-none-any.whl → 0.2.23.dev3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/README.md +1 -0
- examples/__init__.py +16 -0
- examples/analyze_semantic_words.sh +17 -0
- examples/baseline/banking77_baseline.py +243 -0
- examples/baseline/banking77_pipeline_baseline.py +294 -0
- examples/baseline/crafter_baseline.py +407 -0
- examples/baseline/pokemon_red_baseline.py +326 -0
- examples/baseline/simple_baseline.py +56 -0
- examples/baseline/warming_up_to_rl_baseline.py +239 -0
- examples/blog_posts/gepa/README.md +355 -0
- examples/blog_posts/gepa/configs/banking77_gepa_local.toml +95 -0
- examples/blog_posts/gepa/configs/banking77_gepa_test.toml +80 -0
- examples/blog_posts/gepa/configs/banking77_mipro_local.toml +50 -0
- examples/blog_posts/gepa/configs/banking77_pipeline_gepa_local.toml +101 -0
- examples/blog_posts/gepa/configs/banking77_pipeline_gepa_test.toml +96 -0
- examples/blog_posts/gepa/configs/hotpotqa_gepa_local.toml +57 -0
- examples/blog_posts/gepa/configs/hotpotqa_gepa_qwen.toml +35 -0
- examples/blog_posts/gepa/configs/hotpotqa_mipro_local.toml +51 -0
- examples/blog_posts/gepa/configs/hover_gepa_local.toml +57 -0
- examples/blog_posts/gepa/configs/hover_gepa_qwen.toml +35 -0
- examples/blog_posts/gepa/configs/hover_mipro_local.toml +51 -0
- examples/blog_posts/gepa/configs/ifbench_gepa_local.toml +57 -0
- examples/blog_posts/gepa/configs/ifbench_gepa_qwen.toml +35 -0
- examples/blog_posts/gepa/configs/ifbench_mipro_local.toml +51 -0
- examples/blog_posts/gepa/configs/pupa_gepa_local.toml +58 -0
- examples/blog_posts/gepa/configs/pupa_mipro_local.toml +52 -0
- examples/blog_posts/gepa/deploy_banking77_task_app.sh +54 -0
- examples/blog_posts/gepa/gepa_baseline.py +204 -0
- examples/blog_posts/gepa/query_prompts_example.py +97 -0
- examples/blog_posts/gepa/run_gepa_banking77.sh +112 -0
- examples/blog_posts/gepa/run_gepa_banking77_pipeline.sh +163 -0
- examples/blog_posts/gepa/task_apps.py +105 -0
- examples/blog_posts/gepa/test_gepa_local.sh +67 -0
- examples/blog_posts/gepa/verify_banking77_setup.sh +123 -0
- examples/blog_posts/mipro/README.md +415 -0
- examples/blog_posts/mipro/configs/banking77_mipro_local.toml +91 -0
- examples/blog_posts/mipro/configs/banking77_mipro_test.toml +87 -0
- examples/blog_posts/mipro/configs/banking77_pipeline_mipro_gemini_flash_lite_local.toml +98 -0
- examples/blog_posts/mipro/configs/banking77_pipeline_mipro_gpt41mini_local.toml +96 -0
- examples/blog_posts/mipro/configs/banking77_pipeline_mipro_local.toml +94 -0
- examples/blog_posts/mipro/configs/banking77_pipeline_mipro_test.toml +170 -0
- examples/blog_posts/mipro/deploy_banking77_pipeline_task_app.sh +59 -0
- examples/blog_posts/mipro/deploy_banking77_task_app.sh +41 -0
- examples/blog_posts/mipro/multi_step.md +79 -0
- examples/blog_posts/mipro/run_mipro_banking77.sh +191 -0
- examples/blog_posts/mipro/run_mipro_banking77_pipeline.sh +171 -0
- examples/blog_posts/mipro/run_mipro_banking77_pipeline_gemini_flash_lite.sh +177 -0
- examples/blog_posts/mipro/run_mipro_banking77_pipeline_gpt41mini.sh +173 -0
- examples/blog_posts/mipro/verify_banking77_setup.sh +117 -0
- examples/blog_posts/pokemon_vl/README.md +98 -0
- examples/blog_posts/pokemon_vl/configs/eval_gpt5nano.toml +26 -0
- examples/blog_posts/pokemon_vl/configs/eval_qwen3_vl.toml +27 -0
- examples/blog_posts/pokemon_vl/configs/eval_rl_final.toml +24 -0
- examples/blog_posts/pokemon_vl/configs/filter_high_reward.toml +10 -0
- examples/blog_posts/pokemon_vl/configs/train_rl_from_sft.toml +43 -0
- examples/blog_posts/pokemon_vl/configs/train_sft_qwen4b_vl.toml +40 -0
- examples/blog_posts/pokemon_vl/extract_images.py +239 -0
- examples/blog_posts/pokemon_vl/pokemon_vl_baseline.py +326 -0
- examples/blog_posts/pokemon_vl/run_eval_extract_images.py +209 -0
- examples/blog_posts/pokemon_vl/run_qwen_eval_extract_images.py +212 -0
- examples/blog_posts/pokemon_vl/text_box_analysis.md +106 -0
- examples/blog_posts/warming_up_to_rl/ARCHITECTURE.md +195 -0
- examples/blog_posts/warming_up_to_rl/FINAL_TEST_RESULTS.md +127 -0
- examples/blog_posts/warming_up_to_rl/INFERENCE_SUCCESS.md +132 -0
- examples/blog_posts/warming_up_to_rl/README.md +158 -0
- examples/blog_posts/warming_up_to_rl/SMOKE_TESTING.md +164 -0
- examples/blog_posts/warming_up_to_rl/SMOKE_TEST_COMPLETE.md +253 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_baseline_qwen32b_10x20.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b_10x20.toml +26 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_groq_qwen32b.toml +25 -0
- examples/blog_posts/warming_up_to_rl/configs/eval_openai_gpt_oss_120b.toml +29 -0
- examples/blog_posts/warming_up_to_rl/configs/filter_high_reward_dataset.toml +10 -0
- examples/blog_posts/warming_up_to_rl/configs/smoke_test.toml +75 -0
- examples/blog_posts/warming_up_to_rl/configs/train_rl_from_sft.toml +91 -0
- examples/blog_posts/warming_up_to_rl/configs/train_sft_qwen4b.toml +40 -0
- examples/blog_posts/warming_up_to_rl/warming_up_to_rl_baseline.py +187 -0
- examples/crafter_debug_render.py +186 -0
- examples/dev/qwen3_32b_qlora_4xh100.toml +45 -0
- examples/gepa/banking77_pipeline_gepa.toml +96 -0
- examples/gepa/multi_stage_gepa_example.toml +84 -0
- examples/gepa/run_gepa_banking77_pipeline.sh +157 -0
- examples/multi_step/SFT_README.md +147 -0
- examples/multi_step/configs/README_verilog_rl.md +77 -0
- examples/multi_step/configs/VERILOG_REWARDS.md +103 -0
- examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +196 -0
- examples/multi_step/configs/crafter_eval_synth_qwen4b.toml +35 -0
- examples/multi_step/configs/crafter_eval_text_only_groq_qwen32b.toml +36 -0
- examples/multi_step/configs/crafter_rl_outcome.toml +75 -0
- examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +145 -0
- examples/multi_step/configs/crafter_rl_stepwise_shaped.toml +84 -0
- examples/multi_step/configs/crafter_rl_stepwise_simple.toml +79 -0
- examples/multi_step/configs/crafter_rl_stepwise_simple_NEW_FORMAT.toml +105 -0
- examples/multi_step/configs/crafter_sft_qwen30b_lora.toml +62 -0
- examples/multi_step/configs/crafter_synth_backend.md +40 -0
- examples/multi_step/configs/verilog_eval_groq_qwen32b.toml +31 -0
- examples/multi_step/configs/verilog_eval_synth_qwen8b.toml +33 -0
- examples/multi_step/configs/verilog_rl_lora.toml +147 -0
- examples/multi_step/convert_traces_to_sft.py +84 -0
- examples/multi_step/crafter_rl_lora.md +70 -0
- examples/multi_step/judges/crafter_backend_judge.py +220 -0
- examples/multi_step/judges/verilog_backend_judge.py +234 -0
- examples/multi_step/readme.md +48 -0
- examples/multi_step/run_sft_qwen30b.sh +45 -0
- examples/multi_step/sse_metrics_streaming_notes.md +357 -0
- examples/multi_step/task_app_config_notes.md +494 -0
- examples/multi_step/verilog_rl_lora.md +218 -0
- examples/qwen_coder/README.md +102 -0
- examples/qwen_coder/_shared.py +113 -0
- examples/qwen_coder/configs/coder_lora_30b.toml +60 -0
- examples/qwen_coder/configs/coder_lora_4b.toml +61 -0
- examples/qwen_coder/configs/coder_lora_small.toml +57 -0
- examples/qwen_coder/generate_dataset.py +98 -0
- examples/qwen_coder/infer_ft_smoke.py +65 -0
- examples/qwen_coder/infer_prod_proxy.py +73 -0
- examples/qwen_coder/infer_via_synth.py +87 -0
- examples/qwen_coder/scripts/infer_coder.sh +19 -0
- examples/qwen_coder/scripts/train_coder_30b.sh +22 -0
- examples/qwen_coder/sft_full_17b.py +103 -0
- examples/qwen_coder/sft_lora_30b.py +110 -0
- examples/qwen_coder/subset_jsonl.py +39 -0
- examples/qwen_coder/todos.md +38 -0
- examples/qwen_coder/validate_jsonl.py +60 -0
- examples/qwen_vl/BUGS_AND_FIXES.md +232 -0
- examples/qwen_vl/IMAGE_VALIDATION_COMPLETE.md +271 -0
- examples/qwen_vl/IMAGE_VALIDATION_SUMMARY.md +260 -0
- examples/qwen_vl/INFERENCE_SFT_TESTS.md +412 -0
- examples/qwen_vl/NEXT_STEPS_2B.md +325 -0
- examples/qwen_vl/QUICKSTART.md +327 -0
- examples/qwen_vl/QUICKSTART_RL_VISION.md +110 -0
- examples/qwen_vl/README.md +152 -0
- examples/qwen_vl/RL_VISION_COMPLETE.md +475 -0
- examples/qwen_vl/RL_VISION_TESTING.md +333 -0
- examples/qwen_vl/SDK_VISION_INTEGRATION.md +328 -0
- examples/qwen_vl/SETUP_COMPLETE.md +274 -0
- examples/qwen_vl/VISION_TESTS_COMPLETE.md +489 -0
- examples/qwen_vl/VLM_PIPELINE_COMPLETE.md +242 -0
- examples/qwen_vl/__init__.py +2 -0
- examples/qwen_vl/collect_data_via_cli.md +415 -0
- examples/qwen_vl/collect_vision_traces.py +368 -0
- examples/qwen_vl/configs/crafter_rl_vision_qwen3vl4b.toml +110 -0
- examples/qwen_vl/configs/crafter_vlm_sft_example.toml +59 -0
- examples/qwen_vl/configs/eval_gpt4o_mini_vision.toml +26 -0
- examples/qwen_vl/configs/eval_gpt4o_vision_proper.toml +29 -0
- examples/qwen_vl/configs/eval_gpt5nano_vision.toml +26 -0
- examples/qwen_vl/configs/eval_qwen3vl_vision.toml +26 -0
- examples/qwen_vl/configs/filter_qwen3vl_sft.toml +49 -0
- examples/qwen_vl/configs/filter_vision_sft.toml +52 -0
- examples/qwen_vl/configs/filter_vision_test.toml +8 -0
- examples/qwen_vl/configs/sft_qwen3_vl_2b_test.toml +54 -0
- examples/qwen_vl/crafter_gpt5nano_agent.py +308 -0
- examples/qwen_vl/crafter_qwen_vl_agent.py +300 -0
- examples/qwen_vl/run_vision_comparison.sh +61 -0
- examples/qwen_vl/run_vision_sft_pipeline.sh +175 -0
- examples/qwen_vl/test_image_validation.py +201 -0
- examples/qwen_vl/test_sft_vision_data.py +110 -0
- examples/rl/README.md +169 -0
- examples/rl/configs/eval_base_qwen.toml +17 -0
- examples/rl/configs/eval_rl_qwen.toml +13 -0
- examples/rl/configs/rl_from_base_qwen.toml +62 -0
- examples/rl/configs/rl_from_base_qwen17.toml +80 -0
- examples/rl/configs/rl_from_ft_qwen.toml +37 -0
- examples/rl/download_dataset.py +80 -0
- examples/rl/run_eval.py +436 -0
- examples/rl/run_rl_and_save.py +111 -0
- examples/rl/task_app/README.md +21 -0
- examples/rl/task_app/math_single_step.py +990 -0
- examples/rl/task_app/math_task_app.py +111 -0
- examples/run_crafter_demo.sh +10 -0
- examples/sdk_prompt_learning_example.py +55 -0
- examples/sft/README.md +139 -0
- examples/sft/configs/crafter_fft_qwen0p6b.toml +49 -0
- examples/sft/configs/crafter_lora_qwen0p6b.toml +49 -0
- examples/sft/evaluate.py +117 -0
- examples/sft/export_dataset.py +120 -0
- examples/sft/generate_traces.py +164 -0
- examples/swe/__init__.py +12 -0
- examples/swe/task_app/README.md +135 -0
- examples/swe/task_app/__init__.py +2 -0
- examples/swe/task_app/grpo_swe_mini.py +604 -0
- examples/swe/task_app/grpo_swe_mini_task_app.py +124 -0
- examples/swe/task_app/hosted/README.md +173 -0
- examples/swe/task_app/hosted/__init__.py +5 -0
- examples/swe/task_app/hosted/branching.py +143 -0
- examples/swe/task_app/hosted/environment_routes.py +1289 -0
- examples/swe/task_app/hosted/envs/__init__.py +1 -0
- examples/swe/task_app/hosted/envs/crafter/__init__.py +6 -0
- examples/swe/task_app/hosted/envs/crafter/app.py +1 -0
- examples/swe/task_app/hosted/envs/crafter/environment.py +522 -0
- examples/swe/task_app/hosted/envs/crafter/policy.py +478 -0
- examples/swe/task_app/hosted/envs/crafter/react_agent.py +108 -0
- examples/swe/task_app/hosted/envs/crafter/shared.py +305 -0
- examples/swe/task_app/hosted/envs/crafter/tools.py +47 -0
- examples/swe/task_app/hosted/envs/mini_swe/__init__.py +8 -0
- examples/swe/task_app/hosted/envs/mini_swe/environment.py +1191 -0
- examples/swe/task_app/hosted/envs/mini_swe/policy.py +355 -0
- examples/swe/task_app/hosted/envs/mini_swe/shared.py +83 -0
- examples/swe/task_app/hosted/envs/mini_swe/tools.py +96 -0
- examples/swe/task_app/hosted/hosted_app.py +204 -0
- examples/swe/task_app/hosted/inference/__init__.py +5 -0
- examples/swe/task_app/hosted/inference/openai_client.py +584 -0
- examples/swe/task_app/hosted/main.py +100 -0
- examples/swe/task_app/hosted/policy_routes.py +1094 -0
- examples/swe/task_app/hosted/registry.py +195 -0
- examples/swe/task_app/hosted/rollout.py +1905 -0
- examples/swe/task_app/hosted/storage/__init__.py +5 -0
- examples/swe/task_app/hosted/storage/volume.py +211 -0
- examples/swe/task_app/hosted/test_agents.py +161 -0
- examples/swe/task_app/hosted/test_service.py +136 -0
- examples/swe/task_app/hosted/utils.py +62 -0
- examples/swe/task_app/morph_backend.py +178 -0
- examples/task_apps/IMAGE_ONLY_EVAL_QUICKSTART.md +258 -0
- examples/task_apps/TESTING.md +275 -0
- examples/task_apps/banking77/__init__.py +6 -0
- examples/task_apps/banking77/banking77_task_app.py +912 -0
- examples/task_apps/banking77/deploy_wrapper.py +46 -0
- examples/task_apps/banking77_pipeline/__init__.py +6 -0
- examples/task_apps/banking77_pipeline/banking77_pipeline_task_app.py +489 -0
- examples/task_apps/banking77_pipeline/deploy_wrapper.py +50 -0
- examples/task_apps/crafter/CREATE_SFT_DATASET.md +286 -0
- examples/task_apps/crafter/EVAL_IMAGE_ONLY_RESULTS.md +152 -0
- examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +187 -0
- examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +281 -0
- examples/task_apps/crafter/QUERY_EXAMPLES.md +203 -0
- examples/task_apps/crafter/README_IMAGE_ONLY_EVAL.md +316 -0
- examples/task_apps/crafter/eval_image_only_gpt4o.toml +28 -0
- examples/task_apps/crafter/eval_text_only_groq_llama.toml +36 -0
- examples/task_apps/crafter/filter_sft_dataset.toml +16 -0
- examples/task_apps/crafter/task_app/README.md +42 -0
- examples/task_apps/crafter/task_app/__init__.py +5 -0
- examples/task_apps/crafter/task_app/grpo_crafter.py +1055 -0
- examples/task_apps/crafter/task_app/grpo_crafter_task_app.py +146 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/README.md +173 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/__init__.py +5 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/branching.py +143 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/environment_routes.py +1226 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/__init__.py +1 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/__init__.py +6 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/app.py +1 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/environment.py +532 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +583 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +122 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/shared.py +305 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/tools.py +47 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/hosted_app.py +253 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/inference/__init__.py +5 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +999 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/main.py +100 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +1252 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/registry.py +195 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +2233 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/storage/__init__.py +5 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/storage/volume.py +211 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/test_agents.py +161 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/test_service.py +136 -0
- examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +411 -0
- examples/task_apps/dev/pokemon_emerald/__init__.py +2 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/README.md +811 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/__init__.py +120 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/action.py +160 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/memory.py +155 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/perception.py +69 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/planning.py +96 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/simple.py +1502 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/agent/system_prompt.py +4 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/grab_map.py +68 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/manual.py +216 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/__init__.py +35 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/emerald_utils.py +631 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/emulator.py +1544 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/enums.py +1428 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/memory_reader.py +4848 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/types.py +41 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pokemon_env/utils.py +298 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/pyproject.toml +95 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/run.py +204 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/app.py +2152 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/client.py +429 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server/frame_server.py +155 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/README.md +78 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/run_tests.py +122 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_agent_direct.py +76 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_agent_prompts.py +413 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_battle_state_formatting.py +204 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_dialogue_detection.py +133 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_dialogue_detection_comprehensive.py +229 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_direct_agent_emulator.py +300 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_fps_adjustment_pytest.py +205 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_house_to_outside_direct.py +200 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_house_to_outside_transition.py +284 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_map_ground_truth_comparison.py +468 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_memory_map.py +575 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_server_map_validation.py +311 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests/test_torchic_state.py +259 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/anticheat.py +372 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/checkpoint.py +296 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/error_handler.py +275 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/get_local_ip.py +22 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/helpers.py +44 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/llm_logger.py +514 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_formatter.py +415 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_stitcher.py +1763 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_stitcher_singleton.py +33 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_trimmer.py +106 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/map_visualizer.py +334 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/ocr_dialogue.py +1020 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/recording.py +188 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/state_formatter.py +1481 -0
- examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils/vlm.py +862 -0
- examples/task_apps/dev/pokemon_emerald/modal_app.py +114 -0
- examples/task_apps/dev/pokemon_emerald/task_app/README.md +81 -0
- examples/task_apps/dev/pokemon_emerald/task_app/__init__.py +6 -0
- examples/task_apps/dev/pokemon_emerald/task_app/pokemon_emerald.py +685 -0
- examples/task_apps/enron/__init__.py +2 -0
- examples/task_apps/enron/eval_groq_qwen32.toml +16 -0
- examples/task_apps/enron/filter_sft.toml +5 -0
- examples/task_apps/enron/task_app/README.md +14 -0
- examples/task_apps/enron/task_app/__init__.py +1 -0
- examples/task_apps/enron/task_app/grpo_enron.py +906 -0
- examples/task_apps/enron/task_app/grpo_enron_task_app.py +146 -0
- examples/task_apps/enron/tests/__init__.py +4 -0
- examples/task_apps/enron/tests/conftest.py +115 -0
- examples/task_apps/enron/tests/integration/__init__.py +4 -0
- examples/task_apps/enron/tests/integration/test_enron_eval.py +179 -0
- examples/task_apps/enron/tests/integration/test_enron_rollout.py +135 -0
- examples/task_apps/enron/tests/unit/__init__.py +4 -0
- examples/task_apps/enron/tests/unit/test_enron_environment.py +126 -0
- examples/task_apps/gepa_benchmarks/__init__.py +7 -0
- examples/task_apps/gepa_benchmarks/common.py +260 -0
- examples/task_apps/gepa_benchmarks/hotpotqa_task_app.py +507 -0
- examples/task_apps/gepa_benchmarks/hover_task_app.py +436 -0
- examples/task_apps/gepa_benchmarks/ifbench_task_app.py +563 -0
- examples/task_apps/gepa_benchmarks/pupa_task_app.py +460 -0
- examples/task_apps/math/README.md +21 -0
- examples/task_apps/math/math_single_step.py +1000 -0
- examples/task_apps/math/math_task_app.py +115 -0
- examples/task_apps/pokemon_battle/__init__.py +2 -0
- examples/task_apps/pokemon_battle/modal_app.py +104 -0
- examples/task_apps/pokemon_battle/task_app/README.md +68 -0
- examples/task_apps/pokemon_battle/task_app/__init__.py +6 -0
- examples/task_apps/pokemon_battle/task_app/pokemon_showdown.py +932 -0
- examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_COMPLETE.md +283 -0
- examples/task_apps/pokemon_red/EVAL_IMAGE_ONLY_STATUS.md +155 -0
- examples/task_apps/pokemon_red/README.md +356 -0
- examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +428 -0
- examples/task_apps/pokemon_red/__init__.py +3 -0
- examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +30 -0
- examples/task_apps/pokemon_red/eval_pokemon_red_policy.py +224 -0
- examples/task_apps/pokemon_red/pallet_town_rl_config.toml +75 -0
- examples/task_apps/pokemon_red/task_app.py +1048 -0
- examples/task_apps/pokemon_red/test_pallet_town_rewards.py +193 -0
- examples/task_apps/sokoban/README.md +306 -0
- examples/task_apps/sokoban/__init__.py +3 -0
- examples/task_apps/sokoban/eval_groq_qwen32.toml +16 -0
- examples/task_apps/sokoban/eval_openai_gpt5.toml +16 -0
- examples/task_apps/sokoban/filter_sft.toml +5 -0
- examples/task_apps/sokoban/task_app.py +1058 -0
- examples/task_apps/sokoban/tests/__init__.py +4 -0
- examples/task_apps/sokoban/tests/conftest.py +113 -0
- examples/task_apps/sokoban/tests/integration/__init__.py +4 -0
- examples/task_apps/sokoban/tests/integration/test_sokoban_eval.py +57 -0
- examples/task_apps/sokoban/tests/integration/test_sokoban_rollout.py +198 -0
- examples/task_apps/sokoban/tests/unit/__init__.py +4 -0
- examples/task_apps/sokoban/tests/unit/test_sokoban_environment.py +114 -0
- examples/task_apps/verilog/__init__.py +1 -0
- examples/task_apps/verilog/eval_groq_qwen32b.toml +22 -0
- examples/task_apps/verilog/filter_sft.toml +5 -0
- examples/task_apps/verilog/task_app/README.md +12 -0
- examples/task_apps/verilog/task_app/__init__.py +1 -0
- examples/task_apps/verilog/task_app/grpo_verilog.py +1166 -0
- examples/task_apps/verilog/task_app/grpo_verilog_task_app.py +145 -0
- examples/task_apps/verilog/tests/__init__.py +4 -0
- examples/task_apps/verilog/tests/conftest.py +115 -0
- examples/task_apps/verilog/tests/integration/__init__.py +4 -0
- examples/task_apps/verilog/tests/integration/test_verilog_eval.py +181 -0
- examples/task_apps/verilog/tests/integration/test_verilog_rollout.py +55 -0
- examples/task_apps/verilog/tests/unit/__init__.py +4 -0
- examples/task_apps/verilog/tests/unit/test_verilog_scoring.py +118 -0
- examples/tunnel_gepa_banking77/README.md +106 -0
- examples/tunnel_gepa_banking77/banking77_gepa_tunnel.toml +95 -0
- examples/tunnel_gepa_banking77/keep_tunnel_running.py +60 -0
- examples/tunnel_gepa_banking77/run_gepa_with_tunnel.sh +226 -0
- examples/vlm/PROPOSAL.md +53 -0
- examples/vlm/README.md +68 -0
- examples/vlm/configs/crafter_vlm_gpt4o.toml +49 -0
- examples/vlm/crafter_image_only_agent.py +207 -0
- examples/vlm/crafter_openai_vlm_agent.py +275 -0
- examples/vlm/filter_image_rows.py +63 -0
- examples/vlm/run_crafter_vlm_benchmark.py +316 -0
- examples/warming_up_to_rl/_utils.py +92 -0
- examples/warming_up_to_rl/analyze_trace_db.py +422 -0
- examples/warming_up_to_rl/configs/crafter_fft.toml +53 -0
- examples/warming_up_to_rl/configs/crafter_fft_4b.toml +54 -0
- examples/warming_up_to_rl/configs/eval_fft_qwen4b.toml +22 -0
- examples/warming_up_to_rl/configs/eval_groq_qwen32b.toml +15 -0
- examples/warming_up_to_rl/configs/eval_modal_qwen4b.toml +24 -0
- examples/warming_up_to_rl/configs/eval_stepwise_complex.toml +35 -0
- examples/warming_up_to_rl/configs/eval_stepwise_consistent.toml +26 -0
- examples/warming_up_to_rl/configs/eval_stepwise_per_achievement.toml +36 -0
- examples/warming_up_to_rl/configs/eval_stepwise_simple.toml +32 -0
- examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +85 -0
- examples/warming_up_to_rl/configs/rl_from_ft.toml +58 -0
- examples/warming_up_to_rl/export_trace_sft.py +837 -0
- examples/warming_up_to_rl/groq_test.py +97 -0
- examples/warming_up_to_rl/manage_secrets.py +131 -0
- examples/warming_up_to_rl/old/event_rewards.md +234 -0
- examples/warming_up_to_rl/old/notes.md +73 -0
- examples/warming_up_to_rl/readme.md +110 -0
- examples/warming_up_to_rl/run_eval.py +736 -0
- examples/warming_up_to_rl/run_fft_and_save.py +380 -0
- examples/warming_up_to_rl/run_local_rollout.py +239 -0
- examples/warming_up_to_rl/run_local_rollout_modal.py +248 -0
- examples/warming_up_to_rl/run_local_rollout_parallel.py +405 -0
- examples/warming_up_to_rl/run_local_rollout_traced.py +477 -0
- examples/warming_up_to_rl/run_rl_and_save.py +124 -0
- examples/warming_up_to_rl/run_rollout_remote.py +156 -0
- examples/warming_up_to_rl/task_app/README.md +42 -0
- examples/warming_up_to_rl/task_app/grpo_crafter.py +876 -0
- examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +135 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/README.md +173 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/branching.py +143 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/environment_routes.py +1226 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/__init__.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/__init__.py +6 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/app.py +1 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/environment.py +522 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +454 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +108 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/shared.py +305 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/tools.py +47 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +253 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +729 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/main.py +100 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +1114 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/registry.py +195 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +1891 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/__init__.py +5 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/storage/volume.py +211 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_agents.py +161 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/test_service.py +137 -0
- examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +129 -0
- examples/workflows/math_rl/configs/eval_base_qwen.toml +15 -0
- examples/workflows/math_rl/configs/eval_rl_qwen.toml +11 -0
- examples/workflows/math_rl/configs/rl_from_base_qwen.toml +62 -0
- examples/workflows/math_rl/configs/rl_from_base_qwen17.toml +80 -0
- examples/workflows/math_rl/configs/rl_from_ft_qwen.toml +35 -0
- examples/workflows/math_rl/download_dataset.py +80 -0
- examples/workflows/math_rl/run_eval.py +436 -0
- examples/workflows/math_rl/run_rl_and_save.py +111 -0
- synth_ai/__init__.py +47 -23
- synth_ai/_utils/__init__.py +47 -0
- synth_ai/_utils/base_url.py +10 -0
- synth_ai/_utils/http.py +10 -0
- synth_ai/_utils/prompts.py +10 -0
- synth_ai/_utils/task_app_state.py +12 -0
- synth_ai/_utils/user_config.py +10 -0
- synth_ai/api/models/supported.py +514 -0
- synth_ai/api/train/__init__.py +63 -0
- synth_ai/api/train/builders.py +473 -0
- synth_ai/api/train/cli.py +1185 -0
- synth_ai/api/train/config_finder.py +246 -0
- synth_ai/api/train/configs/__init__.py +65 -0
- synth_ai/api/train/configs/prompt_learning.py +496 -0
- synth_ai/api/train/configs/rl.py +188 -0
- synth_ai/api/train/configs/sft.py +99 -0
- synth_ai/api/train/configs/shared.py +81 -0
- synth_ai/api/train/env_resolver.py +352 -0
- synth_ai/api/train/pollers.py +91 -0
- synth_ai/api/train/prompt_learning.py +425 -0
- synth_ai/api/train/sft.py +390 -0
- synth_ai/api/train/supported_algos.py +147 -0
- synth_ai/api/train/task_app.py +195 -0
- synth_ai/api/train/utils.py +244 -0
- synth_ai/api/train/validators.py +1117 -0
- synth_ai/api/tunnel.py +49 -0
- synth_ai/auth/credentials.py +94 -0
- synth_ai/baseline/__init__.py +25 -0
- synth_ai/baseline/config.py +209 -0
- synth_ai/baseline/discovery.py +214 -0
- synth_ai/baseline/execution.py +146 -0
- synth_ai/cfgs.py +227 -0
- synth_ai/cli/__init__.py +90 -45
- synth_ai/cli/_modal_wrapper.py +31 -0
- synth_ai/cli/_storage.py +20 -0
- synth_ai/cli/_typer_patch.py +47 -0
- synth_ai/cli/_validate_task_app.py +29 -0
- synth_ai/cli/balance.py +16 -4
- synth_ai/cli/calc.py +36 -21
- synth_ai/cli/claude.py +70 -0
- synth_ai/cli/codex.py +267 -0
- synth_ai/cli/commands/__init__.py +18 -0
- synth_ai/cli/commands/baseline/__init__.py +12 -0
- synth_ai/cli/commands/baseline/core.py +637 -0
- synth_ai/cli/commands/baseline/list.py +93 -0
- synth_ai/cli/commands/demo/__init__.py +6 -0
- synth_ai/cli/commands/demo/core.py +163 -0
- synth_ai/cli/commands/eval/__init__.py +19 -0
- synth_ai/cli/commands/eval/core.py +1112 -0
- synth_ai/cli/commands/eval/errors.py +81 -0
- synth_ai/cli/commands/eval/validation.py +133 -0
- synth_ai/cli/commands/filter/__init__.py +12 -0
- synth_ai/cli/commands/filter/core.py +424 -0
- synth_ai/cli/commands/filter/errors.py +55 -0
- synth_ai/cli/commands/filter/validation.py +77 -0
- synth_ai/cli/commands/help/__init__.py +185 -0
- synth_ai/cli/commands/help/core.py +72 -0
- synth_ai/cli/commands/smoke/__init__.py +7 -0
- synth_ai/cli/commands/smoke/core.py +1437 -0
- synth_ai/cli/commands/status/__init__.py +66 -0
- synth_ai/cli/commands/status/client.py +192 -0
- synth_ai/cli/commands/status/config.py +92 -0
- synth_ai/cli/commands/status/errors.py +20 -0
- synth_ai/cli/commands/status/formatters.py +164 -0
- synth_ai/cli/commands/status/subcommands/__init__.py +9 -0
- synth_ai/cli/commands/status/subcommands/files.py +79 -0
- synth_ai/cli/commands/status/subcommands/jobs.py +334 -0
- synth_ai/cli/commands/status/subcommands/models.py +79 -0
- synth_ai/cli/commands/status/subcommands/pricing.py +22 -0
- synth_ai/cli/commands/status/subcommands/runs.py +81 -0
- synth_ai/cli/commands/status/subcommands/session.py +183 -0
- synth_ai/cli/commands/status/subcommands/summary.py +47 -0
- synth_ai/cli/commands/status/subcommands/usage.py +203 -0
- synth_ai/cli/commands/status/utils.py +114 -0
- synth_ai/cli/commands/train/__init__.py +53 -0
- synth_ai/cli/commands/train/core.py +21 -0
- synth_ai/cli/commands/train/errors.py +117 -0
- synth_ai/cli/commands/train/judge_schemas.py +200 -0
- synth_ai/cli/commands/train/judge_validation.py +305 -0
- synth_ai/cli/commands/train/validation.py +386 -0
- synth_ai/cli/demo.py +32 -140
- synth_ai/cli/deploy.py +233 -0
- synth_ai/cli/eval/__init__.py +36 -0
- synth_ai/cli/eval/core.py +5 -0
- synth_ai/cli/eval/errors.py +31 -0
- synth_ai/cli/eval/validation.py +5 -0
- synth_ai/cli/filter/__init__.py +28 -0
- synth_ai/cli/filter/core.py +5 -0
- synth_ai/cli/filter/errors.py +23 -0
- synth_ai/cli/filter/validation.py +5 -0
- synth_ai/cli/legacy_root_backup.py +28 -22
- synth_ai/cli/lib/__init__.py +10 -0
- synth_ai/cli/lib/task_app_discovery.py +7 -0
- synth_ai/cli/lib/task_app_env.py +518 -0
- synth_ai/cli/mcp.py +34 -0
- synth_ai/cli/modal_serve/__init__.py +12 -0
- synth_ai/cli/modal_serve/core.py +14 -0
- synth_ai/cli/modal_serve/errors.py +8 -0
- synth_ai/cli/modal_serve/validation.py +11 -0
- synth_ai/cli/opencode.py +256 -0
- synth_ai/cli/recent.py +13 -7
- synth_ai/cli/rl_demo.py +166 -114
- synth_ai/cli/root.py +143 -112
- synth_ai/cli/serve/__init__.py +12 -0
- synth_ai/cli/serve/core.py +14 -0
- synth_ai/cli/serve/errors.py +8 -0
- synth_ai/cli/serve/validation.py +11 -0
- synth_ai/cli/setup.py +49 -0
- synth_ai/cli/status.py +7 -125
- synth_ai/cli/task_app_deploy.py +7 -0
- synth_ai/cli/task_app_list.py +25 -0
- synth_ai/cli/task_app_modal_serve.py +11 -0
- synth_ai/cli/task_app_serve.py +11 -0
- synth_ai/cli/task_apps.py +3134 -0
- synth_ai/cli/traces.py +9 -5
- synth_ai/cli/train/__init__.py +12 -0
- synth_ai/cli/train/core.py +21 -0
- synth_ai/cli/train/errors.py +8 -0
- synth_ai/cli/train/validation.py +24 -0
- synth_ai/cli/train.py +5 -0
- synth_ai/cli/turso.py +73 -0
- synth_ai/cli/watch.py +13 -18
- synth_ai/demos/__init__.py +10 -0
- synth_ai/demos/core/__init__.py +28 -1
- synth_ai/demos/core/cli.py +745 -416
- synth_ai/demos/crafter/__init__.py +1 -0
- synth_ai/demos/crafter/crafter_fft_4b.toml +55 -0
- synth_ai/demos/crafter/grpo_crafter_task_app.py +185 -0
- synth_ai/demos/crafter/rl_from_base_qwen4b.toml +74 -0
- synth_ai/demos/demo_registry.py +176 -0
- synth_ai/demos/demo_task_apps/__init__.py +7 -1
- synth_ai/demos/demo_task_apps/core.py +75 -37
- synth_ai/demos/demo_task_apps/crafter/__init__.py +1 -0
- synth_ai/demos/demo_task_apps/crafter/configs/crafter_fft_4b.toml +53 -0
- synth_ai/demos/demo_task_apps/crafter/configs/rl_from_base_qwen4b.toml +73 -0
- synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +184 -0
- synth_ai/demos/demo_task_apps/math/_common.py +1 -2
- synth_ai/demos/demo_task_apps/math/app.py +2 -1
- synth_ai/demos/demo_task_apps/math/config.toml +55 -110
- synth_ai/demos/demo_task_apps/math/deploy_modal.py +3 -6
- synth_ai/demos/demo_task_apps/math/modal_task_app.py +491 -166
- synth_ai/demos/demo_task_apps/math/task_app_entry.py +37 -0
- synth_ai/demos/math/__init__.py +1 -0
- synth_ai/demos/math/_common.py +16 -0
- synth_ai/demos/math/app.py +38 -0
- synth_ai/demos/math/config.toml +76 -0
- synth_ai/demos/math/deploy_modal.py +54 -0
- synth_ai/demos/math/modal_task_app.py +703 -0
- synth_ai/demos/math/task_app_entry.py +51 -0
- synth_ai/environments/environment/core.py +7 -1
- synth_ai/environments/examples/bandit/engine.py +12 -5
- synth_ai/environments/examples/bandit/environment.py +0 -1
- synth_ai/environments/examples/bandit/taskset.py +4 -4
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +7 -4
- synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +9 -5
- synth_ai/environments/examples/crafter_classic/environment.py +93 -2
- synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +4 -3
- synth_ai/environments/examples/enron/engine.py +7 -2
- synth_ai/environments/examples/enron/environment.py +68 -0
- synth_ai/environments/examples/red/engine.py +60 -12
- synth_ai/environments/examples/red/engine_helpers/memory_map.py +7 -0
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +151 -179
- synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_progression.py +477 -0
- synth_ai/environments/examples/red/engine_helpers/state_extraction.py +32 -0
- synth_ai/environments/examples/red/environment.py +86 -0
- synth_ai/environments/examples/red/trace_hooks_v3.py +168 -0
- synth_ai/environments/examples/sokoban/taskset.py +116 -0
- synth_ai/environments/examples/verilog/engine.py +104 -12
- synth_ai/environments/examples/wordle/environment.py +0 -1
- synth_ai/environments/reproducibility/tree.py +5 -6
- synth_ai/environments/service/app.py +11 -12
- synth_ai/environments/service/core_routes.py +10 -9
- synth_ai/environments/stateful/engine.py +1 -1
- synth_ai/environments/tasks/core.py +1 -0
- synth_ai/environments/tasks/filters.py +5 -6
- synth_ai/environments/tasks/utils.py +4 -5
- synth_ai/evals/__init__.py +15 -0
- synth_ai/evals/base.py +14 -5
- synth_ai/evals/client.py +82 -0
- synth_ai/evals/types.py +42 -0
- synth_ai/http.py +8 -22
- synth_ai/http_client.py +45 -12
- synth_ai/inference/__init__.py +0 -2
- synth_ai/inference/client.py +21 -7
- synth_ai/jobs/client.py +129 -80
- synth_ai/judge_schemas.py +127 -0
- synth_ai/learning/__init__.py +51 -6
- synth_ai/learning/algorithms.py +14 -0
- synth_ai/learning/client.py +122 -30
- synth_ai/learning/config.py +2 -40
- synth_ai/learning/constants.py +0 -2
- synth_ai/learning/ft_client.py +4 -56
- synth_ai/learning/health.py +14 -8
- synth_ai/learning/jobs.py +43 -47
- synth_ai/learning/prompt_learning_client.py +276 -0
- synth_ai/learning/prompt_learning_types.py +185 -0
- synth_ai/{rl → learning/rl}/__init__.py +14 -5
- synth_ai/learning/rl/client.py +269 -0
- synth_ai/learning/rl/config.py +31 -0
- synth_ai/{rl → learning/rl}/contracts.py +5 -10
- synth_ai/{rl → learning/rl}/env_keys.py +45 -16
- synth_ai/learning/rl/secrets.py +13 -0
- synth_ai/learning/rl_client.py +2 -253
- synth_ai/learning/sft/__init__.py +29 -0
- synth_ai/learning/sft/client.py +68 -0
- synth_ai/learning/sft/config.py +270 -0
- synth_ai/learning/sft/data.py +698 -0
- synth_ai/learning/sse.py +25 -26
- synth_ai/learning/validators.py +29 -25
- synth_ai/mcp/__init__.py +5 -0
- synth_ai/mcp/__main__.py +8 -0
- synth_ai/mcp/main.py +254 -0
- synth_ai/mcp/setup.py +100 -0
- synth_ai/modal.py +257 -0
- synth_ai/pricing/__init__.py +3 -0
- synth_ai/pricing/model_pricing.py +64 -0
- synth_ai/session/__init__.py +75 -0
- synth_ai/session/client.py +383 -0
- synth_ai/session/constants.py +63 -0
- synth_ai/session/exceptions.py +105 -0
- synth_ai/session/manager.py +139 -0
- synth_ai/session/models.py +89 -0
- synth_ai/session/query.py +110 -0
- synth_ai/spec/__init__.py +46 -0
- synth_ai/spec/dataclasses.py +149 -0
- synth_ai/spec/loader.py +144 -0
- synth_ai/spec/serializer.py +199 -0
- synth_ai/spec/validation.py +250 -0
- synth_ai/streaming/__init__.py +29 -0
- synth_ai/streaming/config.py +94 -0
- synth_ai/streaming/handlers.py +589 -0
- synth_ai/streaming/streamer.py +320 -0
- synth_ai/streaming/types.py +95 -0
- synth_ai/task/__init__.py +116 -3
- synth_ai/task/apps/__init__.py +132 -0
- synth_ai/task/auth.py +165 -0
- synth_ai/task/client.py +167 -0
- synth_ai/task/config.py +261 -0
- synth_ai/task/contracts.py +173 -57
- synth_ai/task/datasets.py +108 -0
- synth_ai/task/errors.py +50 -0
- synth_ai/task/health.py +17 -11
- synth_ai/task/inference_api.py +101 -0
- synth_ai/task/json.py +111 -0
- synth_ai/task/proxy.py +251 -0
- synth_ai/task/rubrics/__init__.py +55 -0
- synth_ai/task/rubrics/loaders.py +156 -0
- synth_ai/task/rubrics/models.py +57 -0
- synth_ai/task/rubrics/scoring.py +116 -0
- synth_ai/task/rubrics/strict.py +149 -0
- synth_ai/task/rubrics.py +219 -0
- synth_ai/task/server.py +432 -0
- synth_ai/task/trace_correlation_helpers.py +328 -0
- synth_ai/task/tracing_utils.py +95 -0
- synth_ai/task/validators.py +449 -6
- synth_ai/task/vendors.py +59 -0
- synth_ai/tracing_v3/__init__.py +4 -0
- synth_ai/tracing_v3/abstractions.py +21 -4
- synth_ai/tracing_v3/config.py +167 -22
- synth_ai/tracing_v3/constants.py +21 -0
- synth_ai/tracing_v3/db_config.py +42 -29
- synth_ai/tracing_v3/decorators.py +80 -45
- synth_ai/tracing_v3/examples/basic_usage.py +15 -9
- synth_ai/tracing_v3/hooks.py +6 -4
- synth_ai/tracing_v3/llm_call_record_helpers.py +161 -61
- synth_ai/tracing_v3/migration_helper.py +1 -2
- synth_ai/tracing_v3/replica_sync.py +12 -7
- synth_ai/tracing_v3/serialization.py +130 -0
- synth_ai/tracing_v3/session_tracer.py +86 -21
- synth_ai/tracing_v3/storage/base.py +98 -12
- synth_ai/tracing_v3/storage/config.py +63 -16
- synth_ai/tracing_v3/storage/factory.py +11 -9
- synth_ai/tracing_v3/storage/utils.py +15 -11
- synth_ai/tracing_v3/trace_utils.py +317 -0
- synth_ai/tracing_v3/turso/__init__.py +8 -21
- synth_ai/tracing_v3/turso/daemon.py +123 -15
- synth_ai/tracing_v3/turso/models.py +5 -2
- synth_ai/tracing_v3/turso/native_manager.py +1293 -0
- synth_ai/tracing_v3/utils.py +5 -4
- synth_ai/tunnel.py +143 -0
- synth_ai/tunnel_deploy.py +278 -0
- synth_ai/types.py +8 -0
- synth_ai/urls.py +11 -0
- synth_ai/utils/__init__.py +166 -0
- synth_ai/utils/agents.py +74 -0
- synth_ai/utils/apps.py +152 -0
- synth_ai/utils/base_url.py +94 -0
- synth_ai/utils/bin.py +39 -0
- synth_ai/utils/claude.py +36 -0
- synth_ai/utils/cli.py +284 -0
- synth_ai/utils/config.py +81 -0
- synth_ai/utils/env.py +346 -0
- synth_ai/utils/errors.py +85 -0
- synth_ai/utils/http.py +172 -0
- synth_ai/utils/json.py +72 -0
- synth_ai/utils/log_filter.py +99 -0
- synth_ai/utils/logging.py +198 -0
- synth_ai/utils/modal.py +299 -0
- synth_ai/utils/paths.py +95 -0
- synth_ai/utils/process.py +233 -0
- synth_ai/utils/prompts.py +39 -0
- synth_ai/utils/sqld.py +122 -0
- synth_ai/utils/ssl.py +25 -0
- synth_ai/utils/task_app_discovery.py +882 -0
- synth_ai/utils/task_app_env.py +186 -0
- synth_ai/utils/task_app_state.py +318 -0
- synth_ai/utils/tunnel/__init__.py +12 -0
- synth_ai/utils/tunnel/config.py +55 -0
- synth_ai/utils/user_config.py +137 -0
- synth_ai/uvicorn.py +77 -0
- synth_ai-0.2.23.dev3.dist-info/METADATA +357 -0
- synth_ai-0.2.23.dev3.dist-info/RECORD +983 -0
- {synth_ai-0.2.8.dev4.dist-info → synth_ai-0.2.23.dev3.dist-info}/entry_points.txt +0 -1
- {synth_ai-0.2.8.dev4.dist-info → synth_ai-0.2.23.dev3.dist-info}/top_level.txt +1 -0
- synth_ai/cli/man.py +0 -106
- synth_ai/core/experiment.py +0 -15
- synth_ai/core/system.py +0 -15
- synth_ai/environments/examples/sokoban/units/astar_common.py +0 -95
- synth_ai/experimental/synth_oss.py +0 -446
- synth_ai/handshake.py +0 -63
- synth_ai/install_sqld.sh +0 -40
- synth_ai/learning/offline/dpo.py +0 -0
- synth_ai/learning/offline/providers.py +0 -7
- synth_ai/learning/offline/sft.py +0 -0
- synth_ai/learning/offline/shared.py +0 -0
- synth_ai/learning/online/grpo.py +0 -0
- synth_ai/learning/online/irft.py +0 -0
- synth_ai/learning/prompts/banking77_injection_eval.py +0 -168
- synth_ai/learning/prompts/gepa.py +0 -0
- synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +0 -213
- synth_ai/learning/prompts/mipro.py +0 -289
- synth_ai/learning/prompts/random_search.py +0 -246
- synth_ai/learning/prompts/run_mipro_banking77.py +0 -172
- synth_ai/learning/prompts/run_random_search_banking77.py +0 -324
- synth_ai/lm/__init__.py +0 -51
- synth_ai/lm/caching/constants.py +0 -6
- synth_ai/lm/caching/dbs.py +0 -0
- synth_ai/lm/caching/ephemeral.py +0 -102
- synth_ai/lm/caching/handler.py +0 -137
- synth_ai/lm/caching/initialize.py +0 -11
- synth_ai/lm/caching/persistent.py +0 -114
- synth_ai/lm/config.py +0 -110
- synth_ai/lm/constants.py +0 -32
- synth_ai/lm/core/__init__.py +0 -8
- synth_ai/lm/core/all.py +0 -73
- synth_ai/lm/core/exceptions.py +0 -7
- synth_ai/lm/core/main.py +0 -319
- synth_ai/lm/core/main_v3.py +0 -594
- synth_ai/lm/core/synth_models.py +0 -48
- synth_ai/lm/core/vendor_clients.py +0 -188
- synth_ai/lm/cost/monitor.py +0 -1
- synth_ai/lm/cost/statefulness.py +0 -1
- synth_ai/lm/injection.py +0 -80
- synth_ai/lm/overrides.py +0 -206
- synth_ai/lm/provider_support/__init__.py +0 -8
- synth_ai/lm/provider_support/anthropic.py +0 -972
- synth_ai/lm/provider_support/openai.py +0 -1139
- synth_ai/lm/provider_support/suppress_logging.py +0 -31
- synth_ai/lm/structured_outputs/handler.py +0 -440
- synth_ai/lm/structured_outputs/inject.py +0 -297
- synth_ai/lm/structured_outputs/rehabilitate.py +0 -185
- synth_ai/lm/tools/__init__.py +0 -3
- synth_ai/lm/tools/base.py +0 -172
- synth_ai/lm/unified_interface.py +0 -202
- synth_ai/lm/vendors/base.py +0 -81
- synth_ai/lm/vendors/core/anthropic_api.py +0 -387
- synth_ai/lm/vendors/core/gemini_api.py +0 -292
- synth_ai/lm/vendors/core/mistral_api.py +0 -322
- synth_ai/lm/vendors/core/openai_api.py +0 -225
- synth_ai/lm/vendors/core/synth_dev_api.py +0 -0
- synth_ai/lm/vendors/local/ollama.py +0 -0
- synth_ai/lm/vendors/openai_standard.py +0 -780
- synth_ai/lm/vendors/openai_standard_responses.py +0 -256
- synth_ai/lm/vendors/retries.py +0 -22
- synth_ai/lm/vendors/supported/custom_endpoint.py +0 -417
- synth_ai/lm/vendors/supported/deepseek.py +0 -69
- synth_ai/lm/vendors/supported/grok.py +0 -75
- synth_ai/lm/vendors/supported/groq.py +0 -16
- synth_ai/lm/vendors/supported/ollama.py +0 -15
- synth_ai/lm/vendors/supported/openrouter.py +0 -74
- synth_ai/lm/vendors/supported/together.py +0 -11
- synth_ai/lm/vendors/synth_client.py +0 -808
- synth_ai/lm/warmup.py +0 -186
- synth_ai/rl/secrets.py +0 -19
- synth_ai/scripts/verify_rewards.py +0 -100
- synth_ai/tracing/__init__.py +0 -30
- synth_ai/tracing_v1/__init__.py +0 -33
- synth_ai/tracing_v3/turso/manager.py +0 -760
- synth_ai/v0/tracing/abstractions.py +0 -224
- synth_ai/v0/tracing/base_client.py +0 -91
- synth_ai/v0/tracing/client_manager.py +0 -131
- synth_ai/v0/tracing/config.py +0 -142
- synth_ai/v0/tracing/context.py +0 -146
- synth_ai/v0/tracing/decorators.py +0 -682
- synth_ai/v0/tracing/events/__init__.py +0 -0
- synth_ai/v0/tracing/events/manage.py +0 -147
- synth_ai/v0/tracing/events/scope.py +0 -86
- synth_ai/v0/tracing/events/store.py +0 -228
- synth_ai/v0/tracing/immediate_client.py +0 -151
- synth_ai/v0/tracing/local.py +0 -18
- synth_ai/v0/tracing/log_client_base.py +0 -73
- synth_ai/v0/tracing/retry_queue.py +0 -186
- synth_ai/v0/tracing/trackers.py +0 -515
- synth_ai/v0/tracing/upload.py +0 -512
- synth_ai/v0/tracing/utils.py +0 -9
- synth_ai/v0/tracing_v1/__init__.py +0 -16
- synth_ai/v0/tracing_v1/abstractions.py +0 -224
- synth_ai/v0/tracing_v1/base_client.py +0 -91
- synth_ai/v0/tracing_v1/client_manager.py +0 -131
- synth_ai/v0/tracing_v1/config.py +0 -142
- synth_ai/v0/tracing_v1/context.py +0 -146
- synth_ai/v0/tracing_v1/decorators.py +0 -703
- synth_ai/v0/tracing_v1/events/__init__.py +0 -0
- synth_ai/v0/tracing_v1/events/manage.py +0 -147
- synth_ai/v0/tracing_v1/events/scope.py +0 -86
- synth_ai/v0/tracing_v1/events/store.py +0 -228
- synth_ai/v0/tracing_v1/immediate_client.py +0 -151
- synth_ai/v0/tracing_v1/local.py +0 -18
- synth_ai/v0/tracing_v1/log_client_base.py +0 -73
- synth_ai/v0/tracing_v1/retry_queue.py +0 -186
- synth_ai/v0/tracing_v1/trackers.py +0 -515
- synth_ai/v0/tracing_v1/upload.py +0 -527
- synth_ai/v0/tracing_v1/utils.py +0 -9
- synth_ai/zyk/__init__.py +0 -30
- synth_ai-0.2.8.dev4.dist-info/METADATA +0 -129
- synth_ai-0.2.8.dev4.dist-info/RECORD +0 -420
- {synth_ai/lm/caching → examples/task_apps}/__init__.py +0 -0
- {synth_ai/lm/cost → examples/task_apps/crafter}/__init__.py +0 -0
- {synth_ai/lm/structured_outputs → examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/server}/__init__.py +0 -0
- {synth_ai/lm/vendors → examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/tests}/__init__.py +0 -0
- {synth_ai/lm/vendors/core → examples/task_apps/dev/pokemon_emerald/external/pokeagent-speedrun/utils}/__init__.py +0 -0
- {synth_ai/lm/vendors/local → examples/task_apps/math}/__init__.py +0 -0
- {synth_ai/lm/vendors/supported → examples/workflows}/__init__.py +0 -0
- {synth_ai/v0/tracing → examples/workflows/math_rl}/__init__.py +0 -0
- /synth_ai/{compound/cais.py → cli/__main__.py} +0 -0
- /synth_ai/{learning/filtering.py → py.typed} +0 -0
- {synth_ai-0.2.8.dev4.dist-info → synth_ai-0.2.23.dev3.dist-info}/WHEEL +0 -0
- {synth_ai-0.2.8.dev4.dist-info → synth_ai-0.2.23.dev3.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,1139 +0,0 @@
|
|
|
1
|
-
import copy
|
|
2
|
-
import logging
|
|
3
|
-
import types
|
|
4
|
-
from collections import defaultdict
|
|
5
|
-
from dataclasses import dataclass
|
|
6
|
-
from inspect import isclass
|
|
7
|
-
|
|
8
|
-
import openai.resources
|
|
9
|
-
from langfuse import Langfuse
|
|
10
|
-
from langfuse.client import StatefulGenerationClient
|
|
11
|
-
from langfuse.decorators import langfuse_context
|
|
12
|
-
from langfuse.utils import _get_timestamp
|
|
13
|
-
from langfuse.utils.langfuse_singleton import LangfuseSingleton
|
|
14
|
-
from packaging.version import Version
|
|
15
|
-
from pydantic import BaseModel
|
|
16
|
-
from wrapt import wrap_function_wrapper
|
|
17
|
-
|
|
18
|
-
from synth_ai.lm.overrides import (
|
|
19
|
-
apply_injection as apply_injection_overrides,
|
|
20
|
-
)
|
|
21
|
-
from synth_ai.lm.overrides import (
|
|
22
|
-
apply_param_overrides,
|
|
23
|
-
apply_tool_overrides,
|
|
24
|
-
use_overrides_for_messages,
|
|
25
|
-
)
|
|
26
|
-
from synth_ai.lm.provider_support.suppress_logging import *
|
|
27
|
-
from synth_ai.tracing_v1.abstractions import MessageInputs
|
|
28
|
-
from synth_ai.tracing_v1.trackers import synth_tracker_async, synth_tracker_sync
|
|
29
|
-
|
|
30
|
-
try:
|
|
31
|
-
import openai
|
|
32
|
-
except ImportError as err:
|
|
33
|
-
raise ModuleNotFoundError(
|
|
34
|
-
"Please install OpenAI to use this feature: 'pip install openai'"
|
|
35
|
-
) from err
|
|
36
|
-
|
|
37
|
-
# CREDIT TO LANGFUSE FOR OPEN-SOURCING THE CODE THAT THIS IS BASED ON
|
|
38
|
-
# USING WITH MIT LICENSE PERMISSION
|
|
39
|
-
# https://langfuse.com
|
|
40
|
-
|
|
41
|
-
try:
|
|
42
|
-
from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI # noqa: F401
|
|
43
|
-
except ImportError:
|
|
44
|
-
AsyncAzureOpenAI = None
|
|
45
|
-
AsyncOpenAI = None
|
|
46
|
-
AzureOpenAI = None
|
|
47
|
-
OpenAI = None
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
# log = logging.getLogger("langfuse")
|
|
51
|
-
|
|
52
|
-
# Add logger configuration
|
|
53
|
-
logger = logging.getLogger(__name__)
|
|
54
|
-
logger.setLevel(logging.DEBUG) # Set to DEBUG to see all messages
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
@dataclass
|
|
58
|
-
class OpenAiDefinition:
|
|
59
|
-
module: str
|
|
60
|
-
object: str
|
|
61
|
-
method: str
|
|
62
|
-
type: str
|
|
63
|
-
sync: bool
|
|
64
|
-
min_version: str | None = None
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
OPENAI_METHODS_V0 = [
|
|
68
|
-
OpenAiDefinition(
|
|
69
|
-
module="openai",
|
|
70
|
-
object="ChatCompletion",
|
|
71
|
-
method="create",
|
|
72
|
-
type="chat",
|
|
73
|
-
sync=True,
|
|
74
|
-
),
|
|
75
|
-
OpenAiDefinition(
|
|
76
|
-
module="openai",
|
|
77
|
-
object="Completion",
|
|
78
|
-
method="create",
|
|
79
|
-
type="completion",
|
|
80
|
-
sync=True,
|
|
81
|
-
),
|
|
82
|
-
]
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
OPENAI_METHODS_V1 = [
|
|
86
|
-
OpenAiDefinition(
|
|
87
|
-
module="openai.resources.chat.completions",
|
|
88
|
-
object="Completions",
|
|
89
|
-
method="create",
|
|
90
|
-
type="chat",
|
|
91
|
-
sync=True,
|
|
92
|
-
),
|
|
93
|
-
OpenAiDefinition(
|
|
94
|
-
module="openai.resources.completions",
|
|
95
|
-
object="Completions",
|
|
96
|
-
method="create",
|
|
97
|
-
type="completion",
|
|
98
|
-
sync=True,
|
|
99
|
-
),
|
|
100
|
-
OpenAiDefinition(
|
|
101
|
-
module="openai.resources.chat.completions",
|
|
102
|
-
object="AsyncCompletions",
|
|
103
|
-
method="create",
|
|
104
|
-
type="chat",
|
|
105
|
-
sync=False,
|
|
106
|
-
),
|
|
107
|
-
OpenAiDefinition(
|
|
108
|
-
module="openai.resources.completions",
|
|
109
|
-
object="AsyncCompletions",
|
|
110
|
-
method="create",
|
|
111
|
-
type="completion",
|
|
112
|
-
sync=False,
|
|
113
|
-
),
|
|
114
|
-
OpenAiDefinition(
|
|
115
|
-
module="openai.resources.chat.completions",
|
|
116
|
-
object="Completions",
|
|
117
|
-
method="parse",
|
|
118
|
-
type="chat",
|
|
119
|
-
sync=True,
|
|
120
|
-
min_version="1.50.0",
|
|
121
|
-
),
|
|
122
|
-
OpenAiDefinition(
|
|
123
|
-
module="openai.resources.chat.completions",
|
|
124
|
-
object="AsyncCompletions",
|
|
125
|
-
method="parse",
|
|
126
|
-
type="chat",
|
|
127
|
-
sync=False,
|
|
128
|
-
min_version="1.50.0",
|
|
129
|
-
),
|
|
130
|
-
]
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
class OpenAiArgsExtractor:
|
|
134
|
-
def __init__(
|
|
135
|
-
self,
|
|
136
|
-
name=None,
|
|
137
|
-
metadata=None,
|
|
138
|
-
trace_id=None,
|
|
139
|
-
session_id=None,
|
|
140
|
-
user_id=None,
|
|
141
|
-
tags=None,
|
|
142
|
-
parent_observation_id=None,
|
|
143
|
-
langfuse_prompt=None, # we cannot use prompt because it's an argument of the old OpenAI completions API
|
|
144
|
-
**kwargs,
|
|
145
|
-
):
|
|
146
|
-
# logger.debug(f"OpenAiArgsExtractor initialized with kwargs: {kwargs}")
|
|
147
|
-
# raise NotImplementedError("This method is not implemented yet")
|
|
148
|
-
self.args = {}
|
|
149
|
-
self.args["name"] = name
|
|
150
|
-
self.args["metadata"] = (
|
|
151
|
-
metadata
|
|
152
|
-
if "response_format" not in kwargs
|
|
153
|
-
else {
|
|
154
|
-
**(metadata or {}),
|
|
155
|
-
"response_format": kwargs["response_format"].model_json_schema()
|
|
156
|
-
if isclass(kwargs["response_format"])
|
|
157
|
-
and issubclass(kwargs["response_format"], BaseModel)
|
|
158
|
-
else kwargs["response_format"],
|
|
159
|
-
}
|
|
160
|
-
)
|
|
161
|
-
self.args["trace_id"] = trace_id
|
|
162
|
-
self.args["session_id"] = session_id
|
|
163
|
-
self.args["user_id"] = user_id
|
|
164
|
-
self.args["tags"] = tags
|
|
165
|
-
self.args["parent_observation_id"] = parent_observation_id
|
|
166
|
-
self.args["langfuse_prompt"] = langfuse_prompt
|
|
167
|
-
self.kwargs = kwargs
|
|
168
|
-
|
|
169
|
-
def get_langfuse_args(self):
|
|
170
|
-
return {**self.args, **self.kwargs}
|
|
171
|
-
|
|
172
|
-
def get_openai_args(self):
|
|
173
|
-
return self.kwargs
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
def _langfuse_wrapper(func):
|
|
177
|
-
def _with_langfuse(open_ai_definitions, initialize):
|
|
178
|
-
def wrapper(wrapped, instance, args, kwargs):
|
|
179
|
-
return func(open_ai_definitions, initialize, wrapped, args, kwargs)
|
|
180
|
-
|
|
181
|
-
return wrapper
|
|
182
|
-
|
|
183
|
-
return _with_langfuse
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
def _extract_chat_prompt(kwargs: dict):
|
|
187
|
-
"""
|
|
188
|
-
Extracts the user input from prompts. Returns an array of messages or a dict with messages and functions.
|
|
189
|
-
"""
|
|
190
|
-
prompt = {}
|
|
191
|
-
|
|
192
|
-
if kwargs.get("functions") is not None:
|
|
193
|
-
prompt.update({"functions": kwargs["functions"]})
|
|
194
|
-
|
|
195
|
-
if kwargs.get("function_call") is not None:
|
|
196
|
-
prompt.update({"function_call": kwargs["function_call"]})
|
|
197
|
-
|
|
198
|
-
if kwargs.get("tools") is not None:
|
|
199
|
-
prompt.update({"tools": kwargs["tools"]})
|
|
200
|
-
|
|
201
|
-
# existing logic to handle the case when prompt is not empty
|
|
202
|
-
if prompt:
|
|
203
|
-
messages = _filter_image_data(kwargs.get("messages", []))
|
|
204
|
-
prompt.update({"messages": messages})
|
|
205
|
-
return prompt
|
|
206
|
-
else:
|
|
207
|
-
# fallback: just return filtered messages
|
|
208
|
-
messages = _filter_image_data(kwargs.get("messages", []))
|
|
209
|
-
return messages
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
def _extract_chat_response(kwargs: dict):
|
|
213
|
-
"""
|
|
214
|
-
Extracts the LLM output from the response.
|
|
215
|
-
"""
|
|
216
|
-
response = {
|
|
217
|
-
"role": kwargs.get("role"),
|
|
218
|
-
}
|
|
219
|
-
|
|
220
|
-
if kwargs.get("function_call") is not None:
|
|
221
|
-
response.update({"function_call": kwargs["function_call"]})
|
|
222
|
-
|
|
223
|
-
if kwargs.get("tool_calls") is not None:
|
|
224
|
-
response.update({"tool_calls": kwargs["tool_calls"]})
|
|
225
|
-
|
|
226
|
-
response["content"] = kwargs.get("content")
|
|
227
|
-
return response
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
def _get_langfuse_data_from_kwargs(
|
|
231
|
-
resource: OpenAiDefinition, langfuse: Langfuse, start_time, kwargs
|
|
232
|
-
):
|
|
233
|
-
# print("DEBUG: Entering _get_langfuse_data_from_kwargs")
|
|
234
|
-
# print("DEBUG: kwargs received:", kwargs)
|
|
235
|
-
|
|
236
|
-
name = kwargs.get("name", "OpenAI-generation")
|
|
237
|
-
# print("DEBUG: name =", name)
|
|
238
|
-
if name is None:
|
|
239
|
-
name = "OpenAI-generation"
|
|
240
|
-
|
|
241
|
-
if name is not None and not isinstance(name, str):
|
|
242
|
-
raise TypeError("name must be a string")
|
|
243
|
-
|
|
244
|
-
decorator_context_observation_id = langfuse_context.get_current_observation_id()
|
|
245
|
-
decorator_context_trace_id = langfuse_context.get_current_trace_id()
|
|
246
|
-
# print("DEBUG: decorator_context_observation_id =", decorator_context_observation_id)
|
|
247
|
-
# print("DEBUG: decorator_context_trace_id =", decorator_context_trace_id)
|
|
248
|
-
|
|
249
|
-
trace_id = kwargs.get("trace_id", None) or decorator_context_trace_id
|
|
250
|
-
# print("DEBUG: trace_id =", trace_id)
|
|
251
|
-
if trace_id is not None and not isinstance(trace_id, str):
|
|
252
|
-
raise TypeError("trace_id must be a string")
|
|
253
|
-
|
|
254
|
-
session_id = kwargs.get("session_id", None)
|
|
255
|
-
# print("DEBUG: session_id =", session_id)
|
|
256
|
-
if session_id is not None and not isinstance(session_id, str):
|
|
257
|
-
raise TypeError("session_id must be a string")
|
|
258
|
-
|
|
259
|
-
user_id = kwargs.get("user_id", None)
|
|
260
|
-
# print("DEBUG: user_id =", user_id)
|
|
261
|
-
if user_id is not None and not isinstance(user_id, str):
|
|
262
|
-
raise TypeError("user_id must be a string")
|
|
263
|
-
|
|
264
|
-
tags = kwargs.get("tags", None)
|
|
265
|
-
# print("DEBUG: tags =", tags)
|
|
266
|
-
if tags is not None and (
|
|
267
|
-
not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags)
|
|
268
|
-
):
|
|
269
|
-
raise TypeError("tags must be a list of strings")
|
|
270
|
-
|
|
271
|
-
if decorator_context_trace_id:
|
|
272
|
-
langfuse_context.update_current_trace(session_id=session_id, user_id=user_id, tags=tags)
|
|
273
|
-
|
|
274
|
-
parent_observation_id = kwargs.get("parent_observation_id", None) or (
|
|
275
|
-
decorator_context_observation_id
|
|
276
|
-
if decorator_context_observation_id != decorator_context_trace_id
|
|
277
|
-
else None
|
|
278
|
-
)
|
|
279
|
-
# print("DEBUG: parent_observation_id =", parent_observation_id)
|
|
280
|
-
if parent_observation_id is not None and not isinstance(parent_observation_id, str):
|
|
281
|
-
raise TypeError("parent_observation_id must be a string")
|
|
282
|
-
if parent_observation_id is not None and trace_id is None:
|
|
283
|
-
raise ValueError("parent_observation_id requires trace_id to be set")
|
|
284
|
-
|
|
285
|
-
metadata = kwargs.get("metadata", {})
|
|
286
|
-
# print("DEBUG: metadata =", metadata)
|
|
287
|
-
if metadata is not None and not isinstance(metadata, dict):
|
|
288
|
-
raise TypeError("metadata must be a dictionary")
|
|
289
|
-
|
|
290
|
-
prompt = None
|
|
291
|
-
if resource.type == "completion":
|
|
292
|
-
prompt = kwargs.get("prompt", None)
|
|
293
|
-
elif resource.type == "chat":
|
|
294
|
-
prompt = _extract_chat_prompt(kwargs)
|
|
295
|
-
# Extract model: first check top-level, then check inside 'inputs'
|
|
296
|
-
model = kwargs.get("model", None)
|
|
297
|
-
inputs = kwargs.get("inputs", {}) if kwargs.get("inputs", {}) else {}
|
|
298
|
-
if isinstance(inputs, dict):
|
|
299
|
-
# print("DEBUG: inputs =", inputs)
|
|
300
|
-
if "model_name" in inputs:
|
|
301
|
-
detailed_model = inputs["model_name"]
|
|
302
|
-
print("DEBUG: detailed_model =", detailed_model)
|
|
303
|
-
# If a detailed_model exists and is different from the top-level model, use it.
|
|
304
|
-
if detailed_model and (not model or model != detailed_model):
|
|
305
|
-
print("DEBUG: Upgrading model value from", model, "to", detailed_model)
|
|
306
|
-
model = detailed_model
|
|
307
|
-
# print("DEBUG: final model =", model)
|
|
308
|
-
|
|
309
|
-
# Extract model hyperparameters and add them to the new field 'model_params'
|
|
310
|
-
model_params = {
|
|
311
|
-
"temperature": kwargs.get("temperature", 1),
|
|
312
|
-
"max_tokens": kwargs.get("max_tokens", float("inf")),
|
|
313
|
-
"top_p": kwargs.get("top_p", 1),
|
|
314
|
-
"frequency_penalty": kwargs.get("frequency_penalty", 0),
|
|
315
|
-
"presence_penalty": kwargs.get("presence_penalty", 0),
|
|
316
|
-
}
|
|
317
|
-
if kwargs.get("seed", None) is not None:
|
|
318
|
-
model_params["seed"] = kwargs.get("seed", None)
|
|
319
|
-
|
|
320
|
-
is_nested_trace = False
|
|
321
|
-
if trace_id:
|
|
322
|
-
is_nested_trace = True
|
|
323
|
-
langfuse.trace(id=trace_id, session_id=session_id, user_id=user_id, tags=tags)
|
|
324
|
-
else:
|
|
325
|
-
trace_instance = langfuse.trace(
|
|
326
|
-
session_id=session_id,
|
|
327
|
-
user_id=user_id,
|
|
328
|
-
tags=tags,
|
|
329
|
-
name=name,
|
|
330
|
-
input=prompt,
|
|
331
|
-
metadata=metadata,
|
|
332
|
-
)
|
|
333
|
-
trace_id = trace_instance.id
|
|
334
|
-
# print("DEBUG: Generated new trace_id =", trace_id)
|
|
335
|
-
|
|
336
|
-
langfuse_prompt = kwargs.get("langfuse_prompt", None)
|
|
337
|
-
|
|
338
|
-
extracted_data = {
|
|
339
|
-
"name": name,
|
|
340
|
-
"metadata": metadata,
|
|
341
|
-
"trace_id": trace_id,
|
|
342
|
-
"parent_observation_id": parent_observation_id,
|
|
343
|
-
"user_id": user_id,
|
|
344
|
-
"start_time": start_time,
|
|
345
|
-
"input": prompt,
|
|
346
|
-
"model_params": {
|
|
347
|
-
"model_name": model or None,
|
|
348
|
-
"temperature": kwargs.get("temperature", 1),
|
|
349
|
-
"max_tokens": kwargs.get("max_tokens", float("inf")),
|
|
350
|
-
"top_p": kwargs.get("top_p", 1),
|
|
351
|
-
"frequency_penalty": kwargs.get("frequency_penalty", 0),
|
|
352
|
-
"presence_penalty": kwargs.get("presence_penalty", 0),
|
|
353
|
-
},
|
|
354
|
-
"prompt": langfuse_prompt,
|
|
355
|
-
}
|
|
356
|
-
|
|
357
|
-
# Add seed to model_params if present
|
|
358
|
-
if kwargs.get("seed", None) is not None:
|
|
359
|
-
extracted_data["model_params"]["seed"] = kwargs.get("seed", None)
|
|
360
|
-
|
|
361
|
-
# print("DEBUG: Exiting _get_langfuse_data_from_kwargs with extracted_data:")
|
|
362
|
-
# print(extracted_data)
|
|
363
|
-
# print("DEBUG: is_nested_trace =", is_nested_trace)
|
|
364
|
-
|
|
365
|
-
return extracted_data, is_nested_trace
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
def _create_langfuse_update(
    completion,
    generation: StatefulGenerationClient,
    completion_start_time,
    model=None,
    usage=None,
    model_params=None,
):
    """Send the final update for *generation* once a (streamed) completion ends.

    Args:
        completion: Final completion payload recorded as the generation output.
        generation: Langfuse generation client to update.
        completion_start_time: Timestamp of the first streamed chunk (or None).
        model: Model name reported by the response, if any.
        usage: Usage payload; attached only when not None.
        model_params: Hyperparameters to merge into the recorded model params.
    """
    update = {
        "end_time": _get_timestamp(),
        "output": completion,
        "completion_start_time": completion_start_time,
    }

    # Merge the caller-supplied hyperparameters with the model name.
    # (Bug fix: previously the ``model_params`` argument was shadowed by a
    # local dict and then self-updated, so the caller's hyperparameters were
    # silently dropped from the update event.)
    merged_params = {"model_name": model or None}
    if model_params:
        merged_params.update(model_params)
    update["model_params"] = merged_params

    if usage is not None:
        update["usage"] = usage

    generation.update(**update)
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
def _extract_streamed_openai_response(resource, chunks):
    """Fold a list of streamed OpenAI chunks into ``(model, completion, usage)``.

    NOTE(review): the per-chunk ``choices`` are read but never folded into
    ``completion`` (see the noqa'd line below), so for chat streams the
    returned completion stays an empty defaultdict — presumably the delta
    accumulation was removed or happens elsewhere; confirm against callers.
    """
    # logger.debug(f"Extracting streamed response for resource type: {resource.type}")
    # logger.debug(f"Number of chunks: {len(chunks)}")
    # Chat resources aggregate per-field text; plain completions use a string.
    completion = defaultdict(str) if resource.type == "chat" else ""
    model = None
    usage = None

    for chunk in chunks:
        if _is_openai_v1():
            # v1 SDK chunks are pydantic objects; view them as plain dicts.
            chunk = chunk.__dict__
        # logger.debug(f"Processing chunk: {chunk}")

        # Extract model name from chunk — first chunk carrying a model wins.
        model = model or chunk.get("model", None) or None

        # Extract usage information — usage normally arrives only on the final
        # chunk, so keep the last one seen.
        chunk_usage = chunk.get("usage", None)
        if chunk_usage is not None:
            if _is_openai_v1():
                chunk_usage = chunk_usage.__dict__
            usage = chunk_usage

        # Process choices
        choices = chunk.get("choices", [])  # noqa: F841
        # logger.debug(f"Extracted - model: {model}, choices: {choices}")

    # logger.debug(f"Final completion: {completion}")
    return model, completion, usage
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
def _get_langfuse_data_from_default_response(resource: OpenAiDefinition, response):
    """Pull ``(model, completion, usage)`` out of a non-streaming response.

    *response* is a dict-like view of the SDK response; *resource* selects
    whether it is parsed as a text completion or a chat completion.
    """
    if response is None:
        return None, "<NoneType response returned from OpenAI>", None

    # Model name reported by the API, normalized to None when falsy.
    model_name = response.get("model", None) or None

    parsed_completion = None
    candidates = response.get("choices", [])
    if resource.type == "completion" and candidates:
        last = candidates[-1]
        parsed_completion = last.text if _is_openai_v1() else last.get("text", None)
    elif resource.type == "chat" and candidates:
        last = candidates[-1]
        if _is_openai_v1():
            parsed_completion = _extract_chat_response(last.message.__dict__)
        else:
            parsed_completion = last.get("message", None)

    # v1 SDK usage objects are pydantic models; expose them as plain dicts.
    usage_info = response.get("usage", None)
    if _is_openai_v1() and usage_info is not None:
        usage_info = usage_info.__dict__

    return model_name, parsed_completion, usage_info
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
def _is_openai_v1():
    """Return True when the installed openai SDK is version 1.0.0 or newer."""
    installed = Version(openai.__version__)
    return installed >= Version("1.0.0")
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
def _is_streaming_response(response):
    """Return True when *response* is a streaming object (generator or SDK Stream)."""
    if isinstance(response, types.GeneratorType):
        return True
    if isinstance(response, types.AsyncGeneratorType):
        return True
    # openai.Stream / openai.AsyncStream only exist on the v1 SDK.
    if _is_openai_v1() and isinstance(response, openai.Stream):
        return True
    if _is_openai_v1() and isinstance(response, openai.AsyncStream):
        return True
    return False
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
@_langfuse_wrapper
def _wrap(open_ai_resource: OpenAiDefinition, initialize, wrapped, args, kwargs):
    """Synchronous tracing wrapper installed around an OpenAI SDK method.

    Creates a Langfuse generation for the call, applies context-scoped prompt
    overrides, forwards to the real SDK method, records the result (or error)
    on the generation, and returns the SDK response unchanged. Streaming
    responses are wrapped so tracing is finalized when the stream is consumed.
    """
    new_langfuse: Langfuse = initialize()

    start_time = _get_timestamp()
    arg_extractor = OpenAiArgsExtractor(*args, **kwargs)

    # Split the call kwargs into Langfuse metadata vs. real OpenAI arguments.
    generation, is_nested_trace = _get_langfuse_data_from_kwargs(
        open_ai_resource, new_langfuse, start_time, arg_extractor.get_langfuse_args()
    )
    generation = new_langfuse.generation(**generation)
    try:
        openai_args = arg_extractor.get_openai_args()
        # Apply context-scoped injection to chat messages if present.
        # Overrides are best-effort: any failure falls back to the raw args.
        if isinstance(openai_args, dict) and "messages" in openai_args:
            try:
                with use_overrides_for_messages(openai_args["messages"]):  # type: ignore[arg-type]
                    openai_args["messages"] = apply_injection_overrides(openai_args["messages"])  # type: ignore[arg-type]
                    openai_args = apply_tool_overrides(openai_args)
                    openai_args = apply_param_overrides(openai_args)
            except Exception:
                pass
        openai_response = wrapped(**openai_args)

        if _is_streaming_response(openai_response):
            # Defer finalization until the caller consumes the stream.
            return LangfuseResponseGeneratorSync(
                resource=open_ai_resource,
                response=openai_response,
                generation=generation,
                langfuse=new_langfuse,
                is_nested_trace=is_nested_trace,
                kwargs=arg_extractor.get_openai_args(),
            )

        else:
            model, completion, usage = _get_langfuse_data_from_default_response(
                open_ai_resource,
                (openai_response and openai_response.__dict__)
                if _is_openai_v1()
                else openai_response,
            )
            model_params = {
                "model_name": model or None,
                "temperature": kwargs.get("temperature", 1),
                "max_tokens": kwargs.get("max_tokens", float("inf")),
                "top_p": kwargs.get("top_p", 1),
                "frequency_penalty": kwargs.get("frequency_penalty", 0),
                "presence_penalty": kwargs.get("presence_penalty", 0),
            }

            # Collect messages for the synth tracker, shaped per resource type.
            if open_ai_resource.type == "completion":
                user_prompt = arg_extractor.get_openai_args().get("prompt", "")
                messages = [{"role": "user", "content": user_prompt}]
                message_input = MessageInputs(messages=messages)

                # Track user input
                synth_tracker_sync.track_lm(
                    messages=message_input.messages,
                    model_name=model,
                    model_params=model_params,
                    finetune=False,
                )

                # Track assistant output separately
                assistant_message = [{"role": "assistant", "content": completion}]
                synth_tracker_sync.track_lm_output(
                    messages=assistant_message,
                    model_name=model,
                    model_params=model_params,
                    finetune=False,
                )

            elif open_ai_resource.type == "chat":
                messages = openai_args.get("messages", [])
                message_input = MessageInputs(messages=messages)

                # Track user input
                synth_tracker_sync.track_lm(
                    messages=message_input.messages,
                    model_name=model,
                    model_params=model_params,
                    finetune=False,
                )

                # Track assistant output separately.
                # NOTE(review): assumes ``completion`` is a dict with a
                # "content" key here (chat path) — confirm for tool-call-only
                # responses where content may be absent.
                assistant_message = [{"role": "assistant", "content": completion["content"]}]
                synth_tracker_sync.track_lm_output(
                    messages=assistant_message, model_name=model, finetune=False
                )

            else:
                message_input = MessageInputs(messages=[])

            # Use track_lm
            # synth_tracker_sync.track_lm(
            #     messages=message_input.messages,
            #     model_name=model,
            #     model_params=model_params,finetune=False,
            # )

            # Seed is only recorded when explicitly supplied by the caller.
            if kwargs.get("seed", None) is not None:
                model_params["seed"] = kwargs.get("seed", None)

            generation.update(
                model_params=model_params,
                output=completion,
                end_time=_get_timestamp(),
                usage=usage,
            )

            # Avoiding the trace-update if trace-id is provided by user.
            if not is_nested_trace:
                new_langfuse.trace(id=generation.trace_id, output=completion)

        return openai_response
    except Exception as ex:
        # log.warning(ex)
        # Record the failure on the generation before re-raising so the error
        # shows up in Langfuse with the call's hyperparameters.
        model = kwargs.get("model", None) or None
        model_params = {
            "model_name": model or None,
            "temperature": kwargs.get("temperature", 1),
            "max_tokens": kwargs.get("max_tokens", float("inf")),
            "top_p": kwargs.get("top_p", 1),
            "frequency_penalty": kwargs.get("frequency_penalty", 0),
            "presence_penalty": kwargs.get("presence_penalty", 0),
        }
        if kwargs.get("seed", None) is not None:
            model_params["seed"] = kwargs.get("seed", None)

        generation.update(
            end_time=_get_timestamp(),
            status_message=str(ex),
            level="ERROR",
            model_params=model_params,
            usage={"input_cost": 0, "output_cost": 0, "total_cost": 0},
        )
        raise ex
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
@_langfuse_wrapper
async def _wrap_async(open_ai_resource: OpenAiDefinition, initialize, wrapped, args, kwargs):
    """Async tracing wrapper installed around an OpenAI SDK coroutine method.

    Mirrors ``_wrap``: creates a Langfuse generation, applies context-scoped
    overrides, awaits the real SDK method, records the result (or error), and
    returns the SDK response unchanged. Streaming responses are wrapped so
    tracing is finalized when the async stream is consumed.
    """
    new_langfuse = initialize()
    start_time = _get_timestamp()
    arg_extractor = OpenAiArgsExtractor(*args, **kwargs)

    # Split the call kwargs into Langfuse metadata vs. real OpenAI arguments.
    generation, is_nested_trace = _get_langfuse_data_from_kwargs(
        open_ai_resource, new_langfuse, start_time, arg_extractor.get_langfuse_args()
    )
    generation = new_langfuse.generation(**generation)

    try:
        openai_args = arg_extractor.get_openai_args()
        # Apply context-scoped injection to chat messages if present.
        # Overrides are best-effort: any failure falls back to the raw args.
        if isinstance(openai_args, dict) and "messages" in openai_args:
            try:
                with use_overrides_for_messages(openai_args["messages"]):  # type: ignore[arg-type]
                    openai_args["messages"] = apply_injection_overrides(openai_args["messages"])  # type: ignore[arg-type]
                    openai_args = apply_tool_overrides(openai_args)
                    openai_args = apply_param_overrides(openai_args)
            except Exception:
                pass
        openai_response = await wrapped(**openai_args)

        if _is_streaming_response(openai_response):
            # Defer finalization until the caller consumes the async stream.
            return LangfuseResponseGeneratorAsync(
                resource=open_ai_resource,
                response=openai_response,
                generation=generation,
                langfuse=new_langfuse,
                is_nested_trace=is_nested_trace,
                kwargs=arg_extractor.get_openai_args(),
            )

        else:
            model, completion, usage = _get_langfuse_data_from_default_response(
                open_ai_resource,
                (openai_response and openai_response.__dict__)
                if _is_openai_v1()
                else openai_response,
            )
            model_params = {
                "model_name": model or None,
                "temperature": kwargs.get("temperature", 1),
                "max_tokens": kwargs.get("max_tokens", float("inf")),
                "top_p": kwargs.get("top_p", 1),
                "frequency_penalty": kwargs.get("frequency_penalty", 0),
                "presence_penalty": kwargs.get("presence_penalty", 0),
            }

            # Collect messages for the synth tracker, shaped per resource type.
            if open_ai_resource.type == "completion":
                user_prompt = arg_extractor.get_openai_args().get("prompt", "")
                messages = [{"role": "user", "content": user_prompt}]
                message_input = MessageInputs(messages=messages)

                # Track user input
                synth_tracker_async.track_lm(
                    messages=message_input.messages,
                    model_name=model,
                    model_params=model_params,
                    finetune=False,
                )

                # Track assistant output separately
                assistant_message = [{"role": "assistant", "content": completion}]
                synth_tracker_async.track_lm_output(
                    messages=assistant_message, model_name=model, finetune=False
                )

            elif open_ai_resource.type == "chat":
                messages = openai_args.get("messages", [])
                message_input = MessageInputs(messages=messages)

                # Track user input
                synth_tracker_async.track_lm(
                    messages=message_input.messages,
                    model_name=model,
                    model_params=model_params,
                    finetune=False,
                )

                # Track assistant output separately.
                # NOTE(review): assumes ``completion`` is a dict with a
                # "content" key here (chat path) — confirm for tool-call-only
                # responses where content may be absent.
                assistant_message = [{"role": "assistant", "content": completion["content"]}]
                synth_tracker_async.track_lm_output(
                    messages=assistant_message, model_name=model, finetune=False
                )

            else:
                message_input = MessageInputs(messages=[])

            # Use track_lm
            # synth_tracker_async.track_lm(
            #     messages=message_input.messages,
            #     model_name=model,
            #     model_params=model_params,finetune=False,
            # )

            # Create model_params dictionary.
            # NOTE(review): this rebuilds the same dict created above; the
            # sync ``_wrap`` does not repeat it — presumably redundant.
            model_params = {
                "model_name": model or None,
                "temperature": kwargs.get("temperature", 1),
                "max_tokens": kwargs.get("max_tokens", float("inf")),
                "top_p": kwargs.get("top_p", 1),
                "frequency_penalty": kwargs.get("frequency_penalty", 0),
                "presence_penalty": kwargs.get("presence_penalty", 0),
            }
            if kwargs.get("seed", None) is not None:
                model_params["seed"] = kwargs.get("seed", None)

            generation.update(
                model_params=model_params,
                output=completion,
                end_time=_get_timestamp(),
                usage=usage,
            )
            # Avoiding the trace-update if trace-id is provided by user.
            if not is_nested_trace:
                new_langfuse.trace(id=generation.trace_id, output=completion)

        return openai_response
    except Exception as ex:
        # Record the failure on the generation before re-raising so the error
        # shows up in Langfuse with the call's hyperparameters.
        model = kwargs.get("model", None) or None
        model_params = {
            "model_name": model or None,
            "temperature": kwargs.get("temperature", 1),
            "max_tokens": kwargs.get("max_tokens", float("inf")),
            "top_p": kwargs.get("top_p", 1),
            "frequency_penalty": kwargs.get("frequency_penalty", 0),
            "presence_penalty": kwargs.get("presence_penalty", 0),
        }
        if kwargs.get("seed", None) is not None:
            model_params["seed"] = kwargs.get("seed", None)

        generation.update(
            end_time=_get_timestamp(),
            status_message=str(ex),
            level="ERROR",
            model_params=model_params,
            usage={"input_cost": 0, "output_cost": 0, "total_cost": 0},
        )
        raise ex
|
|
757
|
-
|
|
758
|
-
async def close(self) -> None:
    """Close the response and release the connection.

    Automatically called if the response body is read to completion.
    """
    # NOTE(review): this coroutine takes ``self`` yet appears at module level
    # in this chunk — it mirrors LangfuseResponseGeneratorAsync.close and
    # looks like a stray duplicate; confirm its enclosing scope in the file.
    await self.response.close()
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
class OpenAILangfuse:
    """Installs Langfuse tracing wrappers onto the openai SDK's methods.

    ``register_tracing`` monkeypatches each traced SDK method via
    ``wrap_function_wrapper`` and exposes Langfuse configuration attributes
    directly on the ``openai`` module.
    """

    # Shared Langfuse client, created lazily by initialize().
    _langfuse: Langfuse | None = None

    def initialize(self):
        """Create (or fetch) the shared Langfuse client from openai-module settings."""
        self._langfuse = LangfuseSingleton().get(
            public_key=openai.langfuse_public_key,
            secret_key=openai.langfuse_secret_key,
            host=openai.langfuse_host,
            debug=openai.langfuse_debug,
            enabled=openai.langfuse_enabled,
            sdk_integration="openai",
            sample_rate=openai.langfuse_sample_rate,
        )

        return self._langfuse

    def flush(cls):
        # Flush pending Langfuse events.
        # NOTE(review): the parameter is named ``cls`` but there is no
        # @classmethod decorator — it is bound as an instance method via
        # ``openai.flush_langfuse`` below, so it behaves like ``self``.
        cls._langfuse.flush()

    def langfuse_auth_check(self):
        """Check if the provided Langfuse credentials (public and secret key) are valid.

        Raises:
            Exception: If no projects were found for the provided credentials.

        Note:
            This method is blocking. It is discouraged to use it in prod code.
        """
        if self._langfuse is None:
            self.initialize()

        return self._langfuse.auth_check()

    def register_tracing(self):
        """Wrap every applicable OpenAI SDK method with the tracing decorator."""
        resources = OPENAI_METHODS_V1 if _is_openai_v1() else OPENAI_METHODS_V0

        for resource in resources:
            # Skip methods that require a newer SDK than the one installed.
            if resource.min_version is not None and Version(openai.__version__) < Version(
                resource.min_version
            ):
                continue

            # Check if the method actually exists before trying to wrap it
            try:
                module = __import__(resource.module, fromlist=[resource.object])
                obj = getattr(module, resource.object, None)
                if obj and not hasattr(obj, resource.method):
                    continue  # Skip if method doesn't exist
            except (ImportError, AttributeError):
                continue  # Skip if module or object doesn't exist

            wrap_function_wrapper(
                resource.module,
                f"{resource.object}.{resource.method}",
                _wrap(resource, self.initialize)
                if resource.sync
                else _wrap_async(resource, self.initialize),
            )

        # Expose Langfuse configuration and helpers on the openai module so
        # callers can configure tracing via ``openai.langfuse_*`` attributes.
        openai.langfuse_public_key = None
        openai.langfuse_secret_key = None
        openai.langfuse_host = None
        openai.langfuse_debug = None
        openai.langfuse_enabled = True
        openai.langfuse_sample_rate = None
        openai.langfuse_mask = None
        openai.langfuse_auth_check = self.langfuse_auth_check
        openai.flush_langfuse = self.flush
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
# Patch the openai SDK at import time: instantiating and registering here
# means simply importing this module installs the tracing wrappers.
modifier = OpenAILangfuse()
modifier.register_tracing()
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
# DEPRECATED: Use `openai.langfuse_auth_check()` instead
|
|
841
|
-
def auth_check():
    """Deprecated module-level credential check; prefer ``openai.langfuse_auth_check()``."""
    client = modifier._langfuse
    if client is None:
        # initialize() stores and returns the shared Langfuse client.
        client = modifier.initialize()
    return client.auth_check()
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
def _filter_image_data(messages: list[dict]):
|
|
849
|
-
"""https://platform.openai.com/docs/guides/vision?lang=python
|
|
850
|
-
|
|
851
|
-
The messages array remains the same, but the 'image_url' is removed from the 'content' array.
|
|
852
|
-
It should only be removed if the value starts with 'data:image/jpeg;base64,'
|
|
853
|
-
|
|
854
|
-
"""
|
|
855
|
-
output_messages = copy.deepcopy(messages)
|
|
856
|
-
|
|
857
|
-
for message in output_messages:
|
|
858
|
-
content = (
|
|
859
|
-
message.get("content", None)
|
|
860
|
-
if isinstance(message, dict)
|
|
861
|
-
else getattr(message, "content", None)
|
|
862
|
-
)
|
|
863
|
-
|
|
864
|
-
if content is not None:
|
|
865
|
-
for index, item in enumerate(content):
|
|
866
|
-
if isinstance(item, dict) and item.get("image_url", None) is not None:
|
|
867
|
-
url = item["image_url"]["url"]
|
|
868
|
-
if url.startswith("data:image/"):
|
|
869
|
-
del content[index]["image_url"]
|
|
870
|
-
|
|
871
|
-
return output_messages
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
class LangfuseResponseGeneratorSync:
    """Wraps a synchronous OpenAI stream, recording chunks and finalizing tracing.

    Iterating yields the original chunks unchanged; once the stream is
    exhausted (or the generator is closed), ``_finalize`` extracts the
    aggregate completion and pushes the final update to Langfuse.
    """

    def __init__(
        self,
        *,
        resource,
        response,
        generation,
        langfuse,
        is_nested_trace,
        kwargs,
    ):
        # Accumulated raw chunks, consumed later by _finalize().
        self.items = []
        self.resource = resource
        self.response = response
        self.generation = generation
        self.langfuse = langfuse
        self.is_nested_trace = is_nested_trace
        self.kwargs = kwargs
        # Timestamp of the first chunk; recorded lazily on first yield.
        self.completion_start_time = None

    def __iter__(self):
        try:
            for i in self.response:
                self.items.append(i)

                if self.completion_start_time is None:
                    self.completion_start_time = _get_timestamp()

                yield i
        finally:
            # Runs on exhaustion, caller break, or generator close.
            self._finalize()

    def __next__(self):
        try:
            item = self.response.__next__()
            self.items.append(item)

            if self.completion_start_time is None:
                self.completion_start_time = _get_timestamp()

            return item

        except StopIteration:
            self._finalize()

            raise

    def __enter__(self):
        return self.__iter__()

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def _finalize(self):
        """Aggregate the streamed chunks and emit the final Langfuse update."""
        logger.debug("Entering _finalize() in LangfuseResponseGeneratorSync...")
        # First, extract values from the streamed response items
        model, completion, usage = _extract_streamed_openai_response(self.resource, self.items)
        logger.debug("Extracted model=%s, completion=%s, usage=%s", model, completion, usage)

        # Look through the streamed items for a detailed model in the additional "inputs"
        for item in self.items:
            if isinstance(item, dict):
                inputs = item.get("inputs")
                if isinstance(inputs, dict):
                    detailed = inputs.get("model_name")
                    if detailed and detailed != model:
                        logger.debug(
                            "Upgrading model value from %s to %s based on streamed inputs",
                            model,
                            detailed,
                        )
                        model = detailed
                        break
        logger.debug("Final model after _finalize check: %s", model)

        # Create model hyperparameters dictionary
        model_params = {
            "temperature": self.kwargs.get("temperature", 1),
            "max_tokens": self.kwargs.get("max_tokens", float("inf")),
            "top_p": self.kwargs.get("top_p", 1),
            "frequency_penalty": self.kwargs.get("frequency_penalty", 0),
            "presence_penalty": self.kwargs.get("presence_penalty", 0),
        }
        if self.kwargs.get("seed") is not None:
            model_params["seed"] = self.kwargs.get("seed")

        # Shape the tracked conversation per resource type.
        if self.resource.type == "completion":
            user_prompt = self.kwargs.get("prompt", "")
            messages = [
                {"role": "user", "content": user_prompt},
                {"role": "assistant", "content": completion},
            ]
            message_input = MessageInputs(messages=messages)
        elif self.resource.type == "chat":
            messages = self.kwargs.get("messages", [])
            logger.debug("Existing 'messages' from kwargs before appending: %s", messages)
            if isinstance(completion, dict) and "content" in completion:
                messages.append({"role": "assistant", "content": completion["content"]})
            message_input = MessageInputs(messages=messages)
            logger.debug("Final 'messages': %s", message_input.messages)
        else:
            message_input = MessageInputs(messages=[])

        logger.debug(
            "Calling track_lm (sync) with messages: %s, model: %s",
            message_input.messages,
            model,
        )
        synth_tracker_sync.track_lm(
            messages=message_input.messages,
            model_name=model,
            model_params=model_params,
            finetune=False,
        )

        # Avoid the trace update if a trace-id was provided by the user.
        if not self.is_nested_trace:
            self.langfuse.trace(id=self.generation.trace_id, output=completion)

        # Pass the updated model and hyperparameters downstream in the update event.
        _create_langfuse_update(
            completion,
            self.generation,
            self.completion_start_time,
            model=model,
            usage=usage,
            model_params=model_params,
        )
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
class LangfuseResponseGeneratorAsync:
    """Async counterpart of LangfuseResponseGeneratorSync.

    Wraps an async OpenAI stream, yields chunks unchanged, and finalizes the
    Langfuse generation once the stream is exhausted or closed.
    """

    def __init__(
        self,
        *,
        resource,
        response,
        generation,
        langfuse,
        is_nested_trace,
        kwargs,
    ):
        # logger.debug(f"LangfuseResponseGeneratorAsync initialized with kwargs: {kwargs}")
        # logger.debug(f"Resource type: {resource.type}")
        # Accumulated raw chunks, consumed later by _finalize().
        self.items = []
        self.resource = resource
        self.response = response
        self.generation = generation
        self.langfuse = langfuse
        self.is_nested_trace = is_nested_trace
        self.kwargs = kwargs
        # Timestamp of the first chunk; recorded lazily on first yield.
        self.completion_start_time = None

    async def __aiter__(self):
        try:
            async for i in self.response:
                self.items.append(i)

                if self.completion_start_time is None:
                    self.completion_start_time = _get_timestamp()

                yield i
        finally:
            # Runs on exhaustion, caller break, or generator close.
            await self._finalize()

    async def __anext__(self):
        try:
            item = await self.response.__anext__()
            self.items.append(item)

            if self.completion_start_time is None:
                self.completion_start_time = _get_timestamp()

            return item

        except StopAsyncIteration:
            await self._finalize()

            raise

    async def __aenter__(self):
        return self.__aiter__()

    async def __aexit__(self, exc_type, exc_value, traceback):
        pass

    async def _finalize(self):
        """Aggregate the streamed chunks and emit the final Langfuse update."""
        logger.debug("Entering _finalize() in LangfuseResponseGeneratorAsync...")
        model, completion, usage = _extract_streamed_openai_response(self.resource, self.items)
        logger.debug("Extracted model=%s, completion=%s, usage=%s", model, completion, usage)

        # Look through the streamed items for a detailed model in the additional "inputs"
        for item in self.items:
            if isinstance(item, dict):
                inputs = item.get("inputs")
                if isinstance(inputs, dict):
                    detailed = inputs.get("model_name")
                    if detailed and detailed != model:
                        logger.debug(
                            "Upgrading model value from %s to %s based on streamed inputs",
                            model,
                            detailed,
                        )
                        model = detailed
                        break
        logger.debug("Final model after _finalize check: %s", model)

        # Create model hyperparameters dictionary
        model_params = {
            "temperature": self.kwargs.get("temperature", 1),
            "max_tokens": self.kwargs.get("max_tokens", float("inf")),
            "top_p": self.kwargs.get("top_p", 1),
            "frequency_penalty": self.kwargs.get("frequency_penalty", 0),
            "presence_penalty": self.kwargs.get("presence_penalty", 0),
        }
        if self.kwargs.get("seed") is not None:
            model_params["seed"] = self.kwargs.get("seed")

        # Shape the tracked conversation per resource type.
        if self.resource.type == "completion":
            user_prompt = self.kwargs.get("prompt", "")
            messages = [
                {"role": "user", "content": user_prompt},
                {"role": "assistant", "content": completion},
            ]
            message_input = MessageInputs(messages=messages)
        elif self.resource.type == "chat":
            messages = self.kwargs.get("messages", [])
            logger.debug("Existing 'messages' from kwargs before appending: %s", messages)
            # If completion is a dict, ensure we extract 'content' safely
            if isinstance(completion, dict) and "content" in completion:
                messages.append({"role": "assistant", "content": completion["content"]})
            message_input = MessageInputs(messages=messages)
            logger.debug("Final 'messages': %s", message_input.messages)
        else:
            message_input = MessageInputs(messages=[])

        logger.debug(
            "Calling track_lm (async) with messages: %s, model: %s",
            message_input.messages,
            model,
        )
        synth_tracker_async.track_lm(
            messages=message_input.messages,
            model_name=model,
            model_params=model_params,
            finetune=False,
        )

        # Avoiding the trace-update if trace-id is provided by user.
        if not self.is_nested_trace:
            self.langfuse.trace(id=self.generation.trace_id, output=completion)

        _create_langfuse_update(
            completion,
            self.generation,
            self.completion_start_time,
            model=model,
            usage=usage,
            model_params=model_params,
        )

    async def close(self) -> None:
        """Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        await self.response.close()
|