code-muse 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_muse/__init__.py +26 -0
- code_muse/__main__.py +10 -0
- code_muse/agents/__init__.py +31 -0
- code_muse/agents/_builder.py +214 -0
- code_muse/agents/_compaction.py +506 -0
- code_muse/agents/_diagnostics.py +171 -0
- code_muse/agents/_history.py +382 -0
- code_muse/agents/_key_listeners.py +148 -0
- code_muse/agents/_non_streaming_render.py +148 -0
- code_muse/agents/_runtime.py +596 -0
- code_muse/agents/agent_creator_agent.py +603 -0
- code_muse/agents/agent_helios.py +47 -0
- code_muse/agents/agent_manager.py +740 -0
- code_muse/agents/agent_muse.py +78 -0
- code_muse/agents/agent_planning.py +44 -0
- code_muse/agents/agent_qa_melpomene.py +207 -0
- code_muse/agents/base_agent.py +194 -0
- code_muse/agents/event_stream_handler.py +361 -0
- code_muse/agents/json_agent.py +201 -0
- code_muse/agents/prompt_v3.py +521 -0
- code_muse/agents/subagent_stream_handler.py +273 -0
- code_muse/callbacks.py +941 -0
- code_muse/chatgpt_codex_client.py +333 -0
- code_muse/claude_cache_client.py +853 -0
- code_muse/cli_runner/__init__.py +319 -0
- code_muse/cli_runner/args.py +63 -0
- code_muse/cli_runner/loop.py +510 -0
- code_muse/cli_runner/resume.py +72 -0
- code_muse/cli_runner/runner.py +161 -0
- code_muse/command_line/__init__.py +1 -0
- code_muse/command_line/add_model_menu.py +1331 -0
- code_muse/command_line/agent_menu.py +674 -0
- code_muse/command_line/attachments.py +397 -0
- code_muse/command_line/autosave_menu.py +709 -0
- code_muse/command_line/clipboard.py +528 -0
- code_muse/command_line/colors_menu.py +530 -0
- code_muse/command_line/command_handler.py +262 -0
- code_muse/command_line/command_registry.py +150 -0
- code_muse/command_line/config_commands.py +711 -0
- code_muse/command_line/core_commands.py +740 -0
- code_muse/command_line/diff_menu.py +865 -0
- code_muse/command_line/file_path_completion.py +73 -0
- code_muse/command_line/load_context_completion.py +57 -0
- code_muse/command_line/model_picker_completion.py +512 -0
- code_muse/command_line/model_settings_menu.py +983 -0
- code_muse/command_line/onboarding_slides.py +162 -0
- code_muse/command_line/onboarding_wizard.py +337 -0
- code_muse/command_line/pagination.py +41 -0
- code_muse/command_line/pin_command_completion.py +329 -0
- code_muse/command_line/prompt_toolkit_completion.py +886 -0
- code_muse/command_line/session_commands.py +304 -0
- code_muse/command_line/shell_passthrough.py +145 -0
- code_muse/command_line/skills_completion.py +158 -0
- code_muse/command_line/types.py +18 -0
- code_muse/command_line/uc_menu.py +908 -0
- code_muse/command_line/utils.py +105 -0
- code_muse/command_line/wiggum_state.py +77 -0
- code_muse/config.py +1138 -0
- code_muse/config_agent.py +168 -0
- code_muse/config_appearance.py +241 -0
- code_muse/config_model.py +357 -0
- code_muse/config_security.py +73 -0
- code_muse/error_logging.py +132 -0
- code_muse/evals/__init__.py +35 -0
- code_muse/evals/eval_helpers.py +81 -0
- code_muse/evals/eval_runner.py +299 -0
- code_muse/evals/sample_evals/__init__.py +1 -0
- code_muse/evals/sample_evals/eval_frugal_reads.py +59 -0
- code_muse/evals/sample_evals/eval_memory_planning.py +31 -0
- code_muse/evals/sample_evals/eval_shell_efficiency.py +39 -0
- code_muse/evals/sample_evals/eval_tool_masking.py +33 -0
- code_muse/fs_scan_cache/__init__.py +31 -0
- code_muse/fs_scan_cache/invalidation_hooks.py +89 -0
- code_muse/fs_scan_cache/scan_cache_core.cpython-314-darwin.so +0 -0
- code_muse/fs_scan_cache/scan_cache_core.pyx +203 -0
- code_muse/fs_scan_cache/tool_integration.py +309 -0
- code_muse/fs_scan_cache/ttl_policy.py +44 -0
- code_muse/gemini_code_assist.py +383 -0
- code_muse/gemini_model.py +838 -0
- code_muse/hook_engine/README.md +105 -0
- code_muse/hook_engine/__init__.py +21 -0
- code_muse/hook_engine/aliases.py +153 -0
- code_muse/hook_engine/engine.py +221 -0
- code_muse/hook_engine/executor.py +347 -0
- code_muse/hook_engine/matcher.py +154 -0
- code_muse/hook_engine/models.py +245 -0
- code_muse/hook_engine/registry.py +114 -0
- code_muse/hook_engine/trust.py +268 -0
- code_muse/hook_engine/validator.py +144 -0
- code_muse/http_utils.py +360 -0
- code_muse/keymap.py +128 -0
- code_muse/list_filtering.py +26 -0
- code_muse/main.py +10 -0
- code_muse/messaging/__init__.py +259 -0
- code_muse/messaging/bus.py +621 -0
- code_muse/messaging/commands.py +166 -0
- code_muse/messaging/markdown_patches.py +57 -0
- code_muse/messaging/message_queue.py +397 -0
- code_muse/messaging/messages.py +591 -0
- code_muse/messaging/queue_console.py +269 -0
- code_muse/messaging/renderers.py +308 -0
- code_muse/messaging/rich_renderer.py +1158 -0
- code_muse/messaging/shimmer.py +154 -0
- code_muse/messaging/spinner/__init__.py +87 -0
- code_muse/messaging/spinner/console_spinner.py +250 -0
- code_muse/messaging/spinner/spinner_base.py +82 -0
- code_muse/messaging/subagent_console.py +458 -0
- code_muse/model_factory.py +1203 -0
- code_muse/model_switching.py +59 -0
- code_muse/model_utils.py +156 -0
- code_muse/models.json +66 -0
- code_muse/models_cache/__init__.py +26 -0
- code_muse/models_cache/blocking_lru_cache.py +98 -0
- code_muse/models_cache/cache_writer.py +86 -0
- code_muse/models_cache/sha256_hash.cpython-314-darwin.so +0 -0
- code_muse/models_cache/sha256_hash.pyx +34 -0
- code_muse/models_cache/startup_integration.py +75 -0
- code_muse/models_dev_api.json +1 -0
- code_muse/models_dev_parser.py +590 -0
- code_muse/motion.py +126 -0
- code_muse/plugins/__init__.py +471 -0
- code_muse/plugins/agent_skills/__init__.py +32 -0
- code_muse/plugins/agent_skills/config.py +176 -0
- code_muse/plugins/agent_skills/discovery.py +309 -0
- code_muse/plugins/agent_skills/downloader.py +389 -0
- code_muse/plugins/agent_skills/installer.py +19 -0
- code_muse/plugins/agent_skills/metadata.py +293 -0
- code_muse/plugins/agent_skills/prompt_builder.py +66 -0
- code_muse/plugins/agent_skills/register_callbacks.py +298 -0
- code_muse/plugins/agent_skills/remote_catalog.py +320 -0
- code_muse/plugins/agent_skills/skill_catalog.py +254 -0
- code_muse/plugins/agent_skills/skills_install_menu.py +690 -0
- code_muse/plugins/agent_skills/skills_menu.py +791 -0
- code_muse/plugins/autonomous_memory/__init__.py +39 -0
- code_muse/plugins/autonomous_memory/bm25_scorer.cpython-314-darwin.so +0 -0
- code_muse/plugins/autonomous_memory/bm25_scorer.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/autonomous_memory/bm25_scorer.pyx +291 -0
- code_muse/plugins/autonomous_memory/consolidation.py +82 -0
- code_muse/plugins/autonomous_memory/extraction.py +382 -0
- code_muse/plugins/autonomous_memory/lease_lock.py +105 -0
- code_muse/plugins/autonomous_memory/memory_injection.py +59 -0
- code_muse/plugins/autonomous_memory/register_callbacks.py +268 -0
- code_muse/plugins/autonomous_memory/secret_scanner.py +62 -0
- code_muse/plugins/autonomous_memory/session_scanner.py +163 -0
- code_muse/plugins/aws_bedrock/__init__.py +14 -0
- code_muse/plugins/aws_bedrock/config.py +99 -0
- code_muse/plugins/aws_bedrock/register_callbacks.py +241 -0
- code_muse/plugins/aws_bedrock/utils.py +153 -0
- code_muse/plugins/azure_foundry/README.md +238 -0
- code_muse/plugins/azure_foundry/__init__.py +15 -0
- code_muse/plugins/azure_foundry/config.py +125 -0
- code_muse/plugins/azure_foundry/discovery.py +187 -0
- code_muse/plugins/azure_foundry/register_callbacks.py +495 -0
- code_muse/plugins/azure_foundry/token.py +180 -0
- code_muse/plugins/azure_foundry/utils.py +345 -0
- code_muse/plugins/build_filter/__init__.py +1 -0
- code_muse/plugins/build_filter/register_callbacks.py +201 -0
- code_muse/plugins/build_filter/strategies/__init__.py +1 -0
- code_muse/plugins/build_filter/strategies/build.py +397 -0
- code_muse/plugins/chatgpt_oauth/__init__.py +6 -0
- code_muse/plugins/chatgpt_oauth/config.py +52 -0
- code_muse/plugins/chatgpt_oauth/oauth_flow.py +338 -0
- code_muse/plugins/chatgpt_oauth/register_callbacks.py +172 -0
- code_muse/plugins/chatgpt_oauth/test_plugin.py +301 -0
- code_muse/plugins/chatgpt_oauth/utils.py +538 -0
- code_muse/plugins/checkpointing/__init__.py +29 -0
- code_muse/plugins/checkpointing/checkpoint_hook.py +51 -0
- code_muse/plugins/checkpointing/conversation_snapshots.py +117 -0
- code_muse/plugins/checkpointing/register_callbacks.py +51 -0
- code_muse/plugins/checkpointing/restore_command.py +263 -0
- code_muse/plugins/checkpointing/rewind_shortcut.py +88 -0
- code_muse/plugins/checkpointing/shadow_git.py +90 -0
- code_muse/plugins/claude_code_hooks/__init__.py +1 -0
- code_muse/plugins/claude_code_hooks/config.py +188 -0
- code_muse/plugins/claude_code_hooks/register_callbacks.py +208 -0
- code_muse/plugins/claude_code_oauth/README.md +167 -0
- code_muse/plugins/claude_code_oauth/SETUP.md +93 -0
- code_muse/plugins/claude_code_oauth/__init__.py +25 -0
- code_muse/plugins/claude_code_oauth/config.py +52 -0
- code_muse/plugins/claude_code_oauth/fast_mode.py +124 -0
- code_muse/plugins/claude_code_oauth/prompt_handler.py +63 -0
- code_muse/plugins/claude_code_oauth/register_callbacks.py +547 -0
- code_muse/plugins/claude_code_oauth/test_fast_mode.py +165 -0
- code_muse/plugins/claude_code_oauth/test_plugin.py +283 -0
- code_muse/plugins/claude_code_oauth/token_refresh_heartbeat.py +237 -0
- code_muse/plugins/claude_code_oauth/utils.py +664 -0
- code_muse/plugins/copilot_auth/__init__.py +11 -0
- code_muse/plugins/copilot_auth/config.py +91 -0
- code_muse/plugins/copilot_auth/reasoning_client.py +409 -0
- code_muse/plugins/copilot_auth/register_callbacks.py +461 -0
- code_muse/plugins/copilot_auth/utils.py +584 -0
- code_muse/plugins/custom_commands/__init__.py +14 -0
- code_muse/plugins/custom_commands/args_injection.py +82 -0
- code_muse/plugins/custom_commands/command_discovery.py +89 -0
- code_muse/plugins/custom_commands/command_toml_schema.py +71 -0
- code_muse/plugins/custom_commands/register_callbacks.py +176 -0
- code_muse/plugins/customizable_commands/__init__.py +0 -0
- code_muse/plugins/customizable_commands/register_callbacks.py +136 -0
- code_muse/plugins/destructive_command_guard/__init__.py +14 -0
- code_muse/plugins/destructive_command_guard/detector.py +375 -0
- code_muse/plugins/destructive_command_guard/register_callbacks.py +148 -0
- code_muse/plugins/example_custom_command/README.md +280 -0
- code_muse/plugins/example_custom_command/register_callbacks.py +51 -0
- code_muse/plugins/file_permission_handler/__init__.py +4 -0
- code_muse/plugins/file_permission_handler/register_callbacks.py +441 -0
- code_muse/plugins/filter_engine/__init__.py +30 -0
- code_muse/plugins/filter_engine/classifier.py +153 -0
- code_muse/plugins/filter_engine/content_detector.py +184 -0
- code_muse/plugins/filter_engine/dispatcher.py +244 -0
- code_muse/plugins/filter_engine/register_callbacks.py +188 -0
- code_muse/plugins/filter_engine/registry.py +279 -0
- code_muse/plugins/filter_engine/strategies/__init__.py +8 -0
- code_muse/plugins/filter_engine/strategies/ast_compressor.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/ast_compressor.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/ast_compressor.pyx +348 -0
- code_muse/plugins/filter_engine/strategies/ast_parser.py +167 -0
- code_muse/plugins/filter_engine/strategies/code.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/code.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/code.pyx +584 -0
- code_muse/plugins/filter_engine/strategies/git.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/git.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/git.pyx +438 -0
- code_muse/plugins/filter_engine/strategies/json_compressor.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/json_compressor.pyx +253 -0
- code_muse/plugins/filter_engine/strategies/json_patterns.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/json_patterns.pyx +178 -0
- code_muse/plugins/filter_engine/strategies/lint.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/lint.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/lint.pyx +626 -0
- code_muse/plugins/filter_engine/strategies/test.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/test.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/test.pyx +431 -0
- code_muse/plugins/filter_engine/verbosity.py +63 -0
- code_muse/plugins/force_push_guard/__init__.py +5 -0
- code_muse/plugins/force_push_guard/detector.py +96 -0
- code_muse/plugins/force_push_guard/register_callbacks.py +144 -0
- code_muse/plugins/force_push_guard/test_detector.py +143 -0
- code_muse/plugins/frontend_emitter/__init__.py +25 -0
- code_muse/plugins/frontend_emitter/emitter.py +121 -0
- code_muse/plugins/frontend_emitter/register_callbacks.py +259 -0
- code_muse/plugins/gac/__init__.py +4 -0
- code_muse/plugins/gac/git_ops.py +136 -0
- code_muse/plugins/gac/prompt.py +191 -0
- code_muse/plugins/gac/register_callbacks.py +82 -0
- code_muse/plugins/hook_creator/__init__.py +1 -0
- code_muse/plugins/hook_creator/register_callbacks.py +34 -0
- code_muse/plugins/hook_manager/__init__.py +1 -0
- code_muse/plugins/hook_manager/config.py +289 -0
- code_muse/plugins/hook_manager/hooks_menu.py +563 -0
- code_muse/plugins/hook_manager/register_callbacks.py +227 -0
- code_muse/plugins/hook_monitor/register_callbacks.py +36 -0
- code_muse/plugins/mindpack/__init__.py +0 -0
- code_muse/plugins/mindpack/factory.py +930 -0
- code_muse/plugins/mindpack/judge.py +573 -0
- code_muse/plugins/mindpack/memory.py +100 -0
- code_muse/plugins/mindpack/mindpack_menu.py +1552 -0
- code_muse/plugins/mindpack/orchestration.py +605 -0
- code_muse/plugins/mindpack/register_callbacks.py +175 -0
- code_muse/plugins/mindpack/schemas.py +358 -0
- code_muse/plugins/mindpack/tools.py +387 -0
- code_muse/plugins/oauth_muse_html.py +226 -0
- code_muse/plugins/ollama_setup/__init__.py +5 -0
- code_muse/plugins/ollama_setup/completer.py +36 -0
- code_muse/plugins/ollama_setup/register_callbacks.py +410 -0
- code_muse/plugins/plan_command/__init__.py +0 -0
- code_muse/plugins/plan_command/register_callbacks.py +206 -0
- code_muse/plugins/plan_mode/__init__.py +37 -0
- code_muse/plugins/plan_mode/mode_cycling.py +40 -0
- code_muse/plugins/plan_mode/plan_generation.py +68 -0
- code_muse/plugins/plan_mode/plan_hooks.py +74 -0
- code_muse/plugins/plan_mode/plan_mode_tools.py +138 -0
- code_muse/plugins/plan_mode/register_callbacks.py +121 -0
- code_muse/plugins/plugin_trust/register_callbacks.py +140 -0
- code_muse/plugins/policy_engine/__init__.py +46 -0
- code_muse/plugins/policy_engine/approval_flow_integration.py +59 -0
- code_muse/plugins/policy_engine/policy_evaluator.py +75 -0
- code_muse/plugins/policy_engine/policy_file_discovery.py +90 -0
- code_muse/plugins/policy_engine/policy_toml_schema.py +115 -0
- code_muse/plugins/policy_engine/register_callbacks.py +112 -0
- code_muse/plugins/pop_command/__init__.py +1 -0
- code_muse/plugins/pop_command/register_callbacks.py +189 -0
- code_muse/plugins/prompt_newline/__init__.py +13 -0
- code_muse/plugins/prompt_newline/config.py +19 -0
- code_muse/plugins/prompt_newline/register_callbacks.py +159 -0
- code_muse/plugins/safety_status/__init__.py +0 -0
- code_muse/plugins/safety_status/register_callbacks.py +113 -0
- code_muse/plugins/semantic_compression/__init__.py +6 -0
- code_muse/plugins/semantic_compression/compressor.py +295 -0
- code_muse/plugins/semantic_compression/config.py +123 -0
- code_muse/plugins/semantic_compression/register_callbacks.py +320 -0
- code_muse/plugins/shell_minimizer/__init__.py +50 -0
- code_muse/plugins/shell_minimizer/builtin_filters.toml +393 -0
- code_muse/plugins/shell_minimizer/pipeline.py +556 -0
- code_muse/plugins/shell_minimizer/primitives.py +482 -0
- code_muse/plugins/shell_minimizer/register_callbacks.py +276 -0
- code_muse/plugins/shell_safety/__init__.py +6 -0
- code_muse/plugins/shell_safety/agent_shell_safety.py +69 -0
- code_muse/plugins/shell_safety/command_cache.py +149 -0
- code_muse/plugins/shell_safety/register_callbacks.py +202 -0
- code_muse/plugins/synthetic_status/__init__.py +1 -0
- code_muse/plugins/synthetic_status/register_callbacks.py +128 -0
- code_muse/plugins/synthetic_status/status_api.py +145 -0
- code_muse/plugins/token_caching/__init__.py +21 -0
- code_muse/plugins/token_caching/cache_hit_tracking.py +128 -0
- code_muse/plugins/token_caching/cacheable_prefix_detection.py +28 -0
- code_muse/plugins/token_caching/register_callbacks.py +54 -0
- code_muse/plugins/token_caching/stats_display.py +35 -0
- code_muse/plugins/token_tracking/__init__.py +26 -0
- code_muse/plugins/token_tracking/database.py +381 -0
- code_muse/plugins/token_tracking/edit_analyzer.py +97 -0
- code_muse/plugins/token_tracking/record.py +55 -0
- code_muse/plugins/token_tracking/register_callbacks.py +277 -0
- code_muse/plugins/token_tracking/reports.py +329 -0
- code_muse/plugins/universal_constructor/__init__.py +13 -0
- code_muse/plugins/universal_constructor/models.py +136 -0
- code_muse/plugins/universal_constructor/register_callbacks.py +47 -0
- code_muse/plugins/universal_constructor/registry.py +390 -0
- code_muse/plugins/universal_constructor/runner.py +474 -0
- code_muse/plugins/universal_constructor/safety.py +440 -0
- code_muse/plugins/universal_constructor/sandbox.py +584 -0
- code_muse/provider_identity.py +105 -0
- code_muse/pydantic_patches.py +410 -0
- code_muse/reopenable_async_client.py +233 -0
- code_muse/round_robin_model.py +151 -0
- code_muse/secret_storage.py +74 -0
- code_muse/security/__init__.py +1 -0
- code_muse/security/redaction.cpython-314-darwin.so +0 -0
- code_muse/security/redaction.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/security/redaction.pyx +135 -0
- code_muse/session_storage.py +565 -0
- code_muse/status_display.py +261 -0
- code_muse/stream_parser/__init__.py +76 -0
- code_muse/stream_parser/assistant_text_parser.py +90 -0
- code_muse/stream_parser/citation_parser.py +76 -0
- code_muse/stream_parser/inline_hidden_tag_parser.py +236 -0
- code_muse/stream_parser/proposed_plan_parser.py +158 -0
- code_muse/stream_parser/stream_text_chunk.py +23 -0
- code_muse/stream_parser/stream_text_parser.py +27 -0
- code_muse/stream_parser/tagged_line_parser.cpython-314-darwin.so +0 -0
- code_muse/stream_parser/tagged_line_parser.pyx +251 -0
- code_muse/stream_parser/utf8_stream_parser.cpython-314-darwin.so +0 -0
- code_muse/stream_parser/utf8_stream_parser.pyx +206 -0
- code_muse/summarization_agent.py +308 -0
- code_muse/terminal_utils.cpython-314-darwin.so +0 -0
- code_muse/terminal_utils.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/terminal_utils.pyx +483 -0
- code_muse/tools/__init__.py +459 -0
- code_muse/tools/agent_tools.py +613 -0
- code_muse/tools/ask_user_question/__init__.py +26 -0
- code_muse/tools/ask_user_question/constants.py +73 -0
- code_muse/tools/ask_user_question/demo_tui.py +55 -0
- code_muse/tools/ask_user_question/handler.py +232 -0
- code_muse/tools/ask_user_question/models.py +302 -0
- code_muse/tools/ask_user_question/registration.py +37 -0
- code_muse/tools/ask_user_question/renderers.py +336 -0
- code_muse/tools/ask_user_question/terminal_ui.py +327 -0
- code_muse/tools/ask_user_question/theme.py +156 -0
- code_muse/tools/ask_user_question/tui_loop.py +422 -0
- code_muse/tools/background_jobs.py +99 -0
- code_muse/tools/browser/__init__.py +37 -0
- code_muse/tools/browser/browser_control.py +289 -0
- code_muse/tools/browser/browser_interactions.py +545 -0
- code_muse/tools/browser/browser_locators.py +640 -0
- code_muse/tools/browser/browser_manager.py +376 -0
- code_muse/tools/browser/browser_navigation.py +251 -0
- code_muse/tools/browser/browser_screenshot.py +180 -0
- code_muse/tools/browser/browser_scripts.py +462 -0
- code_muse/tools/browser/browser_workflows.py +222 -0
- code_muse/tools/chrome_cdp/__init__.py +1070 -0
- code_muse/tools/chrome_cdp/register_callbacks.py +61 -0
- code_muse/tools/command_runner.py +1401 -0
- code_muse/tools/common.py +1407 -0
- code_muse/tools/display.py +87 -0
- code_muse/tools/file_modifications.py +1099 -0
- code_muse/tools/file_operations.py +860 -0
- code_muse/tools/image_tools.py +185 -0
- code_muse/tools/meetin_proxy/__init__.py +243 -0
- code_muse/tools/meetin_proxy/capture_addon.py +82 -0
- code_muse/tools/meetin_proxy/proxy_manager.py +326 -0
- code_muse/tools/meetin_proxy/register_callbacks.py +45 -0
- code_muse/tools/path_policy.py +219 -0
- code_muse/tools/skills_tools.py +586 -0
- code_muse/tools/subagent_context.py +158 -0
- code_muse/tools/tools_content.py +50 -0
- code_muse/tools/universal_constructor.py +965 -0
- code_muse/uvx_detection.py +241 -0
- code_muse/version_checker.py +86 -0
- code_muse-0.0.1.data/data/code_muse/models.json +66 -0
- code_muse-0.0.1.data/data/code_muse/models_dev_api.json +1 -0
- code_muse-0.0.1.dist-info/METADATA +845 -0
- code_muse-0.0.1.dist-info/RECORD +394 -0
- code_muse-0.0.1.dist-info/WHEEL +4 -0
- code_muse-0.0.1.dist-info/entry_points.txt +2 -0
- code_muse-0.0.1.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,506 @@
|
|
|
1
|
+
"""Message history compaction (truncation + summarization).
|
|
2
|
+
|
|
3
|
+
Replaces the old ``message_history_processor`` / ``message_history_accumulator``
|
|
4
|
+
pair from ``BaseAgent``. All logic here is free-function; the one stateful
|
|
5
|
+
entry point is ``make_history_processor(agent)`` which returns a closure that
|
|
6
|
+
pydantic-ai wires in as its ``history_processors`` callback.
|
|
7
|
+
|
|
8
|
+
The delayed-compaction globals and the retry-after-tool-calls plumbing from
|
|
9
|
+
the original god-class are **gone**. If compaction can't run safely right now
|
|
10
|
+
(pending tool calls + summarization strategy), we just skip it this cycle and
|
|
11
|
+
let the next ``history_processor`` invocation handle it.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import dataclasses
|
|
15
|
+
from collections.abc import Callable
|
|
16
|
+
from typing import Any
|
|
17
|
+
|
|
18
|
+
from pydantic_ai.messages import (
|
|
19
|
+
ModelMessage,
|
|
20
|
+
ModelRequest,
|
|
21
|
+
ModelResponse,
|
|
22
|
+
TextPart,
|
|
23
|
+
ThinkingPart,
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
from code_muse.agents._history import (
|
|
27
|
+
CompactionCache,
|
|
28
|
+
estimate_tokens_for_message,
|
|
29
|
+
filter_huge_messages,
|
|
30
|
+
has_pending_tool_calls,
|
|
31
|
+
hash_message,
|
|
32
|
+
prune_interrupted_tool_calls,
|
|
33
|
+
)
|
|
34
|
+
from code_muse.callbacks import (
|
|
35
|
+
on_message_history_processor_end,
|
|
36
|
+
on_message_history_processor_start,
|
|
37
|
+
)
|
|
38
|
+
from code_muse.config import (
|
|
39
|
+
get_compaction_strategy,
|
|
40
|
+
get_compaction_threshold,
|
|
41
|
+
get_protected_token_count,
|
|
42
|
+
)
|
|
43
|
+
from code_muse.messaging import emit_error, emit_info, emit_warning
|
|
44
|
+
from code_muse.messaging.spinner import SpinnerBase, update_spinner_context
|
|
45
|
+
from code_muse.summarization_agent import SummarizationError, run_summarization_sync
|
|
46
|
+
|
|
47
|
+
_SUMMARIZATION_INSTRUCTIONS = (
|
|
48
|
+
"The input will be a log of Agentic AI steps that have been taken"
|
|
49
|
+
" as well as user queries, etc. Summarize the contents of these steps."
|
|
50
|
+
" The high level details should remain but the bulk of the content from tool-call"
|
|
51
|
+
" responses should be compacted and summarized. For example if you see a tool-call"
|
|
52
|
+
" reading a file, and the file contents are large, then in your summary you might just"
|
|
53
|
+
" write: * used read_file on space_invaders.cpp - contents removed."
|
|
54
|
+
"\n Make sure your result is a bulleted list of all steps and interactions."
|
|
55
|
+
"\n\nNOTE: This summary represents older conversation history. "
|
|
56
|
+
"Recent messages are preserved separately."
|
|
57
|
+
)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def _find_safe_split_index(messages: list[ModelMessage], initial_split_idx: int) -> int:
    """Adjust split index so we never sever a tool_call from its tool_return."""
    if initial_split_idx <= 1:
        return initial_split_idx

    # Gather the tool_call_ids whose returns sit at/after the proposed split.
    return_ids = {
        tcid
        for message in messages[initial_split_idx:]
        for part in (getattr(message, "parts", []) or [])
        if getattr(part, "part_kind", None) == "tool-return"
        and (tcid := getattr(part, "tool_call_id", None))
    }
    if not return_ids:
        return initial_split_idx

    safe_idx = initial_split_idx
    # Walk backwards; never cross the system message at index 0.
    for idx in range(initial_split_idx - 1, 0, -1):
        candidate_parts = getattr(messages[idx], "parts", []) or []
        references_protected = any(
            getattr(part, "part_kind", None) == "tool-call"
            and getattr(part, "tool_call_id", None) in return_ids
            for part in candidate_parts
        )
        if not references_protected:
            # Tool calls and their returns are adjacent — first miss ends it.
            break
        safe_idx = idx

    return safe_idx
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def split_for_protected_summarization(
    messages: list[ModelMessage],
    protected_tokens: int,
    model_name: str | None = None,
    cache: CompactionCache | None = None,
) -> tuple[list[ModelMessage], list[ModelMessage]]:
    """Split messages into (to_summarize, protected) groups.

    The system message (index 0) is always protected. Starting from the most
    recent message, we accumulate messages into the protected zone until we
    hit ``protected_tokens``. Everything in-between becomes summarization
    fodder. The split point is adjusted to keep tool_call/tool_return pairs
    together.
    """
    if len(messages) <= 1:
        return [], messages

    token_count = cache.estimate_tokens if cache else estimate_tokens_for_message

    system_message = messages[0]
    # The system prompt always counts against the protected budget.
    budget_used = token_count(system_message, model_name)

    # Grow the protected tail newest-first until the budget is exhausted.
    tail: list[ModelMessage] = []
    for candidate in reversed(messages[1:]):
        cost = token_count(candidate, model_name)
        if budget_used + cost > protected_tokens:
            break
        tail.append(candidate)
        budget_used += cost

    tail.reverse()
    protected_messages = [system_message, *tail]

    # Never split before index 1, and never orphan a tool_call/tool_return pair.
    split_at = max(1, len(messages) - len(tail))
    split_at = _find_safe_split_index(messages, split_at)
    messages_to_summarize = messages[1:split_at]

    emit_info(
        f"🔒 Protecting {len(protected_messages)} recent messages "
        f"({budget_used} tokens, limit: {protected_tokens})"
    )
    emit_info(f"📝 Summarizing {len(messages_to_summarize)} older messages")

    return messages_to_summarize, protected_messages
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def truncate(
    messages: list[ModelMessage],
    protected_tokens: int,
    model_name: str | None = None,
    cache: CompactionCache | None = None,
) -> list[ModelMessage]:
    """Drop middle messages, keeping system prompt, optional thinking, and recent tail.

    Args:
        messages: Full history; index 0 is treated as the system message.
        protected_tokens: Token budget for the preserved recent tail.
        model_name: Passed through to token estimation.
        cache: Optional token-estimate cache; falls back to
            ``estimate_tokens_for_message`` when absent.

    Returns:
        The truncated history, with interrupted tool calls pruned.
    """
    if not messages:
        return messages

    _tok = cache.estimate_tokens if cache else estimate_tokens_for_message

    emit_info("Truncating message history to manage token usage")
    result: list[ModelMessage] = [messages[0]]

    # Preserve the 2nd message if it's an extended-thinking context.
    skip_second = False
    if len(messages) > 1:
        second_msg = messages[1]
        if any(isinstance(part, ThinkingPart) for part in second_msg.parts):
            result.append(second_msg)
            skip_second = True

    start_idx = 2 if skip_second else 1

    # Walk the tail newest-first, keeping messages until the budget is spent.
    # (A plain list replaces the original's queue.LifoQueue — a thread-
    # synchronized queue is overkill for single-threaded reversal.)
    kept_tail: list[ModelMessage] = []
    num_tokens = 0
    for msg in reversed(messages[start_idx:]):
        num_tokens += _tok(msg, model_name)
        if num_tokens > protected_tokens:
            break
        kept_tail.append(msg)

    # kept_tail was accumulated newest-first; restore chronological order.
    result.extend(reversed(kept_tail))

    return prune_interrupted_tool_calls(result)
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def _run_summarization_core(
    messages: list[ModelMessage],
    protected_tokens: int,
    with_protection: bool,
    model_name: str | None,
    cache: CompactionCache | None = None,
) -> tuple[list[ModelMessage], list[ModelMessage]]:
    """Inner summarization that propagates exceptions to the caller.

    Returns ``(compacted_messages, summarized_source_messages)`` or raises
    on summarization-agent failure. Use :func:`summarize` if you want the
    swallow-and-return-original behavior, or call this directly when you want
    to handle failure yourself (e.g. fall back to truncation).

    Args:
        messages: Full history; index 0 is treated as the system message.
        protected_tokens: Token budget for the protected recent tail (only
            consulted when ``with_protection`` is True).
        with_protection: When True, keep a recent tail intact and summarize
            only older messages; when False, summarize everything after the
            system message.
        model_name: Passed through to token estimation for the split.
        cache: Optional token-estimate cache used by the split helper.
    """
    if not messages:
        return [], []

    if with_protection:
        messages_to_summarize, protected_messages = split_for_protected_summarization(
            messages, protected_tokens, model_name, cache=cache
        )
    else:
        # No protection: everything after the system message is fair game.
        messages_to_summarize = messages[1:]
        protected_messages = messages[:1]

    system_message = messages[0]

    # Nothing to summarize — just prune dangling tool calls and return.
    if not messages_to_summarize:
        return prune_interrupted_tool_calls(messages), []

    pruned = prune_interrupted_tool_calls(messages_to_summarize)
    if not pruned:
        return prune_interrupted_tool_calls(messages), []

    # May raise (e.g. SummarizationError); callers pick the fallback policy.
    new_messages = run_summarization_sync(
        _SUMMARIZATION_INSTRUCTIONS, message_history=pruned
    )

    if not isinstance(new_messages, list):
        emit_warning(
            "Summarization agent returned non-list output; wrapping into message request"
        )
        # NOTE(review): pydantic-ai ModelRequest parts are normally request
        # parts (e.g. UserPromptPart); TextPart is a response part. Confirm
        # this wrapping is accepted downstream before relying on it.
        new_messages = [ModelRequest([TextPart(str(new_messages))])]

    # Rebuild: system prompt + summary + protected tail (system deduped by
    # identity so it is never emitted twice).
    compacted: list[ModelMessage] = [system_message] + list(new_messages)
    compacted.extend(msg for msg in protected_messages if msg is not system_message)
    return prune_interrupted_tool_calls(compacted), messages_to_summarize
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def _log_summarization_failure(error: Exception, fallback_note: str = "") -> None:
    """Single source of truth for summarization-failure user messaging."""
    emit_error(f"Compaction failed: [{type(error).__name__}] {error}")
    # A SummarizationError wrapping a real provider error gets an extra hint
    # line naming the underlying exception type.
    has_underlying = isinstance(error, SummarizationError) and bool(error.original_error)
    if has_underlying:
        underlying = type(error.original_error).__name__
        if fallback_note:
            suffix = f" {fallback_note}"
        else:
            suffix = ""
        emit_warning(f"💡 Underlying error was {underlying}.{suffix}")
        return
    if fallback_note:
        emit_warning(fallback_note)
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def summarize(
    messages: list[ModelMessage],
    protected_tokens: int,
    with_protection: bool = True,
    model_name: str | None = None,
    cache: CompactionCache | None = None,
) -> tuple[list[ModelMessage], list[ModelMessage]]:
    """Summarize older messages, preserving the protected recent tail.

    Thin safety wrapper around :func:`_run_summarization_core`: returns
    ``(compacted_messages, summarized_source_messages)``, and on any
    summarization failure logs the problem and returns ``(messages, [])``
    so the surrounding run keeps going.
    """
    try:
        result = _run_summarization_core(
            messages, protected_tokens, with_protection, model_name, cache=cache
        )
    except Exception as exc:
        # Swallow-and-continue is the documented contract of this wrapper;
        # callers wanting to handle failure use _run_summarization_core.
        _log_summarization_failure(
            exc,
            "Consider using '/set compaction_strategy=truncation' as a fallback.",
        )
        return messages, []
    return result
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
def _truncate_with_dropped(
    filtered: list[ModelMessage],
    protected_tokens: int,
    model_name: str | None,
    cache: CompactionCache | None = None,
) -> tuple[list[ModelMessage], list[ModelMessage]]:
    """Truncate ``filtered`` and compute which messages got dropped.

    Shared by the truncation strategy and the summarization-failure fallback
    so both paths agree on what counts as 'dropped' for hash bookkeeping.
    """
    kept = truncate(filtered, protected_tokens, model_name, cache=cache)
    # Prefer the per-compaction cache's hasher when available so repeated
    # hashing of the same message objects is amortized within one call.
    hasher = cache.hash_message if cache else hash_message
    kept_hashes = {hasher(msg) for msg in kept}
    removed = [msg for msg in filtered if hasher(msg) not in kept_hashes]
    return kept, removed
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def compact(
    agent: Any,
    messages: list[ModelMessage],
    model_max: int,
    context_overhead: int,
) -> tuple[list[ModelMessage], list[ModelMessage]]:
    """Unified compaction entrypoint. Replaces ``message_history_processor``.

    Args:
        agent: The owning agent. Used to resolve the active model name so
            token estimates can apply per-model calibration multipliers.
        messages: Current message history (already accumulated by the caller).
        model_max: Effective model context window in tokens.
        context_overhead: Estimated overhead for system prompt + tool schemas.

    Returns:
        ``(new_messages, dropped_messages_for_hash_tracking)``.
    """
    # Resolve model name once so all downstream estimators apply the same
    # per-model calibration multiplier.
    model_name: str | None = None
    if agent is not None:
        try:
            model_name = agent.get_model_name()
        except Exception:
            # Name resolution is best-effort; estimators fall back to the
            # uncalibrated path when it is unavailable.
            model_name = None

    # PERF-04: create a per-compaction cache to avoid repeated hash/token
    # computations on the same message objects within this invocation.
    cache = CompactionCache()

    message_tokens = cache.sum_tokens(messages, model_name)
    total_tokens = message_tokens + context_overhead
    # Guard against model_max == 0 so we never divide by zero.
    proportion_used = total_tokens / model_max if model_max else 0.0

    # Surface the pre-compaction footprint in the spinner status line.
    context_summary = SpinnerBase.format_context_info(
        total_tokens, model_max, proportion_used
    )
    update_spinner_context(context_summary)

    threshold = get_compaction_threshold()
    if proportion_used <= threshold:
        # Below the configured threshold: nothing to do, nothing dropped.
        return messages, []

    strategy = get_compaction_strategy()

    protected_tokens = get_protected_token_count()
    filtered = filter_huge_messages(messages, model_name, cache=cache)

    # filter_huge_messages() already runs prune_interrupted_tool_calls(),
    # so by this point any orphaned tool_call / tool_return pairs (from
    # cancelled runs, Ctrl-C interrupts, etc.) have been stripped out. The
    # check below only trips on a genuine mid-execution state, which
    # shouldn't happen when the history_processor is invoked — but we keep
    # it as a defensive safety net.
    #
    # Previously this check ran on the raw `messages` list, which meant a
    # single orphaned tool_call (e.g., from one cancelled command weeks ago)
    # would defer summarization forever, letting history grow unbounded.
    if strategy == "summarization" and has_pending_tool_calls(filtered):
        emit_warning(
            "⚠️ Summarization deferred: pending tool call(s) detected "
            "after pruning orphans. Will retry on next invocation.",
            message_group="token_context_status",
        )
        return messages, []

    if strategy == "truncation":
        result_messages, summarized_messages = _truncate_with_dropped(
            filtered, protected_tokens, model_name, cache=cache
        )
    else:
        # Route through the public summarize() so error handling, logging,
        # and any future instrumentation stay in one place (DRY).
        result_messages, summarized_messages = summarize(
            filtered, protected_tokens, True, model_name, cache=cache
        )
        # If summarization failed gracefully (returned original messages
        # with nothing dropped), fall back to truncation for this cycle.
        # The user's strategy preference is preserved for the next cycle.
        if not summarized_messages:
            emit_warning(
                "↪️ Summarization produced no compaction; "
                "falling back to truncation for this cycle.",
                message_group="token_context_status",
            )
            result_messages, summarized_messages = _truncate_with_dropped(
                filtered, protected_tokens, model_name, cache=cache
            )

    # Report the post-compaction footprint so the spinner reflects reality.
    final_token_count = cache.sum_tokens(result_messages, model_name)
    final_summary = SpinnerBase.format_context_info(
        final_token_count,
        model_max,
        final_token_count / model_max if model_max else 0.0,
    )
    update_spinner_context(final_summary)

    return result_messages, summarized_messages
|
|
388
|
+
|
|
389
|
+
|
|
390
|
+
def _strip_empty_thinking_parts(
    messages: list[ModelMessage],
) -> tuple[list[ModelMessage], int]:
    """Remove empty ThinkingParts; drop messages rendered empty by removal."""

    def _is_empty_thinking(part: Any) -> bool:
        # A ThinkingPart with no content carries nothing worth sending.
        return isinstance(part, ThinkingPart) and not part.content

    kept: list[ModelMessage] = []
    dropped = 0
    for message in messages:
        original_parts = list(message.parts)
        surviving = [part for part in original_parts if not _is_empty_thinking(part)]
        if original_parts and not surviving:
            # Every part was an empty ThinkingPart: the whole message goes.
            dropped += 1
            continue
        if len(surviving) != len(original_parts):
            # Some (but not all) parts were stripped; rebuild the message.
            message = dataclasses.replace(message, parts=surviving)
        kept.append(message)
    return kept, dropped
|
|
419
|
+
|
|
420
|
+
|
|
421
|
+
def make_history_processor(agent: Any) -> Callable[..., list[ModelMessage]]:
    """Build the pydantic-ai ``history_processors`` callback for ``agent``.

    The returned closure:
    1. Fires ``on_message_history_processor_start``.
    2. Merges any incoming messages not already in ``agent._message_history``
       (preserving the last-message regardless of compacted-hash collisions).
    3. Runs ``compact(...)`` if we're over threshold.
    4. Records dropped-message hashes in ``agent._compacted_message_hashes``.
    5. Strips empty ThinkingParts.
    6. Trims trailing ModelResponse messages so history ends with a ModelRequest.
    7. Fires ``on_message_history_processor_end``.

    Agent contract (Phase 3 will enforce on ``BaseAgent``):
    - ``agent._message_history: list``
    - ``agent._compacted_message_hashes: set``
    - ``agent._get_model_context_length() -> int``
    - ``agent._estimate_context_overhead() -> int``
    - ``agent.name`` / ``agent.session_id`` (optional)
    """

    def history_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
        # pydantic-ai picks 1-arg vs 2-arg processor by inspecting the first
        # parameter's type annotation (must be ``RunContext`` for 2-arg form).
        # We don't need ctx, so we use the 1-arg form.
        history: list[ModelMessage] = agent._message_history
        compacted_hashes: set[int] = agent._compacted_message_hashes

        on_message_history_processor_start(
            agent_name=getattr(agent, "name", None),
            session_id=getattr(agent, "session_id", None),
            message_history=list(history),
            incoming_messages=list(messages),
        )

        # Snapshot existing hashes once; appended messages below cannot be
        # duplicates of each other because ``messages`` came from one run.
        existing_hashes = {hash_message(m) for m in history}
        messages_added = 0
        last_idx = len(messages) - 1
        for i, msg in enumerate(messages):
            h = hash_message(msg)
            if h in existing_hashes:
                continue
            # Always keep the last (newest) message, even if its hash collides
            # with a previously compacted one — short prompts like "yes"/"1"
            # can collide and get silently dropped otherwise.
            if i == last_idx or h not in compacted_hashes:
                history.append(msg)
                messages_added += 1

        new_history, dropped = compact(
            agent,
            history,
            agent._get_model_context_length(),
            agent._estimate_context_overhead(),
        )
        agent._message_history = new_history
        # Remember what compaction removed so re-sent copies of those
        # messages are not merged back in on the next invocation.
        for m in dropped:
            compacted_hashes.add(hash_message(m))

        cleaned, filtered_count = _strip_empty_thinking_parts(agent._message_history)

        # Ensure history ends with a ModelRequest — otherwise Anthropic etc.
        # reject it with a "prefill" error.
        while cleaned and isinstance(cleaned[-1], ModelResponse):
            cleaned.pop()

        # PERF-07: Only re-prune if we actually stripped thinking parts or
        # popped trailing ModelResponse messages, since those operations can
        # create orphaned tool_call/tool_return pairs. If nothing was
        # stripped or popped, the history is already clean from compact().
        if filtered_count > 0 or len(cleaned) != len(agent._message_history):
            cleaned = prune_interrupted_tool_calls(cleaned)

        agent._message_history = cleaned

        on_message_history_processor_end(
            agent_name=getattr(agent, "name", None),
            session_id=getattr(agent, "session_id", None),
            message_history=list(cleaned),
            messages_added=messages_added,
            messages_filtered=len(messages) - messages_added + filtered_count,
        )

        return cleaned

    return history_processor
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
"""Terminal-facing diagnostics for agent-run exceptions.
|
|
2
|
+
|
|
3
|
+
When an agent run blows up, ``str(exc)`` alone loses almost all of the
|
|
4
|
+
actionable signal that pydantic-ai / provider SDKs tuck onto ``__cause__``,
|
|
5
|
+
``__context__``, ``BaseExceptionGroup.exceptions``, or attributes like
|
|
6
|
+
``body`` / ``response`` / ``errors``. This module walks those paths in a
|
|
7
|
+
bounded, defensive way and emits structured diagnostic blocks to the terminal.
|
|
8
|
+
|
|
9
|
+
File-level logging (``log_error``) is untouched — this module only controls
|
|
10
|
+
what we *surface* in the terminal for the user. All ``getattr`` access is
|
|
11
|
+
guarded; diagnostic emit must never itself raise.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
from rich.text import Text
|
|
17
|
+
|
|
18
|
+
from code_muse.error_logging import log_error
|
|
19
|
+
from code_muse.messaging import emit_info
|
|
20
|
+
|
|
21
|
+
# Python 3.11+ builtin; graceful fallback for 3.10
|
|
22
|
+
try:
|
|
23
|
+
from builtins import BaseExceptionGroup # type: ignore[attr-defined]
|
|
24
|
+
except ImportError: # pragma: no cover - 3.10 only
|
|
25
|
+
BaseExceptionGroup = Exception # type: ignore[misc,assignment]
|
|
26
|
+
|
|
27
|
+
# Only emit deep diagnostics for shapes that actually benefit. The boring 80%
# of errors get the cheap path (one-line emit + log-file pointer).
# NOTE: matched as substrings against ``str(exc).lower()`` in
# _needs_deep_diagnostics — keep these lowercase.
DIAGNOSTIC_TRIGGERS: tuple[str, ...] = ("output validation", "retries", "exceptiongroup")

# Attributes commonly carrying the "real" story on provider/pydantic-ai errors.
USEFUL_ATTRS: tuple[str, ...] = ("response", "body", "message", "detail", "errors")

# Hard caps so a pathological exception tree can't flood the terminal.
_MAX_CHAIN_DEPTH = 5    # max __cause__/__context__ hops per chain walk
_MAX_GROUP_LEAVES = 10  # max ExceptionGroup leaves surfaced per group
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def _safe_getattr(obj: Any, name: str) -> Any:
|
|
40
|
+
"""``getattr`` that never raises, even on hostile descriptors."""
|
|
41
|
+
try:
|
|
42
|
+
return getattr(obj, name, None)
|
|
43
|
+
except Exception: # pragma: no cover - defensive
|
|
44
|
+
return None
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def _emit_useful_attrs(exc: BaseException, group_id: str, indent: str) -> None:
    """Emit any ``USEFUL_ATTRS`` present on ``exc`` as dim lines.

    Args:
        exc: Exception whose interesting attributes get surfaced.
        group_id: Terminal message-group id passed through to ``emit_info``.
        indent: Leading whitespace prefix for visual nesting.
    """
    for attr in USEFUL_ATTRS:
        val = _safe_getattr(exc, attr)
        if not val:
            # Absent, None, or empty — nothing worth printing.
            continue
        try:
            rendered = str(val)
        except Exception:  # pragma: no cover - hostile __str__
            rendered = f"<unrenderable {type(val).__name__}>"
        # BUGFIX: build a plain Text with an explicit style instead of
        # interpolating arbitrary exception content into from_markup() —
        # provider error bodies routinely contain "[" / "]" sequences that
        # rich parses as markup (raising MarkupError or swallowing text
        # mid-diagnostic).
        emit_info(
            Text(f"{indent}{attr}: {rendered}", style="dim"),
            group_id=group_id,
        )
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _emit_exception_chain(
    exc: BaseException,
    group_id: str,
    depth: int = 0,
    max_depth: int = _MAX_CHAIN_DEPTH,
) -> None:
    """Walk ``__cause__`` / ``__context__`` chains with a bounded depth.

    Args:
        exc: Starting exception; its chain (not ``exc`` itself) is emitted.
        group_id: Terminal message-group id passed through to ``emit_info``.
        depth: Starting indent level (non-zero when recursing into group leaves).
        max_depth: Hard cap on how many chain hops are followed.
    """
    # Guard against cycles (rare but possible when users re-raise chains).
    seen: set[int] = set()
    current: BaseException | None = exc
    current_depth = depth
    while current is not None and current_depth < max_depth:
        cause = _safe_getattr(current, "__cause__")
        context = _safe_getattr(current, "__context__")
        # PEP 3134 semantics: an explicit ``raise ... from`` cause wins over
        # the implicit context.
        nxt = cause if cause is not None else context
        if nxt is None or id(nxt) in seen:
            return
        seen.add(id(nxt))
        label = "cause" if cause is not None else "context"
        # BUGFIX: plain Text + explicit style instead of from_markup() —
        # ``nxt``'s message is arbitrary content and may contain "[" / "]"
        # sequences that rich would misparse as markup.
        emit_info(
            Text(
                f" {' ' * current_depth}{label}: "
                f"{type(nxt).__name__}: {nxt}",
                style="dim",
            ),
            group_id=group_id,
        )
        _emit_useful_attrs(nxt, group_id, indent=" " + " " * current_depth)
        current = nxt
        current_depth += 1
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def _needs_deep_diagnostics(exc: BaseException) -> bool:
    """Return True when the cheap path would hide important detail."""
    # Exception groups always take the deep path: their top-level repr
    # hides every leaf.
    if isinstance(exc, BaseExceptionGroup):
        return True
    try:
        lowered = str(exc).lower()
    except Exception:  # pragma: no cover - hostile __str__
        # Can't even render the message: stay on the cheap path.
        return False
    for trigger in DIAGNOSTIC_TRIGGERS:
        if trigger in lowered:
            return True
    return False
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def emit_exception_diagnostics(exc: BaseException, group_id: str) -> None:
    """Emit terminal diagnostics for ``exc``, bounded and defensive.

    Cheap path (always): one-line summary + log file write.
    Deep path (only for ``ExceptionGroup``s or trigger-phrase messages):
    cause/context chain + group leaves + useful attributes.

    Never raises. Worst-case failure is a slightly noisier terminal during an
    already-failed run.
    """
    try:
        emit_info(f"Unexpected error: {exc}", group_id=group_id)
    except Exception:  # pragma: no cover - emit should never fail
        pass

    # File logging is independent of terminal output and stays on the cheap path.
    try:
        log_error(
            exc,
            context=f"Agent run (group_id={group_id})",
            include_traceback=True,
        )
    except Exception:  # pragma: no cover - logging failure must not cascade
        pass

    try:
        if not _needs_deep_diagnostics(exc):
            return

        # Static markup only below — safe for from_markup(). Python type
        # names cannot contain "[", so the type line is safe too.
        emit_info(
            Text.from_markup("[yellow]Diagnostic detail:[/yellow]"),
            group_id=group_id,
        )
        emit_info(
            Text.from_markup(f"[dim] Exception type: {type(exc).__name__}[/dim]"),
            group_id=group_id,
        )

        _emit_exception_chain(exc, group_id)

        if isinstance(exc, BaseExceptionGroup):
            for i, sub in enumerate(exc.exceptions[:_MAX_GROUP_LEAVES], start=1):
                # BUGFIX: ``sub``'s message is arbitrary provider output and
                # may contain "[" / "]" sequences — use a plain Text with an
                # explicit style instead of interpolating into from_markup().
                emit_info(
                    Text(
                        f" Sub-exception {i}: "
                        f"{type(sub).__name__}: {sub}",
                        style="yellow",
                    ),
                    group_id=group_id,
                )
                _emit_useful_attrs(sub, group_id, indent=" ")
                # One level of nested cause on each leaf is usually enough.
                _emit_exception_chain(sub, group_id, depth=1, max_depth=3)
            extra = len(exc.exceptions) - _MAX_GROUP_LEAVES
            if extra > 0:
                emit_info(
                    Text.from_markup(
                        f"[dim] ... and {extra} more sub-exception(s) omitted[/dim]"
                    ),
                    group_id=group_id,
                )

        _emit_useful_attrs(exc, group_id, indent=" ")
    except Exception:  # pragma: no cover - diagnostics must never raise
        pass
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
# Public API: a single entry point; all other helpers stay module-private.
__all__ = ["emit_exception_diagnostics"]
|