code-muse 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_muse/__init__.py +26 -0
- code_muse/__main__.py +10 -0
- code_muse/agents/__init__.py +31 -0
- code_muse/agents/_builder.py +214 -0
- code_muse/agents/_compaction.py +506 -0
- code_muse/agents/_diagnostics.py +171 -0
- code_muse/agents/_history.py +382 -0
- code_muse/agents/_key_listeners.py +148 -0
- code_muse/agents/_non_streaming_render.py +148 -0
- code_muse/agents/_runtime.py +596 -0
- code_muse/agents/agent_creator_agent.py +603 -0
- code_muse/agents/agent_helios.py +47 -0
- code_muse/agents/agent_manager.py +740 -0
- code_muse/agents/agent_muse.py +78 -0
- code_muse/agents/agent_planning.py +44 -0
- code_muse/agents/agent_qa_melpomene.py +207 -0
- code_muse/agents/base_agent.py +194 -0
- code_muse/agents/event_stream_handler.py +361 -0
- code_muse/agents/json_agent.py +201 -0
- code_muse/agents/prompt_v3.py +521 -0
- code_muse/agents/subagent_stream_handler.py +273 -0
- code_muse/callbacks.py +941 -0
- code_muse/chatgpt_codex_client.py +333 -0
- code_muse/claude_cache_client.py +853 -0
- code_muse/cli_runner/__init__.py +319 -0
- code_muse/cli_runner/args.py +63 -0
- code_muse/cli_runner/loop.py +510 -0
- code_muse/cli_runner/resume.py +72 -0
- code_muse/cli_runner/runner.py +161 -0
- code_muse/command_line/__init__.py +1 -0
- code_muse/command_line/add_model_menu.py +1331 -0
- code_muse/command_line/agent_menu.py +674 -0
- code_muse/command_line/attachments.py +397 -0
- code_muse/command_line/autosave_menu.py +709 -0
- code_muse/command_line/clipboard.py +528 -0
- code_muse/command_line/colors_menu.py +530 -0
- code_muse/command_line/command_handler.py +262 -0
- code_muse/command_line/command_registry.py +150 -0
- code_muse/command_line/config_commands.py +711 -0
- code_muse/command_line/core_commands.py +740 -0
- code_muse/command_line/diff_menu.py +865 -0
- code_muse/command_line/file_path_completion.py +73 -0
- code_muse/command_line/load_context_completion.py +57 -0
- code_muse/command_line/model_picker_completion.py +512 -0
- code_muse/command_line/model_settings_menu.py +983 -0
- code_muse/command_line/onboarding_slides.py +162 -0
- code_muse/command_line/onboarding_wizard.py +337 -0
- code_muse/command_line/pagination.py +41 -0
- code_muse/command_line/pin_command_completion.py +329 -0
- code_muse/command_line/prompt_toolkit_completion.py +886 -0
- code_muse/command_line/session_commands.py +304 -0
- code_muse/command_line/shell_passthrough.py +145 -0
- code_muse/command_line/skills_completion.py +158 -0
- code_muse/command_line/types.py +18 -0
- code_muse/command_line/uc_menu.py +908 -0
- code_muse/command_line/utils.py +105 -0
- code_muse/command_line/wiggum_state.py +77 -0
- code_muse/config.py +1138 -0
- code_muse/config_agent.py +168 -0
- code_muse/config_appearance.py +241 -0
- code_muse/config_model.py +357 -0
- code_muse/config_security.py +73 -0
- code_muse/error_logging.py +132 -0
- code_muse/evals/__init__.py +35 -0
- code_muse/evals/eval_helpers.py +81 -0
- code_muse/evals/eval_runner.py +299 -0
- code_muse/evals/sample_evals/__init__.py +1 -0
- code_muse/evals/sample_evals/eval_frugal_reads.py +59 -0
- code_muse/evals/sample_evals/eval_memory_planning.py +31 -0
- code_muse/evals/sample_evals/eval_shell_efficiency.py +39 -0
- code_muse/evals/sample_evals/eval_tool_masking.py +33 -0
- code_muse/fs_scan_cache/__init__.py +31 -0
- code_muse/fs_scan_cache/invalidation_hooks.py +89 -0
- code_muse/fs_scan_cache/scan_cache_core.cpython-314-darwin.so +0 -0
- code_muse/fs_scan_cache/scan_cache_core.pyx +203 -0
- code_muse/fs_scan_cache/tool_integration.py +309 -0
- code_muse/fs_scan_cache/ttl_policy.py +44 -0
- code_muse/gemini_code_assist.py +383 -0
- code_muse/gemini_model.py +838 -0
- code_muse/hook_engine/README.md +105 -0
- code_muse/hook_engine/__init__.py +21 -0
- code_muse/hook_engine/aliases.py +153 -0
- code_muse/hook_engine/engine.py +221 -0
- code_muse/hook_engine/executor.py +347 -0
- code_muse/hook_engine/matcher.py +154 -0
- code_muse/hook_engine/models.py +245 -0
- code_muse/hook_engine/registry.py +114 -0
- code_muse/hook_engine/trust.py +268 -0
- code_muse/hook_engine/validator.py +144 -0
- code_muse/http_utils.py +360 -0
- code_muse/keymap.py +128 -0
- code_muse/list_filtering.py +26 -0
- code_muse/main.py +10 -0
- code_muse/messaging/__init__.py +259 -0
- code_muse/messaging/bus.py +621 -0
- code_muse/messaging/commands.py +166 -0
- code_muse/messaging/markdown_patches.py +57 -0
- code_muse/messaging/message_queue.py +397 -0
- code_muse/messaging/messages.py +591 -0
- code_muse/messaging/queue_console.py +269 -0
- code_muse/messaging/renderers.py +308 -0
- code_muse/messaging/rich_renderer.py +1158 -0
- code_muse/messaging/shimmer.py +154 -0
- code_muse/messaging/spinner/__init__.py +87 -0
- code_muse/messaging/spinner/console_spinner.py +250 -0
- code_muse/messaging/spinner/spinner_base.py +82 -0
- code_muse/messaging/subagent_console.py +458 -0
- code_muse/model_factory.py +1203 -0
- code_muse/model_switching.py +59 -0
- code_muse/model_utils.py +156 -0
- code_muse/models.json +66 -0
- code_muse/models_cache/__init__.py +26 -0
- code_muse/models_cache/blocking_lru_cache.py +98 -0
- code_muse/models_cache/cache_writer.py +86 -0
- code_muse/models_cache/sha256_hash.cpython-314-darwin.so +0 -0
- code_muse/models_cache/sha256_hash.pyx +34 -0
- code_muse/models_cache/startup_integration.py +75 -0
- code_muse/models_dev_api.json +1 -0
- code_muse/models_dev_parser.py +590 -0
- code_muse/motion.py +126 -0
- code_muse/plugins/__init__.py +471 -0
- code_muse/plugins/agent_skills/__init__.py +32 -0
- code_muse/plugins/agent_skills/config.py +176 -0
- code_muse/plugins/agent_skills/discovery.py +309 -0
- code_muse/plugins/agent_skills/downloader.py +389 -0
- code_muse/plugins/agent_skills/installer.py +19 -0
- code_muse/plugins/agent_skills/metadata.py +293 -0
- code_muse/plugins/agent_skills/prompt_builder.py +66 -0
- code_muse/plugins/agent_skills/register_callbacks.py +298 -0
- code_muse/plugins/agent_skills/remote_catalog.py +320 -0
- code_muse/plugins/agent_skills/skill_catalog.py +254 -0
- code_muse/plugins/agent_skills/skills_install_menu.py +690 -0
- code_muse/plugins/agent_skills/skills_menu.py +791 -0
- code_muse/plugins/autonomous_memory/__init__.py +39 -0
- code_muse/plugins/autonomous_memory/bm25_scorer.cpython-314-darwin.so +0 -0
- code_muse/plugins/autonomous_memory/bm25_scorer.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/autonomous_memory/bm25_scorer.pyx +291 -0
- code_muse/plugins/autonomous_memory/consolidation.py +82 -0
- code_muse/plugins/autonomous_memory/extraction.py +382 -0
- code_muse/plugins/autonomous_memory/lease_lock.py +105 -0
- code_muse/plugins/autonomous_memory/memory_injection.py +59 -0
- code_muse/plugins/autonomous_memory/register_callbacks.py +268 -0
- code_muse/plugins/autonomous_memory/secret_scanner.py +62 -0
- code_muse/plugins/autonomous_memory/session_scanner.py +163 -0
- code_muse/plugins/aws_bedrock/__init__.py +14 -0
- code_muse/plugins/aws_bedrock/config.py +99 -0
- code_muse/plugins/aws_bedrock/register_callbacks.py +241 -0
- code_muse/plugins/aws_bedrock/utils.py +153 -0
- code_muse/plugins/azure_foundry/README.md +238 -0
- code_muse/plugins/azure_foundry/__init__.py +15 -0
- code_muse/plugins/azure_foundry/config.py +125 -0
- code_muse/plugins/azure_foundry/discovery.py +187 -0
- code_muse/plugins/azure_foundry/register_callbacks.py +495 -0
- code_muse/plugins/azure_foundry/token.py +180 -0
- code_muse/plugins/azure_foundry/utils.py +345 -0
- code_muse/plugins/build_filter/__init__.py +1 -0
- code_muse/plugins/build_filter/register_callbacks.py +201 -0
- code_muse/plugins/build_filter/strategies/__init__.py +1 -0
- code_muse/plugins/build_filter/strategies/build.py +397 -0
- code_muse/plugins/chatgpt_oauth/__init__.py +6 -0
- code_muse/plugins/chatgpt_oauth/config.py +52 -0
- code_muse/plugins/chatgpt_oauth/oauth_flow.py +338 -0
- code_muse/plugins/chatgpt_oauth/register_callbacks.py +172 -0
- code_muse/plugins/chatgpt_oauth/test_plugin.py +301 -0
- code_muse/plugins/chatgpt_oauth/utils.py +538 -0
- code_muse/plugins/checkpointing/__init__.py +29 -0
- code_muse/plugins/checkpointing/checkpoint_hook.py +51 -0
- code_muse/plugins/checkpointing/conversation_snapshots.py +117 -0
- code_muse/plugins/checkpointing/register_callbacks.py +51 -0
- code_muse/plugins/checkpointing/restore_command.py +263 -0
- code_muse/plugins/checkpointing/rewind_shortcut.py +88 -0
- code_muse/plugins/checkpointing/shadow_git.py +90 -0
- code_muse/plugins/claude_code_hooks/__init__.py +1 -0
- code_muse/plugins/claude_code_hooks/config.py +188 -0
- code_muse/plugins/claude_code_hooks/register_callbacks.py +208 -0
- code_muse/plugins/claude_code_oauth/README.md +167 -0
- code_muse/plugins/claude_code_oauth/SETUP.md +93 -0
- code_muse/plugins/claude_code_oauth/__init__.py +25 -0
- code_muse/plugins/claude_code_oauth/config.py +52 -0
- code_muse/plugins/claude_code_oauth/fast_mode.py +124 -0
- code_muse/plugins/claude_code_oauth/prompt_handler.py +63 -0
- code_muse/plugins/claude_code_oauth/register_callbacks.py +547 -0
- code_muse/plugins/claude_code_oauth/test_fast_mode.py +165 -0
- code_muse/plugins/claude_code_oauth/test_plugin.py +283 -0
- code_muse/plugins/claude_code_oauth/token_refresh_heartbeat.py +237 -0
- code_muse/plugins/claude_code_oauth/utils.py +664 -0
- code_muse/plugins/copilot_auth/__init__.py +11 -0
- code_muse/plugins/copilot_auth/config.py +91 -0
- code_muse/plugins/copilot_auth/reasoning_client.py +409 -0
- code_muse/plugins/copilot_auth/register_callbacks.py +461 -0
- code_muse/plugins/copilot_auth/utils.py +584 -0
- code_muse/plugins/custom_commands/__init__.py +14 -0
- code_muse/plugins/custom_commands/args_injection.py +82 -0
- code_muse/plugins/custom_commands/command_discovery.py +89 -0
- code_muse/plugins/custom_commands/command_toml_schema.py +71 -0
- code_muse/plugins/custom_commands/register_callbacks.py +176 -0
- code_muse/plugins/customizable_commands/__init__.py +0 -0
- code_muse/plugins/customizable_commands/register_callbacks.py +136 -0
- code_muse/plugins/destructive_command_guard/__init__.py +14 -0
- code_muse/plugins/destructive_command_guard/detector.py +375 -0
- code_muse/plugins/destructive_command_guard/register_callbacks.py +148 -0
- code_muse/plugins/example_custom_command/README.md +280 -0
- code_muse/plugins/example_custom_command/register_callbacks.py +51 -0
- code_muse/plugins/file_permission_handler/__init__.py +4 -0
- code_muse/plugins/file_permission_handler/register_callbacks.py +441 -0
- code_muse/plugins/filter_engine/__init__.py +30 -0
- code_muse/plugins/filter_engine/classifier.py +153 -0
- code_muse/plugins/filter_engine/content_detector.py +184 -0
- code_muse/plugins/filter_engine/dispatcher.py +244 -0
- code_muse/plugins/filter_engine/register_callbacks.py +188 -0
- code_muse/plugins/filter_engine/registry.py +279 -0
- code_muse/plugins/filter_engine/strategies/__init__.py +8 -0
- code_muse/plugins/filter_engine/strategies/ast_compressor.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/ast_compressor.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/ast_compressor.pyx +348 -0
- code_muse/plugins/filter_engine/strategies/ast_parser.py +167 -0
- code_muse/plugins/filter_engine/strategies/code.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/code.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/code.pyx +584 -0
- code_muse/plugins/filter_engine/strategies/git.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/git.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/git.pyx +438 -0
- code_muse/plugins/filter_engine/strategies/json_compressor.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/json_compressor.pyx +253 -0
- code_muse/plugins/filter_engine/strategies/json_patterns.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/json_patterns.pyx +178 -0
- code_muse/plugins/filter_engine/strategies/lint.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/lint.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/lint.pyx +626 -0
- code_muse/plugins/filter_engine/strategies/test.cpython-314-darwin.so +0 -0
- code_muse/plugins/filter_engine/strategies/test.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/plugins/filter_engine/strategies/test.pyx +431 -0
- code_muse/plugins/filter_engine/verbosity.py +63 -0
- code_muse/plugins/force_push_guard/__init__.py +5 -0
- code_muse/plugins/force_push_guard/detector.py +96 -0
- code_muse/plugins/force_push_guard/register_callbacks.py +144 -0
- code_muse/plugins/force_push_guard/test_detector.py +143 -0
- code_muse/plugins/frontend_emitter/__init__.py +25 -0
- code_muse/plugins/frontend_emitter/emitter.py +121 -0
- code_muse/plugins/frontend_emitter/register_callbacks.py +259 -0
- code_muse/plugins/gac/__init__.py +4 -0
- code_muse/plugins/gac/git_ops.py +136 -0
- code_muse/plugins/gac/prompt.py +191 -0
- code_muse/plugins/gac/register_callbacks.py +82 -0
- code_muse/plugins/hook_creator/__init__.py +1 -0
- code_muse/plugins/hook_creator/register_callbacks.py +34 -0
- code_muse/plugins/hook_manager/__init__.py +1 -0
- code_muse/plugins/hook_manager/config.py +289 -0
- code_muse/plugins/hook_manager/hooks_menu.py +563 -0
- code_muse/plugins/hook_manager/register_callbacks.py +227 -0
- code_muse/plugins/hook_monitor/register_callbacks.py +36 -0
- code_muse/plugins/mindpack/__init__.py +0 -0
- code_muse/plugins/mindpack/factory.py +930 -0
- code_muse/plugins/mindpack/judge.py +573 -0
- code_muse/plugins/mindpack/memory.py +100 -0
- code_muse/plugins/mindpack/mindpack_menu.py +1552 -0
- code_muse/plugins/mindpack/orchestration.py +605 -0
- code_muse/plugins/mindpack/register_callbacks.py +175 -0
- code_muse/plugins/mindpack/schemas.py +358 -0
- code_muse/plugins/mindpack/tools.py +387 -0
- code_muse/plugins/oauth_muse_html.py +226 -0
- code_muse/plugins/ollama_setup/__init__.py +5 -0
- code_muse/plugins/ollama_setup/completer.py +36 -0
- code_muse/plugins/ollama_setup/register_callbacks.py +410 -0
- code_muse/plugins/plan_command/__init__.py +0 -0
- code_muse/plugins/plan_command/register_callbacks.py +206 -0
- code_muse/plugins/plan_mode/__init__.py +37 -0
- code_muse/plugins/plan_mode/mode_cycling.py +40 -0
- code_muse/plugins/plan_mode/plan_generation.py +68 -0
- code_muse/plugins/plan_mode/plan_hooks.py +74 -0
- code_muse/plugins/plan_mode/plan_mode_tools.py +138 -0
- code_muse/plugins/plan_mode/register_callbacks.py +121 -0
- code_muse/plugins/plugin_trust/register_callbacks.py +140 -0
- code_muse/plugins/policy_engine/__init__.py +46 -0
- code_muse/plugins/policy_engine/approval_flow_integration.py +59 -0
- code_muse/plugins/policy_engine/policy_evaluator.py +75 -0
- code_muse/plugins/policy_engine/policy_file_discovery.py +90 -0
- code_muse/plugins/policy_engine/policy_toml_schema.py +115 -0
- code_muse/plugins/policy_engine/register_callbacks.py +112 -0
- code_muse/plugins/pop_command/__init__.py +1 -0
- code_muse/plugins/pop_command/register_callbacks.py +189 -0
- code_muse/plugins/prompt_newline/__init__.py +13 -0
- code_muse/plugins/prompt_newline/config.py +19 -0
- code_muse/plugins/prompt_newline/register_callbacks.py +159 -0
- code_muse/plugins/safety_status/__init__.py +0 -0
- code_muse/plugins/safety_status/register_callbacks.py +113 -0
- code_muse/plugins/semantic_compression/__init__.py +6 -0
- code_muse/plugins/semantic_compression/compressor.py +295 -0
- code_muse/plugins/semantic_compression/config.py +123 -0
- code_muse/plugins/semantic_compression/register_callbacks.py +320 -0
- code_muse/plugins/shell_minimizer/__init__.py +50 -0
- code_muse/plugins/shell_minimizer/builtin_filters.toml +393 -0
- code_muse/plugins/shell_minimizer/pipeline.py +556 -0
- code_muse/plugins/shell_minimizer/primitives.py +482 -0
- code_muse/plugins/shell_minimizer/register_callbacks.py +276 -0
- code_muse/plugins/shell_safety/__init__.py +6 -0
- code_muse/plugins/shell_safety/agent_shell_safety.py +69 -0
- code_muse/plugins/shell_safety/command_cache.py +149 -0
- code_muse/plugins/shell_safety/register_callbacks.py +202 -0
- code_muse/plugins/synthetic_status/__init__.py +1 -0
- code_muse/plugins/synthetic_status/register_callbacks.py +128 -0
- code_muse/plugins/synthetic_status/status_api.py +145 -0
- code_muse/plugins/token_caching/__init__.py +21 -0
- code_muse/plugins/token_caching/cache_hit_tracking.py +128 -0
- code_muse/plugins/token_caching/cacheable_prefix_detection.py +28 -0
- code_muse/plugins/token_caching/register_callbacks.py +54 -0
- code_muse/plugins/token_caching/stats_display.py +35 -0
- code_muse/plugins/token_tracking/__init__.py +26 -0
- code_muse/plugins/token_tracking/database.py +381 -0
- code_muse/plugins/token_tracking/edit_analyzer.py +97 -0
- code_muse/plugins/token_tracking/record.py +55 -0
- code_muse/plugins/token_tracking/register_callbacks.py +277 -0
- code_muse/plugins/token_tracking/reports.py +329 -0
- code_muse/plugins/universal_constructor/__init__.py +13 -0
- code_muse/plugins/universal_constructor/models.py +136 -0
- code_muse/plugins/universal_constructor/register_callbacks.py +47 -0
- code_muse/plugins/universal_constructor/registry.py +390 -0
- code_muse/plugins/universal_constructor/runner.py +474 -0
- code_muse/plugins/universal_constructor/safety.py +440 -0
- code_muse/plugins/universal_constructor/sandbox.py +584 -0
- code_muse/provider_identity.py +105 -0
- code_muse/pydantic_patches.py +410 -0
- code_muse/reopenable_async_client.py +233 -0
- code_muse/round_robin_model.py +151 -0
- code_muse/secret_storage.py +74 -0
- code_muse/security/__init__.py +1 -0
- code_muse/security/redaction.cpython-314-darwin.so +0 -0
- code_muse/security/redaction.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/security/redaction.pyx +135 -0
- code_muse/session_storage.py +565 -0
- code_muse/status_display.py +261 -0
- code_muse/stream_parser/__init__.py +76 -0
- code_muse/stream_parser/assistant_text_parser.py +90 -0
- code_muse/stream_parser/citation_parser.py +76 -0
- code_muse/stream_parser/inline_hidden_tag_parser.py +236 -0
- code_muse/stream_parser/proposed_plan_parser.py +158 -0
- code_muse/stream_parser/stream_text_chunk.py +23 -0
- code_muse/stream_parser/stream_text_parser.py +27 -0
- code_muse/stream_parser/tagged_line_parser.cpython-314-darwin.so +0 -0
- code_muse/stream_parser/tagged_line_parser.pyx +251 -0
- code_muse/stream_parser/utf8_stream_parser.cpython-314-darwin.so +0 -0
- code_muse/stream_parser/utf8_stream_parser.pyx +206 -0
- code_muse/summarization_agent.py +308 -0
- code_muse/terminal_utils.cpython-314-darwin.so +0 -0
- code_muse/terminal_utils.cpython-314-x86_64-linux-gnu.so +0 -0
- code_muse/terminal_utils.pyx +483 -0
- code_muse/tools/__init__.py +459 -0
- code_muse/tools/agent_tools.py +613 -0
- code_muse/tools/ask_user_question/__init__.py +26 -0
- code_muse/tools/ask_user_question/constants.py +73 -0
- code_muse/tools/ask_user_question/demo_tui.py +55 -0
- code_muse/tools/ask_user_question/handler.py +232 -0
- code_muse/tools/ask_user_question/models.py +302 -0
- code_muse/tools/ask_user_question/registration.py +37 -0
- code_muse/tools/ask_user_question/renderers.py +336 -0
- code_muse/tools/ask_user_question/terminal_ui.py +327 -0
- code_muse/tools/ask_user_question/theme.py +156 -0
- code_muse/tools/ask_user_question/tui_loop.py +422 -0
- code_muse/tools/background_jobs.py +99 -0
- code_muse/tools/browser/__init__.py +37 -0
- code_muse/tools/browser/browser_control.py +289 -0
- code_muse/tools/browser/browser_interactions.py +545 -0
- code_muse/tools/browser/browser_locators.py +640 -0
- code_muse/tools/browser/browser_manager.py +376 -0
- code_muse/tools/browser/browser_navigation.py +251 -0
- code_muse/tools/browser/browser_screenshot.py +180 -0
- code_muse/tools/browser/browser_scripts.py +462 -0
- code_muse/tools/browser/browser_workflows.py +222 -0
- code_muse/tools/chrome_cdp/__init__.py +1070 -0
- code_muse/tools/chrome_cdp/register_callbacks.py +61 -0
- code_muse/tools/command_runner.py +1401 -0
- code_muse/tools/common.py +1407 -0
- code_muse/tools/display.py +87 -0
- code_muse/tools/file_modifications.py +1099 -0
- code_muse/tools/file_operations.py +860 -0
- code_muse/tools/image_tools.py +185 -0
- code_muse/tools/meetin_proxy/__init__.py +243 -0
- code_muse/tools/meetin_proxy/capture_addon.py +82 -0
- code_muse/tools/meetin_proxy/proxy_manager.py +326 -0
- code_muse/tools/meetin_proxy/register_callbacks.py +45 -0
- code_muse/tools/path_policy.py +219 -0
- code_muse/tools/skills_tools.py +586 -0
- code_muse/tools/subagent_context.py +158 -0
- code_muse/tools/tools_content.py +50 -0
- code_muse/tools/universal_constructor.py +965 -0
- code_muse/uvx_detection.py +241 -0
- code_muse/version_checker.py +86 -0
- code_muse-0.0.1.data/data/code_muse/models.json +66 -0
- code_muse-0.0.1.data/data/code_muse/models_dev_api.json +1 -0
- code_muse-0.0.1.dist-info/METADATA +845 -0
- code_muse-0.0.1.dist-info/RECORD +394 -0
- code_muse-0.0.1.dist-info/WHEEL +4 -0
- code_muse-0.0.1.dist-info/entry_points.txt +2 -0
- code_muse-0.0.1.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,382 @@
|
|
|
1
|
+
"""Pure helpers for message history hashing, token estimation, and pruning.
|
|
2
|
+
|
|
3
|
+
Extracted from the original ``BaseAgent`` god-class. Everything in here is a
|
|
4
|
+
free function with no hidden state. Call sites pass messages (and, where
|
|
5
|
+
needed, already-resolved strings / tool dicts) in explicitly.
|
|
6
|
+
|
|
7
|
+
PERF-04 adds :class:`CompactionCache` — a per-compaction-run cache that avoids
|
|
8
|
+
repeated ``hash_message`` / ``estimate_tokens_for_message`` calls on the same
|
|
9
|
+
message objects within a single ``compact()`` invocation. No global caches
|
|
10
|
+
that retain message objects.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import dataclasses
|
|
14
|
+
import json
|
|
15
|
+
import math
|
|
16
|
+
import pathlib
|
|
17
|
+
import weakref
|
|
18
|
+
from annotationlib import get_annotations
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
import pydantic
|
|
22
|
+
from pydantic_ai import BinaryContent
|
|
23
|
+
from pydantic_ai.messages import ModelMessage
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _json_safe(obj):
|
|
27
|
+
"""JSON serializer for objects not supported by default json.dumps."""
|
|
28
|
+
if isinstance(obj, pathlib.PurePath):
|
|
29
|
+
return str(obj)
|
|
30
|
+
raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def stringify_part(part: Any) -> str:
    """Return a stable, timestamp-free string representation of a message part.

    Used for both hashing and token estimation. Timestamps are deliberately
    excluded, so two otherwise-identical parts emitted at different times
    collapse to the same string — exactly what dedup wants.
    """
    pieces: list[str] = [part.__class__.__name__]

    # Identity-bearing attributes, in a fixed order so output is stable.
    for attr in ("role", "instructions", "tool_call_id", "tool_name"):
        value = getattr(part, attr, None)
        if value:
            pieces.append(f"{attr}={value}")

    content = getattr(part, "content", None)
    if content is None:
        pieces.append("content=None")
    elif isinstance(content, str):
        pieces.append(f"content={content}")
    elif isinstance(content, pydantic.BaseModel):
        dumped = json.dumps(content.model_dump(), sort_keys=True, default=_json_safe)
        pieces.append(f"content={dumped}")
    elif isinstance(content, dict):
        dumped = json.dumps(content, sort_keys=True, default=_json_safe)
        pieces.append(f"content={dumped}")
    elif isinstance(content, list):
        # List content: only string and binary items contribute; other item
        # types are ignored, matching the original behavior.
        for item in content:
            if isinstance(item, str):
                pieces.append(f"content={item}")
            elif isinstance(item, BinaryContent):
                pieces.append(f"BinaryContent={hash(item.data)}")
    else:
        pieces.append(f"content={repr(content)}")

    return "|".join(pieces)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
# PERF-07: Cache hash results by object identity. A message that appears
# in multiple history lists (e.g. after copy-on-write) will hash
# identically, so id()-keyed caching is correct as long as the message
# object is alive — which it always is while it's in a history list.
#
# A ``weakref.finalize`` callback automatically drops the cached entry
# when the message object is garbage-collected, preventing stale hits if the
# same memory address is reused for a different message later (common in
# tests that create many short-lived messages).
_hash_cache: dict[int, int] = {}
# Size bound for _hash_cache; when reached the cache is reset wholesale in
# hash_message (no per-entry LRU bookkeeping is kept).
_HASH_CACHE_MAX = 8192
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def _evict_hash_cache(msg_id: int) -> None:
    """``weakref.finalize`` callback: drop a dead message's cached hash."""
    try:
        del _hash_cache[msg_id]
    except KeyError:
        # Entry may already be gone (e.g. the cache was reset when full).
        pass
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def hash_message(message: Any) -> int:
    """Stable hash for a ``ModelMessage`` that ignores timestamps.

    Results are memoized by ``id(message)`` in the module-level
    ``_hash_cache``; a ``weakref.finalize`` callback evicts the entry when
    the message object is garbage-collected, so a recycled memory address
    cannot produce a stale hit.
    """
    msg_id = id(message)
    cached = _hash_cache.get(msg_id)
    if cached is not None:
        return cached

    role = getattr(message, "role", None)
    instructions = getattr(message, "instructions", None)
    header_bits: list[str] = []
    if role:
        header_bits.append(f"role={role}")
    if instructions:
        header_bits.append(f"instructions={instructions}")

    part_strings = [stringify_part(part) for part in getattr(message, "parts", [])]
    canonical = "||".join(header_bits + part_strings)
    result = hash(canonical)

    # Bounded cache — reset wholesale when full. There is no access-order
    # bookkeeping, so true "evict oldest" LRU behavior isn't possible here;
    # a full clear is cheap and rare at this size.
    if len(_hash_cache) >= _HASH_CACHE_MAX:
        _hash_cache.clear()

    # Register eviction BEFORE storing: if the message type doesn't support
    # weak references (e.g. a slotted class without __weakref__), the
    # original order raised TypeError after the store and left a permanent
    # stale id-keyed entry behind. In that case we simply skip caching.
    try:
        weakref.finalize(message, _evict_hash_cache, msg_id)
    except TypeError:
        return result
    _hash_cache[msg_id] = result
    return result
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def estimate_tokens(text: str) -> int:
    """Character-based token estimator tuned for code-heavy content.

    Uses one token per 3 characters as the base rate (code has higher
    entropy than prose, and the old ``/2.5`` heuristic systematically
    undercounted for most models). Per-model calibration multipliers
    applied downstream by ``estimate_tokens_for_message`` adjust for models
    with even denser tokenizers.

    Always returns at least 1, even for the empty string. Integer floor
    division replaces the original ``math.floor(len(text) / 3.0)`` — the
    result is identical but the math stays exact (no float round-trip).
    """
    return max(1, len(text) // 3)
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
# Models whose tokenizer the char-based heuristic systematically *under*counts
# (the base rate lives in ``estimate_tokens``). Bump these by a calibration
# factor so context-usage math stops lying to us.
# Substring match is case-insensitive; both naming orders are accepted because
# vendor naming is a coin flip.
_TOKEN_MULTIPLIER_RULES: tuple[tuple[tuple[str, ...], float], ...] = (
    (("opus-4-7", "4-7-opus"), 1.35),
)
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def model_token_multiplier(model_name: str | None) -> float:
    """Per-model fudge factor for the char-based token estimator.

    Returns 1.0 when ``model_name`` is falsy or matches no calibration rule.
    Matching is case-insensitive substring containment against
    ``_TOKEN_MULTIPLIER_RULES``.
    """
    if model_name:
        haystack = model_name.lower()
        for substrings, multiplier in _TOKEN_MULTIPLIER_RULES:
            for needle in substrings:
                if needle in haystack:
                    return multiplier
    return 1.0
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def _apply_multiplier(raw_tokens: int, model_name: str | None) -> int:
    """Scale *raw_tokens* by the model's calibration factor.

    A factor of exactly 1.0 is a strict no-op — the input is returned
    untouched, even when it is zero. Any other factor floors the scaled
    value and clamps it to at least 1.
    """
    factor = model_token_multiplier(model_name)
    if factor == 1.0:
        return raw_tokens
    scaled = math.floor(raw_tokens * factor)
    return scaled if scaled > 1 else 1
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def estimate_tokens_for_message(
    message: ModelMessage,
    model_name: str | None = None,
) -> int:
    """Estimate the number of tokens in a single model message (always >= 1).

    Each part is rendered through :func:`stringify_part` and fed to the
    char-based estimator. When ``model_name`` is provided, the raw count is
    scaled by :func:`model_token_multiplier` to compensate for tokenizers
    that run denser than the base heuristic assumes.
    """
    parts = getattr(message, "parts", []) or []
    raw = sum(estimate_tokens(text) for text in map(stringify_part, parts) if text)
    return _apply_multiplier(max(1, raw), model_name)
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
def estimate_context_overhead(
    system_prompt: str,
    pydantic_tools: dict[str, Any] | None,
    model_name: str | None = None,
) -> int:
    """Estimate fixed token overhead for the system prompt + tool definitions.

    The caller is responsible for resolving the system prompt for the active
    model (e.g. via ``prepare_prompt_for_model``). External tool overhead is
    deliberately ignored — it was guesswork anyway.

    Args:
        system_prompt: The already-resolved instruction/system prompt string.
        pydantic_tools: The pydantic-ai agent's ``_tools`` dict, or ``None``.
            (The annotation was ``dict[str, Any | None]``, contradicting this
            docstring and the falsy-guard below; fixed to
            ``dict[str, Any] | None``.)
        model_name: Optional model identifier used to apply a per-model
            calibration multiplier.

    Returns:
        Estimated total token overhead, scaled by the model multiplier.
    """
    total = 0
    if system_prompt:
        total += estimate_tokens(system_prompt)

    if not pydantic_tools:
        return _apply_multiplier(total, model_name)

    for tool_name, tool_func in pydantic_tools.items():
        # Tool names, docstrings, and schemas all land in the request payload.
        total += estimate_tokens(tool_name)

        description = getattr(tool_func, "__doc__", None) or ""
        if description:
            total += estimate_tokens(description)

        schema = getattr(tool_func, "schema", None)
        if schema is not None:
            schema_str = json.dumps(schema) if isinstance(schema, dict) else str(schema)
            total += estimate_tokens(schema_str)
        else:
            # No explicit schema: fall back to the callable's annotations as a
            # rough proxy for the generated parameter schema.
            annotations = get_annotations(tool_func)
            if annotations:
                total += estimate_tokens(str(annotations))

    return _apply_multiplier(total, model_name)
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
# Pydantic-AI has FIVE part kinds that carry a tool_call_id:
# * tool-call            -> ToolCallPart (regular tool call)
# * tool-return          -> ToolReturnPart (regular tool response)
# * builtin-tool-call    -> BuiltinToolCallPart (claude extended-thinking, etc.)
# * builtin-tool-return  -> BuiltinToolReturnPart (builtin tool response)
# * retry-prompt         -> RetryPromptPart
#                           (assistant told to retry; acts as a response)
#
# Treating only `tool-call` / `tool-return` (and ignoring the others) caused
# subtle bugs: e.g. builtin tool calls on Claude Opus were counted as pending
# forever, deferring summarization on every turn.
_TOOL_CALL_PART_KINDS: frozenset[str] = frozenset({"tool-call", "builtin-tool-call"})
_TOOL_RETURN_PART_KINDS: frozenset[str] = frozenset(
    {"tool-return", "builtin-tool-return", "retry-prompt"}
)
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
def _classify_tool_part(part: object) -> str | None:
    """Classify a message part for tool_call_id pairing.

    Returns ``"call"`` for call-like parts, ``"return"`` for response-like
    parts, and ``None`` when the part carries no ``tool_call_id`` or its
    ``part_kind`` is unrelated to tool pairing.
    """
    if getattr(part, "tool_call_id", None) is None:
        return None
    kind = getattr(part, "part_kind", None)
    if kind in _TOOL_CALL_PART_KINDS:
        return "call"
    return "return" if kind in _TOOL_RETURN_PART_KINDS else None
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def prune_interrupted_tool_calls(
    messages: list[ModelMessage],
) -> list[ModelMessage]:
    """Remove messages whose tool calls or tool returns are unpaired.

    A ``tool_call_id`` seen only on the call side or only on the return side
    is mismatched; the model rejects such histories ("tool_use ids found
    without tool_result blocks"). Messages containing any mismatched id are
    dropped, with the remaining order preserved.
    """
    if not messages:
        return messages

    calls: set[str] = set()
    returns: set[str] = set()
    for message in messages:
        for part in getattr(message, "parts", []) or []:
            role = _classify_tool_part(part)
            if role == "call":
                calls.add(part.tool_call_id)
            elif role == "return":
                returns.add(part.tool_call_id)

    # Ids present on exactly one side of the pairing.
    orphaned = calls ^ returns
    if not orphaned:
        return messages

    def _is_clean(message: ModelMessage) -> bool:
        # A message survives only if none of its parts touch an orphaned id.
        return all(
            getattr(part, "tool_call_id", None) not in orphaned
            for part in getattr(message, "parts", []) or []
        )

    return [message for message in messages if _is_clean(message)]
|
|
292
|
+
|
|
293
|
+
|
|
294
|
+
def has_pending_tool_calls(messages: list[ModelMessage]) -> bool:
    """Report whether some tool call is still awaiting its response.

    Honours both regular (``tool-call`` / ``tool-return``) and builtin
    (``builtin-tool-call`` / ``builtin-tool-return``) pairings, and counts
    a ``retry-prompt`` as a valid response form.
    """
    pending: set[str] = set()
    answered: set[str] = set()

    for message in messages or []:
        for part in getattr(message, "parts", []) or []:
            role = _classify_tool_part(part)
            if role == "call":
                pending.add(part.tool_call_id)
            elif role == "return":
                answered.add(part.tool_call_id)

    # Pending means at least one call id never showed up as a return.
    return not pending.issubset(answered)
|
|
316
|
+
|
|
317
|
+
|
|
318
|
+
def filter_huge_messages(
    messages: list[ModelMessage],
    model_name: str | None = None,
    cache: Any | None = None,  # CompactionCache when available
    *,
    max_tokens: int = 50_000,
) -> list[ModelMessage]:
    """Drop individual messages at or above a per-message token budget.

    Args:
        messages: Conversation history to filter.
        model_name: Optional model name forwarded to the token estimator.
        cache: Optional ``CompactionCache``; when provided, its cached
            estimator is used instead of ``estimate_tokens_for_message``.
        max_tokens: Per-message token budget. Messages whose estimate is
            ``>= max_tokens`` are dropped (defaults to the historical 50k).

    Returns:
        The surviving messages, with any newly orphaned tool call/return
        pairs removed via :func:`prune_interrupted_tool_calls`.
    """
    # Hoist the estimator choice out of the loop; `cache` is constant here.
    estimate = cache.estimate_tokens if cache else estimate_tokens_for_message
    kept = [m for m in messages if estimate(m, model_name) < max_tokens]
    return prune_interrupted_tool_calls(kept)
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
# ---------------------------------------------------------------------------
|
|
338
|
+
# PERF-04: Per-compaction-run cache
|
|
339
|
+
# ---------------------------------------------------------------------------
|
|
340
|
+
|
|
341
|
+
|
|
342
|
+
@dataclasses.dataclass
class CompactionCache:
    """Per-compaction-run cache for message hashes and token estimates.

    A fresh instance is created at the start of each ``compact()`` call, so
    repeated ``hash_message()`` / ``estimate_tokens_for_message()`` work on
    the same ``ModelMessage`` objects happens once per cycle. Because the
    cache is scoped to a single compaction run and dropped afterwards, it
    never keeps message objects alive past their natural lifecycle.
    """

    # id(message) -> int hash of that message
    _message_hashes: dict[int, int] = dataclasses.field(default_factory=dict)
    # (id(message), model_name_or_None) -> int token count
    _token_counts: dict[tuple[int, str | None], int] = dataclasses.field(
        default_factory=dict
    )

    def hash_message(self, message: ModelMessage) -> int:
        """Cached wrapper around the module-level :func:`hash_message`."""
        key = id(message)
        cached = self._message_hashes.get(key)
        if cached is None:
            cached = hash_message(message)
            self._message_hashes[key] = cached
        return cached

    def estimate_tokens(
        self, message: ModelMessage, model_name: str | None = None
    ) -> int:
        """Cached wrapper around :func:`estimate_tokens_for_message`."""
        cache_key = (id(message), model_name)
        cached = self._token_counts.get(cache_key)
        if cached is None:
            cached = estimate_tokens_for_message(message, model_name)
            self._token_counts[cache_key] = cached
        return cached

    def sum_tokens(
        self, messages: list[ModelMessage], model_name: str | None = None
    ) -> int:
        """Total estimated tokens across *messages*, via the per-message cache."""
        total = 0
        for message in messages:
            total += self.estimate_tokens(message, model_name)
        return total
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
"""Keyboard listener thread helpers, extracted from ``BaseAgent``.
|
|
2
|
+
|
|
3
|
+
These functions listen for Ctrl+X (shell cancel) and optionally the configured
|
|
4
|
+
cancel-agent key (when it's not bound to a signal like SIGINT). Previously
|
|
5
|
+
they lived as methods on ``BaseAgent`` but they never touched ``self``, so
|
|
6
|
+
they're free functions now.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import threading
|
|
10
|
+
from collections.abc import Callable
|
|
11
|
+
|
|
12
|
+
from code_muse.keymap import cancel_agent_uses_signal, get_cancel_agent_char_code
|
|
13
|
+
from code_muse.messaging import emit_warning
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def spawn_key_listener(
    stop_event: threading.Event,
    on_escape: Callable[[], None],
    on_cancel_agent: Callable[[], None] | None = None,
) -> threading.Thread | None:
    """Start a daemon thread that listens for Ctrl+X / cancel-agent keys.

    Args:
        stop_event: Signal the listener to stop.
        on_escape: Callback for Ctrl+X (shell command cancel).
        on_cancel_agent: Optional callback for the configured cancel-agent
            key, only used when ``cancel_agent_uses_signal()`` is False.

    Returns:
        The started Thread, or None if stdin isn't a TTY (or otherwise
        unusable, e.g. during tests).
    """
    # NOTE: importing ``sys`` can never raise ImportError (it is a built-in
    # module that is always loaded), so the old try/except guard was dead code.
    import sys

    stdin = getattr(sys, "stdin", None)
    if stdin is None or not hasattr(stdin, "isatty"):
        return None
    try:
        if not stdin.isatty():
            return None
    except Exception:
        # Replaced stdin objects (test harnesses, captured streams) may raise
        # from isatty(); treat them as "not a terminal".
        return None

    def listener() -> None:
        try:
            if sys.platform.startswith("win"):
                _listen_windows(stop_event, on_escape, on_cancel_agent)
            else:
                _listen_posix(stop_event, on_escape, on_cancel_agent)
        except Exception:
            emit_warning("Key listener stopped unexpectedly; press Ctrl+C to cancel.")

    thread = threading.Thread(
        target=listener, name="muse-key-listener", daemon=True
    )
    thread.start()
    return thread
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _listen_windows(
    stop_event: threading.Event,
    on_escape: Callable[[], None],
    on_cancel_agent: Callable[[], None] | None = None,
) -> None:
    """Poll the Windows console for Ctrl+X / cancel-agent keypresses.

    Runs until *stop_event* is set, checking the keyboard roughly every
    50 ms. Handler exceptions are reported via ``emit_warning`` and never
    propagate; an error from the console API itself ends the listener.
    """
    import msvcrt
    import time

    cancel_char: str | None = None
    if on_cancel_agent is not None and not cancel_agent_uses_signal():
        cancel_char = get_cancel_agent_char_code()

    def invoke(callback: Callable[[], None], failure_note: str) -> None:
        # Run a user callback, downgrading any exception to a warning.
        try:
            callback()
        except Exception:
            emit_warning(failure_note)

    while not stop_event.is_set():
        try:
            if msvcrt.kbhit():
                pressed = msvcrt.getwch()
                if pressed == "\x18":  # Ctrl+X
                    invoke(
                        on_escape,
                        "Ctrl+X handler raised unexpectedly; Ctrl+C still works.",
                    )
                elif cancel_char and on_cancel_agent and pressed == cancel_char:
                    invoke(
                        on_cancel_agent,
                        "Cancel agent handler raised unexpectedly.",
                    )
        except Exception:
            emit_warning(
                "Windows key listener error; Ctrl+C is still available for cancel."
            )
            return
        time.sleep(0.05)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def _listen_posix(
|
|
100
|
+
stop_event: threading.Event,
|
|
101
|
+
on_escape: Callable[[], None],
|
|
102
|
+
on_cancel_agent: Callable[[], None] | None = None,
|
|
103
|
+
) -> None:
|
|
104
|
+
import select
|
|
105
|
+
import sys
|
|
106
|
+
import termios
|
|
107
|
+
import tty
|
|
108
|
+
|
|
109
|
+
cancel_agent_char: str | None = None
|
|
110
|
+
if on_cancel_agent is not None and not cancel_agent_uses_signal():
|
|
111
|
+
cancel_agent_char = get_cancel_agent_char_code()
|
|
112
|
+
|
|
113
|
+
stdin = sys.stdin
|
|
114
|
+
try:
|
|
115
|
+
fd = stdin.fileno()
|
|
116
|
+
except AttributeError, ValueError, OSError:
|
|
117
|
+
return
|
|
118
|
+
try:
|
|
119
|
+
original_attrs = termios.tcgetattr(fd)
|
|
120
|
+
except Exception:
|
|
121
|
+
return
|
|
122
|
+
|
|
123
|
+
try:
|
|
124
|
+
tty.setcbreak(fd)
|
|
125
|
+
while not stop_event.is_set():
|
|
126
|
+
try:
|
|
127
|
+
read_ready, _, _ = select.select([stdin], [], [], 0.05)
|
|
128
|
+
except Exception:
|
|
129
|
+
break
|
|
130
|
+
if not read_ready:
|
|
131
|
+
continue
|
|
132
|
+
data = stdin.read(1)
|
|
133
|
+
if not data:
|
|
134
|
+
break
|
|
135
|
+
if data == "\x18": # Ctrl+X
|
|
136
|
+
try:
|
|
137
|
+
on_escape()
|
|
138
|
+
except Exception:
|
|
139
|
+
emit_warning(
|
|
140
|
+
"Ctrl+X handler raised unexpectedly; Ctrl+C still works."
|
|
141
|
+
)
|
|
142
|
+
elif cancel_agent_char and on_cancel_agent and data == cancel_agent_char:
|
|
143
|
+
try:
|
|
144
|
+
on_cancel_agent()
|
|
145
|
+
except Exception:
|
|
146
|
+
emit_warning("Cancel agent handler raised unexpectedly.")
|
|
147
|
+
finally:
|
|
148
|
+
termios.tcsetattr(fd, termios.TCSADRAIN, original_attrs)
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
"""Fallback rendering for agent runs where streaming didn't emit text.
|
|
2
|
+
|
|
3
|
+
Some backends buffer responses and never emit SSE text deltas. In that case
|
|
4
|
+
``event_stream_handler`` runs the tool/thinking paths but never prints the
|
|
5
|
+
final answer, so the user sees a silent agent. This module provides:
|
|
6
|
+
|
|
7
|
+
* ``StreamingTextDetector`` — a thin wrapper around ``event_stream_handler``
|
|
8
|
+
that records whether a ``TextPart`` / ``TextPartDelta`` ever appeared.
|
|
9
|
+
* ``render_result_without_streaming`` — a one-shot renderer that walks
|
|
10
|
+
``result.all_messages()`` and prints thinking + final text via the
|
|
11
|
+
non-streaming markdown path.
|
|
12
|
+
|
|
13
|
+
The detector is async-safe and forwards every event untouched; it only
|
|
14
|
+
observes. The renderer is best-effort: a render failure must never kill the
|
|
15
|
+
run because the caller still has the raw result.
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
from collections.abc import Callable
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
from pydantic_ai import PartDeltaEvent, PartStartEvent
|
|
22
|
+
from pydantic_ai.messages import (
|
|
23
|
+
ModelResponse,
|
|
24
|
+
TextPart,
|
|
25
|
+
TextPartDelta,
|
|
26
|
+
ThinkingPart,
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
from code_muse.tools.display import display_non_streamed_result
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class StreamingTextDetector:
    """Event-stream-handler wrapper that records whether any text streamed.

    Forwards every event to the wrapped handler untouched; flips
    ``streamed_text`` to True the first time a non-empty ``TextPart`` or
    ``TextPartDelta`` passes through.
    """

    def __init__(self, inner: Callable[..., Any]) -> None:
        self._inner = inner
        # True once a non-empty TextPart / TextPartDelta has been observed.
        self.streamed_text: bool = False

    def _note(self, event: Any) -> None:
        """Record text activity for a single stream event."""
        if isinstance(event, PartStartEvent):
            part = getattr(event, "part", None)
            if isinstance(part, TextPart):
                text = getattr(part, "content", "") or ""
                if text.strip():
                    self.streamed_text = True
        elif isinstance(event, PartDeltaEvent):
            delta = getattr(event, "delta", None)
            if isinstance(delta, TextPartDelta):
                if getattr(delta, "content_delta", ""):
                    self.streamed_text = True

    async def __call__(self, ctx: Any, events: Any) -> Any:
        async def observed() -> Any:
            # Tee: observe each event, then pass it through unchanged.
            async for event in events:
                self._note(event)
                yield event

        return await self._inner(ctx, observed())
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _collect_thinking_from_messages(result: Any) -> str:
    """Join ``ThinkingPart`` text from every ModelResponse except the last.

    The final response's text is the answer and is rendered separately, so
    only intermediate responses contribute thinking content here.
    """
    try:
        history = list(result.all_messages())
    except Exception:
        return ""

    responses = [msg for msg in history if isinstance(msg, ModelResponse)]
    if len(responses) < 2:
        return ""

    pieces: list[str] = []
    for response in responses[:-1]:
        for part in getattr(response, "parts", []) or []:
            if not isinstance(part, ThinkingPart):
                continue
            text = getattr(part, "content", "") or ""
            if text.strip():
                pieces.append(text)
    return "\n\n".join(pieces)
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def _collect_final_text_from_messages(result: Any) -> str:
    """Return the concatenated ``TextPart`` text of the final ModelResponse."""
    try:
        history = list(result.all_messages())
    except Exception:
        return ""

    # Walk backwards to find the last ModelResponse, if any.
    final = next(
        (msg for msg in reversed(history) if isinstance(msg, ModelResponse)),
        None,
    )
    if final is None:
        return ""
    return "".join(
        getattr(part, "content", "") or ""
        for part in (getattr(final, "parts", []) or [])
        if isinstance(part, TextPart)
    )
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def render_result_without_streaming(result: Any) -> None:
    """Render ``result`` through the non-streaming markdown path.

    Prints the THINKING banner when intermediate thinking was captured,
    then the final agent response. Best-effort: any rendering failure is
    swallowed because the caller still holds the raw result.
    """
    try:
        thinking = _collect_thinking_from_messages(result)
        if thinking.strip():
            display_non_streamed_result(
                thinking,
                banner_text="THINKING",
                banner_name="thinking",
            )

        answer = _collect_final_text_from_messages(result)
        if answer.strip():
            display_non_streamed_result(answer)
    except Exception:
        # Rendering is best-effort: the caller still gets the raw result.
        pass
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def should_render_fallback(
    detector: StreamingTextDetector | None,
    *,
    skip: bool,
) -> bool:
    """Decide whether to render the final result ourselves.

    ``skip`` wins unconditionally (e.g. plugins may render their own
    output). Otherwise, fall back when streaming was disabled (no detector)
    or the detector never observed a TextPart fire.
    """
    if skip:
        return False
    if detector is None:
        return True
    return not detector.streamed_text
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
# Public API of this module.
__all__ = [
    "StreamingTextDetector",
    "render_result_without_streaming",
    "should_render_fallback",
]
|