agentpool 2.1.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of agentpool has been flagged as possibly problematic. See the package's advisory page in the registry for details.
- acp/README.md +64 -0
- acp/__init__.py +172 -0
- acp/__main__.py +10 -0
- acp/acp_requests.py +285 -0
- acp/agent/__init__.py +6 -0
- acp/agent/connection.py +256 -0
- acp/agent/implementations/__init__.py +6 -0
- acp/agent/implementations/debug_server/__init__.py +1 -0
- acp/agent/implementations/debug_server/cli.py +79 -0
- acp/agent/implementations/debug_server/debug.html +234 -0
- acp/agent/implementations/debug_server/debug_server.py +496 -0
- acp/agent/implementations/testing.py +91 -0
- acp/agent/protocol.py +65 -0
- acp/bridge/README.md +162 -0
- acp/bridge/__init__.py +6 -0
- acp/bridge/__main__.py +91 -0
- acp/bridge/bridge.py +246 -0
- acp/bridge/py.typed +0 -0
- acp/bridge/settings.py +15 -0
- acp/client/__init__.py +7 -0
- acp/client/connection.py +251 -0
- acp/client/implementations/__init__.py +7 -0
- acp/client/implementations/default_client.py +185 -0
- acp/client/implementations/headless_client.py +266 -0
- acp/client/implementations/noop_client.py +110 -0
- acp/client/protocol.py +61 -0
- acp/connection.py +280 -0
- acp/exceptions.py +46 -0
- acp/filesystem.py +524 -0
- acp/notifications.py +832 -0
- acp/py.typed +0 -0
- acp/schema/__init__.py +265 -0
- acp/schema/agent_plan.py +30 -0
- acp/schema/agent_requests.py +126 -0
- acp/schema/agent_responses.py +256 -0
- acp/schema/base.py +39 -0
- acp/schema/capabilities.py +230 -0
- acp/schema/client_requests.py +247 -0
- acp/schema/client_responses.py +96 -0
- acp/schema/common.py +81 -0
- acp/schema/content_blocks.py +188 -0
- acp/schema/mcp.py +82 -0
- acp/schema/messages.py +171 -0
- acp/schema/notifications.py +82 -0
- acp/schema/protocol_stuff.md +3 -0
- acp/schema/session_state.py +160 -0
- acp/schema/session_updates.py +419 -0
- acp/schema/slash_commands.py +51 -0
- acp/schema/terminal.py +15 -0
- acp/schema/tool_call.py +347 -0
- acp/stdio.py +250 -0
- acp/task/__init__.py +53 -0
- acp/task/debug.py +197 -0
- acp/task/dispatcher.py +93 -0
- acp/task/queue.py +69 -0
- acp/task/sender.py +82 -0
- acp/task/state.py +87 -0
- acp/task/supervisor.py +93 -0
- acp/terminal_handle.py +30 -0
- acp/tool_call_reporter.py +199 -0
- acp/tool_call_state.py +178 -0
- acp/transports.py +104 -0
- acp/utils.py +240 -0
- agentpool/__init__.py +63 -0
- agentpool/__main__.py +7 -0
- agentpool/agents/__init__.py +30 -0
- agentpool/agents/acp_agent/__init__.py +5 -0
- agentpool/agents/acp_agent/acp_agent.py +837 -0
- agentpool/agents/acp_agent/acp_converters.py +294 -0
- agentpool/agents/acp_agent/client_handler.py +317 -0
- agentpool/agents/acp_agent/session_state.py +44 -0
- agentpool/agents/agent.py +1264 -0
- agentpool/agents/agui_agent/__init__.py +19 -0
- agentpool/agents/agui_agent/agui_agent.py +677 -0
- agentpool/agents/agui_agent/agui_converters.py +423 -0
- agentpool/agents/agui_agent/chunk_transformer.py +204 -0
- agentpool/agents/agui_agent/event_types.py +83 -0
- agentpool/agents/agui_agent/helpers.py +192 -0
- agentpool/agents/architect.py +71 -0
- agentpool/agents/base_agent.py +177 -0
- agentpool/agents/claude_code_agent/__init__.py +11 -0
- agentpool/agents/claude_code_agent/claude_code_agent.py +1021 -0
- agentpool/agents/claude_code_agent/converters.py +243 -0
- agentpool/agents/context.py +105 -0
- agentpool/agents/events/__init__.py +61 -0
- agentpool/agents/events/builtin_handlers.py +129 -0
- agentpool/agents/events/event_emitter.py +320 -0
- agentpool/agents/events/events.py +561 -0
- agentpool/agents/events/tts_handlers.py +186 -0
- agentpool/agents/interactions.py +419 -0
- agentpool/agents/slashed_agent.py +244 -0
- agentpool/agents/sys_prompts.py +178 -0
- agentpool/agents/tool_wrapping.py +184 -0
- agentpool/base_provider.py +28 -0
- agentpool/common_types.py +226 -0
- agentpool/config_resources/__init__.py +16 -0
- agentpool/config_resources/acp_assistant.yml +24 -0
- agentpool/config_resources/agents.yml +109 -0
- agentpool/config_resources/agents_template.yml +18 -0
- agentpool/config_resources/agui_test.yml +18 -0
- agentpool/config_resources/claude_code_agent.yml +16 -0
- agentpool/config_resources/claude_style_subagent.md +30 -0
- agentpool/config_resources/external_acp_agents.yml +77 -0
- agentpool/config_resources/opencode_style_subagent.md +19 -0
- agentpool/config_resources/tts_test_agents.yml +78 -0
- agentpool/delegation/__init__.py +8 -0
- agentpool/delegation/base_team.py +504 -0
- agentpool/delegation/message_flow_tracker.py +39 -0
- agentpool/delegation/pool.py +1129 -0
- agentpool/delegation/team.py +325 -0
- agentpool/delegation/teamrun.py +343 -0
- agentpool/docs/__init__.py +5 -0
- agentpool/docs/gen_examples.py +42 -0
- agentpool/docs/utils.py +370 -0
- agentpool/functional/__init__.py +20 -0
- agentpool/functional/py.typed +0 -0
- agentpool/functional/run.py +80 -0
- agentpool/functional/structure.py +136 -0
- agentpool/hooks/__init__.py +20 -0
- agentpool/hooks/agent_hooks.py +247 -0
- agentpool/hooks/base.py +119 -0
- agentpool/hooks/callable.py +140 -0
- agentpool/hooks/command.py +180 -0
- agentpool/hooks/prompt.py +122 -0
- agentpool/jinja_filters.py +132 -0
- agentpool/log.py +224 -0
- agentpool/mcp_server/__init__.py +17 -0
- agentpool/mcp_server/client.py +429 -0
- agentpool/mcp_server/constants.py +32 -0
- agentpool/mcp_server/conversions.py +172 -0
- agentpool/mcp_server/helpers.py +47 -0
- agentpool/mcp_server/manager.py +232 -0
- agentpool/mcp_server/message_handler.py +164 -0
- agentpool/mcp_server/registries/__init__.py +1 -0
- agentpool/mcp_server/registries/official_registry_client.py +345 -0
- agentpool/mcp_server/registries/pulsemcp_client.py +88 -0
- agentpool/mcp_server/tool_bridge.py +548 -0
- agentpool/messaging/__init__.py +58 -0
- agentpool/messaging/compaction.py +928 -0
- agentpool/messaging/connection_manager.py +319 -0
- agentpool/messaging/context.py +66 -0
- agentpool/messaging/event_manager.py +426 -0
- agentpool/messaging/events.py +39 -0
- agentpool/messaging/message_container.py +209 -0
- agentpool/messaging/message_history.py +491 -0
- agentpool/messaging/messagenode.py +377 -0
- agentpool/messaging/messages.py +655 -0
- agentpool/messaging/processing.py +76 -0
- agentpool/mime_utils.py +95 -0
- agentpool/models/__init__.py +21 -0
- agentpool/models/acp_agents/__init__.py +22 -0
- agentpool/models/acp_agents/base.py +308 -0
- agentpool/models/acp_agents/mcp_capable.py +790 -0
- agentpool/models/acp_agents/non_mcp.py +842 -0
- agentpool/models/agents.py +450 -0
- agentpool/models/agui_agents.py +89 -0
- agentpool/models/claude_code_agents.py +238 -0
- agentpool/models/file_agents.py +116 -0
- agentpool/models/file_parsing.py +367 -0
- agentpool/models/manifest.py +658 -0
- agentpool/observability/__init__.py +9 -0
- agentpool/observability/observability_registry.py +97 -0
- agentpool/prompts/__init__.py +1 -0
- agentpool/prompts/base.py +27 -0
- agentpool/prompts/builtin_provider.py +75 -0
- agentpool/prompts/conversion_manager.py +95 -0
- agentpool/prompts/convert.py +96 -0
- agentpool/prompts/manager.py +204 -0
- agentpool/prompts/parts/zed.md +33 -0
- agentpool/prompts/prompts.py +581 -0
- agentpool/py.typed +0 -0
- agentpool/queries/tree-sitter-language-pack/README.md +7 -0
- agentpool/queries/tree-sitter-language-pack/arduino-tags.scm +5 -0
- agentpool/queries/tree-sitter-language-pack/c-tags.scm +9 -0
- agentpool/queries/tree-sitter-language-pack/chatito-tags.scm +16 -0
- agentpool/queries/tree-sitter-language-pack/clojure-tags.scm +7 -0
- agentpool/queries/tree-sitter-language-pack/commonlisp-tags.scm +122 -0
- agentpool/queries/tree-sitter-language-pack/cpp-tags.scm +15 -0
- agentpool/queries/tree-sitter-language-pack/csharp-tags.scm +26 -0
- agentpool/queries/tree-sitter-language-pack/d-tags.scm +26 -0
- agentpool/queries/tree-sitter-language-pack/dart-tags.scm +92 -0
- agentpool/queries/tree-sitter-language-pack/elisp-tags.scm +5 -0
- agentpool/queries/tree-sitter-language-pack/elixir-tags.scm +54 -0
- agentpool/queries/tree-sitter-language-pack/elm-tags.scm +19 -0
- agentpool/queries/tree-sitter-language-pack/gleam-tags.scm +41 -0
- agentpool/queries/tree-sitter-language-pack/go-tags.scm +42 -0
- agentpool/queries/tree-sitter-language-pack/java-tags.scm +20 -0
- agentpool/queries/tree-sitter-language-pack/javascript-tags.scm +88 -0
- agentpool/queries/tree-sitter-language-pack/lua-tags.scm +34 -0
- agentpool/queries/tree-sitter-language-pack/matlab-tags.scm +10 -0
- agentpool/queries/tree-sitter-language-pack/ocaml-tags.scm +115 -0
- agentpool/queries/tree-sitter-language-pack/ocaml_interface-tags.scm +98 -0
- agentpool/queries/tree-sitter-language-pack/pony-tags.scm +39 -0
- agentpool/queries/tree-sitter-language-pack/properties-tags.scm +5 -0
- agentpool/queries/tree-sitter-language-pack/python-tags.scm +14 -0
- agentpool/queries/tree-sitter-language-pack/r-tags.scm +21 -0
- agentpool/queries/tree-sitter-language-pack/racket-tags.scm +12 -0
- agentpool/queries/tree-sitter-language-pack/ruby-tags.scm +64 -0
- agentpool/queries/tree-sitter-language-pack/rust-tags.scm +60 -0
- agentpool/queries/tree-sitter-language-pack/solidity-tags.scm +43 -0
- agentpool/queries/tree-sitter-language-pack/swift-tags.scm +51 -0
- agentpool/queries/tree-sitter-language-pack/udev-tags.scm +20 -0
- agentpool/queries/tree-sitter-languages/README.md +24 -0
- agentpool/queries/tree-sitter-languages/c-tags.scm +9 -0
- agentpool/queries/tree-sitter-languages/c_sharp-tags.scm +46 -0
- agentpool/queries/tree-sitter-languages/cpp-tags.scm +15 -0
- agentpool/queries/tree-sitter-languages/dart-tags.scm +91 -0
- agentpool/queries/tree-sitter-languages/elisp-tags.scm +8 -0
- agentpool/queries/tree-sitter-languages/elixir-tags.scm +54 -0
- agentpool/queries/tree-sitter-languages/elm-tags.scm +19 -0
- agentpool/queries/tree-sitter-languages/fortran-tags.scm +15 -0
- agentpool/queries/tree-sitter-languages/go-tags.scm +30 -0
- agentpool/queries/tree-sitter-languages/haskell-tags.scm +3 -0
- agentpool/queries/tree-sitter-languages/hcl-tags.scm +77 -0
- agentpool/queries/tree-sitter-languages/java-tags.scm +20 -0
- agentpool/queries/tree-sitter-languages/javascript-tags.scm +88 -0
- agentpool/queries/tree-sitter-languages/julia-tags.scm +60 -0
- agentpool/queries/tree-sitter-languages/kotlin-tags.scm +27 -0
- agentpool/queries/tree-sitter-languages/matlab-tags.scm +10 -0
- agentpool/queries/tree-sitter-languages/ocaml-tags.scm +115 -0
- agentpool/queries/tree-sitter-languages/ocaml_interface-tags.scm +98 -0
- agentpool/queries/tree-sitter-languages/php-tags.scm +26 -0
- agentpool/queries/tree-sitter-languages/python-tags.scm +12 -0
- agentpool/queries/tree-sitter-languages/ql-tags.scm +26 -0
- agentpool/queries/tree-sitter-languages/ruby-tags.scm +64 -0
- agentpool/queries/tree-sitter-languages/rust-tags.scm +60 -0
- agentpool/queries/tree-sitter-languages/scala-tags.scm +65 -0
- agentpool/queries/tree-sitter-languages/typescript-tags.scm +41 -0
- agentpool/queries/tree-sitter-languages/zig-tags.scm +3 -0
- agentpool/repomap.py +1231 -0
- agentpool/resource_providers/__init__.py +17 -0
- agentpool/resource_providers/aggregating.py +54 -0
- agentpool/resource_providers/base.py +172 -0
- agentpool/resource_providers/codemode/__init__.py +9 -0
- agentpool/resource_providers/codemode/code_executor.py +215 -0
- agentpool/resource_providers/codemode/default_prompt.py +19 -0
- agentpool/resource_providers/codemode/helpers.py +83 -0
- agentpool/resource_providers/codemode/progress_executor.py +212 -0
- agentpool/resource_providers/codemode/provider.py +150 -0
- agentpool/resource_providers/codemode/remote_mcp_execution.py +143 -0
- agentpool/resource_providers/codemode/remote_provider.py +171 -0
- agentpool/resource_providers/filtering.py +42 -0
- agentpool/resource_providers/mcp_provider.py +246 -0
- agentpool/resource_providers/plan_provider.py +196 -0
- agentpool/resource_providers/pool.py +69 -0
- agentpool/resource_providers/static.py +289 -0
- agentpool/running/__init__.py +20 -0
- agentpool/running/decorators.py +56 -0
- agentpool/running/discovery.py +101 -0
- agentpool/running/executor.py +284 -0
- agentpool/running/injection.py +111 -0
- agentpool/running/py.typed +0 -0
- agentpool/running/run_nodes.py +87 -0
- agentpool/server.py +122 -0
- agentpool/sessions/__init__.py +13 -0
- agentpool/sessions/manager.py +302 -0
- agentpool/sessions/models.py +71 -0
- agentpool/sessions/session.py +239 -0
- agentpool/sessions/store.py +163 -0
- agentpool/skills/__init__.py +5 -0
- agentpool/skills/manager.py +120 -0
- agentpool/skills/registry.py +210 -0
- agentpool/skills/skill.py +36 -0
- agentpool/storage/__init__.py +17 -0
- agentpool/storage/manager.py +419 -0
- agentpool/storage/serialization.py +136 -0
- agentpool/talk/__init__.py +13 -0
- agentpool/talk/registry.py +128 -0
- agentpool/talk/stats.py +159 -0
- agentpool/talk/talk.py +604 -0
- agentpool/tasks/__init__.py +20 -0
- agentpool/tasks/exceptions.py +25 -0
- agentpool/tasks/registry.py +33 -0
- agentpool/testing.py +129 -0
- agentpool/text_templates/__init__.py +39 -0
- agentpool/text_templates/system_prompt.jinja +30 -0
- agentpool/text_templates/tool_call_default.jinja +13 -0
- agentpool/text_templates/tool_call_markdown.jinja +25 -0
- agentpool/text_templates/tool_call_simple.jinja +5 -0
- agentpool/tools/__init__.py +16 -0
- agentpool/tools/base.py +269 -0
- agentpool/tools/exceptions.py +9 -0
- agentpool/tools/manager.py +255 -0
- agentpool/tools/tool_call_info.py +87 -0
- agentpool/ui/__init__.py +2 -0
- agentpool/ui/base.py +89 -0
- agentpool/ui/mock_provider.py +81 -0
- agentpool/ui/stdlib_provider.py +150 -0
- agentpool/utils/__init__.py +44 -0
- agentpool/utils/baseregistry.py +185 -0
- agentpool/utils/count_tokens.py +62 -0
- agentpool/utils/dag.py +184 -0
- agentpool/utils/importing.py +206 -0
- agentpool/utils/inspection.py +334 -0
- agentpool/utils/model_capabilities.py +25 -0
- agentpool/utils/network.py +28 -0
- agentpool/utils/now.py +22 -0
- agentpool/utils/parse_time.py +87 -0
- agentpool/utils/result_utils.py +35 -0
- agentpool/utils/signatures.py +305 -0
- agentpool/utils/streams.py +112 -0
- agentpool/utils/tasks.py +186 -0
- agentpool/vfs_registry.py +250 -0
- agentpool-2.1.9.dist-info/METADATA +336 -0
- agentpool-2.1.9.dist-info/RECORD +474 -0
- agentpool-2.1.9.dist-info/WHEEL +4 -0
- agentpool-2.1.9.dist-info/entry_points.txt +14 -0
- agentpool-2.1.9.dist-info/licenses/LICENSE +22 -0
- agentpool_cli/__init__.py +34 -0
- agentpool_cli/__main__.py +66 -0
- agentpool_cli/agent.py +175 -0
- agentpool_cli/cli_types.py +23 -0
- agentpool_cli/common.py +163 -0
- agentpool_cli/create.py +175 -0
- agentpool_cli/history.py +217 -0
- agentpool_cli/log.py +78 -0
- agentpool_cli/py.typed +0 -0
- agentpool_cli/run.py +84 -0
- agentpool_cli/serve_acp.py +177 -0
- agentpool_cli/serve_api.py +69 -0
- agentpool_cli/serve_mcp.py +74 -0
- agentpool_cli/serve_vercel.py +233 -0
- agentpool_cli/store.py +171 -0
- agentpool_cli/task.py +84 -0
- agentpool_cli/utils.py +104 -0
- agentpool_cli/watch.py +54 -0
- agentpool_commands/__init__.py +180 -0
- agentpool_commands/agents.py +199 -0
- agentpool_commands/base.py +45 -0
- agentpool_commands/commands.py +58 -0
- agentpool_commands/completers.py +110 -0
- agentpool_commands/connections.py +175 -0
- agentpool_commands/markdown_utils.py +31 -0
- agentpool_commands/models.py +62 -0
- agentpool_commands/prompts.py +78 -0
- agentpool_commands/py.typed +0 -0
- agentpool_commands/read.py +77 -0
- agentpool_commands/resources.py +210 -0
- agentpool_commands/session.py +48 -0
- agentpool_commands/tools.py +269 -0
- agentpool_commands/utils.py +189 -0
- agentpool_commands/workers.py +163 -0
- agentpool_config/__init__.py +53 -0
- agentpool_config/builtin_tools.py +265 -0
- agentpool_config/commands.py +237 -0
- agentpool_config/conditions.py +301 -0
- agentpool_config/converters.py +30 -0
- agentpool_config/durable.py +331 -0
- agentpool_config/event_handlers.py +600 -0
- agentpool_config/events.py +153 -0
- agentpool_config/forward_targets.py +251 -0
- agentpool_config/hook_conditions.py +331 -0
- agentpool_config/hooks.py +241 -0
- agentpool_config/jinja.py +206 -0
- agentpool_config/knowledge.py +41 -0
- agentpool_config/loaders.py +350 -0
- agentpool_config/mcp_server.py +243 -0
- agentpool_config/nodes.py +202 -0
- agentpool_config/observability.py +191 -0
- agentpool_config/output_types.py +55 -0
- agentpool_config/pool_server.py +267 -0
- agentpool_config/prompt_hubs.py +105 -0
- agentpool_config/prompts.py +185 -0
- agentpool_config/py.typed +0 -0
- agentpool_config/resources.py +33 -0
- agentpool_config/session.py +119 -0
- agentpool_config/skills.py +17 -0
- agentpool_config/storage.py +288 -0
- agentpool_config/system_prompts.py +190 -0
- agentpool_config/task.py +162 -0
- agentpool_config/teams.py +52 -0
- agentpool_config/tools.py +112 -0
- agentpool_config/toolsets.py +1033 -0
- agentpool_config/workers.py +86 -0
- agentpool_prompts/__init__.py +1 -0
- agentpool_prompts/braintrust_hub.py +235 -0
- agentpool_prompts/fabric.py +75 -0
- agentpool_prompts/langfuse_hub.py +79 -0
- agentpool_prompts/promptlayer_provider.py +59 -0
- agentpool_prompts/py.typed +0 -0
- agentpool_server/__init__.py +9 -0
- agentpool_server/a2a_server/__init__.py +5 -0
- agentpool_server/a2a_server/a2a_types.py +41 -0
- agentpool_server/a2a_server/server.py +190 -0
- agentpool_server/a2a_server/storage.py +81 -0
- agentpool_server/acp_server/__init__.py +22 -0
- agentpool_server/acp_server/acp_agent.py +786 -0
- agentpool_server/acp_server/acp_tools.py +43 -0
- agentpool_server/acp_server/commands/__init__.py +18 -0
- agentpool_server/acp_server/commands/acp_commands.py +594 -0
- agentpool_server/acp_server/commands/debug_commands.py +376 -0
- agentpool_server/acp_server/commands/docs_commands/__init__.py +39 -0
- agentpool_server/acp_server/commands/docs_commands/fetch_repo.py +169 -0
- agentpool_server/acp_server/commands/docs_commands/get_schema.py +176 -0
- agentpool_server/acp_server/commands/docs_commands/get_source.py +110 -0
- agentpool_server/acp_server/commands/docs_commands/git_diff.py +111 -0
- agentpool_server/acp_server/commands/docs_commands/helpers.py +33 -0
- agentpool_server/acp_server/commands/docs_commands/url_to_markdown.py +90 -0
- agentpool_server/acp_server/commands/spawn.py +210 -0
- agentpool_server/acp_server/converters.py +235 -0
- agentpool_server/acp_server/input_provider.py +338 -0
- agentpool_server/acp_server/server.py +288 -0
- agentpool_server/acp_server/session.py +969 -0
- agentpool_server/acp_server/session_manager.py +313 -0
- agentpool_server/acp_server/syntax_detection.py +250 -0
- agentpool_server/acp_server/zed_tools.md +90 -0
- agentpool_server/aggregating_server.py +309 -0
- agentpool_server/agui_server/__init__.py +11 -0
- agentpool_server/agui_server/server.py +128 -0
- agentpool_server/base.py +189 -0
- agentpool_server/http_server.py +164 -0
- agentpool_server/mcp_server/__init__.py +6 -0
- agentpool_server/mcp_server/server.py +314 -0
- agentpool_server/mcp_server/zed_wrapper.py +110 -0
- agentpool_server/openai_api_server/__init__.py +5 -0
- agentpool_server/openai_api_server/completions/__init__.py +1 -0
- agentpool_server/openai_api_server/completions/helpers.py +81 -0
- agentpool_server/openai_api_server/completions/models.py +98 -0
- agentpool_server/openai_api_server/responses/__init__.py +1 -0
- agentpool_server/openai_api_server/responses/helpers.py +74 -0
- agentpool_server/openai_api_server/responses/models.py +96 -0
- agentpool_server/openai_api_server/server.py +242 -0
- agentpool_server/py.typed +0 -0
- agentpool_storage/__init__.py +9 -0
- agentpool_storage/base.py +310 -0
- agentpool_storage/file_provider.py +378 -0
- agentpool_storage/formatters.py +129 -0
- agentpool_storage/memory_provider.py +396 -0
- agentpool_storage/models.py +108 -0
- agentpool_storage/py.typed +0 -0
- agentpool_storage/session_store.py +262 -0
- agentpool_storage/sql_provider/__init__.py +21 -0
- agentpool_storage/sql_provider/cli.py +146 -0
- agentpool_storage/sql_provider/models.py +249 -0
- agentpool_storage/sql_provider/queries.py +15 -0
- agentpool_storage/sql_provider/sql_provider.py +444 -0
- agentpool_storage/sql_provider/utils.py +234 -0
- agentpool_storage/text_log_provider.py +275 -0
- agentpool_toolsets/__init__.py +15 -0
- agentpool_toolsets/builtin/__init__.py +33 -0
- agentpool_toolsets/builtin/agent_management.py +239 -0
- agentpool_toolsets/builtin/chain.py +288 -0
- agentpool_toolsets/builtin/code.py +398 -0
- agentpool_toolsets/builtin/debug.py +291 -0
- agentpool_toolsets/builtin/execution_environment.py +381 -0
- agentpool_toolsets/builtin/file_edit/__init__.py +11 -0
- agentpool_toolsets/builtin/file_edit/file_edit.py +747 -0
- agentpool_toolsets/builtin/file_edit/fuzzy_matcher/__init__.py +5 -0
- agentpool_toolsets/builtin/file_edit/fuzzy_matcher/example_usage.py +311 -0
- agentpool_toolsets/builtin/file_edit/fuzzy_matcher/streaming_fuzzy_matcher.py +443 -0
- agentpool_toolsets/builtin/history.py +36 -0
- agentpool_toolsets/builtin/integration.py +85 -0
- agentpool_toolsets/builtin/skills.py +77 -0
- agentpool_toolsets/builtin/subagent_tools.py +324 -0
- agentpool_toolsets/builtin/tool_management.py +90 -0
- agentpool_toolsets/builtin/user_interaction.py +52 -0
- agentpool_toolsets/builtin/workers.py +128 -0
- agentpool_toolsets/composio_toolset.py +96 -0
- agentpool_toolsets/config_creation.py +192 -0
- agentpool_toolsets/entry_points.py +47 -0
- agentpool_toolsets/fsspec_toolset/__init__.py +7 -0
- agentpool_toolsets/fsspec_toolset/diagnostics.py +115 -0
- agentpool_toolsets/fsspec_toolset/grep.py +450 -0
- agentpool_toolsets/fsspec_toolset/helpers.py +631 -0
- agentpool_toolsets/fsspec_toolset/streaming_diff_parser.py +249 -0
- agentpool_toolsets/fsspec_toolset/toolset.py +1384 -0
- agentpool_toolsets/mcp_run_toolset.py +61 -0
- agentpool_toolsets/notifications.py +146 -0
- agentpool_toolsets/openapi.py +118 -0
- agentpool_toolsets/py.typed +0 -0
- agentpool_toolsets/search_toolset.py +202 -0
- agentpool_toolsets/semantic_memory_toolset.py +536 -0
- agentpool_toolsets/streaming_tools.py +265 -0
- agentpool_toolsets/vfs_toolset.py +124 -0
|
@@ -0,0 +1,1384 @@
|
|
|
1
|
+
"""FSSpec filesystem toolset implementation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import contextlib
|
|
6
|
+
from fnmatch import fnmatch
|
|
7
|
+
import os
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
import time
|
|
10
|
+
from typing import TYPE_CHECKING, Any
|
|
11
|
+
from urllib.parse import urlparse
|
|
12
|
+
|
|
13
|
+
import anyio
|
|
14
|
+
from exxec.base import ExecutionEnvironment
|
|
15
|
+
from pydantic_ai import (
|
|
16
|
+
BinaryContent,
|
|
17
|
+
PartDeltaEvent,
|
|
18
|
+
PartStartEvent,
|
|
19
|
+
RunContext, # noqa: TC002
|
|
20
|
+
TextPart,
|
|
21
|
+
TextPartDelta,
|
|
22
|
+
)
|
|
23
|
+
from upathtools import is_directory
|
|
24
|
+
|
|
25
|
+
from agentpool.agents.context import AgentContext # noqa: TC001
|
|
26
|
+
from agentpool.log import get_logger
|
|
27
|
+
from agentpool.mime_utils import guess_type, is_binary_content, is_binary_mime
|
|
28
|
+
from agentpool.resource_providers import ResourceProvider
|
|
29
|
+
from agentpool_toolsets.builtin.file_edit import replace_content
|
|
30
|
+
from agentpool_toolsets.builtin.file_edit.fuzzy_matcher import StreamingFuzzyMatcher
|
|
31
|
+
from agentpool_toolsets.fsspec_toolset.diagnostics import DiagnosticsManager
|
|
32
|
+
from agentpool_toolsets.fsspec_toolset.grep import GrepBackend
|
|
33
|
+
from agentpool_toolsets.fsspec_toolset.helpers import (
|
|
34
|
+
format_directory_listing,
|
|
35
|
+
get_changed_line_numbers,
|
|
36
|
+
truncate_lines,
|
|
37
|
+
)
|
|
38
|
+
from agentpool_toolsets.fsspec_toolset.streaming_diff_parser import (
|
|
39
|
+
NewTextChunk,
|
|
40
|
+
OldTextChunk,
|
|
41
|
+
StreamingDiffParser,
|
|
42
|
+
)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
if TYPE_CHECKING:
|
|
46
|
+
import fsspec
|
|
47
|
+
from fsspec.asyn import AsyncFileSystem
|
|
48
|
+
from pydantic_ai.messages import ModelResponse
|
|
49
|
+
|
|
50
|
+
from agentpool.agents.base_agent import BaseAgent
|
|
51
|
+
from agentpool.common_types import ModelType
|
|
52
|
+
from agentpool.messaging import MessageHistory
|
|
53
|
+
from agentpool.prompts.conversion_manager import ConversionManager
|
|
54
|
+
from agentpool.repomap import RepoMap
|
|
55
|
+
from agentpool.tools.base import Tool
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
logger = get_logger(__name__)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class FSSpecTools(ResourceProvider):
|
|
62
|
+
"""Provider for fsspec filesystem tools.
|
|
63
|
+
|
|
64
|
+
NOTE: The ACP execution environment used handles the Terminal events of the protocol,
|
|
65
|
+
the toolset should deal with the ToolCall events for UI display purposes.
|
|
66
|
+
"""
|
|
67
|
+
|
|
68
|
+
def __init__(
|
|
69
|
+
self,
|
|
70
|
+
source: fsspec.AbstractFileSystem | ExecutionEnvironment | None = None,
|
|
71
|
+
name: str | None = None,
|
|
72
|
+
cwd: str | None = None,
|
|
73
|
+
edit_model: ModelType | None = None,
|
|
74
|
+
converter: ConversionManager | None = None,
|
|
75
|
+
max_file_size_kb: int = 64,
|
|
76
|
+
max_grep_output_kb: int = 64,
|
|
77
|
+
use_subprocess_grep: bool = True,
|
|
78
|
+
enable_diagnostics: bool = False,
|
|
79
|
+
large_file_tokens: int = 12_000,
|
|
80
|
+
map_max_tokens: int = 2048,
|
|
81
|
+
) -> None:
|
|
82
|
+
"""Initialize with an fsspec filesystem or execution environment.
|
|
83
|
+
|
|
84
|
+
Args:
|
|
85
|
+
source: Filesystem or execution environment to operate on.
|
|
86
|
+
If None, falls back to agent.env at runtime.
|
|
87
|
+
name: Name for this toolset provider
|
|
88
|
+
cwd: Optional cwd to resolve relative paths against
|
|
89
|
+
edit_model: Optional edit model for text editing
|
|
90
|
+
converter: Optional conversion manager for markdown conversion
|
|
91
|
+
max_file_size_kb: Maximum file size in KB for read/write operations (default: 64KB)
|
|
92
|
+
max_grep_output_kb: Maximum grep output size in KB (default: 64KB)
|
|
93
|
+
use_subprocess_grep: Use ripgrep/grep subprocess if available (default: True)
|
|
94
|
+
enable_diagnostics: Run LSP CLI diagnostics after file writes (default: False)
|
|
95
|
+
large_file_tokens: Token threshold for switching to structure map (default: 12000)
|
|
96
|
+
map_max_tokens: Maximum tokens for structure map output (default: 2048)
|
|
97
|
+
"""
|
|
98
|
+
from fsspec.asyn import AsyncFileSystem
|
|
99
|
+
from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper
|
|
100
|
+
|
|
101
|
+
if source is None:
|
|
102
|
+
self._fs: AsyncFileSystem | None = None
|
|
103
|
+
self.execution_env: ExecutionEnvironment | None = None
|
|
104
|
+
elif isinstance(source, ExecutionEnvironment):
|
|
105
|
+
self.execution_env = source
|
|
106
|
+
fs = source.get_fs()
|
|
107
|
+
self._fs = fs if isinstance(fs, AsyncFileSystem) else AsyncFileSystemWrapper(fs)
|
|
108
|
+
else:
|
|
109
|
+
self.execution_env = None
|
|
110
|
+
self._fs = (
|
|
111
|
+
source if isinstance(source, AsyncFileSystem) else AsyncFileSystemWrapper(source)
|
|
112
|
+
)
|
|
113
|
+
super().__init__(name=name or f"file_access_{self._fs.protocol if self._fs else 'default'}")
|
|
114
|
+
self.edit_model = edit_model
|
|
115
|
+
self.cwd = cwd
|
|
116
|
+
self.converter = converter
|
|
117
|
+
self.max_file_size = max_file_size_kb * 1024 # Convert KB to bytes
|
|
118
|
+
self.max_grep_output = max_grep_output_kb * 1024 # Convert KB to bytes
|
|
119
|
+
self.use_subprocess_grep = use_subprocess_grep
|
|
120
|
+
self._tools: list[Tool] | None = None
|
|
121
|
+
self._grep_backend: GrepBackend | None = None
|
|
122
|
+
self._enable_diagnostics = enable_diagnostics
|
|
123
|
+
self._diagnostics: DiagnosticsManager | None = None
|
|
124
|
+
self._large_file_tokens = large_file_tokens
|
|
125
|
+
self._map_max_tokens = map_max_tokens
|
|
126
|
+
self._repomap: RepoMap | None = None
|
|
127
|
+
|
|
128
|
+
def get_fs(self, agent_ctx: AgentContext) -> AsyncFileSystem:
|
|
129
|
+
"""Get filesystem, falling back to agent's env if not set.
|
|
130
|
+
|
|
131
|
+
Args:
|
|
132
|
+
agent_ctx: Agent context to get fallback env from
|
|
133
|
+
"""
|
|
134
|
+
from fsspec.asyn import AsyncFileSystem
|
|
135
|
+
from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper
|
|
136
|
+
|
|
137
|
+
if self._fs is not None:
|
|
138
|
+
return self._fs
|
|
139
|
+
fs = agent_ctx.agent.env.get_fs()
|
|
140
|
+
return fs if isinstance(fs, AsyncFileSystem) else AsyncFileSystemWrapper(fs)
|
|
141
|
+
|
|
142
|
+
def _get_diagnostics_manager(self, agent_ctx: AgentContext) -> DiagnosticsManager:
|
|
143
|
+
"""Get or create the diagnostics manager."""
|
|
144
|
+
if self._diagnostics is None:
|
|
145
|
+
env = self.execution_env or agent_ctx.agent.env
|
|
146
|
+
self._diagnostics = DiagnosticsManager(env if self._enable_diagnostics else None)
|
|
147
|
+
return self._diagnostics
|
|
148
|
+
|
|
149
|
+
async def _run_diagnostics(self, agent_ctx: AgentContext, path: str) -> str | None:
|
|
150
|
+
"""Run diagnostics on a file if enabled.
|
|
151
|
+
|
|
152
|
+
Returns formatted diagnostics string if issues found, None otherwise.
|
|
153
|
+
"""
|
|
154
|
+
if not self._enable_diagnostics:
|
|
155
|
+
return None
|
|
156
|
+
mgr = self._get_diagnostics_manager(agent_ctx)
|
|
157
|
+
diagnostics = await mgr.run_for_file(path)
|
|
158
|
+
if diagnostics:
|
|
159
|
+
return mgr.format_diagnostics(diagnostics)
|
|
160
|
+
return None
|
|
161
|
+
|
|
162
|
+
async def _get_file_map(self, path: str, agent_ctx: AgentContext) -> str | None:
|
|
163
|
+
"""Get structure map for a large file if language is supported.
|
|
164
|
+
|
|
165
|
+
Args:
|
|
166
|
+
path: Absolute file path
|
|
167
|
+
agent_ctx: Agent context for filesystem access
|
|
168
|
+
|
|
169
|
+
Returns:
|
|
170
|
+
Structure map string or None if language not supported
|
|
171
|
+
"""
|
|
172
|
+
from agentpool.repomap import RepoMap, is_language_supported
|
|
173
|
+
|
|
174
|
+
if not is_language_supported(path):
|
|
175
|
+
return None
|
|
176
|
+
|
|
177
|
+
# Lazy init repomap - use file's directory as root
|
|
178
|
+
if self._repomap is None:
|
|
179
|
+
root = str(Path(path).parent)
|
|
180
|
+
fs = self.get_fs(agent_ctx)
|
|
181
|
+
self._repomap = RepoMap(fs, root, max_tokens=self._map_max_tokens)
|
|
182
|
+
|
|
183
|
+
return await self._repomap.get_file_map(path, max_tokens=self._map_max_tokens)
|
|
184
|
+
|
|
185
|
+
def _resolve_path(self, path: str, agent_ctx: AgentContext) -> str:
|
|
186
|
+
"""Resolve a potentially relative path to an absolute path.
|
|
187
|
+
|
|
188
|
+
Gets cwd from self.cwd, execution_env.cwd, or agent.env.cwd.
|
|
189
|
+
If cwd is set and path is relative, resolves relative to cwd.
|
|
190
|
+
Otherwise returns the path as-is.
|
|
191
|
+
"""
|
|
192
|
+
# Get cwd: explicit toolset cwd > execution_env.cwd > agent.env.cwd
|
|
193
|
+
cwd: str | None = None
|
|
194
|
+
if self.cwd:
|
|
195
|
+
cwd = self.cwd
|
|
196
|
+
elif self.execution_env and self.execution_env.cwd:
|
|
197
|
+
cwd = self.execution_env.cwd
|
|
198
|
+
elif agent_ctx.agent.env and agent_ctx.agent.env.cwd:
|
|
199
|
+
cwd = agent_ctx.agent.env.cwd
|
|
200
|
+
|
|
201
|
+
if cwd and not (path.startswith("/") or (len(path) > 1 and path[1] == ":")):
|
|
202
|
+
return str(Path(cwd) / path)
|
|
203
|
+
return path
|
|
204
|
+
|
|
205
|
+
async def get_tools(self) -> list[Tool]:
    """Return the filesystem tools, constructing the list only on first call."""
    if self._tools is not None:
        return self._tools

    make = self.create_tool
    self._tools = [
        make(self.list_directory, category="read", read_only=True, idempotent=True),
        make(self.read_file, category="read", read_only=True, idempotent=True),
        make(self.grep, category="search", read_only=True, idempotent=True),
        make(self.write_file, category="edit"),
        make(self.delete_path, category="delete", destructive=True),
        make(self.edit_file, category="edit"),
        make(self.agentic_edit, category="edit"),
        make(self.download_file, category="read", open_world=True),
    ]

    # The markdown reader is only usable when a converter was configured.
    if self.converter:
        markdown_tool = make(
            self.read_as_markdown,
            category="read",
            read_only=True,
            idempotent=True,
        )
        self._tools.append(markdown_tool)

    return self._tools
|
|
232
|
+
|
|
233
|
+
async def list_directory(  # noqa: D417
    self,
    agent_ctx: AgentContext,
    path: str,
    *,
    pattern: str = "*",
    exclude: list[str] | None = None,
    max_depth: int = 1,
) -> str:
    """List files in a directory with filtering support.

    Args:
        path: Base directory to list
        pattern: Glob pattern to match files against. Use "*.py" to match Python
            files in current directory only, or "**/*.py" to match recursively.
            The max_depth parameter limits how deep "**" patterns search.
        exclude: List of patterns to exclude (uses fnmatch against relative paths)
        max_depth: Maximum directory depth to search (default: 1 = current dir only).
            Only affects recursive "**" patterns.

    Returns:
        Markdown-formatted directory listing
    """
    # Relative paths are resolved against the session working directory.
    path = self._resolve_path(path, agent_ctx)
    msg = f"Listing directory: {path}"
    await agent_ctx.events.tool_call_start(title=msg, kind="read", locations=[path])

    try:
        fs = self.get_fs(agent_ctx)
        # Check if path exists
        if not await fs._exists(path):
            error_msg = f"Path does not exist: {path}"
            await agent_ctx.events.file_operation(
                "list", path=path, success=False, error=error_msg
            )
            return f"Error: {error_msg}"

        # Build glob path; detail=True makes _glob return a {path: info} mapping.
        glob_pattern = f"{path.rstrip('/')}/{pattern}"
        paths = await fs._glob(glob_pattern, maxdepth=max_depth, detail=True)

        files: list[dict[str, Any]] = []
        dirs: list[dict[str, Any]] = []

        # Safety check - prevent returning too many items
        total_found = len(paths)
        if total_found > 500:  # noqa: PLR2004
            # Instead of a truncated listing, bail out with actionable advice.
            # NOTE(review): this early return does not emit a file_operation
            # failure event, unlike the other error paths — confirm intended.
            suggestions = []
            if pattern == "*":
                suggestions.append("Use a more specific pattern like '*.py', '*.txt', etc.")
            if max_depth > 1:
                suggestions.append(f"Reduce max_depth from {max_depth} to 1 or 2.")
            if not exclude:
                suggestions.append("Use exclude parameter to filter out unwanted directories.")

            suggestion_text = " ".join(suggestions) if suggestions else ""
            return f"Error: Too many items ({total_found:,}). {suggestion_text}"

        for file_path, file_info in paths.items():  # pyright: ignore[reportAttributeAccessIssue]
            # Exclusion patterns are matched against the path relative to `path`.
            rel_path = os.path.relpath(str(file_path), path)

            # Skip excluded patterns
            if exclude and any(fnmatch(rel_path, pat) for pat in exclude):
                continue

            # Use type from glob detail info, falling back to isdir only if needed
            is_dir = await is_directory(fs, file_path, entry_type=file_info.get("type"))  # pyright: ignore[reportArgumentType]

            item_info = {
                "name": Path(file_path).name,  # pyright: ignore[reportArgumentType]
                "path": file_path,
                "relative_path": rel_path,
                "size": file_info.get("size", 0),
                "type": "directory" if is_dir else "file",
            }
            # mtime is backend-dependent in fsspec; include it only when provided.
            if "mtime" in file_info:
                item_info["modified"] = file_info["mtime"]

            if is_dir:
                dirs.append(item_info)
            else:
                files.append(item_info)

        await agent_ctx.events.file_operation("list", path=path, success=True)
        result = format_directory_listing(path, dirs, files, pattern)
        # Emit formatted content for UI display
        from agentpool.agents.events import TextContentItem

        await agent_ctx.events.tool_call_progress(
            title=f"Listed: {path}",
            items=[TextContentItem(text=result)],
            replace_content=True,
        )
    except (OSError, ValueError, FileNotFoundError) as e:
        await agent_ctx.events.file_operation("list", path=path, success=False, error=str(e))
        return f"Error: Could not list directory: {path}. Ensure path is absolute and exists."
    else:
        # Success path: the formatted listing built above.
        return result
|
|
331
|
+
|
|
332
|
+
async def read_file(  # noqa: D417
    self,
    agent_ctx: AgentContext,
    path: str,
    encoding: str = "utf-8",
    line: int | None = None,
    limit: int | None = None,
) -> str | BinaryContent:
    """Read the contents of a text file, or use vision capabilities to read images or documents.

    Binary files are returned as BinaryContent for native model handling;
    oversized text files are summarized (structure map or head+tail) unless a
    targeted line/limit read is requested.

    Args:
        path: File path to read
        encoding: Text encoding to use for text files (default: utf-8)
        line: Optional line number to start reading from (1-based, text files only)
        limit: Optional maximum number of lines to read (text files only)

    Returns:
        Text content for text files, BinaryContent for binary files, or dict with error
    """
    path = self._resolve_path(path, agent_ctx)
    msg = f"Reading file: {path}"
    from agentpool.agents.events import LocationContentItem

    await agent_ctx.events.tool_call_progress(
        title=msg,
        items=[LocationContentItem(path=path)],
    )
    try:
        mime_type = guess_type(path)
        # Fast path: known binary MIME types (images, audio, video, etc.)
        if is_binary_mime(mime_type):
            data = await self.get_fs(agent_ctx)._cat_file(path)
            await agent_ctx.events.file_operation("read", path=path, success=True)
            mime = mime_type or "application/octet-stream"
            return BinaryContent(data=data, media_type=mime, identifier=path)
        # Read content and probe for binary (git-style null byte detection)
        data = await self.get_fs(agent_ctx)._cat_file(path)
        if is_binary_content(data):
            # Binary file - return as BinaryContent for native model handling
            await agent_ctx.events.file_operation("read", path=path, success=True)
            mime = mime_type or "application/octet-stream"
            return BinaryContent(data=data, media_type=mime, identifier=path)
        content = data.decode(encoding)

        # Check if file is too large and no targeted read requested.
        # Rough heuristic: ~4 characters per token.
        tokens_approx = len(content) // 4
        if line is None and limit is None and tokens_approx > self._large_file_tokens:
            # Try structure map for supported languages
            map_result = await self._get_file_map(path, agent_ctx)
            if map_result:
                await agent_ctx.events.file_operation("read", path=path, success=True)
                content = map_result
            else:
                # Fallback: head + tail for unsupported languages
                from agentpool.repomap import truncate_with_notice

                content = truncate_with_notice(path, content)
                await agent_ctx.events.file_operation("read", path=path, success=True)
        else:
            # Normal read with optional offset/limit.
            # `line` is 1-based for callers; convert to a 0-based offset.
            lines = content.splitlines()
            offset = (line - 1) if line else 0
            result_lines, was_truncated = truncate_lines(
                lines, offset, limit, self.max_file_size
            )
            content = "\n".join(result_lines)
            await agent_ctx.events.file_operation("read", path=path, success=True)
            if was_truncated:
                content += f"\n\n[Content truncated at {self.max_file_size} bytes]"

    except Exception as e:  # noqa: BLE001
        await agent_ctx.events.file_operation("read", path=path, success=False, error=str(e))
        return f"error: Failed to read file {path}: {e}"
    else:
        # Emit file content for UI display (formatted at ACP layer)
        from agentpool.agents.events import FileContentItem

        await agent_ctx.events.tool_call_progress(
            title=f"Read: {path}",
            items=[FileContentItem(content=content, path=path)],
            replace_content=True,
        )
        # Return raw content for agent
        return content
|
|
416
|
+
|
|
417
|
+
async def read_as_markdown(self, agent_ctx: AgentContext, path: str) -> str | dict[str, Any]:  # noqa: D417
    """Read file and convert to markdown text representation.

    Only registered as a tool when a converter is configured (see get_tools),
    hence the assertion below.

    Args:
        path: Path to read

    Returns:
        File content converted to markdown, or an error string on failure
    """
    assert self.converter is not None, "Converter required for read_as_markdown"

    path = self._resolve_path(path, agent_ctx)
    msg = f"Reading file as markdown: {path}"
    await agent_ctx.events.tool_call_start(title=msg, kind="read", locations=[path])
    try:
        content = await self.converter.convert_file(path)
        await agent_ctx.events.file_operation("read", path=path, success=True)
        # Emit formatted content for UI display
        from agentpool.agents.events import TextContentItem

        await agent_ctx.events.tool_call_progress(
            title=f"Read as markdown: {path}",
            items=[TextContentItem(text=content)],
            replace_content=True,
        )
    except Exception as e:  # noqa: BLE001
        await agent_ctx.events.file_operation("read", path=path, success=False, error=str(e))
        return f"Error: Failed to convert file {path}: {e}"
    else:
        return content
|
|
447
|
+
|
|
448
|
+
async def write_file(  # noqa: D417
    self,
    agent_ctx: AgentContext,
    path: str,
    content: str,
    mode: str = "w",
    overwrite: bool = False,
) -> dict[str, Any]:
    """Write content to a file.

    Args:
        path: File path to write
        content: Content to write
        mode: Write mode ('w' for overwrite, 'a' for append)
        overwrite: Must be True to overwrite existing files (safety check)

    Returns:
        Dictionary with success info or error details
    """
    path = self._resolve_path(path, agent_ctx)
    msg = f"Writing file: {path}"
    await agent_ctx.events.tool_call_start(title=msg, kind="edit", locations=[path])

    # Byte count of the *new* content only; used for the size limit check
    # and reported as bytes_written below.
    content_bytes = len(content.encode("utf-8"))

    try:
        if mode not in ("w", "a"):
            msg = f"Invalid mode '{mode}'. Use 'w' (write) or 'a' (append)"
            await agent_ctx.events.file_operation("write", path=path, success=False, error=msg)
            return {"error": msg}

        # Check size limit
        # NOTE(review): in append mode this checks only the appended chunk,
        # not the resulting file size — confirm that is intended.
        if content_bytes > self.max_file_size:
            msg = (
                f"Content size ({content_bytes} bytes) exceeds maximum "
                f"({self.max_file_size} bytes)"
            )
            await agent_ctx.events.file_operation("write", path=path, success=False, error=msg)
            return {"error": msg}

        # Check if file exists and overwrite protection
        fs = self.get_fs(agent_ctx)
        file_exists = await fs._exists(path)

        if file_exists and mode == "w" and not overwrite:
            msg = (
                f"File '{path}' already exists. To overwrite it, you must set overwrite=True. "
                f"This is a safety measure to prevent accidental data loss."
            )
            await agent_ctx.events.file_operation("write", path=path, success=False, error=msg)
            return {"error": msg}

        # Handle append mode: read existing content and prepend it.
        # fsspec has no true append, so the whole file is rewritten.
        if mode == "a" and file_exists:
            try:
                existing_content = await self._read(agent_ctx, path)
                if isinstance(existing_content, bytes):
                    existing_content = existing_content.decode("utf-8")
                content = existing_content + content
            except Exception:  # noqa: BLE001
                pass  # If we can't read, just write new content

        await self._write(agent_ctx, path, content)

        # Report the on-disk size when the backend provides it; fall back to
        # the byte count of the content we were asked to write.
        try:
            info = await fs._info(path)
            size = info.get("size", content_bytes)
        except (OSError, KeyError):
            size = content_bytes

        result: dict[str, Any] = {
            "path": path,
            "size": size,
            "mode": mode,
            "file_existed": file_exists,
            "bytes_written": content_bytes,
        }
        await agent_ctx.events.file_operation("write", path=path, success=True)

        # Run diagnostics if enabled
        if diagnostics_output := await self._run_diagnostics(agent_ctx, path):
            result["diagnostics"] = diagnostics_output
    except Exception as e:  # noqa: BLE001
        await agent_ctx.events.file_operation("write", path=path, success=False, error=str(e))
        return {"error": f"Failed to write file {path}: {e}"}
    else:
        return result
|
|
535
|
+
|
|
536
|
+
async def delete_path(  # noqa: D417
    self, agent_ctx: AgentContext, path: str, recursive: bool = False
) -> dict[str, Any]:
    """Delete a file or directory.

    Non-empty directories are refused unless ``recursive=True``, as a guard
    against accidental mass deletion.

    Args:
        path: Path to delete
        recursive: Whether to delete directories recursively

    Returns:
        Dictionary with operation result
    """
    path = self._resolve_path(path, agent_ctx)
    msg = f"Deleting path: {path}"
    await agent_ctx.events.tool_call_start(title=msg, kind="delete", locations=[path])
    try:
        # Check if path exists and get its type
        fs = self.get_fs(agent_ctx)
        try:
            info = await fs._info(path)
            path_type = info.get("type", "unknown")
        except FileNotFoundError:
            msg = f"Path does not exist: {path}"
            await agent_ctx.events.file_operation("delete", path=path, success=False, error=msg)
            return {"error": msg}
        except (OSError, ValueError) as e:
            msg = f"Could not check path {path}: {e}"
            await agent_ctx.events.file_operation("delete", path=path, success=False, error=msg)
            return {"error": msg}

        if path_type == "directory":
            if not recursive:
                # Refuse to delete non-empty directories without recursive=True.
                try:
                    contents = await fs._ls(path)
                    if contents:  # Check if directory is empty
                        error_msg = (
                            f"Directory {path} is not empty. "
                            f"Use recursive=True to delete non-empty directories"
                        )

                        # Emit failure event
                        await agent_ctx.events.file_operation(
                            "delete", path=path, success=False, error=error_msg
                        )

                        return {"error": error_msg}
                except (OSError, ValueError):
                    pass  # Continue with deletion attempt

            await fs._rm(path, recursive=recursive)
        else:  # It's a file
            await fs._rm(path)  # or _rm_file?

    except Exception as e:  # noqa: BLE001
        await agent_ctx.events.file_operation("delete", path=path, success=False, error=str(e))
        return {"error": f"Failed to delete {path}: {e}"}
    else:
        result = {
            "path": path,
            "deleted": True,
            "type": path_type,
            "recursive": recursive,
        }
        await agent_ctx.events.file_operation("delete", path=path, success=True)
        return result
|
|
601
|
+
|
|
602
|
+
async def edit_file(  # noqa: D417
    self,
    agent_ctx: AgentContext,
    path: str,
    old_string: str,
    new_string: str,
    description: str,
    replace_all: bool = False,
) -> str:
    r"""Edit a file by replacing specific content with smart matching.

    Uses sophisticated matching strategies to handle whitespace, indentation,
    and other variations. Shows the changes as a diff in the UI.

    Args:
        path: File path (absolute or relative to session cwd)
        old_string: Text content to find and replace
        new_string: Text content to replace it with
        description: Human-readable description of what the edit accomplishes
        replace_all: Whether to replace all occurrences (default: False)

    Returns:
        Success message with edit summary
    """
    path = self._resolve_path(path, agent_ctx)
    msg = f"Editing file: {path}"
    await agent_ctx.events.tool_call_start(title=msg, kind="edit", locations=[path])
    # NOTE(review): this early return emits no failure file_operation event,
    # unlike the other error paths — confirm intended.
    if old_string == new_string:
        return "Error: old_string and new_string must be different"

    # Send initial pending notification
    await agent_ctx.events.file_operation("edit", path=path, success=True)

    try:  # Read current file content
        original_content = await self._read(agent_ctx, path)
        if isinstance(original_content, bytes):
            original_content = original_content.decode("utf-8")

        try:  # Apply smart content replacement
            new_content = replace_content(original_content, old_string, new_string, replace_all)
        except ValueError as e:
            # replace_content raises ValueError when no (unambiguous) match is found.
            error_msg = f"Edit failed: {e}"
            await agent_ctx.events.file_operation(
                "edit", path=path, success=False, error=error_msg
            )
            return error_msg

        await self._write(agent_ctx, path, new_content)
        success_msg = f"Successfully edited {Path(path).name}: {description}"
        changed_line_numbers = get_changed_line_numbers(original_content, new_content)
        if lines_changed := len(changed_line_numbers):
            success_msg += f" ({lines_changed} lines changed)"

        # Surface the old/new text so the UI can render a diff.
        await agent_ctx.events.file_edit_progress(
            path=path,
            old_text=original_content,
            new_text=new_content,
            status="completed",
        )

        # Run diagnostics if enabled
        if diagnostics_output := await self._run_diagnostics(agent_ctx, path):
            success_msg += f"\n\nDiagnostics:\n{diagnostics_output}"
    except Exception as e:  # noqa: BLE001
        error_msg = f"Error editing file: {e}"
        await agent_ctx.events.file_operation("edit", path=path, success=False, error=error_msg)
        return error_msg
    else:
        return success_msg
|
|
671
|
+
|
|
672
|
+
async def grep(  # noqa: D417
    self,
    agent_ctx: AgentContext,
    pattern: str,
    path: str,
    *,
    file_pattern: str = "**/*",
    case_sensitive: bool = False,
    max_matches: int = 100,
    context_lines: int = 0,
) -> str:
    """Search file contents for a pattern.

    Prefers an external grep/ripgrep subprocess when available; falls back to
    a pure-Python fsspec-based search otherwise.

    Args:
        pattern: Regex pattern to search for
        path: Base directory to search in
        file_pattern: Glob pattern to filter files (e.g. "**/*.py");
            only used by the fsspec fallback
        case_sensitive: Whether search is case-sensitive
        max_matches: Maximum number of matches to return
        context_lines: Number of context lines before/after match

    Returns:
        Grep results as formatted text
    """
    from agentpool_toolsets.fsspec_toolset.grep import (
        DEFAULT_EXCLUDE_PATTERNS,
        detect_grep_backend,
        grep_with_fsspec,
        grep_with_subprocess,
    )

    resolved_path = self._resolve_path(path, agent_ctx)
    msg = f"Searching for {pattern!r} in {resolved_path}"
    await agent_ctx.events.tool_call_start(title=msg, kind="search", locations=[resolved_path])

    result: dict[str, Any] | None = None
    try:
        # Try subprocess grep if configured and available
        if self.use_subprocess_grep:
            # Get execution environment for running grep command
            env = self.execution_env or agent_ctx.agent.env
            if env is not None:
                # Detect and cache grep backend (detection is expensive, do it once)
                if self._grep_backend is None:
                    self._grep_backend = await detect_grep_backend(env)
                # Only use subprocess if we have a real grep backend
                if self._grep_backend != GrepBackend.PYTHON:
                    result = await grep_with_subprocess(
                        env=env,
                        pattern=pattern,
                        path=resolved_path,
                        backend=self._grep_backend,
                        case_sensitive=case_sensitive,
                        max_matches=max_matches,
                        max_output_bytes=self.max_grep_output,
                        exclude_patterns=DEFAULT_EXCLUDE_PATTERNS,
                        use_gitignore=True,
                        context_lines=context_lines,
                    )

        # Fallback to fsspec grep if subprocess didn't work
        if result is None or "error" in result:
            fs = self.get_fs(agent_ctx)
            result = await grep_with_fsspec(
                fs=fs,
                pattern=pattern,
                path=resolved_path,
                file_pattern=file_pattern,
                case_sensitive=case_sensitive,
                max_matches=max_matches,
                max_output_bytes=self.max_grep_output,
                context_lines=context_lines,
            )

        if "error" in result:
            return f"Error: {result['error']}"

        # Format output
        matches = result.get("matches", "")
        match_count = result.get("match_count", 0)
        was_truncated = result.get("was_truncated", False)

        if not matches:
            output = f"No matches found for pattern '{pattern}'"
        else:
            output = f"Found {match_count} matches:\n\n{matches}"
            if was_truncated:
                output += "\n\n[Results truncated]"

        # Emit formatted content for UI display
        from agentpool.agents.events import TextContentItem

        await agent_ctx.events.tool_call_progress(
            title=f"Found {match_count} matches",
            items=[TextContentItem(text=output)],
            replace_content=True,
        )
    except Exception as e:  # noqa: BLE001
        return f"Error: Grep failed: {e}"
    else:
        return output
|
|
773
|
+
|
|
774
|
+
async def _read(self, agent_ctx: AgentContext, path: str, encoding: str = "utf-8") -> str:
|
|
775
|
+
# with self.fs.open(path, "r", encoding="utf-8") as f:
|
|
776
|
+
# return f.read()
|
|
777
|
+
return await self.get_fs(agent_ctx)._cat(path) # type: ignore[no-any-return]
|
|
778
|
+
|
|
779
|
+
async def _write(self, agent_ctx: AgentContext, path: str, content: str | bytes) -> None:
|
|
780
|
+
if isinstance(content, str):
|
|
781
|
+
content = content.encode()
|
|
782
|
+
await self.get_fs(agent_ctx)._pipe_file(path, content)
|
|
783
|
+
|
|
784
|
+
async def download_file(  # noqa: D417
    self,
    agent_ctx: AgentContext,
    url: str,
    target_dir: str = "downloads",
    chunk_size: int = 8192,
) -> dict[str, Any]:
    """Download a file from URL to the toolset's filesystem.

    Args:
        url: URL to download from
        target_dir: Directory to save the file (relative to cwd if set)
        chunk_size: Size of chunks to download

    Returns:
        Status information about the download (path, sizes, timing), or a
        dict with an "error" key on failure.
    """
    import httpx

    start_time = time.time()

    # Resolve target directory
    target_dir = self._resolve_path(target_dir, agent_ctx)

    msg = f"Downloading: {url}"
    await agent_ctx.events.tool_call_start(title=msg, kind="read", locations=[url])

    # Extract filename from URL; fall back to a generic name for bare paths.
    filename = Path(urlparse(url).path).name or "downloaded_file"
    # BUG FIX: the save path previously interpolated a literal placeholder
    # instead of the extracted filename, so every download collided on the
    # same bogus path. Use the real filename.
    full_path = f"{target_dir.rstrip('/')}/{filename}"

    try:
        fs = self.get_fs(agent_ctx)
        # Ensure target directory exists
        await fs._makedirs(target_dir, exist_ok=True)

        # NOTE(security): TLS certificate verification is disabled here
        # (verify=False), which exposes downloads to man-in-the-middle
        # tampering. Confirm this is intentional before keeping it.
        async with (
            httpx.AsyncClient(verify=False) as client,
            client.stream("GET", url, timeout=30.0) as response,
        ):
            response.raise_for_status()

            # Total size is only known when the server sends Content-Length.
            total = (
                int(response.headers["Content-Length"])
                if "Content-Length" in response.headers
                else None
            )

            # Collect all data
            data = bytearray()
            async for chunk in response.aiter_bytes(chunk_size):
                data.extend(chunk)
                size = len(data)

                # Throttled progress: roughly every 100 chunks, and at completion.
                if total and (size % (chunk_size * 100) == 0 or size == total):
                    progress = size / total * 100
                    speed_mbps = (size / 1_048_576) / (time.time() - start_time)
                    progress_msg = f"\r{filename}: {progress:.1f}% ({speed_mbps:.1f} MB/s)"
                    await agent_ctx.events.progress(progress, 100, progress_msg)
                    # Yield to the event loop so progress events can flush.
                    await anyio.sleep(0)

        # Write to filesystem
        await self._write(agent_ctx, full_path, bytes(data))

        duration = time.time() - start_time
        size_mb = len(data) / 1_048_576

        await agent_ctx.events.file_operation("read", path=full_path, success=True)

        return {
            "path": full_path,
            "filename": filename,
            "size_bytes": len(data),
            "size_mb": round(size_mb, 2),
            "duration_seconds": round(duration, 2),
            "speed_mbps": round(size_mb / duration, 2) if duration > 0 else 0,
        }

    except httpx.ConnectError as e:
        error_msg = f"Connection error downloading {url}: {e}"
        await agent_ctx.events.file_operation("read", path=url, success=False, error=error_msg)
        return {"error": error_msg}
    except httpx.TimeoutException:
        error_msg = f"Timeout downloading {url}"
        await agent_ctx.events.file_operation("read", path=url, success=False, error=error_msg)
        return {"error": error_msg}
    except httpx.HTTPStatusError as e:
        error_msg = f"HTTP error {e.response.status_code} downloading {url}"
        await agent_ctx.events.file_operation("read", path=url, success=False, error=error_msg)
        return {"error": error_msg}
    except Exception as e:  # noqa: BLE001
        error_msg = f"Error downloading {url}: {e!s}"
        await agent_ctx.events.file_operation("read", path=url, success=False, error=error_msg)
        return {"error": error_msg}
|
|
878
|
+
|
|
879
|
+
async def agentic_edit( # noqa: D417, PLR0915
|
|
880
|
+
self,
|
|
881
|
+
run_ctx: RunContext,
|
|
882
|
+
agent_ctx: AgentContext,
|
|
883
|
+
path: str,
|
|
884
|
+
display_description: str,
|
|
885
|
+
mode: str = "edit",
|
|
886
|
+
matcher: str = "default",
|
|
887
|
+
) -> str:
|
|
888
|
+
r"""Edit or create a file with streaming support.
|
|
889
|
+
|
|
890
|
+
Use this tool for file modifications. Describe what changes you want
|
|
891
|
+
and the tool will apply them progressively as they're generated.
|
|
892
|
+
|
|
893
|
+
Args:
|
|
894
|
+
path: File path (absolute or relative to session cwd)
|
|
895
|
+
display_description: What edits to make - be specific about the changes.
|
|
896
|
+
Examples: "Add error handling to the parse function",
|
|
897
|
+
"Rename the 'foo' variable to 'bar' throughout",
|
|
898
|
+
"Add a docstring to the MyClass class"
|
|
899
|
+
mode: How to modify the file:
|
|
900
|
+
- 'edit': Modify specific parts of existing file (default)
|
|
901
|
+
- 'create': Create a new file (fails if file exists)
|
|
902
|
+
- 'overwrite': Replace entire file content
|
|
903
|
+
matcher: Internal matching algorithm - leave as default unless
|
|
904
|
+
you have a reason to change it.
|
|
905
|
+
|
|
906
|
+
Returns:
|
|
907
|
+
Success message with edit summary
|
|
908
|
+
"""
|
|
909
|
+
from pydantic_ai.messages import CachePoint, ModelRequest
|
|
910
|
+
|
|
911
|
+
from agentpool.messaging import ChatMessage, MessageHistory
|
|
912
|
+
|
|
913
|
+
path = self._resolve_path(path, agent_ctx)
|
|
914
|
+
title = f"AI editing file: {path}"
|
|
915
|
+
await agent_ctx.events.tool_call_start(title=title, kind="edit", locations=[path])
|
|
916
|
+
await agent_ctx.events.file_operation("edit", path=path, success=True)
|
|
917
|
+
|
|
918
|
+
try:
|
|
919
|
+
# Read original content for diff purposes
|
|
920
|
+
if mode == "create":
|
|
921
|
+
original_content = ""
|
|
922
|
+
else:
|
|
923
|
+
original_content = await self._read(agent_ctx, path)
|
|
924
|
+
if isinstance(original_content, bytes):
|
|
925
|
+
original_content = original_content.decode()
|
|
926
|
+
|
|
927
|
+
# Build the edit prompt based on mode
|
|
928
|
+
if mode == "create":
|
|
929
|
+
prompt = _build_create_prompt(path, display_description)
|
|
930
|
+
elif mode == "overwrite":
|
|
931
|
+
prompt = _build_overwrite_prompt(path, display_description, original_content)
|
|
932
|
+
else:
|
|
933
|
+
prompt = _build_edit_prompt(path, display_description, original_content)
|
|
934
|
+
|
|
935
|
+
# Get the current agent and its conversation history
|
|
936
|
+
agent = agent_ctx.native_agent
|
|
937
|
+
|
|
938
|
+
# Create forked message history from current conversation
|
|
939
|
+
# This preserves full context while isolating the edit's messages
|
|
940
|
+
# We need BOTH:
|
|
941
|
+
# 1. Stored history (previous runs) from agent.conversation
|
|
942
|
+
# 2. Current run messages from run_ctx.messages (not yet stored)
|
|
943
|
+
stored_history = agent.conversation.get_history()
|
|
944
|
+
|
|
945
|
+
# Build complete message list
|
|
946
|
+
all_messages: list[ModelRequest | ModelResponse] = []
|
|
947
|
+
|
|
948
|
+
# Add stored history from previous runs
|
|
949
|
+
for chat_msg in stored_history:
|
|
950
|
+
all_messages.extend(chat_msg.to_pydantic_ai())
|
|
951
|
+
|
|
952
|
+
# Add current run's messages (not yet in stored history)
|
|
953
|
+
# But exclude the last message if it contains the current agentic_edit tool call
|
|
954
|
+
# to avoid the sub-agent seeing "I'm calling agentic_edit" in its context
|
|
955
|
+
from pydantic_ai.messages import ModelResponse, ToolCallPart
|
|
956
|
+
|
|
957
|
+
for msg in run_ctx.messages:
|
|
958
|
+
if isinstance(msg, ModelResponse):
|
|
959
|
+
# Filter out the agentic_edit tool call from the last response
|
|
960
|
+
filtered_parts = [
|
|
961
|
+
p
|
|
962
|
+
for p in msg.parts
|
|
963
|
+
if not (isinstance(p, ToolCallPart) and p.tool_name == "agentic_edit")
|
|
964
|
+
]
|
|
965
|
+
if filtered_parts:
|
|
966
|
+
all_messages.append(ModelResponse(parts=filtered_parts))
|
|
967
|
+
else:
|
|
968
|
+
all_messages.append(msg)
|
|
969
|
+
|
|
970
|
+
# Inject CachePoint to cache everything up to this point
|
|
971
|
+
if all_messages:
|
|
972
|
+
cache_request: ModelRequest = ModelRequest(parts=[CachePoint()]) # type: ignore[list-item]
|
|
973
|
+
all_messages.append(cache_request)
|
|
974
|
+
|
|
975
|
+
# Wrap in a single ChatMessage for the forked history
|
|
976
|
+
fork_history = MessageHistory(
|
|
977
|
+
messages=[ChatMessage(messages=all_messages, role="user", content="")]
|
|
978
|
+
)
|
|
979
|
+
else:
|
|
980
|
+
fork_history = MessageHistory()
|
|
981
|
+
|
|
982
|
+
# Stream the edit using the same agent but with forked history
|
|
983
|
+
if mode == "edit" and matcher == "zed":
|
|
984
|
+
# TRUE STREAMING with Zed-style DP fuzzy matcher
|
|
985
|
+
new_content = await self._stream_edit_with_matcher(
|
|
986
|
+
agent, prompt, fork_history, original_content, path, agent_ctx
|
|
987
|
+
)
|
|
988
|
+
elif mode == "edit":
|
|
989
|
+
# TRUE STREAMING with our 9-strategy replace_content (default)
|
|
990
|
+
new_content = await self._stream_edit_with_replace(
|
|
991
|
+
agent, prompt, fork_history, original_content, path, agent_ctx
|
|
992
|
+
)
|
|
993
|
+
else:
|
|
994
|
+
# CREATE/OVERWRITE: Stream raw content directly
|
|
995
|
+
new_content = await self._stream_raw_content(
|
|
996
|
+
agent, prompt, fork_history, original_content, path, agent_ctx
|
|
997
|
+
)
|
|
998
|
+
|
|
999
|
+
# Write the new content to file
|
|
1000
|
+
await self._write(agent_ctx, path, new_content)
|
|
1001
|
+
|
|
1002
|
+
# Build success message
|
|
1003
|
+
original_lines = len(original_content.splitlines()) if original_content else 0
|
|
1004
|
+
new_lines = len(new_content.splitlines())
|
|
1005
|
+
|
|
1006
|
+
if mode == "create":
|
|
1007
|
+
success_msg = f"Successfully created {Path(path).name} ({new_lines} lines)"
|
|
1008
|
+
else:
|
|
1009
|
+
success_msg = f"Successfully edited {Path(path).name} using AI agent"
|
|
1010
|
+
success_msg += f" ({original_lines} → {new_lines} lines)"
|
|
1011
|
+
|
|
1012
|
+
# Send final completion update
|
|
1013
|
+
await agent_ctx.events.file_edit_progress(
|
|
1014
|
+
path=path,
|
|
1015
|
+
old_text=original_content,
|
|
1016
|
+
new_text=new_content,
|
|
1017
|
+
status="completed",
|
|
1018
|
+
)
|
|
1019
|
+
|
|
1020
|
+
except Exception as e: # noqa: BLE001
|
|
1021
|
+
error_msg = f"Error during agentic edit: {e}"
|
|
1022
|
+
await agent_ctx.events.file_operation("edit", path=path, success=False, error=error_msg)
|
|
1023
|
+
return error_msg
|
|
1024
|
+
else:
|
|
1025
|
+
return success_msg
|
|
1026
|
+
|
|
1027
|
+
async def _stream_edit_with_matcher(  # noqa: PLR0915
    self,
    agent: BaseAgent,
    prompt: str,
    fork_history: MessageHistory,
    original_content: str,
    path: str,
    agent_ctx: AgentContext,
) -> str:
    """TRUE streaming edit using StreamingDiffParser + StreamingFuzzyMatcher.

    Parses diff incrementally, uses DP matcher to find locations as old_text
    streams, and applies new_text edits as they arrive.

    Args:
        agent: The agent whose model produces the streamed diff.
        prompt: Edit prompt (built by ``_build_edit_prompt``).
        fork_history: Forked conversation history for the sub-run.
        original_content: File content before any edits.
        path: Path of the file being edited (used for progress events).
        agent_ctx: Context used to emit ``file_edit_progress`` events.

    Returns:
        The fully edited file content after all hunks were applied.
    """
    parser = StreamingDiffParser()
    matcher = StreamingFuzzyMatcher(original_content)

    # Track current state
    edited_content = original_content
    pending_old_text: list[str] = []  # Track old text for prefix/suffix calculation
    pending_new_text: list[str] = []
    # Resolved location of the current hunk; exposes .start/.end offsets
    # into edited_content once the matcher has found a match.
    current_match_range = None

    # Sub-run with isolated (non-stored) history so the edit's messages
    # do not pollute the parent conversation.
    async for node in agent.run_stream(
        prompt,
        message_history=fork_history,
        store_history=False,
    ):
        match node:
            case (
                PartStartEvent(part=TextPart(content=chunk))
                | PartDeltaEvent(delta=TextPartDelta(content_delta=chunk))
            ):
                # Parse diff chunk and process events
                for event in parser.push(chunk):
                    if isinstance(event, OldTextChunk):
                        if not event.done:
                            # Track old text for later prefix/suffix calculation
                            pending_old_text.append(event.chunk)
                            # Push to matcher for location resolution
                            match_result = matcher.push(event.chunk, line_hint=event.line_hint)
                            if match_result:
                                current_match_range = match_result
                        else:
                            # Old text done - finalize location
                            matches = matcher.finish()
                            if matches:
                                current_match_range = matches[0]
                            # Reset matcher for next hunk
                            # NOTE(review): the matcher is reseeded with
                            # edited_content BEFORE this hunk's new_text is
                            # applied below, so the next hunk matches against
                            # pre-application content — confirm this is
                            # intended for adjacent/overlapping hunks.
                            matcher = StreamingFuzzyMatcher(edited_content)

                    elif isinstance(event, NewTextChunk):
                        if not event.done:
                            pending_new_text.append(event.chunk)
                        else:
                            # New text done - apply the edit if we have a location
                            # NOTE(review): hunks with empty new_text (pure
                            # deletions) are skipped by this guard — confirm
                            # deletions are intentionally unsupported here.
                            if current_match_range and pending_new_text:
                                new_text = "".join(pending_new_text)
                                old_text = "".join(pending_old_text)

                                # The matcher may find a larger range than our old_text
                                # We need to preserve any prefix/suffix not in old_text
                                matched_text = edited_content[
                                    current_match_range.start : current_match_range.end
                                ]

                                # Find where old_text appears in matched_text
                                old_text_stripped = old_text.strip("\n")
                                match_idx = matched_text.find(old_text_stripped)

                                if match_idx >= 0:
                                    # Preserve prefix (e.g., blank lines before)
                                    prefix = matched_text[:match_idx]
                                    # Preserve suffix after old_text
                                    suffix_start = match_idx + len(old_text_stripped)
                                    suffix = matched_text[suffix_start:]
                                    # Build replacement with preserved prefix/suffix
                                    replacement = prefix + new_text.strip("\n") + suffix
                                else:
                                    # Fallback: direct replacement
                                    replacement = new_text

                                # Apply edit to content
                                edited_content = (
                                    edited_content[: current_match_range.start]
                                    + replacement
                                    + edited_content[current_match_range.end :]
                                )
                                # Emit progress update
                                await agent_ctx.events.file_edit_progress(
                                    path=path,
                                    old_text=original_content,
                                    new_text=edited_content,
                                    status="in_progress",
                                )

                            # Reset for next hunk
                            pending_old_text = []
                            pending_new_text = []
                            current_match_range = None

    # Process any remaining content
    # (the parser may hold a final, partially-delivered hunk after the
    # model stream ends; replay the same resolution/apply logic once more)
    for event in parser.finish():
        if isinstance(event, OldTextChunk):
            if not event.done:
                pending_old_text.append(event.chunk)
                matcher.push(event.chunk, line_hint=event.line_hint)
            else:
                matches = matcher.finish()
                if matches:
                    current_match_range = matches[0]
        elif isinstance(event, NewTextChunk):
            if not event.done:
                pending_new_text.append(event.chunk)
            elif current_match_range and pending_new_text:
                new_text = "".join(pending_new_text)
                old_text = "".join(pending_old_text)
                matched_text = edited_content[
                    current_match_range.start : current_match_range.end
                ]
                old_text_stripped = old_text.strip("\n")
                match_idx = matched_text.find(old_text_stripped)
                if match_idx >= 0:
                    prefix = matched_text[:match_idx]
                    suffix_start = match_idx + len(old_text_stripped)
                    suffix = matched_text[suffix_start:]
                    replacement = prefix + new_text.strip("\n") + suffix
                else:
                    replacement = new_text
                edited_content = (
                    edited_content[: current_match_range.start]
                    + replacement
                    + edited_content[current_match_range.end :]
                )

    return edited_content
|
|
1163
|
+
|
|
1164
|
+
async def _stream_edit_with_replace(
|
|
1165
|
+
self,
|
|
1166
|
+
agent: BaseAgent,
|
|
1167
|
+
prompt: str,
|
|
1168
|
+
fork_history: MessageHistory,
|
|
1169
|
+
original_content: str,
|
|
1170
|
+
path: str,
|
|
1171
|
+
agent_ctx: AgentContext,
|
|
1172
|
+
) -> str:
|
|
1173
|
+
"""TRUE streaming edit using StreamingDiffParser + replace_content().
|
|
1174
|
+
|
|
1175
|
+
Parses diff incrementally, uses our 9-strategy replace_content() to
|
|
1176
|
+
find locations and apply edits as each hunk completes.
|
|
1177
|
+
"""
|
|
1178
|
+
parser = StreamingDiffParser()
|
|
1179
|
+
|
|
1180
|
+
# Track current state
|
|
1181
|
+
edited_content = original_content
|
|
1182
|
+
pending_old_text: list[str] = []
|
|
1183
|
+
pending_new_text: list[str] = []
|
|
1184
|
+
|
|
1185
|
+
async for node in agent.run_stream(
|
|
1186
|
+
prompt,
|
|
1187
|
+
message_history=fork_history,
|
|
1188
|
+
store_history=False,
|
|
1189
|
+
):
|
|
1190
|
+
match node:
|
|
1191
|
+
case (
|
|
1192
|
+
PartStartEvent(part=TextPart(content=chunk))
|
|
1193
|
+
| PartDeltaEvent(delta=TextPartDelta(content_delta=chunk))
|
|
1194
|
+
):
|
|
1195
|
+
# Parse diff chunk and process events
|
|
1196
|
+
for event in parser.push(chunk):
|
|
1197
|
+
if isinstance(event, OldTextChunk):
|
|
1198
|
+
if not event.done:
|
|
1199
|
+
pending_old_text.append(event.chunk)
|
|
1200
|
+
# When old_text is done, we just wait for new_text
|
|
1201
|
+
|
|
1202
|
+
elif isinstance(event, NewTextChunk):
|
|
1203
|
+
if not event.done:
|
|
1204
|
+
pending_new_text.append(event.chunk)
|
|
1205
|
+
else:
|
|
1206
|
+
# Hunk complete - apply using replace_content
|
|
1207
|
+
if pending_old_text and pending_new_text:
|
|
1208
|
+
old_text = "".join(pending_old_text)
|
|
1209
|
+
new_text = "".join(pending_new_text)
|
|
1210
|
+
|
|
1211
|
+
try:
|
|
1212
|
+
edited_content = replace_content(
|
|
1213
|
+
edited_content,
|
|
1214
|
+
old_text,
|
|
1215
|
+
new_text,
|
|
1216
|
+
replace_all=False,
|
|
1217
|
+
)
|
|
1218
|
+
# Emit progress update
|
|
1219
|
+
await agent_ctx.events.file_edit_progress(
|
|
1220
|
+
path=path,
|
|
1221
|
+
old_text=original_content,
|
|
1222
|
+
new_text=edited_content,
|
|
1223
|
+
status="in_progress",
|
|
1224
|
+
)
|
|
1225
|
+
except ValueError as e:
|
|
1226
|
+
# Log but continue - some hunks may fail
|
|
1227
|
+
logger.warning(
|
|
1228
|
+
"Streaming hunk failed",
|
|
1229
|
+
error=str(e),
|
|
1230
|
+
old_text=old_text[:50],
|
|
1231
|
+
)
|
|
1232
|
+
|
|
1233
|
+
# Reset for next hunk
|
|
1234
|
+
pending_old_text = []
|
|
1235
|
+
pending_new_text = []
|
|
1236
|
+
|
|
1237
|
+
# Process any remaining content
|
|
1238
|
+
for event in parser.finish():
|
|
1239
|
+
if isinstance(event, OldTextChunk) and not event.done:
|
|
1240
|
+
pending_old_text.append(event.chunk)
|
|
1241
|
+
elif isinstance(event, NewTextChunk):
|
|
1242
|
+
if not event.done:
|
|
1243
|
+
pending_new_text.append(event.chunk)
|
|
1244
|
+
elif pending_old_text and pending_new_text:
|
|
1245
|
+
old_text = "".join(pending_old_text)
|
|
1246
|
+
new_text = "".join(pending_new_text)
|
|
1247
|
+
with contextlib.suppress(ValueError):
|
|
1248
|
+
edited_content = replace_content(
|
|
1249
|
+
edited_content,
|
|
1250
|
+
old_text,
|
|
1251
|
+
new_text,
|
|
1252
|
+
replace_all=False,
|
|
1253
|
+
)
|
|
1254
|
+
|
|
1255
|
+
return edited_content
|
|
1256
|
+
|
|
1257
|
+
async def _stream_raw_content(
|
|
1258
|
+
self,
|
|
1259
|
+
agent: BaseAgent,
|
|
1260
|
+
prompt: str,
|
|
1261
|
+
fork_history: MessageHistory,
|
|
1262
|
+
original_content: str,
|
|
1263
|
+
path: str,
|
|
1264
|
+
agent_ctx: AgentContext,
|
|
1265
|
+
) -> str:
|
|
1266
|
+
"""Stream raw content for create/overwrite modes.
|
|
1267
|
+
|
|
1268
|
+
Emits progress updates as content streams in.
|
|
1269
|
+
"""
|
|
1270
|
+
streamed_content = ""
|
|
1271
|
+
|
|
1272
|
+
async for node in agent.run_stream(
|
|
1273
|
+
prompt,
|
|
1274
|
+
message_history=fork_history,
|
|
1275
|
+
store_history=False,
|
|
1276
|
+
):
|
|
1277
|
+
match node:
|
|
1278
|
+
case (
|
|
1279
|
+
PartStartEvent(part=TextPart(content=chunk))
|
|
1280
|
+
| PartDeltaEvent(delta=TextPartDelta(content_delta=chunk))
|
|
1281
|
+
):
|
|
1282
|
+
streamed_content += chunk
|
|
1283
|
+
|
|
1284
|
+
# Emit periodic progress updates
|
|
1285
|
+
await agent_ctx.events.file_edit_progress(
|
|
1286
|
+
path=path,
|
|
1287
|
+
old_text=original_content,
|
|
1288
|
+
new_text=streamed_content,
|
|
1289
|
+
status="in_progress",
|
|
1290
|
+
)
|
|
1291
|
+
|
|
1292
|
+
return streamed_content
|
|
1293
|
+
|
|
1294
|
+
|
|
1295
|
+
def _build_create_prompt(path: str, description: str) -> str:
|
|
1296
|
+
"""Build prompt for create mode."""
|
|
1297
|
+
return f"""Create a new file at {path} according to this description:
|
|
1298
|
+
|
|
1299
|
+
{description}
|
|
1300
|
+
|
|
1301
|
+
Output ONLY the complete file content. No explanations, no markdown code blocks, no formatting.
|
|
1302
|
+
DO NOT use any tools. Just output the file content directly."""
|
|
1303
|
+
|
|
1304
|
+
|
|
1305
|
+
def _build_overwrite_prompt(path: str, description: str, current_content: str) -> str:
|
|
1306
|
+
"""Build prompt for overwrite mode."""
|
|
1307
|
+
return f"""Rewrite the file {path} according to this description:
|
|
1308
|
+
|
|
1309
|
+
{description}
|
|
1310
|
+
|
|
1311
|
+
<current_file_content>
|
|
1312
|
+
{current_content}
|
|
1313
|
+
</current_file_content>
|
|
1314
|
+
|
|
1315
|
+
Output ONLY the complete new file content. No explanations, no code blocks, no formatting.
|
|
1316
|
+
DO NOT use any tools. Just output the file content directly."""
|
|
1317
|
+
|
|
1318
|
+
|
|
1319
|
+
def _build_edit_prompt(path: str, description: str, current_content: str) -> str:
    """Build prompt for diff-based edit mode.

    Instructs the model to answer with context-anchored unified-diff hunks
    wrapped in <diff> tags (no @@ headers or line numbers — locations are
    resolved by context matching downstream), embedding the current file
    content and the requested edit description verbatim.
    """
    return f"""\
You MUST respond with edits in unified diff format. Output your changes inside <diff> tags.

# Diff Format Instructions

Use standard unified diff format with context lines:
- Lines starting with a space are context (unchanged)
- Lines starting with `-` are removed
- Lines starting with `+` are added
- Include 2-3 lines of context before and after each change
- Do NOT include line numbers or @@ headers - locations are matched by context

Example:
<diff>
 def existing_function():
-    old_implementation()
+    new_implementation()
     return result
</diff>

# Rules

- Context lines MUST exactly match the file content (including indentation)
- Include enough context to uniquely identify the location
- For multiple separate changes, include all hunks within a single <diff> block
- Separate hunks with a blank line
- Do not escape quotes or special characters
- Preserve exact indentation from the original file

<file_to_edit path="{path}">
{current_content}
</file_to_edit>

<edit_description>
{description}
</edit_description>

You MUST wrap your response in <diff>...</diff> tags.
DO NOT use any tools. Just output the diff directly."""
|
|
1360
|
+
|
|
1361
|
+
|
|
1362
|
+
if __name__ == "__main__":

    # Manual smoke test: run agentic_edit against a real file on the local
    # filesystem through an AgentPool-managed agent.
    async def main() -> None:
        import fsspec
        from pydantic_ai import RunContext as PyAiContext, RunUsage
        from pydantic_ai.models.test import TestModel

        from agentpool import AgentPool

        # Local-filesystem backend wrapped in the toolset under test.
        fs = fsspec.filesystem("file")
        tools = FSSpecTools(fs, name="local_fs")
        async with AgentPool() as pool:
            agent = await pool.add_agent("test", model="anthropic-max:claude-haiku-4-5")
            agent_ctx = agent.get_context()
            # TestModel satisfies the RunContext signature; the edit itself
            # is driven by the pool agent via agent_ctx.
            result = await tools.agentic_edit(
                PyAiContext(deps=None, model=TestModel(), usage=RunUsage()),
                agent_ctx,
                # NOTE(review): hard-coded developer-machine path — this demo
                # only works on that specific checkout.
                path="/home/phil65/dev/oss/agentpool/src/agentpool_toolsets/fsspec_toolset/toolset.py",
                display_description="Append a poem",
            )
            print(result)

    anyio.run(main)
|