fast-agent-mcp 0.2.28__tar.gz → 0.2.30__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp might be problematic.
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/.gitignore +1 -1
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/PKG-INFO +11 -8
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/README.md +5 -3
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/pyproject.toml +6 -6
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/app.py +1 -1
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/cli/commands/check_config.py +11 -2
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/cli/commands/url_parser.py +7 -1
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/config.py +16 -1
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/context.py +5 -3
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/enhanced_prompt.py +105 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/interactive_prompt.py +2 -2
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/model_factory.py +6 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/provider_key_manager.py +1 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/provider_types.py +2 -0
- fast_agent_mcp-0.2.28/src/mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent_mcp-0.2.30/src/mcp_agent/llm/providers/augmented_llm_aliyun.py +9 -9
- fast_agent_mcp-0.2.30/src/mcp_agent/llm/providers/augmented_llm_deepseek.py +93 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/google_converter.py +4 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/common.py +2 -2
- fast_agent_mcp-0.2.30/src/mcp_agent/mcp/hf_auth.py +106 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/mcp_agent_client_session.py +16 -40
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/mcp_aggregator.py +2 -1
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp_server_registry.py +10 -3
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/LICENSE +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/azure-openai/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/data-analysis/analysis-campaign.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/data-analysis/analysis.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/data-analysis/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/mcp/state-transfer/agent_one.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/mcp/state-transfer/agent_two.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/mcp/vision-examples/example1.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/mcp/vision-examples/example2.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/mcp/vision-examples/example3.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/mcp/vision-examples/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/otel/agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/otel/agent2.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/otel/docker-compose.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/otel/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/researcher/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/researcher/researcher-imp.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/researcher/researcher.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/tensorzero/README.md +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/tensorzero/agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/tensorzero/docker-compose.yml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/tensorzero/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/tensorzero/image_demo.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/tensorzero/simple_agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/chaining.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/evaluator.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/graded_report.md +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/human_input.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/orchestrator.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/parallel.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/router.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/short_story.md +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/examples/workflows/short_story.txt +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/base_agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/workflow/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/workflow/chain_agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/workflow/evaluator_optimizer.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/workflow/orchestrator_agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/workflow/orchestrator_models.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/workflow/orchestrator_prompts.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/workflow/parallel_agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/agents/workflow/router_agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/cli/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/cli/__main__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/cli/commands/go.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/cli/commands/quickstart.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/cli/commands/setup.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/cli/main.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/cli/terminal.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/console.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/context_dependent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/agent_app.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/agent_types.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/direct_decorators.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/direct_factory.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/error_handling.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/exceptions.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/fastagent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/mcp_content.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/prompt.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/request_params.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/core/validation.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/event_progress.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/executor/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/executor/executor.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/executor/task_registry.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/executor/workflow_signal.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/human_input/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/human_input/handler.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/human_input/types.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/augmented_llm.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/augmented_llm_passthrough.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/augmented_llm_playback.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/augmented_llm_slow.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/memory.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/prompt_utils.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/anthropic_utils.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/augmented_llm_azure.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/augmented_llm_generic.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/augmented_llm_google_native.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/augmented_llm_google_oai.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/augmented_llm_openai.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/augmented_llm_openrouter.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/augmented_llm_tensorzero.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/multipart_converter_anthropic.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/multipart_converter_openai.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/multipart_converter_tensorzero.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/openai_multipart.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/openai_utils.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/sampling_converter_openai.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/sampling_converter.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/sampling_format_converter.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/logging/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/logging/events.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/logging/json_serializer.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/logging/listeners.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/logging/logger.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/logging/rich_progress.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/logging/transport.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/gen_client.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/helpers/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/helpers/content_helpers.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/helpers/server_config_helpers.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/interfaces.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/logger_textio.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/mime_utils.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompt_message_multipart.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompt_render.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompt_serialization.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompts/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompts/__main__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompts/prompt_constants.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompts/prompt_helpers.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompts/prompt_load.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompts/prompt_server.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/prompts/prompt_template.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/resource_utils.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp/sampling.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp_server/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/mcp_server/agent_server.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/progress_display.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/in_dev/agent_build.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/in_dev/css-LICENSE.txt +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/in_dev/slides.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/internal/agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/internal/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/internal/history_transfer.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/internal/job.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/internal/prompt_category.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/internal/prompt_sizing.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/internal/simple.txt +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/internal/sizer.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/internal/social.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_one.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_two.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/prompting/__init__.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/prompting/agent.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/prompting/delimited_prompt.txt +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/prompting/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/prompting/image_server.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/prompting/prompt1.txt +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/prompting/work_with_image.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/researcher/researcher-imp.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/resources/examples/workflows/short_story.txt +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/tools/tool_definition.py +0 -0
- {fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/ui/console_display.py +0 -0
@@ -168,7 +168,7 @@ examples/mcp_root_test/test_data/*.png
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
-
+.idea/
 uv.lock

 # File generated from promptify script (to create an LLM-friendly prompt for the repo)
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.28
+Version: 0.2.30
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>
 License: Apache License
@@ -213,16 +213,17 @@ Requires-Dist: a2a-types>=0.1.0
 Requires-Dist: aiohttp>=3.11.13
 Requires-Dist: anthropic>=0.49.0
 Requires-Dist: azure-identity>=1.14.0
+Requires-Dist: deprecated>=1.2.18
 Requires-Dist: fastapi>=0.115.6
 Requires-Dist: google-genai
-Requires-Dist: mcp==1.9.
+Requires-Dist: mcp==1.9.3
 Requires-Dist: openai>=1.63.2
 Requires-Dist: opentelemetry-distro>=0.50b0
 Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
-Requires-Dist: opentelemetry-instrumentation-anthropic>=0.
+Requires-Dist: opentelemetry-instrumentation-anthropic>=0.40.7; python_version >= '3.10' and python_version < '4.0'
 Requires-Dist: opentelemetry-instrumentation-google-genai>=0.2b0
-Requires-Dist: opentelemetry-instrumentation-mcp>=0.40.
-Requires-Dist: opentelemetry-instrumentation-openai>=0.
+Requires-Dist: opentelemetry-instrumentation-mcp>=0.40.7; python_version >= '3.10' and python_version < '4.0'
+Requires-Dist: opentelemetry-instrumentation-openai>=0.0.40.7; python_version >= '3.10' and python_version < '4.0'
 Requires-Dist: prompt-toolkit>=3.0.50
 Requires-Dist: pydantic-settings>=2.7.0
 Requires-Dist: pydantic>=2.10.4
@@ -286,11 +287,13 @@ Start by installing the [uv package manager](https://docs.astral.sh/uv/) for Pyt

 ```bash
 uv pip install fast-agent-mcp # install fast-agent!
-
-
+fast-agent go # start an interactive session
+fast-agent go https://hf.co/mcp # with a remote MCP
+fast-agent go --model=generic.qwen2.5 # use ollama qwen 2.5
+fast-agent setup # create an example agent and config files
 uv run agent.py # run your first agent
 uv run agent.py --model=o3-mini.low # specify a model
-
+fast-agent quickstart workflow # create "building effective agents" examples
 ```

 Other quickstart examples include a Researcher Agent (with Evaluator-Optimizer workflow) and Data Analysis Agent (similar to the ChatGPT experience), demonstrating MCP Roots support.
@@ -38,11 +38,13 @@ Start by installing the [uv package manager](https://docs.astral.sh/uv/) for Pyt

 ```bash
 uv pip install fast-agent-mcp # install fast-agent!
-
-
+fast-agent go # start an interactive session
+fast-agent go https://hf.co/mcp # with a remote MCP
+fast-agent go --model=generic.qwen2.5 # use ollama qwen 2.5
+fast-agent setup # create an example agent and config files
 uv run agent.py # run your first agent
 uv run agent.py --model=o3-mini.low # specify a model
-
+fast-agent quickstart workflow # create "building effective agents" examples
 ```

 Other quickstart examples include a Researcher Agent (with Evaluator-Optimizer workflow) and Data Analysis Agent (similar to the ChatGPT experience), demonstrating MCP Roots support.
@@ -1,6 +1,6 @@
 [project]
 name = "fast-agent-mcp"
-version = "0.2.28"
+version = "0.2.30"
 description = "Define, Prompt and Test MCP enabled Agents and Workflows"
 readme = "README.md"
 license = { file = "LICENSE" }
@@ -15,7 +15,7 @@ classifiers = [
 requires-python = ">=3.10"
 dependencies = [
     "fastapi>=0.115.6",
-    "mcp==1.9.
+    "mcp==1.9.3",
     "opentelemetry-distro>=0.50b0",
     "opentelemetry-exporter-otlp-proto-http>=1.29.0",
     "pydantic-settings>=2.7.0",
@@ -29,14 +29,14 @@ dependencies = [
     "prompt-toolkit>=3.0.50",
     "aiohttp>=3.11.13",
     "a2a-types>=0.1.0",
-    "opentelemetry-instrumentation-openai>=0.
-    "opentelemetry-instrumentation-anthropic>=0.
-    "opentelemetry-instrumentation-mcp>=0.40.
+    "opentelemetry-instrumentation-openai>=0.0.40.7; python_version >= '3.10' and python_version < '4.0'",
+    "opentelemetry-instrumentation-anthropic>=0.40.7; python_version >= '3.10' and python_version < '4.0'",
+    "opentelemetry-instrumentation-mcp>=0.40.7; python_version >= '3.10' and python_version < '4.0'",
     "google-genai",
     "opentelemetry-instrumentation-google-genai>=0.2b0",
     "tensorzero>=2025.4.7",
-    "google-genai",
     "opentelemetry-instrumentation-google-genai>=0.2b0",
+    "deprecated>=1.2.18",
 ]

 [project.optional-dependencies]
@@ -119,7 +119,7 @@ class MCPApp:
         if self._initialized:
             return

-        self._context = await initialize_context(self._config_or_path)
+        self._context = await initialize_context(self._config_or_path, store_globally=True)

         # Set the properties that were passed in the constructor
         self._context.human_input_handler = self._human_input_callback
@@ -226,8 +226,17 @@ def get_config_summary(config_path: Optional[Path]) -> dict:

             # Determine transport type
             if "url" in server_config:
-
-                server_info["url"] =
+                url = server_config.get("url", "")
+                server_info["url"] = url
+
+                # Use URL path to determine transport type
+                try:
+                    from .url_parser import parse_server_url
+                    _, transport_type, _ = parse_server_url(url)
+                    server_info["transport"] = transport_type.upper()
+                except Exception:
+                    # Fallback to HTTP if URL parsing fails
+                    server_info["transport"] = "HTTP"

             # Get command and args
             command = server_config.get("command", "")
@@ -8,6 +8,8 @@ import re
 from typing import Dict, List, Literal, Tuple
 from urllib.parse import urlparse

+from mcp_agent.mcp.hf_auth import add_hf_auth_header
+

 def parse_server_url(
     url: str,
@@ -131,7 +133,11 @@ parse_server_urls(
     result = []
     for url in url_list:
         server_name, transport_type, parsed_url = parse_server_url(url)
-
+
+        # Apply HuggingFace authentication if appropriate
+        final_headers = add_hf_auth_header(parsed_url, headers)
+
+        result.append((server_name, transport_type, parsed_url, final_headers))

     return result

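The helper imported above, `add_hf_auth_header`, is what attaches HuggingFace credentials to each parsed server URL. A minimal sketch of calling it directly, assuming it returns the headers unchanged for non-HuggingFace hosts and adds an `Authorization` header when an HF key is configured (`hf_auth.py` itself is not shown in this diff, so treat that behaviour as an assumption):

```python
from mcp_agent.mcp.hf_auth import add_hf_auth_header

# Sketch only: pass the parsed server URL and any existing headers; the helper
# is expected to return the final header dict to use for the MCP connection.
headers = add_hf_auth_header("https://hf.co/mcp", {"Accept": "text/event-stream"})
print(headers)
```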
@@ -222,6 +222,15 @@ class TensorZeroSettings(BaseModel):
     model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)


+class HuggingFaceSettings(BaseModel):
+    """
+    Settings for HuggingFace authentication (used for MCP connections).
+    """
+
+    api_key: Optional[str] = None
+    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
+
+
 class LoggerSettings(BaseModel):
     """
     Logger settings for the fast-agent application.
@@ -291,7 +300,7 @@ class Settings(BaseSettings):
     Default model for agents. Format is provider.model_name.<reasoning_effort>, for example openai.o3-mini.low
     Aliases are provided for common models e.g. sonnet, haiku, gpt-4.1, o3-mini etc.
     """
-
+
     auto_sampling: bool = True
     """Enable automatic sampling model selection if not explicitly configured"""

@@ -322,6 +331,12 @@ class Settings(BaseSettings):
     azure: AzureSettings | None = None
     """Settings for using Azure OpenAI Service in the fast-agent application"""

+    aliyun: OpenAISettings | None = None
+    """Settings for using Aliyun OpenAI Service in the fast-agent application"""
+
+    huggingface: HuggingFaceSettings | None = None
+    """Settings for HuggingFace authentication (used for MCP connections)"""
+
     logger: LoggerSettings | None = LoggerSettings()
     """Logger settings for the fast-agent application"""

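The two new `Settings` fields above can be populated from `fastagent.config.yaml` or constructed directly. A minimal sketch of the programmatic form, assuming the usual `api_key`/`base_url` fields on `OpenAISettings` (values are placeholders):

```python
from mcp_agent.config import HuggingFaceSettings, OpenAISettings, Settings

settings = Settings(
    # Aliyun reuses the OpenAI-compatible settings shape, per the type above.
    aliyun=OpenAISettings(
        api_key="sk-...",  # placeholder
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    ),
    # HuggingFace auth is just an API key, used for MCP connections.
    huggingface=HuggingFaceSettings(api_key="hf_..."),  # placeholder
)
print(settings.aliyun.base_url, settings.huggingface.api_key is not None)
```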
@@ -12,7 +12,8 @@ from opentelemetry import trace
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
 from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
-
+
+# from opentelemetry.instrumentation.mcp import McpInstrumentor
 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.propagate import set_global_textmap
 from opentelemetry.sdk.resources import Resource
@@ -114,7 +115,9 @@ async def configure_otel(config: "Settings") -> None:
         AnthropicInstrumentor().instrument()
         OpenAIInstrumentor().instrument()
         GoogleGenAiSdkInstrumentor().instrument()
-
+
+
+        # McpInstrumentor().instrument()


 async def configure_logger(config: "Settings") -> None:
@@ -198,7 +201,6 @@ _global_context: Context | None = None
 def get_current_context() -> Context:
     """
     Synchronous initializer/getter for global application context.
-    For async usage, use aget_current_context instead.
     """
     global _global_context
     if _global_context is None:
@@ -2,6 +2,11 @@
 Enhanced prompt functionality with advanced prompt_toolkit features.
 """

+import asyncio
+import os
+import shlex
+import subprocess
+import tempfile
 from importlib.metadata import version
 from typing import List, Optional

@@ -96,6 +101,85 @@ class AgentCompleter(Completer):
         )


+# Helper function to open text in an external editor
+def get_text_from_editor(initial_text: str = "") -> str:
+    """
+    Opens the user's configured editor ($VISUAL or $EDITOR) to edit the initial_text.
+    Falls back to 'nano' (Unix) or 'notepad' (Windows) if neither is set.
+    Returns the edited text, or the original text if an error occurs.
+    """
+    editor_cmd_str = os.environ.get("VISUAL") or os.environ.get("EDITOR")
+
+    if not editor_cmd_str:
+        if os.name == "nt":  # Windows
+            editor_cmd_str = "notepad"
+        else:  # Unix-like (Linux, macOS)
+            editor_cmd_str = "nano"  # A common, usually available, simple editor
+
+    # Use shlex.split to handle editors with arguments (e.g., "code --wait")
+    try:
+        editor_cmd_list = shlex.split(editor_cmd_str)
+        if not editor_cmd_list:  # Handle empty string from shlex.split
+            raise ValueError("Editor command string is empty or invalid.")
+    except ValueError as e:
+        rich_print(f"[red]Error: Invalid editor command string ('{editor_cmd_str}'): {e}[/red]")
+        return initial_text
+
+    # Create a temporary file for the editor to use.
+    # Using a suffix can help some editors with syntax highlighting or mode.
+    try:
+        with tempfile.NamedTemporaryFile(
+            mode="w+", delete=False, suffix=".txt", encoding="utf-8"
+        ) as tmp_file:
+            if initial_text:
+                tmp_file.write(initial_text)
+                tmp_file.flush()  # Ensure content is written to disk before editor opens it
+            temp_file_path = tmp_file.name
+    except Exception as e:
+        rich_print(f"[red]Error: Could not create temporary file for editor: {e}[/red]")
+        return initial_text
+
+    try:
+        # Construct the full command: editor_parts + [temp_file_path]
+        # e.g., ['vim', '/tmp/somefile.txt'] or ['code', '--wait', '/tmp/somefile.txt']
+        full_cmd = editor_cmd_list + [temp_file_path]
+
+        # Run the editor. This is a blocking call.
+        subprocess.run(full_cmd, check=True)
+
+        # Read the content back from the temporary file.
+        with open(temp_file_path, "r", encoding="utf-8") as f:
+            edited_text = f.read()
+
+    except FileNotFoundError:
+        rich_print(
+            f"[red]Error: Editor command '{editor_cmd_list[0]}' not found. "
+            f"Please set $VISUAL or $EDITOR correctly, or install '{editor_cmd_list[0]}'.[/red]"
+        )
+        return initial_text
+    except subprocess.CalledProcessError as e:
+        rich_print(
+            f"[red]Error: Editor '{editor_cmd_list[0]}' closed with an error (code {e.returncode}).[/red]"
+        )
+        return initial_text
+    except Exception as e:
+        rich_print(
+            f"[red]An unexpected error occurred while launching or using the editor: {e}[/red]"
+        )
+        return initial_text
+    finally:
+        # Always attempt to clean up the temporary file.
+        if "temp_file_path" in locals() and os.path.exists(temp_file_path):
+            try:
+                os.remove(temp_file_path)
+            except Exception as e:
+                rich_print(
+                    f"[yellow]Warning: Could not remove temporary file {temp_file_path}: {e}[/yellow]"
+                )
+
+    return edited_text.strip()  # Added strip() to remove trailing newlines often added by editors
+
+
 def create_keybindings(on_toggle_multiline=None, app=None):
     """Create custom key bindings."""
     kb = KeyBindings()
@@ -140,6 +224,27 @@ def create_keybindings(on_toggle_multiline=None, app=None):
         """Ctrl+L: Clear the input buffer."""
         event.current_buffer.text = ""

+    @kb.add("c-e")
+    async def _(event) -> None:
+        """Ctrl+E: Edit current buffer in $EDITOR."""
+        current_text = event.app.current_buffer.text
+        try:
+            # Run the synchronous editor function in a thread
+            edited_text = await event.app.loop.run_in_executor(
+                None, get_text_from_editor, current_text
+            )
+            event.app.current_buffer.text = edited_text
+            # Optionally, move cursor to the end of the edited text
+            event.app.current_buffer.cursor_position = len(edited_text)
+        except asyncio.CancelledError:
+            rich_print("[yellow]Editor interaction cancelled.[/yellow]")
+        except Exception as e:
+            rich_print(f"[red]Error during editor interaction: {e}[/red]")
+        finally:
+            # Ensure the UI is updated
+            if event.app:
+                event.app.invalidate()
+
     return kb


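The Ctrl+E binding above simply runs `get_text_from_editor` in an executor; the helper can also be exercised on its own. A small sketch (the editor command is just an example):

```python
import os

from mcp_agent.core.enhanced_prompt import get_text_from_editor

# Any blocking editor command works here, e.g. "code --wait" or "vim".
os.environ.setdefault("EDITOR", "nano")

edited = get_text_from_editor("draft prompt to polish in the editor")
print(edited)
```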
@@ -351,7 +351,7 @@ class InteractivePrompt:
                 for prompt in prompts:
                     # Get basic prompt info
                     prompt_name = getattr(prompt, "name", "Unknown")
-
+                    prompt_description = getattr(prompt, "description", "No description")

                     # Extract argument information
                     arg_names = []
|
|
387
387
|
"server": server_name,
|
388
388
|
"name": prompt_name,
|
389
389
|
"namespaced_name": namespaced_name,
|
390
|
-
"description":
|
390
|
+
"description": prompt_description,
|
391
391
|
"arg_count": len(arg_names),
|
392
392
|
"arg_names": arg_names,
|
393
393
|
"required_args": required_args,
|
@@ -10,6 +10,7 @@ from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
|
|
10
10
|
from mcp_agent.llm.augmented_llm_playback import PlaybackLLM
|
11
11
|
from mcp_agent.llm.augmented_llm_slow import SlowLLM
|
12
12
|
from mcp_agent.llm.provider_types import Provider
|
13
|
+
from mcp_agent.llm.providers.augmented_llm_aliyun import AliyunAugmentedLLM
|
13
14
|
from mcp_agent.llm.providers.augmented_llm_anthropic import AnthropicAugmentedLLM
|
14
15
|
from mcp_agent.llm.providers.augmented_llm_azure import AzureOpenAIAugmentedLLM
|
15
16
|
from mcp_agent.llm.providers.augmented_llm_deepseek import DeepSeekAugmentedLLM
|
@@ -103,6 +104,10 @@
         "gemini-2.0-flash": Provider.GOOGLE,
         "gemini-2.5-flash-preview-05-20": Provider.GOOGLE,
         "gemini-2.5-pro-preview-05-06": Provider.GOOGLE,
+        "qwen-turbo": Provider.ALIYUN,
+        "qwen-plus": Provider.ALIYUN,
+        "qwen-max": Provider.ALIYUN,
+        "qwen-long": Provider.ALIYUN,
     }

     MODEL_ALIASES = {
@@ -136,6 +141,7 @@
         Provider.OPENROUTER: OpenRouterAugmentedLLM,
         Provider.TENSORZERO: TensorZeroAugmentedLLM,
         Provider.AZURE: AzureOpenAIAugmentedLLM,
+        Provider.ALIYUN: AliyunAugmentedLLM,
     }

     # Mapping of special model names to their specific LLM classes
@@ -2,17 +2,17 @@ from mcp_agent.core.request_params import RequestParams
 from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM

-
-
+ALIYUN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
+DEFAULT_QWEN_MODEL = "qwen-turbo"


-class
+class AliyunAugmentedLLM(OpenAIAugmentedLLM):
     def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, provider=Provider.
+        super().__init__(*args, provider=Provider.ALIYUN, **kwargs)

     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
-        """Initialize
-        chosen_model = kwargs.get("model",
+        """Initialize Aliyun-specific default parameters"""
+        chosen_model = kwargs.get("model", DEFAULT_QWEN_MODEL)

         return RequestParams(
             model=chosen_model,
@@ -24,7 +24,7 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):

     def _base_url(self) -> str:
         base_url = None
-        if self.context.config and self.context.config.
-            base_url = self.context.config.
+        if self.context.config and self.context.config.aliyun:
+            base_url = self.context.config.aliyun.base_url

-        return base_url if base_url else
+        return base_url if base_url else ALIYUN_BASE_URL
@@ -0,0 +1,93 @@
+from copy import copy
+from typing import List, Tuple, Type, cast
+
+from openai.types.chat import (
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionMessage,
+)
+
+from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.provider_types import Provider
+from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+from mcp_agent.mcp.interfaces import ModelT
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+DEEPSEEK_BASE_URL = "https://api.deepseek.com"
+DEFAULT_DEEPSEEK_MODEL = "deepseekchat"  # current Deepseek only has two type models
+
+
+class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, provider=Provider.DEEPSEEK, **kwargs)
+
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """Initialize Deepseek-specific default parameters"""
+        chosen_model = kwargs.get("model", DEFAULT_DEEPSEEK_MODEL)
+
+        return RequestParams(
+            model=chosen_model,
+            systemPrompt=self.instruction,
+            parallel_tool_calls=True,
+            max_iterations=10,
+            use_history=True,
+        )
+
+    def _base_url(self) -> str:
+        base_url = None
+        if self.context.config and self.context.config.deepseek:
+            base_url = self.context.config.deepseek.base_url
+
+        return base_url if base_url else DEEPSEEK_BASE_URL
+
+    async def _apply_prompt_provider_specific_structured(
+        self,
+        multipart_messages: List[PromptMessageMultipart],
+        model: Type[ModelT],
+        request_params: RequestParams | None = None,
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:  # noqa: F821
+        request_params = self.get_request_params(request_params)
+
+        request_params.response_format = {"type": "json_object"}
+
+        # Get the full schema and extract just the properties
+        full_schema = model.model_json_schema()
+        properties = full_schema.get("properties", {})
+        required_fields = full_schema.get("required", [])
+
+        # Create a cleaner format description
+        format_description = "{\n"
+        for field_name, field_info in properties.items():
+            field_type = field_info.get("type", "string")
+            description = field_info.get("description", "")
+            format_description += f' "{field_name}": "{field_type}"'
+            if description:
+                format_description += f" // {description}"
+            if field_name in required_fields:
+                format_description += " // REQUIRED"
+            format_description += "\n"
+        format_description += "}"
+
+        multipart_messages[-1].add_text(
+            f"""YOU MUST RESPOND WITH A JSON OBJECT IN EXACTLY THIS FORMAT:
+{format_description}
+
+IMPORTANT RULES:
+- Respond ONLY with the JSON object, no other text
+- Do NOT include "properties" or "schema" wrappers
+- Do NOT use code fences or markdown
+- The response must be valid JSON that matches the format above
+- All required fields must be included"""
+        )
+
+        result: PromptMessageMultipart = await self._apply_prompt_provider_specific(
+            multipart_messages, request_params
+        )
+        return self._structured_from_multipart(result, model)
+
+    @classmethod
+    def convert_message_to_message_param(cls, message: ChatCompletionMessage, **kwargs) -> ChatCompletionAssistantMessageParam:
+        """Convert a response object to an input parameter object to allow LLM calls to be chained."""
+        if hasattr(message, "reasoning_content"):
+            message = copy(message)
+            del message.reasoning_content
+        return cast("ChatCompletionAssistantMessageParam", message)
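The structured-output path above derives a JSON format description from the Pydantic model's schema and appends it to the last message. A standalone sketch of that schema-to-format step, using a hypothetical `WeatherReport` model (not part of the package):

```python
from pydantic import BaseModel, Field


class WeatherReport(BaseModel):
    city: str = Field(description="City the report is for")
    temperature_c: float
    summary: str = ""


full_schema = WeatherReport.model_json_schema()
properties = full_schema.get("properties", {})
required_fields = full_schema.get("required", [])

# Build the compact format description the same way the provider does.
format_description = "{\n"
for field_name, field_info in properties.items():
    field_type = field_info.get("type", "string")
    description = field_info.get("description", "")
    format_description += f' "{field_name}": "{field_type}"'
    if description:
        format_description += f" // {description}"
    if field_name in required_fields:
        format_description += " // REQUIRED"
    format_description += "\n"
format_description += "}"

print(format_description)  # this text is appended to the final user message
```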
{fast_agent_mcp-0.2.28 → fast_agent_mcp-0.2.30}/src/mcp_agent/llm/providers/google_converter.py
RENAMED
@@ -166,6 +166,10 @@ class GoogleConverter:
         fast_agent_parts: List[
             TextContent | ImageContent | EmbeddedResource | CallToolRequestParams
         ] = []
+
+        if content is None or not hasattr(content, 'parts') or content.parts is None:
+            return []  # Google API response 'content' object is None. Cannot extract parts.
+
         for part in content.parts:
             if part.text:
                 fast_agent_parts.append(TextContent(type="text", text=part.text))
@@ -8,9 +8,9 @@ SEP = "-"

 def create_namespaced_name(server_name: str, resource_name: str) -> str:
     """Create a namespaced resource name from server and resource names"""
-    return f"{server_name}{SEP}{resource_name}"
+    return f"{server_name}{SEP}{resource_name}"[:64]


 def is_namespaced_name(name: str) -> bool:
     """Check if a name is already namespaced"""
-    return SEP in name
+    return SEP in name
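The change above caps namespaced tool/resource names at 64 characters. A quick illustration of the new behaviour in isolation:

```python
SEP = "-"


def create_namespaced_name(server_name: str, resource_name: str) -> str:
    """Create a namespaced resource name, truncated to 64 characters."""
    return f"{server_name}{SEP}{resource_name}"[:64]


# A long server/tool combination is clipped, which keeps combined names within
# the 64-character tool-name limits enforced by some model providers.
print(len(create_namespaced_name("a" * 40, "b" * 40)))  # -> 64
```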