fast-agent-mcp 0.2.43__tar.gz → 0.2.44__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of fast-agent-mcp might be problematic.
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/PKG-INFO +3 -2
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/pyproject.toml +3 -2
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/base_agent.py +60 -22
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/config.py +2 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/agent_app.py +15 -5
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/enhanced_prompt.py +81 -11
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/fastagent.py +9 -1
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/interactive_prompt.py +60 -1
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/usage_display.py +10 -3
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/augmented_llm.py +4 -5
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/augmented_llm_passthrough.py +15 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_anthropic.py +4 -3
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_bedrock.py +3 -3
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_google_native.py +4 -7
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_openai.py +5 -8
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_tensorzero.py +6 -7
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/google_converter.py +6 -9
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/multipart_converter_anthropic.py +5 -4
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/multipart_converter_openai.py +33 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/multipart_converter_tensorzero.py +3 -2
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/logging/rich_progress.py +6 -2
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/logging/transport.py +30 -36
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/helpers/content_helpers.py +26 -11
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/interfaces.py +22 -2
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompt_message_multipart.py +2 -3
- fast_agent_mcp-0.2.44/src/mcp_agent/ui/console_display.py +625 -0
- fast_agent_mcp-0.2.43/src/mcp_agent/ui/console_display.py → fast_agent_mcp-0.2.44/src/mcp_agent/ui/console_display_legacy.py +50 -63
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/.gitignore +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/LICENSE +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/README.md +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/azure-openai/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/bedrock/fast-agent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/custom-agents/agent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/custom-agents/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/data-analysis/analysis-campaign.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/data-analysis/analysis.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/data-analysis/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/elicitations/elicitation_account_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/elicitations/elicitation_game_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/elicitations/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/elicitations/forms_demo.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/elicitations/game_character.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/elicitations/game_character_handler.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/elicitations/tool_call.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/mcp-filtering/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/mcp-filtering/fastagent.secrets.yaml.example +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/mcp-filtering/mcp_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/mcp-filtering/test_mcp_filtering.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/state-transfer/agent_one.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/state-transfer/agent_two.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/vision-examples/cat.png +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/vision-examples/example1.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/vision-examples/example2.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/vision-examples/example3.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/mcp/vision-examples/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/otel/agent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/otel/agent2.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/otel/docker-compose.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/otel/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/researcher/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/researcher/researcher-imp.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/researcher/researcher.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/.env.sample +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/Makefile +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/README.md +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/agent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/demo_images/clam.jpg +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/demo_images/crab.png +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/demo_images/shrimp.png +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/docker-compose.yml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/image_demo.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/mcp_server/Dockerfile +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/simple_agent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/chaining.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/evaluator.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/graded_report.md +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/human_input.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/orchestrator.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/parallel.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/router.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/short_story.md +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/examples/workflows/short_story.txt +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/hatch_build.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/agent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/workflow/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/workflow/chain_agent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/workflow/evaluator_optimizer.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/workflow/orchestrator_agent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/workflow/orchestrator_models.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/workflow/orchestrator_prompts.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/workflow/parallel_agent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/workflow/router_agent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/app.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/__main__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/commands/check_config.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/commands/go.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/commands/quickstart.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/commands/server_helpers.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/commands/setup.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/commands/url_parser.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/constants.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/main.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/cli/terminal.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/console.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/context.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/context_dependent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/agent_types.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/direct_decorators.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/direct_factory.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/error_handling.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/exceptions.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/mcp_content.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/prompt.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/request_params.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/validation.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/event_progress.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/executor/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/executor/executor.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/executor/task_registry.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/executor/workflow_signal.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/human_input/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/human_input/elicitation_form.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/human_input/elicitation_forms.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/human_input/elicitation_handler.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/human_input/elicitation_state.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/human_input/handler.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/human_input/types.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/augmented_llm_playback.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/augmented_llm_silent.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/augmented_llm_slow.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/memory.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/model_database.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/model_factory.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/prompt_utils.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/provider_key_manager.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/provider_types.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/anthropic_utils.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_aliyun.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_azure.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_deepseek.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_generic.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_google_oai.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_openrouter.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/augmented_llm_xai.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/openai_multipart.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/openai_utils.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/providers/sampling_converter_openai.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/sampling_converter.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/sampling_format_converter.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/usage_tracking.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/logging/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/logging/events.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/logging/json_serializer.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/logging/listeners.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/logging/logger.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/common.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/elicitation_factory.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/elicitation_handlers.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/gen_client.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/helpers/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/helpers/server_config_helpers.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/hf_auth.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/logger_textio.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/mcp_aggregator.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/mime_utils.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompt_render.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompt_serialization.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompts/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompts/__main__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompts/prompt_constants.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompts/prompt_helpers.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompts/prompt_load.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompts/prompt_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/prompts/prompt_template.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/resource_utils.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp/sampling.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp_server/__init__.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp_server/agent_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/mcp_server_registry.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/progress_display.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/elicitations/forms_demo.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/elicitations/game_character.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/elicitations/game_character_handler.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/elicitations/tool_call.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_one.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_two.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/researcher/researcher-imp.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/graded_report.md +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/short_story.md +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/resources/examples/workflows/short_story.txt +0 -0
- {fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/tools/tool_definition.py +0 -0
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.43
+Version: 0.2.44
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>
 License: Apache License
@@ -218,7 +218,7 @@ Requires-Dist: deprecated>=1.2.18
 Requires-Dist: email-validator>=2.2.0
 Requires-Dist: fastapi>=0.115.6
 Requires-Dist: google-genai
-Requires-Dist: mcp==1.
+Requires-Dist: mcp==1.12.0
 Requires-Dist: openai>=1.93.0
 Requires-Dist: opentelemetry-distro>=0.50b0
 Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
@@ -229,6 +229,7 @@ Requires-Dist: opentelemetry-instrumentation-openai>=0.40.14; python_version >=
 Requires-Dist: prompt-toolkit>=3.0.50
 Requires-Dist: pydantic-settings>=2.7.0
 Requires-Dist: pydantic>=2.10.4
+Requires-Dist: pyperclip>=1.9.0
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: rich>=13.9.4
 Requires-Dist: tensorzero>=2025.6.3
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "fast-agent-mcp"
-version = "0.2.43"
+version = "0.2.44"
 description = "Define, Prompt and Test MCP enabled Agents and Workflows"
 readme = "README.md"
 license = { file = "LICENSE" }
@@ -15,7 +15,7 @@ classifiers = [
 requires-python = ">=3.12"
 dependencies = [
     "fastapi>=0.115.6",
-    "mcp==1.
+    "mcp==1.12.0",
     "opentelemetry-distro>=0.50b0",
     "opentelemetry-exporter-otlp-proto-http>=1.29.0",
     "pydantic-settings>=2.7.0",
@@ -38,6 +38,7 @@ dependencies = [
     "deprecated>=1.2.18",
     "a2a-sdk>=0.2.9",
     "email-validator>=2.2.0",
+    "pyperclip>=1.9.0",
 ]

 [project.optional-dependencies]
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/agents/base_agent.py

@@ -330,12 +330,12 @@ class BaseAgent(MCPAggregator, AgentProtocol):
     def _matches_pattern(self, name: str, pattern: str, server_name: str) -> bool:
         """
         Check if a name matches a pattern for a specific server.
-
+
         Args:
             name: The name to match (could be tool name, resource URI, or prompt name)
             pattern: The pattern to match against (e.g., "add", "math*", "resource://math/*")
             server_name: The server name (used for tool name prefixing)
-
+
         Returns:
             True if the name matches the pattern
         """
@@ -343,7 +343,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
         if name.startswith(f"{server_name}-"):
             full_pattern = f"{server_name}-{pattern}"
             return fnmatch.fnmatch(name, full_pattern)
-
+
         # For resources and prompts, match directly against the pattern
         return fnmatch.fnmatch(name, pattern)

@@ -365,9 +365,9 @@ class BaseAgent(MCPAggregator, AgentProtocol):
         filtered_tools = []
         for tool in result.tools:
             # Extract server name from tool name (e.g., "mathematics-add" -> "mathematics")
-            if
-            server_name = tool.name.split(
-
+            if "-" in tool.name:
+                server_name = tool.name.split("-", 1)[0]
+
                 # Check if this server has tool filters
                 if server_name in self.config.tools:
                     # Check if tool matches any pattern for this server
@@ -495,48 +495,70 @@ class BaseAgent(MCPAggregator, AgentProtocol):

     async def apply_prompt(
         self,
-
+        prompt: Union[str, GetPromptResult],
         arguments: Dict[str, str] | None = None,
         agent_name: str | None = None,
         server_name: str | None = None,
+        as_template: bool = False,
     ) -> str:
         """
-        Apply an MCP Server Prompt by name and return the assistant's response.
+        Apply an MCP Server Prompt by name or GetPromptResult and return the assistant's response.
         Will search all available servers for the prompt if not namespaced and no server_name provided.

         If the last message in the prompt is from a user, this will automatically
         generate an assistant response to ensure we always end with an assistant message.

         Args:
-
+            prompt: The name of the prompt to apply OR a GetPromptResult object
             arguments: Optional dictionary of string arguments to pass to the prompt template
             agent_name: Optional agent name (ignored at this level, used by multi-agent apps)
             server_name: Optional name of the server to get the prompt from
+            as_template: If True, store as persistent template (always included in context)

         Returns:
             The assistant's response or error message
         """

-        #
-
-
+        # Handle both string and GetPromptResult inputs
+        if isinstance(prompt, str):
+            prompt_name = prompt
+            # Get the prompt - this will search all servers if needed
+            self.logger.debug(f"Loading prompt '{prompt_name}'")
+            prompt_result: GetPromptResult = await self.get_prompt(
+                prompt_name, arguments, server_name
+            )
+
+            if not prompt_result or not prompt_result.messages:
+                error_msg = f"Prompt '{prompt_name}' could not be found or contains no messages"
+                self.logger.warning(error_msg)
+                return error_msg

-
-
-
-
+            # Get the display name (namespaced version)
+            namespaced_name = getattr(prompt_result, "namespaced_name", prompt_name)
+        else:
+            # prompt is a GetPromptResult object
+            prompt_result = prompt
+            if not prompt_result or not prompt_result.messages:
+                error_msg = "Provided GetPromptResult contains no messages"
+                self.logger.warning(error_msg)
+                return error_msg
+
+            # Use a reasonable display name
+            namespaced_name = getattr(prompt_result, "namespaced_name", "provided_prompt")

-        # Get the display name (namespaced version)
-        namespaced_name = getattr(prompt_result, "namespaced_name", prompt_name)
         self.logger.debug(f"Using prompt '{namespaced_name}'")

         # Convert prompt messages to multipart format using the safer method
         multipart_messages = PromptMessageMultipart.from_get_prompt_result(prompt_result)

-
-
-
-
+        if as_template:
+            # Use apply_prompt_template to store as persistent prompt messages
+            return await self.apply_prompt_template(prompt_result, namespaced_name)
+        else:
+            # Always call generate to ensure LLM implementations can handle prompt templates
+            # This is critical for stateful LLMs like PlaybackLLM
+            response = await self.generate(multipart_messages, None)
+            return response.first_text()

     async def get_embedded_resources(
         self, resource_uri: str, server_name: str | None = None
@@ -636,6 +658,22 @@ class BaseAgent(MCPAggregator, AgentProtocol):
         with self.tracer.start_as_current_span(f"Agent: '{self.name}' generate"):
             return await self._llm.generate(multipart_messages, request_params)

+    async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
+        """
+        Apply a prompt template as persistent context that will be included in all future conversations.
+        Delegates to the attached LLM.
+
+        Args:
+            prompt_result: The GetPromptResult containing prompt messages
+            prompt_name: The name of the prompt being applied
+
+        Returns:
+            String representation of the assistant's response if generated
+        """
+        assert self._llm
+        with self.tracer.start_as_current_span(f"Agent: '{self.name}' apply_prompt_template"):
+            return await self._llm.apply_prompt_template(prompt_result, prompt_name)
+
     async def structured(
         self,
         multipart_messages: List[PromptMessageMultipart],
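For orientation, a minimal usage sketch of the updated `apply_prompt` surface shown above. The prompt name, its arguments, and the `agent` instance are placeholders invented for illustration, and the calls assume an async context inside a running fast-agent application.

```python
# Hypothetical example only - "summary_prompt" and its arguments are made up.

# One-shot application: fetch the MCP server prompt, run it, return the reply.
reply = await agent.apply_prompt("summary_prompt", {"topic": "Q3 results"})

# New in 0.2.44: store the same prompt as a persistent template instead, so its
# messages stay in context for every later turn (delegates to apply_prompt_template).
await agent.apply_prompt("summary_prompt", {"topic": "Q3 results"}, as_template=True)

# A pre-fetched GetPromptResult can now be passed directly instead of a name.
reply = await agent.apply_prompt(prompt_result)  # prompt_result: GetPromptResult
```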
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/config.py

@@ -319,6 +319,8 @@ class LoggerSettings(BaseModel):
     """Truncate display of long tool calls"""
     enable_markup: bool = True
     """Enable markup in console output. Disable for outputs that may conflict with rich console formatting"""
+    use_legacy_display: bool = False
+    """Use the legacy console display instead of the new style display"""


 def find_fastagent_config_files(start_path: Path) -> Tuple[Optional[Path], Optional[Path]]:
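A small sketch of the new logger flag; constructing `LoggerSettings` directly is only for illustration, since in practice the value would normally come from the `logger` section of `fastagent.config.yaml`.

```python
# Illustrative only; the flag defaults to False (new-style display).
from mcp_agent.config import LoggerSettings

settings = LoggerSettings(use_legacy_display=True)  # opt back into the legacy console display
print(settings.use_legacy_display)  # -> True
```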
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/agent_app.py

@@ -5,7 +5,7 @@ Direct AgentApp implementation for interacting with agents without proxies.
 from typing import Dict, List, Optional, Union

 from deprecated import deprecated
-from mcp.types import PromptMessage
+from mcp.types import GetPromptResult, PromptMessage
 from rich import print as rich_print

 from mcp_agent.agents.agent import Agent
@@ -108,22 +108,26 @@ class AgentApp:

     async def apply_prompt(
         self,
-
+        prompt: Union[str, GetPromptResult],
         arguments: Dict[str, str] | None = None,
         agent_name: str | None = None,
+        as_template: bool = False,
     ) -> str:
         """
         Apply a prompt template to an agent (default agent if not specified).

         Args:
-
+            prompt: Name of the prompt template to apply OR a GetPromptResult object
             arguments: Optional arguments for the prompt template
             agent_name: Name of the agent to send to
+            as_template: If True, store as persistent template (always included in context)

         Returns:
             The agent's response as a string
         """
-        return await self._agent(agent_name).apply_prompt(
+        return await self._agent(agent_name).apply_prompt(
+            prompt, arguments, as_template=as_template
+        )

     async def list_prompts(self, server_name: str | None = None, agent_name: str | None = None):
         """
@@ -235,7 +239,12 @@ class AgentApp:
         """
         return await self.interactive(agent_name=agent_name, default_prompt=default_prompt)

-    async def interactive(
+    async def interactive(
+        self,
+        agent_name: str | None = None,
+        default_prompt: str = "",
+        pretty_print_parallel: bool = False,
+    ) -> str:
         """
         Interactive prompt for sending messages with advanced features.

@@ -283,6 +292,7 @@ class AgentApp:
             agent = self._agents.get(agent_name)
             if agent and agent.agent_type == AgentType.PARALLEL:
                 from mcp_agent.ui.console_display import ConsoleDisplay
+
                 display = ConsoleDisplay(config=None)
                 display.show_parallel_results(agent)

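For context, a hedged sketch of how the updated `AgentApp` surface might be driven; `app`, the prompt name, and the agent name are placeholders, and `prompt_result` stands for a `GetPromptResult` obtained elsewhere.

```python
# Hypothetical usage; names are made up for illustration.
reply = await app.apply_prompt("review", {"style": "terse"})      # by name, as before
reply = await app.apply_prompt(prompt_result, as_template=True)   # new: GetPromptResult, stored as a template

# interactive() now exposes an explicit signature, including the parallel pretty-print flag.
await app.interactive(agent_name="default", pretty_print_parallel=True)
```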
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/enhanced_prompt.py

@@ -106,15 +106,35 @@ async def _display_agent_info_helper(agent_name: str, agent_provider: object) ->
     else:
         # For regular agents, only display if they have MCP servers attached
         if server_count > 0:
-            #
+            # Build display parts in order: tools, prompts, resources (omit if count is 0)
+            display_parts = []
+
+            if tool_count > 0:
+                tool_word = "tool" if tool_count == 1 else "tools"
+                display_parts.append(f"{tool_count:,}[dim] {tool_word}[/dim]")
+
+            if prompt_count > 0:
+                prompt_word = "prompt" if prompt_count == 1 else "prompts"
+                display_parts.append(f"{prompt_count:,}[dim] {prompt_word}[/dim]")
+
+            if resource_count > 0:
+                resource_word = "resource" if resource_count == 1 else "resources"
+                display_parts.append(f"{resource_count:,}[dim] {resource_word}[/dim]")
+
+            # Always show server count
             server_word = "Server" if server_count == 1 else "Servers"
-
-            resource_word = "resource" if resource_count == 1 else "resources"
-            prompt_word = "prompt" if prompt_count == 1 else "prompts"
+            server_text = f"{server_count:,}[dim] MCP {server_word}[/dim]"

-
-
-
+            if display_parts:
+                content = (
+                    f"{server_text}[dim], [/dim]"
+                    + "[dim], [/dim]".join(display_parts)
+                    + "[dim] available[/dim]"
+                )
+            else:
+                content = f"{server_text}[dim] available[/dim]"
+
+            rich_print(f"[dim]Agent [/dim][blue]{agent_name}[/blue][dim]:[/dim] {content}")

             # Mark as shown
             _agent_info_shown.add(agent_name)
@@ -274,6 +294,7 @@ class AgentCompleter(Completer):
             "prompt": "List and select MCP prompts, or apply specific prompt (/prompt <name>)",
             "agents": "List available agents",
             "usage": "Show current usage statistics",
+            "markdown": "Show last assistant message without markdown formatting",
             "help": "Show available commands",
             "clear": "Clear the screen",
             "STOP": "Stop this prompting session and move to next workflow step",
@@ -398,7 +419,7 @@ def get_text_from_editor(initial_text: str = "") -> str:
     return edited_text.strip()  # Added strip() to remove trailing newlines often added by editors


-def create_keybindings(on_toggle_multiline=None, app=None):
+def create_keybindings(on_toggle_multiline=None, app=None, agent_provider=None, agent_name=None):
     """Create custom key bindings."""
     kb = KeyBindings()

@@ -463,6 +484,41 @@ def create_keybindings(on_toggle_multiline=None, app=None):
         if event.app:
             event.app.invalidate()

+    # Store reference to agent provider and agent name for clipboard functionality
+    kb.agent_provider = agent_provider
+    kb.current_agent_name = agent_name
+
+    @kb.add("c-y")
+    async def _(event) -> None:
+        """Ctrl+Y: Copy last assistant response to clipboard."""
+        if kb.agent_provider and kb.current_agent_name:
+            try:
+                # Get the agent
+                if hasattr(kb.agent_provider, "_agent"):
+                    agent = kb.agent_provider._agent(kb.current_agent_name)
+                else:
+                    agent = kb.agent_provider
+
+                # Get message history
+                if hasattr(agent, "_llm") and agent._llm and agent._llm.message_history:
+                    # Find last assistant message
+                    for msg in reversed(agent._llm.message_history):
+                        if msg.role == "assistant":
+                            content = msg.last_text()
+                            import pyperclip
+
+                            pyperclip.copy(content)
+                            rich_print("\n[green]✓ Copied to clipboard[/green]")
+                            return
+
+                    rich_print("\n[yellow]No assistant messages found[/yellow]")
+                else:
+                    rich_print("\n[yellow]No message history available[/yellow]")
+            except Exception as e:
+                rich_print(f"\n[red]Error copying: {e}[/red]")
+        else:
+            rich_print("[yellow]Clipboard copy not available in this context[/yellow]")
+
     return kb


@@ -527,6 +583,7 @@ async def get_enhanced_input(
     shortcuts = [
         ("Ctrl+T", toggle_text),
         ("Ctrl+E", "External"),
+        ("Ctrl+Y", "Copy"),
         ("Ctrl+L", "Clear"),
         ("↑/↓", "History"),
     ]
@@ -569,7 +626,12 @@
     )

     # Create key bindings with a reference to the app
-    bindings = create_keybindings(
+    bindings = create_keybindings(
+        on_toggle_multiline=on_multiline_toggle,
+        app=session.app,
+        agent_provider=agent_provider,
+        agent_name=agent_name,
+    )
     session.app.key_bindings = bindings

     # Create formatted prompt text
@@ -619,6 +681,8 @@
             return "LIST_AGENTS"
         elif cmd == "usage":
             return "SHOW_USAGE"
+        elif cmd == "markdown":
+            return "MARKDOWN"
         elif cmd == "prompt":
             # Handle /prompt with no arguments as interactive mode
             if len(cmd_parts) > 1:
@@ -792,16 +856,18 @@ async def handle_special_commands(command, agent_app=None):
         rich_print("  /help - Show this help")
         rich_print("  /clear - Clear screen")
         rich_print("  /agents - List available agents")
-        rich_print("  /prompts - List and select MCP prompts")
         rich_print("  /prompt <name> - Apply a specific prompt by name")
         rich_print("  /usage - Show current usage statistics")
+        rich_print("  /markdown - Show last assistant message without markdown formatting")
         rich_print("  @agent_name - Switch to agent")
         rich_print("  STOP - Return control back to the workflow")
         rich_print("  EXIT - Exit fast-agent, terminating any running workflows")
         rich_print("\n[bold]Keyboard Shortcuts:[/bold]")
         rich_print("  Enter - Submit (normal mode) / New line (multiline mode)")
-        rich_print("  Ctrl+Enter
+        rich_print("  Ctrl+Enter - Always submit (in any mode)")
         rich_print("  Ctrl+T - Toggle multiline mode")
+        rich_print("  Ctrl+E - Edit in external editor")
+        rich_print("  Ctrl+Y - Copy last assistant response to clipboard")
         rich_print("  Ctrl+L - Clear input")
         rich_print("  Up/Down - Navigate history")
         return True
@@ -827,6 +893,10 @@
         # Return a dictionary to signal that usage should be shown
         return {"show_usage": True}

+    elif command == "MARKDOWN":
+        # Return a dictionary to signal that markdown display should be shown
+        return {"show_markdown": True}
+
     elif command == "SELECT_PROMPT" or (
         isinstance(command, str) and command.startswith("SELECT_PROMPT:")
     ):
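As a side note, the Ctrl+Y binding above relies on the newly added pyperclip dependency. A minimal standalone sketch of the two calls involved (the text is made up, and a system clipboard backend must be available):

```python
import pyperclip  # new runtime dependency in 0.2.44

pyperclip.copy("last assistant response")  # place text on the system clipboard
print(pyperclip.paste())                   # -> "last assistant response"
```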
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/fastagent.py

@@ -314,7 +314,7 @@ class FastAgent:
                 self.agents,
                 model_factory_func,
             )
-
+
             # Validate API keys after agent creation
             validate_provider_keys_post_creation(active_agents)

@@ -435,6 +435,14 @@ class FastAgent:
             raise SystemExit(1)

         finally:
+            # Ensure progress display is stopped before showing usage summary
+            try:
+                from mcp_agent.progress_display import progress_display
+
+                progress_display.stop()
+            except:  # noqa: E722
+                pass
+
             # Print usage report before cleanup (show for user exits too)
             if active_agents and not had_error:
                 self._print_usage_report(active_agents)
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/interactive_prompt.py

@@ -30,7 +30,7 @@ from mcp_agent.core.enhanced_prompt import (
     handle_special_commands,
 )
 from mcp_agent.core.usage_display import collect_agents_from_provider, display_usage_report
-from mcp_agent.mcp.mcp_aggregator import SEP
+from mcp_agent.mcp.mcp_aggregator import SEP
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.progress_display import progress_display

@@ -56,6 +56,7 @@ class PromptProvider(Protocol):
         prompt_title: Optional[str] = None,
         arguments: Optional[Dict[str, str]] = None,
         agent_name: Optional[str] = None,
+        as_template: bool = False,
         **kwargs,
     ) -> str:
         """Apply a prompt."""
@@ -188,6 +189,10 @@ class InteractivePrompt:
                         # Handle usage display
                         await self._show_usage(prompt_provider, agent)
                         continue
+                    elif "show_markdown" in command_result:
+                        # Handle markdown display
+                        await self._show_markdown(prompt_provider, agent)
+                        continue

                 # Skip further processing if:
                 # 1. The command was handled (command_result is truthy)
@@ -713,3 +718,57 @@ class InteractivePrompt:

         except Exception as e:
             rich_print(f"[red]Error showing usage: {e}[/red]")
+
+    async def _show_markdown(self, prompt_provider: PromptProvider, agent_name: str) -> None:
+        """
+        Show the last assistant message without markdown formatting.
+
+        Args:
+            prompt_provider: Provider that has access to agents
+            agent_name: Name of the current agent
+        """
+        try:
+            # Get agent to display from
+            if hasattr(prompt_provider, "_agent"):
+                # This is an AgentApp - get the specific agent
+                agent = prompt_provider._agent(agent_name)
+            else:
+                # This is a single agent
+                agent = prompt_provider
+
+            # Check if agent has message history
+            if not hasattr(agent, "_llm") or not agent._llm:
+                rich_print("[yellow]No message history available[/yellow]")
+                return
+
+            message_history = agent._llm.message_history
+            if not message_history:
+                rich_print("[yellow]No messages in history[/yellow]")
+                return
+
+            # Find the last assistant message
+            last_assistant_msg = None
+            for msg in reversed(message_history):
+                if msg.role == "assistant":
+                    last_assistant_msg = msg
+                    break
+
+            if not last_assistant_msg:
+                rich_print("[yellow]No assistant messages found[/yellow]")
+                return
+
+            # Get the text content and display without markdown
+            content = last_assistant_msg.last_text()
+
+            # Display with a simple header
+            rich_print("\n[bold blue]Last Assistant Response (Plain Text):[/bold blue]")
+            rich_print("─" * 60)
+            # Use console.print with markup=False to display raw text
+            from mcp_agent import console
+
+            console.console.print(content, markup=False)
+            rich_print("─" * 60)
+            rich_print()
+
+        except Exception as e:
+            rich_print(f"[red]Error showing markdown: {e}[/red]")
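A brief aside on the `markup=False` call used in `_show_markdown` above: Rich would otherwise interpret square-bracket tags inside the assistant text as styling, so the handler prints the text verbatim. A self-contained sketch (sample text invented):

```python
from rich.console import Console

console = Console()
text = "literal [bold]tags[/bold] stay visible"
console.print(text, markup=False)  # printed exactly as-is, no styling applied
console.print(text)                # for contrast: Rich renders the [bold] span as styling
```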
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/core/usage_display.py

@@ -91,12 +91,19 @@ def display_usage_report(
     max_agent_width = min(15, max(len(data["name"]) for data in usage_data) if usage_data else 8)
     agent_width = max(max_agent_width, 5)  # Minimum of 5 for "Agent" header

-    # Display the table
+    # Display the table with new visual style
     console = Console()
+
+    # Top separator
+    console.print()
+    console.print("─" * console.size.width, style="dim")
+    console.print()
+
+    # Header with block character
+    console.print("[dim]▎[/dim] [bold dim]Usage Summary[/bold dim]")
     console.print()
-    console.print("[dim]Usage Summary (Cumulative)[/dim]")

-    #
+    # Table header with proper spacing
     console.print(
         f"[dim]{'Agent':<{agent_width}} {'Input':>9} {'Output':>9} {'Total':>9} {'Turns':>6} {'Tools':>6} {'Context%':>9} {'Model':<25}[/dim]"
     )
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/augmented_llm.py

@@ -46,7 +46,6 @@ from mcp_agent.mcp.interfaces import (
 from mcp_agent.mcp.mcp_aggregator import MCPAggregator
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.mcp.prompt_render import render_multipart_message
-from mcp_agent.ui.console_display import ConsoleDisplay

 # Define type variables locally
 MessageParamT = TypeVar("MessageParamT")
@@ -157,6 +156,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         self._message_history: List[PromptMessageMultipart] = []

         # Initialize the display component
+        if self.context.config and self.context.config.logger.use_legacy_display:
+            from mcp_agent.ui.console_display_legacy import ConsoleDisplay
+        else:
+            from mcp_agent.ui.console_display import ConsoleDisplay
         self.display = ConsoleDisplay(config=self.context.config)

         # Tool call counter for current turn
@@ -448,10 +451,6 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         """Display a tool result in a formatted panel."""
         self.display.show_tool_result(result, name=self.name)

-    def show_oai_tool_result(self, result: str) -> None:
-        """Display a tool result in a formatted panel."""
-        self.display.show_oai_tool_result(result, name=self.name)
-
     def show_tool_call(self, available_tools, tool_name, tool_args) -> None:
         """Display a tool call in a formatted panel."""
         self._current_turn_tool_calls += 1
{fast_agent_mcp-0.2.43 → fast_agent_mcp-0.2.44}/src/mcp_agent/llm/augmented_llm_passthrough.py

@@ -162,8 +162,17 @@ class PassthroughLLM(AugmentedLLM):
         self,
         multipart_messages: List["PromptMessageMultipart"],
         request_params: RequestParams | None = None,
+        is_template: bool = False,
     ) -> PromptMessageMultipart:
+        print(
+            f"DEBUG: PassthroughLLM _apply_prompt_provider_specific called with {len(multipart_messages)} messages, is_template={is_template}"
+        )
+
+        # Add messages to history with proper is_prompt flag
+        self.history.extend(multipart_messages, is_prompt=is_template)
+
         last_message = multipart_messages[-1]
+        print(f"DEBUG: Last message role: {last_message.role}, text: '{last_message.first_text()}'")

         if self.is_tool_call(last_message):
             result = Prompt.assistant(await self.generate_str(last_message.first_text()))
@@ -200,8 +209,14 @@ class PassthroughLLM(AugmentedLLM):
         else:
             # TODO -- improve when we support Audio/Multimodal gen models e.g. gemini . This should really just return the input as "assistant"...
             concatenated: str = "\n".join(message.all_text() for message in multipart_messages)
+            print(
+                f"DEBUG: PassthroughLLM generating response: '{concatenated}' (is_template={is_template})"
+            )
             await self.show_assistant_message(concatenated)
             result = Prompt.assistant(concatenated)
+            print(f"DEBUG: PassthroughLLM created result: {result}")
+            print(f"DEBUG: Result first_text(): {result.first_text()}")
+            print(f"DEBUG: Result content: {result.content}")

         # Track usage for this passthrough "turn"
         try:
@@ -1,6 +1,6 @@
 from typing import TYPE_CHECKING, List, Tuple, Type

-from mcp.types import
+from mcp.types import TextContent

 from mcp_agent.core.prompt import Prompt
 from mcp_agent.event_progress import ProgressAction
@@ -33,6 +33,7 @@ from anthropic.types import (
 from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
+    ContentBlock,
 )
 from rich.text import Text

@@ -149,7 +150,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         self,
         message_param,
         request_params: RequestParams | None = None,
-    ) -> list[
+    ) -> list[ContentBlock]:
         """
         Process a query using an LLM and available tools.
         Override this method to use a different LLM.
@@ -190,7 +191,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             for tool in tool_list.tools
         ]

-        responses: List[
+        responses: List[ContentBlock] = []

         model = self.default_request_params.model