fast-agent-mcp 0.2.32__tar.gz → 0.2.34__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/PKG-INFO +1 -1
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/pyproject.toml +1 -1
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/base_agent.py +13 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/config.py +40 -4
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/agent_app.py +41 -1
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/enhanced_prompt.py +9 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/fastagent.py +14 -2
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/interactive_prompt.py +59 -13
- fast_agent_mcp-0.2.34/src/mcp_agent/core/usage_display.py +193 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/augmented_llm.py +26 -6
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/augmented_llm_passthrough.py +66 -4
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/augmented_llm_playback.py +19 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/augmented_llm_slow.py +12 -1
- fast_agent_mcp-0.2.34/src/mcp_agent/llm/model_database.py +236 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/model_factory.py +1 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_anthropic.py +44 -8
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_google_native.py +18 -1
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_openai.py +20 -7
- fast_agent_mcp-0.2.34/src/mcp_agent/llm/usage_tracking.py +385 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/interfaces.py +6 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/.gitignore +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/LICENSE +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/README.md +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/azure-openai/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/custom-agents/agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/custom-agents/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/data-analysis/analysis-campaign.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/data-analysis/analysis.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/data-analysis/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/mcp/state-transfer/agent_one.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/mcp/state-transfer/agent_two.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/mcp/vision-examples/example1.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/mcp/vision-examples/example2.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/mcp/vision-examples/example3.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/mcp/vision-examples/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/otel/agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/otel/agent2.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/otel/docker-compose.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/otel/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/researcher/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/researcher/researcher-imp.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/researcher/researcher.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/tensorzero/README.md +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/tensorzero/agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/tensorzero/docker-compose.yml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/tensorzero/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/tensorzero/image_demo.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/tensorzero/simple_agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/chaining.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/evaluator.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/graded_report.md +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/human_input.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/orchestrator.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/parallel.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/router.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/short_story.md +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/examples/workflows/short_story.txt +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/workflow/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/workflow/chain_agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/workflow/evaluator_optimizer.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/workflow/orchestrator_agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/workflow/orchestrator_models.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/workflow/orchestrator_prompts.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/workflow/parallel_agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/workflow/router_agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/app.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/cli/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/cli/__main__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/cli/commands/check_config.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/cli/commands/go.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/cli/commands/quickstart.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/cli/commands/setup.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/cli/commands/url_parser.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/cli/main.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/cli/terminal.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/console.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/context.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/context_dependent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/agent_types.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/direct_decorators.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/direct_factory.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/error_handling.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/exceptions.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/mcp_content.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/prompt.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/request_params.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/validation.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/event_progress.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/executor/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/executor/executor.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/executor/task_registry.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/executor/workflow_signal.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/human_input/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/human_input/handler.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/human_input/types.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/memory.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/prompt_utils.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/provider_key_manager.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/provider_types.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/anthropic_utils.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_aliyun.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_azure.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_deepseek.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_generic.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_google_oai.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_openrouter.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/augmented_llm_tensorzero.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/google_converter.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/multipart_converter_anthropic.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/multipart_converter_openai.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/multipart_converter_tensorzero.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/openai_multipart.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/openai_utils.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/providers/sampling_converter_openai.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/sampling_converter.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/sampling_format_converter.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/logging/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/logging/events.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/logging/json_serializer.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/logging/listeners.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/logging/logger.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/logging/rich_progress.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/logging/transport.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/common.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/gen_client.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/helpers/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/helpers/content_helpers.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/helpers/server_config_helpers.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/hf_auth.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/logger_textio.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/mcp_aggregator.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/mime_utils.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompt_message_multipart.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompt_render.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompt_serialization.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompts/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompts/__main__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompts/prompt_constants.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompts/prompt_helpers.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompts/prompt_load.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompts/prompt_server.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/prompts/prompt_template.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/resource_utils.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp/sampling.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp_server/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp_server/agent_server.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/mcp_server_registry.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/progress_display.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/in_dev/agent_build.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/in_dev/css-LICENSE.txt +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/in_dev/slides.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/internal/agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/internal/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/internal/history_transfer.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/internal/job.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/internal/prompt_category.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/internal/prompt_sizing.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/internal/simple.txt +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/internal/sizer.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/internal/social.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_one.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_two.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/prompting/__init__.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/prompting/agent.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/prompting/delimited_prompt.txt +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/prompting/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/prompting/image_server.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/prompting/prompt1.txt +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/prompting/work_with_image.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/researcher/researcher-imp.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/resources/examples/workflows/short_story.txt +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/tools/tool_definition.py +0 -0
- {fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/ui/console_display.py +0 -0
{fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/agents/base_agent.py

@@ -58,6 +58,7 @@ LLM = TypeVar("LLM", bound=AugmentedLLMProtocol)
 HUMAN_INPUT_TOOL_NAME = "__human_input__"
 if TYPE_CHECKING:
     from mcp_agent.context import Context
+    from mcp_agent.llm.usage_tracking import UsageAccumulator
 
 
 DEFAULT_CAPABILITIES = AgentCapabilities(
@@ -698,3 +699,15 @@ class BaseAgent(MCPAggregator, AgentProtocol):
         if self._llm:
             return self._llm.message_history
         return []
+
+    @property
+    def usage_accumulator(self) -> Optional["UsageAccumulator"]:
+        """
+        Return the usage accumulator for tracking token usage across turns.
+
+        Returns:
+            UsageAccumulator object if LLM is attached, None otherwise
+        """
+        if self._llm:
+            return self._llm.usage_accumulator
+        return None
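The new `usage_accumulator` property on `BaseAgent` simply forwards to the attached LLM's accumulator. A minimal sketch of reading it after a conversation turn, using only the attribute names that appear elsewhere in this diff (`turns`, `input_tokens`, `output_tokens`, `context_usage_percentage`); the `agent` object is assumed to come from a running fast-agent app:

```python
# Sketch only: `agent` is a fast-agent agent with an LLM attached.
acc = agent.usage_accumulator  # None until an LLM is attached
if acc and acc.turns:
    last = acc.turns[-1]
    print(f"Last turn: {last.input_tokens:,} in / {last.output_tokens:,} out")
    if acc.context_usage_percentage is not None:
        print(f"Context window used: {acc.context_usage_percentage:.1f}%")
```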
{fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/config.py

@@ -3,8 +3,10 @@ Reading settings from environment variables and providing a settings object
 for the application configuration.
 """
 
+import os
+import re
 from pathlib import Path
-from typing import Dict, List, Literal, Optional
+from typing import Any, Dict, List, Literal, Optional
 
 from pydantic import BaseModel, ConfigDict, field_validator
 from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -365,6 +367,36 @@ _settings: Settings | None = None
 def get_settings(config_path: str | None = None) -> Settings:
     """Get settings instance, automatically loading from config file if available."""
 
+    def resolve_env_vars(config_item: Any) -> Any:
+        """Recursively resolve environment variables in config data."""
+        if isinstance(config_item, dict):
+            return {k: resolve_env_vars(v) for k, v in config_item.items()}
+        elif isinstance(config_item, list):
+            return [resolve_env_vars(i) for i in config_item]
+        elif isinstance(config_item, str):
+            # Regex to find ${ENV_VAR} or ${ENV_VAR:default_value}
+            pattern = re.compile(r"\$\{([^}]+)\}")
+
+            def replace_match(match: re.Match) -> str:
+                var_name_with_default = match.group(1)
+                if ":" in var_name_with_default:
+                    var_name, default_value = var_name_with_default.split(":", 1)
+                    return os.getenv(var_name, default_value)
+                else:
+                    var_name = var_name_with_default
+                    env_value = os.getenv(var_name)
+                    if env_value is None:
+                        # Optionally, raise an error or return the placeholder if the env var is not set
+                        # For now, returning the placeholder to avoid breaking if not set and no default
+                        # print(f"Warning: Environment variable {var_name} not set and no default provided.")
+                        return match.group(0)
+                    return env_value
+
+            # Replace all occurrences
+            resolved_value = pattern.sub(replace_match, config_item)
+            return resolved_value
+        return config_item
+
     def deep_merge(base: dict, update: dict) -> dict:
         """Recursively merge two dictionaries, preserving nested structures."""
         merged = base.copy()
@@ -409,12 +441,14 @@ def get_settings(config_path: str | None = None) -> Settings:
         # Load main config
         with open(config_file, "r", encoding="utf-8") as f:
             yaml_settings = yaml.safe_load(f) or {}
-
+            # Resolve environment variables in the loaded YAML settings
+            resolved_yaml_settings = resolve_env_vars(yaml_settings)
+            merged_settings = resolved_yaml_settings
         # Look for secrets files recursively up the directory tree
         # but stop after finding the first one
         current_dir = config_file.parent
         found_secrets = False
-        # Start with the absolute path of the config file's directory
+        # Start with the absolute path of the config file\'s directory
        current_dir = config_file.parent.resolve()
 
        while current_dir != current_dir.parent and not found_secrets:
@@ -425,7 +459,9 @@ def get_settings(config_path: str | None = None) -> Settings:
             if secrets_file.exists():
                 with open(secrets_file, "r", encoding="utf-8") as f:
                     yaml_secrets = yaml.safe_load(f) or {}
-
+                    # Resolve environment variables in the loaded secrets YAML
+                    resolved_secrets_yaml = resolve_env_vars(yaml_secrets)
+                    merged_settings = deep_merge(merged_settings, resolved_secrets_yaml)
                 found_secrets = True
                 break
         if not found_secrets:
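`resolve_env_vars` is now applied to both the parsed `fastagent.config.yaml` and the discovered secrets file, so any string value may contain `${ENV_VAR}` or `${ENV_VAR:default}` placeholders. A standalone sketch of the substitution rules the helper implements (same regex as above; the variable names below are purely illustrative):

```python
import os
import re

pattern = re.compile(r"\$\{([^}]+)\}")


def resolve(value: str) -> str:
    """Mirror of the placeholder rules added in config.py (illustration only)."""

    def replace(match: re.Match) -> str:
        name = match.group(1)
        if ":" in name:
            name, default = name.split(":", 1)
            return os.getenv(name, default)
        # No default given: keep the placeholder when the variable is unset
        return os.getenv(name, match.group(0))

    return pattern.sub(replace, value)


os.environ["MY_API_KEY"] = "sk-example"
print(resolve("${MY_API_KEY}"))                         # sk-example
print(resolve("${MY_BASE_URL:http://localhost:8000}"))  # http://localhost:8000 (default used)
print(resolve("${NOT_SET}"))                            # ${NOT_SET} (placeholder kept)
```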
{fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/agent_app.py

@@ -6,10 +6,12 @@ from typing import Dict, List, Optional, Union
 
 from deprecated import deprecated
 from mcp.types import PromptMessage
+from rich import print as rich_print
 
 from mcp_agent.agents.agent import Agent
 from mcp_agent.core.interactive_prompt import InteractivePrompt
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp_agent.progress_display import progress_display
 
 
 class AgentApp:
@@ -272,7 +274,12 @@ class AgentApp:
 
         # Define the wrapper for send function
         async def send_wrapper(message, agent_name):
-
+            result = await self.send(message, agent_name)
+
+            # Show usage info after each turn if progress display is enabled
+            self._show_turn_usage(agent_name)
+
+            return result
 
         # Start the prompt loop with the agent name (not the agent object)
         return await prompt.prompt_loop(
@@ -282,3 +289,36 @@ class AgentApp:
             prompt_provider=self,  # Pass self as the prompt provider
             default=default_prompt,
         )
+
+    def _show_turn_usage(self, agent_name: str) -> None:
+        """Show subtle usage information after each turn."""
+        agent = self._agents.get(agent_name)
+        if not agent or not agent.usage_accumulator:
+            return
+
+        # Get the last turn's usage (if any)
+        turns = agent.usage_accumulator.turns
+        if not turns:
+            return
+
+        last_turn = turns[-1]
+        input_tokens = last_turn.input_tokens
+        output_tokens = last_turn.output_tokens
+
+        # Build cache indicators with bright colors
+        cache_indicators = ""
+        if last_turn.cache_usage.cache_write_tokens > 0:
+            cache_indicators += "[bright_yellow]^[/bright_yellow]"
+        if last_turn.cache_usage.cache_read_tokens > 0 or last_turn.cache_usage.cache_hit_tokens > 0:
+            cache_indicators += "[bright_green]*[/bright_green]"
+
+        # Build context percentage - get from accumulator, not individual turn
+        context_info = ""
+        context_percentage = agent.usage_accumulator.context_usage_percentage
+        if context_percentage is not None:
+            context_info = f" ({context_percentage:.1f}%)"
+
+        # Show subtle usage line - pause progress display to ensure visibility
+        with progress_display.paused():
+            cache_suffix = f" {cache_indicators}" if cache_indicators else ""
+            rich_print(f"[dim]Last turn: {input_tokens:,} Input, {output_tokens:,} Output{context_info}[/dim]{cache_suffix}")
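`_show_turn_usage` prints one dim summary line per turn, appending `^` when the turn wrote to a prompt cache and `*` when it read from one. A small sketch of the line it renders for hypothetical token counts (Rich markup as used above; the numbers are invented for illustration):

```python
from rich import print as rich_print

# Hypothetical values for illustration only
input_tokens, output_tokens, context_percentage = 1234, 567, 12.3
cache_indicators = "[bright_green]*[/bright_green]"  # e.g. a cache read occurred this turn

cache_suffix = f" {cache_indicators}" if cache_indicators else ""
rich_print(
    f"[dim]Last turn: {input_tokens:,} Input, {output_tokens:,} Output"
    f" ({context_percentage:.1f}%)[/dim]{cache_suffix}"
)
# Renders roughly as: Last turn: 1,234 Input, 567 Output (12.3%) *
```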
{fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/enhanced_prompt.py

@@ -58,6 +58,7 @@ class AgentCompleter(Completer):
             "prompts": "List and select MCP prompts",  # Changed description
             "prompt": "Apply a specific prompt by name (/prompt <name>)",  # New command
             "agents": "List available agents",
+            "usage": "Show current usage statistics",
             "clear": "Clear the screen",
             "STOP": "Stop this prompting session and move to next workflow step",
             "EXIT": "Exit fast-agent, terminating any running workflows",
@@ -67,6 +68,7 @@ class AgentCompleter(Completer):
             self.commands.pop("agents")
             self.commands.pop("prompts")  # Remove prompts command in human input mode
             self.commands.pop("prompt", None)  # Remove prompt command in human input mode
+            self.commands.pop("usage", None)  # Remove usage command in human input mode
         self.agent_types = agent_types or {}
 
     def get_completions(self, document, complete_event):
@@ -390,6 +392,8 @@ async def get_enhanced_input(
             return "CLEAR"
         elif cmd == "agents":
             return "LIST_AGENTS"
+        elif cmd == "usage":
+            return "SHOW_USAGE"
         elif cmd == "prompts":
             # Return a dictionary with select_prompt action instead of a string
             # This way it will match what the command handler expects
@@ -566,6 +570,7 @@ async def handle_special_commands(command, agent_app=None):
         rich_print(" /agents - List available agents")
         rich_print(" /prompts - List and select MCP prompts")
         rich_print(" /prompt <name> - Apply a specific prompt by name")
+        rich_print(" /usage - Show current usage statistics")
         rich_print(" @agent_name - Switch to agent")
         rich_print(" STOP - Return control back to the workflow")
         rich_print(" EXIT - Exit fast-agent, terminating any running workflows")
@@ -594,6 +599,10 @@ async def handle_special_commands(command, agent_app=None):
             rich_print("[yellow]No agents available[/yellow]")
             return True
 
+    elif command == "SHOW_USAGE":
+        # Return a dictionary to signal that usage should be shown
+        return {"show_usage": True}
+
     elif command == "SELECT_PROMPT" or (
         isinstance(command, str) and command.startswith("SELECT_PROMPT:")
     ):
{fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/fastagent.py

@@ -54,6 +54,7 @@ from mcp_agent.core.exceptions import (
     ServerConfigError,
     ServerInitializationError,
 )
+from mcp_agent.core.usage_display import display_usage_report
 from mcp_agent.core.validation import (
     validate_server_references,
     validate_workflow_references,
@@ -392,6 +393,10 @@ class FastAgent:
 
             yield wrapper
 
+        except PromptExitError as e:
+            # User requested exit - not an error, show usage report
+            self._handle_error(e)
+            raise SystemExit(0)
         except (
             ServerConfigError,
             ProviderKeyError,
@@ -399,15 +404,18 @@ class FastAgent:
             ServerInitializationError,
             ModelConfigError,
             CircularDependencyError,
-            PromptExitError,
         ) as e:
             had_error = True
             self._handle_error(e)
             raise SystemExit(1)
 
         finally:
-            #
+            # Print usage report before cleanup (show for user exits too)
             if active_agents and not had_error:
+                self._print_usage_report(active_agents)
+
+            # Clean up any active agents (always cleanup, even on errors)
+            if active_agents:
                 for agent in active_agents.values():
                     try:
                         await agent.shutdown()
@@ -472,6 +480,10 @@ class FastAgent:
         else:
             handle_error(e, error_type or "Error", "An unexpected error occurred.")
 
+    def _print_usage_report(self, active_agents: dict) -> None:
+        """Print a formatted table of token usage for all agents."""
+        display_usage_report(active_agents, show_if_progress_disabled=False, subdued_colors=True)
+
     async def start_server(
         self,
         transport: str = "sse",
{fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/core/interactive_prompt.py

@@ -28,6 +28,7 @@ from mcp_agent.core.enhanced_prompt import (
     get_selection_input,
     handle_special_commands,
 )
+from mcp_agent.core.usage_display import collect_agents_from_provider, display_usage_report
 from mcp_agent.mcp.mcp_aggregator import SEP  # Import SEP once at the top
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.progress_display import progress_display
@@ -35,15 +36,26 @@ from mcp_agent.progress_display import progress_display
 # Type alias for the send function
 SendFunc = Callable[[Union[str, PromptMessage, PromptMessageMultipart], str], Awaitable[str]]
 
+# Type alias for the agent getter function
+AgentGetter = Callable[[str], Optional[object]]
+
 
 class PromptProvider(Protocol):
     """Protocol for objects that can provide prompt functionality."""
-
-    async def list_prompts(
+
+    async def list_prompts(
+        self, server_name: Optional[str] = None, agent_name: Optional[str] = None
+    ) -> Mapping[str, List[Prompt]]:
         """List available prompts."""
         ...
-
-    async def apply_prompt(
+
+    async def apply_prompt(
+        self,
+        prompt_name: str,
+        arguments: Optional[Dict[str, str]] = None,
+        agent_name: Optional[str] = None,
+        **kwargs,
+    ) -> str:
         """Apply a prompt."""
         ...
 
@@ -160,9 +172,11 @@ class InteractivePrompt:
                         await self._list_prompts(prompt_provider, agent)
                     else:
                         # Use the name-based selection
-                        await self._select_prompt(
-
-
+                        await self._select_prompt(prompt_provider, agent, prompt_name)
+                        continue
+                elif "show_usage" in command_result:
+                    # Handle usage display
+                    await self._show_usage(prompt_provider, agent)
                     continue
 
                 # Skip further processing if:
@@ -170,7 +184,11 @@ class InteractivePrompt:
                 # 2. The original input was a dictionary (special command like /prompt)
                 # 3. The command result itself is a dictionary (special command handling result)
                 # This fixes the issue where /prompt without arguments gets sent to the LLM
-                if
+                if (
+                    command_result
+                    or isinstance(user_input, dict)
+                    or isinstance(command_result, dict)
+                ):
                     continue
 
                 if user_input.upper() == "STOP":
@@ -183,7 +201,9 @@ class InteractivePrompt:
 
         return result
 
-    async def _get_all_prompts(
+    async def _get_all_prompts(
+        self, prompt_provider: PromptProvider, agent_name: Optional[str] = None
+    ):
         """
         Get a list of all available prompts.
 
@@ -196,8 +216,10 @@ class InteractivePrompt:
         """
         try:
             # Call list_prompts on the provider
-            prompt_servers = await prompt_provider.list_prompts(
-
+            prompt_servers = await prompt_provider.list_prompts(
+                server_name=None, agent_name=agent_name
+            )
+
             all_prompts = []
 
             # Process the returned prompt servers
@@ -326,9 +348,11 @@ class InteractivePrompt:
         try:
             # Get all available prompts directly from the prompt provider
             rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent_name}[/cyan]...[/bold]")
-
+
             # Call list_prompts on the provider
-            prompt_servers = await prompt_provider.list_prompts(
+            prompt_servers = await prompt_provider.list_prompts(
+                server_name=None, agent_name=agent_name
+            )
 
             if not prompt_servers:
                 rich_print("[yellow]No prompts available for this agent[/yellow]")
@@ -557,3 +581,25 @@ class InteractivePrompt:
 
             rich_print(f"[red]Error selecting or applying prompt: {e}[/red]")
             rich_print(f"[dim]{traceback.format_exc()}[/dim]")
+
+    async def _show_usage(self, prompt_provider: PromptProvider, agent_name: str) -> None:
+        """
+        Show usage statistics for the current agent(s) in a colorful table format.
+
+        Args:
+            prompt_provider: Provider that has access to agents
+            agent_name: Name of the current agent
+        """
+        try:
+            # Collect all agents from the prompt provider
+            agents_to_show = collect_agents_from_provider(prompt_provider, agent_name)
+
+            if not agents_to_show:
+                rich_print("[yellow]No usage data available[/yellow]")
+                return
+
+            # Use the shared display utility
+            display_usage_report(agents_to_show, show_if_progress_disabled=True)
+
+        except Exception as e:
+            rich_print(f"[red]Error showing usage: {e}[/red]")
fast_agent_mcp-0.2.34/src/mcp_agent/core/usage_display.py (new file)

@@ -0,0 +1,193 @@
+"""
+Utility module for displaying usage statistics in a consistent format.
+Consolidates the usage display logic that was duplicated between fastagent.py and interactive_prompt.py.
+"""
+
+from typing import Any, Dict, Optional
+
+from rich.console import Console
+
+
+def display_usage_report(
+    agents: Dict[str, Any], show_if_progress_disabled: bool = False, subdued_colors: bool = False
+) -> None:
+    """
+    Display a formatted table of token usage for all agents.
+
+    Args:
+        agents: Dictionary of agent name -> agent object
+        show_if_progress_disabled: If True, show even when progress display is disabled
+        subdued_colors: If True, use dim styling for a more subdued appearance
+    """
+    # Check if progress display is enabled (only relevant for fastagent context)
+    if not show_if_progress_disabled:
+        try:
+            from mcp_agent import config
+
+            settings = config.get_settings()
+            if not settings.logger.progress_display:
+                return
+        except (ImportError, AttributeError):
+            # If we can't check settings, assume we should display
+            pass
+
+    # Collect usage data from all agents
+    usage_data = []
+    total_input = 0
+    total_output = 0
+    total_tokens = 0
+
+    for agent_name, agent in agents.items():
+        if agent.usage_accumulator:
+            summary = agent.usage_accumulator.get_summary()
+            if summary["turn_count"] > 0:
+                input_tokens = summary["cumulative_input_tokens"]
+                output_tokens = summary["cumulative_output_tokens"]
+                billing_tokens = summary["cumulative_billing_tokens"]
+                turns = summary["turn_count"]
+
+                # Get context percentage for this agent
+                context_percentage = agent.usage_accumulator.context_usage_percentage
+
+                # Get model name from LLM's default_request_params
+                model = "unknown"
+                if hasattr(agent, "_llm") and agent._llm:
+                    llm = agent._llm
+                    if (
+                        hasattr(llm, "default_request_params")
+                        and llm.default_request_params
+                        and hasattr(llm.default_request_params, "model")
+                    ):
+                        model = llm.default_request_params.model or "unknown"
+
+                # Standardize model name truncation - use consistent 25 char width with 22+... truncation
+                if len(model) > 25:
+                    model = model[:22] + "..."
+
+                usage_data.append(
+                    {
+                        "name": agent_name,
+                        "model": model,
+                        "input": input_tokens,
+                        "output": output_tokens,
+                        "total": billing_tokens,
+                        "turns": turns,
+                        "context": context_percentage,
+                    }
+                )
+
+                total_input += input_tokens
+                total_output += output_tokens
+                total_tokens += billing_tokens
+
+    if not usage_data:
+        return
+
+    # Calculate dynamic agent column width (max 15)
+    max_agent_width = min(15, max(len(data["name"]) for data in usage_data) if usage_data else 8)
+    agent_width = max(max_agent_width, 5)  # Minimum of 5 for "Agent" header
+
+    # Display the table
+    console = Console()
+    console.print()
+    console.print("[dim]Usage Summary (Cumulative)[/dim]")
+
+    # Print header with proper spacing
+    console.print(
+        f"[dim]{'Agent':<{agent_width}} {'Input':>9} {'Output':>9} {'Total':>9} {'Turns':>6} {'Context%':>9} {'Model':<25}[/dim]"
+    )
+
+    # Print agent rows - use styling based on subdued_colors flag
+    for data in usage_data:
+        input_str = f"{data['input']:,}"
+        output_str = f"{data['output']:,}"
+        total_str = f"{data['total']:,}"
+        turns_str = str(data["turns"])
+        context_str = f"{data['context']:.1f}%" if data["context"] is not None else "-"
+
+        # Truncate agent name if needed
+        agent_name = data["name"]
+        if len(agent_name) > agent_width:
+            agent_name = agent_name[: agent_width - 3] + "..."
+
+        if subdued_colors:
+            # Original fastagent.py style with dim wrapper
+            console.print(
+                f"[dim]{agent_name:<{agent_width}} "
+                f"{input_str:>9} "
+                f"{output_str:>9} "
+                f"[bold]{total_str:>9}[/bold] "
+                f"{turns_str:>6} "
+                f"{context_str:>9} "
+                f"{data['model']:<25}[/dim]"
+            )
+        else:
+            # Original interactive_prompt.py style
+            console.print(
+                f"{agent_name:<{agent_width}} "
+                f"{input_str:>9} "
+                f"{output_str:>9} "
+                f"[bold]{total_str:>9}[/bold] "
+                f"{turns_str:>6} "
+                f"{context_str:>9} "
+                f"[dim]{data['model']:<25}[/dim]"
+            )
+
+    # Add total row if multiple agents
+    if len(usage_data) > 1:
+        console.print()
+        total_input_str = f"{total_input:,}"
+        total_output_str = f"{total_output:,}"
+        total_tokens_str = f"{total_tokens:,}"
+
+        if subdued_colors:
+            # Original fastagent.py style with dim wrapper on bold
+            console.print(
+                f"[bold dim]{'TOTAL':<{agent_width}} "
+                f"{total_input_str:>9} "
+                f"{total_output_str:>9} "
+                f"[bold]{total_tokens_str:>9}[/bold] "
+                f"{'':<6} "
+                f"{'':<9} "
+                f"{'':<25}[/bold dim]"
+            )
+        else:
+            # Original interactive_prompt.py style
+            console.print(
+                f"[bold]{'TOTAL':<{agent_width}}[/bold] "
+                f"[bold]{total_input_str:>9}[/bold] "
+                f"[bold]{total_output_str:>9}[/bold] "
+                f"[bold]{total_tokens_str:>9}[/bold] "
+                f"{'':<6} "
+                f"{'':<9} "
+                f"{'':<25}"
+            )
+
+    console.print()
+
+
+def collect_agents_from_provider(
+    prompt_provider: Any, agent_name: Optional[str] = None
+) -> Dict[str, Any]:
+    """
+    Collect agents from a prompt provider for usage display.
+
+    Args:
+        prompt_provider: Provider that has access to agents
+        agent_name: Name of the current agent (for context)
+
+    Returns:
+        Dictionary of agent name -> agent object
+    """
+    agents_to_show = {}
+
+    if hasattr(prompt_provider, "_agents"):
+        # Multi-agent app - show all agents
+        agents_to_show = prompt_provider._agents
+    elif hasattr(prompt_provider, "agent"):
+        # Single agent
+        agent = prompt_provider.agent
+        if hasattr(agent, "name"):
+            agents_to_show = {agent.name: agent}
+
+    return agents_to_show
{fast_agent_mcp-0.2.32 → fast_agent_mcp-0.2.34}/src/mcp_agent/llm/augmented_llm.py

@@ -30,11 +30,13 @@ from mcp_agent.core.prompt import Prompt
 from mcp_agent.core.request_params import RequestParams
 from mcp_agent.event_progress import ProgressAction
 from mcp_agent.llm.memory import Memory, SimpleMemory
+from mcp_agent.llm.model_database import ModelDatabase
 from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.sampling_format_converter import (
     BasicFormatConverter,
     ProviderFormatConverter,
 )
+from mcp_agent.llm.usage_tracking import UsageAccumulator
 from mcp_agent.logging.logger import get_logger
 from mcp_agent.mcp.helpers.content_helpers import get_text
 from mcp_agent.mcp.interfaces import (
@@ -155,12 +157,11 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # Initialize the display component
         self.display = ConsoleDisplay(config=self.context.config)
 
-        # Initialize default parameters
-
-
-        # Apply model override if provided
+        # Initialize default parameters, passing model info
+        model_kwargs = kwargs.copy()
         if model:
-
+            model_kwargs["model"] = model
+        self.default_request_params = self._initialize_default_params(model_kwargs)
 
         # Merge with provided params if any
         if self._init_request_params:
@@ -171,13 +172,22 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         self.type_converter = type_converter
         self.verb = kwargs.get("verb")
 
+        # Initialize usage tracking
+        self.usage_accumulator = UsageAccumulator()
+
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize default parameters for the LLM.
         Should be overridden by provider implementations to set provider-specific defaults."""
+        # Get model-aware default max tokens
+        model = kwargs.get("model")
+        max_tokens = ModelDatabase.get_default_max_tokens(model)
+
         return RequestParams(
+            model=model,
+            maxTokens=max_tokens,
             systemPrompt=self.instruction,
             parallel_tool_calls=True,
-            max_iterations=
+            max_iterations=20,
             use_history=True,
         )
@@ -642,3 +652,13 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
         assert self.provider
         return ProviderKeyManager.get_api_key(self.provider.value, self.context.config)
+
+    def get_usage_summary(self) -> dict:
+        """
+        Get a summary of usage statistics for this LLM instance.
+
+        Returns:
+            Dictionary containing usage statistics including tokens, cache metrics,
+            and context window utilization.
+        """
+        return self.usage_accumulator.get_summary()
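`_initialize_default_params` now seeds `RequestParams` with a model-aware `maxTokens` from the new `ModelDatabase` (its table lives in the added `model_database.py`, which is not shown in this excerpt; only `get_default_max_tokens` is visible here). A hedged sketch of building equivalent defaults by hand, with `"gpt-4.1"` as an arbitrary example model string:

```python
from mcp_agent.core.request_params import RequestParams
from mcp_agent.llm.model_database import ModelDatabase

model = "gpt-4.1"  # example only; any model string known to ModelDatabase
params = RequestParams(
    model=model,
    maxTokens=ModelDatabase.get_default_max_tokens(model),
    systemPrompt="You are a helpful assistant.",
    parallel_tool_calls=True,
    max_iterations=20,
    use_history=True,
)
print(params.maxTokens)  # model-specific default rather than a hard-coded value
```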