fast-agent-mcp 0.2.35__tar.gz → 0.2.36__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/PKG-INFO +6 -6
  2. {fast_agent_mcp-0.2.35/src/mcp_agent/resources → fast_agent_mcp-0.2.36}/examples/data-analysis/analysis.py +1 -2
  3. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/tensorzero/agent.py +1 -2
  4. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/parallel.py +0 -2
  5. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/pyproject.toml +6 -6
  6. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/base_agent.py +2 -2
  7. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/workflow/router_agent.py +1 -1
  8. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/enhanced_prompt.py +73 -13
  9. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/interactive_prompt.py +118 -8
  10. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/augmented_llm.py +31 -0
  11. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_anthropic.py +11 -23
  12. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_azure.py +4 -4
  13. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_openai.py +195 -12
  14. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/multipart_converter_openai.py +4 -3
  15. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/interfaces.py +1 -1
  16. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/.gitignore +0 -0
  17. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/LICENSE +0 -0
  18. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/README.md +0 -0
  19. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/azure-openai/fastagent.config.yaml +0 -0
  20. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/custom-agents/agent.py +0 -0
  21. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/custom-agents/fastagent.config.yaml +0 -0
  22. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/data-analysis/analysis-campaign.py +0 -0
  23. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/data-analysis/fastagent.config.yaml +0 -0
  24. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  25. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/mcp/state-transfer/agent_one.py +0 -0
  26. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/mcp/state-transfer/agent_two.py +0 -0
  27. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  28. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/mcp/vision-examples/example1.py +0 -0
  29. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/mcp/vision-examples/example2.py +0 -0
  30. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/mcp/vision-examples/example3.py +0 -0
  31. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/mcp/vision-examples/fastagent.config.yaml +0 -0
  32. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/otel/agent.py +0 -0
  33. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/otel/agent2.py +0 -0
  34. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/otel/docker-compose.yaml +0 -0
  35. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/otel/fastagent.config.yaml +0 -0
  36. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/researcher/fastagent.config.yaml +0 -0
  37. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/researcher/researcher-eval.py +0 -0
  38. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/researcher/researcher-imp.py +0 -0
  39. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/researcher/researcher.py +0 -0
  40. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/tensorzero/README.md +0 -0
  41. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/tensorzero/docker-compose.yml +0 -0
  42. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/tensorzero/fastagent.config.yaml +0 -0
  43. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/tensorzero/image_demo.py +0 -0
  44. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/tensorzero/mcp_server/mcp_server.py +0 -0
  45. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/tensorzero/simple_agent.py +0 -0
  46. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/chaining.py +0 -0
  47. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/evaluator.py +0 -0
  48. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/fastagent.config.yaml +0 -0
  49. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/graded_report.md +0 -0
  50. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/human_input.py +0 -0
  51. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/orchestrator.py +0 -0
  52. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/router.py +0 -0
  53. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/short_story.md +0 -0
  54. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/short_story.txt +0 -0
  55. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/__init__.py +0 -0
  56. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/__init__.py +0 -0
  57. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/agent.py +0 -0
  58. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/workflow/__init__.py +0 -0
  59. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/workflow/chain_agent.py +0 -0
  60. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/workflow/evaluator_optimizer.py +0 -0
  61. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/workflow/orchestrator_agent.py +0 -0
  62. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/workflow/orchestrator_models.py +0 -0
  63. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/workflow/orchestrator_prompts.py +0 -0
  64. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/workflow/parallel_agent.py +0 -0
  65. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/app.py +0 -0
  66. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/cli/__init__.py +0 -0
  67. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/cli/__main__.py +0 -0
  68. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/cli/commands/check_config.py +0 -0
  69. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/cli/commands/go.py +0 -0
  70. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/cli/commands/quickstart.py +0 -0
  71. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/cli/commands/setup.py +0 -0
  72. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/cli/commands/url_parser.py +0 -0
  73. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/cli/main.py +0 -0
  74. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/cli/terminal.py +0 -0
  75. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/config.py +0 -0
  76. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/console.py +0 -0
  77. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/context.py +0 -0
  78. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/context_dependent.py +0 -0
  79. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/__init__.py +0 -0
  80. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/agent_app.py +0 -0
  81. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/agent_types.py +0 -0
  82. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/direct_decorators.py +0 -0
  83. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/direct_factory.py +0 -0
  84. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/error_handling.py +0 -0
  85. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/exceptions.py +0 -0
  86. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/fastagent.py +0 -0
  87. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/mcp_content.py +0 -0
  88. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/prompt.py +0 -0
  89. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/request_params.py +0 -0
  90. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/usage_display.py +0 -0
  91. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/validation.py +0 -0
  92. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/event_progress.py +0 -0
  93. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/executor/__init__.py +0 -0
  94. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/executor/executor.py +0 -0
  95. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/executor/task_registry.py +0 -0
  96. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/executor/workflow_signal.py +0 -0
  97. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/human_input/__init__.py +0 -0
  98. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/human_input/handler.py +0 -0
  99. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/human_input/types.py +0 -0
  100. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/__init__.py +0 -0
  101. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/augmented_llm_passthrough.py +0 -0
  102. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/augmented_llm_playback.py +0 -0
  103. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/augmented_llm_slow.py +0 -0
  104. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/memory.py +0 -0
  105. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/model_database.py +0 -0
  106. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/model_factory.py +0 -0
  107. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/prompt_utils.py +0 -0
  108. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/provider_key_manager.py +0 -0
  109. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/provider_types.py +0 -0
  110. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/__init__.py +0 -0
  111. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/anthropic_utils.py +0 -0
  112. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_aliyun.py +0 -0
  113. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_deepseek.py +0 -0
  114. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_generic.py +0 -0
  115. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_google_native.py +0 -0
  116. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_google_oai.py +0 -0
  117. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_openrouter.py +0 -0
  118. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_tensorzero.py +0 -0
  119. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/google_converter.py +0 -0
  120. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/multipart_converter_anthropic.py +0 -0
  121. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/multipart_converter_tensorzero.py +0 -0
  122. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/openai_multipart.py +0 -0
  123. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/openai_utils.py +0 -0
  124. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -0
  125. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/sampling_converter_openai.py +0 -0
  126. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/sampling_converter.py +0 -0
  127. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/sampling_format_converter.py +0 -0
  128. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/usage_tracking.py +0 -0
  129. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/logging/__init__.py +0 -0
  130. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/logging/events.py +0 -0
  131. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/logging/json_serializer.py +0 -0
  132. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/logging/listeners.py +0 -0
  133. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/logging/logger.py +0 -0
  134. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/logging/rich_progress.py +0 -0
  135. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/logging/transport.py +0 -0
  136. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/__init__.py +0 -0
  137. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/common.py +0 -0
  138. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/gen_client.py +0 -0
  139. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/helpers/__init__.py +0 -0
  140. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/helpers/content_helpers.py +0 -0
  141. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/helpers/server_config_helpers.py +0 -0
  142. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/hf_auth.py +0 -0
  143. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/logger_textio.py +0 -0
  144. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
  145. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/mcp_aggregator.py +0 -0
  146. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
  147. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/mime_utils.py +0 -0
  148. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompt_message_multipart.py +0 -0
  149. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompt_render.py +0 -0
  150. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompt_serialization.py +0 -0
  151. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompts/__init__.py +0 -0
  152. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompts/__main__.py +0 -0
  153. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompts/prompt_constants.py +0 -0
  154. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompts/prompt_helpers.py +0 -0
  155. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompts/prompt_load.py +0 -0
  156. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompts/prompt_server.py +0 -0
  157. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/prompts/prompt_template.py +0 -0
  158. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/resource_utils.py +0 -0
  159. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp/sampling.py +0 -0
  160. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp_server/__init__.py +0 -0
  161. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp_server/agent_server.py +0 -0
  162. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/mcp_server_registry.py +0 -0
  163. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/progress_display.py +0 -0
  164. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -0
  165. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36/src/mcp_agent/resources}/examples/data-analysis/analysis.py +0 -0
  166. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
  167. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  168. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/in_dev/agent_build.py +0 -0
  169. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/in_dev/css-LICENSE.txt +0 -0
  170. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/in_dev/slides.py +0 -0
  171. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/internal/agent.py +0 -0
  172. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/internal/fastagent.config.yaml +0 -0
  173. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/internal/history_transfer.py +0 -0
  174. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/internal/job.py +0 -0
  175. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/internal/prompt_category.py +0 -0
  176. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/internal/prompt_sizing.py +0 -0
  177. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/internal/simple.txt +0 -0
  178. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/internal/sizer.py +0 -0
  179. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/internal/social.py +0 -0
  180. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_one.py +0 -0
  181. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_two.py +0 -0
  182. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  183. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  184. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/prompting/__init__.py +0 -0
  185. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/prompting/agent.py +0 -0
  186. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/prompting/delimited_prompt.txt +0 -0
  187. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/prompting/fastagent.config.yaml +0 -0
  188. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/prompting/image_server.py +0 -0
  189. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/prompting/prompt1.txt +0 -0
  190. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/prompting/work_with_image.py +0 -0
  191. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
  192. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +0 -0
  193. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/researcher/researcher-imp.py +0 -0
  194. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
  195. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
  196. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
  197. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
  198. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
  199. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
  200. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
  201. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
  202. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/resources/examples/workflows/short_story.txt +0 -0
  203. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/tools/tool_definition.py +0 -0
  204. {fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/ui/console_display.py +0 -0

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fast-agent-mcp
- Version: 0.2.35
+ Version: 0.2.36
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>
  License: Apache License
@@ -209,15 +209,15 @@ Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
  Requires-Python: >=3.10
- Requires-Dist: a2a-types>=0.1.0
+ Requires-Dist: a2a-sdk>=0.2.9
  Requires-Dist: aiohttp>=3.11.13
- Requires-Dist: anthropic>=0.49.0
+ Requires-Dist: anthropic>=0.55.0
  Requires-Dist: azure-identity>=1.14.0
  Requires-Dist: deprecated>=1.2.18
  Requires-Dist: fastapi>=0.115.6
  Requires-Dist: google-genai
- Requires-Dist: mcp==1.9.4
- Requires-Dist: openai>=1.63.2
+ Requires-Dist: mcp==1.10.1
+ Requires-Dist: openai>=1.93.0
  Requires-Dist: opentelemetry-distro>=0.50b0
  Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
  Requires-Dist: opentelemetry-instrumentation-anthropic>=0.40.7; python_version >= '3.10' and python_version < '4.0'
@@ -229,7 +229,7 @@ Requires-Dist: pydantic-settings>=2.7.0
  Requires-Dist: pydantic>=2.10.4
  Requires-Dist: pyyaml>=6.0.2
  Requires-Dist: rich>=13.9.4
- Requires-Dist: tensorzero>=2025.4.7
+ Requires-Dist: tensorzero>=2025.6.3
  Requires-Dist: typer>=0.15.1
  Provides-Extra: azure
  Requires-Dist: azure-identity>=1.14.0; extra == 'azure'

{fast_agent_mcp-0.2.35/src/mcp_agent/resources → fast_agent_mcp-0.2.36}/examples/data-analysis/analysis.py
@@ -1,7 +1,6 @@
  import asyncio

  from mcp_agent.core.fastagent import FastAgent
- from mcp_agent.llm.augmented_llm import RequestParams

  # Create the application
  fast = FastAgent("Data Analysis (Roots)")
@@ -21,8 +20,8 @@ Data files are accessible from the /mnt/data/ directory (this is the current wor
  Visualisations should be saved as .png files in the current working directory.
  """,
  servers=["interpreter"],
- request_params=RequestParams(maxTokens=8192),
  )
+ @fast.agent(name="another_test", instruction="", servers=["filesystem"])
  async def main() -> None:
  # Use the app's context manager
  async with fast.run() as agent:

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/tensorzero/agent.py
@@ -27,9 +27,8 @@ my_t0_system_vars = {
  )
  async def main():
  async with fast.run() as agent_app: # Get the AgentApp wrapper
- agent_name = "default"
  print("\nStarting interactive session with template_vars set via decorator...")
- await agent_app.interactive(agent=agent_name)
+ await agent_app.interactive()


  if __name__ == "__main__":

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/examples/workflows/parallel.py
@@ -25,7 +25,6 @@ fast = FastAgent(
  instruction="""Verify the factual consistency within the story. Identify any contradictions,
  logical inconsistencies, or inaccuracies in the plot, character actions, or setting.
  Highlight potential issues with reasoning or coherence.""",
- model="gpt-4.1",
  )
  @fast.agent(
  name="style_enforcer",
@@ -40,7 +39,6 @@ fast = FastAgent(
  into a structured report. Summarize key issues and categorize them by type.
  Provide actionable recommendations for improving the story,
  and give an overall grade based on the feedback.""",
- model="o3-mini.low",
  )
  @fast.parallel(
  fan_out=["proofreader", "fact_checker", "style_enforcer"],

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "fast-agent-mcp"
- version = "0.2.35"
+ version = "0.2.36"
  description = "Define, Prompt and Test MCP enabled Agents and Workflows"
  readme = "README.md"
  license = { file = "LICENSE" }
@@ -15,7 +15,7 @@ classifiers = [
  requires-python = ">=3.10"
  dependencies = [
  "fastapi>=0.115.6",
- "mcp==1.9.4",
+ "mcp==1.10.1",
  "opentelemetry-distro>=0.50b0",
  "opentelemetry-exporter-otlp-proto-http>=1.29.0",
  "pydantic-settings>=2.7.0",
@@ -23,20 +23,20 @@ dependencies = [
  "pyyaml>=6.0.2",
  "rich>=13.9.4",
  "typer>=0.15.1",
- "anthropic>=0.49.0",
- "openai>=1.63.2",
+ "anthropic>=0.55.0",
+ "openai>=1.93.0",
  "azure-identity>=1.14.0",
  "prompt-toolkit>=3.0.50",
  "aiohttp>=3.11.13",
- "a2a-types>=0.1.0",
  "opentelemetry-instrumentation-openai>=0.0.40.7; python_version >= '3.10' and python_version < '4.0'",
  "opentelemetry-instrumentation-anthropic>=0.40.7; python_version >= '3.10' and python_version < '4.0'",
  "opentelemetry-instrumentation-mcp>=0.40.7; python_version >= '3.10' and python_version < '4.0'",
  "google-genai",
  "opentelemetry-instrumentation-google-genai>=0.2b0",
- "tensorzero>=2025.4.7",
+ "tensorzero>=2025.6.3",
  "opentelemetry-instrumentation-google-genai>=0.2b0",
  "deprecated>=1.2.18",
+ "a2a-sdk>=0.2.9",
  ]

  [project.optional-dependencies]

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/base_agent.py
@@ -20,7 +20,7 @@ from typing import (
  Union,
  )

- from a2a_types.types import AgentCapabilities, AgentCard, AgentSkill
+ from a2a.types import AgentCapabilities, AgentCard, AgentSkill
  from mcp.types import (
  CallToolResult,
  EmbeddedResource,
@@ -704,7 +704,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  def usage_accumulator(self) -> Optional["UsageAccumulator"]:
  """
  Return the usage accumulator for tracking token usage across turns.
-
+
  Returns:
  UsageAccumulator object if LLM is attached, None otherwise
  """

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/agents/workflow/router_agent.py
@@ -21,7 +21,7 @@ from mcp_agent.mcp.interfaces import AugmentedLLMProtocol, ModelT
  from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

  if TYPE_CHECKING:
- from a2a_types.types import AgentCard
+ from a2a.types import AgentCard

  from mcp_agent.context import Context


{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/enhanced_prompt.py
@@ -40,6 +40,59 @@ in_multiline_mode = False
  # Track whether help text has been shown globally
  help_message_shown = False

+ # Track which agents have shown their info
+ _agent_info_shown = set()
+
+
+ async def _display_agent_info_helper(agent_name: str, agent_provider: object) -> None:
+ """Helper function to display agent information."""
+ # Only show once per agent
+ if agent_name in _agent_info_shown:
+ return
+
+ try:
+ # Get agent info
+ if hasattr(agent_provider, "_agent"):
+ # This is an AgentApp - get the specific agent
+ agent = agent_provider._agent(agent_name)
+ else:
+ # This is a single agent
+ agent = agent_provider
+
+ # Get counts
+ servers = await agent.list_servers()
+ server_count = len(servers) if servers else 0
+
+ tools_result = await agent.list_tools()
+ tool_count = (
+ len(tools_result.tools) if tools_result and hasattr(tools_result, "tools") else 0
+ )
+
+ prompts_dict = await agent.list_prompts()
+ prompt_count = sum(len(prompts) for prompts in prompts_dict.values()) if prompts_dict else 0
+
+ # Display with proper pluralization and subdued formatting
+ if server_count == 0:
+ rich_print(
+ f"[dim]Agent [/dim][blue]{agent_name}[/blue][dim]: No MCP Servers attached[/dim]"
+ )
+ else:
+ # Pluralization helpers
+ server_word = "Server" if server_count == 1 else "Servers"
+ tool_word = "tool" if tool_count == 1 else "tools"
+ prompt_word = "prompt" if prompt_count == 1 else "prompts"
+
+ rich_print(
+ f"[dim]Agent [/dim][blue]{agent_name}[/blue][dim]:[/dim] {server_count:,}[dim] MCP {server_word}, [/dim]{tool_count:,}[dim] {tool_word}, [/dim]{prompt_count:,}[dim] {prompt_word} available[/dim]"
+ )
+
+ # Mark as shown
+ _agent_info_shown.add(agent_name)
+
+ except Exception:
+ # Silently ignore errors to not disrupt the user experience
+ pass
+

  class AgentCompleter(Completer):
  """Provide completion for agent names and common commands."""
@@ -54,11 +107,11 @@ class AgentCompleter(Completer):
  self.agents = agents
  # Map commands to their descriptions for better completion hints
  self.commands = {
- "help": "Show available commands",
- "prompts": "List and select MCP prompts", # Changed description
- "prompt": "Apply a specific prompt by name (/prompt <name>)", # New command
+ "tools": "List and call MCP tools",
+ "prompt": "List and select MCP prompts, or apply specific prompt (/prompt <name>)",
  "agents": "List available agents",
  "usage": "Show current usage statistics",
+ "help": "Show available commands",
  "clear": "Clear the screen",
  "STOP": "Stop this prompting session and move to next workflow step",
  "EXIT": "Exit fast-agent, terminating any running workflows",
@@ -66,8 +119,8 @@
  }
  if is_human_input:
  self.commands.pop("agents")
- self.commands.pop("prompts") # Remove prompts command in human input mode
  self.commands.pop("prompt", None) # Remove prompt command in human input mode
+ self.commands.pop("tools", None) # Remove tools command in human input mode
  self.commands.pop("usage", None) # Remove usage command in human input mode
  self.agent_types = agent_types or {}

@@ -260,6 +313,7 @@ async def get_enhanced_input(
  agent_types: dict[str, AgentType] = None,
  is_human_input: bool = False,
  toolbar_color: str = "ansiblue",
+ agent_provider: object = None,
  ) -> str:
  """
  Enhanced input with advanced prompt_toolkit features.
@@ -274,6 +328,7 @@
  agent_types: Dictionary mapping agent names to their types for display
  is_human_input: Whether this is a human input request (disables agent selection features)
  toolbar_color: Color to use for the agent name in the toolbar (default: "ansiblue")
+ agent_provider: Optional agent provider for displaying agent info

  Returns:
  User input string
@@ -300,14 +355,15 @@
  if in_multiline_mode:
  mode_style = "ansired" # More noticeable for multiline mode
  mode_text = "MULTILINE"
- toggle_text = "Normal Editing"
+ toggle_text = "Normal"
  else:
  mode_style = "ansigreen"
  mode_text = "NORMAL"
- toggle_text = "Multiline Editing"
+ toggle_text = "Multiline"

  shortcuts = [
  ("Ctrl+T", toggle_text),
+ ("Ctrl+E", "External"),
  ("Ctrl+L", "Clear"),
  ("↑/↓", "History"),
  ]
@@ -373,8 +429,13 @@
  rich_print("[dim]Type /help for commands. Ctrl+T toggles multiline mode.[/dim]")
  else:
  rich_print(
- "[dim]Type /help for commands, @agent to switch agent. Ctrl+T toggles multiline mode.[/dim]"
+ "[dim]Type /help for commands, @agent to switch agent. Ctrl+T toggles multiline mode.[/dim]\n"
  )
+
+ # Display agent info right after help text if agent_provider is available
+ if agent_provider and not is_human_input:
+ await _display_agent_info_helper(agent_name, agent_provider)
+
  rich_print()
  help_message_shown = True

@@ -394,12 +455,8 @@
  return "LIST_AGENTS"
  elif cmd == "usage":
  return "SHOW_USAGE"
- elif cmd == "prompts":
- # Return a dictionary with select_prompt action instead of a string
- # This way it will match what the command handler expects
- return {"select_prompt": True, "prompt_name": None}
  elif cmd == "prompt":
- # Handle /prompt with no arguments the same way as /prompts
+ # Handle /prompt with no arguments as interactive mode
  if len(cmd_parts) > 1:
  # Direct prompt selection with name or number
  prompt_arg = cmd_parts[1].strip()
@@ -409,8 +466,11 @@
  else:
  return f"SELECT_PROMPT:{prompt_arg}"
  else:
- # If /prompt is used without arguments, treat it the same as /prompts
+ # If /prompt is used without arguments, show interactive selection
  return {"select_prompt": True, "prompt_name": None}
+ elif cmd == "tools":
+ # Return a dictionary with list_tools action
+ return {"list_tools": True}
  elif cmd == "exit":
  return "EXIT"
  elif cmd.lower() == "stop":

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/core/interactive_prompt.py
@@ -23,6 +23,7 @@ from rich.table import Table

  from mcp_agent.core.agent_types import AgentType
  from mcp_agent.core.enhanced_prompt import (
+ _display_agent_info_helper,
  get_argument_input,
  get_enhanced_input,
  get_selection_input,
@@ -121,6 +122,7 @@ class InteractivePrompt:
  multiline=False, # Default to single-line mode
  available_agent_names=available_agents,
  agent_types=self.agent_types, # Pass agent types for display
+ agent_provider=prompt_provider, # Pass agent provider for info display
  )

  # Handle special commands - pass "True" to enable agent switching
@@ -132,6 +134,9 @@
  new_agent = command_result["switch_agent"]
  if new_agent in available_agents_set:
  agent = new_agent
+ # Display new agent info immediately when switching
+ rich_print() # Add spacing
+ await _display_agent_info_helper(agent, prompt_provider)
  continue
  else:
  rich_print(f"[red]Agent '{new_agent}' not found[/red]")
@@ -174,6 +179,10 @@
  # Use the name-based selection
  await self._select_prompt(prompt_provider, agent, prompt_name)
  continue
+ elif "list_tools" in command_result and prompt_provider:
+ # Handle tools list display
+ await self._list_tools(prompt_provider, agent)
+ continue
  elif "show_usage" in command_result:
  # Handle usage display
  await self._show_usage(prompt_provider, agent)
@@ -333,13 +342,17 @@
  rich_print(f"[dim]{traceback.format_exc()}[/dim]")

  async def _select_prompt(
- self, prompt_provider: PromptProvider, agent_name: str, requested_name: Optional[str] = None
+ self,
+ prompt_provider: PromptProvider,
+ agent_name: str,
+ requested_name: Optional[str] = None,
+ send_func: Optional[SendFunc] = None,
  ) -> None:
  """
  Select and apply a prompt.

  Args:
- prompt_provider: Provider that implements list_prompts and apply_prompt
+ prompt_provider: Provider that implements list_prompts and get_prompt
  agent_name: Name of the agent
  requested_name: Optional name of the prompt to apply
  """
@@ -569,12 +582,54 @@
  if arg_value:
  arg_values[arg_name] = arg_value

- # Apply the prompt
+ # Apply the prompt using generate() for proper progress display
  namespaced_name = selected_prompt["namespaced_name"]
  rich_print(f"\n[bold]Applying prompt [cyan]{namespaced_name}[/cyan]...[/bold]")

- # Call apply_prompt on the provider with the prompt name and arguments
- await prompt_provider.apply_prompt(namespaced_name, arg_values, agent_name)
+ # Get the agent directly for generate() call
+ if hasattr(prompt_provider, "_agent"):
+ # This is an AgentApp - get the specific agent
+ agent = prompt_provider._agent(agent_name)
+ else:
+ # This is a single agent
+ agent = prompt_provider
+
+ try:
+ # Use agent.apply_prompt() which handles everything properly:
+ # - get_prompt() to fetch template
+ # - convert to multipart
+ # - call generate() for progress display
+ # - return response text
+ # Response display is handled by the agent's show_ methods, don't print it here
+
+ # Fetch the prompt first (without progress display)
+ prompt_result = await agent.get_prompt(namespaced_name, arg_values)
+
+ if not prompt_result or not prompt_result.messages:
+ rich_print(
+ f"[red]Prompt '{namespaced_name}' could not be found or contains no messages[/red]"
+ )
+ return
+
+ # Convert to multipart format
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+ multipart_messages = PromptMessageMultipart.from_get_prompt_result(prompt_result)
+
+ # Now start progress display for the actual generation
+ progress_display.resume()
+ try:
+ await agent.generate(multipart_messages, None)
+ finally:
+ # Pause again for the next UI interaction
+ progress_display.pause()
+
+ # Show usage info after the turn (same as send_wrapper does)
+ if hasattr(prompt_provider, "_show_turn_usage"):
+ prompt_provider._show_turn_usage(agent_name)
+
+ except Exception as e:
+ rich_print(f"[red]Error applying prompt: {e}[/red]")

  except Exception as e:
  import traceback
@@ -582,6 +637,61 @@
  rich_print(f"[red]Error selecting or applying prompt: {e}[/red]")
  rich_print(f"[dim]{traceback.format_exc()}[/dim]")

+ async def _list_tools(self, prompt_provider: PromptProvider, agent_name: str) -> None:
+ """
+ List available tools for an agent.
+
+ Args:
+ prompt_provider: Provider that implements list_tools
+ agent_name: Name of the agent
+ """
+ console = Console()
+
+ try:
+ # Get agent to list tools from
+ if hasattr(prompt_provider, "_agent"):
+ # This is an AgentApp - get the specific agent
+ agent = prompt_provider._agent(agent_name)
+ else:
+ # This is a single agent
+ agent = prompt_provider
+
+ rich_print(f"\n[bold]Fetching tools for agent [cyan]{agent_name}[/cyan]...[/bold]")
+
+ # Get tools using list_tools
+ tools_result = await agent.list_tools()
+
+ if not tools_result or not hasattr(tools_result, "tools") or not tools_result.tools:
+ rich_print("[yellow]No tools available for this agent[/yellow]")
+ return
+
+ # Create a table for better display
+ table = Table(title="Available MCP Tools")
+ table.add_column("#", justify="right", style="cyan")
+ table.add_column("Tool Name", style="bright_blue")
+ table.add_column("Description")
+
+ # Add tools to table
+ for i, tool in enumerate(tools_result.tools):
+ table.add_row(
+ str(i + 1),
+ tool.name,
+ getattr(tool, "description", "No description") or "No description",
+ )
+
+ console.print(table)
+
+ # Add usage instructions
+ rich_print("\n[bold]Usage:[/bold]")
+ rich_print(" • Tools are automatically available in your conversation")
+ rich_print(" • Just ask the agent to use a tool by name or description")
+
+ except Exception as e:
+ import traceback
+
+ rich_print(f"[red]Error listing tools: {e}[/red]")
+ rich_print(f"[dim]{traceback.format_exc()}[/dim]")
+
  async def _show_usage(self, prompt_provider: PromptProvider, agent_name: str) -> None:
  """
  Show usage statistics for the current agent(s) in a colorful table format.
@@ -593,13 +703,13 @@
  try:
  # Collect all agents from the prompt provider
  agents_to_show = collect_agents_from_provider(prompt_provider, agent_name)
-
+
  if not agents_to_show:
  rich_print("[yellow]No usage data available[/yellow]")
  return
-
+
  # Use the shared display utility
  display_usage_report(agents_to_show, show_if_progress_disabled=True)
-
+
  except Exception as e:
  rich_print(f"[red]Error showing usage: {e}[/red]")

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/augmented_llm.py
@@ -554,6 +554,37 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
  }
  self.logger.debug("Chat in progress", data=data)

+ def _update_streaming_progress(self, content: str, model: str, estimated_tokens: int) -> int:
+ """Update streaming progress with token estimation and formatting.
+
+ Args:
+ content: The text content from the streaming event
+ model: The model name
+ estimated_tokens: Current token count to update
+
+ Returns:
+ Updated estimated token count
+ """
+ # Rough estimate: 1 token per 4 characters (OpenAI's typical ratio)
+ text_length = len(content)
+ additional_tokens = max(1, text_length // 4)
+ new_total = estimated_tokens + additional_tokens
+
+ # Format token count for display
+ token_str = str(new_total).rjust(5)
+
+ # Emit progress event
+ data = {
+ "progress_action": ProgressAction.STREAMING,
+ "model": model,
+ "agent_name": self.name,
+ "chat_turn": self.chat_turn(),
+ "details": token_str.strip(), # Token count goes in details for STREAMING action
+ }
+ self.logger.info("Streaming progress", data=data)
+
+ return new_total
+

  def _log_chat_finished(self, model: Optional[str] = None) -> None:
  """Log a chat finished event"""

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_anthropic.py
@@ -111,14 +111,8 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
  and hasattr(event, "delta")
  and event.delta.type == "text_delta"
  ):
- # Rough estimate: 1 token per 4 characters (OpenAI's typical ratio)
- text_length = len(event.delta.text)
- estimated_tokens += max(1, text_length // 4)
-
- # Update progress on every token for real-time display
- token_str = str(estimated_tokens).rjust(5)
- # print(f"DEBUG: Streaming tokens: {token_str}")
- self._emit_streaming_progress(model, token_str)
+ # Use base class method for token estimation and progress emission
+ estimated_tokens = self._update_streaming_progress(event.delta.text, model, estimated_tokens)

  # Also check for final message_delta events with actual usage info
  elif (
@@ -127,9 +121,16 @@
  and event.usage.output_tokens
  ):
  actual_tokens = event.usage.output_tokens
+ # Emit final progress with actual token count
  token_str = str(actual_tokens).rjust(5)
- # print(f"DEBUG: Final actual tokens: {token_str}")
- self._emit_streaming_progress(model, token_str)
+ data = {
+ "progress_action": ProgressAction.STREAMING,
+ "model": model,
+ "agent_name": self.name,
+ "chat_turn": self.chat_turn(),
+ "details": token_str.strip(),
+ }
+ self.logger.info("Streaming progress", data=data)

  # Get the final message with complete usage data
  message = await stream.get_final_message()
@@ -142,19 +143,6 @@

  return message

- def _emit_streaming_progress(self, model: str, token_str: str) -> None:
- """Emit a streaming progress event that goes directly to progress display."""
- data = {
- "progress_action": ProgressAction.STREAMING,
- "model": model,
- "agent_name": self.name,
- "chat_turn": self.chat_turn(),
- "details": token_str.strip(), # Token count goes in details for STREAMING action
- }
- # print(f"DEBUG: Emitting streaming progress event with data: {data}")
- # Use a special logger level or namespace to avoid polluting regular logs
- self.logger.info("Streaming progress", data=data)
-
  async def _anthropic_completion(
  self,
  message_param,

{fast_agent_mcp-0.2.35 → fast_agent_mcp-0.2.36}/src/mcp_agent/llm/providers/augmented_llm_azure.py
@@ -1,4 +1,4 @@
- from openai import AuthenticationError, AzureOpenAI, OpenAI
+ from openai import AsyncAzureOpenAI, AsyncOpenAI, AuthenticationError

  from mcp_agent.core.exceptions import ProviderKeyError
  from mcp_agent.llm.provider_types import Provider
@@ -93,7 +93,7 @@ class AzureOpenAIAugmentedLLM(OpenAIAugmentedLLM):
  if not self.resource_name and self.base_url:
  self.resource_name = _extract_resource_name(self.base_url)

- def _openai_client(self) -> OpenAI:
+ def _openai_client(self) -> AsyncOpenAI:
  """
  Returns an AzureOpenAI client, handling both API Key and DefaultAzureCredential.
  """
@@ -104,7 +104,7 @@
  "Missing Azure endpoint",
  "azure_endpoint (base_url) is None at client creation time.",
  )
- return AzureOpenAI(
+ return AsyncAzureOpenAI(
  azure_ad_token_provider=self.get_azure_token,
  azure_endpoint=self.base_url,
  api_version=self.api_version,
@@ -116,7 +116,7 @@
  "Missing Azure endpoint",
  "azure_endpoint (base_url) is None at client creation time.",
  )
- return AzureOpenAI(
+ return AsyncAzureOpenAI(
  api_key=self.api_key,
  azure_endpoint=self.base_url,
  api_version=self.api_version,
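
A minimal usage sketch (endpoint, deployment, and API version are placeholders; not taken from the package) of the async client behaviour this change implies: with AsyncAzureOpenAI, completion calls are awaited.

    # Illustrative sketch only: the async Azure client is used with awaited calls.
    import asyncio

    from openai import AsyncAzureOpenAI


    async def main() -> None:
        client = AsyncAzureOpenAI(
            api_key="<api-key>",  # placeholder
            azure_endpoint="https://<resource>.openai.azure.com",  # placeholder
            api_version="2024-06-01",  # placeholder
        )
        response = await client.chat.completions.create(
            model="<deployment-name>",  # Azure deployment name, placeholder
            messages=[{"role": "user", "content": "hello"}],
        )
        print(response.choices[0].message.content)


    if __name__ == "__main__":
        asyncio.run(main())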