fast-agent-mcp 0.2.17__tar.gz → 0.2.19__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (181) hide show
  1. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/.gitignore +1 -0
  2. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/PKG-INFO +15 -14
  3. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/README.md +12 -11
  4. fast_agent_mcp-0.2.19/examples/mcp/vision-examples/example1.py +27 -0
  5. fast_agent_mcp-0.2.19/examples/mcp/vision-examples/example2.py +18 -0
  6. fast_agent_mcp-0.2.19/examples/mcp/vision-examples/example3.py +20 -0
  7. fast_agent_mcp-0.2.19/examples/mcp/vision-examples/fastagent.config.yaml +47 -0
  8. fast_agent_mcp-0.2.19/examples/otel/agent.py +38 -0
  9. fast_agent_mcp-0.2.19/examples/otel/agent2.py +37 -0
  10. fast_agent_mcp-0.2.19/examples/otel/docker-compose.yaml +12 -0
  11. fast_agent_mcp-0.2.19/examples/otel/fastagent.config.yaml +44 -0
  12. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/pyproject.toml +3 -3
  13. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/base_agent.py +6 -2
  14. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/workflow/parallel_agent.py +53 -38
  15. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/workflow/router_agent.py +22 -17
  16. fast_agent_mcp-0.2.19/src/mcp_agent/cli/commands/go.py +133 -0
  17. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/cli/commands/setup.py +1 -1
  18. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/cli/main.py +5 -3
  19. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/config.py +2 -4
  20. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/context.py +13 -10
  21. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/enhanced_prompt.py +12 -7
  22. fast_agent_mcp-0.2.19/src/mcp_agent/core/fastagent.py +576 -0
  23. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/interactive_prompt.py +6 -2
  24. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/validation.py +12 -1
  25. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/executor/executor.py +8 -9
  26. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/augmented_llm.py +2 -0
  27. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/augmented_llm_anthropic.py +2 -1
  28. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/augmented_llm_deepseek.py +1 -3
  29. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/augmented_llm_openai.py +4 -1
  30. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/mcp_aggregator.py +15 -10
  31. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/mcp_connection_manager.py +1 -1
  32. fast_agent_mcp-0.2.17/src/mcp_agent/core/fastagent.py +0 -514
  33. fast_agent_mcp-0.2.17/src/mcp_agent/logging/tracing.py +0 -138
  34. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/LICENSE +0 -0
  35. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/data-analysis/analysis-campaign.py +0 -0
  36. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/data-analysis/analysis.py +0 -0
  37. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/data-analysis/fastagent.config.yaml +0 -0
  38. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  39. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/mcp/state-transfer/agent_one.py +0 -0
  40. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/mcp/state-transfer/agent_two.py +0 -0
  41. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  42. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/researcher/fastagent.config.yaml +0 -0
  43. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/researcher/researcher-eval.py +0 -0
  44. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/researcher/researcher-imp.py +0 -0
  45. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/researcher/researcher.py +0 -0
  46. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/chaining.py +0 -0
  47. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/evaluator.py +0 -0
  48. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/fastagent.config.yaml +0 -0
  49. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/graded_report.md +0 -0
  50. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/human_input.py +0 -0
  51. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/orchestrator.py +0 -0
  52. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/parallel.py +0 -0
  53. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/router.py +0 -0
  54. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/short_story.md +0 -0
  55. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/examples/workflows/short_story.txt +0 -0
  56. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/__init__.py +0 -0
  57. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/__init__.py +0 -0
  58. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/agent.py +0 -0
  59. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/workflow/__init__.py +0 -0
  60. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/workflow/chain_agent.py +0 -0
  61. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/workflow/evaluator_optimizer.py +0 -0
  62. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/workflow/orchestrator_agent.py +0 -0
  63. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/workflow/orchestrator_models.py +0 -0
  64. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/agents/workflow/orchestrator_prompts.py +0 -0
  65. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/app.py +0 -0
  66. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/cli/__init__.py +0 -0
  67. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/cli/__main__.py +0 -0
  68. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/cli/commands/check_config.py +0 -0
  69. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/cli/commands/quickstart.py +0 -0
  70. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/cli/terminal.py +0 -0
  71. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/console.py +0 -0
  72. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/context_dependent.py +0 -0
  73. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/__init__.py +0 -0
  74. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/agent_app.py +0 -0
  75. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/agent_types.py +0 -0
  76. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/direct_decorators.py +0 -0
  77. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/direct_factory.py +0 -0
  78. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/error_handling.py +0 -0
  79. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/exceptions.py +0 -0
  80. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/mcp_content.py +0 -0
  81. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/prompt.py +0 -0
  82. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/core/request_params.py +0 -0
  83. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/event_progress.py +0 -0
  84. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/executor/__init__.py +0 -0
  85. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/executor/task_registry.py +0 -0
  86. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/executor/workflow_signal.py +0 -0
  87. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/human_input/__init__.py +0 -0
  88. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/human_input/handler.py +0 -0
  89. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/human_input/types.py +0 -0
  90. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/__init__.py +0 -0
  91. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/augmented_llm_passthrough.py +0 -0
  92. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/augmented_llm_playback.py +0 -0
  93. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/memory.py +0 -0
  94. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/model_factory.py +0 -0
  95. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/prompt_utils.py +0 -0
  96. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/provider_key_manager.py +0 -0
  97. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/provider_types.py +0 -0
  98. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/__init__.py +0 -0
  99. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/anthropic_utils.py +0 -0
  100. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/augmented_llm_generic.py +0 -0
  101. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/augmented_llm_google.py +0 -0
  102. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/augmented_llm_openrouter.py +0 -0
  103. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/multipart_converter_anthropic.py +0 -0
  104. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/multipart_converter_openai.py +0 -0
  105. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/openai_multipart.py +0 -0
  106. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/openai_utils.py +0 -0
  107. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -0
  108. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/providers/sampling_converter_openai.py +0 -0
  109. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/sampling_converter.py +0 -0
  110. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/llm/sampling_format_converter.py +0 -0
  111. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/logging/__init__.py +0 -0
  112. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/logging/events.py +0 -0
  113. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/logging/json_serializer.py +0 -0
  114. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/logging/listeners.py +0 -0
  115. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/logging/logger.py +0 -0
  116. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/logging/rich_progress.py +0 -0
  117. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/logging/transport.py +0 -0
  118. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/__init__.py +0 -0
  119. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/gen_client.py +0 -0
  120. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/helpers/__init__.py +0 -0
  121. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/helpers/content_helpers.py +0 -0
  122. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/interfaces.py +0 -0
  123. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/logger_textio.py +0 -0
  124. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
  125. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/mime_utils.py +0 -0
  126. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompt_message_multipart.py +0 -0
  127. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompt_render.py +0 -0
  128. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompt_serialization.py +0 -0
  129. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompts/__init__.py +0 -0
  130. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompts/__main__.py +0 -0
  131. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompts/prompt_constants.py +0 -0
  132. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompts/prompt_helpers.py +0 -0
  133. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompts/prompt_load.py +0 -0
  134. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompts/prompt_server.py +0 -0
  135. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/prompts/prompt_template.py +0 -0
  136. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/resource_utils.py +0 -0
  137. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp/sampling.py +0 -0
  138. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp_server/__init__.py +0 -0
  139. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp_server/agent_server.py +0 -0
  140. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/mcp_server_registry.py +0 -0
  141. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/progress_display.py +0 -0
  142. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -0
  143. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
  144. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
  145. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  146. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/in_dev/agent_build.py +0 -0
  147. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/in_dev/css-LICENSE.txt +0 -0
  148. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/in_dev/slides.py +0 -0
  149. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/internal/agent.py +0 -0
  150. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/internal/fastagent.config.yaml +0 -0
  151. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/internal/history_transfer.py +0 -0
  152. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/internal/job.py +0 -0
  153. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/internal/prompt_category.py +0 -0
  154. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/internal/prompt_sizing.py +0 -0
  155. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/internal/simple.txt +0 -0
  156. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/internal/sizer.py +0 -0
  157. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/internal/social.py +0 -0
  158. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_one.py +0 -0
  159. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_two.py +0 -0
  160. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  161. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  162. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/prompting/__init__.py +0 -0
  163. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/prompting/agent.py +0 -0
  164. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/prompting/delimited_prompt.txt +0 -0
  165. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/prompting/fastagent.config.yaml +0 -0
  166. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/prompting/image_server.py +0 -0
  167. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/prompting/prompt1.txt +0 -0
  168. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/prompting/work_with_image.py +0 -0
  169. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
  170. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +0 -0
  171. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/researcher/researcher-imp.py +0 -0
  172. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
  173. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
  174. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
  175. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
  176. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
  177. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
  178. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
  179. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
  180. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/resources/examples/workflows/short_story.txt +0 -0
  181. {fast_agent_mcp-0.2.17 → fast_agent_mcp-0.2.19}/src/mcp_agent/ui/console_display.py +0 -0
@@ -194,3 +194,4 @@ tests/e2e/workflow/weather_location.txt
194
194
  tests/integration/prompt-state/multipart.json
195
195
  tests/integration/prompt-state/history.json
196
196
  !tests/integration/api/fastagent.secrets.yaml
197
+ fastagent.jsonl
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: fast-agent-mcp
3
- Version: 0.2.17
3
+ Version: 0.2.19
4
4
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
5
5
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
6
6
  License: Apache License
@@ -214,16 +214,16 @@ Requires-Dist: aiohttp>=3.11.13
214
214
  Requires-Dist: anthropic>=0.49.0
215
215
  Requires-Dist: fastapi>=0.115.6
216
216
  Requires-Dist: mcp==1.6.0
217
- Requires-Dist: numpy>=2.2.1
218
217
  Requires-Dist: openai>=1.63.2
219
218
  Requires-Dist: opentelemetry-distro>=0.50b0
220
219
  Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
220
+ Requires-Dist: opentelemetry-instrumentation-anthropic>=0.39.3
221
+ Requires-Dist: opentelemetry-instrumentation-openai>=0.39.3
221
222
  Requires-Dist: prompt-toolkit>=3.0.50
222
223
  Requires-Dist: pydantic-settings>=2.7.0
223
224
  Requires-Dist: pydantic>=2.10.4
224
225
  Requires-Dist: pyyaml>=6.0.2
225
226
  Requires-Dist: rich>=13.9.4
226
- Requires-Dist: scikit-learn>=1.6.0
227
227
  Requires-Dist: typer>=0.15.1
228
228
  Provides-Extra: dev
229
229
  Requires-Dist: anthropic>=0.42.0; extra == 'dev'
@@ -251,7 +251,7 @@ Description-Content-Type: text/markdown
251
251
  ## Overview
252
252
 
253
253
  > [!TIP]
254
- > Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not. llms.txt link is here: https://fast-agent.ai/llms.txt
254
+ > Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not. There is also an LLMs.txt [here](https://fast-agent.ai/llms.txt)
255
255
 
256
256
  **`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes. It is the first framework with complete, end-to-end tested MCP Feature support including Sampling. Both Anthropic (Haiku, Sonnet, Opus) and OpenAI models (gpt-4o/gpt-4.1 family, o1/o3 family) are supported.
257
257
 
@@ -259,7 +259,8 @@ The simple declarative syntax lets you concentrate on composing your Prompts and
259
259
 
260
260
  `fast-agent` is multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints via Prompts, Resources and MCP Tool Call results. The inclusion of passthrough and playback LLMs enable rapid development and test of Python glue-code for your applications.
261
261
 
262
- > [!TIP] > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.
262
+ > [!IMPORTANT]
263
+ > The `fast-agent` documentation repo is here: https://github.com/evalstate/fast-agent-docs. Please feel free to submit PRs for documentation, experience reports or other content you think others may find helpful. All help and feedback warmly received.
263
264
 
264
265
  ### Agent Application Development
265
266
 
@@ -449,10 +450,10 @@ If the Generator has `use_history` off, the previous iteration is returned when
449
450
 
450
451
  ```python
451
452
  @fast.evaluator_optimizer(
452
- name="researcher"
453
- generator="web_searcher"
454
- evaluator="quality_assurance"
455
- min_rating="EXCELLENT"
453
+ name="researcher",
454
+ generator="web_searcher",
455
+ evaluator="quality_assurance",
456
+ min_rating="EXCELLENT",
456
457
  max_refinements=3
457
458
  )
458
459
 
@@ -470,8 +471,8 @@ Routers use an LLM to assess a message, and route it to the most appropriate Age
470
471
 
471
472
  ```python
472
473
  @fast.router(
473
- name="route"
474
- agents["agent1","agent2","agent3"]
474
+ name="route",
475
+ agents=["agent1","agent2","agent3"]
475
476
  )
476
477
  ```
477
478
 
@@ -483,7 +484,7 @@ Given a complex task, the Orchestrator uses an LLM to generate a plan to divide
483
484
 
484
485
  ```python
485
486
  @fast.orchestrator(
486
- name="orchestrate"
487
+ name="orchestrate",
487
488
  agents=["task1","task2","task3"]
488
489
  )
489
490
  ```
@@ -523,7 +524,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
523
524
  servers=["filesystem"], # list of MCP Servers for the agent
524
525
  model="o3-mini.high", # specify a model for the agent
525
526
  use_history=True, # agent maintains chat history
526
- request_params=RequestParams(temperature= 0.7)), # additional parameters for the LLM (or RequestParams())
527
+ request_params=RequestParams(temperature= 0.7), # additional parameters for the LLM (or RequestParams())
527
528
  human_input=True, # agent can request human input
528
529
  )
529
530
  ```
@@ -535,7 +536,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
535
536
  name="chain", # name of the chain
536
537
  sequence=["agent1", "agent2", ...], # list of agents in execution order
537
538
  instruction="instruction", # instruction to describe the chain for other workflows
538
- cumulative=False # whether to accumulate messages through the chain
539
+ cumulative=False, # whether to accumulate messages through the chain
539
540
  continue_with_final=True, # open chat with agent at end of chain after prompting
540
541
  )
541
542
  ```
@@ -10,7 +10,7 @@
10
10
  ## Overview
11
11
 
12
12
  > [!TIP]
13
- > Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not. llms.txt link is here: https://fast-agent.ai/llms.txt
13
+ > Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not. There is also an LLMs.txt [here](https://fast-agent.ai/llms.txt)
14
14
 
15
15
  **`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes. It is the first framework with complete, end-to-end tested MCP Feature support including Sampling. Both Anthropic (Haiku, Sonnet, Opus) and OpenAI models (gpt-4o/gpt-4.1 family, o1/o3 family) are supported.
16
16
 
@@ -18,7 +18,8 @@ The simple declarative syntax lets you concentrate on composing your Prompts and
18
18
 
19
19
  `fast-agent` is multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints via Prompts, Resources and MCP Tool Call results. The inclusion of passthrough and playback LLMs enable rapid development and test of Python glue-code for your applications.
20
20
 
21
- > [!TIP] > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.
21
+ > [!IMPORTANT]
22
+ > The `fast-agent` documentation repo is here: https://github.com/evalstate/fast-agent-docs. Please feel free to submit PRs for documentation, experience reports or other content you think others may find helpful. All help and feedback warmly received.
22
23
 
23
24
  ### Agent Application Development
24
25
 
@@ -208,10 +209,10 @@ If the Generator has `use_history` off, the previous iteration is returned when
208
209
 
209
210
  ```python
210
211
  @fast.evaluator_optimizer(
211
- name="researcher"
212
- generator="web_searcher"
213
- evaluator="quality_assurance"
214
- min_rating="EXCELLENT"
212
+ name="researcher",
213
+ generator="web_searcher",
214
+ evaluator="quality_assurance",
215
+ min_rating="EXCELLENT",
215
216
  max_refinements=3
216
217
  )
217
218
 
@@ -229,8 +230,8 @@ Routers use an LLM to assess a message, and route it to the most appropriate Age
229
230
 
230
231
  ```python
231
232
  @fast.router(
232
- name="route"
233
- agents["agent1","agent2","agent3"]
233
+ name="route",
234
+ agents=["agent1","agent2","agent3"]
234
235
  )
235
236
  ```
236
237
 
@@ -242,7 +243,7 @@ Given a complex task, the Orchestrator uses an LLM to generate a plan to divide
242
243
 
243
244
  ```python
244
245
  @fast.orchestrator(
245
- name="orchestrate"
246
+ name="orchestrate",
246
247
  agents=["task1","task2","task3"]
247
248
  )
248
249
  ```
@@ -282,7 +283,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
282
283
  servers=["filesystem"], # list of MCP Servers for the agent
283
284
  model="o3-mini.high", # specify a model for the agent
284
285
  use_history=True, # agent maintains chat history
285
- request_params=RequestParams(temperature= 0.7)), # additional parameters for the LLM (or RequestParams())
286
+ request_params=RequestParams(temperature= 0.7), # additional parameters for the LLM (or RequestParams())
286
287
  human_input=True, # agent can request human input
287
288
  )
288
289
  ```
@@ -294,7 +295,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
294
295
  name="chain", # name of the chain
295
296
  sequence=["agent1", "agent2", ...], # list of agents in execution order
296
297
  instruction="instruction", # instruction to describe the chain for other workflows
297
- cumulative=False # whether to accumulate messages through the chain
298
+ cumulative=False, # whether to accumulate messages through the chain
298
299
  continue_with_final=True, # open chat with agent at end of chain after prompting
299
300
  )
300
301
  ```
@@ -0,0 +1,27 @@
1
+ import asyncio
2
+ from pathlib import Path
3
+
4
+ from mcp_agent.core.fastagent import FastAgent
5
+ from mcp_agent.core.prompt import Prompt
6
+
7
+ # Create the application
8
+ fast = FastAgent("fast-agent example")
9
+
10
+
11
+ # Define the agent
12
+ @fast.agent(instruction="You are a helpful AI Agent", servers=["filesystem"])
13
+ async def main():
14
+ # use the --model command line switch or agent arguments to change model
15
+ async with fast.run() as agent:
16
+ await agent.default.generate(
17
+ [
18
+ Prompt.user(
19
+ Path("cat.png"), "Write a report on the content of the image to 'report.md'"
20
+ )
21
+ ]
22
+ )
23
+ await agent.interactive()
24
+
25
+
26
+ if __name__ == "__main__":
27
+ asyncio.run(main())
@@ -0,0 +1,18 @@
1
+ import asyncio
2
+
3
+ from mcp_agent.core.fastagent import FastAgent
4
+
5
+ # Create the application
6
+ fast = FastAgent("fast-agent example")
7
+
8
+
9
+ # Define the agent
10
+ @fast.agent(instruction="You are a helpful AI Agent", servers=["filesystem"])
11
+ async def main():
12
+ # use the --model command line switch or agent arguments to change model
13
+ async with fast.run() as agent:
14
+ await agent.interactive()
15
+
16
+
17
+ if __name__ == "__main__":
18
+ asyncio.run(main())
@@ -0,0 +1,20 @@
1
+ import asyncio
2
+
3
+ from mcp_agent.core.fastagent import FastAgent
4
+
5
+ # Create the application
6
+ fast = FastAgent("fast-agent example")
7
+
8
+
9
+ # Define the agent
10
+ @fast.agent(instruction="You are a helpful AI Agent", servers=["webcam", "hfspace"])
11
+ async def main():
12
+ async with fast.run() as agent:
13
+ await agent.interactive(
14
+ default_prompt="take an image with the webcam, describe it to flux to "
15
+ "reproduce it and then judge the quality of the result"
16
+ )
17
+
18
+
19
+ if __name__ == "__main__":
20
+ asyncio.run(main())
@@ -0,0 +1,47 @@
1
+ # FastAgent Configuration File
2
+
3
+ # Default Model Configuration:
4
+ #
5
+ # Takes format:
6
+ # <provider>.<model_string>.<reasoning_effort?> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low)
7
+ # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3
8
+ # and OpenAI Models: gpt-4.1, gpt-4.1-mini, o1, o1-mini, o3-mini
9
+ #
10
+ # If not specified, defaults to "haiku".
11
+ # Can be overridden with a command line switch --model=<model>, or within the Agent constructor.
12
+
13
+ default_model: haiku
14
+
15
+ # Logging and Console Configuration:
16
+ logger:
17
+ # level: "debug" | "info" | "warning" | "error"
18
+ # type: "none" | "console" | "file" | "http"
19
+ # path: "/path/to/logfile.jsonl"
20
+
21
+
22
+ # Switch the progress display on or off
23
+ progress_display: true
24
+
25
+ # Show chat User/Assistant messages on the console
26
+ show_chat: true
27
+ # Show tool calls on the console
28
+ show_tools: true
29
+ # Truncate long tool responses on the console
30
+ truncate_tools: true
31
+
32
+ # MCP Servers
33
+ mcp:
34
+ servers:
35
+ fetch:
36
+ command: "uvx"
37
+ args: ["mcp-server-fetch"]
38
+ filesystem:
39
+ command: "npx"
40
+ args: ["-y", "@modelcontextprotocol/server-filesystem", "."]
41
+ webcam:
42
+ command: "npx"
43
+ args: ["-y","@llmindset/mcp-webcam"]
44
+ hfspace:
45
+ command: "npx"
46
+ args: ["-y","@llmindset/mcp-hfspace"]
47
+
@@ -0,0 +1,38 @@
1
+ import asyncio
2
+ from typing import Annotated
3
+
4
+ from pydantic import BaseModel, Field
5
+
6
+ from mcp_agent.core.fastagent import FastAgent
7
+ from mcp_agent.core.prompt import Prompt
8
+ from mcp_agent.core.request_params import RequestParams
9
+
10
+ # Create the application
11
+ fast = FastAgent("fast-agent example")
12
+
13
+
14
+ class FormattedResponse(BaseModel):
15
+ thinking: Annotated[
16
+ str, Field(description="Your reflection on the conversation that is not seen by the user.")
17
+ ]
18
+ message: str
19
+
20
+
21
+ # Define the agent
22
+ @fast.agent(
23
+ name="chat",
24
+ instruction="You are a helpful AI Agent",
25
+ servers=["fetch"],
26
+ request_params=RequestParams(maxTokens=8192),
27
+ )
28
+ async def main():
29
+ # use the --model command line switch or agent arguments to change model
30
+ async with fast.run() as agent:
31
+ thinking, response = await agent.chat.structured(
32
+ multipart_messages=[Prompt.user("Let's talk about guitars.")],
33
+ model=FormattedResponse,
34
+ )
35
+
36
+
37
+ if __name__ == "__main__":
38
+ asyncio.run(main())
@@ -0,0 +1,37 @@
1
import asyncio
from typing import Annotated

from pydantic import BaseModel, Field

from mcp_agent.core.fastagent import FastAgent
from mcp_agent.core.prompt import Prompt
from mcp_agent.core.request_params import RequestParams

# Create the application
fast = FastAgent("fast-agent example")


class FormattedResponse(BaseModel):
    """Example response schema (unused by generate(); kept for parity with the structured example)."""

    # Hidden reflection, not shown to the user.
    thinking: Annotated[
        str, Field(description="Your reflection on the conversation that is not seen by the user.")
    ]
    # The user-visible reply.
    message: str


# Define the agent
@fast.agent(
    name="chat",
    instruction="You are a helpful AI Agent",
    servers=["fetch"],
    request_params=RequestParams(maxTokens=8192),
)
async def main() -> None:
    """Run the chat agent once with a free-form generate() call."""
    # use the --model command line switch or agent arguments to change model
    async with fast.run() as agent:
        # BUG FIX: generate() returns a single PromptMessageMultipart, not a tuple.
        # The previous `thinking, response = ...` unpacking iterated the pydantic
        # model's fields instead of receiving the assistant message.
        response = await agent.chat.generate(
            multipart_messages=[Prompt.user("Let's talk about guitars. Fetch from wikipedia")],
        )


if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,12 @@
1
+ services:
2
+ jaeger:
3
+ image: jaegertracing/jaeger:2.5.0
4
+ container_name: jaeger
5
+ ports:
6
+ - "16686:16686" # Web UI
7
+ - "4317:4317" # OTLP gRPC
8
+ - "4318:4318" # OTLP HTTP
9
+ - "5778:5778" # Config server
10
+ - "9411:9411" # Zipkin compatible
11
+ restart: unless-stopped
12
+
@@ -0,0 +1,44 @@
1
+ # FastAgent Configuration File
2
+
3
+ # Default Model Configuration:
4
+ #
5
+ # Takes format:
6
+ # <provider>.<model_string>.<reasoning_effort?> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low)
7
+ # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3
8
+ # and OpenAI Models: gpt-4.1, gpt-4.1-mini, o1, o1-mini, o3-mini
9
+ #
10
+ # If not specified, defaults to "haiku".
11
+ # Can be overridden with a command line switch --model=<model>, or within the Agent constructor.
12
+
13
+ default_model: haiku
14
+
15
+ # Logging and Console Configuration:
16
+ logger:
17
+ # level: "debug" | "info" | "warning" | "error"
18
+ # type: "none" | "console" | "file" | "http"
19
+ # path: "/path/to/logfile.jsonl"
20
+
21
+ # Switch the progress display on or off
22
+ progress_display: true
23
+
24
+ # Show chat User/Assistant messages on the console
25
+ show_chat: true
26
+ # Show tool calls on the console
27
+ show_tools: true
28
+ # Truncate long tool responses on the console
29
+ truncate_tools: true
30
+
31
+ otel:
32
+ enabled: true # Enable or disable OpenTelemetry
33
+
34
+ # MCP Servers
35
+ mcp:
36
+ servers:
37
+ fetch:
38
+ command: "uvx"
39
+ args: ["mcp-server-fetch"]
40
+ filesystem:
41
+ command: "npx"
42
+ args: ["-y", "@modelcontextprotocol/server-filesystem", "."]
43
+ think:
44
+ command: "mcp-think-tool"
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "fast-agent-mcp"
3
- version = "0.2.17"
3
+ version = "0.2.19"
4
4
  description = "Define, Prompt and Test MCP enabled Agents and Workflows"
5
5
  readme = "README.md"
6
6
  license = { file = "LICENSE" }
@@ -24,13 +24,13 @@ dependencies = [
24
24
  "pyyaml>=6.0.2",
25
25
  "rich>=13.9.4",
26
26
  "typer>=0.15.1",
27
- "numpy>=2.2.1",
28
- "scikit-learn>=1.6.0",
29
27
  "anthropic>=0.49.0",
30
28
  "openai>=1.63.2",
31
29
  "prompt-toolkit>=3.0.50",
32
30
  "aiohttp>=3.11.13",
33
31
  "a2a-types>=0.1.0",
32
+ "opentelemetry-instrumentation-openai>=0.39.3",
33
+ "opentelemetry-instrumentation-anthropic>=0.39.3",
34
34
  ]
35
35
 
36
36
  [project.optional-dependencies]
@@ -31,6 +31,7 @@ from mcp.types import (
31
31
  TextContent,
32
32
  Tool,
33
33
  )
34
+ from opentelemetry import trace
34
35
  from pydantic import BaseModel
35
36
 
36
37
  from mcp_agent.core.agent_types import AgentConfig, AgentType
@@ -92,6 +93,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
92
93
  )
93
94
 
94
95
  self._context = context
96
+ self.tracer = trace.get_tracer(__name__)
95
97
  self.name = self.config.name
96
98
  self.instruction = self.config.instruction
97
99
  self.functions = functions or []
@@ -588,7 +590,8 @@ class BaseAgent(MCPAggregator, AgentProtocol):
588
590
  The LLM's response as a PromptMessageMultipart
589
591
  """
590
592
  assert self._llm
591
- return await self._llm.generate(multipart_messages, request_params)
593
+ with self.tracer.start_as_current_span(f"Agent: '{self.name}' generate"):
594
+ return await self._llm.generate(multipart_messages, request_params)
592
595
 
593
596
  async def structured(
594
597
  self,
@@ -609,7 +612,8 @@ class BaseAgent(MCPAggregator, AgentProtocol):
609
612
  An instance of the specified model, or None if coercion fails
610
613
  """
611
614
  assert self._llm
612
- return await self._llm.structured(multipart_messages, model, request_params)
615
+ with self.tracer.start_as_current_span(f"Agent: '{self.name}' structured"):
616
+ return await self._llm.structured(multipart_messages, model, request_params)
613
617
 
614
618
  async def apply_prompt_messages(
615
619
  self, prompts: List[PromptMessageMultipart], request_params: RequestParams | None = None
@@ -2,6 +2,7 @@ import asyncio
2
2
  from typing import Any, List, Optional, Tuple
3
3
 
4
4
  from mcp.types import TextContent
5
+ from opentelemetry import trace
5
6
 
6
7
  from mcp_agent.agents.agent import Agent
7
8
  from mcp_agent.agents.base_agent import BaseAgent
@@ -18,7 +19,7 @@ class ParallelAgent(BaseAgent):
18
19
  This workflow performs both the fan-out and fan-in operations using LLMs.
19
20
  From the user's perspective, an input is specified and the output is returned.
20
21
  """
21
-
22
+
22
23
  @property
23
24
  def agent_type(self) -> AgentType:
24
25
  """Return the type of this agent."""
@@ -62,31 +63,37 @@ class ParallelAgent(BaseAgent):
62
63
  Returns:
63
64
  The aggregated response from the fan-in agent
64
65
  """
65
- # Execute all fan-out agents in parallel
66
- responses: List[PromptMessageMultipart] = await asyncio.gather(
67
- *[agent.generate(multipart_messages, request_params) for agent in self.fan_out_agents]
68
- )
69
66
 
70
- # Extract the received message from the input
71
- received_message: Optional[str] = (
72
- multipart_messages[-1].all_text() if multipart_messages else None
73
- )
67
+ tracer = trace.get_tracer(__name__)
68
+ with tracer.start_as_current_span(f"Parallel: '{self.name}' generate"):
69
+ # Execute all fan-out agents in parallel
70
+ responses: List[PromptMessageMultipart] = await asyncio.gather(
71
+ *[
72
+ agent.generate(multipart_messages, request_params)
73
+ for agent in self.fan_out_agents
74
+ ]
75
+ )
74
76
 
75
- # Convert responses to strings for aggregation
76
- string_responses = []
77
- for response in responses:
78
- string_responses.append(response.all_text())
77
+ # Extract the received message from the input
78
+ received_message: Optional[str] = (
79
+ multipart_messages[-1].all_text() if multipart_messages else None
80
+ )
79
81
 
80
- # Format the responses and send to the fan-in agent
81
- aggregated_prompt = self._format_responses(string_responses, received_message)
82
+ # Convert responses to strings for aggregation
83
+ string_responses = []
84
+ for response in responses:
85
+ string_responses.append(response.all_text())
82
86
 
83
- # Create a new multipart message with the formatted responses
84
- formatted_prompt = PromptMessageMultipart(
85
- role="user", content=[TextContent(type="text", text=aggregated_prompt)]
86
- )
87
+ # Format the responses and send to the fan-in agent
88
+ aggregated_prompt = self._format_responses(string_responses, received_message)
89
+
90
+ # Create a new multipart message with the formatted responses
91
+ formatted_prompt = PromptMessageMultipart(
92
+ role="user", content=[TextContent(type="text", text=aggregated_prompt)]
93
+ )
87
94
 
88
- # Use the fan-in agent to aggregate the responses
89
- return await self.fan_in_agent.generate([formatted_prompt], request_params)
95
+ # Use the fan-in agent to aggregate the responses
96
+ return await self.fan_in_agent.generate([formatted_prompt], request_params)
90
97
 
91
98
  def _format_responses(self, responses: List[Any], message: Optional[str] = None) -> str:
92
99
  """
@@ -116,7 +123,7 @@ class ParallelAgent(BaseAgent):
116
123
 
117
124
  async def structured(
118
125
  self,
119
- prompt: List[PromptMessageMultipart],
126
+ multipart_messages: List[PromptMessageMultipart],
120
127
  model: type[ModelT],
121
128
  request_params: Optional[RequestParams] = None,
122
129
  ) -> Tuple[ModelT | None, PromptMessageMultipart]:
@@ -133,27 +140,35 @@ class ParallelAgent(BaseAgent):
133
140
  Returns:
134
141
  An instance of the specified model, or None if coercion fails
135
142
  """
136
- # Generate parallel responses first
137
- responses: List[PromptMessageMultipart] = await asyncio.gather(
138
- *[agent.generate(prompt, request_params) for agent in self.fan_out_agents]
139
- )
140
143
 
141
- # Extract the received message
142
- received_message: Optional[str] = prompt[-1].all_text() if prompt else None
144
+ tracer = trace.get_tracer(__name__)
145
+ with tracer.start_as_current_span(f"Parallel: '{self.name}' generate"):
146
+ # Generate parallel responses first
147
+ responses: List[PromptMessageMultipart] = await asyncio.gather(
148
+ *[
149
+ agent.generate(multipart_messages, request_params)
150
+ for agent in self.fan_out_agents
151
+ ]
152
+ )
143
153
 
144
- # Convert responses to strings
145
- string_responses = [response.all_text() for response in responses]
154
+ # Extract the received message
155
+ received_message: Optional[str] = (
156
+ multipart_messages[-1].all_text() if multipart_messages else None
157
+ )
146
158
 
147
- # Format the responses for the fan-in agent
148
- aggregated_prompt = self._format_responses(string_responses, received_message)
159
+ # Convert responses to strings
160
+ string_responses = [response.all_text() for response in responses]
149
161
 
150
- # Create a multipart message
151
- formatted_prompt = PromptMessageMultipart(
152
- role="user", content=[TextContent(type="text", text=aggregated_prompt)]
153
- )
162
+ # Format the responses for the fan-in agent
163
+ aggregated_prompt = self._format_responses(string_responses, received_message)
164
+
165
+ # Create a multipart message
166
+ formatted_prompt = PromptMessageMultipart(
167
+ role="user", content=[TextContent(type="text", text=aggregated_prompt)]
168
+ )
154
169
 
155
- # Use the fan-in agent to parse the structured output
156
- return await self.fan_in_agent.structured([formatted_prompt], model, request_params)
170
+ # Use the fan-in agent to parse the structured output
171
+ return await self.fan_in_agent.structured([formatted_prompt], model, request_params)
157
172
 
158
173
  async def initialize(self) -> None:
159
174
  """