fast-agent-mcp 0.2.0__tar.gz → 0.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157) hide show
  1. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/PKG-INFO +1 -1
  2. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/pyproject.toml +1 -1
  3. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/cli/commands/bootstrap.py +27 -4
  4. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/config.py +15 -0
  5. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/model_factory.py +4 -0
  6. fast_agent_mcp-0.2.2/src/mcp_agent/llm/providers/augmented_llm_generic.py +46 -0
  7. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/augmented_llm_openai.py +3 -11
  8. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +188 -0
  9. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/data-analysis/analysis.py +65 -0
  10. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
  11. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
  12. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +66 -0
  13. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/researcher/researcher-eval.py +53 -0
  14. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/researcher/researcher-imp.py +189 -0
  15. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/researcher/researcher.py +36 -0
  16. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/workflows/chaining.py +36 -0
  17. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/workflows/evaluator.py +77 -0
  18. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +24 -0
  19. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/workflows/human_input.py +26 -0
  20. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/workflows/orchestrator.py +69 -0
  21. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/workflows/parallel.py +58 -0
  22. fast_agent_mcp-0.2.2/src/mcp_agent/resources/examples/workflows/router.py +54 -0
  23. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/.gitignore +0 -0
  24. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/LICENSE +0 -0
  25. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/README.md +0 -0
  26. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/data-analysis/analysis-campaign.py +0 -0
  27. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/data-analysis/analysis.py +0 -0
  28. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/data-analysis/fastagent.config.yaml +0 -0
  29. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  30. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/researcher/fastagent.config.yaml +0 -0
  31. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/researcher/researcher-eval.py +0 -0
  32. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/researcher/researcher-imp.py +0 -0
  33. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/researcher/researcher.py +0 -0
  34. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/chaining.py +0 -0
  35. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/evaluator.py +0 -0
  36. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/fastagent.config.yaml +0 -0
  37. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/graded_report.md +0 -0
  38. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/human_input.py +0 -0
  39. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/orchestrator.py +0 -0
  40. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/parallel.py +0 -0
  41. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/router.py +0 -0
  42. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/short_story.md +0 -0
  43. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/examples/workflows/short_story.txt +0 -0
  44. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/__init__.py +0 -0
  45. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/__init__.py +0 -0
  46. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/agent.py +0 -0
  47. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/base_agent.py +0 -0
  48. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/workflow/__init__.py +0 -0
  49. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/workflow/chain_agent.py +0 -0
  50. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/workflow/evaluator_optimizer.py +0 -0
  51. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/workflow/orchestrator_agent.py +0 -0
  52. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/workflow/orchestrator_models.py +0 -0
  53. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/workflow/orchestrator_prompts.py +0 -0
  54. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/workflow/parallel_agent.py +0 -0
  55. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/agents/workflow/router_agent.py +0 -0
  56. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/app.py +0 -0
  57. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/cli/__init__.py +0 -0
  58. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/cli/__main__.py +0 -0
  59. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/cli/commands/config.py +0 -0
  60. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/cli/commands/setup.py +0 -0
  61. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/cli/main.py +0 -0
  62. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/cli/terminal.py +0 -0
  63. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/console.py +0 -0
  64. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/context.py +0 -0
  65. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/context_dependent.py +0 -0
  66. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/__init__.py +0 -0
  67. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/agent_types.py +0 -0
  68. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/direct_agent_app.py +0 -0
  69. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/direct_decorators.py +0 -0
  70. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/direct_factory.py +0 -0
  71. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/enhanced_prompt.py +0 -0
  72. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/error_handling.py +0 -0
  73. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/exceptions.py +0 -0
  74. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/fastagent.py +0 -0
  75. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/interactive_prompt.py +0 -0
  76. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/mcp_content.py +0 -0
  77. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/prompt.py +0 -0
  78. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/request_params.py +0 -0
  79. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/core/validation.py +0 -0
  80. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/event_progress.py +0 -0
  81. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/executor/__init__.py +0 -0
  82. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/executor/decorator_registry.py +0 -0
  83. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/executor/executor.py +0 -0
  84. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/executor/task_registry.py +0 -0
  85. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/executor/temporal.py +0 -0
  86. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/executor/workflow.py +0 -0
  87. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/executor/workflow_signal.py +0 -0
  88. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/human_input/__init__.py +0 -0
  89. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/human_input/handler.py +0 -0
  90. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/human_input/types.py +0 -0
  91. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/__init__.py +0 -0
  92. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/augmented_llm.py +0 -0
  93. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/augmented_llm_passthrough.py +0 -0
  94. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/augmented_llm_playback.py +0 -0
  95. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/memory.py +0 -0
  96. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/prompt_utils.py +0 -0
  97. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/__init__.py +0 -0
  98. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/anthropic_utils.py +0 -0
  99. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -0
  100. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/augmented_llm_deepseek.py +0 -0
  101. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/multipart_converter_anthropic.py +0 -0
  102. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/multipart_converter_openai.py +0 -0
  103. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/openai_multipart.py +0 -0
  104. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/openai_utils.py +0 -0
  105. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -0
  106. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/providers/sampling_converter_openai.py +0 -0
  107. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/sampling_converter.py +0 -0
  108. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/llm/sampling_format_converter.py +0 -0
  109. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/logging/__init__.py +0 -0
  110. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/logging/events.py +0 -0
  111. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/logging/json_serializer.py +0 -0
  112. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/logging/listeners.py +0 -0
  113. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/logging/logger.py +0 -0
  114. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/logging/rich_progress.py +0 -0
  115. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/logging/tracing.py +0 -0
  116. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/logging/transport.py +0 -0
  117. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/__init__.py +0 -0
  118. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/gen_client.py +0 -0
  119. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/interfaces.py +0 -0
  120. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/logger_textio.py +0 -0
  121. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/mcp_activity.py +0 -0
  122. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
  123. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/mcp_agent_server.py +0 -0
  124. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/mcp_aggregator.py +0 -0
  125. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
  126. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/mime_utils.py +0 -0
  127. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompt_message_multipart.py +0 -0
  128. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompt_render.py +0 -0
  129. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompt_serialization.py +0 -0
  130. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompts/__init__.py +0 -0
  131. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompts/__main__.py +0 -0
  132. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompts/prompt_constants.py +0 -0
  133. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompts/prompt_helpers.py +0 -0
  134. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompts/prompt_load.py +0 -0
  135. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompts/prompt_server.py +0 -0
  136. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/prompts/prompt_template.py +0 -0
  137. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/resource_utils.py +0 -0
  138. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp/sampling.py +0 -0
  139. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp_server/__init__.py +0 -0
  140. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp_server/agent_server.py +0 -0
  141. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/mcp_server_registry.py +0 -0
  142. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/progress_display.py +0 -0
  143. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/in_dev/agent_build.py +0 -0
  144. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/in_dev/slides.py +0 -0
  145. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/internal/agent.py +0 -0
  146. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/internal/fastagent.config.yaml +0 -0
  147. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/internal/job.py +0 -0
  148. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/internal/prompt_category.py +0 -0
  149. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/internal/prompt_sizing.py +0 -0
  150. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/internal/sizer.py +0 -0
  151. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/internal/social.py +0 -0
  152. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/prompting/__init__.py +0 -0
  153. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/prompting/agent.py +0 -0
  154. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/prompting/fastagent.config.yaml +0 -0
  155. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/prompting/image_server.py +0 -0
  156. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/resources/examples/prompting/work_with_image.py +0 -0
  157. {fast_agent_mcp-0.2.0 → fast_agent_mcp-0.2.2}/src/mcp_agent/ui/console_display.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: fast-agent-mcp
3
- Version: 0.2.0
3
+ Version: 0.2.2
4
4
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
5
5
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
6
6
  License: Apache License
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "fast-agent-mcp"
3
- version = "0.2.0"
3
+ version = "0.2.2"
4
4
  description = "Define, Prompt and Test MCP enabled Agents and Workflows"
5
5
  readme = "README.md"
6
6
  license = { file = "LICENSE" }
@@ -20,7 +20,6 @@ EXAMPLE_TYPES = {
20
20
  "'Building Effective Agents' paper. Some agents use the 'fetch'\n"
21
21
  "and filesystem MCP Servers.",
22
22
  "files": [
23
- "agent_build.py",
24
23
  "chaining.py",
25
24
  "evaluator.py",
26
25
  "human_input.py",
@@ -70,9 +69,33 @@ def copy_example_files(example_type: str, target_dir: Path, force: bool = False)
70
69
  mount_point_dir.mkdir(parents=True)
71
70
  console.print(f"Created mount-point directory: {mount_point_dir}")
72
71
 
73
- # Use examples from top-level directory
74
- package_dir = Path(__file__).parent.parent.parent.parent.parent
75
- source_dir = package_dir / "examples" / ("workflows" if example_type == "workflow" else f"{example_type}")
72
+ # Try to use examples from the installed package first, or fall back to the top-level directory
73
+ from importlib.resources import files
74
+
75
+ try:
76
+ # First try to find examples in the package resources
77
+ source_dir = (
78
+ files("mcp_agent")
79
+ .joinpath("resources")
80
+ .joinpath("examples")
81
+ .joinpath("workflows" if example_type == "workflow" else f"{example_type}")
82
+ )
83
+ if not source_dir.is_dir():
84
+ # Fall back to the top-level directory for development mode
85
+ package_dir = Path(__file__).parent.parent.parent.parent.parent
86
+ source_dir = (
87
+ package_dir
88
+ / "examples"
89
+ / ("workflows" if example_type == "workflow" else f"{example_type}")
90
+ )
91
+ except (ImportError, ModuleNotFoundError, ValueError):
92
+ # Fall back to the top-level directory if the resource finding fails
93
+ package_dir = Path(__file__).parent.parent.parent.parent.parent
94
+ source_dir = (
95
+ package_dir
96
+ / "examples"
97
+ / ("workflows" if example_type == "workflow" else f"{example_type}")
98
+ )
76
99
 
77
100
  if not source_dir.exists():
78
101
  console.print(f"[red]Error: Source directory not found: {source_dir}[/red]")
@@ -136,6 +136,18 @@ class DeepSeekSettings(BaseModel):
136
136
  model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
137
137
 
138
138
 
139
class GenericSettings(BaseModel):
    """
    Settings for using generic OpenAI-compatible endpoints (e.g. a local
    Ollama server) in the fast-agent application.
    """

    # Optional API key; generic endpoints such as Ollama usually do not require one.
    api_key: str | None = None

    # Base URL of the OpenAI-compatible endpoint; when unset the provider
    # falls back to its built-in default (local Ollama).
    base_url: str | None = None

    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
149
+
150
+
139
151
  class TemporalSettings(BaseModel):
140
152
  """
141
153
  Temporal settings for the fast-agent application.
@@ -250,6 +262,9 @@ class Settings(BaseSettings):
250
262
  deepseek: DeepSeekSettings | None = None
251
263
  """Settings for using DeepSeek models in the fast-agent application"""
252
264
 
265
+ generic: GenericSettings | None = None
266
+ """Settings for using Generic models in the fast-agent application"""
267
+
253
268
  logger: LoggerSettings | None = LoggerSettings()
254
269
  """Logger settings for the fast-agent application"""
255
270
 
@@ -9,6 +9,7 @@ from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
9
9
  from mcp_agent.llm.augmented_llm_playback import PlaybackLLM
10
10
  from mcp_agent.llm.providers.augmented_llm_anthropic import AnthropicAugmentedLLM
11
11
  from mcp_agent.llm.providers.augmented_llm_deepseek import DeepSeekAugmentedLLM
12
+ from mcp_agent.llm.providers.augmented_llm_generic import GenericAugmentedLLM
12
13
  from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
13
14
  from mcp_agent.mcp.interfaces import AugmentedLLMProtocol
14
15
 
@@ -32,6 +33,7 @@ class Provider(Enum):
32
33
  OPENAI = auto()
33
34
  FAST_AGENT = auto()
34
35
  DEEPSEEK = auto()
36
+ GENERIC = auto()
35
37
 
36
38
 
37
39
  class ReasoningEffort(Enum):
@@ -60,6 +62,7 @@ class ModelFactory:
60
62
  "openai": Provider.OPENAI,
61
63
  "fast-agent": Provider.FAST_AGENT,
62
64
  "deepseek": Provider.DEEPSEEK,
65
+ "generic": Provider.GENERIC,
63
66
  }
64
67
 
65
68
  # Mapping of effort strings to enum values
@@ -116,6 +119,7 @@ class ModelFactory:
116
119
  Provider.OPENAI: OpenAIAugmentedLLM,
117
120
  Provider.FAST_AGENT: PassthroughLLM,
118
121
  Provider.DEEPSEEK: DeepSeekAugmentedLLM,
122
+ Provider.GENERIC: GenericAugmentedLLM,
119
123
  }
120
124
 
121
125
  # Mapping of special model names to their specific LLM classes
@@ -0,0 +1,46 @@
1
import os

from mcp_agent.core.request_params import RequestParams
from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM

DEFAULT_OLLAMA_BASE_URL = "http://localhost:11434/v1"
DEFAULT_OLLAMA_MODEL = "llama3.2:latest"
DEFAULT_OLLAMA_API_KEY = "ollama"


class GenericAugmentedLLM(OpenAIAugmentedLLM):
    """OpenAI-compatible LLM provider for generic endpoints.

    Defaults to a local Ollama server; configured via the `generic` section
    of the application settings (`api_key`, `base_url`).
    """

    def __init__(self, *args, **kwargs) -> None:
        kwargs["provider_name"] = "GenericOpenAI"  # Provider name reported in logs/diagnostics
        super().__init__(*args, **kwargs)  # Properly pass args and kwargs to parent

    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
        """Initialize generic-provider default parameters (model defaults to local Ollama)."""
        chosen_model = kwargs.get("model", DEFAULT_OLLAMA_MODEL)

        return RequestParams(
            model=chosen_model,
            systemPrompt=self.instruction,
            parallel_tool_calls=True,
            max_iterations=10,
            use_history=True,
        )

    def _api_key(self) -> str:
        """Resolve the API key: config value, then GENERIC_API_KEY env var, then Ollama default."""
        config = self.context.config
        api_key = None

        if config and config.generic:
            api_key = config.generic.api_key
            # Treat the setup-template placeholder as "not configured".
            if api_key == "<your-api-key-here>":
                api_key = None

        if api_key is None:
            api_key = os.getenv("GENERIC_API_KEY")

        return api_key or DEFAULT_OLLAMA_API_KEY

    def _base_url(self) -> str:
        """Return the configured base URL, falling back to the local Ollama default.

        BUG FIX: the original read `self.context.config.deepseek` (copy-paste
        from the DeepSeek provider), so the `generic.base_url` setting was
        ignored, and when no deepseek config was present `base_url` was
        referenced while unbound, raising NameError.
        """
        base_url = None
        if self.context.config and self.context.config.generic:
            base_url = self.context.config.generic.base_url

        return base_url if base_url else DEFAULT_OLLAMA_BASE_URL
@@ -115,7 +115,6 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
115
115
  self,
116
116
  message,
117
117
  request_params: RequestParams | None = None,
118
- response_model: Type[ModelT] | None = None,
119
118
  ) -> List[ChatCompletionMessage]:
120
119
  """
121
120
  Process a query using an LLM and available tools.
@@ -192,16 +191,9 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
192
191
  self.logger.debug(f"{arguments}")
193
192
  self._log_chat_progress(self.chat_turn(), model=model)
194
193
 
195
- if response_model is None:
196
- executor_result = await self.executor.execute(
197
- openai_client.chat.completions.create, **arguments
198
- )
199
- else:
200
- executor_result = await self.executor.execute(
201
- openai_client.beta.chat.completions.parse,
202
- **arguments,
203
- response_format=response_model,
204
- )
194
+ executor_result = await self.executor.execute(
195
+ openai_client.chat.completions.create, **arguments
196
+ )
205
197
 
206
198
  response = executor_result[0]
207
199
 
@@ -0,0 +1,188 @@
1
+ import asyncio
2
+
3
+ from mcp_agent.core.fastagent import FastAgent
4
+ from mcp_agent.llm.augmented_llm import RequestParams
5
+
6
+ # Create the application
7
+ fast = FastAgent("Data Analysis & Campaign Generator")
8
+
9
+
10
+ # Original data analysis components
11
+ @fast.agent(
12
+ name="data_analysis",
13
+ instruction="""
14
+ You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
15
+ Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
16
+ You can add further packages if needed.
17
+ Data files are accessible from the /mnt/data/ directory (this is the current working directory).
18
+ Visualisations should be saved as .png files in the current working directory.
19
+ Extract key insights that would be compelling for a social media campaign.
20
+ """,
21
+ servers=["interpreter"],
22
+ request_params=RequestParams(maxTokens=8192),
23
+ model="sonnet",
24
+ )
25
+ @fast.agent(
26
+ "evaluator",
27
+ """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
28
+ You must make sure that the tool has:
29
+ - Considered the best way for a Human to interpret the data
30
+ - Produced insightful visualisations.
31
+ - Provided a high level summary report for the Human.
32
+ - Has had its findings challenged, and justified
33
+ - Extracted compelling insights suitable for social media promotion
34
+ """,
35
+ request_params=RequestParams(maxTokens=8192),
36
+ model="gpt-4o",
37
+ )
38
+ @fast.evaluator_optimizer(
39
+ "analysis_tool",
40
+ generator="data_analysis",
41
+ evaluator="evaluator",
42
+ max_refinements=3,
43
+ min_rating="EXCELLENT",
44
+ )
45
+ # Research component using Brave search
46
+ @fast.agent(
47
+ "context_researcher",
48
+ """You are a research specialist who provides cultural context for different regions.
49
+ For any given data insight and target language/region, research:
50
+ 1. Cultural sensitivities related to presenting this type of data
51
+ 2. Local social media trends and preferences
52
+ 3. Region-specific considerations for marketing campaigns
53
+
54
+ Always provide actionable recommendations for adapting content to each culture.
55
+ """,
56
+ servers=["fetch", "brave"], # Using the fetch MCP server for Brave search
57
+ request_params=RequestParams(temperature=0.3),
58
+ model="gpt-4o",
59
+ )
60
+ # Social media content generator
61
+ @fast.agent(
62
+ "campaign_generator",
63
+ """Generate engaging social media content based on data insights.
64
+ Create compelling, shareable content that:
65
+ - Highlights key research findings in an accessible way
66
+ - Uses appropriate tone for the platform (Twitter/X, LinkedIn, Instagram, etc.)
67
+ - Is concise and impactful
68
+ - Includes suggested hashtags and posting schedule
69
+
70
+ Format your response with clear sections for each platform.
71
+ Save different campaign elements as separate files in the current directory.
72
+ """,
73
+ servers=["filesystem"], # Using filesystem MCP server to save files
74
+ request_params=RequestParams(temperature=0.7),
75
+ model="sonnet",
76
+ use_history=False,
77
+ )
78
+ # Translation agents with cultural adaptation
79
+ @fast.agent(
80
+ "translate_fr",
81
+ """Translate social media content to French with cultural adaptation.
82
+ Consider French cultural norms, expressions, and social media preferences.
83
+ Ensure the translation maintains the impact of the original while being culturally appropriate.
84
+ Save the translated content to a file with appropriate naming.
85
+ """,
86
+ model="haiku",
87
+ use_history=False,
88
+ servers=["filesystem"],
89
+ )
90
+ @fast.agent(
91
+ "translate_es",
92
+ """Translate social media content to Spanish with cultural adaptation.
93
+ Consider Spanish-speaking cultural contexts, expressions, and social media preferences.
94
+ Ensure the translation maintains the impact of the original while being culturally appropriate.
95
+ Save the translated content to a file with appropriate naming.
96
+ """,
97
+ model="haiku",
98
+ use_history=False,
99
+ servers=["filesystem"],
100
+ )
101
+ @fast.agent(
102
+ "translate_de",
103
+ """Translate social media content to German with cultural adaptation.
104
+ Consider German cultural norms, expressions, and social media preferences.
105
+ Ensure the translation maintains the impact of the original while being culturally appropriate.
106
+ Save the translated content to a file with appropriate naming.
107
+ """,
108
+ model="haiku",
109
+ use_history=False,
110
+ servers=["filesystem"],
111
+ )
112
+ @fast.agent(
113
+ "translate_ja",
114
+ """Translate social media content to Japanese with cultural adaptation.
115
+ Consider Japanese cultural norms, expressions, and social media preferences.
116
+ Ensure the translation maintains the impact of the original while being culturally appropriate.
117
+ Save the translated content to a file with appropriate naming.
118
+ """,
119
+ model="haiku",
120
+ use_history=False,
121
+ servers=["filesystem"],
122
+ )
123
+ # Parallel workflow for translations
124
+ @fast.parallel(
125
+ "translate_campaign",
126
+ instruction="Translates content to French, Spanish, German and Japanese. Supply the content to translate, translations will be saved to the filesystem.",
127
+ fan_out=["translate_fr", "translate_es", "translate_de", "translate_ja"],
128
+ include_request=True,
129
+ )
130
+ # Cultural sensitivity review agent
131
+ @fast.agent(
132
+ "cultural_reviewer",
133
+ """Review all translated content for cultural sensitivity and appropriateness.
134
+ For each language version, evaluate:
135
+ - Cultural appropriateness
136
+ - Potential misunderstandings or sensitivities
137
+ - Effectiveness for the target culture
138
+
139
+ Provide specific recommendations for any needed adjustments and save a review report.
140
+ """,
141
+ servers=["filesystem"],
142
+ request_params=RequestParams(temperature=0.2),
143
+ )
144
+ # Campaign optimization workflow
145
+ @fast.evaluator_optimizer(
146
+ "campaign_optimizer",
147
+ generator="campaign_generator",
148
+ evaluator="cultural_reviewer",
149
+ max_refinements=2,
150
+ min_rating="EXCELLENT",
151
+ )
152
+ # Main workflow orchestration
153
+ @fast.orchestrator(
154
+ "research_campaign_creator",
155
+ instruction="""
156
+ Create a complete multi-lingual social media campaign based on data analysis results.
157
+ The workflow will:
158
+ 1. Analyze the provided data and extract key insights
159
+ 2. Research cultural contexts for target languages
160
+ 3. Generate appropriate social media content
161
+ 4. Translate and culturally adapt the content
162
+ 5. Review and optimize all materials
163
+ 6. Save all campaign elements to files
164
+ """,
165
+ agents=[
166
+ "analysis_tool",
167
+ "context_researcher",
168
+ "campaign_optimizer",
169
+ "translate_campaign",
170
+ ],
171
+ model="sonnet", # Using a more capable model for orchestration
172
+ request_params=RequestParams(maxTokens=8192),
173
+ plan_type="full",
174
+ )
175
+ async def main() -> None:
176
+ # Use the app's context manager
177
+ print(
178
+ "WARNING: This workflow will likely run for >10 minutes and consume a lot of tokens. Press Enter to accept the default prompt and proceed"
179
+ )
180
+
181
+ async with fast.run() as agent:
182
+ await agent.research_campaign_creator.prompt(
183
+ default_prompt="Analyze the CSV file in the current directory and create a comprehensive multi-lingual social media campaign based on the findings. Save all campaign elements as separate files."
184
+ )
185
+
186
+
187
+ if __name__ == "__main__":
188
+ asyncio.run(main())
@@ -0,0 +1,65 @@
1
+ import asyncio
2
+
3
+ from mcp_agent.core.fastagent import FastAgent
4
+ from mcp_agent.llm.augmented_llm import RequestParams
5
+
6
+ # Create the application
7
+ fast = FastAgent("Data Analysis (Roots)")
8
+
9
+
10
+ # The sample data is under Database Contents License (DbCL) v1.0.
11
+ # Available here : https://www.kaggle.com/datasets/pavansubhasht/ibm-hr-analytics-attrition-dataset
12
+
13
+
14
+ @fast.agent(
15
+ name="data_analysis",
16
+ instruction="""
17
+ You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
18
+ Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
19
+ You can add further packages if needed.
20
+ Data files are accessible from the /mnt/data/ directory (this is the current working directory).
21
+ Visualisations should be saved as .png files in the current working directory.
22
+ """,
23
+ servers=["interpreter"],
24
+ request_params=RequestParams(maxTokens=8192),
25
+ )
26
+ async def main() -> None:
27
+ # Use the app's context manager
28
+ async with fast.run() as agent:
29
+ await agent(
30
+ "There is a csv file in the current directory. "
31
+ "Analyse the file, produce a detailed description of the data, and any patterns it contains.",
32
+ )
33
+ await agent(
34
+ "Consider the data, and how to usefully group it for presentation to a Human. Find insights, using the Python Interpreter as needed.\n"
35
+ "Use MatPlotLib to produce insightful visualisations. Save them as '.png' files in the current directory. Be sure to run the code and save the files.\n"
36
+ "Produce a summary with major insights to the data",
37
+ )
38
+ await agent()
39
+
40
+
41
+ if __name__ == "__main__":
42
+ asyncio.run(main())
43
+
44
+
45
+ ############################################################################################################
46
+ # Example of evaluator/optimizer flow
47
+ ############################################################################################################
48
+ # @fast.agent(
49
+ # "evaluator",
50
+ # """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
51
+ # You must make sure that the tool has:
52
+ # - Considered the best way for a Human to interpret the data
53
+ # - Produced insightful visualasions.
54
+ # - Provided a high level summary report for the Human.
55
+ # - Has had its findings challenged, and justified
56
+ # """,
57
+ # request_params=RequestParams(maxTokens=8192),
58
+ # )
59
+ # @fast.evaluator_optimizer(
60
+ # "analysis_tool",
61
+ # generator="data_analysis",
62
+ # evaluator="evaluator",
63
+ # max_refinements=3,
64
+ # min_rating="EXCELLENT",
65
+ # )
@@ -0,0 +1,41 @@
1
+ default_model: sonnet
2
+
3
+ # on windows, adjust the mount point to be the full path e.g. x:/temp/data-analysis/mount-point:/mnt/data/
4
+
5
+ mcp:
6
+ servers:
7
+ interpreter:
8
+ command: "docker"
9
+ args:
10
+ [
11
+ "run",
12
+ "-i",
13
+ "--rm",
14
+ "--pull=always",
15
+ "-v",
16
+ "./mount-point:/mnt/data/",
17
+ "ghcr.io/evalstate/mcp-py-repl:latest",
18
+ ]
19
+ roots:
20
+ - uri: "file://./mount-point/"
21
+ name: "test_data"
22
+ server_uri_alias: "file:///mnt/data/"
23
+ filesystem:
24
+ # On windows update the command and arguments to use `node` and the absolute path to the server.
25
+ # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally.
26
+ # Use `npm -g root` to find the global node_modules path.`
27
+ # command: "node"
28
+ # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."]
29
+ command: "npx"
30
+ args: ["-y", "@modelcontextprotocol/server-filesystem", "./mount-point/"]
31
+ fetch:
32
+ command: "uvx"
33
+ args: ["mcp-server-fetch"]
34
+ brave:
35
+ # On windows replace the command and args line to use `node` and the absolute path to the server.
36
+ # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
37
+ # Use `npm -g root` to find the global node_modules path.`
38
+ # command: "node"
39
+ # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"]
40
+ command: "npx"
41
+ args: ["-y", "@modelcontextprotocol/server-brave-search"]