fast-agent-mcp 0.1.7__tar.gz → 0.1.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (161)
  1. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/PKG-INFO +37 -9
  2. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/README.md +24 -2
  3. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/pyproject.toml +29 -9
  4. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/agents/agent.py +5 -11
  5. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/agent_app.py +125 -44
  6. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/decorators.py +3 -2
  7. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/enhanced_prompt.py +106 -20
  8. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/factory.py +28 -66
  9. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/fastagent.py +13 -3
  10. fast_agent_mcp-0.1.9/src/mcp_agent/core/mcp_content.py +222 -0
  11. fast_agent_mcp-0.1.9/src/mcp_agent/core/prompt.py +132 -0
  12. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/proxies.py +41 -36
  13. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/human_input/handler.py +4 -1
  14. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/logging/transport.py +30 -3
  15. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp/mcp_aggregator.py +27 -22
  16. fast_agent_mcp-0.1.9/src/mcp_agent/mcp/mime_utils.py +69 -0
  17. fast_agent_mcp-0.1.9/src/mcp_agent/mcp/prompt_message_multipart.py +64 -0
  18. fast_agent_mcp-0.1.9/src/mcp_agent/mcp/prompt_serialization.py +447 -0
  19. fast_agent_mcp-0.1.9/src/mcp_agent/mcp/prompts/__main__.py +10 -0
  20. fast_agent_mcp-0.1.9/src/mcp_agent/mcp/prompts/prompt_server.py +508 -0
  21. fast_agent_mcp-0.1.9/src/mcp_agent/mcp/prompts/prompt_template.py +469 -0
  22. fast_agent_mcp-0.1.9/src/mcp_agent/mcp/resource_utils.py +203 -0
  23. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/internal/agent.py +1 -1
  24. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
  25. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/internal/sizer.py +0 -5
  26. fast_agent_mcp-0.1.9/src/mcp_agent/resources/examples/prompting/__init__.py +3 -0
  27. fast_agent_mcp-0.1.9/src/mcp_agent/resources/examples/prompting/agent.py +23 -0
  28. fast_agent_mcp-0.1.9/src/mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
  29. fast_agent_mcp-0.1.9/src/mcp_agent/resources/examples/prompting/image_server.py +56 -0
  30. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
  31. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/workflows/orchestrator.py +5 -4
  32. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/workflows/router.py +0 -2
  33. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +57 -87
  34. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/anthropic_utils.py +101 -0
  35. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/llm/augmented_llm.py +155 -141
  36. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/llm/augmented_llm_anthropic.py +135 -281
  37. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/llm/augmented_llm_openai.py +175 -337
  38. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/augmented_llm_passthrough.py +104 -0
  39. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
  40. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/llm/model_factory.py +25 -6
  41. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/openai_utils.py +65 -0
  42. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/providers/__init__.py +8 -0
  43. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
  44. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
  45. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
  46. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
  47. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
  48. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
  49. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/orchestrator/orchestrator.py +62 -153
  50. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/router/router_llm.py +18 -24
  51. fast_agent_mcp-0.1.9/src/mcp_agent/workflows/swarm/__init__.py +0 -0
  52. fast_agent_mcp-0.1.7/src/mcp_agent/core/server_validation.py +0 -44
  53. fast_agent_mcp-0.1.7/src/mcp_agent/core/simulator_registry.py +0 -22
  54. fast_agent_mcp-0.1.7/src/mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
  55. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/.gitignore +0 -0
  56. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/LICENSE +0 -0
  57. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/__init__.py +0 -0
  58. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/agents/__init__.py +0 -0
  59. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/app.py +0 -0
  60. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/cli/__init__.py +0 -0
  61. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/cli/__main__.py +0 -0
  62. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/cli/commands/bootstrap.py +0 -0
  63. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/cli/commands/config.py +0 -0
  64. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/cli/commands/setup.py +0 -0
  65. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/cli/main.py +0 -0
  66. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/cli/terminal.py +0 -0
  67. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/config.py +0 -0
  68. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/console.py +0 -0
  69. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/context.py +0 -0
  70. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/context_dependent.py +0 -0
  71. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/__init__.py +0 -0
  72. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/agent_types.py +0 -0
  73. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/agent_utils.py +0 -0
  74. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/error_handling.py +0 -0
  75. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/exceptions.py +0 -0
  76. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/types.py +0 -0
  77. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/core/validation.py +0 -0
  78. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/eval/__init__.py +0 -0
  79. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/event_progress.py +0 -0
  80. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/executor/__init__.py +0 -0
  81. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/executor/decorator_registry.py +0 -0
  82. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/executor/executor.py +0 -0
  83. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/executor/task_registry.py +0 -0
  84. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/executor/temporal.py +0 -0
  85. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/executor/workflow.py +0 -0
  86. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/executor/workflow_signal.py +0 -0
  87. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/human_input/__init__.py +0 -0
  88. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/human_input/types.py +0 -0
  89. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/logging/__init__.py +0 -0
  90. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/logging/events.py +0 -0
  91. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/logging/json_serializer.py +0 -0
  92. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/logging/listeners.py +0 -0
  93. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/logging/logger.py +0 -0
  94. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/logging/rich_progress.py +0 -0
  95. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/logging/tracing.py +0 -0
  96. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp/__init__.py +0 -0
  97. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp/gen_client.py +0 -0
  98. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp/mcp_activity.py +0 -0
  99. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
  100. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp/mcp_agent_server.py +0 -0
  101. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
  102. {fast_agent_mcp-0.1.7/src/mcp_agent/telemetry → fast_agent_mcp-0.1.9/src/mcp_agent/mcp/prompts}/__init__.py +0 -0
  103. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp/stdio.py +0 -0
  104. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp_server/__init__.py +0 -0
  105. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp_server/agent_server.py +0 -0
  106. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/mcp_server_registry.py +0 -0
  107. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/progress_display.py +0 -0
  108. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -0
  109. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
  110. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
  111. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  112. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/internal/job.py +0 -0
  113. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/internal/prompt_category.py +0 -0
  114. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/internal/prompt_sizing.py +0 -0
  115. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/internal/social.py +0 -0
  116. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -0
  117. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
  118. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/researcher/researcher-imp.py +0 -0
  119. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
  120. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/workflows/agent_build.py +0 -0
  121. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
  122. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
  123. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
  124. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
  125. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
  126. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/resources/examples/workflows/sse.py +0 -0
  127. {fast_agent_mcp-0.1.7/src/mcp_agent/workflows → fast_agent_mcp-0.1.9/src/mcp_agent/telemetry}/__init__.py +0 -0
  128. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/telemetry/usage_tracking.py +0 -0
  129. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/ui/console_display.py +0 -0
  130. {fast_agent_mcp-0.1.7/src/mcp_agent/workflows/embedding → fast_agent_mcp-0.1.9/src/mcp_agent/workflows}/__init__.py +0 -0
  131. {fast_agent_mcp-0.1.7/src/mcp_agent/workflows/evaluator_optimizer → fast_agent_mcp-0.1.9/src/mcp_agent/workflows/embedding}/__init__.py +0 -0
  132. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/embedding/embedding_base.py +0 -0
  133. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/embedding/embedding_cohere.py +0 -0
  134. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/embedding/embedding_openai.py +0 -0
  135. {fast_agent_mcp-0.1.7/src/mcp_agent/workflows/intent_classifier → fast_agent_mcp-0.1.9/src/mcp_agent/workflows/evaluator_optimizer}/__init__.py +0 -0
  136. {fast_agent_mcp-0.1.7/src/mcp_agent/workflows/llm → fast_agent_mcp-0.1.9/src/mcp_agent/workflows/intent_classifier}/__init__.py +0 -0
  137. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -0
  138. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -0
  139. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -0
  140. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -0
  141. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -0
  142. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -0
  143. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -0
  144. {fast_agent_mcp-0.1.7/src/mcp_agent/workflows/orchestrator → fast_agent_mcp-0.1.9/src/mcp_agent/workflows/llm}/__init__.py +0 -0
  145. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/llm/llm_selector.py +0 -0
  146. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/llm/prompt_utils.py +0 -0
  147. {fast_agent_mcp-0.1.7/src/mcp_agent/workflows/parallel → fast_agent_mcp-0.1.9/src/mcp_agent/workflows/orchestrator}/__init__.py +0 -0
  148. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/orchestrator/orchestrator_models.py +0 -0
  149. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/orchestrator/orchestrator_prompts.py +0 -0
  150. {fast_agent_mcp-0.1.7/src/mcp_agent/workflows/router → fast_agent_mcp-0.1.9/src/mcp_agent/workflows/parallel}/__init__.py +0 -0
  151. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/parallel/fan_in.py +0 -0
  152. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/parallel/fan_out.py +0 -0
  153. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/parallel/parallel_llm.py +0 -0
  154. {fast_agent_mcp-0.1.7/src/mcp_agent/workflows/swarm → fast_agent_mcp-0.1.9/src/mcp_agent/workflows/router}/__init__.py +0 -0
  155. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/router/router_base.py +0 -0
  156. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/router/router_embedding.py +0 -0
  157. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/router/router_embedding_cohere.py +0 -0
  158. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/router/router_embedding_openai.py +0 -0
  159. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/swarm/swarm.py +0 -0
  160. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/swarm/swarm_anthropic.py +0 -0
  161. {fast_agent_mcp-0.1.7 → fast_agent_mcp-0.1.9}/src/mcp_agent/workflows/swarm/swarm_openai.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fast-agent-mcp
- Version: 0.1.7
+ Version: 0.1.9
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
  License: Apache License
@@ -209,10 +209,10 @@ Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
  Requires-Python: >=3.10
- Requires-Dist: anthropic>=0.42.0
+ Requires-Dist: aiohttp>=3.11.13
+ Requires-Dist: anthropic>=0.49.0
  Requires-Dist: fastapi>=0.115.6
- Requires-Dist: instructor>=1.7.2
- Requires-Dist: mcp==1.2.1
+ Requires-Dist: mcp>=1.4.1
  Requires-Dist: numpy>=2.2.1
  Requires-Dist: openai>=1.63.2
  Requires-Dist: opentelemetry-distro>=0.50b0
@@ -224,11 +224,17 @@ Requires-Dist: pyyaml>=6.0.2
  Requires-Dist: rich>=13.9.4
  Requires-Dist: scikit-learn>=1.6.0
  Requires-Dist: typer>=0.15.1
- Provides-Extra: anthropic
- Requires-Dist: anthropic>=0.42.0; extra == 'anthropic'
- Requires-Dist: instructor[anthropic]>=1.7.2; extra == 'anthropic'
  Provides-Extra: cohere
  Requires-Dist: cohere>=5.13.4; extra == 'cohere'
+ Provides-Extra: dev
+ Requires-Dist: anthropic>=0.42.0; extra == 'dev'
+ Requires-Dist: pre-commit>=4.0.1; extra == 'dev'
+ Requires-Dist: pydantic>=2.10.4; extra == 'dev'
+ Requires-Dist: pytest-asyncio>=0.21.1; extra == 'dev'
+ Requires-Dist: pytest>=7.4.0; extra == 'dev'
+ Requires-Dist: pyyaml>=6.0.2; extra == 'dev'
+ Requires-Dist: ruff>=0.8.4; extra == 'dev'
+ Requires-Dist: tomli>=2.2.1; extra == 'dev'
  Provides-Extra: openai
  Requires-Dist: openai>=1.58.1; extra == 'openai'
  Provides-Extra: temporal
@@ -253,6 +259,10 @@ The simple declarative syntax lets you concentrate on composing your Prompts and

  Evaluate how different models handle Agent and MCP Server calling tasks, then build multi-model workflows using the best provider for each task.

+ `fast-agent` is now multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints (for supported models), via Prompts and MCP Tool Call results.
+
+ > [!TIP] > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site.
+
  ### Agent Application Development

  Prompts and configurations that define your Agent Applications are stored in simple files, with minimal boilerplate, enabling simple management and version control.
@@ -582,6 +592,19 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
  )
  ```

+ ### Multimodal Support
+
+ Add Resources to prompts using either the inbuilt `prompt-server` or MCP Types directly. Convenience class are made available to do so simply, for example:
+
+ #### MCP Tool Result Conversion
+
+ LLM APIs have restrictions on the content types that can be returned as Tool Calls/Function results via their Chat Completions API's:
+
+ - OpenAI supports Text
+ - Anthropic supports Text and Image
+
+ For MCP Tool Results, `ImageResources` and `EmbeddedResources` are converted to User Messages and added to the conversation.
+
  ### Prompts

  MCP Prompts are supported with `apply_prompt(name,arguments)`, which always returns an Assistant Message. If the last message from the MCP Server is a 'User' message, it is sent to the LLM for processing. Prompts applied to the Agent's Context are retained - meaning that with `use_history=False`, Agents can act as finely tuned responders.
@@ -599,8 +622,9 @@ Prompts can also be applied interactively through the interactive interface by u

  ### llmindset.co.uk fork:

+ - Addition of MCP Prompts including Prompt Server and agent save/replay ability.
  - Overhaul of Eval/Opt for Conversation Management
- - Remove instructor use for Orchestrator
+ - Removed instructor/double-llm calling - native structured outputs for OAI.
  - Improved handling of Parallel/Fan-In and respose option
  - XML based generated prompts
  - "FastAgent" style prototyping, with per-agent models
@@ -619,4 +643,8 @@ Prompts can also be applied interactively through the interactive interface by u
  - Declarative workflows
  - Numerous defect fixes

- ### Features to add.
+ ### Features to add (Commmitted)
+
+ - Run Agent as MCP Server, with interop
+ - Multi-part content types supporing Vision, PDF and multi-part Text.
+ - Improved test automation (supported by prompt_server.py and augmented_llm_playback.py)
README.md

@@ -16,6 +16,10 @@ The simple declarative syntax lets you concentrate on composing your Prompts and

  Evaluate how different models handle Agent and MCP Server calling tasks, then build multi-model workflows using the best provider for each task.

+ `fast-agent` is now multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints (for supported models), via Prompts and MCP Tool Call results.
+
+ > [!TIP] > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site.
+
  ### Agent Application Development

  Prompts and configurations that define your Agent Applications are stored in simple files, with minimal boilerplate, enabling simple management and version control.
@@ -345,6 +349,19 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
  )
  ```

+ ### Multimodal Support
+
+ Add Resources to prompts using either the inbuilt `prompt-server` or MCP Types directly. Convenience class are made available to do so simply, for example:
+
+ #### MCP Tool Result Conversion
+
+ LLM APIs have restrictions on the content types that can be returned as Tool Calls/Function results via their Chat Completions API's:
+
+ - OpenAI supports Text
+ - Anthropic supports Text and Image
+
+ For MCP Tool Results, `ImageResources` and `EmbeddedResources` are converted to User Messages and added to the conversation.
+
  ### Prompts

  MCP Prompts are supported with `apply_prompt(name,arguments)`, which always returns an Assistant Message. If the last message from the MCP Server is a 'User' message, it is sent to the LLM for processing. Prompts applied to the Agent's Context are retained - meaning that with `use_history=False`, Agents can act as finely tuned responders.
@@ -362,8 +379,9 @@ Prompts can also be applied interactively through the interactive interface by u

  ### llmindset.co.uk fork:

+ - Addition of MCP Prompts including Prompt Server and agent save/replay ability.
  - Overhaul of Eval/Opt for Conversation Management
- - Remove instructor use for Orchestrator
+ - Removed instructor/double-llm calling - native structured outputs for OAI.
  - Improved handling of Parallel/Fan-In and respose option
  - XML based generated prompts
  - "FastAgent" style prototyping, with per-agent models
@@ -382,4 +400,8 @@ Prompts can also be applied interactively through the interactive interface by u
  - Declarative workflows
  - Numerous defect fixes

- ### Features to add.
+ ### Features to add (Commmitted)
+
+ - Run Agent as MCP Server, with interop
+ - Multi-part content types supporing Vision, PDF and multi-part Text.
+ - Improved test automation (supported by prompt_server.py and augmented_llm_playback.py)
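The multimodal notes above pair with the new `Prompt` convenience class (`src/mcp_agent/core/prompt.py`, added in 0.1.9) and the `agent(Prompt.user('message'))` call style documented in the `agent_app.py` changes further down. A minimal sketch of what that usage might look like; the import path and the assumption that `Prompt.user` accepts a file `Path` alongside text are inferred, not confirmed by this diff:

```python
import asyncio
from pathlib import Path

from mcp_agent.core.fastagent import FastAgent
from mcp_agent.core.prompt import Prompt  # assumed import path for the new helper

fast = FastAgent("multimodal-example")


@fast.agent(instruction="Describe any attached resources.")
async def main():
    async with fast.run() as agent:
        # Plain text, exactly as in earlier releases
        await agent.send("Good Evening!")

        # Multipart user message; attaching an image Path is an assumption
        # based on the Images/PDFs support described above.
        await agent.send(Prompt.user("What is in this picture?", Path("cat.png")))


if __name__ == "__main__":
    asyncio.run(main())
```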
pyproject.toml

@@ -1,6 +1,6 @@
  [project]
  name = "fast-agent-mcp"
- version = "0.1.7"
+ version = "0.1.9"
  description = "Define, Prompt and Test MCP enabled Agents and Workflows"
  readme = "README.md"
  license = { file = "LICENSE" }
@@ -16,8 +16,7 @@ classifiers = [
  requires-python = ">=3.10"
  dependencies = [
  "fastapi>=0.115.6",
- "instructor>=1.7.2",
- "mcp==1.2.1",
+ "mcp>=1.4.1",
  "opentelemetry-distro>=0.50b0",
  "opentelemetry-exporter-otlp-proto-http>=1.29.0",
  "pydantic-settings>=2.7.0",
@@ -27,25 +26,32 @@ dependencies = [
  "typer>=0.15.1",
  "numpy>=2.2.1",
  "scikit-learn>=1.6.0",
- "anthropic>=0.42.0",
+ "anthropic>=0.49.0",
  "openai>=1.63.2",
  "prompt-toolkit>=3.0.50",
+ "aiohttp>=3.11.13",
  ]

  [project.optional-dependencies]
  temporal = [
  "temporalio>=1.8.0",
  ]
- anthropic = [
- "anthropic>=0.42.0",
- "instructor[anthropic]>=1.7.2",
- ]
  openai = [
  "openai>=1.58.1",
  ]
  cohere = [
  "cohere>=5.13.4",
  ]
+ dev = [
+ "anthropic>=0.42.0",
+ "pre-commit>=4.0.1",
+ "pydantic>=2.10.4",
+ "pyyaml>=6.0.2",
+ "ruff>=0.8.4",
+ "tomli>=2.2.1",
+ "pytest>=7.4.0",
+ "pytest-asyncio>=0.21.1",
+ ]

  [build-system]
  requires = ["hatchling"]
@@ -66,14 +72,27 @@ include = [
  "src/mcp_agent/resources/**/*.csv",
  ]

+ [tool.pytest.ini_options]
+ asyncio_mode = "strict"
+ asyncio_default_fixture_loop_scope = "function"
+ markers = [
+ "e2e: tests that connect to external resources (llms)",
+ "integration: marks tests as integration tests",
+ "simulated_endpoints: marks tests that use simulated external endpoints"
+ ]
+ # Other pytest options can go here too
+ testpaths = ["test", "integration_tests"]
+
  [dependency-groups]
  dev = [
- "anthropic>=0.42.0",
+ "anthropic>=0.49.0",
  "pre-commit>=4.0.1",
  "pydantic>=2.10.4",
  "pyyaml>=6.0.2",
  "ruff>=0.8.4",
  "tomli>=2.2.1",
+ "pytest>=7.4.0",
+ "pytest-asyncio>=0.21.1",
  ]

  [project.scripts]
@@ -81,6 +100,7 @@ fast-agent = "mcp_agent.cli.__main__:app"
  fast_agent = "mcp_agent.cli.__main__:app"
  fastagent = "mcp_agent.cli.__main__:app"
  silsila = "mcp_agent.cli.__main__:app"
+ prompt-server = "mcp_agent.mcp.prompts.__main__:main"

  [tool.setuptools.package-data]
  mcp_agent = [
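The new `[tool.pytest.ini_options]` table registers the `e2e`, `integration`, and `simulated_endpoints` markers, turns on strict asyncio mode, and points `testpaths` at `test` and `integration_tests`. A small sketch of a test that opts into those settings; the test name and body are illustrative only:

```python
import pytest


# "e2e" is one of the markers registered in pyproject.toml; with
# asyncio_mode = "strict", async tests must carry an explicit asyncio marker.
@pytest.mark.e2e
@pytest.mark.asyncio
async def test_llm_round_trip():
    # Illustrative placeholder; a real e2e test would drive an agent against
    # a live LLM endpoint here.
    assert True
```

Because the markers are registered, a run such as `pytest -m "not e2e"` skips the network-dependent tests without unknown-marker warnings.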
src/mcp_agent/agents/agent.py

@@ -320,18 +320,20 @@ class Agent(MCPAggregator):
  ],
  )

- async def apply_prompt(self, prompt_name: str, arguments: dict[str, str] = None) -> str:
+ async def apply_prompt(
+ self, prompt_name: str, arguments: dict[str, str] = None
+ ) -> str:
  """
  Apply an MCP Server Prompt by name and return the assistant's response.
  Will search all available servers for the prompt if not namespaced.
-
+
  If the last message in the prompt is from a user, this will automatically
  generate an assistant response to ensure we always end with an assistant message.

  Args:
  prompt_name: The name of the prompt to apply
  arguments: Optional dictionary of string arguments to pass to the prompt template
-
+
  Returns:
  The assistant's response or error message
  """
@@ -357,11 +359,3 @@ class Agent(MCPAggregator):
  # The LLM will automatically generate a response if needed
  result = await self._llm.apply_prompt_template(prompt_result, display_name)
  return result
-
- # For backward compatibility
- async def load_prompt(self, prompt_name: str, arguments: dict[str, str] = None) -> str:
- """
- Legacy method - use apply_prompt instead.
- This is maintained for backward compatibility.
- """
- return await self.apply_prompt(prompt_name, arguments)
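With the `load_prompt` alias removed, `apply_prompt` is the single entry point for applying MCP Server prompts to an `Agent`. A minimal sketch of a call site; the prompt name and arguments are illustrative:

```python
from mcp_agent.agents.agent import Agent


async def size_something(agent: Agent) -> str:
    # apply_prompt searches all attached MCP servers for the named prompt
    # unless it is namespaced; the arguments fill the prompt template, and
    # the result always ends with an assistant message.
    return await agent.apply_prompt("sizing-prompt", {"object_to_size": "the moon"})
```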
src/mcp_agent/core/agent_app.py

@@ -2,9 +2,10 @@
  Main application wrapper for interacting with agents.
  """

- from typing import Optional, Dict, TYPE_CHECKING
+ from typing import Optional, Dict, Union, TYPE_CHECKING

  from mcp_agent.app import MCPApp
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
  from mcp_agent.progress_display import progress_display
  from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
  from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
@@ -37,16 +38,80 @@ class AgentApp:
  # Optional: set default agent for direct calls
  self._default = next(iter(agents)) if agents else None

- async def send(self, agent_name: str, message: Optional[str]) -> str:
- """Core message sending"""
- if agent_name not in self._agents:
- raise ValueError(f"No agent named '{agent_name}'")
+ async def send_prompt(
+ self, prompt: PromptMessageMultipart, agent_name: Optional[str] = None
+ ) -> str:
+ """
+ Send a PromptMessageMultipart to an agent
+
+ Args:
+ prompt: The PromptMessageMultipart to send
+ agent_name: The name of the agent to send to (uses default if None)
+
+ Returns:
+ The agent's response as a string
+ """
+ target = agent_name or self._default
+ if not target:
+ raise ValueError("No default agent available")
+
+ if target not in self._agents:
+ raise ValueError(f"No agent named '{target}'")
+
+ proxy = self._agents[target]
+ return await proxy.send_prompt(prompt)
+
+ async def send(
+ self,
+ message: Union[str, PromptMessageMultipart] = None,
+ agent_name: Optional[str] = None,
+ ) -> str:
+ """
+ Send a message to the default agent or specified agent
+
+ Args:
+ message: Either a string message or a PromptMessageMultipart object
+ agent_name: The name of the agent to send to (uses default if None)

- if not message or "" == message:
- return await self.prompt(agent_name)
+ Returns:
+ The agent's response as a string
+ """
+ target = agent_name or self._default
+ if not target:
+ raise ValueError("No default agent available")

- proxy = self._agents[agent_name]
- return await proxy.generate_str(message)
+ if target not in self._agents:
+ raise ValueError(f"No agent named '{target}'")
+
+ proxy = self._agents[target]
+ return await proxy.send(message)
+
+ async def apply_prompt(
+ self,
+ prompt_name: str,
+ arguments: Optional[dict[str, str]] = None,
+ agent_name: Optional[str] = None,
+ ) -> str:
+ """
+ Apply an MCP Server Prompt by name and return the assistant's response
+
+ Args:
+ prompt_name: The name of the prompt to apply
+ arguments: Optional dictionary of string arguments to pass to the prompt template
+ agent_name: The name of the agent to use (uses default if None)
+
+ Returns:
+ The assistant's response as a string
+ """
+ target = agent_name or self._default
+ if not target:
+ raise ValueError("No default agent available")
+
+ if target not in self._agents:
+ raise ValueError(f"No agent named '{target}'")
+
+ proxy = self._agents[target]
+ return await proxy.apply_prompt(prompt_name, arguments)

  async def prompt(self, agent_name: Optional[str] = None, default: str = "") -> str:
  """
@@ -177,9 +242,6 @@ class AgentApp:
  from rich import print as rich_print
  from rich.table import Table
  from rich.console import Console
- from prompt_toolkit import PromptSession
- from prompt_toolkit.formatted_text import HTML
- from prompt_toolkit.completion import WordCompleter

  console = Console()

@@ -325,8 +387,11 @@ class AgentApp:
  )

  # Ask user to select one
- prompt_session = PromptSession()
- selection = await prompt_session.prompt_async(
+ from mcp_agent.core.enhanced_prompt import (
+ get_selection_input,
+ )
+
+ selection = await get_selection_input(
  "Enter prompt number to select: ", default="1"
  )

@@ -381,12 +446,16 @@ class AgentApp:
  prompt_names = [
  str(i + 1) for i in range(len(all_prompts))
  ]
- completer = WordCompleter(prompt_names)

  # Ask user to select a prompt
- prompt_session = PromptSession(completer=completer)
- selection = await prompt_session.prompt_async(
- "Enter prompt number to select (or press Enter to cancel): "
+ from mcp_agent.core.enhanced_prompt import (
+ get_selection_input,
+ )
+
+ selection = await get_selection_input(
+ "Enter prompt number to select (or press Enter to cancel): ",
+ options=prompt_names,
+ allow_cancel=True,
  )

  # Make cancellation easier
@@ -437,37 +506,38 @@ class AgentApp:

  # Collect required arguments
  for arg_name in required_args:
- # Show description if available
+ # Get description if available
  description = arg_descriptions.get(arg_name, "")
- if description:
- rich_print(
- f" [dim]{arg_name}: {description}[/dim]"
- )
-
+
  # Collect required argument value
- arg_value = await PromptSession().prompt_async(
- HTML(
- f"Enter value for <ansibrightcyan>{arg_name}</ansibrightcyan> (required): "
- )
+ from mcp_agent.core.enhanced_prompt import (
+ get_argument_input,
+ )
+
+ arg_value = await get_argument_input(
+ arg_name=arg_name,
+ description=description,
+ required=True,
  )
- # Add to arg_values
- arg_values[arg_name] = arg_value
+ # Add to arg_values if a value was provided
+ if arg_value is not None:
+ arg_values[arg_name] = arg_value

  # Only include non-empty values for optional arguments
  if optional_args:
  # Collect optional arguments
  for arg_name in optional_args:
- # Show description if available
+ # Get description if available
  description = arg_descriptions.get(arg_name, "")
- if description:
- rich_print(
- f" [dim]{arg_name}: {description}[/dim]"
- )
-
- arg_value = await PromptSession().prompt_async(
- HTML(
- f"Enter value for <ansibrightcyan>{arg_name}</ansibrightcyan> (optional, press Enter to skip): "
- )
+
+ from mcp_agent.core.enhanced_prompt import (
+ get_argument_input,
+ )
+
+ arg_value = await get_argument_input(
+ arg_name=arg_name,
+ description=description,
+ required=False,
  )
  # Only include non-empty values for optional arguments
  if arg_value:
@@ -501,7 +571,7 @@ class AgentApp:
  if user_input == "":
  continue

- result = await self.send(agent, user_input)
+ result = await self.send(user_input, agent)

  # Check if current agent is a chain that should continue with final agent
  if agent_types.get(agent) == "Chain":
@@ -527,10 +597,21 @@ class AgentApp:
  return self._agents[name]

  async def __call__(
- self, message: Optional[str] = "", agent_name: Optional[str] = None
+ self,
+ message: Optional[Union[str, PromptMessageMultipart]] = None,
+ agent_name: Optional[str] = None,
  ) -> str:
- """Support: agent('message')"""
+ """
+ Support: agent('message') or agent(Prompt.user('message'))
+
+ Args:
+ message: Either a string message or a PromptMessageMultipart object
+ agent_name: The name of the agent to use (uses default if None)
+
+ Returns:
+ The agent's response as a string
+ """
  target = agent_name or self._default
  if not target:
  raise ValueError("No default agent available")
- return await self.send(target, message)
+ return await self.send(message, target)
src/mcp_agent/core/decorators.py

@@ -256,7 +256,8 @@ def parallel(
  self.agents[passthrough_name] = {
  "config": AgentConfig(
  name=passthrough_name,
- instruction=f"Passthrough fan-in for {name}",
+ model="passthrough",
+ instruction=f"This agent combines the results from the fan-out agents verbatim. {name}",
  servers=[],
  use_history=use_history,
  ),
@@ -452,4 +453,4 @@ def passthrough(
  name=name,
  use_history=use_history,
  )
- return decorator
+ return decorator
src/mcp_agent/core/enhanced_prompt.py

@@ -2,13 +2,13 @@
  Enhanced prompt functionality with advanced prompt_toolkit features.
  """

- from typing import List
+ from typing import List, Optional
  from importlib.metadata import version
  from prompt_toolkit import PromptSession
  from prompt_toolkit.formatted_text import HTML
  from prompt_toolkit.history import InMemoryHistory
  from prompt_toolkit.key_binding import KeyBindings
- from prompt_toolkit.completion import Completer, Completion
+ from prompt_toolkit.completion import Completer, Completion, WordCompleter
  from prompt_toolkit.lexers import PygmentsLexer
  from prompt_toolkit.filters import Condition
  from prompt_toolkit.styles import Style
@@ -330,6 +330,110 @@ async def get_enhanced_input(
  # Log and gracefully handle other exceptions
  print(f"\nInput error: {type(e).__name__}: {e}")
  return "STOP"
+ finally:
+ # Ensure the prompt session is properly cleaned up
+ # This is especially important on Windows to prevent resource leaks
+ if session.app.is_running:
+ session.app.exit()
+
+
+ async def get_selection_input(
+ prompt_text: str,
+ options: List[str] = None,
+ default: str = None,
+ allow_cancel: bool = True,
+ complete_options: bool = True,
+ ) -> Optional[str]:
+ """
+ Display a selection prompt and return the user's selection.
+
+ Args:
+ prompt_text: Text to display as the prompt
+ options: List of valid options (for auto-completion)
+ default: Default value if user presses enter
+ allow_cancel: Whether to allow cancellation with empty input
+ complete_options: Whether to use the options for auto-completion
+
+ Returns:
+ Selected value, or None if cancelled
+ """
+ try:
+ # Initialize completer if options provided and completion requested
+ completer = WordCompleter(options) if options and complete_options else None
+
+ # Create prompt session
+ prompt_session = PromptSession(completer=completer)
+
+ try:
+ # Get user input
+ selection = await prompt_session.prompt_async(
+ prompt_text, default=default or ""
+ )
+
+ # Handle cancellation
+ if allow_cancel and not selection.strip():
+ return None
+
+ return selection
+ finally:
+ # Ensure prompt session cleanup
+ if prompt_session.app.is_running:
+ prompt_session.app.exit()
+ except (KeyboardInterrupt, EOFError):
+ return None
+ except Exception as e:
+ rich_print(f"\n[red]Error getting selection: {e}[/red]")
+ return None
+
+
+ async def get_argument_input(
+ arg_name: str,
+ description: str = None,
+ required: bool = True,
+ ) -> Optional[str]:
+ """
+ Prompt for an argument value with formatting and help text.
+
+ Args:
+ arg_name: Name of the argument
+ description: Optional description of the argument
+ required: Whether this argument is required
+
+ Returns:
+ Input value, or None if cancelled/skipped
+ """
+ # Format the prompt differently based on whether it's required
+ required_text = "(required)" if required else "(optional, press Enter to skip)"
+
+ # Show description if available
+ if description:
+ rich_print(f" [dim]{arg_name}: {description}[/dim]")
+
+ prompt_text = HTML(
+ f"Enter value for <ansibrightcyan>{arg_name}</ansibrightcyan> {required_text}: "
+ )
+
+ # Create prompt session
+ prompt_session = PromptSession()
+
+ try:
+ # Get user input
+ arg_value = await prompt_session.prompt_async(prompt_text)
+
+ # For optional arguments, empty input means skip
+ if not required and not arg_value:
+ return None
+
+ return arg_value
+ except (KeyboardInterrupt, EOFError):
+ return None
+ except Exception as e:
+ rich_print(f"\n[red]Error getting input: {e}[/red]")
+ return None
+ finally:
+ # Ensure prompt session cleanup
+ if prompt_session.app.is_running:
+ prompt_session.app.exit()


  async def handle_special_commands(command, agent_app=None):
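These two helpers centralise the prompt_toolkit session handling that `agent_app.py` previously performed inline (and now imports, as shown above). A minimal usage sketch; the option values and argument names are illustrative:

```python
from typing import Optional

from mcp_agent.core.enhanced_prompt import get_argument_input, get_selection_input


async def pick_prompt_and_topic() -> Optional[dict]:
    # Numbered selection with completion; empty input cancels and returns None.
    choice = await get_selection_input(
        "Enter prompt number to select (or press Enter to cancel): ",
        options=["1", "2", "3"],
        allow_cancel=True,
    )
    if choice is None:
        return None

    # Required argument: shows the description (if any) and returns the typed value.
    topic = await get_argument_input("topic", description="Subject to cover", required=True)
    # Optional argument: pressing Enter skips it and returns None.
    tone = await get_argument_input("tone", required=False)

    return {"selection": choice, "topic": topic, **({"tone": tone} if tone else {})}
```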
@@ -408,24 +512,6 @@ async def handle_special_commands(command, agent_app=None):
  )
  return True

- elif command == "SELECT_PROMPT" or (
- isinstance(command, str) and command.startswith("SELECT_PROMPT:")
- ):
- # Handle prompt selection UI (previously named "list_prompts" action)
- if agent_app:
- # If it's a specific prompt, extract the name
- prompt_name = None
- if isinstance(command, str) and command.startswith("SELECT_PROMPT:"):
- prompt_name = command.split(":", 1)[1].strip()
-
- # Return a dictionary with a select_prompt action to be handled by the caller
- return {"select_prompt": True, "prompt_name": prompt_name}
- else:
- rich_print(
- "[yellow]Prompt selection is not available outside of an agent context[/yellow]"
- )
- return True
-
  elif isinstance(command, str) and command.startswith("SWITCH:"):
  agent_name = command.split(":", 1)[1]
  if agent_name in available_agents: