fast-agent-mcp 0.2.48__tar.gz → 0.2.50__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

Files changed (242)
  1. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/PKG-INFO +4 -4
  2. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/pyproject.toml +4 -4
  3. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/config.py +3 -2
  4. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/event_progress.py +18 -0
  5. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/model_database.py +34 -0
  6. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/model_factory.py +1 -0
  7. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_aliyun.py +7 -8
  8. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_azure.py +1 -1
  9. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_deepseek.py +7 -8
  10. fast_agent_mcp-0.2.50/src/mcp_agent/llm/providers/augmented_llm_groq.py +103 -0
  11. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_openai.py +22 -6
  12. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_openrouter.py +10 -15
  13. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_xai.py +8 -8
  14. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/multipart_converter_openai.py +35 -18
  15. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/rich_progress.py +44 -12
  16. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/helpers/content_helpers.py +29 -0
  17. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/mcp_agent_client_session.py +5 -3
  18. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/mcp_aggregator.py +32 -1
  19. fast_agent_mcp-0.2.48/src/mcp_agent/llm/providers/augmented_llm_groq.py +0 -30
  20. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/.gitignore +0 -0
  21. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/LICENSE +0 -0
  22. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/README.md +0 -0
  23. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/azure-openai/fastagent.config.yaml +0 -0
  24. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/bedrock/fast-agent.config.yaml +0 -0
  25. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/custom-agents/agent.py +0 -0
  26. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/custom-agents/fastagent.config.yaml +0 -0
  27. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/data-analysis/analysis-campaign.py +0 -0
  28. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/data-analysis/analysis.py +0 -0
  29. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/data-analysis/fastagent.config.yaml +0 -0
  30. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  31. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/elicitation_account_server.py +0 -0
  32. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
  33. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/elicitation_game_server.py +0 -0
  34. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/fastagent.config.yaml +0 -0
  35. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
  36. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/forms_demo.py +0 -0
  37. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/game_character.py +0 -0
  38. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/game_character_handler.py +0 -0
  39. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/tool_call.py +0 -0
  40. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/mcp-filtering/fastagent.config.yaml +0 -0
  41. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/mcp-filtering/fastagent.secrets.yaml.example +0 -0
  42. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/mcp-filtering/mcp_server.py +0 -0
  43. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/mcp-filtering/test_mcp_filtering.py +0 -0
  44. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/state-transfer/agent_one.py +0 -0
  45. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/state-transfer/agent_two.py +0 -0
  46. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  47. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  48. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/cat.png +0 -0
  49. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/example1.py +0 -0
  50. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/example2.py +0 -0
  51. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/example3.py +0 -0
  52. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/fastagent.config.yaml +0 -0
  53. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/otel/agent.py +0 -0
  54. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/otel/agent2.py +0 -0
  55. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/otel/docker-compose.yaml +0 -0
  56. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/otel/fastagent.config.yaml +0 -0
  57. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/researcher/fastagent.config.yaml +0 -0
  58. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/researcher/researcher-eval.py +0 -0
  59. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/researcher/researcher-imp.py +0 -0
  60. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/researcher/researcher.py +0 -0
  61. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/.env.sample +0 -0
  62. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/Makefile +0 -0
  63. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/README.md +0 -0
  64. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/agent.py +0 -0
  65. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/demo_images/clam.jpg +0 -0
  66. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/demo_images/crab.png +0 -0
  67. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/demo_images/shrimp.png +0 -0
  68. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/docker-compose.yml +0 -0
  69. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/fastagent.config.yaml +0 -0
  70. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/image_demo.py +0 -0
  71. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/mcp_server/Dockerfile +0 -0
  72. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
  73. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/mcp_server/mcp_server.py +0 -0
  74. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/simple_agent.py +0 -0
  75. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
  76. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
  77. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
  78. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/chaining.py +0 -0
  79. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/evaluator.py +0 -0
  80. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/fastagent.config.yaml +0 -0
  81. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/graded_report.md +0 -0
  82. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/human_input.py +0 -0
  83. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/orchestrator.py +0 -0
  84. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/parallel.py +0 -0
  85. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/router.py +0 -0
  86. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/short_story.md +0 -0
  87. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/examples/workflows/short_story.txt +0 -0
  88. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/hatch_build.py +0 -0
  89. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/__init__.py +0 -0
  90. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/__init__.py +0 -0
  91. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/agent.py +0 -0
  92. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/base_agent.py +0 -0
  93. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/__init__.py +0 -0
  94. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/chain_agent.py +0 -0
  95. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/evaluator_optimizer.py +0 -0
  96. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/iterative_planner.py +0 -0
  97. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/orchestrator_agent.py +0 -0
  98. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/orchestrator_models.py +0 -0
  99. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/orchestrator_prompts.py +0 -0
  100. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/parallel_agent.py +0 -0
  101. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/router_agent.py +0 -0
  102. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/app.py +0 -0
  103. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/__init__.py +0 -0
  104. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/__main__.py +0 -0
  105. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/check_config.py +0 -0
  106. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/go.py +0 -0
  107. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/quickstart.py +0 -0
  108. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/server_helpers.py +0 -0
  109. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/setup.py +0 -0
  110. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/url_parser.py +0 -0
  111. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/constants.py +0 -0
  112. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/main.py +0 -0
  113. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/terminal.py +0 -0
  114. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/console.py +0 -0
  115. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/context.py +0 -0
  116. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/context_dependent.py +0 -0
  117. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/__init__.py +0 -0
  118. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/agent_app.py +0 -0
  119. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/agent_types.py +0 -0
  120. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/direct_decorators.py +0 -0
  121. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/direct_factory.py +0 -0
  122. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/enhanced_prompt.py +0 -0
  123. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/error_handling.py +0 -0
  124. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/exceptions.py +0 -0
  125. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/fastagent.py +0 -0
  126. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/interactive_prompt.py +0 -0
  127. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/mcp_content.py +0 -0
  128. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/mermaid_utils.py +0 -0
  129. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/prompt.py +0 -0
  130. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/request_params.py +0 -0
  131. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/usage_display.py +0 -0
  132. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/validation.py +0 -0
  133. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/executor/__init__.py +0 -0
  134. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/executor/executor.py +0 -0
  135. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/executor/task_registry.py +0 -0
  136. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/executor/workflow_signal.py +0 -0
  137. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/__init__.py +0 -0
  138. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/elicitation_form.py +0 -0
  139. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/elicitation_forms.py +0 -0
  140. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/elicitation_handler.py +0 -0
  141. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/elicitation_state.py +0 -0
  142. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/form_fields.py +0 -0
  143. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/handler.py +0 -0
  144. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/simple_form.py +0 -0
  145. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/types.py +0 -0
  146. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/__init__.py +0 -0
  147. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm.py +0 -0
  148. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm_passthrough.py +0 -0
  149. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm_playback.py +0 -0
  150. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm_silent.py +0 -0
  151. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm_slow.py +0 -0
  152. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/memory.py +0 -0
  153. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/prompt_utils.py +0 -0
  154. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/provider_key_manager.py +0 -0
  155. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/provider_types.py +0 -0
  156. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/__init__.py +0 -0
  157. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/anthropic_utils.py +0 -0
  158. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -0
  159. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_bedrock.py +0 -0
  160. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_generic.py +0 -0
  161. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_google_native.py +0 -0
  162. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_google_oai.py +0 -0
  163. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_tensorzero.py +0 -0
  164. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/google_converter.py +0 -0
  165. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/multipart_converter_anthropic.py +0 -0
  166. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/multipart_converter_tensorzero.py +0 -0
  167. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/openai_multipart.py +0 -0
  168. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/openai_utils.py +0 -0
  169. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -0
  170. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/sampling_converter_openai.py +0 -0
  171. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/sampling_converter.py +0 -0
  172. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/sampling_format_converter.py +0 -0
  173. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/usage_tracking.py +0 -0
  174. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/__init__.py +0 -0
  175. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/events.py +0 -0
  176. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/json_serializer.py +0 -0
  177. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/listeners.py +0 -0
  178. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/logger.py +0 -0
  179. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/transport.py +0 -0
  180. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/__init__.py +0 -0
  181. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/common.py +0 -0
  182. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/elicitation_factory.py +0 -0
  183. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/elicitation_handlers.py +0 -0
  184. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/gen_client.py +0 -0
  185. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/helpers/__init__.py +0 -0
  186. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/helpers/server_config_helpers.py +0 -0
  187. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/hf_auth.py +0 -0
  188. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/interfaces.py +0 -0
  189. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/logger_textio.py +0 -0
  190. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
  191. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/mime_utils.py +0 -0
  192. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompt_message_multipart.py +0 -0
  193. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompt_render.py +0 -0
  194. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompt_serialization.py +0 -0
  195. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/__init__.py +0 -0
  196. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/__main__.py +0 -0
  197. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_constants.py +0 -0
  198. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_helpers.py +0 -0
  199. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_load.py +0 -0
  200. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_server.py +0 -0
  201. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_template.py +0 -0
  202. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/resource_utils.py +0 -0
  203. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/sampling.py +0 -0
  204. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp_server/__init__.py +0 -0
  205. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp_server/agent_server.py +0 -0
  206. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp_server_registry.py +0 -0
  207. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/progress_display.py +0 -0
  208. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/py.typed +0 -0
  209. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -0
  210. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
  211. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
  212. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  213. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
  214. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
  215. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
  216. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
  217. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
  218. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/forms_demo.py +0 -0
  219. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/game_character.py +0 -0
  220. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/game_character_handler.py +0 -0
  221. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/tool_call.py +0 -0
  222. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_one.py +0 -0
  223. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_two.py +0 -0
  224. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  225. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  226. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
  227. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +0 -0
  228. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/researcher/researcher-imp.py +0 -0
  229. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
  230. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
  231. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
  232. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
  233. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/graded_report.md +0 -0
  234. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
  235. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
  236. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
  237. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
  238. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/short_story.md +0 -0
  239. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/short_story.txt +0 -0
  240. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/tools/tool_definition.py +0 -0
  241. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/ui/console_display.py +0 -0
  242. {fast_agent_mcp-0.2.48 → fast_agent_mcp-0.2.50}/src/mcp_agent/ui/console_display_legacy.py +0 -0

PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fast-agent-mcp
- Version: 0.2.48
+ Version: 0.2.50
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>
  License: Apache License
@@ -208,8 +208,8 @@ License-File: LICENSE
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
- Requires-Python: >=3.12
- Requires-Dist: a2a-sdk>=0.2.9
+ Requires-Python: >=3.13
+ Requires-Dist: a2a-sdk>=0.3.0
  Requires-Dist: aiohttp>=3.11.13
  Requires-Dist: anthropic>=0.59.0
  Requires-Dist: azure-identity>=1.14.0
@@ -232,7 +232,7 @@ Requires-Dist: pydantic>=2.10.4
  Requires-Dist: pyperclip>=1.9.0
  Requires-Dist: pyyaml>=6.0.2
  Requires-Dist: rich>=14.1.0
- Requires-Dist: tensorzero>=2025.6.3
+ Requires-Dist: tensorzero>=2025.7.5
  Requires-Dist: typer>=0.15.1
  Provides-Extra: azure
  Requires-Dist: azure-identity>=1.14.0; extra == 'azure'

pyproject.toml

@@ -1,6 +1,6 @@
  [project]
  name = "fast-agent-mcp"
- version = "0.2.48"
+ version = "0.2.50"
  description = "Define, Prompt and Test MCP enabled Agents and Workflows"
  readme = "README.md"
  license = { file = "LICENSE" }
@@ -12,7 +12,7 @@ classifiers = [
  "License :: OSI Approved :: Apache Software License",
  "Operating System :: OS Independent"
  ]
- requires-python = ">=3.12"
+ requires-python = ">=3.13"
  dependencies = [
  "fastapi>=0.115.6",
  "mcp==1.12.1",
@@ -34,9 +34,9 @@ dependencies = [
  "opentelemetry-instrumentation-mcp>=0.43.1; python_version >= '3.10' and python_version < '4.0'",
  "google-genai>=1.27.0",
  "opentelemetry-instrumentation-google-genai>=0.3b0",
- "tensorzero>=2025.6.3",
+ "tensorzero>=2025.7.5",
  "deprecated>=1.2.18",
- "a2a-sdk>=0.2.9",
+ "a2a-sdk>=0.3.0",
  "email-validator>=2.2.0",
  "pyperclip>=1.9.0",
  ]

src/mcp_agent/config.py

@@ -8,6 +8,7 @@ import re
  from pathlib import Path
  from typing import Any, Dict, List, Literal, Optional, Tuple

+ from mcp import Implementation
  from pydantic import BaseModel, ConfigDict, field_validator
  from pydantic_settings import BaseSettings, SettingsConfigDict

@@ -61,11 +62,9 @@ class MCPServerSettings(BaseModel):
  Represents the configuration for an individual server.
  """

- # TODO: saqadri - server name should be something a server can provide itself during initialization
  name: str | None = None
  """The name of the server."""

- # TODO: saqadri - server description should be something a server can provide itself during initialization
  description: str | None = None
  """The description of the server."""

@@ -108,6 +107,8 @@ class MCPServerSettings(BaseModel):
  cwd: str | None = None
  """Working directory for the executed server command."""

+ implementation: Implementation | None = None
+

  class MCPSettings(BaseModel):
  """Configuration for all MCP servers."""

src/mcp_agent/event_progress.py

@@ -20,6 +20,7 @@ class ProgressAction(str, Enum):
  PLANNING = "Planning"
  READY = "Ready"
  CALLING_TOOL = "Calling Tool"
+ TOOL_PROGRESS = "Tool Progress"
  UPDATED = "Updated"
  FINISHED = "Finished"
  SHUTDOWN = "Shutdown"
@@ -35,6 +36,8 @@ class ProgressEvent(BaseModel):
  details: Optional[str] = None
  agent_name: Optional[str] = None
  streaming_tokens: Optional[str] = None # Special field for streaming token count
+ progress: Optional[float] = None # Current progress value
+ total: Optional[float] = None # Total value for progress calculation

  def __str__(self) -> str:
  """Format the progress event for display."""
@@ -86,6 +89,12 @@ def convert_log_event(event: Event) -> Optional[ProgressEvent]:
  details = f"{server_name} ({tool_name})"
  else:
  details = f"{server_name}"
+
+ # For TOOL_PROGRESS, use progress message if available, otherwise keep default
+ if progress_action == ProgressAction.TOOL_PROGRESS:
+ progress_message = event_data.get("details", "")
+ if progress_message: # Only override if message is non-empty
+ details = progress_message

  elif "augmented_llm" in namespace:
  model = event_data.get("model", "")
@@ -104,10 +113,19 @@ def convert_log_event(event: Event) -> Optional[ProgressEvent]:
  if progress_action == ProgressAction.STREAMING:
  streaming_tokens = event_data.get("details", "")

+ # Extract progress data for TOOL_PROGRESS actions
+ progress = None
+ total = None
+ if progress_action == ProgressAction.TOOL_PROGRESS:
+ progress = event_data.get("progress")
+ total = event_data.get("total")
+
  return ProgressEvent(
  action=ProgressAction(progress_action),
  target=target or "unknown",
  details=details,
  agent_name=event_data.get("agent_name"),
  streaming_tokens=streaming_tokens,
+ progress=progress,
+ total=total,
  )
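
The new progress and total fields ride along on ProgressEvent so the display layer can render tool progress counts. A minimal sketch of the event convert_log_event now produces for a TOOL_PROGRESS action; the target, details, and numbers here are illustrative:

    from mcp_agent.event_progress import ProgressAction, ProgressEvent

    # Illustrative values; convert_log_event fills these from the event data's
    # "progress" and "total" keys as shown above.
    event = ProgressEvent(
        action=ProgressAction.TOOL_PROGRESS,
        target="filesystem (read_file)",
        details="Scanning directory",
        agent_name="researcher",
        progress=3.0,
        total=10.0,
    )
    print(event)  # __str__ formats the progress event for display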

src/mcp_agent/llm/model_database.py

@@ -22,6 +22,12 @@ class ModelParameters(BaseModel):
  tokenizes: List[str]
  """List of supported content types for tokenization"""

+ json_mode: None | str = "schema"
+ """Structured output style. 'schema', 'object' or None for unsupported """
+
+ reasoning: None | str = None
+ """Reasoning output style. 'tags' if enclosed in <thinking> tags, 'none' if not used"""
+

  class ModelDatabase:
  """Centralized model configuration database"""
@@ -87,6 +93,13 @@ class ModelDatabase:
  QWEN_STANDARD = ModelParameters(
  context_window=32000, max_output_tokens=8192, tokenizes=QWEN_MULTIMODAL
  )
+ QWEN3_REASONER = ModelParameters(
+ context_window=131072,
+ max_output_tokens=16384,
+ tokenizes=TEXT_ONLY,
+ json_mode="object",
+ reasoning="tags",
+ )

  FAST_AGENT_STANDARD = ModelParameters(
  context_window=1000000, max_output_tokens=100000, tokenizes=TEXT_ONLY
@@ -125,6 +138,13 @@ class ModelDatabase:
  context_window=65536, max_output_tokens=32768, tokenizes=TEXT_ONLY
  )

+ DEEPSEEK_DISTILL = ModelParameters(
+ context_window=131072,
+ max_output_tokens=131072,
+ tokenizes=TEXT_ONLY,
+ json_mode="object",
+ reasoning="tags",
+ )
  GEMINI_2_5_PRO = ModelParameters(
  context_window=2097152, max_output_tokens=8192, tokenizes=GOOGLE_MULTIMODAL
  )
@@ -214,6 +234,8 @@ class ModelDatabase:
  "grok-3-fast": GROK_3,
  "grok-3-mini-fast": GROK_3,
  "moonshotai/kimi-k2-instruct": KIMI_MOONSHOT,
+ "qwen/qwen3-32b": QWEN3_REASONER,
+ "deepseek-r1-distill-llama-70b": DEEPSEEK_DISTILL,
  }

  @classmethod
@@ -239,6 +261,18 @@ class ModelDatabase:
  params = cls.get_model_params(model)
  return params.tokenizes if params else None

+ @classmethod
+ def get_json_mode(cls, model: str) -> str | None:
+ """Get supported json mode (structured output) for a model"""
+ params = cls.get_model_params(model)
+ return params.json_mode if params else None
+
+ @classmethod
+ def get_reasoning(cls, model: str) -> str | None:
+ """Get supported reasoning output style for a model"""
+ params = cls.get_model_params(model)
+ return params.reasoning if params else None
+
  @classmethod
  def get_default_max_tokens(cls, model: str) -> int:
  """Get default max_tokens for RequestParams based on model"""

src/mcp_agent/llm/model_factory.py

@@ -143,6 +143,7 @@ class ModelFactory:
  "gemini2": "gemini-2.0-flash",
  "gemini25": "gemini-2.5-flash-preview-05-20",
  "gemini25pro": "gemini-2.5-pro-preview-05-06",
+ "kimi": "groq.moonshotai/kimi-k2-instruct",
  }

  # Mapping of providers to their LLM classes

src/mcp_agent/llm/providers/augmented_llm_aliyun.py

@@ -12,15 +12,14 @@ class AliyunAugmentedLLM(OpenAIAugmentedLLM):

  def _initialize_default_params(self, kwargs: dict) -> RequestParams:
  """Initialize Aliyun-specific default parameters"""
+ # Get base defaults from parent (includes ModelDatabase lookup)
+ base_params = super()._initialize_default_params(kwargs)
+
+ # Override with Aliyun-specific settings
  chosen_model = kwargs.get("model", DEFAULT_QWEN_MODEL)
-
- return RequestParams(
- model=chosen_model,
- systemPrompt=self.instruction,
- parallel_tool_calls=True,
- max_iterations=20,
- use_history=True,
- )
+ base_params.model = chosen_model
+
+ return base_params

  def _base_url(self) -> str:
  base_url = None

src/mcp_agent/llm/providers/augmented_llm_azure.py

@@ -18,7 +18,7 @@ def _extract_resource_name(url: str) -> str | None:
  return host.replace(suffix, "") if host.endswith(suffix) else None


- DEFAULT_AZURE_API_VERSION = "2023-05-15"
+ DEFAULT_AZURE_API_VERSION = "2024-10-21"


  class AzureOpenAIAugmentedLLM(OpenAIAugmentedLLM):

src/mcp_agent/llm/providers/augmented_llm_deepseek.py

@@ -22,15 +22,14 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):

  def _initialize_default_params(self, kwargs: dict) -> RequestParams:
  """Initialize Deepseek-specific default parameters"""
+ # Get base defaults from parent (includes ModelDatabase lookup)
+ base_params = super()._initialize_default_params(kwargs)
+
+ # Override with Deepseek-specific settings
  chosen_model = kwargs.get("model", DEFAULT_DEEPSEEK_MODEL)
-
- return RequestParams(
- model=chosen_model,
- systemPrompt=self.instruction,
- parallel_tool_calls=True,
- max_iterations=20,
- use_history=True,
- )
+ base_params.model = chosen_model
+
+ return base_params

  def _base_url(self) -> str:
  base_url = None

src/mcp_agent/llm/providers/augmented_llm_groq.py (new file)

@@ -0,0 +1,103 @@
+ from typing import List, Tuple, Type, cast
+
+ from pydantic_core import from_json
+
+ from mcp_agent.core.request_params import RequestParams
+ from mcp_agent.llm.model_database import ModelDatabase
+ from mcp_agent.llm.provider_types import Provider
+ from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+ from mcp_agent.logging.logger import get_logger
+ from mcp_agent.mcp.helpers.content_helpers import get_text, split_thinking_content
+ from mcp_agent.mcp.interfaces import ModelT
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+ GROQ_BASE_URL = "https://api.groq.com/openai/v1"
+ DEFAULT_GROQ_MODEL = "moonshotai/kimi-k2-instruct"
+
+ ### There is some big refactorings to be had quite easily here now:
+ ### - combining the structured output type handling
+ ### - deduplicating between this and the deepseek llm
+
+
+ class GroqAugmentedLLM(OpenAIAugmentedLLM):
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, provider=Provider.GROQ, **kwargs)
+
+ def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+ """Initialize Groq default parameters"""
+ # Get base defaults from parent (includes ModelDatabase lookup)
+ base_params = super()._initialize_default_params(kwargs)
+
+ # Override with Groq-specific settings
+ chosen_model = kwargs.get("model", DEFAULT_GROQ_MODEL)
+ base_params.model = chosen_model
+ base_params.parallel_tool_calls = False
+
+ return base_params
+
+ async def _apply_prompt_provider_specific_structured(
+ self,
+ multipart_messages: List[PromptMessageMultipart],
+ model: Type[ModelT],
+ request_params: RequestParams | None = None,
+ ) -> Tuple[ModelT | None, PromptMessageMultipart]: # noqa: F821
+ request_params = self.get_request_params(request_params)
+
+ assert self.default_request_params
+ llm_model = self.default_request_params.model or DEFAULT_GROQ_MODEL
+ json_mode: str | None = ModelDatabase.get_json_mode(llm_model)
+ if "json_object" == json_mode:
+ request_params.response_format = {"type": "json_object"}
+
+ # Get the full schema and extract just the properties
+ full_schema = model.model_json_schema()
+ properties = full_schema.get("properties", {})
+ required_fields = full_schema.get("required", [])
+
+ # Create a cleaner format description
+ format_description = "{\n"
+ for field_name, field_info in properties.items():
+ field_type = field_info.get("type", "string")
+ description = field_info.get("description", "")
+ format_description += f' "{field_name}": "{field_type}"'
+ if description:
+ format_description += f" // {description}"
+ if field_name in required_fields:
+ format_description += " // REQUIRED"
+ format_description += "\n"
+ format_description += "}"
+
+ multipart_messages[-1].add_text(
+ f"""YOU MUST RESPOND WITH A JSON OBJECT IN EXACTLY THIS FORMAT:
+ {format_description}
+
+ IMPORTANT RULES:
+ - Respond ONLY with the JSON object, no other text
+ - Do NOT include "properties" or "schema" wrappers
+ - Do NOT use code fences or markdown
+ - The response must be valid JSON that matches the format above
+ - All required fields must be included"""
+ )
+
+ result: PromptMessageMultipart = await self._apply_prompt_provider_specific(
+ multipart_messages, request_params
+ )
+ reasoning_mode: str | None = ModelDatabase.get_reasoning(llm_model)
+ try:
+ text = get_text(result.content[-1]) or ""
+ if "tags" == reasoning_mode:
+ _, text = split_thinking_content(text)
+ json_data = from_json(text, allow_partial=True)
+ validated_model = model.model_validate(json_data)
+ return cast("ModelT", validated_model), result
+ except ValueError as e:
+ logger = get_logger(__name__)
+ logger.warning(f"Failed to parse structured response: {str(e)}")
+ return None, result
+
+ def _base_url(self) -> str:
+ base_url = None
+ if self.context.config and self.context.config.groq:
+ base_url = self.context.config.groq.base_url
+
+ return base_url if base_url else GROQ_BASE_URL
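
For reference, a worked sketch of the schema-derived format block that the loop above builds; the Pydantic model and its fields here are hypothetical:

    from pydantic import BaseModel, Field

    class WeatherReport(BaseModel):  # hypothetical example model
        city: str = Field(description="City name")
        temperature_c: float

    # With this model, the instruction appended to the last user message reads roughly:
    #
    # YOU MUST RESPOND WITH A JSON OBJECT IN EXACTLY THIS FORMAT:
    # {
    #  "city": "string" // City name // REQUIRED
    #  "temperature_c": "number" // REQUIRED
    # }
    #
    # followed by the IMPORTANT RULES block, after which the reply is parsed with
    # from_json and validated against WeatherReport.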

src/mcp_agent/llm/providers/augmented_llm_openai.py

@@ -307,6 +307,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
  request_params = self.get_request_params(request_params=request_params)

  responses: List[ContentBlock] = []
+ model_name = self.default_request_params.model or DEFAULT_OPENAI_MODEL

  # TODO -- move this in to agent context management / agent group handling
  messages: List[ChatCompletionMessageParam] = []
@@ -347,7 +348,6 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
  stream = await self._openai_client().chat.completions.create(**arguments)
  # Process the stream
  response = await self._process_stream(stream, self.default_request_params.model)
-
  # Track usage if response is valid and has usage data
  if (
  hasattr(response, "usage")
@@ -391,6 +391,9 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
  # Convert to dict and remove None values
  message_dict = message.model_dump()
  message_dict = {k: v for k, v in message_dict.items() if v is not None}
+ if model_name == "deepseek-r1-distill-llama-70b":
+ message_dict.pop("reasoning", None)
+
  messages.append(message_dict)

  message_text = message.content
@@ -412,6 +415,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
  )

  tool_results = []
+
  for tool_call in message.tool_calls:
  self.show_tool_call(
  available_tools,
@@ -428,12 +432,24 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
  else from_json(tool_call.function.arguments, allow_partial=True),
  ),
  )
- result = await self.call_tool(tool_call_request, tool_call.id)
- self.show_tool_result(result)

- tool_results.append((tool_call.id, result))
- responses.extend(result.content)
- messages.extend(OpenAIConverter.convert_function_results_to_openai(tool_results))
+ try:
+ result = await self.call_tool(tool_call_request, tool_call.id)
+ self.show_tool_result(result)
+ tool_results.append((tool_call.id, result))
+ responses.extend(result.content)
+ except Exception as e:
+ self.logger.error(f"Tool call {tool_call.id} failed with error: {e}")
+ # Still add the tool_call_id with an error result to prevent missing responses
+ error_result = CallToolResult(
+ content=[TextContent(type="text", text=f"Tool call failed: {str(e)}")]
+ )
+ tool_results.append((tool_call.id, error_result))
+
+ converted_messages = OpenAIConverter.convert_function_results_to_openai(
+ tool_results
+ )
+ messages.extend(converted_messages)

  self.logger.debug(
  f"Iteration {i}: Tool call results: {str(tool_results) if tool_results else 'None'}"

src/mcp_agent/llm/providers/augmented_llm_openrouter.py

@@ -17,24 +17,19 @@ class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):

  def _initialize_default_params(self, kwargs: dict) -> RequestParams:
  """Initialize OpenRouter-specific default parameters."""
+ # Get base defaults from parent (includes ModelDatabase lookup)
+ base_params = super()._initialize_default_params(kwargs)
+
+ # Override with OpenRouter-specific settings
  # OpenRouter model names include the provider, e.g., "google/gemini-flash-1.5"
  # The model should be passed in the 'model' kwarg during factory creation.
  chosen_model = kwargs.get("model", DEFAULT_OPENROUTER_MODEL)
- if not chosen_model:
- # Unlike Deepseek, OpenRouter *requires* a model path in the identifier.
- # The factory should extract this before calling the constructor.
- # We rely on the model being passed correctly via kwargs.
- # If it's still None here, it indicates an issue upstream (factory or user input).
- # However, the base class _get_model handles the error if model is None.
- pass
-
- return RequestParams(
- model=chosen_model, # Will be validated by base class
- systemPrompt=self.instruction,
- parallel_tool_calls=True, # Default based on OpenAI provider
- max_iterations=20, # Default based on OpenAI provider
- use_history=True, # Default based on OpenAI provider
- )
+ if chosen_model:
+ base_params.model = chosen_model
+ # If it's still None here, it indicates an issue upstream (factory or user input).
+ # However, the base class _get_model handles the error if model is None.
+
+ return base_params

  def _base_url(self) -> str:
  """Retrieve the OpenRouter base URL from config or use the default."""

src/mcp_agent/llm/providers/augmented_llm_xai.py

@@ -16,15 +16,15 @@ class XAIAugmentedLLM(OpenAIAugmentedLLM):

  def _initialize_default_params(self, kwargs: dict) -> RequestParams:
  """Initialize xAI parameters"""
+ # Get base defaults from parent (includes ModelDatabase lookup)
+ base_params = super()._initialize_default_params(kwargs)
+
+ # Override with xAI-specific settings
  chosen_model = kwargs.get("model", DEFAULT_XAI_MODEL)
-
- return RequestParams(
- model=chosen_model,
- systemPrompt=self.instruction,
- parallel_tool_calls=False,
- max_iterations=20,
- use_history=True,
- )
+ base_params.model = chosen_model
+ base_params.parallel_tool_calls = False
+
+ return base_params

  def _base_url(self) -> str:
  base_url = os.getenv("XAI_BASE_URL", XAI_BASE_URL)

src/mcp_agent/llm/providers/multipart_converter_openai.py

@@ -441,9 +441,6 @@ class OpenAIConverter:
  # Convert to OpenAI format
  user_message = OpenAIConverter.convert_to_openai(non_text_multipart)

- # We need to add tool_call_id manually
- user_message["tool_call_id"] = tool_call_id
-
  return (tool_message, [user_message])

  @staticmethod
@@ -461,22 +458,42 @@
  Returns:
  List of OpenAI API messages for tool responses
  """
- messages = []
+ tool_messages = []
+ user_messages = []
+ has_mixed_content = False

  for tool_call_id, result in results:
- converted = OpenAIConverter.convert_tool_result_to_openai(
- tool_result=result,
- tool_call_id=tool_call_id,
- concatenate_text_blocks=concatenate_text_blocks,
- )
-
- # Handle the case where we have mixed content and get back a tuple
- if isinstance(converted, tuple):
- tool_message, additional_messages = converted
- messages.append(tool_message)
- messages.extend(additional_messages)
- else:
- # Single message case (text-only)
- messages.append(converted)
+ try:
+ converted = OpenAIConverter.convert_tool_result_to_openai(
+ tool_result=result,
+ tool_call_id=tool_call_id,
+ concatenate_text_blocks=concatenate_text_blocks,
+ )

+ # Handle the case where we have mixed content and get back a tuple
+ if isinstance(converted, tuple):
+ tool_message, additional_messages = converted
+ tool_messages.append(tool_message)
+ user_messages.extend(additional_messages)
+ has_mixed_content = True
+ else:
+ # Single message case (text-only)
+ tool_messages.append(converted)
+ except Exception as e:
+ _logger.error(f"Failed to convert tool_call_id={tool_call_id}: {e}")
+ # Create a basic tool response to prevent missing tool_call_id error
+ fallback_message = {
+ "role": "tool",
+ "tool_call_id": tool_call_id,
+ "content": f"[Conversion error: {str(e)}]",
+ }
+ tool_messages.append(fallback_message)
+
+ # CONDITIONAL REORDERING: Only reorder if there are user messages (mixed content)
+ if has_mixed_content and user_messages:
+ # Reorder: All tool messages first (OpenAI sequence), then user messages (vision context)
+ messages = tool_messages + user_messages
+ else:
+ # Pure tool responses - keep original order to preserve context (snapshots, etc.)
+ messages = tool_messages
  return messages
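
To make the conditional reordering concrete, a small sketch of the resulting message order when one tool result carried mixed text-plus-image content; the roles and payloads are illustrative:

    # Two tool calls: the first returned text only, the second returned text plus an image.
    messages = [
        {"role": "tool", "tool_call_id": "call_1", "content": "plain text result"},
        {"role": "tool", "tool_call_id": "call_2", "content": "see attached image"},
        # The user message carrying the image parts is appended after ALL tool messages,
        # so the tool_call_id sequence expected by the OpenAI API stays unbroken.
        {"role": "user", "content": [{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}]},
    ]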

src/mcp_agent/logging/rich_progress.py

@@ -21,7 +21,7 @@ class RichProgressDisplay:
  self._progress = Progress(
  SpinnerColumn(spinner_name="simpleDotsScrolling"),
  TextColumn(
- "[progress.description]{task.description}|",
+ "[progress.description]{task.description}",
  # table_column=Column(max_width=16),
  ),
  TextColumn(text_format="{task.fields[target]:<16}", style="Bold Blue"),
@@ -77,11 +77,12 @@
  ProgressAction.LOADED: "dim green",
  ProgressAction.INITIALIZED: "dim green",
  ProgressAction.CHATTING: "bold blue",
- ProgressAction.STREAMING: "bold blue", # Same color as chatting
+ ProgressAction.STREAMING: "bold green", # Assistant Colour
  ProgressAction.ROUTING: "bold blue",
  ProgressAction.PLANNING: "bold blue",
  ProgressAction.READY: "dim green",
  ProgressAction.CALLING_TOOL: "bold magenta",
+ ProgressAction.TOOL_PROGRESS: "bold magenta",
  ProgressAction.FINISHED: "black on green",
  ProgressAction.SHUTDOWN: "black on red",
  ProgressAction.AGGREGATOR_INITIALIZED: "bold green",
@@ -107,18 +108,49 @@
  # Ensure no None values in the update
  # For streaming, use custom description immediately to avoid flashing
  if event.action == ProgressAction.STREAMING and event.streaming_tokens:
- formatted_tokens = f"↓ {event.streaming_tokens.strip()}".ljust(15)
+ # Account for [dim][/dim] tags (11 characters) in padding calculation
+ formatted_tokens = f"▎[dim]◀[/dim] {event.streaming_tokens.strip()}".ljust(17 + 11)
  description = f"[{self._get_action_style(event.action)}]{formatted_tokens}"
+ elif event.action == ProgressAction.CHATTING:
+ # Add special formatting for chatting with dimmed arrow
+ formatted_text = f"▎[dim]▶[/dim] {event.action.value.strip()}".ljust(17 + 11)
+ description = f"[{self._get_action_style(event.action)}]{formatted_text}"
+ elif event.action == ProgressAction.CALLING_TOOL:
+ # Add special formatting for calling tool with dimmed arrow
+ formatted_text = f"▎[dim]◀[/dim] {event.action.value}".ljust(17 + 11)
+ description = f"[{self._get_action_style(event.action)}]{formatted_text}"
+ elif event.action == ProgressAction.TOOL_PROGRESS:
+ # Format similar to streaming - show progress numbers
+ if event.progress is not None:
+ if event.total is not None:
+ progress_display = f"{int(event.progress)}/{int(event.total)}"
+ else:
+ progress_display = str(int(event.progress))
+ else:
+ progress_display = "Processing"
+ formatted_text = f"▎[dim]▶[/dim] {progress_display}".ljust(17 + 11)
+ description = f"[{self._get_action_style(event.action)}]{formatted_text}"
  else:
- description = f"[{self._get_action_style(event.action)}]{event.action.value:<15}"
-
- self._progress.update(
- task_id,
- description=description,
- target=event.target or task_name, # Use task_name as fallback for target
- details=event.details or "",
- task_name=task_name,
- )
+ description = f"[{self._get_action_style(event.action)}]{event.action.value:<15}"
+
+ # Update basic task information
+ update_kwargs = {
+ "description": description,
+ "target": event.target or task_name, # Use task_name as fallback for target
+ "details": event.details or "",
+ "task_name": task_name,
+ }
+
+ # For TOOL_PROGRESS events, update progress if available
+ if event.action == ProgressAction.TOOL_PROGRESS and event.progress is not None:
+ if event.total is not None:
+ update_kwargs["completed"] = event.progress
+ update_kwargs["total"] = event.total
+ else:
+ # If no total, just show as indeterminate progress
+ self._progress.reset(task_id)
+
+ self._progress.update(task_id, **update_kwargs)

  if (
  event.action == ProgressAction.INITIALIZED