fast-agent-mcp 0.2.49__tar.gz → 0.2.50__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release has been flagged as potentially problematic. See the package registry's advisory page for fast-agent-mcp for more details.

Files changed (242)
  1. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/PKG-INFO +4 -4
  2. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/pyproject.toml +4 -4
  3. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/event_progress.py +18 -0
  4. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/model_database.py +34 -0
  5. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_aliyun.py +7 -8
  6. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_deepseek.py +7 -8
  7. fast_agent_mcp-0.2.50/src/mcp_agent/llm/providers/augmented_llm_groq.py +103 -0
  8. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_openai.py +13 -7
  9. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_openrouter.py +10 -15
  10. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_xai.py +8 -8
  11. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/rich_progress.py +30 -7
  12. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/helpers/content_helpers.py +29 -0
  13. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/mcp_aggregator.py +32 -1
  14. fast_agent_mcp-0.2.49/src/mcp_agent/llm/providers/augmented_llm_groq.py +0 -30
  15. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/.gitignore +0 -0
  16. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/LICENSE +0 -0
  17. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/README.md +0 -0
  18. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/azure-openai/fastagent.config.yaml +0 -0
  19. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/bedrock/fast-agent.config.yaml +0 -0
  20. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/custom-agents/agent.py +0 -0
  21. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/custom-agents/fastagent.config.yaml +0 -0
  22. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/data-analysis/analysis-campaign.py +0 -0
  23. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/data-analysis/analysis.py +0 -0
  24. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/data-analysis/fastagent.config.yaml +0 -0
  25. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  26. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/elicitation_account_server.py +0 -0
  27. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
  28. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/elicitation_game_server.py +0 -0
  29. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/fastagent.config.yaml +0 -0
  30. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
  31. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/forms_demo.py +0 -0
  32. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/game_character.py +0 -0
  33. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/game_character_handler.py +0 -0
  34. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/elicitations/tool_call.py +0 -0
  35. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/mcp-filtering/fastagent.config.yaml +0 -0
  36. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/mcp-filtering/fastagent.secrets.yaml.example +0 -0
  37. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/mcp-filtering/mcp_server.py +0 -0
  38. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/mcp-filtering/test_mcp_filtering.py +0 -0
  39. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/state-transfer/agent_one.py +0 -0
  40. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/state-transfer/agent_two.py +0 -0
  41. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  42. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  43. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/cat.png +0 -0
  44. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/example1.py +0 -0
  45. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/example2.py +0 -0
  46. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/example3.py +0 -0
  47. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/mcp/vision-examples/fastagent.config.yaml +0 -0
  48. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/otel/agent.py +0 -0
  49. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/otel/agent2.py +0 -0
  50. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/otel/docker-compose.yaml +0 -0
  51. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/otel/fastagent.config.yaml +0 -0
  52. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/researcher/fastagent.config.yaml +0 -0
  53. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/researcher/researcher-eval.py +0 -0
  54. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/researcher/researcher-imp.py +0 -0
  55. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/researcher/researcher.py +0 -0
  56. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/.env.sample +0 -0
  57. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/Makefile +0 -0
  58. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/README.md +0 -0
  59. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/agent.py +0 -0
  60. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/demo_images/clam.jpg +0 -0
  61. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/demo_images/crab.png +0 -0
  62. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/demo_images/shrimp.png +0 -0
  63. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/docker-compose.yml +0 -0
  64. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/fastagent.config.yaml +0 -0
  65. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/image_demo.py +0 -0
  66. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/mcp_server/Dockerfile +0 -0
  67. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
  68. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/mcp_server/mcp_server.py +0 -0
  69. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/simple_agent.py +0 -0
  70. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
  71. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
  72. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
  73. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/chaining.py +0 -0
  74. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/evaluator.py +0 -0
  75. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/fastagent.config.yaml +0 -0
  76. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/graded_report.md +0 -0
  77. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/human_input.py +0 -0
  78. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/orchestrator.py +0 -0
  79. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/parallel.py +0 -0
  80. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/router.py +0 -0
  81. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/short_story.md +0 -0
  82. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/examples/workflows/short_story.txt +0 -0
  83. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/hatch_build.py +0 -0
  84. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/__init__.py +0 -0
  85. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/__init__.py +0 -0
  86. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/agent.py +0 -0
  87. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/base_agent.py +0 -0
  88. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/__init__.py +0 -0
  89. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/chain_agent.py +0 -0
  90. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/evaluator_optimizer.py +0 -0
  91. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/iterative_planner.py +0 -0
  92. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/orchestrator_agent.py +0 -0
  93. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/orchestrator_models.py +0 -0
  94. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/orchestrator_prompts.py +0 -0
  95. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/parallel_agent.py +0 -0
  96. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/agents/workflow/router_agent.py +0 -0
  97. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/app.py +0 -0
  98. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/__init__.py +0 -0
  99. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/__main__.py +0 -0
  100. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/check_config.py +0 -0
  101. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/go.py +0 -0
  102. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/quickstart.py +0 -0
  103. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/server_helpers.py +0 -0
  104. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/setup.py +0 -0
  105. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/commands/url_parser.py +0 -0
  106. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/constants.py +0 -0
  107. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/main.py +0 -0
  108. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/cli/terminal.py +0 -0
  109. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/config.py +0 -0
  110. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/console.py +0 -0
  111. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/context.py +0 -0
  112. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/context_dependent.py +0 -0
  113. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/__init__.py +0 -0
  114. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/agent_app.py +0 -0
  115. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/agent_types.py +0 -0
  116. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/direct_decorators.py +0 -0
  117. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/direct_factory.py +0 -0
  118. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/enhanced_prompt.py +0 -0
  119. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/error_handling.py +0 -0
  120. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/exceptions.py +0 -0
  121. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/fastagent.py +0 -0
  122. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/interactive_prompt.py +0 -0
  123. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/mcp_content.py +0 -0
  124. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/mermaid_utils.py +0 -0
  125. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/prompt.py +0 -0
  126. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/request_params.py +0 -0
  127. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/usage_display.py +0 -0
  128. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/core/validation.py +0 -0
  129. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/executor/__init__.py +0 -0
  130. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/executor/executor.py +0 -0
  131. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/executor/task_registry.py +0 -0
  132. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/executor/workflow_signal.py +0 -0
  133. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/__init__.py +0 -0
  134. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/elicitation_form.py +0 -0
  135. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/elicitation_forms.py +0 -0
  136. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/elicitation_handler.py +0 -0
  137. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/elicitation_state.py +0 -0
  138. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/form_fields.py +0 -0
  139. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/handler.py +0 -0
  140. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/simple_form.py +0 -0
  141. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/human_input/types.py +0 -0
  142. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/__init__.py +0 -0
  143. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm.py +0 -0
  144. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm_passthrough.py +0 -0
  145. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm_playback.py +0 -0
  146. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm_silent.py +0 -0
  147. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/augmented_llm_slow.py +0 -0
  148. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/memory.py +0 -0
  149. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/model_factory.py +0 -0
  150. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/prompt_utils.py +0 -0
  151. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/provider_key_manager.py +0 -0
  152. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/provider_types.py +0 -0
  153. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/__init__.py +0 -0
  154. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/anthropic_utils.py +0 -0
  155. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -0
  156. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_azure.py +0 -0
  157. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_bedrock.py +0 -0
  158. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_generic.py +0 -0
  159. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_google_native.py +0 -0
  160. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_google_oai.py +0 -0
  161. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/augmented_llm_tensorzero.py +0 -0
  162. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/google_converter.py +0 -0
  163. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/multipart_converter_anthropic.py +0 -0
  164. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/multipart_converter_openai.py +0 -0
  165. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/multipart_converter_tensorzero.py +0 -0
  166. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/openai_multipart.py +0 -0
  167. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/openai_utils.py +0 -0
  168. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -0
  169. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/providers/sampling_converter_openai.py +0 -0
  170. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/sampling_converter.py +0 -0
  171. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/sampling_format_converter.py +0 -0
  172. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/llm/usage_tracking.py +0 -0
  173. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/__init__.py +0 -0
  174. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/events.py +0 -0
  175. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/json_serializer.py +0 -0
  176. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/listeners.py +0 -0
  177. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/logger.py +0 -0
  178. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/logging/transport.py +0 -0
  179. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/__init__.py +0 -0
  180. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/common.py +0 -0
  181. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/elicitation_factory.py +0 -0
  182. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/elicitation_handlers.py +0 -0
  183. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/gen_client.py +0 -0
  184. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/helpers/__init__.py +0 -0
  185. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/helpers/server_config_helpers.py +0 -0
  186. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/hf_auth.py +0 -0
  187. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/interfaces.py +0 -0
  188. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/logger_textio.py +0 -0
  189. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
  190. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
  191. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/mime_utils.py +0 -0
  192. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompt_message_multipart.py +0 -0
  193. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompt_render.py +0 -0
  194. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompt_serialization.py +0 -0
  195. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/__init__.py +0 -0
  196. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/__main__.py +0 -0
  197. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_constants.py +0 -0
  198. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_helpers.py +0 -0
  199. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_load.py +0 -0
  200. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_server.py +0 -0
  201. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/prompts/prompt_template.py +0 -0
  202. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/resource_utils.py +0 -0
  203. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp/sampling.py +0 -0
  204. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp_server/__init__.py +0 -0
  205. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp_server/agent_server.py +0 -0
  206. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/mcp_server_registry.py +0 -0
  207. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/progress_display.py +0 -0
  208. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/py.typed +0 -0
  209. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -0
  210. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
  211. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
  212. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  213. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
  214. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
  215. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
  216. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
  217. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
  218. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/forms_demo.py +0 -0
  219. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/game_character.py +0 -0
  220. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/game_character_handler.py +0 -0
  221. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/elicitations/tool_call.py +0 -0
  222. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_one.py +0 -0
  223. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/state-transfer/agent_two.py +0 -0
  224. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  225. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  226. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
  227. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +0 -0
  228. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/researcher/researcher-imp.py +0 -0
  229. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
  230. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
  231. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
  232. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
  233. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/graded_report.md +0 -0
  234. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
  235. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
  236. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
  237. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
  238. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/short_story.md +0 -0
  239. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/resources/examples/workflows/short_story.txt +0 -0
  240. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/tools/tool_definition.py +0 -0
  241. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/ui/console_display.py +0 -0
  242. {fast_agent_mcp-0.2.49 → fast_agent_mcp-0.2.50}/src/mcp_agent/ui/console_display_legacy.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: fast-agent-mcp
3
- Version: 0.2.49
3
+ Version: 0.2.50
4
4
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
5
5
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>
6
6
  License: Apache License
@@ -208,8 +208,8 @@ License-File: LICENSE
208
208
  Classifier: License :: OSI Approved :: Apache Software License
209
209
  Classifier: Operating System :: OS Independent
210
210
  Classifier: Programming Language :: Python :: 3
211
- Requires-Python: >=3.12
212
- Requires-Dist: a2a-sdk>=0.2.9
211
+ Requires-Python: >=3.13
212
+ Requires-Dist: a2a-sdk>=0.3.0
213
213
  Requires-Dist: aiohttp>=3.11.13
214
214
  Requires-Dist: anthropic>=0.59.0
215
215
  Requires-Dist: azure-identity>=1.14.0
@@ -232,7 +232,7 @@ Requires-Dist: pydantic>=2.10.4
232
232
  Requires-Dist: pyperclip>=1.9.0
233
233
  Requires-Dist: pyyaml>=6.0.2
234
234
  Requires-Dist: rich>=14.1.0
235
- Requires-Dist: tensorzero>=2025.6.3
235
+ Requires-Dist: tensorzero>=2025.7.5
236
236
  Requires-Dist: typer>=0.15.1
237
237
  Provides-Extra: azure
238
238
  Requires-Dist: azure-identity>=1.14.0; extra == 'azure'
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "fast-agent-mcp"
3
- version = "0.2.49"
3
+ version = "0.2.50"
4
4
  description = "Define, Prompt and Test MCP enabled Agents and Workflows"
5
5
  readme = "README.md"
6
6
  license = { file = "LICENSE" }
@@ -12,7 +12,7 @@ classifiers = [
12
12
  "License :: OSI Approved :: Apache Software License",
13
13
  "Operating System :: OS Independent"
14
14
  ]
15
- requires-python = ">=3.12"
15
+ requires-python = ">=3.13"
16
16
  dependencies = [
17
17
  "fastapi>=0.115.6",
18
18
  "mcp==1.12.1",
@@ -34,9 +34,9 @@ dependencies = [
34
34
  "opentelemetry-instrumentation-mcp>=0.43.1; python_version >= '3.10' and python_version < '4.0'",
35
35
  "google-genai>=1.27.0",
36
36
  "opentelemetry-instrumentation-google-genai>=0.3b0",
37
- "tensorzero>=2025.6.3",
37
+ "tensorzero>=2025.7.5",
38
38
  "deprecated>=1.2.18",
39
- "a2a-sdk>=0.2.9",
39
+ "a2a-sdk>=0.3.0",
40
40
  "email-validator>=2.2.0",
41
41
  "pyperclip>=1.9.0",
42
42
  ]
@@ -20,6 +20,7 @@ class ProgressAction(str, Enum):
20
20
  PLANNING = "Planning"
21
21
  READY = "Ready"
22
22
  CALLING_TOOL = "Calling Tool"
23
+ TOOL_PROGRESS = "Tool Progress"
23
24
  UPDATED = "Updated"
24
25
  FINISHED = "Finished"
25
26
  SHUTDOWN = "Shutdown"
@@ -35,6 +36,8 @@ class ProgressEvent(BaseModel):
35
36
  details: Optional[str] = None
36
37
  agent_name: Optional[str] = None
37
38
  streaming_tokens: Optional[str] = None # Special field for streaming token count
39
+ progress: Optional[float] = None # Current progress value
40
+ total: Optional[float] = None # Total value for progress calculation
38
41
 
39
42
  def __str__(self) -> str:
40
43
  """Format the progress event for display."""
@@ -86,6 +89,12 @@ def convert_log_event(event: Event) -> Optional[ProgressEvent]:
86
89
  details = f"{server_name} ({tool_name})"
87
90
  else:
88
91
  details = f"{server_name}"
92
+
93
+ # For TOOL_PROGRESS, use progress message if available, otherwise keep default
94
+ if progress_action == ProgressAction.TOOL_PROGRESS:
95
+ progress_message = event_data.get("details", "")
96
+ if progress_message: # Only override if message is non-empty
97
+ details = progress_message
89
98
 
90
99
  elif "augmented_llm" in namespace:
91
100
  model = event_data.get("model", "")
@@ -104,10 +113,19 @@ def convert_log_event(event: Event) -> Optional[ProgressEvent]:
104
113
  if progress_action == ProgressAction.STREAMING:
105
114
  streaming_tokens = event_data.get("details", "")
106
115
 
116
+ # Extract progress data for TOOL_PROGRESS actions
117
+ progress = None
118
+ total = None
119
+ if progress_action == ProgressAction.TOOL_PROGRESS:
120
+ progress = event_data.get("progress")
121
+ total = event_data.get("total")
122
+
107
123
  return ProgressEvent(
108
124
  action=ProgressAction(progress_action),
109
125
  target=target or "unknown",
110
126
  details=details,
111
127
  agent_name=event_data.get("agent_name"),
112
128
  streaming_tokens=streaming_tokens,
129
+ progress=progress,
130
+ total=total,
113
131
  )
@@ -22,6 +22,12 @@ class ModelParameters(BaseModel):
22
22
  tokenizes: List[str]
23
23
  """List of supported content types for tokenization"""
24
24
 
25
+ json_mode: None | str = "schema"
26
+ """Structured output style. 'schema', 'object' or None for unsupported """
27
+
28
+ reasoning: None | str = None
29
+ """Reasoning output style. 'tags' if enclosed in <thinking> tags, 'none' if not used"""
30
+
25
31
 
26
32
  class ModelDatabase:
27
33
  """Centralized model configuration database"""
@@ -87,6 +93,13 @@ class ModelDatabase:
87
93
  QWEN_STANDARD = ModelParameters(
88
94
  context_window=32000, max_output_tokens=8192, tokenizes=QWEN_MULTIMODAL
89
95
  )
96
+ QWEN3_REASONER = ModelParameters(
97
+ context_window=131072,
98
+ max_output_tokens=16384,
99
+ tokenizes=TEXT_ONLY,
100
+ json_mode="object",
101
+ reasoning="tags",
102
+ )
90
103
 
91
104
  FAST_AGENT_STANDARD = ModelParameters(
92
105
  context_window=1000000, max_output_tokens=100000, tokenizes=TEXT_ONLY
@@ -125,6 +138,13 @@ class ModelDatabase:
125
138
  context_window=65536, max_output_tokens=32768, tokenizes=TEXT_ONLY
126
139
  )
127
140
 
141
+ DEEPSEEK_DISTILL = ModelParameters(
142
+ context_window=131072,
143
+ max_output_tokens=131072,
144
+ tokenizes=TEXT_ONLY,
145
+ json_mode="object",
146
+ reasoning="tags",
147
+ )
128
148
  GEMINI_2_5_PRO = ModelParameters(
129
149
  context_window=2097152, max_output_tokens=8192, tokenizes=GOOGLE_MULTIMODAL
130
150
  )
@@ -214,6 +234,8 @@ class ModelDatabase:
214
234
  "grok-3-fast": GROK_3,
215
235
  "grok-3-mini-fast": GROK_3,
216
236
  "moonshotai/kimi-k2-instruct": KIMI_MOONSHOT,
237
+ "qwen/qwen3-32b": QWEN3_REASONER,
238
+ "deepseek-r1-distill-llama-70b": DEEPSEEK_DISTILL,
217
239
  }
218
240
 
219
241
  @classmethod
@@ -239,6 +261,18 @@ class ModelDatabase:
239
261
  params = cls.get_model_params(model)
240
262
  return params.tokenizes if params else None
241
263
 
264
+ @classmethod
265
+ def get_json_mode(cls, model: str) -> str | None:
266
+ """Get supported json mode (structured output) for a model"""
267
+ params = cls.get_model_params(model)
268
+ return params.json_mode if params else None
269
+
270
+ @classmethod
271
+ def get_reasoning(cls, model: str) -> str | None:
272
+ """Get supported reasoning output style for a model"""
273
+ params = cls.get_model_params(model)
274
+ return params.reasoning if params else None
275
+
242
276
  @classmethod
243
277
  def get_default_max_tokens(cls, model: str) -> int:
244
278
  """Get default max_tokens for RequestParams based on model"""
@@ -12,15 +12,14 @@ class AliyunAugmentedLLM(OpenAIAugmentedLLM):
12
12
 
13
13
  def _initialize_default_params(self, kwargs: dict) -> RequestParams:
14
14
  """Initialize Aliyun-specific default parameters"""
15
+ # Get base defaults from parent (includes ModelDatabase lookup)
16
+ base_params = super()._initialize_default_params(kwargs)
17
+
18
+ # Override with Aliyun-specific settings
15
19
  chosen_model = kwargs.get("model", DEFAULT_QWEN_MODEL)
16
-
17
- return RequestParams(
18
- model=chosen_model,
19
- systemPrompt=self.instruction,
20
- parallel_tool_calls=True,
21
- max_iterations=20,
22
- use_history=True,
23
- )
20
+ base_params.model = chosen_model
21
+
22
+ return base_params
24
23
 
25
24
  def _base_url(self) -> str:
26
25
  base_url = None
@@ -22,15 +22,14 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
22
22
 
23
23
  def _initialize_default_params(self, kwargs: dict) -> RequestParams:
24
24
  """Initialize Deepseek-specific default parameters"""
25
+ # Get base defaults from parent (includes ModelDatabase lookup)
26
+ base_params = super()._initialize_default_params(kwargs)
27
+
28
+ # Override with Deepseek-specific settings
25
29
  chosen_model = kwargs.get("model", DEFAULT_DEEPSEEK_MODEL)
26
-
27
- return RequestParams(
28
- model=chosen_model,
29
- systemPrompt=self.instruction,
30
- parallel_tool_calls=True,
31
- max_iterations=20,
32
- use_history=True,
33
- )
30
+ base_params.model = chosen_model
31
+
32
+ return base_params
34
33
 
35
34
  def _base_url(self) -> str:
36
35
  base_url = None
@@ -0,0 +1,103 @@
1
+ from typing import List, Tuple, Type, cast
2
+
3
+ from pydantic_core import from_json
4
+
5
+ from mcp_agent.core.request_params import RequestParams
6
+ from mcp_agent.llm.model_database import ModelDatabase
7
+ from mcp_agent.llm.provider_types import Provider
8
+ from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
9
+ from mcp_agent.logging.logger import get_logger
10
+ from mcp_agent.mcp.helpers.content_helpers import get_text, split_thinking_content
11
+ from mcp_agent.mcp.interfaces import ModelT
12
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
13
+
14
+ GROQ_BASE_URL = "https://api.groq.com/openai/v1"
15
+ DEFAULT_GROQ_MODEL = "moonshotai/kimi-k2-instruct"
16
+
17
+ ### There is some big refactorings to be had quite easily here now:
18
+ ### - combining the structured output type handling
19
+ ### - deduplicating between this and the deepseek llm
20
+
21
+
22
+ class GroqAugmentedLLM(OpenAIAugmentedLLM):
23
+ def __init__(self, *args, **kwargs) -> None:
24
+ super().__init__(*args, provider=Provider.GROQ, **kwargs)
25
+
26
+ def _initialize_default_params(self, kwargs: dict) -> RequestParams:
27
+ """Initialize Groq default parameters"""
28
+ # Get base defaults from parent (includes ModelDatabase lookup)
29
+ base_params = super()._initialize_default_params(kwargs)
30
+
31
+ # Override with Groq-specific settings
32
+ chosen_model = kwargs.get("model", DEFAULT_GROQ_MODEL)
33
+ base_params.model = chosen_model
34
+ base_params.parallel_tool_calls = False
35
+
36
+ return base_params
37
+
38
+ async def _apply_prompt_provider_specific_structured(
39
+ self,
40
+ multipart_messages: List[PromptMessageMultipart],
41
+ model: Type[ModelT],
42
+ request_params: RequestParams | None = None,
43
+ ) -> Tuple[ModelT | None, PromptMessageMultipart]: # noqa: F821
44
+ request_params = self.get_request_params(request_params)
45
+
46
+ assert self.default_request_params
47
+ llm_model = self.default_request_params.model or DEFAULT_GROQ_MODEL
48
+ json_mode: str | None = ModelDatabase.get_json_mode(llm_model)
49
+ if "json_object" == json_mode:
50
+ request_params.response_format = {"type": "json_object"}
51
+
52
+ # Get the full schema and extract just the properties
53
+ full_schema = model.model_json_schema()
54
+ properties = full_schema.get("properties", {})
55
+ required_fields = full_schema.get("required", [])
56
+
57
+ # Create a cleaner format description
58
+ format_description = "{\n"
59
+ for field_name, field_info in properties.items():
60
+ field_type = field_info.get("type", "string")
61
+ description = field_info.get("description", "")
62
+ format_description += f' "{field_name}": "{field_type}"'
63
+ if description:
64
+ format_description += f" // {description}"
65
+ if field_name in required_fields:
66
+ format_description += " // REQUIRED"
67
+ format_description += "\n"
68
+ format_description += "}"
69
+
70
+ multipart_messages[-1].add_text(
71
+ f"""YOU MUST RESPOND WITH A JSON OBJECT IN EXACTLY THIS FORMAT:
72
+ {format_description}
73
+
74
+ IMPORTANT RULES:
75
+ - Respond ONLY with the JSON object, no other text
76
+ - Do NOT include "properties" or "schema" wrappers
77
+ - Do NOT use code fences or markdown
78
+ - The response must be valid JSON that matches the format above
79
+ - All required fields must be included"""
80
+ )
81
+
82
+ result: PromptMessageMultipart = await self._apply_prompt_provider_specific(
83
+ multipart_messages, request_params
84
+ )
85
+ reasoning_mode: str | None = ModelDatabase.get_reasoning(llm_model)
86
+ try:
87
+ text = get_text(result.content[-1]) or ""
88
+ if "tags" == reasoning_mode:
89
+ _, text = split_thinking_content(text)
90
+ json_data = from_json(text, allow_partial=True)
91
+ validated_model = model.model_validate(json_data)
92
+ return cast("ModelT", validated_model), result
93
+ except ValueError as e:
94
+ logger = get_logger(__name__)
95
+ logger.warning(f"Failed to parse structured response: {str(e)}")
96
+ return None, result
97
+
98
+ def _base_url(self) -> str:
99
+ base_url = None
100
+ if self.context.config and self.context.config.groq:
101
+ base_url = self.context.config.groq.base_url
102
+
103
+ return base_url if base_url else GROQ_BASE_URL
@@ -307,6 +307,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
307
307
  request_params = self.get_request_params(request_params=request_params)
308
308
 
309
309
  responses: List[ContentBlock] = []
310
+ model_name = self.default_request_params.model or DEFAULT_OPENAI_MODEL
310
311
 
311
312
  # TODO -- move this in to agent context management / agent group handling
312
313
  messages: List[ChatCompletionMessageParam] = []
@@ -347,7 +348,6 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
347
348
  stream = await self._openai_client().chat.completions.create(**arguments)
348
349
  # Process the stream
349
350
  response = await self._process_stream(stream, self.default_request_params.model)
350
-
351
351
  # Track usage if response is valid and has usage data
352
352
  if (
353
353
  hasattr(response, "usage")
@@ -391,6 +391,9 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
391
391
  # Convert to dict and remove None values
392
392
  message_dict = message.model_dump()
393
393
  message_dict = {k: v for k, v in message_dict.items() if v is not None}
394
+ if model_name == "deepseek-r1-distill-llama-70b":
395
+ message_dict.pop("reasoning", None)
396
+
394
397
  messages.append(message_dict)
395
398
 
396
399
  message_text = message.content
@@ -412,9 +415,8 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
412
415
  )
413
416
 
414
417
  tool_results = []
415
-
418
+
416
419
  for tool_call in message.tool_calls:
417
-
418
420
  self.show_tool_call(
419
421
  available_tools,
420
422
  tool_call.function.name,
@@ -430,7 +432,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
430
432
  else from_json(tool_call.function.arguments, allow_partial=True),
431
433
  ),
432
434
  )
433
-
435
+
434
436
  try:
435
437
  result = await self.call_tool(tool_call_request, tool_call.id)
436
438
  self.show_tool_result(result)
@@ -439,10 +441,14 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
439
441
  except Exception as e:
440
442
  self.logger.error(f"Tool call {tool_call.id} failed with error: {e}")
441
443
  # Still add the tool_call_id with an error result to prevent missing responses
442
- error_result = CallToolResult(content=[TextContent(type="text", text=f"Tool call failed: {str(e)}")])
444
+ error_result = CallToolResult(
445
+ content=[TextContent(type="text", text=f"Tool call failed: {str(e)}")]
446
+ )
443
447
  tool_results.append((tool_call.id, error_result))
444
-
445
- converted_messages = OpenAIConverter.convert_function_results_to_openai(tool_results)
448
+
449
+ converted_messages = OpenAIConverter.convert_function_results_to_openai(
450
+ tool_results
451
+ )
446
452
  messages.extend(converted_messages)
447
453
 
448
454
  self.logger.debug(
@@ -17,24 +17,19 @@ class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):
17
17
 
18
18
  def _initialize_default_params(self, kwargs: dict) -> RequestParams:
19
19
  """Initialize OpenRouter-specific default parameters."""
20
+ # Get base defaults from parent (includes ModelDatabase lookup)
21
+ base_params = super()._initialize_default_params(kwargs)
22
+
23
+ # Override with OpenRouter-specific settings
20
24
  # OpenRouter model names include the provider, e.g., "google/gemini-flash-1.5"
21
25
  # The model should be passed in the 'model' kwarg during factory creation.
22
26
  chosen_model = kwargs.get("model", DEFAULT_OPENROUTER_MODEL)
23
- if not chosen_model:
24
- # Unlike Deepseek, OpenRouter *requires* a model path in the identifier.
25
- # The factory should extract this before calling the constructor.
26
- # We rely on the model being passed correctly via kwargs.
27
- # If it's still None here, it indicates an issue upstream (factory or user input).
28
- # However, the base class _get_model handles the error if model is None.
29
- pass
30
-
31
- return RequestParams(
32
- model=chosen_model, # Will be validated by base class
33
- systemPrompt=self.instruction,
34
- parallel_tool_calls=True, # Default based on OpenAI provider
35
- max_iterations=20, # Default based on OpenAI provider
36
- use_history=True, # Default based on OpenAI provider
37
- )
27
+ if chosen_model:
28
+ base_params.model = chosen_model
29
+ # If it's still None here, it indicates an issue upstream (factory or user input).
30
+ # However, the base class _get_model handles the error if model is None.
31
+
32
+ return base_params
38
33
 
39
34
  def _base_url(self) -> str:
40
35
  """Retrieve the OpenRouter base URL from config or use the default."""
@@ -16,15 +16,15 @@ class XAIAugmentedLLM(OpenAIAugmentedLLM):
16
16
 
17
17
  def _initialize_default_params(self, kwargs: dict) -> RequestParams:
18
18
  """Initialize xAI parameters"""
19
+ # Get base defaults from parent (includes ModelDatabase lookup)
20
+ base_params = super()._initialize_default_params(kwargs)
21
+
22
+ # Override with xAI-specific settings
19
23
  chosen_model = kwargs.get("model", DEFAULT_XAI_MODEL)
20
-
21
- return RequestParams(
22
- model=chosen_model,
23
- systemPrompt=self.instruction,
24
- parallel_tool_calls=False,
25
- max_iterations=20,
26
- use_history=True,
27
- )
24
+ base_params.model = chosen_model
25
+ base_params.parallel_tool_calls = False
26
+
27
+ return base_params
28
28
 
29
29
  def _base_url(self) -> str:
30
30
  base_url = os.getenv("XAI_BASE_URL", XAI_BASE_URL)
@@ -82,6 +82,7 @@ class RichProgressDisplay:
82
82
  ProgressAction.PLANNING: "bold blue",
83
83
  ProgressAction.READY: "dim green",
84
84
  ProgressAction.CALLING_TOOL: "bold magenta",
85
+ ProgressAction.TOOL_PROGRESS: "bold magenta",
85
86
  ProgressAction.FINISHED: "black on green",
86
87
  ProgressAction.SHUTDOWN: "black on red",
87
88
  ProgressAction.AGGREGATOR_INITIALIZED: "bold green",
@@ -118,16 +119,38 @@ class RichProgressDisplay:
118
119
  # Add special formatting for calling tool with dimmed arrow
119
120
  formatted_text = f"▎[dim]◀[/dim] {event.action.value}".ljust(17 + 11)
120
121
  description = f"[{self._get_action_style(event.action)}]{formatted_text}"
122
+ elif event.action == ProgressAction.TOOL_PROGRESS:
123
+ # Format similar to streaming - show progress numbers
124
+ if event.progress is not None:
125
+ if event.total is not None:
126
+ progress_display = f"{int(event.progress)}/{int(event.total)}"
127
+ else:
128
+ progress_display = str(int(event.progress))
129
+ else:
130
+ progress_display = "Processing"
131
+ formatted_text = f"▎[dim]▶[/dim] {progress_display}".ljust(17 + 11)
132
+ description = f"[{self._get_action_style(event.action)}]{formatted_text}"
121
133
  else:
122
134
  description = f"[{self._get_action_style(event.action)}]▎ {event.action.value:<15}"
123
135
 
124
- self._progress.update(
125
- task_id,
126
- description=description,
127
- target=event.target or task_name, # Use task_name as fallback for target
128
- details=event.details or "",
129
- task_name=task_name,
130
- )
136
+ # Update basic task information
137
+ update_kwargs = {
138
+ "description": description,
139
+ "target": event.target or task_name, # Use task_name as fallback for target
140
+ "details": event.details or "",
141
+ "task_name": task_name,
142
+ }
143
+
144
+ # For TOOL_PROGRESS events, update progress if available
145
+ if event.action == ProgressAction.TOOL_PROGRESS and event.progress is not None:
146
+ if event.total is not None:
147
+ update_kwargs["completed"] = event.progress
148
+ update_kwargs["total"] = event.total
149
+ else:
150
+ # If no total, just show as indeterminate progress
151
+ self._progress.reset(task_id)
152
+
153
+ self._progress.update(task_id, **update_kwargs)
131
154
 
132
155
  if (
133
156
  event.action == ProgressAction.INITIALIZED
@@ -156,3 +156,32 @@ def get_resource_text(result: ReadResourceResult, index: int = 0) -> Optional[st
156
156
  return content.text
157
157
 
158
158
  return None
159
+
160
+
161
+ def split_thinking_content(message: str) -> tuple[Optional[str], str]:
162
+ """
163
+ Split a message into thinking and content parts.
164
+
165
+ Extracts content between <thinking> tags and returns it along with the remaining content.
166
+
167
+ Args:
168
+ message: A string that may contain a <thinking>...</thinking> block followed by content
169
+
170
+ Returns:
171
+ A tuple of (thinking_content, main_content) where:
172
+ - thinking_content: The content inside <thinking> tags, or None if not found/parsing fails
173
+ - main_content: The content after the thinking block, or the entire message if no thinking block
174
+ """
175
+ import re
176
+
177
+ # Pattern to match <thinking>...</thinking> at the start of the message
178
+ pattern = r"^<think>(.*?)</think>\s*(.*)$"
179
+ match = re.match(pattern, message, re.DOTALL)
180
+
181
+ if match:
182
+ thinking_content = match.group(1).strip()
183
+ main_content = match.group(2).strip()
184
+ return (thinking_content, main_content)
185
+ else:
186
+ # No thinking block found or parsing failed
187
+ return (None, message)
@@ -12,6 +12,7 @@ from typing import (
12
12
 
13
13
  from mcp import GetPromptResult, ReadResourceResult
14
14
  from mcp.client.session import ClientSession
15
+ from mcp.shared.session import ProgressFnT
15
16
  from mcp.types import (
16
17
  CallToolResult,
17
18
  ListToolsResult,
@@ -136,6 +137,24 @@ class MCPAggregator(ContextDependent):
136
137
  # Lock for refreshing tools from a server
137
138
  self._refresh_lock = Lock()
138
139
 
140
+ def _create_progress_callback(self, server_name: str, tool_name: str) -> "ProgressFnT":
141
+ """Create a progress callback function for tool execution."""
142
+ async def progress_callback(progress: float, total: float | None, message: str | None) -> None:
143
+ """Handle progress notifications from MCP tool execution."""
144
+ logger.info(
145
+ "Tool progress update",
146
+ data={
147
+ "progress_action": ProgressAction.TOOL_PROGRESS,
148
+ "tool_name": tool_name,
149
+ "server_name": server_name,
150
+ "agent_name": self.agent_name,
151
+ "progress": progress,
152
+ "total": total,
153
+ "details": message or "", # Put the message in details column
154
+ },
155
+ )
156
+ return progress_callback
157
+
139
158
  async def close(self) -> None:
140
159
  """
141
160
  Close all persistent connections when the aggregator is deleted.
@@ -468,6 +487,7 @@ class MCPAggregator(ContextDependent):
468
487
  method_name: str,
469
488
  method_args: Dict[str, Any] = None,
470
489
  error_factory: Callable[[str], R] = None,
490
+ progress_callback: ProgressFnT | None = None,
471
491
  ) -> R:
472
492
  """
473
493
  Generic method to execute operations on a specific server.
@@ -479,6 +499,7 @@ class MCPAggregator(ContextDependent):
479
499
  method_name: Name of the method to call on the client session
480
500
  method_args: Arguments to pass to the method
481
501
  error_factory: Function to create an error return value if the operation fails
502
+ progress_callback: Optional progress callback for operations that support it
482
503
 
483
504
  Returns:
484
505
  Result from the operation or an error result
@@ -487,7 +508,12 @@ class MCPAggregator(ContextDependent):
487
508
  async def try_execute(client: ClientSession):
488
509
  try:
489
510
  method = getattr(client, method_name)
490
- return await method(**method_args)
511
+ # For call_tool method, check if we need to add progress_callback
512
+ if method_name == "call_tool" and progress_callback:
513
+ # The call_tool method signature includes progress_callback parameter
514
+ return await method(**method_args, progress_callback=progress_callback)
515
+ else:
516
+ return await method(**method_args)
491
517
  except Exception as e:
492
518
  error_msg = (
493
519
  f"Failed to {method_name} '{operation_name}' on server '{server_name}': {e}"
@@ -597,6 +623,10 @@ class MCPAggregator(ContextDependent):
597
623
  with tracer.start_as_current_span(f"MCP Tool: {server_name}/{local_tool_name}"):
598
624
  trace.get_current_span().set_attribute("tool_name", local_tool_name)
599
625
  trace.get_current_span().set_attribute("server_name", server_name)
626
+
627
+ # Create progress callback for this tool execution
628
+ progress_callback = self._create_progress_callback(server_name, local_tool_name)
629
+
600
630
  return await self._execute_on_server(
601
631
  server_name=server_name,
602
632
  operation_type="tool",
@@ -609,6 +639,7 @@ class MCPAggregator(ContextDependent):
609
639
  error_factory=lambda msg: CallToolResult(
610
640
  isError=True, content=[TextContent(type="text", text=msg)]
611
641
  ),
642
+ progress_callback=progress_callback,
612
643
  )
613
644
 
614
645
  async def get_prompt(
@@ -1,30 +0,0 @@
1
- from mcp_agent.core.request_params import RequestParams
2
- from mcp_agent.llm.provider_types import Provider
3
- from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
4
-
5
- GROQ_BASE_URL = "https://api.groq.com/openai/v1"
6
- DEFAULT_GROQ_MODEL = ""
7
-
8
-
9
- class GroqAugmentedLLM(OpenAIAugmentedLLM):
10
- def __init__(self, *args, **kwargs) -> None:
11
- super().__init__(*args, provider=Provider.GROQ, **kwargs)
12
-
13
- def _initialize_default_params(self, kwargs: dict) -> RequestParams:
14
- """Initialize Groq default parameters"""
15
- chosen_model = kwargs.get("model", DEFAULT_GROQ_MODEL)
16
-
17
- return RequestParams(
18
- model=chosen_model,
19
- systemPrompt=self.instruction,
20
- parallel_tool_calls=False,
21
- max_iterations=20,
22
- use_history=True,
23
- )
24
-
25
- def _base_url(self) -> str:
26
- base_url = None
27
- if self.context.config and self.context.config.groq:
28
- base_url = self.context.config.groq.base_url
29
-
30
- return base_url if base_url else GROQ_BASE_URL
File without changes