fast-agent-mcp 0.2.57__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

Files changed (234)
  1. fast_agent/__init__.py +127 -0
  2. fast_agent/agents/__init__.py +36 -0
  3. {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
  4. fast_agent/agents/llm_agent.py +217 -0
  5. fast_agent/agents/llm_decorator.py +486 -0
  6. mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
  7. fast_agent/agents/tool_agent.py +168 -0
  8. {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
  9. {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
  10. {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
  11. {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
  12. {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
  13. {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
  14. {mcp_agent → fast_agent}/cli/__main__.py +5 -3
  15. {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
  16. {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
  17. {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
  18. {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
  19. {mcp_agent → fast_agent}/cli/commands/setup.py +64 -134
  20. {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
  21. {mcp_agent → fast_agent}/cli/main.py +36 -16
  22. {mcp_agent → fast_agent}/cli/terminal.py +2 -2
  23. {mcp_agent → fast_agent}/config.py +13 -2
  24. fast_agent/constants.py +8 -0
  25. {mcp_agent → fast_agent}/context.py +24 -19
  26. {mcp_agent → fast_agent}/context_dependent.py +9 -5
  27. fast_agent/core/__init__.py +17 -0
  28. {mcp_agent → fast_agent}/core/agent_app.py +39 -36
  29. fast_agent/core/core_app.py +135 -0
  30. {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
  31. {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
  32. {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
  33. {mcp_agent → fast_agent}/core/fastagent.py +32 -32
  34. fast_agent/core/logging/__init__.py +5 -0
  35. {mcp_agent → fast_agent/core}/logging/events.py +3 -3
  36. {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
  37. {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
  38. {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
  39. {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
  40. fast_agent/core/prompt.py +9 -0
  41. {mcp_agent → fast_agent}/core/validation.py +4 -4
  42. fast_agent/event_progress.py +61 -0
  43. fast_agent/history/history_exporter.py +44 -0
  44. {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
  45. {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
  46. {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
  47. {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
  48. {mcp_agent → fast_agent}/human_input/types.py +1 -18
  49. fast_agent/interfaces.py +228 -0
  50. fast_agent/llm/__init__.py +9 -0
  51. mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +128 -218
  52. fast_agent/llm/internal/passthrough.py +137 -0
  53. mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
  54. mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
  55. fast_agent/llm/internal/slow.py +38 -0
  56. {mcp_agent → fast_agent}/llm/memory.py +40 -30
  57. {mcp_agent → fast_agent}/llm/model_database.py +35 -2
  58. {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
  59. fast_agent/llm/model_info.py +126 -0
  60. {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
  61. fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
  62. {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
  63. fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
  64. fast_agent/llm/provider/bedrock/llm_bedrock.py +2192 -0
  65. {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
  66. fast_agent/llm/provider/google/llm_google_native.py +431 -0
  67. mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
  68. mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
  69. mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
  70. mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
  71. mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
  72. mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
  73. mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -206
  74. mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
  75. mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
  76. mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
  77. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
  78. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
  79. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
  80. {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
  81. {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
  82. {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
  83. {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
  84. fast_agent/mcp/__init__.py +43 -0
  85. {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
  86. {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
  87. {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
  88. fast_agent/mcp/helpers/__init__.py +36 -0
  89. fast_agent/mcp/helpers/content_helpers.py +183 -0
  90. {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
  91. {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
  92. fast_agent/mcp/interfaces.py +93 -0
  93. {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
  94. {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
  95. {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
  96. {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
  97. {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
  98. {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
  99. fast_agent/mcp/prompt.py +159 -0
  100. mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
  101. {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
  102. {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
  103. fast_agent/mcp/prompts/__main__.py +7 -0
  104. {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
  105. {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
  106. {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
  107. {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
  108. {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
  109. {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
  110. {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
  111. {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
  112. fast_agent/mcp/ui_agent.py +48 -0
  113. fast_agent/mcp/ui_mixin.py +209 -0
  114. fast_agent/mcp_server_registry.py +90 -0
  115. {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
  116. {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
  117. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +25 -3
  118. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
  119. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
  120. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
  121. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
  122. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
  123. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
  124. {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
  125. {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
  126. {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
  127. {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
  128. {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
  129. {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
  130. {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
  131. {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
  132. {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
  133. {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
  134. {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
  135. {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
  136. fast_agent/resources/setup/.gitignore +24 -0
  137. fast_agent/resources/setup/agent.py +18 -0
  138. fast_agent/resources/setup/fastagent.config.yaml +44 -0
  139. fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
  140. fast_agent/tools/elicitation.py +369 -0
  141. fast_agent/types/__init__.py +32 -0
  142. fast_agent/types/llm_stop_reason.py +77 -0
  143. fast_agent/ui/__init__.py +38 -0
  144. fast_agent/ui/console_display.py +1005 -0
  145. {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +56 -39
  146. mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
  147. {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
  148. {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
  149. fast_agent/ui/mcp_ui_utils.py +224 -0
  150. {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
  151. {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
  152. {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
  153. {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/METADATA +7 -7
  154. fast_agent_mcp-0.3.0.dist-info/RECORD +202 -0
  155. fast_agent_mcp-0.3.0.dist-info/entry_points.txt +5 -0
  156. fast_agent_mcp-0.2.57.dist-info/RECORD +0 -192
  157. fast_agent_mcp-0.2.57.dist-info/entry_points.txt +0 -6
  158. mcp_agent/__init__.py +0 -114
  159. mcp_agent/agents/agent.py +0 -92
  160. mcp_agent/agents/workflow/__init__.py +0 -1
  161. mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
  162. mcp_agent/app.py +0 -175
  163. mcp_agent/core/__init__.py +0 -26
  164. mcp_agent/core/prompt.py +0 -191
  165. mcp_agent/event_progress.py +0 -134
  166. mcp_agent/human_input/handler.py +0 -81
  167. mcp_agent/llm/__init__.py +0 -2
  168. mcp_agent/llm/augmented_llm_passthrough.py +0 -232
  169. mcp_agent/llm/augmented_llm_slow.py +0 -53
  170. mcp_agent/llm/providers/__init__.py +0 -8
  171. mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -717
  172. mcp_agent/llm/providers/augmented_llm_bedrock.py +0 -1788
  173. mcp_agent/llm/providers/augmented_llm_google_native.py +0 -495
  174. mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
  175. mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
  176. mcp_agent/llm/sampling_format_converter.py +0 -37
  177. mcp_agent/logging/__init__.py +0 -0
  178. mcp_agent/mcp/__init__.py +0 -50
  179. mcp_agent/mcp/helpers/__init__.py +0 -25
  180. mcp_agent/mcp/helpers/content_helpers.py +0 -187
  181. mcp_agent/mcp/interfaces.py +0 -266
  182. mcp_agent/mcp/prompts/__init__.py +0 -0
  183. mcp_agent/mcp/prompts/__main__.py +0 -10
  184. mcp_agent/mcp_server_registry.py +0 -343
  185. mcp_agent/tools/tool_definition.py +0 -14
  186. mcp_agent/ui/console_display.py +0 -790
  187. mcp_agent/ui/console_display_legacy.py +0 -401
  188. {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
  189. {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
  190. {mcp_agent → fast_agent}/cli/constants.py +0 -0
  191. {mcp_agent → fast_agent}/core/error_handling.py +0 -0
  192. {mcp_agent → fast_agent}/core/exceptions.py +0 -0
  193. {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
  194. {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
  195. {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
  196. {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
  197. {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
  198. {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
  199. {mcp_agent → fast_agent}/mcp/common.py +0 -0
  200. {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
  201. {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
  202. {mcp_agent → fast_agent}/py.typed +0 -0
  203. {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
  204. {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  205. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
  206. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
  207. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
  208. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
  209. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  210. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  211. {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
  212. {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
  213. {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
  214. {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
  215. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
  216. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
  217. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
  218. {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
  219. {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
  220. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
  221. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
  222. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
  223. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
  224. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
  225. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
  226. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
  227. {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
  228. {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
  229. {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
  230. {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
  231. {mcp_agent → fast_agent/ui}/console.py +0 -0
  232. {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
  233. {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/WHEEL +0 -0
  234. {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0
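
The headline change in 0.3.0 is the package rename: the mcp_agent tree becomes fast_agent, provider implementations move under fast_agent/llm/provider/, the Augmented prefix is dropped from LLM class names, and PromptMessageMultipart becomes PromptMessageExtended. A minimal before/after sketch of what this means for imports, assembled from the renames listed above:

# fast-agent-mcp 0.2.57
from mcp_agent.core.request_params import RequestParams
from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

# fast-agent-mcp 0.3.0
from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
from fast_agent.types import PromptMessageExtended, RequestParams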
mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py

@@ -6,17 +6,16 @@ from openai.types.chat import (
     ChatCompletionMessage,
 )
 
-from mcp_agent.core.request_params import RequestParams
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
-from mcp_agent.mcp.interfaces import ModelT
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from fast_agent.interfaces import ModelT
+from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.types import PromptMessageExtended, RequestParams
 
 DEEPSEEK_BASE_URL = "https://api.deepseek.com"
 DEFAULT_DEEPSEEK_MODEL = "deepseekchat"  # current Deepseek only has two type models
 
 
-class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
+class DeepSeekLLM(OpenAILLM):
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(*args, provider=Provider.DEEPSEEK, **kwargs)
 
@@ -24,11 +23,11 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
         """Initialize Deepseek-specific default parameters"""
         # Get base defaults from parent (includes ModelDatabase lookup)
         base_params = super()._initialize_default_params(kwargs)
-
+
         # Override with Deepseek-specific settings
         chosen_model = kwargs.get("model", DEFAULT_DEEPSEEK_MODEL)
         base_params.model = chosen_model
-
+
         return base_params
 
     def _base_url(self) -> str:
@@ -40,10 +39,10 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
 
     async def _apply_prompt_provider_specific_structured(
         self,
-        multipart_messages: List[PromptMessageMultipart],
+        multipart_messages: List[PromptMessageExtended],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> Tuple[ModelT | None, PromptMessageMultipart]:  # noqa: F821
+    ) -> Tuple[ModelT | None, PromptMessageExtended]:  # noqa: F821
         request_params = self.get_request_params(request_params)
 
         request_params.response_format = {"type": "json_object"}
@@ -78,7 +77,7 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
 - All required fields must be included"""
         )
 
-        result: PromptMessageMultipart = await self._apply_prompt_provider_specific(
+        result: PromptMessageExtended = await self._apply_prompt_provider_specific(
             multipart_messages, request_params
         )
         return self._structured_from_multipart(result, model)
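
The DeepSeek diff shows the provider pattern this release standardizes on: subclass the shared OpenAILLM base, pass a Provider enum to its constructor, override the default model in _initialize_default_params, and point _base_url at the provider endpoint. A sketch of a hypothetical OpenAI-compatible provider built the same way (the class name, endpoint, and model id are illustrative, and the _initialize_default_params signature is assumed from the calls visible in these hunks):

from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
from fast_agent.llm.provider_types import Provider
from fast_agent.types import RequestParams

ACME_BASE_URL = "https://api.acme.example/v1"  # hypothetical endpoint
DEFAULT_ACME_MODEL = "acme-chat"  # hypothetical model id


class AcmeLLM(OpenAILLM):
    def __init__(self, *args, **kwargs) -> None:
        # Provider.GENERIC is fast-agent's catch-all for OpenAI-compatible APIs
        super().__init__(*args, provider=Provider.GENERIC, **kwargs)

    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
        # Base defaults come from the parent (includes ModelDatabase lookup)
        base_params = super()._initialize_default_params(kwargs)
        base_params.model = kwargs.get("model", DEFAULT_ACME_MODEL)
        return base_params

    def _base_url(self) -> str:
        return ACME_BASE_URL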
mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py

@@ -1,15 +1,15 @@
 import os
 
-from mcp_agent.core.request_params import RequestParams
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.types import RequestParams
 
 DEFAULT_OLLAMA_BASE_URL = "http://localhost:11434/v1"
 DEFAULT_OLLAMA_MODEL = "llama3.2:latest"
 DEFAULT_OLLAMA_API_KEY = "ollama"
 
 
-class GenericAugmentedLLM(OpenAIAugmentedLLM):
+class GenericLLM(OpenAILLM):
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(
             *args, provider=Provider.GENERIC, **kwargs
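
The generic provider's defaults target a local Ollama server speaking the OpenAI wire protocol. As a rough illustration of what those three constants imply, here is the equivalent raw client call using the openai package directly (fast-agent's own plumbing adds history, tools, and usage tracking on top):

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI(
    base_url="http://localhost:11434/v1",  # DEFAULT_OLLAMA_BASE_URL
    api_key="ollama",  # DEFAULT_OLLAMA_API_KEY is a placeholder, not a real key
)


async def demo() -> None:
    completion = await client.chat.completions.create(
        model="llama3.2:latest",  # DEFAULT_OLLAMA_MODEL
        messages=[{"role": "user", "content": "hello"}],
    )
    print(completion.choices[0].message.content)


asyncio.run(demo())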
mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py

@@ -1,12 +1,12 @@
-from mcp_agent.core.request_params import RequestParams
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.types import RequestParams
 
 GOOGLE_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai"
 DEFAULT_GOOGLE_MODEL = "gemini-2.0-flash"
 
 
-class GoogleOaiAugmentedLLM(OpenAIAugmentedLLM):
+class GoogleOaiLLM(OpenAILLM):
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(*args, provider=Provider.GOOGLE_OAI, **kwargs)
 
mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py

@@ -2,14 +2,13 @@ from typing import List, Tuple, Type, cast
 
 from pydantic_core import from_json
 
-from mcp_agent.core.request_params import RequestParams
-from mcp_agent.llm.model_database import ModelDatabase
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
-from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.helpers.content_helpers import get_text, split_thinking_content
-from mcp_agent.mcp.interfaces import ModelT
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from fast_agent.core.logging.logger import get_logger
+from fast_agent.interfaces import ModelT
+from fast_agent.llm.model_database import ModelDatabase
+from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.mcp.helpers.content_helpers import get_text, split_thinking_content
+from fast_agent.types import PromptMessageExtended, RequestParams
 
 GROQ_BASE_URL = "https://api.groq.com/openai/v1"
 DEFAULT_GROQ_MODEL = "moonshotai/kimi-k2-instruct"
@@ -19,7 +18,7 @@ DEFAULT_GROQ_MODEL = "moonshotai/kimi-k2-instruct"
 ### - deduplicating between this and the deepseek llm
 
 
-class GroqAugmentedLLM(OpenAIAugmentedLLM):
+class GroqLLM(OpenAILLM):
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(*args, provider=Provider.GROQ, **kwargs)
 
@@ -37,10 +36,10 @@ class GroqAugmentedLLM(OpenAIAugmentedLLM):
 
     async def _apply_prompt_provider_specific_structured(
         self,
-        multipart_messages: List[PromptMessageMultipart],
+        multipart_messages: List[PromptMessageExtended],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> Tuple[ModelT | None, PromptMessageMultipart]:  # noqa: F821
+    ) -> Tuple[ModelT | None, PromptMessageExtended]:  # noqa: F821
         request_params = self.get_request_params(request_params)
 
         assert self.default_request_params
@@ -51,9 +50,7 @@ class GroqAugmentedLLM(OpenAIAugmentedLLM):
 
         # Create a cleaner format description from full schema
         full_schema = model.model_json_schema()
-        format_description = self._schema_to_json_object(
-            full_schema, full_schema.get("$defs")
-        )
+        format_description = self._schema_to_json_object(full_schema, full_schema.get("$defs"))
 
         multipart_messages[-1].add_text(
             f"""YOU MUST RESPOND WITH A JSON OBJECT IN EXACTLY THIS FORMAT:
@@ -64,9 +61,10 @@ IMPORTANT RULES:
 - Do NOT include "properties" or "schema" wrappers
 - Do NOT use code fences or markdown
 - The response must be valid JSON that matches the format above
-- All required fields must be included""")
+- All required fields must be included"""
+        )
 
-        result: PromptMessageMultipart = await self._apply_prompt_provider_specific(
+        result: PromptMessageExtended = await self._apply_prompt_provider_specific(
             multipart_messages, request_params
         )
         reasoning_mode: str | None = ModelDatabase.get_reasoning(llm_model)
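
Groq's structured-output path does not rely on a native JSON-schema response format; it renders the Pydantic model's JSON schema into explicit instructions appended to the final message. A standalone sketch of the same idea (_schema_to_json_object is internal to fast-agent, so plain json formatting stands in for it here):

import json

from pydantic import BaseModel


class WeatherReport(BaseModel):
    # Example output model; any Pydantic model works the same way
    city: str
    temperature_c: float


# model_json_schema() returns the full JSON schema, including any $defs
full_schema = WeatherReport.model_json_schema()
format_description = json.dumps(
    {name: spec.get("type", "object") for name, spec in full_schema["properties"].items()},
    indent=2,
)

# Mirrors multipart_messages[-1].add_text(...) in the hunk above
instructions = f"""YOU MUST RESPOND WITH A JSON OBJECT IN EXACTLY THIS FORMAT:
{format_description}

IMPORTANT RULES:
- Do NOT include "properties" or "schema" wrappers
- Do NOT use code fences or markdown
- The response must be valid JSON that matches the format above
- All required fields must be included"""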
mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py

@@ -1,9 +1,9 @@
-from typing import Dict, List
+from typing import Any, Dict, List
 
+from mcp import Tool
 from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
-    CallToolResult,
     ContentBlock,
     TextContent,
 )
@@ -18,23 +18,20 @@ from openai.types.chat import (
     ChatCompletionToolParam,
 )
 from pydantic_core import from_json
-from rich.text import Text
 
-from mcp_agent.core.exceptions import ProviderKeyError
-from mcp_agent.core.prompt import Prompt
-from mcp_agent.event_progress import ProgressAction
-from mcp_agent.llm.augmented_llm import (
-    AugmentedLLM,
+from fast_agent.core.exceptions import ProviderKeyError
+from fast_agent.core.logging.logger import get_logger
+from fast_agent.core.prompt import Prompt
+from fast_agent.event_progress import ProgressAction
+from fast_agent.llm.fastagent_llm import (
+    FastAgentLLM,
     RequestParams,
 )
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.providers.multipart_converter_openai import OpenAIConverter, OpenAIMessage
-from mcp_agent.llm.providers.sampling_converter_openai import (
-    OpenAISamplingConverter,
-)
-from mcp_agent.llm.usage_tracking import TurnUsage
-from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from fast_agent.llm.provider.openai.multipart_converter_openai import OpenAIConverter, OpenAIMessage
+from fast_agent.llm.provider_types import Provider
+from fast_agent.llm.usage_tracking import TurnUsage
+from fast_agent.types import PromptMessageExtended
+from fast_agent.types.llm_stop_reason import LlmStopReason
 
 _logger = get_logger(__name__)
 
@@ -42,30 +39,22 @@ DEFAULT_OPENAI_MODEL = "gpt-4.1-mini"
 DEFAULT_REASONING_EFFORT = "medium"
 
 
-class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletionMessage]):
-    """
-    The basic building block of agentic systems is an LLM enhanced with augmentations
-    such as retrieval, tools, and memory provided from a collection of MCP servers.
-    This implementation uses OpenAI's ChatCompletion as the LLM.
-    """
-
+class OpenAILLM(FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage]):
     # OpenAI-specific parameter exclusions
     OPENAI_EXCLUDE_FIELDS = {
-        AugmentedLLM.PARAM_MESSAGES,
-        AugmentedLLM.PARAM_MODEL,
-        AugmentedLLM.PARAM_MAX_TOKENS,
-        AugmentedLLM.PARAM_SYSTEM_PROMPT,
-        AugmentedLLM.PARAM_PARALLEL_TOOL_CALLS,
-        AugmentedLLM.PARAM_USE_HISTORY,
-        AugmentedLLM.PARAM_MAX_ITERATIONS,
-        AugmentedLLM.PARAM_TEMPLATE_VARS,
+        FastAgentLLM.PARAM_MESSAGES,
+        FastAgentLLM.PARAM_MODEL,
+        FastAgentLLM.PARAM_MAX_TOKENS,
+        FastAgentLLM.PARAM_SYSTEM_PROMPT,
+        FastAgentLLM.PARAM_PARALLEL_TOOL_CALLS,
+        FastAgentLLM.PARAM_USE_HISTORY,
+        FastAgentLLM.PARAM_MAX_ITERATIONS,
+        FastAgentLLM.PARAM_TEMPLATE_VARS,
+        FastAgentLLM.PARAM_MCP_METADATA,
+        FastAgentLLM.PARAM_STOP_SEQUENCES,
     }
 
     def __init__(self, provider: Provider = Provider.OPENAI, *args, **kwargs) -> None:
-        # Set type_converter before calling super().__init__
-        if "type_converter" not in kwargs:
-            kwargs["type_converter"] = OpenAISamplingConverter
-
         super().__init__(*args, provider=provider, **kwargs)
 
         # Initialize logger with name if available
@@ -143,8 +132,14 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             # Use base class method for token estimation and progress emission
             estimated_tokens = self._update_streaming_progress(content, model, estimated_tokens)
 
-        # Get the final completion with usage data
-        final_completion = state.get_final_completion()
+        # Check if we hit the length limit to avoid LengthFinishReasonError
+        current_snapshot = state.current_completion_snapshot
+        if current_snapshot.choices and current_snapshot.choices[0].finish_reason == "length":
+            # Return the current snapshot directly to avoid exception
+            final_completion = current_snapshot
+        else:
+            # Get the final completion with usage data (may include structured output parsing)
+            final_completion = state.get_final_completion()
 
         # Log final usage information
         if hasattr(final_completion, "usage") and final_completion.usage:
@@ -295,9 +290,10 @@
 
     async def _openai_completion(
         self,
-        message: OpenAIMessage,
+        message: List[OpenAIMessage] | None,
         request_params: RequestParams | None = None,
-    ) -> List[ContentBlock]:
+        tools: List[Tool] | None = None,
+    ) -> PromptMessageExtended:
         """
         Process a query using an LLM and available tools.
         The default implementation uses OpenAI's ChatCompletion as the LLM.
@@ -306,7 +302,7 @@
 
         request_params = self.get_request_params(request_params=request_params)
 
-        responses: List[ContentBlock] = []
+        response_content_blocks: List[ContentBlock] = []
         model_name = self.default_request_params.model or DEFAULT_OPENAI_MODEL
 
         # TODO -- move this in to agent context management / agent group handling
@@ -316,9 +312,9 @@
             messages.append(ChatCompletionSystemMessageParam(role="system", content=system_prompt))
 
         messages.extend(self.history.get(include_completion_history=request_params.use_history))
-        messages.append(message)
+        if message is not None:
+            messages.extend(message)
 
-        response = await self.aggregator.list_tools()
         available_tools: List[ChatCompletionToolParam] | None = [
             {
                 "type": "function",
@@ -328,7 +324,7 @@
                     "parameters": self.adjust_schema(tool.inputSchema),
                 },
             }
-            for tool in response.tools
+            for tool in tools or []
         ]
 
         if not available_tools:
@@ -338,154 +334,91 @@
             available_tools = []
 
         # we do NOT send "stop sequences" as this causes errors with mutlimodal processing
-        for i in range(request_params.max_iterations):
-            arguments = self._prepare_api_request(messages, available_tools, request_params)
-            self.logger.debug(f"OpenAI completion requested for: {arguments}")
-
-            self._log_chat_progress(self.chat_turn(), model=self.default_request_params.model)
-
-            # Use basic streaming API
-            stream = await self._openai_client().chat.completions.create(**arguments)
-            # Process the stream
-            response = await self._process_stream(stream, self.default_request_params.model)
-            # Track usage if response is valid and has usage data
-            if (
-                hasattr(response, "usage")
-                and response.usage
-                and not isinstance(response, BaseException)
-            ):
-                try:
-                    model_name = self.default_request_params.model or DEFAULT_OPENAI_MODEL
-                    turn_usage = TurnUsage.from_openai(response.usage, model_name)
-                    self._finalize_turn_usage(turn_usage)
-                except Exception as e:
-                    self.logger.warning(f"Failed to track usage: {e}")
-
-            self.logger.debug(
-                "OpenAI completion response:",
-                data=response,
-            )
-
-            if isinstance(response, AuthenticationError):
-                raise ProviderKeyError(
-                    "Rejected OpenAI API key",
-                    "The configured OpenAI API key was rejected.\n"
-                    "Please check that your API key is valid and not expired.",
-                ) from response
-            elif isinstance(response, BaseException):
-                self.logger.error(f"Error: {response}")
-                break
-
-            if not response.choices or len(response.choices) == 0:
-                # No response from the model, we're done
-                break
-
-            choice = response.choices[0]
-            message = choice.message
-            # prep for image/audio gen models
-            if message.content:
-                responses.append(TextContent(type="text", text=message.content))
-
-            # ParsedChatCompletionMessage is compatible with ChatCompletionMessage
-            # since it inherits from it, so we can use it directly
-            # Convert to dict and remove None values
-            message_dict = message.model_dump()
-            message_dict = {k: v for k, v in message_dict.items() if v is not None}
-            if model_name in (
-                "deepseek-r1-distill-llama-70b",
-                "openai/gpt-oss-120b",
-                "openai/gpt-oss-20b",
-            ):
-                message_dict.pop("reasoning", None)
-                message_dict.pop("channel", None)
-
-            messages.append(message_dict)
-
-            message_text = message.content
-            if await self._is_tool_stop_reason(choice.finish_reason) and message.tool_calls:
-                if message_text:
-                    await self.show_assistant_message(
-                        message_text,
-                        message.tool_calls[
-                            0
-                        ].function.name,  # TODO support displaying multiple tool calls
-                    )
-                else:
-                    await self.show_assistant_message(
-                        Text(
-                            "the assistant requested tool calls",
-                            style="dim green italic",
-                        ),
-                        message.tool_calls[0].function.name,
-                    )
+        arguments: dict[str, Any] = self._prepare_api_request(
+            messages, available_tools, request_params
+        )
+        if not self._reasoning and request_params.stopSequences:
+            arguments["stop"] = request_params.stopSequences
 
-                tool_results = []
+        self.logger.debug(f"OpenAI completion requested for: {arguments}")
 
-                for tool_call in message.tool_calls:
-                    self.show_tool_call(
-                        available_tools,
-                        tool_call.function.name,
-                        tool_call.function.arguments,
-                    )
-                    tool_call_request = CallToolRequest(
-                        method="tools/call",
-                        params=CallToolRequestParams(
-                            name=tool_call.function.name,
-                            arguments={}
-                            if not tool_call.function.arguments
-                            or tool_call.function.arguments.strip() == ""
-                            else from_json(tool_call.function.arguments, allow_partial=True),
-                        ),
-                    )
-
-                    try:
-                        result = await self.call_tool(tool_call_request, tool_call.id)
-                        self.show_tool_result(result)
-                        tool_results.append((tool_call.id, result))
-                        responses.extend(result.content)
-                    except Exception as e:
-                        self.logger.error(f"Tool call {tool_call.id} failed with error: {e}")
-                        # Still add the tool_call_id with an error result to prevent missing responses
-                        error_result = CallToolResult(
-                            content=[TextContent(type="text", text=f"Tool call failed: {str(e)}")]
-                        )
-                        tool_results.append((tool_call.id, error_result))
-
-                converted_messages = OpenAIConverter.convert_function_results_to_openai(
-                    tool_results
-                )
-                messages.extend(converted_messages)
+        self._log_chat_progress(self.chat_turn(), model=self.default_request_params.model)
+        model_name = self.default_request_params.model or DEFAULT_OPENAI_MODEL
 
-                self.logger.debug(
-                    f"Iteration {i}: Tool call results: {str(tool_results) if tool_results else 'None'}"
-                )
-            elif choice.finish_reason == "length":
-                # We have reached the max tokens limit
-                self.logger.debug(f"Iteration {i}: Stopping because finish_reason is 'length'")
-                if request_params and request_params.maxTokens is not None:
-                    message_text = Text(
-                        f"the assistant has reached the maximum token limit ({request_params.maxTokens})",
-                        style="dim green italic",
-                    )
-                else:
-                    message_text = Text(
-                        "the assistant has reached the maximum token limit",
-                        style="dim green italic",
-                    )
+        # Use basic streaming API
+        stream = await self._openai_client().chat.completions.create(**arguments)
+        # Process the stream
+        response = await self._process_stream(stream, model_name)
+        # Track usage if response is valid and has usage data
+        if (
+            hasattr(response, "usage")
+            and response.usage
+            and not isinstance(response, BaseException)
+        ):
+            try:
+                turn_usage = TurnUsage.from_openai(response.usage, model_name)
+                self._finalize_turn_usage(turn_usage)
+            except Exception as e:
+                self.logger.warning(f"Failed to track usage: {e}")
+
+        self.logger.debug(
+            "OpenAI completion response:",
+            data=response,
+        )
 
-                await self.show_assistant_message(message_text)
-                break
-            elif choice.finish_reason == "content_filter":
-                # The response was filtered by the content filter
-                self.logger.debug(
-                    f"Iteration {i}: Stopping because finish_reason is 'content_filter'"
+        if isinstance(response, AuthenticationError):
+            raise ProviderKeyError(
+                "Rejected OpenAI API key",
+                "The configured OpenAI API key was rejected.\n"
+                "Please check that your API key is valid and not expired.",
+            ) from response
+        elif isinstance(response, BaseException):
+            self.logger.error(f"Error: {response}")
+
+        choice = response.choices[0]
+        message = choice.message
+        # prep for image/audio gen models
+        if message.content:
+            response_content_blocks.append(TextContent(type="text", text=message.content))
+
+        # ParsedChatCompletionMessage is compatible with ChatCompletionMessage
+        # since it inherits from it, so we can use it directly
+        # Convert to dict and remove None values
+        message_dict = message.model_dump()
+        message_dict = {k: v for k, v in message_dict.items() if v is not None}
+        if model_name in (
+            "deepseek-r1-distill-llama-70b",
+            "openai/gpt-oss-120b",
+            "openai/gpt-oss-20b",
+        ):
+            message_dict.pop("reasoning", None)
+            message_dict.pop("channel", None)
+
+        messages.append(message_dict)
+        stop_reason = LlmStopReason.END_TURN
+        requested_tool_calls: Dict[str, CallToolRequest] | None = None
+        if await self._is_tool_stop_reason(choice.finish_reason) and message.tool_calls:
+            requested_tool_calls = {}
+            stop_reason = LlmStopReason.TOOL_USE
+            for tool_call in message.tool_calls:
+                tool_call_request = CallToolRequest(
+                    method="tools/call",
+                    params=CallToolRequestParams(
+                        name=tool_call.function.name,
+                        arguments={}
+                        if not tool_call.function.arguments
+                        or tool_call.function.arguments.strip() == ""
+                        else from_json(tool_call.function.arguments, allow_partial=True),
+                    ),
                 )
-                break
-            elif choice.finish_reason == "stop":
-                self.logger.debug(f"Iteration {i}: Stopping because finish_reason is 'stop'")
-                if message_text:
-                    await self.show_assistant_message(message_text, "")
-                break
+                requested_tool_calls[tool_call.id] = tool_call_request
+        elif choice.finish_reason == "length":
+            stop_reason = LlmStopReason.MAX_TOKENS
+            # We have reached the max tokens limit
+            self.logger.debug(" Stopping because finish_reason is 'length'")
+        elif choice.finish_reason == "content_filter":
+            stop_reason = LlmStopReason.SAFETY
+            self.logger.debug(" Stopping because finish_reason is 'content_filter'")
 
         if request_params.use_history:
             # Get current prompt messages
@@ -501,19 +434,21 @@
 
         self._log_chat_finished(model=self.default_request_params.model)
 
-        return responses
+        return Prompt.assistant(
+            *response_content_blocks, stop_reason=stop_reason, tool_calls=requested_tool_calls
+        )
 
     async def _is_tool_stop_reason(self, finish_reason: str) -> bool:
        return True
 
     async def _apply_prompt_provider_specific(
         self,
-        multipart_messages: List["PromptMessageMultipart"],
+        multipart_messages: List["PromptMessageExtended"],
         request_params: RequestParams | None = None,
+        tools: List[Tool] | None = None,
         is_template: bool = False,
-    ) -> PromptMessageMultipart:
+    ) -> PromptMessageExtended:
         # Reset tool call counter for new turn
-        self._reset_turn_tool_calls()
 
         last_message = multipart_messages[-1]
 
@@ -524,29 +459,21 @@
         )
         converted = []
         for msg in messages_to_add:
-            converted.append(OpenAIConverter.convert_to_openai(msg))
+            # convert_to_openai now returns a list of messages
+            converted.extend(OpenAIConverter.convert_to_openai(msg))
 
-        # TODO -- this looks like a defect from previous apply_prompt implementation.
         self.history.extend(converted, is_prompt=is_template)
 
         if "assistant" == last_message.role:
             return last_message
 
-        # For assistant messages: Return the last message (no completion needed)
-        message_param: OpenAIMessage = OpenAIConverter.convert_to_openai(last_message)
-        responses: List[ContentBlock] = await self._openai_completion(
-            message_param,
-            request_params,
-        )
-        return Prompt.assistant(*responses)
-
-    async def pre_tool_call(self, tool_call_id: str | None, request: CallToolRequest):
-        return request
+        converted_messages = OpenAIConverter.convert_to_openai(last_message)
+        if not converted_messages:
+            # Fallback for empty conversion
+            converted_messages = [{"role": "user", "content": ""}]
 
-    async def post_tool_call(
-        self, tool_call_id: str | None, request: CallToolRequest, result: CallToolResult
-    ):
-        return result
+        # Call completion without additional messages (all messages are now in history)
+        return await self._openai_completion(converted_messages, request_params, tools)
 
     def _prepare_api_request(
         self, messages, tools: List[ChatCompletionToolParam] | None, request_params: RequestParams
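
The largest behavioral change is in _openai_completion: the old max_iterations loop that executed tool calls inline is gone, replaced by a single completion whose outcome is reported through LlmStopReason, leaving tool execution to the agent layer. A condensed sketch of the finish_reason mapping used in the hunk above (the enum members are named in the diff; their values and the helper function are illustrative):

from enum import Enum, auto


class LlmStopReason(Enum):
    # Member names as they appear in the diff; auto() values are assumptions
    END_TURN = auto()
    TOOL_USE = auto()
    MAX_TOKENS = auto()
    SAFETY = auto()


def map_finish_reason(finish_reason: str | None, has_tool_calls: bool) -> LlmStopReason:
    # Mirrors the branch order in _openai_completion: tool calls win,
    # then length and content_filter, with END_TURN as the default
    if has_tool_calls:
        return LlmStopReason.TOOL_USE
    if finish_reason == "length":
        return LlmStopReason.MAX_TOKENS
    if finish_reason == "content_filter":
        return LlmStopReason.SAFETY
    return LlmStopReason.END_TURN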
mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py

@@ -1,15 +1,15 @@
 import os
 
-from mcp_agent.core.request_params import RequestParams
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.types import RequestParams
 
 DEFAULT_OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
 # No single default model for OpenRouter, users must specify full path
 DEFAULT_OPENROUTER_MODEL = None
 
 
-class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):
+class OpenRouterLLM(OpenAILLM):
     """Augmented LLM provider for OpenRouter, using an OpenAI-compatible API."""
 
     def __init__(self, *args, **kwargs) -> None:
@@ -19,7 +19,7 @@ class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):
         """Initialize OpenRouter-specific default parameters."""
         # Get base defaults from parent (includes ModelDatabase lookup)
         base_params = super()._initialize_default_params(kwargs)
-
+
         # Override with OpenRouter-specific settings
         # OpenRouter model names include the provider, e.g., "google/gemini-flash-1.5"
         # The model should be passed in the 'model' kwarg during factory creation.
@@ -28,7 +28,7 @@ class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):
         base_params.model = chosen_model
         # If it's still None here, it indicates an issue upstream (factory or user input).
         # However, the base class _get_model handles the error if model is None.
-
+
         return base_params
 
     def _base_url(self) -> str: