fast-agent-mcp 0.2.57__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

Files changed (234)
  1. fast_agent/__init__.py +127 -0
  2. fast_agent/agents/__init__.py +36 -0
  3. {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
  4. fast_agent/agents/llm_agent.py +217 -0
  5. fast_agent/agents/llm_decorator.py +486 -0
  6. mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
  7. fast_agent/agents/tool_agent.py +168 -0
  8. {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
  9. {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
  10. {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
  11. {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
  12. {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
  13. {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
  14. {mcp_agent → fast_agent}/cli/__main__.py +5 -3
  15. {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
  16. {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
  17. {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
  18. {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
  19. {mcp_agent → fast_agent}/cli/commands/setup.py +64 -134
  20. {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
  21. {mcp_agent → fast_agent}/cli/main.py +36 -16
  22. {mcp_agent → fast_agent}/cli/terminal.py +2 -2
  23. {mcp_agent → fast_agent}/config.py +13 -2
  24. fast_agent/constants.py +8 -0
  25. {mcp_agent → fast_agent}/context.py +24 -19
  26. {mcp_agent → fast_agent}/context_dependent.py +9 -5
  27. fast_agent/core/__init__.py +17 -0
  28. {mcp_agent → fast_agent}/core/agent_app.py +39 -36
  29. fast_agent/core/core_app.py +135 -0
  30. {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
  31. {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
  32. {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
  33. {mcp_agent → fast_agent}/core/fastagent.py +32 -32
  34. fast_agent/core/logging/__init__.py +5 -0
  35. {mcp_agent → fast_agent/core}/logging/events.py +3 -3
  36. {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
  37. {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
  38. {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
  39. {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
  40. fast_agent/core/prompt.py +9 -0
  41. {mcp_agent → fast_agent}/core/validation.py +4 -4
  42. fast_agent/event_progress.py +61 -0
  43. fast_agent/history/history_exporter.py +44 -0
  44. {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
  45. {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
  46. {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
  47. {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
  48. {mcp_agent → fast_agent}/human_input/types.py +1 -18
  49. fast_agent/interfaces.py +228 -0
  50. fast_agent/llm/__init__.py +9 -0
  51. mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +128 -218
  52. fast_agent/llm/internal/passthrough.py +137 -0
  53. mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
  54. mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
  55. fast_agent/llm/internal/slow.py +38 -0
  56. {mcp_agent → fast_agent}/llm/memory.py +40 -30
  57. {mcp_agent → fast_agent}/llm/model_database.py +35 -2
  58. {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
  59. fast_agent/llm/model_info.py +126 -0
  60. {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
  61. fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
  62. {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
  63. fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
  64. fast_agent/llm/provider/bedrock/llm_bedrock.py +2192 -0
  65. {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
  66. fast_agent/llm/provider/google/llm_google_native.py +431 -0
  67. mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
  68. mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
  69. mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
  70. mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
  71. mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
  72. mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
  73. mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -206
  74. mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
  75. mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
  76. mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
  77. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
  78. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
  79. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
  80. {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
  81. {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
  82. {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
  83. {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
  84. fast_agent/mcp/__init__.py +43 -0
  85. {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
  86. {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
  87. {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
  88. fast_agent/mcp/helpers/__init__.py +36 -0
  89. fast_agent/mcp/helpers/content_helpers.py +183 -0
  90. {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
  91. {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
  92. fast_agent/mcp/interfaces.py +93 -0
  93. {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
  94. {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
  95. {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
  96. {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
  97. {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
  98. {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
  99. fast_agent/mcp/prompt.py +159 -0
  100. mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
  101. {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
  102. {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
  103. fast_agent/mcp/prompts/__main__.py +7 -0
  104. {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
  105. {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
  106. {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
  107. {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
  108. {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
  109. {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
  110. {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
  111. {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
  112. fast_agent/mcp/ui_agent.py +48 -0
  113. fast_agent/mcp/ui_mixin.py +209 -0
  114. fast_agent/mcp_server_registry.py +90 -0
  115. {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
  116. {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
  117. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +25 -3
  118. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
  119. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
  120. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
  121. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
  122. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
  123. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
  124. {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
  125. {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
  126. {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
  127. {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
  128. {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
  129. {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
  130. {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
  131. {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
  132. {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
  133. {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
  134. {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
  135. {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
  136. fast_agent/resources/setup/.gitignore +24 -0
  137. fast_agent/resources/setup/agent.py +18 -0
  138. fast_agent/resources/setup/fastagent.config.yaml +44 -0
  139. fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
  140. fast_agent/tools/elicitation.py +369 -0
  141. fast_agent/types/__init__.py +32 -0
  142. fast_agent/types/llm_stop_reason.py +77 -0
  143. fast_agent/ui/__init__.py +38 -0
  144. fast_agent/ui/console_display.py +1005 -0
  145. {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +56 -39
  146. mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
  147. {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
  148. {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
  149. fast_agent/ui/mcp_ui_utils.py +224 -0
  150. {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
  151. {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
  152. {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
  153. {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/METADATA +7 -7
  154. fast_agent_mcp-0.3.0.dist-info/RECORD +202 -0
  155. fast_agent_mcp-0.3.0.dist-info/entry_points.txt +5 -0
  156. fast_agent_mcp-0.2.57.dist-info/RECORD +0 -192
  157. fast_agent_mcp-0.2.57.dist-info/entry_points.txt +0 -6
  158. mcp_agent/__init__.py +0 -114
  159. mcp_agent/agents/agent.py +0 -92
  160. mcp_agent/agents/workflow/__init__.py +0 -1
  161. mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
  162. mcp_agent/app.py +0 -175
  163. mcp_agent/core/__init__.py +0 -26
  164. mcp_agent/core/prompt.py +0 -191
  165. mcp_agent/event_progress.py +0 -134
  166. mcp_agent/human_input/handler.py +0 -81
  167. mcp_agent/llm/__init__.py +0 -2
  168. mcp_agent/llm/augmented_llm_passthrough.py +0 -232
  169. mcp_agent/llm/augmented_llm_slow.py +0 -53
  170. mcp_agent/llm/providers/__init__.py +0 -8
  171. mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -717
  172. mcp_agent/llm/providers/augmented_llm_bedrock.py +0 -1788
  173. mcp_agent/llm/providers/augmented_llm_google_native.py +0 -495
  174. mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
  175. mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
  176. mcp_agent/llm/sampling_format_converter.py +0 -37
  177. mcp_agent/logging/__init__.py +0 -0
  178. mcp_agent/mcp/__init__.py +0 -50
  179. mcp_agent/mcp/helpers/__init__.py +0 -25
  180. mcp_agent/mcp/helpers/content_helpers.py +0 -187
  181. mcp_agent/mcp/interfaces.py +0 -266
  182. mcp_agent/mcp/prompts/__init__.py +0 -0
  183. mcp_agent/mcp/prompts/__main__.py +0 -10
  184. mcp_agent/mcp_server_registry.py +0 -343
  185. mcp_agent/tools/tool_definition.py +0 -14
  186. mcp_agent/ui/console_display.py +0 -790
  187. mcp_agent/ui/console_display_legacy.py +0 -401
  188. {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
  189. {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
  190. {mcp_agent → fast_agent}/cli/constants.py +0 -0
  191. {mcp_agent → fast_agent}/core/error_handling.py +0 -0
  192. {mcp_agent → fast_agent}/core/exceptions.py +0 -0
  193. {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
  194. {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
  195. {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
  196. {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
  197. {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
  198. {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
  199. {mcp_agent → fast_agent}/mcp/common.py +0 -0
  200. {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
  201. {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
  202. {mcp_agent → fast_agent}/py.typed +0 -0
  203. {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
  204. {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  205. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
  206. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
  207. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
  208. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
  209. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  210. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  211. {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
  212. {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
  213. {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
  214. {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
  215. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
  216. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
  217. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
  218. {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
  219. {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
  220. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
  221. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
  222. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
  223. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
  224. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
  225. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
  226. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
  227. {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
  228. {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
  229. {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
  230. {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
  231. {mcp_agent → fast_agent/ui}/console.py +0 -0
  232. {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
  233. {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/WHEEL +0 -0
  234. {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0
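
The list above reflects the top-level package rename from mcp_agent to fast_agent, together with class and type renames that appear in the per-file diffs below (OpenAIAugmentedLLM → OpenAILLM, PromptMessageMultipart → PromptMessageExtended, RequestParams moving to fast_agent.types). As rough orientation only, a sketch of how downstream imports change between the two versions, based solely on the import lines visible in this diff; the full public API of 0.3.0 may differ:

```python
# Sketch based only on import lines visible in this diff; not an exhaustive migration guide.

# fast-agent-mcp 0.2.57 (mcp_agent package layout):
# from mcp_agent.core.request_params import RequestParams
# from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
# from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

# fast-agent-mcp 0.3.0 (fast_agent package layout):
from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
from fast_agent.llm.provider_types import Provider
from fast_agent.types import PromptMessageExtended, RequestParams
```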
mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py

@@ -2,12 +2,12 @@ from typing import Any, Dict, List, Optional
 
 from openai.types.chat import ChatCompletionMessageParam, ChatCompletionSystemMessageParam
 
-from mcp_agent.core.request_params import RequestParams
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.types import RequestParams
 
 
-class TensorZeroOpenAIAugmentedLLM(OpenAIAugmentedLLM):
+class TensorZeroOpenAILLM(OpenAILLM):
     """
     An LLM augmentation that interacts with TensorZero's OpenAI-compatible inference endpoint.
     This class extends the base OpenAIAugmentedLLM to handle TensorZero-specific
@@ -55,7 +55,7 @@ class TensorZeroOpenAIAugmentedLLM(OpenAIAugmentedLLM):
         if self.context and self.context.config and hasattr(self.context.config, "tensorzero"):
             base_url = getattr(self.context.config.tensorzero, "base_url", default_url)
             # Ensure the path is correctly appended
-            if not base_url.endswith('/openai/v1'):
+            if not base_url.endswith("/openai/v1"):
                 base_url = f"{base_url.rstrip('/')}/openai/v1"
             self.logger.debug(f"Using TensorZero base URL from config: {base_url}")
             return base_url
@@ -63,10 +63,10 @@ class TensorZeroOpenAIAugmentedLLM(OpenAIAugmentedLLM):
         return default_url
 
     def _prepare_api_request(
-        self,
-        messages: List[ChatCompletionMessageParam],
-        tools: Optional[List[Any]],
-        request_params: RequestParams
+        self,
+        messages: List[ChatCompletionMessageParam],
+        tools: Optional[List[Any]],
+        request_params: RequestParams,
     ) -> Dict[str, Any]:
         """
         Prepares the API request for the TensorZero OpenAI-compatible endpoint.
@@ -87,8 +87,7 @@ class TensorZeroOpenAIAugmentedLLM(OpenAIAugmentedLLM):
                 # If content is a string, convert it to the TensorZero format
                 if isinstance(msg.get("content"), str):
                     messages[i] = ChatCompletionSystemMessageParam(
-                        role="system",
-                        content=[request_params.template_vars]
+                        role="system", content=[request_params.template_vars]
                     )
                 elif isinstance(msg.get("content"), list):
                     # If content is already a list, merge the template vars
@@ -98,10 +97,12 @@ class TensorZeroOpenAIAugmentedLLM(OpenAIAugmentedLLM):
 
         if not system_message_found:
             # If no system message exists, create one
-            messages.insert(0, ChatCompletionSystemMessageParam(
-                role="system",
-                content=[request_params.template_vars]
-            ))
+            messages.insert(
+                0,
+                ChatCompletionSystemMessageParam(
+                    role="system", content=[request_params.template_vars]
+                ),
+            )
 
         # Add TensorZero-specific extra body parameters
         extra_body = arguments.get("extra_body", {})
@@ -124,4 +125,4 @@ class TensorZeroOpenAIAugmentedLLM(OpenAIAugmentedLLM):
         arguments["extra_body"] = extra_body
 
         self.logger.debug(f"Final API request arguments: {arguments}")
-        return arguments
+        return arguments
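
The _base_url hunk above only switches quote style, but the surrounding logic normalizes any configured TensorZero base URL so it ends with the OpenAI-compatible path /openai/v1. A minimal standalone sketch of that normalization (the helper name is illustrative, not part of the package):

```python
def normalize_tensorzero_base_url(base_url: str) -> str:
    # Ensure the OpenAI-compatible path is appended exactly once, mirroring the hunk above.
    if not base_url.endswith("/openai/v1"):
        base_url = f"{base_url.rstrip('/')}/openai/v1"
    return base_url


assert normalize_tensorzero_base_url("http://localhost:3000") == "http://localhost:3000/openai/v1"
assert normalize_tensorzero_base_url("http://localhost:3000/openai/v1") == "http://localhost:3000/openai/v1"
```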
mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py

@@ -1,14 +1,14 @@
 import os
 
-from mcp_agent.core.request_params import RequestParams
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.types import RequestParams
 
 XAI_BASE_URL = "https://api.x.ai/v1"
 DEFAULT_XAI_MODEL = "grok-3"
 
 
-class XAIAugmentedLLM(OpenAIAugmentedLLM):
+class XAILLM(OpenAILLM):
     def __init__(self, *args, **kwargs) -> None:
         super().__init__(
             *args, provider=Provider.XAI, **kwargs
@@ -18,12 +18,12 @@ class XAIAugmentedLLM(OpenAIAugmentedLLM):
         """Initialize xAI parameters"""
         # Get base defaults from parent (includes ModelDatabase lookup)
         base_params = super()._initialize_default_params(kwargs)
-
+
         # Override with xAI-specific settings
         chosen_model = kwargs.get("model", DEFAULT_XAI_MODEL)
         base_params.model = chosen_model
         base_params.parallel_tool_calls = False
-
+
         return base_params
 
     def _base_url(self) -> str:
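
The xAI file shows the subclassing pattern these provider modules share: _initialize_default_params starts from the parent's defaults and then applies provider-specific overrides. A rough sketch of that pattern with stand-in classes (BaseParams and BaseLLM are placeholders for the package's RequestParams and OpenAILLM, not real fast_agent names):

```python
from dataclasses import dataclass


@dataclass
class BaseParams:  # placeholder for the package's RequestParams
    model: str = "default-model"
    parallel_tool_calls: bool = True


class BaseLLM:  # placeholder for OpenAILLM
    def _initialize_default_params(self, kwargs: dict) -> BaseParams:
        return BaseParams()


class XAILikeLLM(BaseLLM):
    DEFAULT_MODEL = "grok-3"

    def _initialize_default_params(self, kwargs: dict) -> BaseParams:
        # Start from the parent defaults, then apply provider-specific overrides,
        # mirroring the xAI hunk above.
        base_params = super()._initialize_default_params(kwargs)
        base_params.model = kwargs.get("model", self.DEFAULT_MODEL)
        base_params.parallel_tool_calls = False
        return base_params


print(XAILikeLLM()._initialize_default_params({}))
# BaseParams(model='grok-3', parallel_tool_calls=False)
```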
mcp_agent/llm/providers/multipart_converter_openai.py → fast_agent/llm/provider/openai/multipart_converter_openai.py

@@ -1,3 +1,4 @@
+import json
 from typing import Any, Dict, List, Optional, Tuple, Union
 
 from mcp.types import (
@@ -5,13 +6,12 @@ from mcp.types import (
     EmbeddedResource,
     ImageContent,
     PromptMessage,
-    ResourceLink,
     TextContent,
 )
 from openai.types.chat import ChatCompletionMessageParam
 
-from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.helpers.content_helpers import (
+from fast_agent.core.logging.logger import get_logger
+from fast_agent.mcp.helpers.content_helpers import (
     get_image_data,
     get_resource_uri,
     get_text,
@@ -20,13 +20,12 @@ from mcp_agent.mcp.helpers.content_helpers import (
     is_resource_link,
     is_text_content,
 )
-from mcp_agent.mcp.mime_utils import (
+from fast_agent.mcp.mime_utils import (
     guess_mime_type,
     is_image_mime_type,
     is_text_mime_type,
 )
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp_agent.mcp.resource_utils import extract_title_from_uri
+from fast_agent.types import PromptMessageExtended
 
 _logger = get_logger("multipart_converter_openai")
 
@@ -55,39 +54,111 @@ class OpenAIConverter:
 
     @staticmethod
     def convert_to_openai(
-        multipart_msg: PromptMessageMultipart, concatenate_text_blocks: bool = False
-    ) -> Dict[str, str | ContentBlock | List[ContentBlock]]:
+        multipart_msg: PromptMessageExtended, concatenate_text_blocks: bool = False
+    ) -> List[Dict[str, Any]]:
         """
-        Convert a PromptMessageMultipart message to OpenAI API format.
+        Convert a PromptMessageExtended message to OpenAI API format.
 
         Args:
-            multipart_msg: The PromptMessageMultipart message to convert
+            multipart_msg: The PromptMessageExtended message to convert
             concatenate_text_blocks: If True, adjacent text blocks will be combined
 
         Returns:
-            An OpenAI API message object
+            A list of OpenAI API message objects
         """
+        # If this is an assistant message that contains tool_calls, convert to an
+        # assistant message with tool_calls per OpenAI format to establish the
+        # required call IDs before tool responses appear.
+        if multipart_msg.role == "assistant" and multipart_msg.tool_calls:
+            tool_calls_list: List[Dict[str, Any]] = []
+            for tool_id, req in multipart_msg.tool_calls.items():
+                name = None
+                arguments = {}
+                try:
+                    params = getattr(req, "params", None)
+                    if params is not None:
+                        name = getattr(params, "name", None)
+                        arguments = getattr(params, "arguments", {}) or {}
+                except Exception:
+                    pass
+
+                tool_calls_list.append(
+                    {
+                        "id": tool_id,
+                        "type": "function",
+                        "function": {
+                            "name": name or "unknown_tool",
+                            "arguments": json.dumps(arguments),
+                        },
+                    }
+                )
+
+            return [{"role": "assistant", "tool_calls": tool_calls_list, "content": ""}]
+
+        # Handle tool_results first if present
+        if multipart_msg.tool_results:
+            messages = OpenAIConverter.convert_function_results_to_openai(
+                multipart_msg.tool_results, concatenate_text_blocks
+            )
+
+            # If there's also content, convert and append it
+            if multipart_msg.content:
+                role = multipart_msg.role
+                content_msg = OpenAIConverter._convert_content_to_message(
+                    multipart_msg.content, role, concatenate_text_blocks
+                )
+                if content_msg:  # Only append if non-empty
+                    messages.append(content_msg)
+
+            return messages
+
+        # Regular content conversion (no tool_results)
         role = multipart_msg.role
+        content_msg = OpenAIConverter._convert_content_to_message(
+            multipart_msg.content, role, concatenate_text_blocks
+        )
+        return [content_msg] if content_msg else []
 
+    @staticmethod
+    def _convert_content_to_message(
+        content: list, role: str, concatenate_text_blocks: bool = False
+    ) -> Dict[str, Any] | None:
+        """
+        Convert content blocks to a single OpenAI message.
+
+        Args:
+            content: List of content blocks
+            role: The message role
+            concatenate_text_blocks: If True, adjacent text blocks will be combined
+
+        Returns:
+            An OpenAI message dict or None if content is empty
+        """
         # Handle empty content
-        if not multipart_msg.content:
+        if not content:
             return {"role": role, "content": ""}
 
         # single text block
-        if 1 == len(multipart_msg.content) and is_text_content(multipart_msg.content[0]):
-            return {"role": role, "content": get_text(multipart_msg.content[0])}
+        if 1 == len(content) and is_text_content(content[0]):
+            return {"role": role, "content": get_text(content[0])}
 
         # For user messages, convert each content block
         content_blocks: List[ContentBlock] = []
 
-        for item in multipart_msg.content:
+        _logger.debug(f"Converting {len(content)} content items for role '{role}'")
+
+        for item in content:
             try:
                 if is_text_content(item):
                     text = get_text(item)
                     content_blocks.append({"type": "text", "text": text})
 
                 elif is_image_content(item):
-                    content_blocks.append(OpenAIConverter._convert_image_content(item))
+                    image_block = OpenAIConverter._convert_image_content(item)
+                    content_blocks.append(image_block)
+                    _logger.debug(
+                        f"Added image content block: {image_block.get('type', 'unknown')}"
+                    )
 
                 elif is_resource_content(item):
                     block = OpenAIConverter._convert_embedded_resource(item)
@@ -95,9 +166,9 @@ class OpenAIConverter:
                         content_blocks.append(block)
 
                 elif is_resource_link(item):
-                    block = OpenAIConverter._convert_resource_link(item)
-                    if block:
-                        content_blocks.append(block)
+                    text = get_text(item)
+                    if text:
+                        content_blocks.append({"type": "text", "text": text})
 
                 else:
                     _logger.warning(f"Unsupported content type: {type(item)}")
@@ -119,7 +190,9 @@
             content_blocks = OpenAIConverter._concatenate_text_blocks(content_blocks)
 
         # Return user message with content blocks
-        return {"role": role, "content": content_blocks}
+        result = {"role": role, "content": content_blocks}
+        _logger.debug(f"Final message for role '{role}': {len(content_blocks)} content blocks")
+        return result
 
     @staticmethod
     def _concatenate_text_blocks(blocks: List[ContentBlock]) -> List[ContentBlock]:
@@ -173,11 +246,13 @@
         Returns:
             An OpenAI API message object
         """
-        # Convert the PromptMessage to a PromptMessageMultipart containing a single content item
-        multipart = PromptMessageMultipart(role=message.role, content=[message.content])
+        # Convert the PromptMessage to a PromptMessageExtended containing a single content item
+        multipart = PromptMessageExtended(role=message.role, content=[message.content])
 
         # Use the existing conversion method with the specified concatenation option
-        return OpenAIConverter.convert_to_openai(multipart, concatenate_text_blocks)
+        # Since convert_to_openai now returns a list, we return the first element
+        messages = OpenAIConverter.convert_to_openai(multipart, concatenate_text_blocks)
+        return messages[0] if messages else {"role": message.role, "content": ""}
 
     @staticmethod
     def _convert_image_content(content: ImageContent) -> ContentBlock:
@@ -220,32 +295,6 @@
 
         return "text/plain"
 
-    @staticmethod
-    def _convert_resource_link(
-        resource: ResourceLink,
-    ) -> Optional[ContentBlock]:
-        """
-        Convert ResourceLink to OpenAI content block.
-
-        Args:
-            resource: The resource link to convert
-
-        Returns:
-            An OpenAI content block or None if conversion failed
-        """
-        name = resource.name or "unknown"
-        uri_str = str(resource.uri)
-        mime_type = resource.mimeType or "unknown"
-        description = resource.description or "No description"
-
-        # Create a text block with the resource link information
-        return {
-            "type": "text",
-            "text": f"Linked Resource ${name} MIME type {mime_type}>\n"
-            f"Resource Link: {uri_str}\n"
-            f"${description}\n",
-        }
-
     @staticmethod
     def _convert_embedded_resource(
         resource: EmbeddedResource,
@@ -263,6 +312,8 @@
         uri_str = get_resource_uri(resource)
         uri = getattr(resource_content, "uri", None)
         is_url = uri and str(uri).startswith(("http://", "https://"))
+        from fast_agent.mcp.resource_utils import extract_title_from_uri
+
         title = extract_title_from_uri(uri) if uri else "resource"
         mime_type = OpenAIConverter._determine_mime_type(resource_content)
 
@@ -410,15 +461,16 @@
         tool_message_content = ""
         if text_content:
             # Convert text content to OpenAI format
-            temp_multipart = PromptMessageMultipart(role="user", content=text_content)
-            converted = OpenAIConverter.convert_to_openai(
+            temp_multipart = PromptMessageExtended(role="user", content=text_content)
+            converted_messages = OpenAIConverter.convert_to_openai(
                 temp_multipart, concatenate_text_blocks=concatenate_text_blocks
             )
 
-            # Extract text from content blocks
-            tool_message_content = OpenAIConverter._extract_text_from_content_blocks(
-                converted.get("content", "")
-            )
+            # Extract text from content blocks (convert_to_openai now returns a list)
+            if converted_messages:
+                tool_message_content = OpenAIConverter._extract_text_from_content_blocks(
+                    converted_messages[0].get("content", "")
+                )
 
         # Ensure we always have non-empty content for compatibility
         if not tool_message_content or tool_message_content.strip() == "":
@@ -436,23 +488,33 @@
             return tool_message
 
         # Process non-text content as a separate user message
-        non_text_multipart = PromptMessageMultipart(role="user", content=non_text_content)
+        non_text_multipart = PromptMessageExtended(role="user", content=non_text_content)
+
+        # Convert to OpenAI format (returns a list now)
+        user_messages = OpenAIConverter.convert_to_openai(non_text_multipart)
 
-        # Convert to OpenAI format
-        user_message = OpenAIConverter.convert_to_openai(non_text_multipart)
+        # Debug logging to understand what's happening with image conversion
+        _logger.debug(
+            f"Tool result conversion: non_text_content={len(non_text_content)} items, "
+            f"user_messages={len(user_messages)} messages"
+        )
+        if not user_messages:
+            _logger.warning(
+                f"No user messages generated for non-text content: {[type(item).__name__ for item in non_text_content]}"
+            )
 
-        return (tool_message, [user_message])
+        return (tool_message, user_messages)
 
     @staticmethod
     def convert_function_results_to_openai(
-        results: List[Tuple[str, CallToolResult]],
+        results: Dict[str, CallToolResult],
        concatenate_text_blocks: bool = False,
     ) -> List[Dict[str, Any]]:
         """
-        Convert a list of function call results to OpenAI messages.
+        Convert function call results to OpenAI messages.
 
         Args:
-            results: List of (tool_call_id, result) tuples
+            results: Dictionary mapping tool_call_id to CallToolResult
             concatenate_text_blocks: If True, adjacent text blocks will be combined
 
         Returns:
@@ -462,7 +524,7 @@
         user_messages = []
         has_mixed_content = False
 
-        for tool_call_id, result in results:
+        for tool_call_id, result in results.items():
             try:
                 converted = OpenAIConverter.convert_tool_result_to_openai(
                     tool_result=result,
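
Two behavioral changes run through the converter hunks above: OpenAIConverter.convert_to_openai now returns a list of OpenAI message dicts (so one extended message can expand into an assistant tool_calls message plus follow-on messages), and convert_function_results_to_openai takes a dict keyed by tool call ID instead of a list of (id, result) tuples. A hedged sketch of what call sites might look like after the change, using only the constructors and import paths shown in this diff:

```python
from mcp.types import CallToolResult, TextContent

from fast_agent.llm.provider.openai.multipart_converter_openai import OpenAIConverter
from fast_agent.types import PromptMessageExtended

# convert_to_openai now returns a list of OpenAI-format message dicts.
message = PromptMessageExtended(role="user", content=[TextContent(type="text", text="hello")])
openai_messages = OpenAIConverter.convert_to_openai(message)
assert isinstance(openai_messages, list)

# Tool results are now keyed by tool_call_id rather than passed as (id, result) tuples.
results = {"call_1": CallToolResult(content=[TextContent(type="text", text="42")])}
tool_messages = OpenAIConverter.convert_function_results_to_openai(results)
```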
mcp_agent/llm/providers/openai_multipart.py → fast_agent/llm/provider/openai/openai_multipart.py

@@ -1,6 +1,6 @@
 # openai_multipart.py
 """
-Clean utilities for converting between PromptMessageMultipart and OpenAI message formats.
+Clean utilities for converting between PromptMessageExtended and OpenAI message formats.
 Each function handles all content types consistently and is designed for simple testing.
 """
 
@@ -18,34 +18,34 @@ from openai.types.chat import (
     ChatCompletionMessageParam,
 )
 
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from fast_agent.types import PromptMessageExtended
 
 
-def openai_to_multipart(
+def openai_to_extended(
     message: Union[
         ChatCompletionMessage,
         ChatCompletionMessageParam,
         List[Union[ChatCompletionMessage, ChatCompletionMessageParam]],
     ],
-) -> Union[PromptMessageMultipart, List[PromptMessageMultipart]]:
+) -> Union[PromptMessageExtended, List[PromptMessageExtended]]:
     """
-    Convert OpenAI messages to PromptMessageMultipart format.
+    Convert OpenAI messages to PromptMessageExtended format.
 
     Args:
         message: OpenAI Message, MessageParam, or list of them
 
     Returns:
-        Equivalent message(s) in PromptMessageMultipart format
+        Equivalent message(s) in PromptMessageExtended format
     """
     if isinstance(message, list):
-        return [_openai_message_to_multipart(m) for m in message]
-    return _openai_message_to_multipart(message)
+        return [_openai_message_to_extended(m) for m in message]
+    return _openai_message_to_extended(message)
 
 
-def _openai_message_to_multipart(
+def _openai_message_to_extended(
     message: Union[ChatCompletionMessage, Dict[str, Any]],
-) -> PromptMessageMultipart:
-    """Convert a single OpenAI message to PromptMessageMultipart."""
+) -> PromptMessageExtended:
+    """Convert a single OpenAI message to PromptMessageExtended."""
     # Get role and content from message
     if isinstance(message, dict):
         role = message.get("role", "assistant")
@@ -166,4 +166,4 @@ def _openai_message_to_multipart(
             )
         )
 
-    return PromptMessageMultipart(role=role, content=mcp_contents)
+    return PromptMessageExtended(role=role, content=mcp_contents)
mcp_agent/llm/providers/openai_utils.py → fast_agent/llm/provider/openai/openai_utils.py

@@ -12,54 +12,56 @@ from openai.types.chat import (
     ChatCompletionMessageParam,
 )
 
-from mcp_agent.llm.providers.multipart_converter_openai import OpenAIConverter
-from mcp_agent.llm.providers.openai_multipart import (
-    openai_to_multipart,
+from fast_agent.llm.provider.openai.multipart_converter_openai import OpenAIConverter
+from fast_agent.llm.provider.openai.openai_multipart import (
+    openai_to_extended,
 )
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from fast_agent.types import PromptMessageExtended
 
 
 def openai_message_to_prompt_message_multipart(
     message: Union[ChatCompletionMessage, Dict[str, Any]],
-) -> PromptMessageMultipart:
+) -> PromptMessageExtended:
     """
-    Convert an OpenAI ChatCompletionMessage to a PromptMessageMultipart.
+    Convert an OpenAI ChatCompletionMessage to a PromptMessageExtended.
 
     Args:
         message: The OpenAI message to convert (can be an actual ChatCompletionMessage
             or a dictionary with the same structure)
 
     Returns:
-        A PromptMessageMultipart representation
+        A PromptMessageExtended representation
     """
-    return openai_to_multipart(message)
+    return openai_to_extended(message)
 
 
 def openai_message_param_to_prompt_message_multipart(
     message_param: ChatCompletionMessageParam,
-) -> PromptMessageMultipart:
+) -> PromptMessageExtended:
     """
-    Convert an OpenAI ChatCompletionMessageParam to a PromptMessageMultipart.
+    Convert an OpenAI ChatCompletionMessageParam to a PromptMessageExtended.
 
     Args:
         message_param: The OpenAI message param to convert
 
     Returns:
-        A PromptMessageMultipart representation
+        A PromptMessageExtended representation
     """
-    return openai_to_multipart(message_param)
+    return openai_to_extended(message_param)
 
 
 def prompt_message_multipart_to_openai_message_param(
-    multipart: PromptMessageMultipart,
+    multipart: PromptMessageExtended,
 ) -> ChatCompletionMessageParam:
     """
-    Convert a PromptMessageMultipart to an OpenAI ChatCompletionMessageParam.
+    Convert a PromptMessageExtended to an OpenAI ChatCompletionMessageParam.
 
     Args:
-        multipart: The PromptMessageMultipart to convert
+        multipart: The PromptMessageExtended to convert
 
     Returns:
         An OpenAI ChatCompletionMessageParam representation
     """
-    return OpenAIConverter.convert_to_openai(multipart)
+    # convert_to_openai now returns a list, return the first element for backward compatibility
+    messages = OpenAIConverter.convert_to_openai(multipart)
+    return messages[0] if messages else {"role": multipart.role, "content": ""}
mcp_agent/llm/provider_key_manager.py → fast_agent/llm/provider_key_manager.py

@@ -8,7 +8,7 @@ from typing import Any, Dict
 
 from pydantic import BaseModel
 
-from mcp_agent.core.exceptions import ProviderKeyError
+from fast_agent.core.exceptions import ProviderKeyError
 
 PROVIDER_ENVIRONMENT_MAP: Dict[str, str] = {
     # default behaviour in _get_env_key_name is to capitalize the
@@ -62,7 +62,7 @@ class ProviderKeyManager:
             ProviderKeyError: If the API key is not found or is invalid
         """
 
-        from mcp_agent.llm.provider_types import Provider
+        from fast_agent.llm.provider_types import Provider
 
         provider_name = provider_name.lower()
 
mcp_agent/llm/provider_types.py → fast_agent/llm/provider_types.py

@@ -8,6 +8,8 @@ from enum import Enum
 class Provider(Enum):
     """Supported LLM providers"""
 
+    display_name: str
+
     def __new__(cls, config_name, display_name=None):
         obj = object.__new__(cls)
         obj._value_ = config_name
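
The provider_types.py hunk adds a class-level display_name: str annotation to the Provider enum; the attribute itself is still assigned in __new__, so the annotation mainly declares the member for type checkers. A minimal sketch of the pattern with illustrative members (the real enum defines the package's actual provider list):

```python
from enum import Enum


class ProviderSketch(Enum):
    """Illustrative enum whose members carry a display_name assigned in __new__."""

    display_name: str  # declared for type checkers; set per-member in __new__ below

    def __new__(cls, config_name, display_name=None):
        obj = object.__new__(cls)
        obj._value_ = config_name
        obj.display_name = display_name or config_name.capitalize()
        return obj

    # Illustrative members only, not the package's real provider list.
    OPENAI = ("openai", "OpenAI")
    XAI = ("xai", "xAI")


print(ProviderSketch.XAI.value, ProviderSketch.XAI.display_name)  # xai xAI
```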
mcp_agent/llm/sampling_converter.py → fast_agent/llm/sampling_converter.py

@@ -1,5 +1,5 @@
 """
-Simplified converter between MCP sampling types and PromptMessageMultipart.
+Simplified converter between MCP sampling types and PromptMessageExtended.
 This replaces the more complex provider-specific converters with direct conversions.
 """
 
@@ -12,8 +12,8 @@ from mcp.types import (
     TextContent,
 )
 
-from mcp_agent.mcp.interfaces import RequestParams
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from fast_agent.types import PromptMessageExtended, RequestParams
+from fast_agent.types.llm_stop_reason import LlmStopReason
 
 
 class SamplingConverter:
@@ -21,7 +21,7 @@ class SamplingConverter:
     Simplified converter between MCP sampling types and internal LLM types.
 
     This handles converting between:
-    - SamplingMessage and PromptMessageMultipart
+    - SamplingMessage and PromptMessageExtended
     - CreateMessageRequestParams and RequestParams
     - LLM responses and CreateMessageResult
     """
@@ -29,17 +29,17 @@ class SamplingConverter:
     @staticmethod
     def sampling_message_to_prompt_message(
         message: SamplingMessage,
-    ) -> PromptMessageMultipart:
+    ) -> PromptMessageExtended:
         """
-        Convert a SamplingMessage to a PromptMessageMultipart.
+        Convert a SamplingMessage to a PromptMessageExtended.
 
         Args:
            message: MCP SamplingMessage to convert
 
         Returns:
-            PromptMessageMultipart suitable for use with LLMs
+            PromptMessageExtended suitable for use with LLMs
         """
-        return PromptMessageMultipart(role=message.role, content=[message.content])
+        return PromptMessageExtended(role=message.role, content=[message.content])
 
     @staticmethod
     def extract_request_params(params: CreateMessageRequestParams) -> RequestParams:
@@ -77,20 +77,23 @@
             role="assistant",
             content=TextContent(type="text", text=error_message),
             model=model or "unknown",
-            stopReason="error",
+            stopReason=LlmStopReason.ERROR.value,
         )
 
     @staticmethod
     def convert_messages(
         messages: List[SamplingMessage],
-    ) -> List[PromptMessageMultipart]:
+    ) -> List[PromptMessageExtended]:
         """
-        Convert multiple SamplingMessages to PromptMessageMultipart objects.
+        Convert multiple SamplingMessages to PromptMessageExtended objects.
+
+        This properly combines consecutive messages with the same role into a single
+        multipart message, which is required by APIs like Anthropic.
 
         Args:
             messages: List of SamplingMessages to convert
 
         Returns:
-            List of PromptMessageMultipart objects, each with a single content item
+            List of PromptMessageExtended objects with consecutive same-role messages combined
         """
         return [SamplingConverter.sampling_message_to_prompt_message(msg) for msg in messages]