fast-agent-mcp 0.1.13__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
  2. fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
  3. mcp_agent/__init__.py +75 -0
  4. mcp_agent/agents/agent.py +59 -371
  5. mcp_agent/agents/base_agent.py +522 -0
  6. mcp_agent/agents/workflow/__init__.py +1 -0
  7. mcp_agent/agents/workflow/chain_agent.py +173 -0
  8. mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
  9. mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
  10. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +27 -11
  11. mcp_agent/agents/workflow/parallel_agent.py +182 -0
  12. mcp_agent/agents/workflow/router_agent.py +307 -0
  13. mcp_agent/app.py +3 -1
  14. mcp_agent/cli/commands/bootstrap.py +18 -7
  15. mcp_agent/cli/commands/setup.py +12 -4
  16. mcp_agent/cli/main.py +1 -1
  17. mcp_agent/cli/terminal.py +1 -1
  18. mcp_agent/config.py +24 -35
  19. mcp_agent/context.py +3 -1
  20. mcp_agent/context_dependent.py +3 -1
  21. mcp_agent/core/agent_types.py +10 -7
  22. mcp_agent/core/direct_agent_app.py +179 -0
  23. mcp_agent/core/direct_decorators.py +443 -0
  24. mcp_agent/core/direct_factory.py +476 -0
  25. mcp_agent/core/enhanced_prompt.py +15 -20
  26. mcp_agent/core/fastagent.py +151 -337
  27. mcp_agent/core/interactive_prompt.py +424 -0
  28. mcp_agent/core/mcp_content.py +19 -11
  29. mcp_agent/core/prompt.py +6 -2
  30. mcp_agent/core/validation.py +89 -16
  31. mcp_agent/executor/decorator_registry.py +6 -2
  32. mcp_agent/executor/temporal.py +35 -11
  33. mcp_agent/executor/workflow_signal.py +8 -2
  34. mcp_agent/human_input/handler.py +3 -1
  35. mcp_agent/llm/__init__.py +2 -0
  36. mcp_agent/{workflows/llm → llm}/augmented_llm.py +131 -256
  37. mcp_agent/{workflows/llm → llm}/augmented_llm_passthrough.py +35 -107
  38. mcp_agent/llm/augmented_llm_playback.py +83 -0
  39. mcp_agent/{workflows/llm → llm}/model_factory.py +26 -8
  40. mcp_agent/llm/providers/__init__.py +8 -0
  41. mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +5 -1
  42. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +37 -141
  43. mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
  44. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +112 -148
  45. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +78 -35
  46. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +73 -44
  47. mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +18 -4
  48. mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +3 -3
  49. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +3 -3
  50. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +3 -3
  51. mcp_agent/{workflows/llm → llm}/sampling_converter.py +0 -21
  52. mcp_agent/{workflows/llm → llm}/sampling_format_converter.py +16 -1
  53. mcp_agent/logging/logger.py +2 -2
  54. mcp_agent/mcp/gen_client.py +9 -3
  55. mcp_agent/mcp/interfaces.py +67 -45
  56. mcp_agent/mcp/logger_textio.py +97 -0
  57. mcp_agent/mcp/mcp_agent_client_session.py +12 -4
  58. mcp_agent/mcp/mcp_agent_server.py +3 -1
  59. mcp_agent/mcp/mcp_aggregator.py +124 -93
  60. mcp_agent/mcp/mcp_connection_manager.py +21 -7
  61. mcp_agent/mcp/prompt_message_multipart.py +59 -1
  62. mcp_agent/mcp/prompt_render.py +77 -0
  63. mcp_agent/mcp/prompt_serialization.py +20 -13
  64. mcp_agent/mcp/prompts/prompt_constants.py +18 -0
  65. mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
  66. mcp_agent/mcp/prompts/prompt_load.py +15 -5
  67. mcp_agent/mcp/prompts/prompt_server.py +154 -87
  68. mcp_agent/mcp/prompts/prompt_template.py +26 -35
  69. mcp_agent/mcp/resource_utils.py +3 -1
  70. mcp_agent/mcp/sampling.py +24 -15
  71. mcp_agent/mcp_server/agent_server.py +8 -5
  72. mcp_agent/mcp_server_registry.py +22 -9
  73. mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +1 -1
  74. mcp_agent/resources/examples/{data-analysis → in_dev}/slides.py +1 -1
  75. mcp_agent/resources/examples/internal/agent.py +4 -2
  76. mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
  77. mcp_agent/resources/examples/prompting/image_server.py +3 -1
  78. mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
  79. mcp_agent/ui/console_display.py +27 -7
  80. fast_agent_mcp-0.1.13.dist-info/RECORD +0 -164
  81. mcp_agent/core/agent_app.py +0 -570
  82. mcp_agent/core/agent_utils.py +0 -69
  83. mcp_agent/core/decorators.py +0 -448
  84. mcp_agent/core/factory.py +0 -422
  85. mcp_agent/core/proxies.py +0 -278
  86. mcp_agent/core/types.py +0 -22
  87. mcp_agent/eval/__init__.py +0 -0
  88. mcp_agent/mcp/stdio.py +0 -114
  89. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
  90. mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
  91. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
  92. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
  93. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
  94. mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
  95. mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
  96. mcp_agent/resources/examples/researcher/researcher-imp.py +0 -189
  97. mcp_agent/resources/examples/researcher/researcher.py +0 -39
  98. mcp_agent/resources/examples/workflows/chaining.py +0 -45
  99. mcp_agent/resources/examples/workflows/evaluator.py +0 -79
  100. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
  101. mcp_agent/resources/examples/workflows/human_input.py +0 -26
  102. mcp_agent/resources/examples/workflows/orchestrator.py +0 -74
  103. mcp_agent/resources/examples/workflows/parallel.py +0 -79
  104. mcp_agent/resources/examples/workflows/router.py +0 -54
  105. mcp_agent/resources/examples/workflows/sse.py +0 -23
  106. mcp_agent/telemetry/__init__.py +0 -0
  107. mcp_agent/telemetry/usage_tracking.py +0 -19
  108. mcp_agent/workflows/__init__.py +0 -0
  109. mcp_agent/workflows/embedding/__init__.py +0 -0
  110. mcp_agent/workflows/embedding/embedding_base.py +0 -58
  111. mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
  112. mcp_agent/workflows/embedding/embedding_openai.py +0 -37
  113. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  114. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -447
  115. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  116. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -117
  117. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -130
  118. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -41
  119. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -41
  120. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -150
  121. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
  122. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -58
  123. mcp_agent/workflows/llm/__init__.py +0 -0
  124. mcp_agent/workflows/llm/augmented_llm_playback.py +0 -111
  125. mcp_agent/workflows/llm/providers/__init__.py +0 -8
  126. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  127. mcp_agent/workflows/orchestrator/orchestrator.py +0 -535
  128. mcp_agent/workflows/parallel/__init__.py +0 -0
  129. mcp_agent/workflows/parallel/fan_in.py +0 -320
  130. mcp_agent/workflows/parallel/fan_out.py +0 -181
  131. mcp_agent/workflows/parallel/parallel_llm.py +0 -149
  132. mcp_agent/workflows/router/__init__.py +0 -0
  133. mcp_agent/workflows/router/router_base.py +0 -338
  134. mcp_agent/workflows/router/router_embedding.py +0 -226
  135. mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
  136. mcp_agent/workflows/router/router_embedding_openai.py +0 -59
  137. mcp_agent/workflows/router/router_llm.py +0 -304
  138. mcp_agent/workflows/swarm/__init__.py +0 -0
  139. mcp_agent/workflows/swarm/swarm.py +0 -292
  140. mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
  141. mcp_agent/workflows/swarm/swarm_openai.py +0 -41
  142. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
  143. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
  144. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
  145. /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
  146. /mcp_agent/{workflows/llm → llm}/memory.py +0 -0
  147. /mcp_agent/{workflows/llm → llm}/prompt_utils.py +0 -0
@@ -1,4 +1,4 @@
1
- from typing import TYPE_CHECKING, List, Optional, Sequence, Union
1
+ from typing import List, Sequence, Union
2
2
 
3
3
  from anthropic.types import (
4
4
  Base64ImageSourceParam,
@@ -30,11 +30,16 @@ from mcp_agent.mcp.mime_utils import (
30
30
  is_text_mime_type,
31
31
  )
32
32
  from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
33
+ from mcp_agent.mcp.prompts.prompt_helpers import (
34
+ get_image_data,
35
+ get_resource_uri,
36
+ get_text,
37
+ is_image_content,
38
+ is_resource_content,
39
+ is_text_content,
40
+ )
33
41
  from mcp_agent.mcp.resource_utils import extract_title_from_uri
34
42
 
35
- if TYPE_CHECKING:
36
- from pydantic import AnyUrl
37
-
38
43
  _logger = get_logger("multipart_converter_anthropic")
39
44
 
40
45
  # List of image MIME types supported by Anthropic API
@@ -74,7 +79,9 @@ class AnthropicConverter:
74
79
  return MessageParam(role=role, content=[])
75
80
 
76
81
  # Convert content blocks
77
- anthropic_blocks = AnthropicConverter._convert_content_items(multipart_msg.content, document_mode=True)
82
+ anthropic_blocks = AnthropicConverter._convert_content_items(
83
+ multipart_msg.content, document_mode=True
84
+ )
78
85
 
79
86
  # Filter blocks based on role (assistant can only have text blocks)
80
87
  if role == "assistant":
@@ -83,7 +90,9 @@ class AnthropicConverter:
83
90
  if block.get("type") == "text":
84
91
  text_blocks.append(block)
85
92
  else:
86
- _logger.warning(f"Removing non-text block from assistant message: {block.get('type')}")
93
+ _logger.warning(
94
+ f"Removing non-text block from assistant message: {block.get('type')}"
95
+ )
87
96
  anthropic_blocks = text_blocks
88
97
 
89
98
  # Create the Anthropic message
@@ -124,31 +133,38 @@ class AnthropicConverter:
124
133
  anthropic_blocks: List[ContentBlockParam] = []
125
134
 
126
135
  for content_item in content_items:
127
- if isinstance(content_item, TextContent):
128
- anthropic_blocks.append(TextBlockParam(type="text", text=content_item.text))
129
-
130
- elif isinstance(content_item, ImageContent):
136
+ if is_text_content(content_item):
137
+ # Handle text content
138
+ text = get_text(content_item)
139
+ anthropic_blocks.append(TextBlockParam(type="text", text=text))
140
+
141
+ elif is_image_content(content_item):
142
+ # Handle image content
143
+ image_content = content_item # type: ImageContent
131
144
  # Check if image MIME type is supported
132
- if not AnthropicConverter._is_supported_image_type(content_item.mimeType):
145
+ if not AnthropicConverter._is_supported_image_type(image_content.mimeType):
146
+ data_size = len(image_content.data) if image_content.data else 0
133
147
  anthropic_blocks.append(
134
148
  TextBlockParam(
135
149
  type="text",
136
- text=f"Image with unsupported format '{content_item.mimeType}' ({len(content_item.data)} bytes)",
150
+ text=f"Image with unsupported format '{image_content.mimeType}' ({data_size} bytes)",
137
151
  )
138
152
  )
139
153
  else:
154
+ image_data = get_image_data(image_content)
140
155
  anthropic_blocks.append(
141
156
  ImageBlockParam(
142
157
  type="image",
143
158
  source=Base64ImageSourceParam(
144
159
  type="base64",
145
- media_type=content_item.mimeType,
146
- data=content_item.data,
160
+ media_type=image_content.mimeType,
161
+ data=image_data,
147
162
  ),
148
163
  )
149
164
  )
150
165
 
151
- elif isinstance(content_item, EmbeddedResource):
166
+ elif is_resource_content(content_item):
167
+ # Handle embedded resource
152
168
  block = AnthropicConverter._convert_embedded_resource(content_item, document_mode)
153
169
  anthropic_blocks.append(block)
154
170
 
@@ -170,7 +186,8 @@ class AnthropicConverter:
170
186
  An appropriate ContentBlockParam for the resource
171
187
  """
172
188
  resource_content = resource.resource
173
- uri: Optional[AnyUrl] = getattr(resource_content, "uri", None)
189
+ uri_str = get_resource_uri(resource)
190
+ uri = getattr(resource_content, "uri", None)
174
191
  is_url: bool = uri and uri.scheme in ("http", "https")
175
192
 
176
193
  # Determine MIME type
@@ -185,23 +202,33 @@ class AnthropicConverter:
185
202
 
186
203
  elif is_image_mime_type(mime_type):
187
204
  if not AnthropicConverter._is_supported_image_type(mime_type):
188
- return AnthropicConverter._create_fallback_text(f"Image with unsupported format '{mime_type}'", resource)
205
+ return AnthropicConverter._create_fallback_text(
206
+ f"Image with unsupported format '{mime_type}'", resource
207
+ )
189
208
 
190
- if is_url:
191
- return ImageBlockParam(type="image", source=URLImageSourceParam(type="url", url=str(uri)))
192
- elif hasattr(resource_content, "blob"):
209
+ if is_url and uri_str:
210
+ return ImageBlockParam(
211
+ type="image", source=URLImageSourceParam(type="url", url=uri_str)
212
+ )
213
+
214
+ # Try to get image data
215
+ image_data = get_image_data(resource)
216
+ if image_data:
193
217
  return ImageBlockParam(
194
218
  type="image",
195
- source=Base64ImageSourceParam(type="base64", media_type=mime_type, data=resource_content.blob),
219
+ source=Base64ImageSourceParam(
220
+ type="base64", media_type=mime_type, data=image_data
221
+ ),
196
222
  )
223
+
197
224
  return AnthropicConverter._create_fallback_text("Image missing data", resource)
198
225
 
199
226
  elif mime_type == "application/pdf":
200
- if is_url:
227
+ if is_url and uri_str:
201
228
  return DocumentBlockParam(
202
229
  type="document",
203
230
  title=title,
204
- source=URLPDFSourceParam(type="url", url=str(uri)),
231
+ source=URLPDFSourceParam(type="url", url=uri_str),
205
232
  )
206
233
  elif hasattr(resource_content, "blob"):
207
234
  return DocumentBlockParam(
@@ -216,7 +243,8 @@ class AnthropicConverter:
216
243
  return TextBlockParam(type="text", text=f"[PDF resource missing data: {title}]")
217
244
 
218
245
  elif is_text_mime_type(mime_type):
219
- if not hasattr(resource_content, "text"):
246
+ text = get_text(resource)
247
+ if not text:
220
248
  return TextBlockParam(
221
249
  type="text",
222
250
  text=f"[Text content could not be extracted from {title}]",
@@ -230,26 +258,31 @@ class AnthropicConverter:
230
258
  source=PlainTextSourceParam(
231
259
  type="text",
232
260
  media_type="text/plain",
233
- data=resource_content.text,
261
+ data=text,
234
262
  ),
235
263
  )
236
264
 
237
265
  # Return as simple text block when not in document mode
238
- return TextBlockParam(type="text", text=resource_content.text)
266
+ return TextBlockParam(type="text", text=text)
239
267
 
240
268
  # Default fallback - convert to text if possible
241
- if hasattr(resource_content, "text"):
242
- return TextBlockParam(type="text", text=resource_content.text)
269
+ text = get_text(resource)
270
+ if text:
271
+ return TextBlockParam(type="text", text=text)
243
272
 
244
273
  # This is for binary resources - match the format expected by the test
245
- if isinstance(resource.resource, BlobResourceContents) and hasattr(resource.resource, "blob"):
274
+ if isinstance(resource.resource, BlobResourceContents) and hasattr(
275
+ resource.resource, "blob"
276
+ ):
246
277
  blob_length = len(resource.resource.blob)
247
278
  return TextBlockParam(
248
279
  type="text",
249
280
  text=f"Embedded Resource {uri._url} with unsupported format {mime_type} ({blob_length} characters)",
250
281
  )
251
282
 
252
- return AnthropicConverter._create_fallback_text(f"Unsupported resource ({mime_type})", resource)
283
+ return AnthropicConverter._create_fallback_text(
284
+ f"Unsupported resource ({mime_type})", resource
285
+ )
253
286
 
254
287
  @staticmethod
255
288
  def _determine_mime_type(
@@ -292,7 +325,9 @@ class AnthropicConverter:
292
325
  return TextBlockParam(type="text", text="[SVG content could not be extracted]")
293
326
 
294
327
  @staticmethod
295
- def _create_fallback_text(message: str, resource: Union[TextContent, ImageContent, EmbeddedResource]) -> TextBlockParam:
328
+ def _create_fallback_text(
329
+ message: str, resource: Union[TextContent, ImageContent, EmbeddedResource]
330
+ ) -> TextBlockParam:
296
331
  """
297
332
  Create a fallback text block for unsupported resource types.
298
333
 
@@ -310,7 +345,9 @@ class AnthropicConverter:
310
345
  return TextBlockParam(type="text", text=f"[{message}]")
311
346
 
312
347
  @staticmethod
313
- def convert_tool_result_to_anthropic(tool_result: CallToolResult, tool_use_id: str) -> ToolResultBlockParam:
348
+ def convert_tool_result_to_anthropic(
349
+ tool_result: CallToolResult, tool_use_id: str
350
+ ) -> ToolResultBlockParam:
314
351
  """
315
352
  Convert an MCP CallToolResult to an Anthropic ToolResultBlockParam.
316
353
 
@@ -327,7 +364,9 @@ class AnthropicConverter:
327
364
  for item in tool_result.content:
328
365
  if isinstance(item, EmbeddedResource):
329
366
  # For embedded resources, always use text mode in tool results
330
- resource_block = AnthropicConverter._convert_embedded_resource(item, document_mode=False)
367
+ resource_block = AnthropicConverter._convert_embedded_resource(
368
+ item, document_mode=False
369
+ )
331
370
  anthropic_content.append(resource_block)
332
371
  elif isinstance(item, (TextContent, ImageContent)):
333
372
  # For text and image, use standard conversion
@@ -376,11 +415,15 @@ class AnthropicConverter:
376
415
 
377
416
  # Text resources go in tool results, others go as separate blocks
378
417
  if isinstance(resource_content, TextResourceContents):
379
- block = AnthropicConverter._convert_embedded_resource(item, document_mode=False)
418
+ block = AnthropicConverter._convert_embedded_resource(
419
+ item, document_mode=False
420
+ )
380
421
  tool_result_blocks.append(block)
381
422
  else:
382
423
  # For binary resources like PDFs, add as separate block
383
- block = AnthropicConverter._convert_embedded_resource(item, document_mode=True)
424
+ block = AnthropicConverter._convert_embedded_resource(
425
+ item, document_mode=True
426
+ )
384
427
  separate_blocks.append(block)
385
428
 
386
429
  # Create the tool result block if we have content
@@ -15,6 +15,15 @@ from mcp_agent.mcp.mime_utils import (
15
15
  is_text_mime_type,
16
16
  )
17
17
  from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
18
+ from mcp_agent.mcp.prompts.prompt_helpers import (
19
+ MessageContent,
20
+ get_image_data,
21
+ get_resource_uri,
22
+ get_text,
23
+ is_image_content,
24
+ is_resource_content,
25
+ is_text_content,
26
+ )
18
27
  from mcp_agent.mcp.resource_utils import extract_title_from_uri
19
28
 
20
29
  _logger = get_logger("multipart_converter_openai")
@@ -38,10 +47,14 @@ class OpenAIConverter:
38
47
  Returns:
39
48
  True if the MIME type is generally supported, False otherwise
40
49
  """
41
- return mime_type is not None and is_image_mime_type(mime_type) and mime_type != "image/svg+xml"
50
+ return (
51
+ mime_type is not None and is_image_mime_type(mime_type) and mime_type != "image/svg+xml"
52
+ )
42
53
 
43
54
  @staticmethod
44
- def convert_to_openai(multipart_msg: PromptMessageMultipart, concatenate_text_blocks: bool = False) -> OpenAIMessage:
55
+ def convert_to_openai(
56
+ multipart_msg: PromptMessageMultipart, concatenate_text_blocks: bool = False
57
+ ) -> OpenAIMessage:
45
58
  """
46
59
  Convert a PromptMessageMultipart message to OpenAI API format.
47
60
 
@@ -58,25 +71,10 @@ class OpenAIConverter:
58
71
  if not multipart_msg.content:
59
72
  return {"role": role, "content": ""}
60
73
 
61
- # Assistant messages in OpenAI only support string content, not array of content blocks
62
- if role == "assistant":
63
- # Extract text from all text content blocks
64
- content_text = ""
65
- for item in multipart_msg.content:
66
- if isinstance(item, TextContent):
67
- content_text += item.text
68
- # Other types are ignored for assistant messages in OpenAI
69
-
70
- return {"role": role, "content": content_text}
71
-
72
- # System messages also only support string content
73
- if role == "system":
74
- # Extract text from all text content blocks
75
- content_text = ""
76
- for item in multipart_msg.content:
77
- if isinstance(item, TextContent):
78
- content_text += item.text
79
-
74
+ # Assistant and system messages in OpenAI only support string content, not array of content blocks
75
+ if role == "assistant" or role == "system":
76
+ # Use MessageContent helper to get all text
77
+ content_text = MessageContent.join_text(multipart_msg, separator="")
80
78
  return {"role": role, "content": content_text}
81
79
 
82
80
  # For user messages, convert each content block
@@ -84,13 +82,14 @@ class OpenAIConverter:
84
82
 
85
83
  for item in multipart_msg.content:
86
84
  try:
87
- if isinstance(item, TextContent):
88
- content_blocks.append({"type": "text", "text": item.text})
85
+ if is_text_content(item):
86
+ text = get_text(item)
87
+ content_blocks.append({"type": "text", "text": text})
89
88
 
90
- elif isinstance(item, ImageContent):
89
+ elif is_image_content(item):
91
90
  content_blocks.append(OpenAIConverter._convert_image_content(item))
92
91
 
93
- elif isinstance(item, EmbeddedResource):
92
+ elif is_resource_content(item):
94
93
  block = OpenAIConverter._convert_embedded_resource(item)
95
94
  if block:
96
95
  content_blocks.append(block)
@@ -118,7 +117,11 @@ class OpenAIConverter:
118
117
  return {"role": role, "content": ""}
119
118
 
120
119
  # If we only have one text content and it's empty, return an empty string for content
121
- if len(content_blocks) == 1 and content_blocks[0]["type"] == "text" and not content_blocks[0]["text"]:
120
+ if (
121
+ len(content_blocks) == 1
122
+ and content_blocks[0]["type"] == "text"
123
+ and not content_blocks[0]["text"]
124
+ ):
122
125
  return {"role": role, "content": ""}
123
126
 
124
127
  # If concatenate_text_blocks is True, combine adjacent text blocks
@@ -167,7 +170,9 @@ class OpenAIConverter:
167
170
  return combined_blocks
168
171
 
169
172
  @staticmethod
170
- def convert_prompt_message_to_openai(message: PromptMessage, concatenate_text_blocks: bool = False) -> OpenAIMessage:
173
+ def convert_prompt_message_to_openai(
174
+ message: PromptMessage, concatenate_text_blocks: bool = False
175
+ ) -> OpenAIMessage:
171
176
  """
172
177
  Convert a standard PromptMessage to OpenAI API format.
173
178
 
@@ -187,8 +192,11 @@ class OpenAIConverter:
187
192
  @staticmethod
188
193
  def _convert_image_content(content: ImageContent) -> ContentBlock:
189
194
  """Convert ImageContent to OpenAI image_url content block."""
195
+ # Get image data using helper
196
+ image_data = get_image_data(content)
197
+
190
198
  # OpenAI requires image URLs or data URIs for images
191
- image_url = {"url": f"data:{content.mimeType};base64,{content.data}"}
199
+ image_url = {"url": f"data:{content.mimeType};base64,{image_data}"}
192
200
 
193
201
  # Check if the image has annotations for detail level
194
202
  if hasattr(content, "annotations") and content.annotations:
@@ -236,6 +244,7 @@ class OpenAIConverter:
236
244
  An appropriate OpenAI content block or None if conversion failed
237
245
  """
238
246
  resource_content = resource.resource
247
+ uri_str = get_resource_uri(resource)
239
248
  uri = getattr(resource_content, "uri", None)
240
249
  is_url = uri and str(uri).startswith(("http://", "https://"))
241
250
  title = extract_title_from_uri(uri) if uri else "resource"
@@ -245,23 +254,26 @@ class OpenAIConverter:
245
254
 
246
255
  # Handle images
247
256
  if OpenAIConverter._is_supported_image_type(mime_type):
248
- if is_url:
249
- return {"type": "image_url", "image_url": {"url": str(uri)}}
250
- elif hasattr(resource_content, "blob"):
257
+ if is_url and uri_str:
258
+ return {"type": "image_url", "image_url": {"url": uri_str}}
259
+
260
+ # Try to get image data
261
+ image_data = get_image_data(resource)
262
+ if image_data:
251
263
  return {
252
264
  "type": "image_url",
253
- "image_url": {"url": f"data:{mime_type};base64,{resource_content.blob}"},
265
+ "image_url": {"url": f"data:{mime_type};base64,{image_data}"},
254
266
  }
255
267
  else:
256
268
  return {"type": "text", "text": f"[Image missing data: {title}]"}
257
269
 
258
270
  # Handle PDFs
259
271
  elif mime_type == "application/pdf":
260
- if is_url:
272
+ if is_url and uri_str:
261
273
  # OpenAI doesn't directly support PDF URLs, explain this limitation
262
274
  return {
263
275
  "type": "text",
264
- "text": f"[PDF URL: {uri}]\nOpenAI requires PDF files to be uploaded or provided as base64 data.",
276
+ "text": f"[PDF URL: {uri_str}]\nOpenAI requires PDF files to be uploaded or provided as base64 data.",
265
277
  }
266
278
  elif hasattr(resource_content, "blob"):
267
279
  return {
@@ -273,18 +285,31 @@ class OpenAIConverter:
273
285
  }
274
286
 
275
287
  # Handle SVG (convert to text)
276
- elif mime_type == "image/svg+xml" and hasattr(resource_content, "text"):
277
- file_text = f'<fastagent:file title="{title}" mimetype="{mime_type}">\n' f"{resource_content.text}\n" f"</fastagent:file>"
278
- return {"type": "text", "text": file_text}
288
+ elif mime_type == "image/svg+xml":
289
+ text = get_text(resource)
290
+ if text:
291
+ file_text = (
292
+ f'<fastagent:file title="{title}" mimetype="{mime_type}">\n'
293
+ f"{text}\n"
294
+ f"</fastagent:file>"
295
+ )
296
+ return {"type": "text", "text": file_text}
279
297
 
280
298
  # Handle text files
281
- elif is_text_mime_type(mime_type) and hasattr(resource_content, "text"):
282
- file_text = f'<fastagent:file title="{title}" mimetype="{mime_type}">\n' f"{resource_content.text}\n" f"</fastagent:file>"
283
- return {"type": "text", "text": file_text}
299
+ elif is_text_mime_type(mime_type):
300
+ text = get_text(resource)
301
+ if text:
302
+ file_text = (
303
+ f'<fastagent:file title="{title}" mimetype="{mime_type}">\n'
304
+ f"{text}\n"
305
+ f"</fastagent:file>"
306
+ )
307
+ return {"type": "text", "text": file_text}
284
308
 
285
309
  # Default fallback for text resources
286
- elif hasattr(resource_content, "text"):
287
- return {"type": "text", "text": resource_content.text}
310
+ text = get_text(resource)
311
+ if text:
312
+ return {"type": "text", "text": text}
288
313
 
289
314
  # Default fallback for binary resources
290
315
  elif hasattr(resource_content, "blob"):
@@ -370,10 +395,14 @@ class OpenAIConverter:
370
395
  if text_content:
371
396
  # Convert text content to OpenAI format
372
397
  temp_multipart = PromptMessageMultipart(role="user", content=text_content)
373
- converted = OpenAIConverter.convert_to_openai(temp_multipart, concatenate_text_blocks=concatenate_text_blocks)
398
+ converted = OpenAIConverter.convert_to_openai(
399
+ temp_multipart, concatenate_text_blocks=concatenate_text_blocks
400
+ )
374
401
 
375
402
  # Extract text from content blocks
376
- tool_message_content = OpenAIConverter._extract_text_from_content_blocks(converted.get("content", ""))
403
+ tool_message_content = OpenAIConverter._extract_text_from_content_blocks(
404
+ converted.get("content", "")
405
+ )
377
406
 
378
407
  if not tool_message_content:
379
408
  tool_message_content = "[Tool returned non-text content]"
@@ -70,7 +70,11 @@ def _openai_message_to_multipart(
70
70
  text = part.get("text") if isinstance(part, dict) else getattr(part, "text", "")
71
71
 
72
72
  # Check if this is a resource marker
73
- if text and (text.startswith("[Resource:") or text.startswith("[Binary Resource:")) and "\n" in text:
73
+ if (
74
+ text
75
+ and (text.startswith("[Resource:") or text.startswith("[Binary Resource:"))
76
+ and "\n" in text
77
+ ):
74
78
  header, content_text = text.split("\n", 1)
75
79
  if "MIME:" in header:
76
80
  mime_match = header.split("MIME:", 1)[1].split("]")[0].strip()
@@ -96,14 +100,24 @@ def _openai_message_to_multipart(
96
100
 
97
101
  # Handle image content
98
102
  elif part_type == "image_url":
99
- image_url = part.get("image_url", {}) if isinstance(part, dict) else getattr(part, "image_url", None)
103
+ image_url = (
104
+ part.get("image_url", {})
105
+ if isinstance(part, dict)
106
+ else getattr(part, "image_url", None)
107
+ )
100
108
  if image_url:
101
- url = image_url.get("url") if isinstance(image_url, dict) else getattr(image_url, "url", "")
109
+ url = (
110
+ image_url.get("url")
111
+ if isinstance(image_url, dict)
112
+ else getattr(image_url, "url", "")
113
+ )
102
114
  if url and url.startswith("data:image/"):
103
115
  # Handle base64 data URLs
104
116
  mime_type = url.split(";")[0].replace("data:", "")
105
117
  data = url.split(",")[1]
106
- mcp_contents.append(ImageContent(type="image", data=data, mimeType=mime_type))
118
+ mcp_contents.append(
119
+ ImageContent(type="image", data=data, mimeType=mime_type)
120
+ )
107
121
 
108
122
  # Handle explicit resource types
109
123
  elif part_type == "resource" and isinstance(part, dict) and "resource" in part:
@@ -12,11 +12,11 @@ from openai.types.chat import (
12
12
  ChatCompletionMessageParam,
13
13
  )
14
14
 
15
- from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
16
- from mcp_agent.workflows.llm.providers.multipart_converter_openai import OpenAIConverter
17
- from mcp_agent.workflows.llm.providers.openai_multipart import (
15
+ from mcp_agent.llm.providers.multipart_converter_openai import OpenAIConverter
16
+ from mcp_agent.llm.providers.openai_multipart import (
18
17
  openai_to_multipart,
19
18
  )
19
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
20
20
 
21
21
 
22
22
  def openai_message_to_prompt_message_multipart(
@@ -7,11 +7,11 @@ from mcp.types import (
7
7
  PromptMessage,
8
8
  )
9
9
 
10
- from mcp_agent.logging.logger import get_logger
11
- from mcp_agent.workflows.llm.providers.multipart_converter_anthropic import (
10
+ from mcp_agent.llm.providers.multipart_converter_anthropic import (
12
11
  AnthropicConverter,
13
12
  )
14
- from mcp_agent.workflows.llm.sampling_format_converter import ProviderFormatConverter
13
+ from mcp_agent.llm.sampling_format_converter import ProviderFormatConverter
14
+ from mcp_agent.logging.logger import get_logger
15
15
 
16
16
  _logger = get_logger(__name__)
17
17
 
@@ -7,10 +7,10 @@ from openai.types.chat import (
7
7
  ChatCompletionMessage,
8
8
  )
9
9
 
10
- from mcp_agent.logging.logger import get_logger
11
- from mcp_agent.workflows.llm.sampling_format_converter import (
10
+ from mcp_agent.llm.sampling_format_converter import (
12
11
  ProviderFormatConverter,
13
12
  )
13
+ from mcp_agent.logging.logger import get_logger
14
14
 
15
15
  _logger = get_logger(__name__)
16
16
 
@@ -19,7 +19,7 @@ class OpenAISamplingConverter(ProviderFormatConverter[Dict[str, Any], ChatComple
19
19
  @classmethod
20
20
  def from_prompt_message(cls, message: PromptMessage) -> Dict[str, Any]:
21
21
  """Convert an MCP PromptMessage to an OpenAI message dict."""
22
- from mcp_agent.workflows.llm.providers.multipart_converter_openai import (
22
+ from mcp_agent.llm.providers.multipart_converter_openai import (
23
23
  OpenAIConverter,
24
24
  )
25
25
 
@@ -9,7 +9,6 @@ from mcp.types import (
9
9
  CreateMessageRequestParams,
10
10
  CreateMessageResult,
11
11
  SamplingMessage,
12
- StopReason,
13
12
  TextContent,
14
13
  )
15
14
 
@@ -62,26 +61,6 @@ class SamplingConverter:
62
61
  # Add any other parameters needed
63
62
  )
64
63
 
65
- @staticmethod
66
- def create_message_result(response: str, model: str, stop_reason: StopReason = "endTurn") -> CreateMessageResult:
67
- """
68
- Create a CreateMessageResult from an LLM response.
69
-
70
- Args:
71
- response: Text response from the LLM
72
- model: Model identifier
73
- stop_reason: Reason generation stopped
74
-
75
- Returns:
76
- CreateMessageResult suitable for returning to MCP
77
- """
78
- return CreateMessageResult(
79
- role="assistant",
80
- content=TextContent(type="text", text=response),
81
- model=model,
82
- stopReason=stop_reason,
83
- )
84
-
85
64
  @staticmethod
86
65
  def error_result(error_message: str, model: Optional[str] = None) -> CreateMessageResult:
87
66
  """
@@ -1,7 +1,9 @@
1
- from typing import Generic, Protocol, TypeVar
1
+ from typing import Generic, List, Protocol, TypeVar
2
2
 
3
3
  from mcp.types import PromptMessage
4
4
 
5
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
6
+
5
7
  # Define covariant type variables
6
8
  MessageParamT_co = TypeVar("MessageParamT_co", covariant=True)
7
9
  MessageT_co = TypeVar("MessageT_co", covariant=True)
@@ -15,8 +17,21 @@ class ProviderFormatConverter(Protocol, Generic[MessageParamT_co, MessageT_co]):
15
17
  """Convert an MCP PromptMessage to a provider-specific message parameter."""
16
18
  ...
17
19
 
20
+ @classmethod
21
+ def from_mutlipart_prompts(
22
+ cls, messages: List[PromptMessageMultipart]
23
+ ) -> List[MessageParamT_co]:
24
+ """Convert a list of PromptMessageMultiparts to a list of provider-specific implementations"""
25
+ ...
26
+
18
27
 
19
28
  class BasicFormatConverter(ProviderFormatConverter[PromptMessage, PromptMessage]):
20
29
  @classmethod
21
30
  def from_prompt_message(cls, message: PromptMessage) -> PromptMessage:
22
31
  return message
32
+
33
+ @classmethod
34
+ def from_multipart_prompts(
35
+ cls, messages: List[PromptMessageMultipart]
36
+ ) -> List[PromptMessageMultipart]:
37
+ return messages
@@ -192,7 +192,7 @@ class LoggingConfig:
192
192
  batch_size: int = 100,
193
193
  flush_interval: float = 2.0,
194
194
  **kwargs: Any,
195
- ):
195
+ ) -> None:
196
196
  """
197
197
  Configure the logging system.
198
198
 
@@ -230,7 +230,7 @@ class LoggingConfig:
230
230
  cls._initialized = True
231
231
 
232
232
  @classmethod
233
- async def shutdown(cls):
233
+ async def shutdown(cls) -> None:
234
234
  """Shutdown the logging system gracefully."""
235
235
  if not cls._initialized:
236
236
  return