fast_agent_mcp-0.0.7-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.


Files changed (100)
  1. fast_agent_mcp-0.0.7.dist-info/METADATA +322 -0
  2. fast_agent_mcp-0.0.7.dist-info/RECORD +100 -0
  3. fast_agent_mcp-0.0.7.dist-info/WHEEL +4 -0
  4. fast_agent_mcp-0.0.7.dist-info/entry_points.txt +5 -0
  5. fast_agent_mcp-0.0.7.dist-info/licenses/LICENSE +201 -0
  6. mcp_agent/__init__.py +0 -0
  7. mcp_agent/agents/__init__.py +0 -0
  8. mcp_agent/agents/agent.py +277 -0
  9. mcp_agent/app.py +303 -0
  10. mcp_agent/cli/__init__.py +0 -0
  11. mcp_agent/cli/__main__.py +4 -0
  12. mcp_agent/cli/commands/bootstrap.py +221 -0
  13. mcp_agent/cli/commands/config.py +11 -0
  14. mcp_agent/cli/commands/setup.py +229 -0
  15. mcp_agent/cli/main.py +68 -0
  16. mcp_agent/cli/terminal.py +24 -0
  17. mcp_agent/config.py +334 -0
  18. mcp_agent/console.py +28 -0
  19. mcp_agent/context.py +251 -0
  20. mcp_agent/context_dependent.py +48 -0
  21. mcp_agent/core/fastagent.py +1013 -0
  22. mcp_agent/eval/__init__.py +0 -0
  23. mcp_agent/event_progress.py +88 -0
  24. mcp_agent/executor/__init__.py +0 -0
  25. mcp_agent/executor/decorator_registry.py +120 -0
  26. mcp_agent/executor/executor.py +293 -0
  27. mcp_agent/executor/task_registry.py +34 -0
  28. mcp_agent/executor/temporal.py +405 -0
  29. mcp_agent/executor/workflow.py +197 -0
  30. mcp_agent/executor/workflow_signal.py +325 -0
  31. mcp_agent/human_input/__init__.py +0 -0
  32. mcp_agent/human_input/handler.py +49 -0
  33. mcp_agent/human_input/types.py +58 -0
  34. mcp_agent/logging/__init__.py +0 -0
  35. mcp_agent/logging/events.py +123 -0
  36. mcp_agent/logging/json_serializer.py +163 -0
  37. mcp_agent/logging/listeners.py +216 -0
  38. mcp_agent/logging/logger.py +365 -0
  39. mcp_agent/logging/rich_progress.py +120 -0
  40. mcp_agent/logging/tracing.py +140 -0
  41. mcp_agent/logging/transport.py +461 -0
  42. mcp_agent/mcp/__init__.py +0 -0
  43. mcp_agent/mcp/gen_client.py +85 -0
  44. mcp_agent/mcp/mcp_activity.py +18 -0
  45. mcp_agent/mcp/mcp_agent_client_session.py +242 -0
  46. mcp_agent/mcp/mcp_agent_server.py +56 -0
  47. mcp_agent/mcp/mcp_aggregator.py +394 -0
  48. mcp_agent/mcp/mcp_connection_manager.py +330 -0
  49. mcp_agent/mcp/stdio.py +104 -0
  50. mcp_agent/mcp_server_registry.py +275 -0
  51. mcp_agent/progress_display.py +10 -0
  52. mcp_agent/resources/examples/decorator/main.py +26 -0
  53. mcp_agent/resources/examples/decorator/optimizer.py +78 -0
  54. mcp_agent/resources/examples/decorator/orchestrator.py +68 -0
  55. mcp_agent/resources/examples/decorator/parallel.py +81 -0
  56. mcp_agent/resources/examples/decorator/router.py +56 -0
  57. mcp_agent/resources/examples/decorator/tiny.py +22 -0
  58. mcp_agent/resources/examples/mcp_researcher/main-evalopt.py +53 -0
  59. mcp_agent/resources/examples/mcp_researcher/main.py +38 -0
  60. mcp_agent/telemetry/__init__.py +0 -0
  61. mcp_agent/telemetry/usage_tracking.py +18 -0
  62. mcp_agent/workflows/__init__.py +0 -0
  63. mcp_agent/workflows/embedding/__init__.py +0 -0
  64. mcp_agent/workflows/embedding/embedding_base.py +61 -0
  65. mcp_agent/workflows/embedding/embedding_cohere.py +49 -0
  66. mcp_agent/workflows/embedding/embedding_openai.py +46 -0
  67. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  68. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +359 -0
  69. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  70. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +120 -0
  71. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +134 -0
  72. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +45 -0
  73. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +45 -0
  74. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +161 -0
  75. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +60 -0
  76. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +60 -0
  77. mcp_agent/workflows/llm/__init__.py +0 -0
  78. mcp_agent/workflows/llm/augmented_llm.py +645 -0
  79. mcp_agent/workflows/llm/augmented_llm_anthropic.py +539 -0
  80. mcp_agent/workflows/llm/augmented_llm_openai.py +615 -0
  81. mcp_agent/workflows/llm/llm_selector.py +345 -0
  82. mcp_agent/workflows/llm/model_factory.py +175 -0
  83. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  84. mcp_agent/workflows/orchestrator/orchestrator.py +407 -0
  85. mcp_agent/workflows/orchestrator/orchestrator_models.py +154 -0
  86. mcp_agent/workflows/orchestrator/orchestrator_prompts.py +113 -0
  87. mcp_agent/workflows/parallel/__init__.py +0 -0
  88. mcp_agent/workflows/parallel/fan_in.py +350 -0
  89. mcp_agent/workflows/parallel/fan_out.py +187 -0
  90. mcp_agent/workflows/parallel/parallel_llm.py +141 -0
  91. mcp_agent/workflows/router/__init__.py +0 -0
  92. mcp_agent/workflows/router/router_base.py +276 -0
  93. mcp_agent/workflows/router/router_embedding.py +240 -0
  94. mcp_agent/workflows/router/router_embedding_cohere.py +59 -0
  95. mcp_agent/workflows/router/router_embedding_openai.py +59 -0
  96. mcp_agent/workflows/router/router_llm.py +301 -0
  97. mcp_agent/workflows/swarm/__init__.py +0 -0
  98. mcp_agent/workflows/swarm/swarm.py +320 -0
  99. mcp_agent/workflows/swarm/swarm_anthropic.py +42 -0
  100. mcp_agent/workflows/swarm/swarm_openai.py +41 -0
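
The largest single file in this release is mcp_agent/workflows/llm/augmented_llm_openai.py (+615), reproduced in full below. For orientation before the code, here is a rough sketch of how that class might be driven directly. This is not documented API: the constructor keyword and the assumption that the remaining RequestParams fields have usable defaults are guesses inferred from the code below.

    import asyncio

    from mcp_agent.workflows.llm.augmented_llm import RequestParams
    from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM


    async def main() -> None:
        # In the framework these objects are normally built from an Agent and
        # its context; the keyword argument here is an assumption inferred
        # from how __init__ below reads `kwargs` and `self.name`.
        llm = OpenAIAugmentedLLM(name="summarizer")

        # generate_str() runs the tool-calling loop in generate() and joins
        # the assistant text from each completion (see the diff below).
        text = await llm.generate_str(
            "Summarize the project README in two sentences.",
            request_params=RequestParams(model="gpt-4o", use_history=False),
        )
        print(text)


    asyncio.run(main())
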
mcp_agent/workflows/llm/augmented_llm_openai.py
@@ -0,0 +1,615 @@
+ import json
+ from typing import Iterable, List, Type
+
+ import instructor
+ from openai import OpenAI
+ from openai.types.chat import (
+     ChatCompletionAssistantMessageParam,
+     ChatCompletionContentPartParam,
+     ChatCompletionContentPartTextParam,
+     ChatCompletionContentPartRefusalParam,
+     ChatCompletionMessage,
+     ChatCompletionMessageParam,
+     ChatCompletionSystemMessageParam,
+     ChatCompletionToolParam,
+     ChatCompletionToolMessageParam,
+     ChatCompletionUserMessageParam,
+ )
+ from mcp.types import (
+     CallToolRequestParams,
+     CallToolRequest,
+     CallToolResult,
+     EmbeddedResource,
+     ImageContent,
+     TextContent,
+     TextResourceContents,
+ )
+
+ from mcp_agent.workflows.llm.augmented_llm import (
+     AugmentedLLM,
+     ModelT,
+     MCPMessageParam,
+     MCPMessageResult,
+     ProviderToMCPConverter,
+     RequestParams,
+ )
+ from mcp_agent.logging.logger import get_logger
+ from rich.text import Text
+
+
+ DEFAULT_OPENAI_MODEL = "gpt-4o"
+ DEFAULT_REASONING_EFFORT = "medium"
+
+
+ class OpenAIAugmentedLLM(
+     AugmentedLLM[ChatCompletionMessageParam, ChatCompletionMessage]
+ ):
+     """
+     The basic building block of agentic systems is an LLM enhanced with augmentations
+     such as retrieval, tools, and memory provided from a collection of MCP servers.
+     This implementation uses OpenAI's ChatCompletion as the LLM.
+     """
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+         self.provider = "OpenAI"
+         # Initialize logger with name if available
+         self.logger = get_logger(f"{__name__}.{self.name}" if self.name else __name__)
+
+         # Set up reasoning-related attributes
+         self._reasoning_effort = kwargs.get("reasoning_effort", None)
+         if self.context and self.context.config and self.context.config.openai:
+             if self._reasoning_effort is None and hasattr(
+                 self.context.config.openai, "reasoning_effort"
+             ):
+                 self._reasoning_effort = self.context.config.openai.reasoning_effort
+
+         # Determine if we're using a reasoning model
+         chosen_model = (
+             self.default_request_params.model if self.default_request_params else None
+         )
+         self._reasoning = chosen_model and (
+             chosen_model.startswith("o3") or chosen_model.startswith("o1")
+         )
+         if self._reasoning:
+             self.logger.info(
+                 f"Using reasoning model '{chosen_model}' with '{self._reasoning_effort}' reasoning effort"
+             )
+
+     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+         """Initialize OpenAI-specific default parameters"""
+         chosen_model = kwargs.get("model", DEFAULT_OPENAI_MODEL)
+
+         # Get default model from config if available
+         if self.context and self.context.config and self.context.config.openai:
+             if hasattr(self.context.config.openai, "default_model"):
+                 chosen_model = self.context.config.openai.default_model
+
+         return RequestParams(
+             model=chosen_model,
+             modelPreferences=self.model_preferences,
+             systemPrompt=self.instruction,
+             parallel_tool_calls=True,
+             max_iterations=10,
+             use_history=True,
+         )
+
+     async def generate(self, message, request_params: RequestParams | None = None):
+         """
+         Process a query using an LLM and available tools.
+         The default implementation uses OpenAI's ChatCompletion as the LLM.
+         Override this method to use a different LLM.
+         """
+         config = self.context.config
+         if not config.openai.api_key:
+             raise ValueError("OpenAI API key is not set")
+         openai_client = OpenAI(
+             api_key=config.openai.api_key, base_url=config.openai.base_url
+         )
+         messages: List[ChatCompletionMessageParam] = []
+         params = self.get_request_params(request_params)
+
+         system_prompt = self.instruction or params.systemPrompt
+         if system_prompt:
+             messages.append(
+                 ChatCompletionSystemMessageParam(role="system", content=system_prompt)
+             )
+
+         if params.use_history:
+             messages.extend(self.history.get())
+
+         if isinstance(message, str):
+             messages.append(
+                 ChatCompletionUserMessageParam(role="user", content=message)
+             )
+         elif isinstance(message, list):
+             messages.extend(message)
+         else:
+             messages.append(message)
+
+         response = await self.aggregator.list_tools()
+         available_tools: List[ChatCompletionToolParam] = [
+             ChatCompletionToolParam(
+                 type="function",
+                 function={
+                     "name": tool.name,
+                     "description": tool.description,
+                     "parameters": tool.inputSchema,
+                     # TODO: saqadri - determine if we should specify "strict" to True by default
+                 },
+             )
+             for tool in response.tools
+         ]
+         if not available_tools:
+             available_tools = None
+
+         responses: List[ChatCompletionMessage] = []
+         model = await self.select_model(params)
+         chat_turn = len(messages) // 2
+         if self._reasoning:
+             self.show_user_message(
+                 str(message), f"{model} ({self._reasoning_effort})", chat_turn
+             )
+         else:
+             self.show_user_message(str(message), model, chat_turn)
+
+         for i in range(params.max_iterations):
+             arguments = {
+                 "model": model or "gpt-4o",
+                 "messages": messages,
+                 "stop": params.stopSequences,
+                 "tools": available_tools,
+             }
+             if self._reasoning:
+                 arguments = {
+                     **arguments,
+                     "max_completion_tokens": params.maxTokens,
+                     "reasoning_effort": self._reasoning_effort,
+                 }
+             else:
+                 arguments = {**arguments, "max_tokens": params.maxTokens}
+                 if available_tools:
+                     arguments["parallel_tool_calls"] = params.parallel_tool_calls
+
+             if params.metadata:
+                 arguments = {**arguments, **params.metadata}
+
+             self.logger.debug(f"{arguments}")
+             self._log_chat_progress(chat_turn, model=model)
+
+             executor_result = await self.executor.execute(
+                 openai_client.chat.completions.create, **arguments
+             )
+
+             response = executor_result[0]
+
+             self.logger.debug(
+                 "OpenAI ChatCompletion response:",
+                 data=response,
+             )
+
+             if isinstance(response, BaseException):
+                 self.logger.error(f"Error: {response}")
+                 break
+
+             if not response.choices or len(response.choices) == 0:
+                 # No response from the model, we're done
+                 break
+
+             # TODO: saqadri - handle multiple choices for more complex interactions.
+             # Keeping it simple for now because multiple choices will also complicate memory management
+             choice = response.choices[0]
+             message = choice.message
+             responses.append(message)
+
+             converted_message = self.convert_message_to_message_param(
+                 message, name=self.name
+             )
+             messages.append(converted_message)
+             message_text = converted_message.content
+             if (
+                 choice.finish_reason in ["tool_calls", "function_call"]
+                 and message.tool_calls
+             ):
+                 if message_text:
+                     await self.show_assistant_message(
+                         message_text,
+                         message.tool_calls[
+                             0
+                         ].function.name,  # TODO support multiple tool calls
+                     )
+                 else:
+                     await self.show_assistant_message(
+                         Text(
+                             "the assistant requested tool calls",
+                             style="dim green italic",
+                         ),
+                         message.tool_calls[0].function.name,
+                     )
+
+                 # Execute all tool calls in parallel.
+                 tool_tasks = []
+                 for tool_call in message.tool_calls:
+                     self.show_tool_call(
+                         available_tools,
+                         tool_call.function.name,
+                         tool_call.function.arguments,
+                     )
+                     tool_tasks.append(self.execute_tool_call(tool_call))
+                 # Wait for all tool calls to complete.
+                 tool_results = await self.executor.execute(*tool_tasks)
+                 self.logger.debug(
+                     f"Iteration {i}: Tool call results: {str(tool_results) if tool_results else 'None'}"
+                 )
+                 # Add non-None results to messages.
+                 for result in tool_results:
+                     if isinstance(result, BaseException):
+                         self.logger.error(
+                             f"Warning: Unexpected error during tool execution: {result}. Continuing..."
+                         )
+                         continue
+                     if result is not None:
+                         self.show_oai_tool_result(str(result["content"]))
+                         messages.append(result)
+             elif choice.finish_reason == "length":
+                 # We have reached the max tokens limit
+                 self.logger.debug(
+                     f"Iteration {i}: Stopping because finish_reason is 'length'"
+                 )
+                 # TODO: saqadri - would be useful to return the reason for stopping to the caller
+                 break
+             elif choice.finish_reason == "content_filter":
+                 # The response was filtered by the content filter
+                 self.logger.debug(
+                     f"Iteration {i}: Stopping because finish_reason is 'content_filter'"
+                 )
+                 # TODO: saqadri - would be useful to return the reason for stopping to the caller
+                 break
+             elif choice.finish_reason == "stop":
+                 self.logger.debug(
+                     f"Iteration {i}: Stopping because finish_reason is 'stop'"
+                 )
+                 if message_text:
+                     await self.show_assistant_message(message_text, "")
+                 break
+
+         if params.use_history:
+             self.history.set(messages)
+
+         self._log_chat_finished(model=model)
+
+         return responses
+
+     async def generate_str(
+         self,
+         message,
+         request_params: RequestParams | None = None,
+     ):
+         """
+         Process a query using an LLM and available tools.
+         The default implementation uses OpenAI's ChatCompletion as the LLM.
+         Override this method to use a different LLM.
+         """
+         responses = await self.generate(
+             message=message,
+             request_params=request_params,
+         )
+
+         final_text: List[str] = []
+
+         for response in responses:
+             content = response.content
+             if not content:
+                 continue
+
+             if isinstance(content, str):
+                 final_text.append(content)
+                 continue
+
+         return "\n".join(final_text)
+
+     async def generate_structured(
+         self,
+         message,
+         response_model: Type[ModelT],
+         request_params: RequestParams | None = None,
+     ) -> ModelT:
+         # First we invoke the LLM to generate a string response.
+         # We need to do this in a two-step process because Instructor doesn't
+         # know how to invoke MCP tools via call_tool, so we'll handle all the
+         # processing first and then pass the final response through Instructor.
+         response = await self.generate_str(
+             message=message,
+             request_params=request_params,
+         )
+
+         # Next we pass the text through Instructor to extract structured data
+         client = instructor.from_openai(
+             OpenAI(
+                 api_key=self.context.config.openai.api_key,
+                 base_url=self.context.config.openai.base_url,
+             ),
+             mode=instructor.Mode.TOOLS_STRICT,
+         )
+
+         params = self.get_request_params(request_params)
+         model = await self.select_model(params)
+
+         # Extract structured data from natural language
+         structured_response = client.chat.completions.create(
+             model=model,
+             response_model=response_model,
+             messages=[
+                 {"role": "user", "content": response},
+             ],
+         )
+
+         return structured_response
+
+     async def pre_tool_call(self, tool_call_id: str | None, request: CallToolRequest):
+         return request
+
+     async def post_tool_call(
+         self, tool_call_id: str | None, request: CallToolRequest, result: CallToolResult
+     ):
+         return result
+
+     async def execute_tool_call(
+         self,
+         tool_call: ChatCompletionToolParam,
+     ) -> ChatCompletionToolMessageParam | None:
+         """
+         Execute a single tool call and return the result message.
+         Returns None if there's no content to add to messages.
+         """
+         tool_name = tool_call.function.name
+         tool_args_str = tool_call.function.arguments
+         tool_call_id = tool_call.id
+         tool_args = {}
+
+         try:
+             if tool_args_str:
+                 tool_args = json.loads(tool_args_str)
+         except json.JSONDecodeError as e:
+             return ChatCompletionToolMessageParam(
+                 role="tool",
+                 tool_call_id=tool_call_id,
+                 content=f"Invalid JSON provided in tool call arguments for '{tool_name}'. Failed to load JSON: {str(e)}",
+             )
+
+         tool_call_request = CallToolRequest(
+             method="tools/call",
+             params=CallToolRequestParams(name=tool_name, arguments=tool_args),
+         )
+
+         result = await self.call_tool(
+             request=tool_call_request, tool_call_id=tool_call_id
+         )
+
+         if result.content:
+             return ChatCompletionToolMessageParam(
+                 role="tool",
+                 tool_call_id=tool_call_id,
+                 content=[mcp_content_to_openai_content(c) for c in result.content],
+             )
+
+         return None
+
+     def message_param_str(self, message: ChatCompletionMessageParam) -> str:
+         """Convert an input message to a string representation."""
+         if message.get("content"):
+             content = message["content"]
+             if isinstance(content, str):
+                 return content
+             else:  # content is a list
+                 final_text: List[str] = []
+                 for part in content:
+                     text_part = part.get("text")
+                     if text_part:
+                         final_text.append(str(text_part))
+                     else:
+                         final_text.append(str(part))
+
+                 return "\n".join(final_text)
+
+         return str(message)
+
+     def message_str(self, message: ChatCompletionMessage) -> str:
+         """Convert an output message to a string representation."""
+         content = message.content
+         if content:
+             return content
+
+         return str(message)
+
+
+ class MCPOpenAITypeConverter(
+     ProviderToMCPConverter[ChatCompletionMessageParam, ChatCompletionMessage]
+ ):
+     """
+     Convert between OpenAI and MCP types.
+     """
+
+     @classmethod
+     def from_mcp_message_result(cls, result: MCPMessageResult) -> ChatCompletionMessage:
+         # MCPMessageResult -> ChatCompletionMessage
+         if result.role != "assistant":
+             raise ValueError(
+                 f"Expected role to be 'assistant' but got '{result.role}' instead."
+             )
+
+         return ChatCompletionMessage(
+             role="assistant",
+             content=result.content.text or str(result.content),
+             # Lossy conversion for the following fields:
+             # result.model
+             # result.stopReason
+         )
+
+     @classmethod
+     def to_mcp_message_result(cls, result: ChatCompletionMessage) -> MCPMessageResult:
+         # ChatCompletionMessage -> MCPMessageResult
+         return MCPMessageResult(
+             role=result.role,
+             content=TextContent(type="text", text=result.content),
+             model=None,
+             stopReason=None,
+             # extras for ChatCompletionMessage fields
+             **result.model_dump(exclude={"role", "content"}),
+         )
+
+     @classmethod
+     def from_mcp_message_param(
+         cls, param: MCPMessageParam
+     ) -> ChatCompletionMessageParam:
+         # MCPMessageParam -> ChatCompletionMessageParam
+         if param.role == "assistant":
+             extras = param.model_dump(exclude={"role", "content"})
+             return ChatCompletionAssistantMessageParam(
+                 role="assistant",
+                 content=mcp_content_to_openai_content(param.content),
+                 **extras,
+             )
+         elif param.role == "user":
+             extras = param.model_dump(exclude={"role", "content"})
+             return ChatCompletionUserMessageParam(
+                 role="user",
+                 content=mcp_content_to_openai_content(param.content),
+                 **extras,
+             )
+         else:
+             raise ValueError(
+                 f"Unexpected role: {param.role}, MCP only supports 'assistant' and 'user'"
+             )
+
+     @classmethod
+     def to_mcp_message_param(cls, param: ChatCompletionMessageParam) -> MCPMessageParam:
+         # ChatCompletionMessageParam -> MCPMessageParam
+         # Note: message params are TypedDicts, so fields are read with key
+         # access rather than attribute access.
+         contents = openai_content_to_mcp_content(param["content"])
+
+         # TODO: saqadri - the mcp_content can have multiple elements
+         # while sampling message content has a single content element
+         # Right now we error out if there are > 1 elements in mcp_content
+         # We need to handle this case properly going forward
+         if len(contents) > 1:
+             raise NotImplementedError(
+                 "Multiple content elements in a single message are not supported"
+             )
+         mcp_content: TextContent | ImageContent | EmbeddedResource = contents[0]
+
+         if param["role"] == "assistant":
+             return MCPMessageParam(
+                 role="assistant",
+                 content=mcp_content,
+                 **typed_dict_extras(param, ["role", "content"]),
+             )
+         elif param["role"] == "user":
+             return MCPMessageParam(
+                 role="user",
+                 content=mcp_content,
+                 **typed_dict_extras(param, ["role", "content"]),
+             )
+         elif param["role"] == "tool":
+             raise NotImplementedError(
+                 "Tool messages are not supported in SamplingMessage yet"
+             )
+         elif param["role"] == "system":
+             raise NotImplementedError(
+                 "System messages are not supported in SamplingMessage yet"
+             )
+         elif param["role"] == "developer":
+             raise NotImplementedError(
+                 "Developer messages are not supported in SamplingMessage yet"
+             )
+         elif param["role"] == "function":
+             raise NotImplementedError(
+                 "Function messages are not supported in SamplingMessage yet"
+             )
+         else:
+             raise ValueError(
+                 f"Unexpected role: {param['role']}, MCP only supports 'assistant', 'user', 'tool', 'system', 'developer', and 'function'"
+             )
+
+
+ def mcp_content_to_openai_content(
+     content: TextContent | ImageContent | EmbeddedResource,
+ ) -> ChatCompletionContentPartTextParam:
+     if isinstance(content, list):
+         # Handle a list of content items by flattening their text parts
+         return ChatCompletionContentPartTextParam(
+             type="text",
+             text="\n".join(
+                 mcp_content_to_openai_content(c)["text"] for c in content
+             ),
+         )
+
+     if isinstance(content, TextContent):
+         return ChatCompletionContentPartTextParam(type="text", text=content.text)
+     elif isinstance(content, ImageContent):
+         # Best effort to convert an image to text
+         return ChatCompletionContentPartTextParam(
+             type="text", text=f"{content.mimeType}:{content.data}"
+         )
+     elif isinstance(content, EmbeddedResource):
+         if isinstance(content.resource, TextResourceContents):
+             return ChatCompletionContentPartTextParam(
+                 type="text", text=content.resource.text
+             )
+         else:  # BlobResourceContents
+             return ChatCompletionContentPartTextParam(
+                 type="text", text=f"{content.resource.mimeType}:{content.resource.blob}"
+             )
+     else:
+         # Last resort: convert the content to a string
+         return ChatCompletionContentPartTextParam(type="text", text=str(content))
+
+
+ def openai_content_to_mcp_content(
+     content: str
+     | Iterable[ChatCompletionContentPartParam | ChatCompletionContentPartRefusalParam],
+ ) -> Iterable[TextContent | ImageContent | EmbeddedResource]:
+     mcp_content = []
+
+     if isinstance(content, str):
+         mcp_content = [TextContent(type="text", text=content)]
+     else:
+         # TODO: saqadri - this is a best effort conversion, we should handle all possible content types
+         # Content parts are TypedDicts, so use key access; exclude the keys
+         # passed explicitly so they aren't duplicated via the extras.
+         for c in content:
+             if c["type"] == "text":  # ChatCompletionContentPartTextParam
+                 mcp_content.append(
+                     TextContent(
+                         type="text",
+                         text=c["text"],
+                         **typed_dict_extras(c, ["type", "text"]),
+                     )
+                 )
+             elif c["type"] == "image_url":  # ChatCompletionContentPartImageParam
+                 raise NotImplementedError("Image content conversion not implemented")
+                 # TODO: saqadri - need to download the image into a base64-encoded string
+                 # Download image from c["image_url"]
+                 # return ImageContent(
+                 #     type="image",
+                 #     data=downloaded_image,
+                 #     **c
+                 # )
+             elif c["type"] == "input_audio":  # ChatCompletionContentPartInputAudioParam
+                 raise NotImplementedError("Audio content conversion not implemented")
+             elif c["type"] == "refusal":  # ChatCompletionContentPartRefusalParam
+                 mcp_content.append(
+                     TextContent(
+                         type="text",
+                         text=c["refusal"],
+                         **typed_dict_extras(c, ["type", "refusal"]),
+                     )
+                 )
+             else:
+                 raise ValueError(f"Unexpected content type: {c['type']}")
+
+     return mcp_content
+
+
+ def typed_dict_extras(d: dict, exclude: List[str]):
+     extras = {k: v for k, v in d.items() if k not in exclude}
+     return extras
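
To make the conversion helpers above concrete, here is a short illustrative round trip. It is not part of the package; it only assumes the module imports shown in the diff.

    from mcp.types import TextContent

    from mcp_agent.workflows.llm.augmented_llm_openai import (
        mcp_content_to_openai_content,
        openai_content_to_mcp_content,
    )

    # MCP -> OpenAI: a TextContent becomes a {"type": "text", "text": ...} part.
    part = mcp_content_to_openai_content(TextContent(type="text", text="hello"))
    assert part == {"type": "text", "text": "hello"}

    # OpenAI -> MCP: the inverse direction returns a list of MCP content objects.
    [round_tripped] = openai_content_to_mcp_content([part])
    assert round_tripped.text == "hello"

Note that images, embedded resources, and refusals are all flattened to text parts on the MCP -> OpenAI path, so the round trip is lossy for anything other than plain text.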