fast-agent-mcp 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +37 -9
  2. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +53 -31
  3. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
  4. mcp_agent/agents/agent.py +5 -11
  5. mcp_agent/core/agent_app.py +125 -44
  6. mcp_agent/core/decorators.py +3 -2
  7. mcp_agent/core/enhanced_prompt.py +106 -20
  8. mcp_agent/core/factory.py +28 -66
  9. mcp_agent/core/fastagent.py +13 -3
  10. mcp_agent/core/mcp_content.py +222 -0
  11. mcp_agent/core/prompt.py +132 -0
  12. mcp_agent/core/proxies.py +41 -36
  13. mcp_agent/human_input/handler.py +4 -1
  14. mcp_agent/logging/transport.py +30 -3
  15. mcp_agent/mcp/mcp_aggregator.py +27 -22
  16. mcp_agent/mcp/mime_utils.py +69 -0
  17. mcp_agent/mcp/prompt_message_multipart.py +64 -0
  18. mcp_agent/mcp/prompt_serialization.py +447 -0
  19. mcp_agent/mcp/prompts/__init__.py +0 -0
  20. mcp_agent/mcp/prompts/__main__.py +10 -0
  21. mcp_agent/mcp/prompts/prompt_server.py +508 -0
  22. mcp_agent/mcp/prompts/prompt_template.py +469 -0
  23. mcp_agent/mcp/resource_utils.py +203 -0
  24. mcp_agent/resources/examples/internal/agent.py +1 -1
  25. mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
  26. mcp_agent/resources/examples/internal/sizer.py +0 -5
  27. mcp_agent/resources/examples/prompting/__init__.py +3 -0
  28. mcp_agent/resources/examples/prompting/agent.py +23 -0
  29. mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
  30. mcp_agent/resources/examples/prompting/image_server.py +56 -0
  31. mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
  32. mcp_agent/resources/examples/workflows/orchestrator.py +5 -4
  33. mcp_agent/resources/examples/workflows/router.py +0 -2
  34. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +57 -87
  35. mcp_agent/workflows/llm/anthropic_utils.py +101 -0
  36. mcp_agent/workflows/llm/augmented_llm.py +155 -141
  37. mcp_agent/workflows/llm/augmented_llm_anthropic.py +135 -281
  38. mcp_agent/workflows/llm/augmented_llm_openai.py +175 -337
  39. mcp_agent/workflows/llm/augmented_llm_passthrough.py +104 -0
  40. mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
  41. mcp_agent/workflows/llm/model_factory.py +25 -6
  42. mcp_agent/workflows/llm/openai_utils.py +65 -0
  43. mcp_agent/workflows/llm/providers/__init__.py +8 -0
  44. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
  45. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
  46. mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
  47. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
  48. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
  49. mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
  50. mcp_agent/workflows/orchestrator/orchestrator.py +62 -153
  51. mcp_agent/workflows/router/router_llm.py +18 -24
  52. mcp_agent/core/server_validation.py +0 -44
  53. mcp_agent/core/simulator_registry.py +0 -22
  54. mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
  55. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
  56. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/llm/augmented_llm_openai.py
@@ -1,37 +1,34 @@
-import json
 import os
-from typing import Iterable, List, Type
-from mcp.types import PromptMessage
-import instructor
+from typing import List, Type, TYPE_CHECKING
+
+from pydantic_core import from_json
+
+from mcp_agent.workflows.llm.providers.multipart_converter_openai import OpenAIConverter
+from mcp_agent.workflows.llm.providers.sampling_converter_openai import (
+    OpenAISamplingConverter,
+)
+
+if TYPE_CHECKING:
+    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from openai import OpenAI, AuthenticationError
+
+# from openai.types.beta.chat import
 from openai.types.chat import (
-    ChatCompletionAssistantMessageParam,
-    ChatCompletionContentPartParam,
-    ChatCompletionContentPartTextParam,
-    ChatCompletionContentPartRefusalParam,
-    ChatCompletionMessage,
     ChatCompletionMessageParam,
+    ChatCompletionMessage,
     ChatCompletionSystemMessageParam,
     ChatCompletionToolParam,
-    ChatCompletionToolMessageParam,
     ChatCompletionUserMessageParam,
 )
 from mcp.types import (
     CallToolRequestParams,
     CallToolRequest,
     CallToolResult,
-    EmbeddedResource,
-    ImageContent,
-    TextContent,
-    TextResourceContents,
 )
 
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     ModelT,
-    MCPMessageParam,
-    MCPMessageResult,
-    ProviderToMCPConverter,
     RequestParams,
 )
 from mcp_agent.core.exceptions import ProviderKeyError
@@ -56,7 +53,7 @@ class OpenAIAugmentedLLM(
     def __init__(self, *args, **kwargs):
         # Set type_converter before calling super().__init__
         if "type_converter" not in kwargs:
-            kwargs["type_converter"] = MCPOpenAITypeConverter
+            kwargs["type_converter"] = OpenAISamplingConverter
 
         super().__init__(*args, **kwargs)
 
@@ -128,7 +125,12 @@ class OpenAIAugmentedLLM(
             self.context.config.openai.base_url if self.context.config.openai else None
         )
 
-    async def generate(self, message, request_params: RequestParams | None = None):
+    async def generate(
+        self,
+        message,
+        request_params: RequestParams | None = None,
+        response_model: Type[ModelT] | None = None,
+    ) -> List[ChatCompletionMessage]:
         """
         Process a query using an LLM and available tools.
         The default implementation uses OpenAI's ChatCompletion as the LLM.
@@ -152,7 +154,7 @@ class OpenAIAugmentedLLM(
                 ChatCompletionSystemMessageParam(role="system", content=system_prompt)
             )
 
-        # Always include prompt messages, but only include conversation history
+        # Always include prompt messages, but only include conversation history
         # if use_history is True
         messages.extend(self.history.get(include_history=params.use_history))
 
@@ -179,7 +181,7 @@ class OpenAIAugmentedLLM(
             for tool in response.tools
         ]
         if not available_tools:
-            available_tools = None
+            available_tools = []
 
         responses: List[ChatCompletionMessage] = []
         model = await self.select_model(params)
@@ -191,11 +193,11 @@ class OpenAIAugmentedLLM(
         else:
             self.show_user_message(str(message), model, chat_turn)
 
+        # we do NOT send stop sequences as this causes errors with multimodal processing
        for i in range(params.max_iterations):
             arguments = {
                 "model": model or "gpt-4o",
                 "messages": messages,
-                "stop": params.stopSequences,
                 "tools": available_tools,
             }
             if self._reasoning:
@@ -215,9 +217,16 @@ class OpenAIAugmentedLLM(
             self.logger.debug(f"{arguments}")
             self._log_chat_progress(chat_turn, model=model)
 
-            executor_result = await self.executor.execute(
-                openai_client.chat.completions.create, **arguments
-            )
+            if response_model is None:
+                executor_result = await self.executor.execute(
+                    openai_client.chat.completions.create, **arguments
+                )
+            else:
+                executor_result = await self.executor.execute(
+                    openai_client.beta.chat.completions.parse,
+                    **arguments,
+                    response_format=response_model,
+                )
 
             response = executor_result[0]
 
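The new branch above routes structured-output requests through the OpenAI SDK's parse endpoint rather than plain chat completions. A minimal standalone sketch of that call path, with an illustrative model name and schema that are not from this diff:

from openai import OpenAI
from pydantic import BaseModel

class Sentiment(BaseModel):
    label: str
    confidence: float

client = OpenAI()  # reads OPENAI_API_KEY from the environment
completion = client.beta.chat.completions.parse(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Classify: 'great product!'"}],
    response_format=Sentiment,  # same keyword the executor call above passes
)
print(completion.choices[0].message.parsed)  # -> Sentiment(label=..., confidence=...)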
@@ -271,30 +280,34 @@ class OpenAIAugmentedLLM(
                             message.tool_calls[0].function.name,
                         )
 
-                    # Execute all tool calls in parallel.
-                    tool_tasks = []
+                    tool_results = []
                     for tool_call in message.tool_calls:
                         self.show_tool_call(
                             available_tools,
                             tool_call.function.name,
                             tool_call.function.arguments,
                         )
-                        tool_tasks.append(self.execute_tool_call(tool_call))
-                    # Wait for all tool calls to complete.
-                    tool_results = await self.executor.execute(*tool_tasks)
+                        tool_call_request = CallToolRequest(
+                            method="tools/call",
+                            params=CallToolRequestParams(
+                                name=tool_call.function.name,
+                                arguments=from_json(
+                                    tool_call.function.arguments, allow_partial=True
+                                ),
+                            ),
+                        )
+                        result = await self.call_tool(tool_call_request, tool_call.id)
+                        self.show_oai_tool_result(str(result))
+
+                        tool_results.append((tool_call.id, result))
+
+                    messages.extend(
+                        OpenAIConverter.convert_function_results_to_openai(tool_results)
+                    )
+
                     self.logger.debug(
                         f"Iteration {i}: Tool call results: {str(tool_results) if tool_results else 'None'}"
                     )
-                    # Add non-None results to messages.
-                    for result in tool_results:
-                        if isinstance(result, BaseException):
-                            self.logger.error(
-                                f"Warning: Unexpected error during tool execution: {result}. Continuing..."
-                            )
-                            continue
-                        if result is not None:
-                            self.show_oai_tool_result(str(result["content"]))
-                            messages.append(result)
                 elif choice.finish_reason == "length":
                     # We have reached the max tokens limit
                     self.logger.debug(
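Tool calls are now executed sequentially, and their arguments are decoded with pydantic_core's from_json rather than json.loads. The allow_partial=True flag matters because models occasionally emit truncated argument JSON; a small sketch of the difference, using a hypothetical payload:

import json
from pydantic_core import from_json

args = '{"city": "London", "units": "metric'  # truncated by the model
print(from_json(args, allow_partial=True))    # expected: {'city': 'London'}
try:
    json.loads(args)
except json.JSONDecodeError as err:
    print(f"json.loads rejects it: {err}")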
@@ -334,10 +347,10 @@ class OpenAIAugmentedLLM(
         if params.use_history:
             # Get current prompt messages
             prompt_messages = self.history.get(include_history=False)
-
+
             # Calculate new conversation messages (excluding prompts)
-            new_messages = messages[len(prompt_messages):]
-
+            new_messages = messages[len(prompt_messages) :]
+
             # Update conversation history
             self.history.set(new_messages)
 
@@ -354,7 +367,15 @@ class OpenAIAugmentedLLM(
         Process a query using an LLM and available tools.
         The default implementation uses OpenAI's ChatCompletion as the LLM.
         Override this method to use a different LLM.
+
+        Special commands:
+        - "***SAVE_HISTORY <filename.md>" - Saves the conversation history to the specified file
+          in MCP prompt format with user/assistant delimiters.
         """
+        # Check if this is a special command to save history
+        if isinstance(message, str) and message.startswith("***SAVE_HISTORY "):
+            return await self._save_history_to_file(message)
+
         responses = await self.generate(
             message=message,
             request_params=request_params,
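Because the command is intercepted before any LLM call, saving history costs no tokens. A hedged usage sketch: the llm handle is an assumed OpenAIAugmentedLLM instance, while the command string, status message, and delimiters come from _save_history_to_file in the next hunk; the exact file layout shown is an approximation:

# Inside an async context with an OpenAIAugmentedLLM instance `llm`:
status = await llm.generate_str("***SAVE_HISTORY conversation.md")
print(status)  # "Done. Saved conversation history to conversation.md"

# conversation.md then contains delimited blocks such as:
#
#   ---USER
#   What's in this image?
#
#   ---ASSISTANT
#   It shows ...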
@@ -373,46 +394,128 @@ class OpenAIAugmentedLLM(
 
         return "\n".join(final_text)
 
+    async def _apply_prompt_template_provider_specific(
+        self, multipart_messages: List["PromptMessageMultipart"]
+    ) -> str:
+        """
+        OpenAI-specific implementation of apply_prompt_template that handles
+        multimodal content natively.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects parsed from the prompt template
+
+        Returns:
+            String representation of the assistant's response if generated,
+            or the last assistant message in the prompt
+        """
+
+        # TODO -- this is very similar to Anthropic (just the converter class changes).
+        # TODO -- potential refactor to base class, standardize Converter interface
+        # Check the last message role
+        last_message = multipart_messages[-1]
+
+        # Add all previous messages to history (or all messages if last is from assistant)
+        messages_to_add = (
+            multipart_messages[:-1]
+            if last_message.role == "user"
+            else multipart_messages
+        )
+        converted = []
+        for msg in messages_to_add:
+            converted.append(OpenAIConverter.convert_to_openai(msg))
+        self.history.extend(converted, is_prompt=True)
+
+        if last_message.role == "user":
+            # For user messages: Generate response to the last one
+            self.logger.debug(
+                "Last message in prompt is from user, generating assistant response"
+            )
+            message_param = OpenAIConverter.convert_to_openai(last_message)
+            return await self.generate_str(message_param)
+        else:
+            # For assistant messages: Return the last message content as text
+            self.logger.debug(
+                "Last message in prompt is from assistant, returning it directly"
+            )
+            return str(last_message)
+
+    async def _save_history_to_file(self, command: str) -> str:
+        """
+        Save the conversation history to a file in MCP prompt format.
+
+        Args:
+            command: The command string, expected format: "***SAVE_HISTORY <filename.md>"
+
+        Returns:
+            Success or error message
+        """
+        try:
+            # Extract the filename from the command
+            parts = command.split(" ", 1)
+            if len(parts) != 2 or not parts[1].strip():
+                return "Error: Invalid format. Expected '***SAVE_HISTORY <filename.md>'"
+
+            filename = parts[1].strip()
+
+            # Get all messages from history
+            messages = self.history.get(include_history=True)
+
+            # Import required utilities
+            from mcp_agent.workflows.llm.openai_utils import (
+                openai_message_param_to_prompt_message_multipart,
+            )
+            from mcp_agent.mcp.prompt_serialization import (
+                multipart_messages_to_delimited_format,
+            )
+
+            # Convert message params to PromptMessageMultipart objects
+            multipart_messages = []
+            for msg in messages:
+                # Skip system messages - PromptMessageMultipart only supports user and assistant roles
+                if isinstance(msg, dict) and msg.get("role") == "system":
+                    continue
+
+                # Convert the message to a multipart message
+                multipart_messages.append(
+                    openai_message_param_to_prompt_message_multipart(msg)
+                )
+
+            # Convert to delimited format
+            delimited_content = multipart_messages_to_delimited_format(
+                multipart_messages,
+                user_delimiter="---USER",
+                assistant_delimiter="---ASSISTANT",
+            )
+
+            # Write to file
+            with open(filename, "w", encoding="utf-8") as f:
+                f.write("\n\n".join(delimited_content))
+
+            self.logger.info(f"Saved conversation history to {filename}")
+            return f"Done. Saved conversation history to {filename}"
+
+        except Exception as e:
+            self.logger.error(f"Error saving history: {str(e)}")
+            return f"Error saving history: {str(e)}"
+
     async def generate_structured(
         self,
         message,
         response_model: Type[ModelT],
         request_params: RequestParams | None = None,
     ) -> ModelT:
-        # First we invoke the LLM to generate a string response
-        # We need to do this in a two-step process because Instructor doesn't
-        # know how to invoke MCP tools via call_tool, so we'll handle all the
-        # processing first and then pass the final response through Instructor
-        response = await self.generate_str(
+        responses = await self.generate(
             message=message,
             request_params=request_params,
-        )
-
-        # Next we pass the text through instructor to extract structured data
-        client = instructor.from_openai(
-            OpenAI(
-                api_key=self._api_key(),
-                base_url=self._base_url(),
-            ),
-            mode=instructor.Mode.TOOLS_STRICT,
-        )
-
-        params = self.get_request_params(request_params)
-        model = await self.select_model(params)
-
-        # Extract structured data from natural language
-        structured_response = client.chat.completions.create(
-            model=model,
             response_model=response_model,
-            messages=[
-                {"role": "user", "content": response},
-            ],
-        )
-        await self.show_assistant_message(
-            str(structured_response), title="ASSISTANT/STRUCTURED"
         )
+        return responses[0].parsed
 
-        return structured_response
+    async def generate_prompt(
+        self, prompt: "PromptMessageMultipart", request_params: RequestParams | None
+    ) -> str:
+        converted_prompt = OpenAIConverter.convert_to_openai(prompt)
+        return await self.generate_str(converted_prompt, request_params)
 
     async def pre_tool_call(self, tool_call_id: str | None, request: CallToolRequest):
         return request
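The rewritten generate_structured drops the two-step instructor round-trip: a single generate() call now handles tool execution and schema-constrained parsing, and the parsed Pydantic object is read straight off the first response message. A hedged caller-side sketch, where WeatherReport and the llm instance are illustrative assumptions:

from pydantic import BaseModel

class WeatherReport(BaseModel):
    city: str
    temperature_c: float

# Inside an async context with an OpenAIAugmentedLLM instance `llm`:
report = await llm.generate_structured(
    message="What's the weather in Paris?",
    response_model=WeatherReport,
)
print(report.city, report.temperature_c)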
@@ -422,47 +525,6 @@ class OpenAIAugmentedLLM(
     ):
         return result
 
-    async def execute_tool_call(
-        self,
-        tool_call: ChatCompletionToolParam,
-    ) -> ChatCompletionToolMessageParam | None:
-        """
-        Execute a single tool call and return the result message.
-        Returns None if there's no content to add to messages.
-        """
-        tool_name = tool_call.function.name
-        tool_args_str = tool_call.function.arguments
-        tool_call_id = tool_call.id
-        tool_args = {}
-
-        try:
-            if tool_args_str:
-                tool_args = json.loads(tool_args_str)
-        except json.JSONDecodeError as e:
-            return ChatCompletionToolMessageParam(
-                role="tool",
-                tool_call_id=tool_call_id,
-                content=f"Invalid JSON provided in tool call arguments for '{tool_name}'. Failed to load JSON: {str(e)}",
-            )
-
-        tool_call_request = CallToolRequest(
-            method="tools/call",
-            params=CallToolRequestParams(name=tool_name, arguments=tool_args),
-        )
-
-        result = await self.call_tool(
-            request=tool_call_request, tool_call_id=tool_call_id
-        )
-
-        if result.content:
-            return ChatCompletionToolMessageParam(
-                role="tool",
-                tool_call_id=tool_call_id,
-                content=[mcp_content_to_openai_content(c) for c in result.content],
-            )
-
-        return None
-
     def message_param_str(self, message: ChatCompletionMessageParam) -> str:
         """Convert an input message to a string representation."""
         if message.get("content"):
@@ -489,227 +551,3 @@ class OpenAIAugmentedLLM(
             return content
 
         return str(message)
-
-
-class MCPOpenAITypeConverter(
-    ProviderToMCPConverter[ChatCompletionMessageParam, ChatCompletionMessage]
-):
-    """
-    Convert between OpenAI and MCP types.
-    """
-
-    @classmethod
-    def from_mcp_message_result(cls, result: MCPMessageResult) -> ChatCompletionMessage:
-        # MCPMessageResult -> ChatCompletionMessage
-        if result.role != "assistant":
-            raise ValueError(
-                f"Expected role to be 'assistant' but got '{result.role}' instead."
-            )
-
-        return ChatCompletionMessage(
-            role="assistant",
-            content=result.content.text or str(result.context),
-            # Lossy conversion for the following fields:
-            # result.model
-            # result.stopReason
-        )
-
-    @classmethod
-    def to_mcp_message_result(cls, result: ChatCompletionMessage) -> MCPMessageResult:
-        # ChatCompletionMessage -> MCPMessageResult
-        return MCPMessageResult(
-            role=result.role,
-            content=TextContent(type="text", text=result.content),
-            model=None,
-            stopReason=None,
-            # extras for ChatCompletionMessage fields
-            **result.model_dump(exclude={"role", "content"}),
-        )
-
-    @classmethod
-    def from_mcp_message_param(
-        cls, param: MCPMessageParam
-    ) -> ChatCompletionMessageParam:
-        # MCPMessageParam -> ChatCompletionMessageParam
-        if param.role == "assistant":
-            extras = param.model_dump(exclude={"role", "content"})
-            return ChatCompletionAssistantMessageParam(
-                role="assistant",
-                content=mcp_content_to_openai_content(param.content),
-                **extras,
-            )
-        elif param.role == "user":
-            extras = param.model_dump(exclude={"role", "content"})
-            return ChatCompletionUserMessageParam(
-                role="user",
-                content=mcp_content_to_openai_content(param.content),
-                **extras,
-            )
-        else:
-            raise ValueError(
-                f"Unexpected role: {param.role}, MCP only supports 'assistant' and 'user'"
-            )
-
-    @classmethod
-    def to_mcp_message_param(cls, param: ChatCompletionMessageParam) -> MCPMessageParam:
-        # ChatCompletionMessage -> MCPMessageParam
-
-        contents = openai_content_to_mcp_content(param.content)
-
-        # TODO: saqadri - the mcp_content can have multiple elements
-        # while sampling message content has a single content element
-        # Right now we error out if there are > 1 elements in mcp_content
-        # We need to handle this case properly going forward
-        if len(contents) > 1:
-            raise NotImplementedError(
-                "Multiple content elements in a single message are not supported"
-            )
-        mcp_content: TextContent | ImageContent | EmbeddedResource = contents[0]
-
-        if param.role == "assistant":
-            return MCPMessageParam(
-                role="assistant",
-                content=mcp_content,
-                **typed_dict_extras(param, ["role", "content"]),
-            )
-        elif param.role == "user":
-            return MCPMessageParam(
-                role="user",
-                content=mcp_content,
-                **typed_dict_extras(param, ["role", "content"]),
-            )
-        elif param.role == "tool":
-            raise NotImplementedError(
-                "Tool messages are not supported in SamplingMessage yet"
-            )
-        elif param.role == "system":
-            raise NotImplementedError(
-                "System messages are not supported in SamplingMessage yet"
-            )
-        elif param.role == "developer":
-            raise NotImplementedError(
-                "Developer messages are not supported in SamplingMessage yet"
-            )
-        elif param.role == "function":
-            raise NotImplementedError(
-                "Function messages are not supported in SamplingMessage yet"
-            )
-        else:
-            raise ValueError(
-                f"Unexpected role: {param.role}, MCP only supports 'assistant', 'user', 'tool', 'system', 'developer', and 'function'"
-            )
-
-    @classmethod
-    def from_mcp_prompt_message(
-        cls, message: PromptMessage
-    ) -> ChatCompletionMessageParam:
-        """Convert an MCP PromptMessage to an OpenAI ChatCompletionMessageParam."""
-
-        # Extract content
-        content = None
-        if hasattr(message.content, "text"):
-            content = message.content.text
-        else:
-            content = str(message.content)
-
-        # Extract extras
-        extras = message.model_dump(exclude={"role", "content"})
-
-        if message.role == "user":
-            return ChatCompletionUserMessageParam(
-                role="user", content=content, **extras
-            )
-        elif message.role == "assistant":
-            return ChatCompletionAssistantMessageParam(
-                role="assistant", content=content, **extras
-            )
-        else:
-            # Fall back to user for any unrecognized role, including "system"
-            _logger.warning(
-                f"Unsupported role '{message.role}' in PromptMessage. Falling back to 'user' role."
-            )
-            return ChatCompletionUserMessageParam(
-                role="user", content=f"[{message.role.upper()}] {content}", **extras
-            )
-
-
-def mcp_content_to_openai_content(
-    content: TextContent | ImageContent | EmbeddedResource,
-) -> ChatCompletionContentPartTextParam:
-    if isinstance(content, list):
-        # Handle list of content items
-        return ChatCompletionContentPartTextParam(
-            type="text",
-            text="\n".join(mcp_content_to_openai_content(c) for c in content),
-        )
-
-    if isinstance(content, TextContent):
-        return ChatCompletionContentPartTextParam(type="text", text=content.text)
-    elif isinstance(content, ImageContent):
-        # Best effort to convert an image to text
-        return ChatCompletionContentPartTextParam(
-            type="text", text=f"{content.mimeType}:{content.data}"
-        )
-    elif isinstance(content, EmbeddedResource):
-        if isinstance(content.resource, TextResourceContents):
-            return ChatCompletionContentPartTextParam(
-                type="text", text=content.resource.text
-            )
-        else:  # BlobResourceContents
-            return ChatCompletionContentPartTextParam(
-                type="text", text=f"{content.resource.mimeType}:{content.resource.blob}"
-            )
-    else:
-        # Last effort to convert the content to a string
-        return ChatCompletionContentPartTextParam(type="text", text=str(content))
-
-
-def openai_content_to_mcp_content(
-    content: str
-    | Iterable[ChatCompletionContentPartParam | ChatCompletionContentPartRefusalParam],
-) -> Iterable[TextContent | ImageContent | EmbeddedResource]:
-    mcp_content = []
-
-    if isinstance(content, str):
-        mcp_content = [TextContent(type="text", text=content)]
-    else:
-        # TODO: saqadri - this is a best effort conversion, we should handle all possible content types
-        for c in content:
-            if c.type == "text":  # isinstance(c, ChatCompletionContentPartTextParam):
-                mcp_content.append(
-                    TextContent(
-                        type="text", text=c.text, **typed_dict_extras(c, ["text"])
-                    )
-                )
-            elif (
-                c.type == "image_url"
-            ):  # isinstance(c, ChatCompletionContentPartImageParam):
-                raise NotImplementedError("Image content conversion not implemented")
-                # TODO: saqadri - need to download the image into a base64-encoded string
-                # Download image from c.image_url
-                # return ImageContent(
-                #     type="image",
-                #     data=downloaded_image,
-                #     **c
-                # )
-            elif (
-                c.type == "input_audio"
-            ):  # isinstance(c, ChatCompletionContentPartInputAudioParam):
-                raise NotImplementedError("Audio content conversion not implemented")
-            elif (
-                c.type == "refusal"
-            ):  # isinstance(c, ChatCompletionContentPartRefusalParam):
-                mcp_content.append(
-                    TextContent(
-                        type="text", text=c.refusal, **typed_dict_extras(c, ["refusal"])
-                    )
-                )
-            else:
-                raise ValueError(f"Unexpected content type: {c.type}")
-
-    return mcp_content
-
-
-def typed_dict_extras(d: dict, exclude: List[str]):
-    extras = {k: v for k, v in d.items() if k not in exclude}
-    return extras