fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
Files changed (46)
  1. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +26 -4
  2. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +43 -22
  3. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
  4. mcp_agent/agents/agent.py +5 -11
  5. mcp_agent/core/agent_app.py +89 -13
  6. mcp_agent/core/fastagent.py +13 -3
  7. mcp_agent/core/mcp_content.py +222 -0
  8. mcp_agent/core/prompt.py +132 -0
  9. mcp_agent/core/proxies.py +41 -36
  10. mcp_agent/logging/transport.py +30 -3
  11. mcp_agent/mcp/mcp_aggregator.py +11 -10
  12. mcp_agent/mcp/mime_utils.py +69 -0
  13. mcp_agent/mcp/prompt_message_multipart.py +64 -0
  14. mcp_agent/mcp/prompt_serialization.py +447 -0
  15. mcp_agent/mcp/prompts/__init__.py +0 -0
  16. mcp_agent/mcp/prompts/__main__.py +10 -0
  17. mcp_agent/mcp/prompts/prompt_server.py +508 -0
  18. mcp_agent/mcp/prompts/prompt_template.py +469 -0
  19. mcp_agent/mcp/resource_utils.py +203 -0
  20. mcp_agent/resources/examples/internal/agent.py +1 -1
  21. mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
  22. mcp_agent/resources/examples/internal/sizer.py +0 -5
  23. mcp_agent/resources/examples/prompting/__init__.py +3 -0
  24. mcp_agent/resources/examples/prompting/agent.py +23 -0
  25. mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
  26. mcp_agent/resources/examples/prompting/image_server.py +56 -0
  27. mcp_agent/workflows/llm/anthropic_utils.py +101 -0
  28. mcp_agent/workflows/llm/augmented_llm.py +139 -66
  29. mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
  30. mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
  31. mcp_agent/workflows/llm/augmented_llm_passthrough.py +43 -0
  32. mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
  33. mcp_agent/workflows/llm/model_factory.py +20 -3
  34. mcp_agent/workflows/llm/openai_utils.py +65 -0
  35. mcp_agent/workflows/llm/providers/__init__.py +8 -0
  36. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
  37. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
  38. mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
  39. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
  40. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
  41. mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
  42. mcp_agent/core/server_validation.py +0 -44
  43. mcp_agent/core/simulator_registry.py +0 -22
  44. mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
  45. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
  46. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/llm/augmented_llm_openai.py
@@ -1,38 +1,34 @@
-import json
 import os
-from typing import Iterable, List, Type
-from mcp.types import PromptMessage
+from typing import List, Type, TYPE_CHECKING
+
+from pydantic_core import from_json
+
+from mcp_agent.workflows.llm.providers.multipart_converter_openai import OpenAIConverter
+from mcp_agent.workflows.llm.providers.sampling_converter_openai import (
+    OpenAISamplingConverter,
+)
+
+if TYPE_CHECKING:
+    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from openai import OpenAI, AuthenticationError
 
 # from openai.types.beta.chat import
 from openai.types.chat import (
-    ChatCompletionAssistantMessageParam,
     ChatCompletionMessageParam,
-    ChatCompletionContentPartParam,
-    ChatCompletionContentPartTextParam,
-    ChatCompletionContentPartRefusalParam,
     ChatCompletionMessage,
     ChatCompletionSystemMessageParam,
     ChatCompletionToolParam,
-    ChatCompletionToolMessageParam,
     ChatCompletionUserMessageParam,
 )
 from mcp.types import (
     CallToolRequestParams,
     CallToolRequest,
     CallToolResult,
-    EmbeddedResource,
-    ImageContent,
-    TextContent,
-    TextResourceContents,
 )
 
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     ModelT,
-    MCPMessageParam,
-    MCPMessageResult,
-    ProviderToMCPConverter,
     RequestParams,
 )
 from mcp_agent.core.exceptions import ProviderKeyError
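
Note: the PromptMessageMultipart import is guarded by TYPE_CHECKING, the standard Python pattern for type-only imports that keeps an annotation available to checkers without creating a runtime import cycle. A minimal sketch of the pattern (the describe helper is illustrative, not part of the package):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Evaluated only by static type checkers, never at runtime,
        # so module import cycles are avoided.
        from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

    def describe(prompt: "PromptMessageMultipart") -> str:
        # String annotation (forward reference): the class need not
        # be importable when this module loads.
        return f"{prompt.role}: {len(prompt.content)} part(s)"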
@@ -57,7 +53,7 @@ class OpenAIAugmentedLLM(
     def __init__(self, *args, **kwargs):
         # Set type_converter before calling super().__init__
         if "type_converter" not in kwargs:
-            kwargs["type_converter"] = MCPOpenAITypeConverter
+            kwargs["type_converter"] = OpenAISamplingConverter
 
         super().__init__(*args, **kwargs)
 
@@ -197,11 +193,11 @@ class OpenAIAugmentedLLM(
         else:
             self.show_user_message(str(message), model, chat_turn)
 
+        # we do NOT send stop sequences as this causes errors with multimodal processing
         for i in range(params.max_iterations):
             arguments = {
                 "model": model or "gpt-4o",
                 "messages": messages,
-                "stop": params.stopSequences,
                 "tools": available_tools,
             }
             if self._reasoning:
@@ -284,30 +280,34 @@ class OpenAIAugmentedLLM(
                     message.tool_calls[0].function.name,
                 )
 
-                # Execute all tool calls in parallel.
-                tool_tasks = []
+                tool_results = []
                 for tool_call in message.tool_calls:
                     self.show_tool_call(
                         available_tools,
                         tool_call.function.name,
                         tool_call.function.arguments,
                     )
-                    tool_tasks.append(self.execute_tool_call(tool_call))
-                # Wait for all tool calls to complete.
-                tool_results = await self.executor.execute(*tool_tasks)
+                    tool_call_request = CallToolRequest(
+                        method="tools/call",
+                        params=CallToolRequestParams(
+                            name=tool_call.function.name,
+                            arguments=from_json(
+                                tool_call.function.arguments, allow_partial=True
+                            ),
+                        ),
+                    )
+                    result = await self.call_tool(tool_call_request, tool_call.id)
+                    self.show_oai_tool_result(str(result))
+
+                    tool_results.append((tool_call.id, result))
+
+                messages.extend(
+                    OpenAIConverter.convert_function_results_to_openai(tool_results)
+                )
+
                 self.logger.debug(
                     f"Iteration {i}: Tool call results: {str(tool_results) if tool_results else 'None'}"
                 )
-                # Add non-None results to messages.
-                for result in tool_results:
-                    if isinstance(result, BaseException):
-                        self.logger.error(
-                            f"Warning: Unexpected error during tool execution: {result}. Continuing..."
-                        )
-                        continue
-                    if result is not None:
-                        self.show_oai_tool_result(str(result["content"]))
-                        messages.append(result)
             elif choice.finish_reason == "length":
                 # We have reached the max tokens limit
                 self.logger.debug(
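
Tool calls now run sequentially, and the model-supplied argument string is parsed with pydantic_core.from_json instead of json.loads; allow_partial=True means a truncated payload still yields whatever complete leading fields arrived. A quick illustration of that parsing behaviour:

    from pydantic_core import from_json

    # Well-formed arguments parse just as with json.loads.
    assert from_json('{"city": "London", "units": "metric"}') == {
        "city": "London",
        "units": "metric",
    }

    # A truncated payload raises with json.loads, but allow_partial=True
    # recovers the complete leading fields and drops the broken tail.
    assert from_json('{"city": "London", "units": "met', allow_partial=True) == {
        "city": "London"
    }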
@@ -367,7 +367,15 @@ class OpenAIAugmentedLLM(
         Process a query using an LLM and available tools.
         The default implementation uses OpenAI's ChatCompletion as the LLM.
         Override this method to use a different LLM.
+
+        Special commands:
+        - "***SAVE_HISTORY <filename.md>" - Saves the conversation history to the specified file
+          in MCP prompt format with user/assistant delimiters.
         """
+        # Check if this is a special command to save history
+        if isinstance(message, str) and message.startswith("***SAVE_HISTORY "):
+            return await self._save_history_to_file(message)
+
         responses = await self.generate(
             message=message,
             request_params=request_params,
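
The ***SAVE_HISTORY command is intercepted before any model call, so saving a transcript consumes no tokens. A usage sketch, assuming an already-initialized OpenAIAugmentedLLM named llm:

    # Inside an async context, after some conversation turns:
    result = await llm.generate_str("***SAVE_HISTORY transcript.md")
    print(result)  # "Done. Saved conversation history to transcript.md"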
@@ -386,6 +394,110 @@ class OpenAIAugmentedLLM(
 
         return "\n".join(final_text)
 
+    async def _apply_prompt_template_provider_specific(
+        self, multipart_messages: List["PromptMessageMultipart"]
+    ) -> str:
+        """
+        OpenAI-specific implementation of apply_prompt_template that handles
+        multimodal content natively.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects parsed from the prompt template
+
+        Returns:
+            String representation of the assistant's response if generated,
+            or the last assistant message in the prompt
+        """
+
+        # TODO -- this is very similar to Anthropic (just the converter class changes).
+        # TODO -- potential refactor to base class, standardize Converter interface
+        # Check the last message role
+        last_message = multipart_messages[-1]
+
+        # Add all previous messages to history (or all messages if last is from assistant)
+        messages_to_add = (
+            multipart_messages[:-1]
+            if last_message.role == "user"
+            else multipart_messages
+        )
+        converted = []
+        for msg in messages_to_add:
+            converted.append(OpenAIConverter.convert_to_openai(msg))
+        self.history.extend(converted, is_prompt=True)
+
+        if last_message.role == "user":
+            # For user messages: Generate response to the last one
+            self.logger.debug(
+                "Last message in prompt is from user, generating assistant response"
+            )
+            message_param = OpenAIConverter.convert_to_openai(last_message)
+            return await self.generate_str(message_param)
+        else:
+            # For assistant messages: Return the last message content as text
+            self.logger.debug(
+                "Last message in prompt is from assistant, returning it directly"
+            )
+            return str(last_message)
+
+    async def _save_history_to_file(self, command: str) -> str:
+        """
+        Save the conversation history to a file in MCP prompt format.
+
+        Args:
+            command: The command string, expected format: "***SAVE_HISTORY <filename.md>"
+
+        Returns:
+            Success or error message
+        """
+        try:
+            # Extract the filename from the command
+            parts = command.split(" ", 1)
+            if len(parts) != 2 or not parts[1].strip():
+                return "Error: Invalid format. Expected '***SAVE_HISTORY <filename.md>'"
+
+            filename = parts[1].strip()
+
+            # Get all messages from history
+            messages = self.history.get(include_history=True)
+
+            # Import required utilities
+            from mcp_agent.workflows.llm.openai_utils import (
+                openai_message_param_to_prompt_message_multipart,
+            )
+            from mcp_agent.mcp.prompt_serialization import (
+                multipart_messages_to_delimited_format,
+            )
+
+            # Convert message params to PromptMessageMultipart objects
+            multipart_messages = []
+            for msg in messages:
+                # Skip system messages - PromptMessageMultipart only supports user and assistant roles
+                if isinstance(msg, dict) and msg.get("role") == "system":
+                    continue
+
+                # Convert the message to a multipart message
+                multipart_messages.append(
+                    openai_message_param_to_prompt_message_multipart(msg)
+                )
+
+            # Convert to delimited format
+            delimited_content = multipart_messages_to_delimited_format(
+                multipart_messages,
+                user_delimiter="---USER",
+                assistant_delimiter="---ASSISTANT",
+            )
+
+            # Write to file
+            with open(filename, "w", encoding="utf-8") as f:
+                f.write("\n\n".join(delimited_content))
+
+            self.logger.info(f"Saved conversation history to {filename}")
+            return f"Done. Saved conversation history to {filename}"
+
+        except Exception as e:
+            self.logger.error(f"Error saving history: {str(e)}")
+            return f"Error saving history: {str(e)}"
+
     async def generate_structured(
         self,
         message,
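
_save_history_to_file writes the history with ---USER and ---ASSISTANT delimiters, so the output doubles as an MCP prompt file that can be replayed later. The message text below is illustrative; given the "\n\n" join above, a saved transcript.md would look roughly like:

    ---USER

    What does this image show?

    ---ASSISTANT

    It shows a bar chart of monthly active users.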
@@ -399,14 +511,11 @@ class OpenAIAugmentedLLM(
         )
         return responses[0].parsed
 
-        # return response_model.model_validate(
-        #     from_json(responses[0].content, allow_partial=True)
-        # )
-        # part1 = from_json(response, allow_partial=True)
-        # return response_model.model_validate(part1)
-
-        # TODO -- would prefer to use the OpenAI message[0].parsed function here
-        # return response_model.model_validate(from_json(response, allow_partial=True))
+    async def generate_prompt(
+        self, prompt: "PromptMessageMultipart", request_params: RequestParams | None
+    ) -> str:
+        converted_prompt = OpenAIConverter.convert_to_openai(prompt)
+        return await self.generate_str(converted_prompt, request_params)
 
     async def pre_tool_call(self, tool_call_id: str | None, request: CallToolRequest):
         return request
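
generate_prompt is the new multipart entry point: the PromptMessageMultipart is converted once by OpenAIConverter and then flows through the regular generate_str path. A hedged sketch of a call (the content values are illustrative; field names follow the MCP types used elsewhere in this diff):

    from mcp.types import TextContent
    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

    prompt = PromptMessageMultipart(
        role="user",
        content=[TextContent(type="text", text="Summarize the release notes.")],
    )
    # Inside an async context, with an initialized OpenAIAugmentedLLM `llm`:
    response = await llm.generate_prompt(prompt, request_params=None)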
@@ -416,47 +525,6 @@ class OpenAIAugmentedLLM(
     ):
         return result
 
-    async def execute_tool_call(
-        self,
-        tool_call: ChatCompletionToolParam,
-    ) -> ChatCompletionToolMessageParam | None:
-        """
-        Execute a single tool call and return the result message.
-        Returns None if there's no content to add to messages.
-        """
-        tool_name = tool_call.function.name
-        tool_args_str = tool_call.function.arguments
-        tool_call_id = tool_call.id
-        tool_args = {}
-
-        try:
-            if tool_args_str:
-                tool_args = json.loads(tool_args_str)
-        except json.JSONDecodeError as e:
-            return ChatCompletionToolMessageParam(
-                role="tool",
-                tool_call_id=tool_call_id,
-                content=f"Invalid JSON provided in tool call arguments for '{tool_name}'. Failed to load JSON: {str(e)}",
-            )
-
-        tool_call_request = CallToolRequest(
-            method="tools/call",
-            params=CallToolRequestParams(name=tool_name, arguments=tool_args),
-        )
-
-        result = await self.call_tool(
-            request=tool_call_request, tool_call_id=tool_call_id
-        )
-
-        if result.content:
-            return ChatCompletionToolMessageParam(
-                role="tool",
-                tool_call_id=tool_call_id,
-                content=[mcp_content_to_openai_content(c) for c in result.content],
-            )
-
-        return None
-
     def message_param_str(self, message: ChatCompletionMessageParam) -> str:
         """Convert an input message to a string representation."""
         if message.get("content"):
@@ -483,227 +551,3 @@ class OpenAIAugmentedLLM(
             return content
 
         return str(message)
-
-
-class MCPOpenAITypeConverter(
-    ProviderToMCPConverter[ChatCompletionMessageParam, ChatCompletionMessage]
-):
-    """
-    Convert between OpenAI and MCP types.
-    """
-
-    @classmethod
-    def from_mcp_message_result(cls, result: MCPMessageResult) -> ChatCompletionMessage:
-        # MCPMessageResult -> ChatCompletionMessage
-        if result.role != "assistant":
-            raise ValueError(
-                f"Expected role to be 'assistant' but got '{result.role}' instead."
-            )
-
-        return ChatCompletionMessage(
-            role="assistant",
-            content=result.content.text or str(result.context),
-            # Lossy conversion for the following fields:
-            # result.model
-            # result.stopReason
-        )
-
-    @classmethod
-    def to_mcp_message_result(cls, result: ChatCompletionMessage) -> MCPMessageResult:
-        # ChatCompletionMessage -> MCPMessageResult
-        return MCPMessageResult(
-            role=result.role,
-            content=TextContent(type="text", text=result.content),
-            model=None,
-            stopReason=None,
-            # extras for ChatCompletionMessage fields
-            **result.model_dump(exclude={"role", "content"}),
-        )
-
-    @classmethod
-    def from_mcp_message_param(
-        cls, param: MCPMessageParam
-    ) -> ChatCompletionMessageParam:
-        # MCPMessageParam -> ChatCompletionMessageParam
-        if param.role == "assistant":
-            extras = param.model_dump(exclude={"role", "content"})
-            return ChatCompletionAssistantMessageParam(
-                role="assistant",
-                content=mcp_content_to_openai_content(param.content),
-                **extras,
-            )
-        elif param.role == "user":
-            extras = param.model_dump(exclude={"role", "content"})
-            return ChatCompletionUserMessageParam(
-                role="user",
-                content=mcp_content_to_openai_content(param.content),
-                **extras,
-            )
-        else:
-            raise ValueError(
-                f"Unexpected role: {param.role}, MCP only supports 'assistant' and 'user'"
-            )
-
-    @classmethod
-    def to_mcp_message_param(cls, param: ChatCompletionMessageParam) -> MCPMessageParam:
-        # ChatCompletionMessage -> MCPMessageParam
-
-        contents = openai_content_to_mcp_content(param.content)
-
-        # TODO: saqadri - the mcp_content can have multiple elements
-        # while sampling message content has a single content element
-        # Right now we error out if there are > 1 elements in mcp_content
-        # We need to handle this case properly going forward
-        if len(contents) > 1:
-            raise NotImplementedError(
-                "Multiple content elements in a single message are not supported"
-            )
-        mcp_content: TextContent | ImageContent | EmbeddedResource = contents[0]
-
-        if param.role == "assistant":
-            return MCPMessageParam(
-                role="assistant",
-                content=mcp_content,
-                **typed_dict_extras(param, ["role", "content"]),
-            )
-        elif param.role == "user":
-            return MCPMessageParam(
-                role="user",
-                content=mcp_content,
-                **typed_dict_extras(param, ["role", "content"]),
-            )
-        elif param.role == "tool":
-            raise NotImplementedError(
-                "Tool messages are not supported in SamplingMessage yet"
-            )
-        elif param.role == "system":
-            raise NotImplementedError(
-                "System messages are not supported in SamplingMessage yet"
-            )
-        elif param.role == "developer":
-            raise NotImplementedError(
-                "Developer messages are not supported in SamplingMessage yet"
-            )
-        elif param.role == "function":
-            raise NotImplementedError(
-                "Function messages are not supported in SamplingMessage yet"
-            )
-        else:
-            raise ValueError(
-                f"Unexpected role: {param.role}, MCP only supports 'assistant', 'user', 'tool', 'system', 'developer', and 'function'"
-            )
-
-    @classmethod
-    def from_mcp_prompt_message(
-        cls, message: PromptMessage
-    ) -> ChatCompletionMessageParam:
-        """Convert an MCP PromptMessage to an OpenAI ChatCompletionMessageParam."""
-
-        # Extract content
-        content = None
-        if hasattr(message.content, "text"):
-            content = message.content.text
-        else:
-            content = str(message.content)
-
-        # Extract extras
-        extras = message.model_dump(exclude={"role", "content"})
-
-        if message.role == "user":
-            return ChatCompletionUserMessageParam(
-                role="user", content=content, **extras
-            )
-        elif message.role == "assistant":
-            return ChatCompletionAssistantMessageParam(
-                role="assistant", content=content, **extras
-            )
-        else:
-            # Fall back to user for any unrecognized role, including "system"
-            _logger.warning(
-                f"Unsupported role '{message.role}' in PromptMessage. Falling back to 'user' role."
-            )
-            return ChatCompletionUserMessageParam(
-                role="user", content=f"[{message.role.upper()}] {content}", **extras
-            )
-
-
-def mcp_content_to_openai_content(
-    content: TextContent | ImageContent | EmbeddedResource,
-) -> ChatCompletionContentPartTextParam:
-    if isinstance(content, list):
-        # Handle list of content items
-        return ChatCompletionContentPartTextParam(
-            type="text",
-            text="\n".join(mcp_content_to_openai_content(c) for c in content),
-        )
-
-    if isinstance(content, TextContent):
-        return ChatCompletionContentPartTextParam(type="text", text=content.text)
-    elif isinstance(content, ImageContent):
-        # Best effort to convert an image to text
-        return ChatCompletionContentPartTextParam(
-            type="text", text=f"{content.mimeType}:{content.data}"
-        )
-    elif isinstance(content, EmbeddedResource):
-        if isinstance(content.resource, TextResourceContents):
-            return ChatCompletionContentPartTextParam(
-                type="text", text=content.resource.text
-            )
-        else:  # BlobResourceContents
-            return ChatCompletionContentPartTextParam(
-                type="text", text=f"{content.resource.mimeType}:{content.resource.blob}"
-            )
-    else:
-        # Last effort to convert the content to a string
-        return ChatCompletionContentPartTextParam(type="text", text=str(content))
-
-
-def openai_content_to_mcp_content(
-    content: str
-    | Iterable[ChatCompletionContentPartParam | ChatCompletionContentPartRefusalParam],
-) -> Iterable[TextContent | ImageContent | EmbeddedResource]:
-    mcp_content = []
-
-    if isinstance(content, str):
-        mcp_content = [TextContent(type="text", text=content)]
-    else:
-        # TODO: saqadri - this is a best effort conversion, we should handle all possible content types
-        for c in content:
-            if c.type == "text":  # isinstance(c, ChatCompletionContentPartTextParam):
-                mcp_content.append(
-                    TextContent(
-                        type="text", text=c.text, **typed_dict_extras(c, ["text"])
-                    )
-                )
-            elif (
-                c.type == "image_url"
-            ):  # isinstance(c, ChatCompletionContentPartImageParam):
-                raise NotImplementedError("Image content conversion not implemented")
-                # TODO: saqadri - need to download the image into a base64-encoded string
-                # Download image from c.image_url
-                # return ImageContent(
-                #     type="image",
-                #     data=downloaded_image,
-                #     **c
-                # )
-            elif (
-                c.type == "input_audio"
-            ):  # isinstance(c, ChatCompletionContentPartInputAudioParam):
                raise NotImplementedError("Audio content conversion not implemented")
-            elif (
-                c.type == "refusal"
-            ):  # isinstance(c, ChatCompletionContentPartRefusalParam):
-                mcp_content.append(
-                    TextContent(
-                        type="text", text=c.refusal, **typed_dict_extras(c, ["refusal"])
-                    )
-                )
-            else:
-                raise ValueError(f"Unexpected content type: {c.type}")
-
-    return mcp_content
-
-
-def typed_dict_extras(d: dict, exclude: List[str]):
-    extras = {k: v for k, v in d.items() if k not in exclude}
-    return extras
mcp_agent/workflows/llm/augmented_llm_passthrough.py
@@ -1,6 +1,9 @@
 from typing import Any, List, Optional, Type, Union
 
+from mcp import GetPromptResult
+from mcp.types import PromptMessage
 from pydantic_core import from_json
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     MessageParamT,
@@ -8,6 +11,7 @@ from mcp_agent.workflows.llm.augmented_llm import (
     ModelT,
     RequestParams,
 )
+from mcp_agent.logging.logger import get_logger
 
 
 class PassthroughLLM(AugmentedLLM):
@@ -21,6 +25,10 @@ class PassthroughLLM(AugmentedLLM):
 
     def __init__(self, name: str = "Passthrough", context=None, **kwargs):
         super().__init__(name=name, context=context, **kwargs)
+        self.provider = "fast-agent"
+        # Initialize logger - keep it simple without name reference
+        self.logger = get_logger(__name__)
+        self._messages = [PromptMessage]
 
     async def generate(
         self,
@@ -59,3 +67,38 @@ class PassthroughLLM(AugmentedLLM):
             return response_model(**message)
         elif isinstance(message, str):
             return response_model.model_validate(from_json(message, allow_partial=True))
+
+    async def generate_prompt(
+        self, prompt: "PromptMessageMultipart", request_params: RequestParams | None
+    ) -> str:
+        return await self.generate_str(prompt.content[0].text, request_params)
+
+    async def apply_prompt_template(
+        self, prompt_result: GetPromptResult, prompt_name: str
+    ) -> str:
+        """
+        Apply a prompt template by adding it to the conversation history.
+        If the last message in the prompt is from a user, automatically
+        generate an assistant response.
+
+        Args:
+            prompt_result: The GetPromptResult containing prompt messages
+            prompt_name: The name of the prompt being applied
+
+        Returns:
+            String representation of the assistant's response if generated,
+            or the last assistant message in the prompt
+        """
+        prompt_messages: List[PromptMessage] = prompt_result.messages
+
+        # Extract arguments if they were stored in the result
+        arguments = getattr(prompt_result, "arguments", None)
+
+        # Display information about the loaded prompt
+        await self.show_prompt_loaded(
+            prompt_name=prompt_name,
+            description=prompt_result.description,
+            message_count=len(prompt_messages),
+            arguments=arguments,
+        )
+        self._messages = prompt_messages
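
With generate_prompt and apply_prompt_template implemented, PassthroughLLM now exposes the same prompt surface as the real providers, which makes it handy for exercising agent wiring in tests without an API key. A sketch, assuming the echo behaviour its name implies:

    from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM

    # Inside an async context:
    llm = PassthroughLLM(name="Passthrough")
    echoed = await llm.generate_str("hello world")
    assert echoed == "hello world"  # passthrough echoes its input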