fast-agent-mcp 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. The information is provided for informational purposes only.
Files changed (28)
  1. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/METADATA +5 -1
  2. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/RECORD +28 -17
  3. mcp_agent/agents/agent.py +46 -0
  4. mcp_agent/core/agent_app.py +373 -9
  5. mcp_agent/core/decorators.py +455 -0
  6. mcp_agent/core/enhanced_prompt.py +70 -4
  7. mcp_agent/core/factory.py +501 -0
  8. mcp_agent/core/fastagent.py +140 -1059
  9. mcp_agent/core/proxies.py +83 -47
  10. mcp_agent/core/validation.py +221 -0
  11. mcp_agent/human_input/handler.py +5 -2
  12. mcp_agent/mcp/mcp_aggregator.py +537 -47
  13. mcp_agent/mcp/mcp_connection_manager.py +13 -2
  14. mcp_agent/mcp_server/__init__.py +4 -0
  15. mcp_agent/mcp_server/agent_server.py +121 -0
  16. mcp_agent/resources/examples/internal/fastagent.config.yaml +52 -0
  17. mcp_agent/resources/examples/internal/prompt_category.py +21 -0
  18. mcp_agent/resources/examples/internal/prompt_sizing.py +53 -0
  19. mcp_agent/resources/examples/internal/sizer.py +24 -0
  20. mcp_agent/resources/examples/researcher/fastagent.config.yaml +14 -1
  21. mcp_agent/resources/examples/workflows/sse.py +23 -0
  22. mcp_agent/ui/console_display.py +278 -0
  23. mcp_agent/workflows/llm/augmented_llm.py +245 -179
  24. mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -3
  25. mcp_agent/workflows/llm/augmented_llm_openai.py +52 -4
  26. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/WHEEL +0 -0
  27. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/entry_points.txt +0 -0
  28. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/licenses/LICENSE +0 -0
@@ -1,7 +1,7 @@
1
1
  import json
2
2
  import os
3
3
  from typing import Iterable, List, Type
4
-
4
+ from mcp.types import PromptMessage
5
5
  import instructor
6
6
  from openai import OpenAI, AuthenticationError
7
7
  from openai.types.chat import (
@@ -38,6 +38,7 @@ from mcp_agent.core.exceptions import ProviderKeyError
38
38
  from mcp_agent.logging.logger import get_logger
39
39
  from rich.text import Text
40
40
 
41
+ _logger = get_logger(__name__)
41
42
 
42
43
  DEFAULT_OPENAI_MODEL = "gpt-4o"
43
44
  DEFAULT_REASONING_EFFORT = "medium"
@@ -53,6 +54,10 @@ class OpenAIAugmentedLLM(
53
54
  """
54
55
 
55
56
  def __init__(self, *args, **kwargs):
57
+ # Set type_converter before calling super().__init__
58
+ if "type_converter" not in kwargs:
59
+ kwargs["type_converter"] = MCPOpenAITypeConverter
60
+
56
61
  super().__init__(*args, **kwargs)
57
62
 
58
63
  self.provider = "OpenAI"
@@ -147,8 +152,9 @@ class OpenAIAugmentedLLM(
147
152
  ChatCompletionSystemMessageParam(role="system", content=system_prompt)
148
153
  )
149
154
 
150
- if params.use_history:
151
- messages.extend(self.history.get())
155
+ # Always include prompt messages, but only include conversation history
156
+ # if use_history is True
157
+ messages.extend(self.history.get(include_history=params.use_history))
152
158
 
153
159
  if isinstance(message, str):
154
160
  messages.append(
@@ -323,8 +329,17 @@ class OpenAIAugmentedLLM(
323
329
  await self.show_assistant_message(message_text, "")
324
330
  break
325
331
 
332
+ # Only save the new conversation messages to history if use_history is true
333
+ # Keep the prompt messages separate
326
334
  if params.use_history:
327
- self.history.set(messages)
335
+ # Get current prompt messages
336
+ prompt_messages = self.history.get(include_history=False)
337
+
338
+ # Calculate new conversation messages (excluding prompts)
339
+ new_messages = messages[len(prompt_messages):]
340
+
341
+ # Update conversation history
342
+ self.history.set(new_messages)
328
343
 
329
344
  self._log_chat_finished(model=model)
330
345
 
@@ -584,6 +599,39 @@ class MCPOpenAITypeConverter(
584
599
  f"Unexpected role: {param.role}, MCP only supports 'assistant', 'user', 'tool', 'system', 'developer', and 'function'"
585
600
  )
586
601
 
602
+ @classmethod
603
+ def from_mcp_prompt_message(
604
+ cls, message: PromptMessage
605
+ ) -> ChatCompletionMessageParam:
606
+ """Convert an MCP PromptMessage to an OpenAI ChatCompletionMessageParam."""
607
+
608
+ # Extract content
609
+ content = None
610
+ if hasattr(message.content, "text"):
611
+ content = message.content.text
612
+ else:
613
+ content = str(message.content)
614
+
615
+ # Extract extras
616
+ extras = message.model_dump(exclude={"role", "content"})
617
+
618
+ if message.role == "user":
619
+ return ChatCompletionUserMessageParam(
620
+ role="user", content=content, **extras
621
+ )
622
+ elif message.role == "assistant":
623
+ return ChatCompletionAssistantMessageParam(
624
+ role="assistant", content=content, **extras
625
+ )
626
+ else:
627
+ # Fall back to user for any unrecognized role, including "system"
628
+ _logger.warning(
629
+ f"Unsupported role '{message.role}' in PromptMessage. Falling back to 'user' role."
630
+ )
631
+ return ChatCompletionUserMessageParam(
632
+ role="user", content=f"[{message.role.upper()}] {content}", **extras
633
+ )
634
+
587
635
 
588
636
  def mcp_content_to_openai_content(
589
637
  content: TextContent | ImageContent | EmbeddedResource,