fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +26 -4
  2. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +43 -22
  3. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
  4. mcp_agent/agents/agent.py +5 -11
  5. mcp_agent/core/agent_app.py +89 -13
  6. mcp_agent/core/fastagent.py +13 -3
  7. mcp_agent/core/mcp_content.py +222 -0
  8. mcp_agent/core/prompt.py +132 -0
  9. mcp_agent/core/proxies.py +41 -36
  10. mcp_agent/logging/transport.py +30 -3
  11. mcp_agent/mcp/mcp_aggregator.py +11 -10
  12. mcp_agent/mcp/mime_utils.py +69 -0
  13. mcp_agent/mcp/prompt_message_multipart.py +64 -0
  14. mcp_agent/mcp/prompt_serialization.py +447 -0
  15. mcp_agent/mcp/prompts/__init__.py +0 -0
  16. mcp_agent/mcp/prompts/__main__.py +10 -0
  17. mcp_agent/mcp/prompts/prompt_server.py +508 -0
  18. mcp_agent/mcp/prompts/prompt_template.py +469 -0
  19. mcp_agent/mcp/resource_utils.py +203 -0
  20. mcp_agent/resources/examples/internal/agent.py +1 -1
  21. mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
  22. mcp_agent/resources/examples/internal/sizer.py +0 -5
  23. mcp_agent/resources/examples/prompting/__init__.py +3 -0
  24. mcp_agent/resources/examples/prompting/agent.py +23 -0
  25. mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
  26. mcp_agent/resources/examples/prompting/image_server.py +56 -0
  27. mcp_agent/workflows/llm/anthropic_utils.py +101 -0
  28. mcp_agent/workflows/llm/augmented_llm.py +139 -66
  29. mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
  30. mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
  31. mcp_agent/workflows/llm/augmented_llm_passthrough.py +43 -0
  32. mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
  33. mcp_agent/workflows/llm/model_factory.py +20 -3
  34. mcp_agent/workflows/llm/openai_utils.py +65 -0
  35. mcp_agent/workflows/llm/providers/__init__.py +8 -0
  36. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
  37. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
  38. mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
  39. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
  40. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
  41. mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
  42. mcp_agent/core/server_validation.py +0 -44
  43. mcp_agent/core/simulator_registry.py +0 -22
  44. mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
  45. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
  46. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,132 @@
1
+ """
2
+ Prompt class for easily creating and working with MCP prompt content.
3
+ """
4
+
5
+ from typing import List, Literal
6
+
7
+ from mcp.types import PromptMessage
8
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
9
+
10
+ # Import our content helper functions
11
+ from .mcp_content import User, Assistant, MCPPrompt
12
+
13
+
14
class Prompt:
    """
    Convenience factory for MCP prompt content.

    The classmethods on this class build:
    - PromptMessageMultipart instances (``user``, ``assistant``, ``message``)
    - Flat lists of PromptMessage objects (``conversation``, ``from_multipart``)

    Content items are normalised by the underlying helpers: strings become
    TextContent, image file paths become ImageContent, other file paths become
    EmbeddedResource, and pre-formatted messages pass through unchanged.
    """

    @classmethod
    def user(cls, *content_items) -> PromptMessageMultipart:
        """
        Build a user-role PromptMessageMultipart.

        Args:
            *content_items: Content items (strings, file paths, etc.)

        Returns:
            A PromptMessageMultipart with role "user" and the given content.
        """
        parts = User(*content_items)
        return PromptMessageMultipart(
            role="user",
            content=[part["content"] for part in parts],
        )

    @classmethod
    def assistant(cls, *content_items) -> PromptMessageMultipart:
        """
        Build an assistant-role PromptMessageMultipart.

        Args:
            *content_items: Content items (strings, file paths, etc.)

        Returns:
            A PromptMessageMultipart with role "assistant" and the given content.
        """
        parts = Assistant(*content_items)
        return PromptMessageMultipart(
            role="assistant",
            content=[part["content"] for part in parts],
        )

    @classmethod
    def message(
        cls, *content_items, role: Literal["user", "assistant"] = "user"
    ) -> PromptMessageMultipart:
        """
        Build a PromptMessageMultipart with an explicit role.

        Args:
            *content_items: Content items (strings, file paths, etc.)
            role: Role for the message ("user" or "assistant")

        Returns:
            A PromptMessageMultipart with the resolved role and content.
        """
        parts = MCPPrompt(*content_items, role=role)
        # Prefer the role reported by the built messages; fall back to the
        # requested role when no content items were supplied.
        resolved_role = parts[0]["role"] if parts else role
        return PromptMessageMultipart(
            role=resolved_role,
            content=[part["content"] for part in parts],
        )

    @classmethod
    def conversation(cls, *messages) -> List[PromptMessage]:
        """
        Flatten assorted message inputs into a list of PromptMessages.

        Accepted inputs per item:
        - PromptMessageMultipart instances
        - Dicts carrying both "role" and "content" keys
        - Lists of such dicts

        Anything else is silently skipped (lenient input handling).

        Args:
            *messages: Messages to include in the conversation.

        Returns:
            A flat list of PromptMessage objects.
        """
        flattened: List[PromptMessage] = []
        for item in messages:
            if isinstance(item, PromptMessageMultipart):
                # One PromptMessage per content part.
                flattened.extend(item.to_prompt_messages())
            elif isinstance(item, dict):
                if "role" in item and "content" in item:
                    flattened.append(PromptMessage(**item))
            elif isinstance(item, list):
                flattened.extend(
                    PromptMessage(**entry)
                    for entry in item
                    if isinstance(entry, dict)
                    and "role" in entry
                    and "content" in entry
                )
        return flattened

    @classmethod
    def from_multipart(
        cls, multipart: List[PromptMessageMultipart]
    ) -> List[PromptMessage]:
        """
        Expand a list of PromptMessageMultipart objects into PromptMessages.

        Args:
            multipart: List of PromptMessageMultipart objects.

        Returns:
            A flat list of PromptMessage objects.
        """
        return [msg for mp in multipart for msg in mp.to_prompt_messages()]
mcp_agent/core/proxies.py CHANGED
@@ -3,10 +3,11 @@ Proxy classes for agent interactions.
3
3
  These proxies provide a consistent interface for interacting with different types of agents.
4
4
  """
5
5
 
6
- from typing import List, Optional, Dict, TYPE_CHECKING
6
+ from typing import List, Optional, Dict, Union, TYPE_CHECKING
7
7
 
8
8
  from mcp_agent.agents.agent import Agent
9
9
  from mcp_agent.app import MCPApp
10
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
10
11
 
11
12
  # Handle circular imports
12
13
  if TYPE_CHECKING:
@@ -34,27 +35,43 @@ class BaseAgentProxy:
34
35
  return await self.prompt()
35
36
  return await self.send(message)
36
37
 
37
- async def send(self, message: Optional[str] = None) -> str:
38
- """Allow: agent.researcher.send('message')"""
38
+ async def send(
39
+ self, message: Optional[Union[str, PromptMessageMultipart]] = None
40
+ ) -> str:
41
+ """
42
+ Allow: agent.researcher.send('message') or agent.researcher.send(Prompt.user('message'))
43
+
44
+ Args:
45
+ message: Either a string message or a PromptMessageMultipart object
46
+
47
+ Returns:
48
+ The agent's response as a string
49
+ """
39
50
  if message is None:
40
51
  # For consistency with agent(), use prompt() to open the interactive interface
41
52
  return await self.prompt()
53
+
54
+ # If a PromptMessageMultipart is passed, use send_prompt
55
+ if isinstance(message, PromptMessageMultipart):
56
+ return await self.send_prompt(message)
57
+
58
+ # For string messages, use generate_str (traditional behavior)
42
59
  return await self.generate_str(message)
43
60
 
44
61
  async def prompt(self, default_prompt: str = "") -> str:
45
62
  """Allow: agent.researcher.prompt()"""
46
63
  from mcp_agent.core.agent_app import AgentApp
47
-
64
+
48
65
  # First check if _app is directly an AgentApp
49
66
  if isinstance(self._app, AgentApp):
50
67
  return await self._app.prompt(self._name, default_prompt)
51
-
68
+
52
69
  # If not, check if it's an MCPApp with an _agent_app attribute
53
70
  if hasattr(self._app, "_agent_app"):
54
71
  agent_app = self._app._agent_app
55
72
  if agent_app:
56
73
  return await agent_app.prompt(self._name, default_prompt)
57
-
74
+
58
75
  # If we can't find an AgentApp, return an error message
59
76
  return "ERROR: Cannot prompt() - AgentApp not found"
60
77
 
@@ -62,23 +79,18 @@ class BaseAgentProxy:
62
79
  """Generate response for a message - must be implemented by subclasses"""
63
80
  raise NotImplementedError("Subclasses must implement generate_str")
64
81
 
65
- async def load_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
66
- """
67
- Use a Prompt from an MCP Server - implemented by subclasses.
68
- Always returns an Assistant message.
69
-
70
- Args:
71
- prompt_name: Name of the prompt to load
72
- arguments: Optional dictionary of string arguments for prompt templating
73
- """
74
- raise NotImplementedError("Subclasses must implement mcp-prompt")
75
-
76
- async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
82
+ async def send_prompt(self, prompt: PromptMessageMultipart) -> str:
83
+ """Send a message to the agent and return the response"""
84
+ raise NotImplementedError("Subclasses must implement send(prompt)")
85
+
86
+ async def apply_prompt(
87
+ self, prompt_name: str = None, arguments: dict[str, str] = None
88
+ ) -> str:
77
89
  """
78
90
  Apply a Prompt from an MCP Server - implemented by subclasses.
79
91
  This is the preferred method for applying prompts.
80
92
  Always returns an Assistant message.
81
-
93
+
82
94
  Args:
83
95
  prompt_name: Name of the prompt to apply
84
96
  arguments: Optional dictionary of string arguments for prompt templating
@@ -97,28 +109,21 @@ class LLMAgentProxy(BaseAgentProxy):
97
109
  """Forward message and all kwargs to the agent's LLM"""
98
110
  return await self._agent._llm.generate_str(message, **kwargs)
99
111
 
100
- async def load_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
101
- """
102
- Load and apply a prompt from an MCP server.
103
-
104
- Args:
105
- prompt_name: Name of the prompt to load
106
- arguments: Optional dictionary of string arguments for prompt templating
107
-
108
- Returns:
109
- The assistant's response
110
- """
111
- return await self._agent.load_prompt(prompt_name, arguments)
112
-
113
- async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
112
+ async def send_prompt(self, prompt: PromptMessageMultipart) -> str:
113
+ """Send a message to the agent and return the response"""
114
+ return await self._agent._llm.generate_prompt(prompt, None)
115
+
116
+ async def apply_prompt(
117
+ self, prompt_name: str = None, arguments: dict[str, str] = None
118
+ ) -> str:
114
119
  """
115
120
  Apply a prompt from an MCP server.
116
121
  This is the preferred method for applying prompts.
117
-
122
+
118
123
  Args:
119
124
  prompt_name: Name of the prompt to apply
120
125
  arguments: Optional dictionary of string arguments for prompt templating
121
-
126
+
122
127
  Returns:
123
128
  The assistant's response
124
129
  """
@@ -220,4 +225,4 @@ class ChainProxy(BaseAgentProxy):
220
225
  proxy = self._agent_proxies[agent_name]
221
226
  current_message = await proxy.generate_str(current_message)
222
227
 
223
- return current_message
228
+ return current_message
@@ -290,6 +290,21 @@ class AsyncEventBus:
290
290
  # Update transport if provided
291
291
  cls._instance.transport = transport
292
292
  return cls._instance
293
+
294
+ @classmethod
295
+ def reset(cls) -> None:
296
+ """
297
+ Reset the singleton instance.
298
+ This is primarily useful for testing scenarios where you need to ensure
299
+ a clean state between tests.
300
+ """
301
+ if cls._instance:
302
+ # Signal shutdown
303
+ cls._instance._running = False
304
+ cls._instance._stop_event.set()
305
+
306
+ # Clear the singleton instance
307
+ cls._instance = None
293
308
 
294
309
  async def start(self):
295
310
  """Start the event bus and all lifecycle-aware listeners."""
@@ -383,11 +398,19 @@ class AsyncEventBus:
383
398
  async def _process_events(self):
384
399
  """Process events from the queue until stopped."""
385
400
  while self._running:
401
+ event = None
386
402
  try:
387
403
  # Use wait_for with a timeout to allow checking running state
388
404
  try:
405
+ # Check if we should be stopping first
406
+ if not self._running or self._stop_event.is_set():
407
+ break
408
+
389
409
  event = await asyncio.wait_for(self._queue.get(), timeout=0.1)
390
410
  except asyncio.TimeoutError:
411
+ # Check again before continuing
412
+ if not self._running or self._stop_event.is_set():
413
+ break
391
414
  continue
392
415
 
393
416
  # Process the event through all listeners
@@ -407,13 +430,17 @@ class AsyncEventBus:
407
430
  f"Stacktrace: {''.join(traceback.format_exception(type(r), r, r.__traceback__))}"
408
431
  )
409
432
 
410
- self._queue.task_done()
411
-
412
433
  except asyncio.CancelledError:
434
+ # If we have a current event, mark it done before breaking
435
+ if event is not None:
436
+ self._queue.task_done()
413
437
  break
414
438
  except Exception as e:
415
439
  print(f"Error in event processing loop: {e}")
416
- continue
440
+ finally:
441
+ # Always mark the task as done if we got an event
442
+ if event is not None:
443
+ self._queue.task_done()
417
444
 
418
445
  # Process remaining events in queue
419
446
  while not self._queue.empty():
@@ -370,20 +370,11 @@ class MCPAggregator(ContextDependent):
370
370
  Returns:
371
371
  Result from the operation or an error result
372
372
  """
373
- logger.info(
374
- f"Requesting {operation_type}",
375
- data={
376
- "progress_action": ProgressAction.STARTING,
377
- f"{operation_type}_name": operation_name,
378
- "server_name": server_name,
379
- "agent_name": self.agent_name,
380
- },
381
- )
382
373
 
383
374
  async def try_execute(client: ClientSession):
384
375
  try:
385
376
  method = getattr(client, method_name)
386
- return await method(**method_args) if method_args else await method()
377
+ return await method(**method_args)
387
378
  except Exception as e:
388
379
  error_msg = f"Failed to {method_name} '{operation_name}' on server '{server_name}': {e}"
389
380
  logger.error(error_msg)
@@ -469,6 +460,16 @@ class MCPAggregator(ContextDependent):
469
460
  logger.error(f"Error: Tool '{name}' not found")
470
461
  return CallToolResult(isError=True, message=f"Tool '{name}' not found")
471
462
 
463
+ logger.info(
464
+ "Requesting tool call",
465
+ data={
466
+ "progress_action": ProgressAction.CALLING_TOOL,
467
+ "tool_name": local_tool_name,
468
+ "server_name": server_name,
469
+ "agent_name": self.agent_name,
470
+ },
471
+ )
472
+
472
473
  return await self._execute_on_server(
473
474
  server_name=server_name,
474
475
  operation_type="tool",
@@ -0,0 +1,69 @@
1
+ # mime_utils.py
2
+
3
+ import mimetypes
4
+
5
+ # Initialize mimetypes database
6
+ mimetypes.init()
7
+
8
+ # Extend with additional types that might be missing
9
+ mimetypes.add_type("text/x-python", ".py")
10
+ mimetypes.add_type("image/webp", ".webp")
11
+
12
# Text-based MIME types that don't use the "text/" prefix.
TEXT_MIME_TYPES = {
    "application/json",
    "application/javascript",
    "application/xml",
    "application/ld+json",
    "application/xhtml+xml",
    "application/x-httpd-php",
    "application/x-sh",
    "application/ecmascript",
    "application/graphql",
    "application/x-www-form-urlencoded",
    "application/yaml",
    "application/toml",
    "application/x-python-code",
    "application/vnd.api+json",
}

# Suffix patterns that commonly mark structured-text MIME types.
TEXT_MIME_PATTERNS = ("+xml", "+json", "+yaml", "+text")


def guess_mime_type(file_path: str) -> str:
    """
    Guess the MIME type of a file based on its extension.

    Falls back to "application/octet-stream" when the extension is unknown.
    """
    guessed, _ = mimetypes.guess_type(file_path)
    return guessed if guessed else "application/octet-stream"


def is_text_mime_type(mime_type: str) -> bool:
    """Determine if a MIME type represents text content."""
    if not mime_type:
        return False
    # str.endswith accepts a tuple, so one call covers every suffix pattern.
    return (
        mime_type.startswith("text/")
        or mime_type in TEXT_MIME_TYPES
        or mime_type.endswith(TEXT_MIME_PATTERNS)
    )
60
+
61
+
62
def is_binary_content(mime_type: str) -> bool:
    """Check if content should be treated as binary (i.e. not text)."""
    if is_text_mime_type(mime_type):
        return False
    return True
65
+
66
+
67
def is_image_mime_type(mime_type: str) -> bool:
    """
    Check if a MIME type represents an image.

    SVG is excluded even though it is "image/*" — presumably because it is
    XML text rather than binary image data.

    Args:
        mime_type: MIME type string; may be empty or None.

    Returns:
        True for image types other than SVG, False otherwise.
    """
    # Guard falsy input (None/"") for consistency with is_text_mime_type,
    # which would otherwise raise AttributeError here on None.
    if not mime_type:
        return False
    return mime_type.startswith("image/") and mime_type != "image/svg+xml"
@@ -0,0 +1,64 @@
1
+ from typing import List, Union
2
+ from pydantic import BaseModel
3
+
4
+ from mcp.types import (
5
+ PromptMessage,
6
+ TextContent,
7
+ ImageContent,
8
+ EmbeddedResource,
9
+ Role,
10
+ GetPromptResult,
11
+ )
12
+
13
+
14
class PromptMessageMultipart(BaseModel):
    """
    Extension of PromptMessage that carries multiple content parts.

    Round-trips with standard PromptMessages: consecutive messages sharing a
    role collapse into one multipart message, and each multipart message
    expands back into one PromptMessage per content part.
    """

    role: Role
    content: List[Union[TextContent, ImageContent, EmbeddedResource]]

    @classmethod
    def from_prompt_messages(
        cls, messages: List[PromptMessage]
    ) -> List["PromptMessageMultipart"]:
        """Group consecutive same-role PromptMessages into multipart objects."""
        grouped: List["PromptMessageMultipart"] = []
        for message in messages:
            if grouped and grouped[-1].role == message.role:
                # Same role as the previous message: extend the open group.
                grouped[-1].content.append(message.content)
            else:
                # First message, or the role changed: open a new group.
                grouped.append(cls(role=message.role, content=[message.content]))
        return grouped

    def to_prompt_messages(self) -> List[PromptMessage]:
        """Expand into one standard PromptMessage per content part."""
        return [
            PromptMessage(role=self.role, content=part) for part in self.content
        ]

    @classmethod
    def parse_get_prompt_result(
        cls, result: GetPromptResult
    ) -> List["PromptMessageMultipart"]:
        """Convert the messages of a GetPromptResult into multipart form."""
        return cls.from_prompt_messages(result.messages)