fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/METADATA +27 -4
  2. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/RECORD +51 -30
  3. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/entry_points.txt +1 -0
  4. mcp_agent/agents/agent.py +114 -8
  5. mcp_agent/context.py +0 -2
  6. mcp_agent/core/agent_app.py +89 -13
  7. mcp_agent/core/factory.py +14 -13
  8. mcp_agent/core/fastagent.py +15 -5
  9. mcp_agent/core/mcp_content.py +222 -0
  10. mcp_agent/core/prompt.py +132 -0
  11. mcp_agent/core/proxies.py +79 -36
  12. mcp_agent/logging/listeners.py +3 -6
  13. mcp_agent/logging/transport.py +30 -3
  14. mcp_agent/mcp/mcp_agent_client_session.py +21 -145
  15. mcp_agent/mcp/mcp_aggregator.py +61 -12
  16. mcp_agent/mcp/mcp_connection_manager.py +0 -1
  17. mcp_agent/mcp/mime_utils.py +69 -0
  18. mcp_agent/mcp/prompt_message_multipart.py +64 -0
  19. mcp_agent/mcp/prompt_serialization.py +447 -0
  20. mcp_agent/mcp/prompts/__init__.py +0 -0
  21. mcp_agent/mcp/prompts/__main__.py +10 -0
  22. mcp_agent/mcp/prompts/prompt_server.py +509 -0
  23. mcp_agent/mcp/prompts/prompt_template.py +469 -0
  24. mcp_agent/mcp/resource_utils.py +223 -0
  25. mcp_agent/mcp/stdio.py +23 -15
  26. mcp_agent/mcp_server_registry.py +5 -2
  27. mcp_agent/resources/examples/internal/agent.py +1 -1
  28. mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
  29. mcp_agent/resources/examples/internal/sizer.py +0 -5
  30. mcp_agent/resources/examples/prompting/__init__.py +3 -0
  31. mcp_agent/resources/examples/prompting/agent.py +23 -0
  32. mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
  33. mcp_agent/resources/examples/prompting/image_server.py +56 -0
  34. mcp_agent/resources/examples/workflows/orchestrator.py +3 -3
  35. mcp_agent/workflows/llm/anthropic_utils.py +101 -0
  36. mcp_agent/workflows/llm/augmented_llm.py +139 -66
  37. mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
  38. mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
  39. mcp_agent/workflows/llm/augmented_llm_passthrough.py +99 -1
  40. mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
  41. mcp_agent/workflows/llm/model_factory.py +20 -3
  42. mcp_agent/workflows/llm/openai_utils.py +65 -0
  43. mcp_agent/workflows/llm/providers/__init__.py +8 -0
  44. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
  45. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
  46. mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
  47. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
  48. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
  49. mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
  50. mcp_agent/core/server_validation.py +0 -44
  51. mcp_agent/core/simulator_registry.py +0 -22
  52. mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
  53. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/WHEEL +0 -0
  54. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/licenses/LICENSE +0 -0
mcp_agent/mcp/stdio.py CHANGED
@@ -9,10 +9,12 @@ from anyio.streams.text import TextReceiveStream
  from mcp.client.stdio import StdioServerParameters, get_default_environment
  import mcp.types as types
  from mcp_agent.logging.logger import get_logger
+ from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream

  logger = get_logger(__name__)


+ # TODO this will be removed when client library 1.4.2 is released
  @asynccontextmanager
  async def stdio_client_with_rich_stderr(server: StdioServerParameters):
      """
@@ -22,10 +24,16 @@ async def stdio_client_with_rich_stderr(server: StdioServerParameters):
      Args:
          server: The server parameters for the stdio connection
      """
+     read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception]
+     read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception]
+
+     write_stream: MemoryObjectSendStream[types.JSONRPCMessage]
+     write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage]
+
      read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
      write_stream, write_stream_reader = anyio.create_memory_object_stream(0)
-
      # Open process with stderr piped for capture
+
      process = await anyio.open_process(
          [server.command, *server.args],
          env=server.env if server.env is not None else get_default_environment(),
@@ -67,19 +75,19 @@ async def stdio_client_with_rich_stderr(server: StdioServerParameters):
          except anyio.ClosedResourceError:
              await anyio.lowlevel.checkpoint()

-     async def stderr_reader():
-         assert process.stderr, "Opened process is missing stderr"
-         try:
-             async for chunk in TextReceiveStream(
-                 process.stderr,
-                 encoding=server.encoding,
-                 errors=server.encoding_error_handler,
-             ):
-                 if chunk.strip():
-                     # Let the logging system handle the formatting consistently
-                     logger.event("info", "mcpserver.stderr", chunk.rstrip(), None, {})
-         except anyio.ClosedResourceError:
-             await anyio.lowlevel.checkpoint()
+     # async def stderr_reader():
+     #     assert process.stderr, "Opened process is missing stderr"
+     #     try:
+     #         async for chunk in TextReceiveStream(
+     #             process.stderr,
+     #             encoding=server.encoding,
+     #             errors=server.encoding_error_handler,
+     #         ):
+     #             if chunk.strip():
+     #                 # Let the logging system handle the formatting consistently
+     #                 logger.event("info", "mcpserver.stderr", chunk.rstrip(), None, {})
+     #     except anyio.ClosedResourceError:
+     #         await anyio.lowlevel.checkpoint()

      async def stdin_writer():
          assert process.stdin, "Opened process is missing stdin"
@@ -87,6 +95,7 @@ async def stdio_client_with_rich_stderr(server: StdioServerParameters):
          async with write_stream_reader:
              async for message in write_stream_reader:
                  json = message.model_dump_json(by_alias=True, exclude_none=True)
+                 print(f"**********{id(process.stdin)}")
                  await process.stdin.send(
                      (json + "\n").encode(
                          encoding=server.encoding,
@@ -100,5 +109,4 @@ async def stdio_client_with_rich_stderr(server: StdioServerParameters):
      async with anyio.create_task_group() as tg, process:
          tg.start_soon(stdout_reader)
          tg.start_soon(stdin_writer)
-         tg.start_soon(stderr_reader)
          yield read_stream, write_stream
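
Note on the plumbing the new type annotations describe: the client pairs two zero-capacity anyio memory object streams, one per direction, serviced by reader/writer tasks in a task group. A minimal, self-contained sketch of that pattern (the names here are illustrative, not taken from the package):

import anyio


async def main():
    # Zero capacity means send() blocks until a receiver is waiting,
    # the same backpressure behaviour the stdio client relies on.
    send_stream, receive_stream = anyio.create_memory_object_stream(0)

    async def producer():
        async with send_stream:
            await send_stream.send("hello")

    async def consumer():
        async with receive_stream:
            async for item in receive_stream:
                print(item)

    async with anyio.create_task_group() as tg:
        tg.start_soon(producer)
        tg.start_soon(consumer)


anyio.run(main)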
mcp_agent/mcp_server_registry.py CHANGED
@@ -15,9 +15,9 @@ from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStre
  from mcp import ClientSession
  from mcp.client.stdio import (
      StdioServerParameters,
-     stdio_client,
      get_default_environment,
  )
+ from mcp_agent.mcp.stdio import stdio_client_with_rich_stderr
  from mcp.client.sse import sse_client

  from mcp_agent.config import (
@@ -134,7 +134,10 @@ class ServerRegistry:
              env={**get_default_environment(), **(config.env or {})},
          )

-         async with stdio_client(server_params) as (read_stream, write_stream):
+         async with stdio_client_with_rich_stderr(server_params) as (
+             read_stream,
+             write_stream,
+         ):
              session = client_session_factory(
                  read_stream,
                  write_stream,
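
The registry now routes stdio connections through the wrapper above. The wrapper preserves the upstream contract, an async context manager yielding a (read_stream, write_stream) pair, so session construction is unchanged. A hedged sketch of that contract, assuming the standard mcp ClientSession API:

from mcp import ClientSession
from mcp.client.stdio import StdioServerParameters

from mcp_agent.mcp.stdio import stdio_client_with_rich_stderr


async def list_server_tools(server_params: StdioServerParameters):
    # Same shape as the upstream stdio_client: the wrapper yields the
    # (read_stream, write_stream) pair that ClientSession consumes.
    async with stdio_client_with_rich_stderr(server_params) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            return await session.list_tools()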
mcp_agent/resources/examples/internal/agent.py CHANGED
@@ -10,7 +10,7 @@ fast = FastAgent("FastAgent Example")
  async def main():
      # use the --model command line switch or agent arguments to change model
      async with fast.run() as agent:
-         await agent()
+         await agent.prompt()


  if __name__ == "__main__":
mcp_agent/resources/examples/internal/fastagent.config.yaml CHANGED
@@ -44,9 +44,9 @@ mcp:
        # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"]
        command: "npx"
        args: ["-y", "@modelcontextprotocol/server-brave-search"]
-     sizer:
+     sizing_setup:
        command: "uv"
-       args: ["run", "prompt_sizing.py"]
+       args: ["run", "prompt_sizing1.py"]
      category:
        command: "uv"
        args: ["run", "prompt_category.py"]
mcp_agent/resources/examples/internal/sizer.py CHANGED
@@ -12,11 +12,6 @@ fast = FastAgent("Sizer Prompt Test")
  )
  async def main():
      async with fast.run() as agent:
-         # await agent["sizer"].load_prompt("sizing_prompt_units", {"metric": "False"})
-         # print(await agent["sizer"].load_prompt("category-category_prompt"))
-         # await agent("What is the size of the moon?")
-         # await agent("What is the size of the Earth?")
-         # await agent("What is the size of the Sun?")
          await agent()

mcp_agent/resources/examples/prompting/__init__.py ADDED
@@ -0,0 +1,3 @@
+ """
+ Prompting examples package for MCP Agent.
+ """
mcp_agent/resources/examples/prompting/agent.py ADDED
@@ -0,0 +1,23 @@
+ import asyncio
+ from mcp_agent.core.fastagent import FastAgent
+
+ # Create the application
+ fast = FastAgent("FastAgent Example")
+
+
+ # Define the agent
+ @fast.agent(
+     "agent",
+     instruction="You are a helpful AI Agent",
+     servers=["prompts"],  # , "imgetage", "hfspace"],
+     # model="gpt-4o",
+     # instruction="You are a helpful AI Agent", servers=["prompts","basic_memory"], model="haiku"
+ )
+ async def main():
+     # use the --model command line switch or agent arguments to change model
+     async with fast.run() as agent:
+         await agent()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
mcp_agent/resources/examples/prompting/fastagent.config.yaml ADDED
@@ -0,0 +1,44 @@
+ # FastAgent Configuration File
+
+ # Default Model Configuration:
+ #
+ # Takes format:
+ #   <provider>.<model_string>.<reasoning_effort?> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low)
+ # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3
+ # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini
+ #
+ # If not specified, defaults to "haiku".
+ # Can be overriden with a command line switch --model=<model>, or within the Agent constructor.
+
+ default_model: haiku
+
+ # Logging and Console Configuration:
+ logger:
+   # level: "debug" | "info" | "warning" | "error"
+   # type: "none" | "console" | "file" | "http"
+   # path: "/path/to/logfile.jsonl"
+   type: file
+   level: error
+   # Switch the progress display on or off
+   progress_display: true
+
+   # Show chat User/Assistant messages on the console
+   show_chat: true
+   # Show tool calls on the console
+   show_tools: true
+   # Truncate long tool responses on the console
+   truncate_tools: true
+
+ # MCP Servers
+ mcp:
+   servers:
+     prompts:
+       command: "prompt-server"
+       args: ["sizing.md", "resource.md","resource-exe.md","pdf_prompt.md"]
+     hfspace:
+       command: "npx"
+       args: ["@llmindset/mcp-hfspace"]
+     image:
+       command: "uv"
+       args: ["run", "image_server.py"]
+
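
The model string grammar documented in this config (<provider>.<model_string>.<reasoning_effort?>) can also be set per agent. A hedged sketch based on the model= parameter shown commented out in agent.py above; the agent name and model choice here are illustrative:

import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("Model Selection Example")


# "openai.o3-mini.low" follows <provider>.<model_string>.<reasoning_effort?>;
# plain aliases such as "haiku" or "gpt-4o" are also accepted per the comments above.
@fast.agent(
    "example",
    instruction="You are a helpful AI Agent",
    model="openai.o3-mini.low",
)
async def main():
    async with fast.run() as agent:
        await agent()


if __name__ == "__main__":
    asyncio.run(main())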
mcp_agent/resources/examples/prompting/image_server.py ADDED
@@ -0,0 +1,56 @@
+ #!/usr/bin/env python3
+ """
+ Simple MCP server that responds to tool calls with text and image content.
+ """
+
+ import logging
+ from pathlib import Path
+
+ from mcp.server.fastmcp import FastMCP, Context, Image
+ from mcp.types import TextContent, ImageContent
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Create the FastMCP server
+ app = FastMCP(name="ImageToolServer", debug=True)
+
+
+ @app.tool(name="get_image", description="Returns an image with a descriptive text")
+ async def get_image(
+     image_name: str = "default", ctx: Context = None
+ ) -> list[TextContent | ImageContent]:
+     """
+     Returns an image file along with a descriptive text.
+
+     Args:
+         image_name: Name of the image to return (default just returns image.jpg)
+
+     Returns:
+         A list containing a text message and the requested image
+     """
+     try:
+         # Read the image file and convert to base64
+         # Create the response with text and image
+         return [
+             TextContent(type="text", text="Here's your image:"),
+             Image(path="image.jpg").to_image_content(),
+         ]
+     except Exception as e:
+         logger.exception(f"Error processing image: {e}")
+         return [TextContent(type="text", text=f"Error processing image: {str(e)}")]
+
+
+ if __name__ == "__main__":
+     # Check if the default image exists
+     if not Path("image.jpg").exists():
+         logger.warning(
+             "Default image file 'image.jpg' not found in the current directory"
+         )
+         logger.warning(
+             "Please add an image file named 'image.jpg' to the current directory"
+         )
+
+     # Run the server using stdio transport
+     app.run(transport="stdio")
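
For reference, this is how a client would exercise the new tool over stdio; a sketch assuming the standard mcp client API, with the "uv run image_server.py" command taken from the config above:

import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main():
    params = StdioServerParameters(command="uv", args=["run", "image_server.py"])
    async with stdio_client(params) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            result = await session.call_tool("get_image", {"image_name": "default"})
            # Expect a text block followed by an image block
            for item in result.content:
                print(item.type)


if __name__ == "__main__":
    asyncio.run(main())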
mcp_agent/resources/examples/workflows/orchestrator.py CHANGED
@@ -50,9 +50,9 @@ fast = FastAgent("Orchestrator-Workers")
  async def main():
      async with fast.run() as agent:
          await agent()
-         # await agent.author(
-         #     "write a 250 word short story about kittens discovering a castle, and save it to short_story.md"
-         # )
+         await agent.author(
+             "write a 250 word short story about kittens discovering a castle, and save it to short_story.md"
+         )

      # The orchestrator can be used just like any other agent
      task = (
mcp_agent/workflows/llm/anthropic_utils.py ADDED
@@ -0,0 +1,101 @@
+ """
+ Utility functions for Anthropic integration with MCP.
+
+ Provides conversion between Anthropic message formats and PromptMessageMultipart,
+ leveraging existing code for resource handling and delimited formats.
+ """
+
+ from anthropic.types import (
+     MessageParam,
+ )
+
+ from mcp.types import (
+     TextContent,
+     ImageContent,
+     EmbeddedResource,
+     TextResourceContents,
+ )
+
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+
+ # TODO -- only used for saving, but this will be driven directly from PromptMessages
+ def anthropic_message_param_to_prompt_message_multipart(
+     message_param: MessageParam,
+ ) -> PromptMessageMultipart:
+     """
+     Convert an Anthropic MessageParam to a PromptMessageMultipart.
+
+     Args:
+         message_param: The Anthropic MessageParam to convert
+
+     Returns:
+         A PromptMessageMultipart representation
+     """
+     role = message_param["role"]
+     content = message_param["content"]
+
+     # Handle string content (user messages can be simple strings)
+     if isinstance(content, str):
+         return PromptMessageMultipart(
+             role=role, content=[TextContent(type="text", text=content)]
+         )
+
+     # Convert content blocks to MCP content types
+     mcp_contents = []
+
+     for block in content:
+         if isinstance(block, dict):
+             if block.get("type") == "text":
+                 text = block.get("text", "")
+
+                 # Check if this is a resource marker
+                 if (
+                     text
+                     and (
+                         text.startswith("[Resource:")
+                         or text.startswith("[Binary Resource:")
+                     )
+                     and "\n" in text
+                 ):
+                     header, content_text = text.split("\n", 1)
+                     if "MIME:" in header:
+                         mime_match = header.split("MIME:", 1)[1].split("]")[0].strip()
+                         if (
+                             mime_match != "text/plain"
+                         ):  # Only process non-plain text resources
+                             if (
+                                 "Resource:" in header
+                                 and "Binary Resource:" not in header
+                             ):
+                                 uri = (
+                                     header.split("Resource:", 1)[1]
+                                     .split(",")[0]
+                                     .strip()
+                                 )
+                                 mcp_contents.append(
+                                     EmbeddedResource(
+                                         type="resource",
+                                         resource=TextResourceContents(
+                                             uri=uri,
+                                             mimeType=mime_match,
+                                             text=content_text,
+                                         ),
+                                     )
+                                 )
+                                 continue
+
+                 # Regular text content
+                 mcp_contents.append(TextContent(type="text", text=text))
+
+             elif block.get("type") == "image":
+                 # Image content
+                 source = block.get("source", {})
+                 if isinstance(source, dict) and source.get("type") == "base64":
+                     media_type = source.get("media_type", "image/png")
+                     data = source.get("data", "")
+                     mcp_contents.append(
+                         ImageContent(type="image", data=data, mimeType=media_type)
+                     )
+
+     return PromptMessageMultipart(role=role, content=mcp_contents)
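
The resource-marker parsing above expects text blocks of the form "[Resource: <uri>, MIME: <type>]" followed by the resource body after the first newline. A hedged illustration of the conversion, derived from the code shown; the URI and YAML payload are invented for the example:

from mcp_agent.workflows.llm.anthropic_utils import (
    anthropic_message_param_to_prompt_message_multipart,
)

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "hello"},
        {
            "type": "text",
            # Marker header on the first line, resource body after the newline
            "text": "[Resource: file:///data/config.yaml, MIME: application/yaml]\nkey: value",
        },
    ],
}

multipart = anthropic_message_param_to_prompt_message_multipart(message)
assert multipart.role == "user"
assert multipart.content[0].type == "text"  # plain text passes through unchanged
assert multipart.content[1].type == "resource"  # parsed into an EmbeddedResource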