fast-agent-mcp 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +37 -9
  2. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +53 -31
  3. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
  4. mcp_agent/agents/agent.py +5 -11
  5. mcp_agent/core/agent_app.py +125 -44
  6. mcp_agent/core/decorators.py +3 -2
  7. mcp_agent/core/enhanced_prompt.py +106 -20
  8. mcp_agent/core/factory.py +28 -66
  9. mcp_agent/core/fastagent.py +13 -3
  10. mcp_agent/core/mcp_content.py +222 -0
  11. mcp_agent/core/prompt.py +132 -0
  12. mcp_agent/core/proxies.py +41 -36
  13. mcp_agent/human_input/handler.py +4 -1
  14. mcp_agent/logging/transport.py +30 -3
  15. mcp_agent/mcp/mcp_aggregator.py +27 -22
  16. mcp_agent/mcp/mime_utils.py +69 -0
  17. mcp_agent/mcp/prompt_message_multipart.py +64 -0
  18. mcp_agent/mcp/prompt_serialization.py +447 -0
  19. mcp_agent/mcp/prompts/__init__.py +0 -0
  20. mcp_agent/mcp/prompts/__main__.py +10 -0
  21. mcp_agent/mcp/prompts/prompt_server.py +508 -0
  22. mcp_agent/mcp/prompts/prompt_template.py +469 -0
  23. mcp_agent/mcp/resource_utils.py +203 -0
  24. mcp_agent/resources/examples/internal/agent.py +1 -1
  25. mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
  26. mcp_agent/resources/examples/internal/sizer.py +0 -5
  27. mcp_agent/resources/examples/prompting/__init__.py +3 -0
  28. mcp_agent/resources/examples/prompting/agent.py +23 -0
  29. mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
  30. mcp_agent/resources/examples/prompting/image_server.py +56 -0
  31. mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
  32. mcp_agent/resources/examples/workflows/orchestrator.py +5 -4
  33. mcp_agent/resources/examples/workflows/router.py +0 -2
  34. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +57 -87
  35. mcp_agent/workflows/llm/anthropic_utils.py +101 -0
  36. mcp_agent/workflows/llm/augmented_llm.py +155 -141
  37. mcp_agent/workflows/llm/augmented_llm_anthropic.py +135 -281
  38. mcp_agent/workflows/llm/augmented_llm_openai.py +175 -337
  39. mcp_agent/workflows/llm/augmented_llm_passthrough.py +104 -0
  40. mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
  41. mcp_agent/workflows/llm/model_factory.py +25 -6
  42. mcp_agent/workflows/llm/openai_utils.py +65 -0
  43. mcp_agent/workflows/llm/providers/__init__.py +8 -0
  44. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
  45. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
  46. mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
  47. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
  48. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
  49. mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
  50. mcp_agent/workflows/orchestrator/orchestrator.py +62 -153
  51. mcp_agent/workflows/router/router_llm.py +18 -24
  52. mcp_agent/core/server_validation.py +0 -44
  53. mcp_agent/core/simulator_registry.py +0 -22
  54. mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
  55. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
  56. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
@@ -12,11 +12,6 @@ fast = FastAgent("Sizer Prompt Test")
 )
 async def main():
     async with fast.run() as agent:
-        # await agent["sizer"].load_prompt("sizing_prompt_units", {"metric": "False"})
-        # print(await agent["sizer"].load_prompt("category-category_prompt"))
-        # await agent("What is the size of the moon?")
-        # await agent("What is the size of the Earth?")
-        # await agent("What is the size of the Sun?")
         await agent()
 
 
@@ -0,0 +1,3 @@
+"""
+Prompting examples package for MCP Agent.
+"""
@@ -0,0 +1,23 @@
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("FastAgent Example")
+
+
+# Define the agent
+@fast.agent(
+    "agent",
+    instruction="You are a helpful AI Agent",
+    servers=["prompts"],  # , "image", "hfspace"],
+    # model="gpt-4o",
+    # instruction="You are a helpful AI Agent", servers=["prompts","basic_memory"], model="haiku"
+)
+async def main():
+    # use the --model command line switch or agent arguments to change model
+    async with fast.run() as agent:
+        await agent()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
@@ -0,0 +1,44 @@
+# FastAgent Configuration File
+
+# Default Model Configuration:
+#
+# Takes format:
+#   <provider>.<model_string>.<reasoning_effort?> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low)
+# Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3
+# and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini
+#
+# If not specified, defaults to "haiku".
+# Can be overridden with a command line switch --model=<model>, or within the Agent constructor.
+
+default_model: haiku
+
+# Logging and Console Configuration:
+logger:
+  # level: "debug" | "info" | "warning" | "error"
+  # type: "none" | "console" | "file" | "http"
+  # path: "/path/to/logfile.jsonl"
+  type: file
+  level: error
+  # Switch the progress display on or off
+  progress_display: true
+
+  # Show chat User/Assistant messages on the console
+  show_chat: true
+  # Show tool calls on the console
+  show_tools: true
+  # Truncate long tool responses on the console
+  truncate_tools: true
+
+# MCP Servers
+mcp:
+  servers:
+    prompts:
+      command: "prompt-server"
+      args: ["sizing.md", "resource.md", "resource-exe.md", "pdf_prompt.md"]
+    hfspace:
+      command: "npx"
+      args: ["@llmindset/mcp-hfspace"]
+    image:
+      command: "uv"
+      args: ["run", "image_server.py"]
+
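
The model settings documented in this configuration can be exercised per agent or at launch time. A minimal sketch following the conventions shown above (the agent name and instruction here are illustrative, not part of this release):

import asyncio
from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("Model Override Example")


# Pin a model in the decorator; aliases such as "sonnet" or full strings such as
# "openai.o3-mini.low" are accepted and override default_model from fastagent.config.yaml.
@fast.agent("summarizer", instruction="Summarize the input", model="sonnet")
async def main():
    async with fast.run() as agent:
        await agent()


if __name__ == "__main__":
    # Alternatively, use the documented switch at launch: --model=openai.o3-mini.low
    asyncio.run(main())
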
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+"""
+Simple MCP server that responds to tool calls with text and image content.
+"""
+
+import logging
+from pathlib import Path
+
+from mcp.server.fastmcp import FastMCP, Context, Image
+from mcp.types import TextContent, ImageContent
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Create the FastMCP server
+app = FastMCP(name="ImageToolServer", debug=True)
+
+
+@app.tool(name="get_image", description="Returns an image with a descriptive text")
+async def get_image(
+    image_name: str = "default", ctx: Context = None
+) -> list[TextContent | ImageContent]:
+    """
+    Returns an image file along with a descriptive text.
+
+    Args:
+        image_name: Name of the image to return (default just returns image.jpg)
+
+    Returns:
+        A list containing a text message and the requested image
+    """
+    try:
+        # Read the image file and convert to base64
+        # Create the response with text and image
+        return [
+            TextContent(type="text", text="Here's your image:"),
+            Image(path="image.jpg").to_image_content(),
+        ]
+    except Exception as e:
+        logger.exception(f"Error processing image: {e}")
+        return [TextContent(type="text", text=f"Error processing image: {str(e)}")]
+
+
+if __name__ == "__main__":
+    # Check if the default image exists
+    if not Path("image.jpg").exists():
+        logger.warning(
+            "Default image file 'image.jpg' not found in the current directory"
+        )
+        logger.warning(
+            "Please add an image file named 'image.jpg' to the current directory"
+        )
+
+    # Run the server using stdio transport
+    app.run(transport="stdio")
@@ -2,7 +2,7 @@ import asyncio
 
 from mcp_agent.core.fastagent import FastAgent
 
-agents = FastAgent(name="Researcher")
+agents = FastAgent(name="Researcher Agent (EO)")
 
 
 @agents.agent(
@@ -45,13 +45,14 @@ fast = FastAgent("Orchestrator-Workers")
 @fast.orchestrator(
     name="orchestrate",
     agents=["finder", "writer", "proofreader"],
-    plan_type="full",
+    plan_type="iterative",
 )
 async def main():
     async with fast.run() as agent:
-        await agent.author(
-            "write a 250 word short story about kittens discovering a castle, and save it to short_story.md"
-        )
+        await agent()
+        # await agent.author(
+        #     "write a 250 word short story about kittens discovering a castle, and save it to short_story.md"
+        # )
 
         # The orchestrator can be used just like any other agent
         task = (
@@ -25,7 +25,6 @@ SAMPLE_REQUESTS = [
     name="fetcher",
     instruction="""You are an agent, with a tool enabling you to fetch URLs.""",
     servers=["fetch"],
-    model="haiku",
 )
 @fast.agent(
     name="code_expert",
@@ -33,7 +32,6 @@ SAMPLE_REQUESTS = [
     When asked about code, architecture, or development practices,
     you provide thorough and practical insights.""",
     servers=["filesystem"],
-    model="gpt-4o",
 )
 @fast.agent(
     name="general_assistant",
@@ -12,6 +12,7 @@ from mcp_agent.workflows.llm.augmented_llm import (
 )
 from mcp_agent.agents.agent import Agent, AgentConfig
 from mcp_agent.logging.logger import get_logger
+from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM
 
 if TYPE_CHECKING:
     from mcp_agent.context import Context
@@ -89,45 +90,33 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         evaluator: str | Agent | AugmentedLLM,
         min_rating: QualityRating = QualityRating.GOOD,
         max_refinements: int = 3,
-        llm_factory: Callable[[Agent], AugmentedLLM]
-        | None = None,  # TODO: Remove legacy - factory should only be needed for str evaluator
+        llm_factory: Callable[[Agent], AugmentedLLM] | None = None,
         context: Optional["Context"] = None,
-        name: Optional[str] = None,  # Allow overriding the name
-        instruction: Optional[str] = None,  # Allow overriding the instruction
+        name: Optional[str] = None,
+        instruction: Optional[str] = None,
     ):
         """
         Initialize the evaluator-optimizer workflow.
 
         Args:
-            generator: The agent/LLM/workflow that generates responses. Can be:
-                - An Agent that will be converted to an AugmentedLLM
-                - An AugmentedLLM instance
-                - An Orchestrator/Router/ParallelLLM workflow
-            evaluator_agent: The agent/LLM that evaluates responses
-            evaluation_criteria: Criteria for the evaluator to assess responses
+            generator: The agent/LLM/workflow that generates responses
+            evaluator: The evaluator (string instruction, Agent or AugmentedLLM)
             min_rating: Minimum acceptable quality rating
             max_refinements: Maximum refinement iterations
-            llm_factory: Optional factory to create LLMs from agents
+            llm_factory: Factory to create LLMs from agents when needed
             name: Optional name for the workflow (defaults to generator's name)
             instruction: Optional instruction (defaults to generator's instruction)
-
-        Note on History Management:
-            This workflow manages two distinct history contexts:
-            1. Generator History: Controlled by the generator's use_history setting. When False,
-               each refinement iteration starts fresh without previous context.
-            2. Evaluator History: Always disabled as each evaluation should be independent
-               and based solely on the current response.
         """
-        # Set up initial instance attributes - allow name override
-        self.name = name or generator.name
+        # Set initial attributes
+        self.name = name or getattr(generator, "name", "EvaluatorOptimizer")
         self.llm_factory = llm_factory
         self.generator = generator
         self.evaluator = evaluator
         self.min_rating = min_rating
         self.max_refinements = max_refinements
 
-        # Determine generator's history setting before super().__init__
-
+        # Determine generator's history setting directly based on type
+        self.generator_use_history = False
         if isinstance(generator, Agent):
             self.generator_use_history = generator.config.use_history
         elif isinstance(generator, AugmentedLLM):
@@ -135,90 +124,55 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
                 generator.aggregator, Agent
             ):
                 self.generator_use_history = generator.aggregator.config.use_history
-            else:
+            elif hasattr(generator, "default_request_params"):
                 self.generator_use_history = getattr(
-                    generator,
-                    "use_history",
-                    getattr(generator.default_request_params, "use_history", False),
+                    generator.default_request_params, "use_history", False
                 )
-        # Handle ChainProxy with type checking
-        elif hasattr(generator, "_sequence") and hasattr(generator, "_agent_proxies"):
-            # This is how we detect a ChainProxy without directly importing it
-            # For ChainProxy, we'll default use_history to False
-            self.generator_use_history = False
-        else:
-            raise ValueError(f"Unsupported optimizer type: {type(generator)}")
-
-        # Now we can call super().__init__ which will use generator_use_history
-        super().__init__(context=context, name=name or generator.name)
+        # All other types default to False
 
-        # Add a PassthroughLLM as _llm property for compatibility with Orchestrator
-        from mcp_agent.workflows.llm.augmented_llm import PassthroughLLM
+        # Initialize parent class
+        super().__init__(context=context, name=name or getattr(generator, "name", None))
 
+        # Create a PassthroughLLM as _llm property
+        # TODO -- remove this when we fix/remove the inheritance hierarchy
         self._llm = PassthroughLLM(name=f"{self.name}_passthrough", context=context)
 
-        # Set up the generator
-
+        # Set up the generator based on type
         if isinstance(generator, Agent):
             if not llm_factory:
-                raise ValueError("llm_factory is required when using an Agent")
-
-            # Only create new LLM if agent doesn't have one
-            if hasattr(generator, "_llm") and generator._llm:
-                self.generator_llm = generator._llm
-            else:
-                self.generator_llm = llm_factory(agent=generator)
+                raise ValueError(
+                    "llm_factory is required when using an Agent generator"
+                )
 
+            # Use existing LLM if available, otherwise create new one
+            self.generator_llm = getattr(generator, "_llm", None) or llm_factory(
+                agent=generator
+            )
             self.aggregator = generator
-            self.instruction = (
-                instruction  # Use provided instruction if any
-                or (
-                    generator.instruction
-                    if isinstance(generator.instruction, str)
-                    else None
-                )  # Fallback to generator's
+            self.instruction = instruction or (
+                generator.instruction
+                if isinstance(generator.instruction, str)
+                else None
             )
-        elif hasattr(generator, "_sequence") and hasattr(generator, "_agent_proxies"):
-            # For ChainProxy, use it directly for generation
+        elif isinstance(generator, AugmentedLLM):
+            self.generator_llm = generator
+            self.aggregator = getattr(generator, "aggregator", None)
+            self.instruction = instruction or generator.instruction
+        else:
+            # ChainProxy-like object
             self.generator_llm = generator
             self.aggregator = None
             self.instruction = (
                 instruction or f"Chain of agents: {', '.join(generator._sequence)}"
             )
 
-        elif isinstance(generator, AugmentedLLM):
-            self.generator_llm = generator
-            self.aggregator = generator.aggregator
-            self.instruction = generator.instruction
-
-        # Set up the evaluator - evaluations should be independent, so history is always disabled
-        if isinstance(evaluator, AugmentedLLM):
-            self.evaluator_llm = evaluator
-            # Override evaluator's history setting
-            if hasattr(evaluator, "default_request_params"):
-                evaluator.default_request_params.use_history = False
-        elif isinstance(evaluator, Agent):
-            if not llm_factory:
-                raise ValueError(
-                    "llm_factory is required when using an Agent evaluator"
-                )
-
-            # Create evaluator with history disabled
-            if hasattr(evaluator, "_llm") and evaluator._llm:
-                self.evaluator_llm = evaluator._llm
-                if hasattr(self.evaluator_llm, "default_request_params"):
-                    self.evaluator_llm.default_request_params.use_history = False
-            else:
-                # Force history off in config before creating LLM
-                evaluator.config.use_history = False
-                self.evaluator_llm = llm_factory(agent=evaluator)
-        elif isinstance(evaluator, str):
+        # Set up the evaluator - always disable history
+        if isinstance(evaluator, str):
             if not llm_factory:
                 raise ValueError(
                     "llm_factory is required when using a string evaluator"
                 )
 
-            # Create evaluator agent with history disabled
             evaluator_agent = Agent(
                 name="Evaluator",
                 instruction=evaluator,
@@ -226,17 +180,33 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
                     name="Evaluator",
                     instruction=evaluator,
                     servers=[],
-                    use_history=False,  # Force history off for evaluator
+                    use_history=False,
                 ),
             )
             self.evaluator_llm = llm_factory(agent=evaluator_agent)
+        elif isinstance(evaluator, Agent):
+            if not llm_factory:
+                raise ValueError(
+                    "llm_factory is required when using an Agent evaluator"
+                )
+
+            # Disable history and use/create LLM
+            evaluator.config.use_history = False
+            self.evaluator_llm = getattr(evaluator, "_llm", None) or llm_factory(
+                agent=evaluator
+            )
+        elif isinstance(evaluator, AugmentedLLM):
+            self.evaluator_llm = evaluator
+            # Ensure history is disabled
+            if hasattr(self.evaluator_llm, "default_request_params"):
+                self.evaluator_llm.default_request_params.use_history = False
         else:
             raise ValueError(f"Unsupported evaluator type: {type(evaluator)}")
 
-        # Track iteration history (for the workflow itself)
+        # Track iteration history
        self.refinement_history = []
 
-        # Set up workflow's default params based on generator's history setting
+        # Set up workflow's default params
         self.default_request_params = self._initialize_default_params({})
 
         # Ensure evaluator's request params have history disabled
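
Taken together, the refactored constructor accepts a plain string as the evaluator, in which case llm_factory is required and the evaluator agent is built with use_history=False. A minimal sketch of the new call shape (generator_agent and llm_factory are assumed to come from the surrounding application, and the import path for QualityRating is assumed to match the module shown above):

from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
    EvaluatorOptimizerLLM,
    QualityRating,
)

# Illustrative only: a string evaluator triggers creation of an "Evaluator" agent
# with history disabled, built through the supplied llm_factory.
workflow = EvaluatorOptimizerLLM(
    generator=generator_agent,
    evaluator="Rate the draft for clarity, accuracy and completeness.",
    min_rating=QualityRating.GOOD,
    max_refinements=3,
    llm_factory=llm_factory,
)
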
@@ -0,0 +1,101 @@
+"""
+Utility functions for Anthropic integration with MCP.
+
+Provides conversion between Anthropic message formats and PromptMessageMultipart,
+leveraging existing code for resource handling and delimited formats.
+"""
+
+from anthropic.types import (
+    MessageParam,
+)
+
+from mcp.types import (
+    TextContent,
+    ImageContent,
+    EmbeddedResource,
+    TextResourceContents,
+)
+
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+
+# TODO -- only used for saving, but this will be driven directly from PromptMessages
+def anthropic_message_param_to_prompt_message_multipart(
+    message_param: MessageParam,
+) -> PromptMessageMultipart:
+    """
+    Convert an Anthropic MessageParam to a PromptMessageMultipart.
+
+    Args:
+        message_param: The Anthropic MessageParam to convert
+
+    Returns:
+        A PromptMessageMultipart representation
+    """
+    role = message_param["role"]
+    content = message_param["content"]
+
+    # Handle string content (user messages can be simple strings)
+    if isinstance(content, str):
+        return PromptMessageMultipart(
+            role=role, content=[TextContent(type="text", text=content)]
+        )
+
+    # Convert content blocks to MCP content types
+    mcp_contents = []
+
+    for block in content:
+        if isinstance(block, dict):
+            if block.get("type") == "text":
+                text = block.get("text", "")
+
+                # Check if this is a resource marker
+                if (
+                    text
+                    and (
+                        text.startswith("[Resource:")
+                        or text.startswith("[Binary Resource:")
+                    )
+                    and "\n" in text
+                ):
+                    header, content_text = text.split("\n", 1)
+                    if "MIME:" in header:
+                        mime_match = header.split("MIME:", 1)[1].split("]")[0].strip()
+                        if (
+                            mime_match != "text/plain"
+                        ):  # Only process non-plain text resources
+                            if (
+                                "Resource:" in header
+                                and "Binary Resource:" not in header
+                            ):
+                                uri = (
+                                    header.split("Resource:", 1)[1]
+                                    .split(",")[0]
+                                    .strip()
+                                )
+                                mcp_contents.append(
+                                    EmbeddedResource(
+                                        type="resource",
+                                        resource=TextResourceContents(
+                                            uri=uri,
+                                            mimeType=mime_match,
+                                            text=content_text,
+                                        ),
+                                    )
+                                )
+                                continue
+
+                # Regular text content
+                mcp_contents.append(TextContent(type="text", text=text))
+
+            elif block.get("type") == "image":
+                # Image content
+                source = block.get("source", {})
+                if isinstance(source, dict) and source.get("type") == "base64":
+                    media_type = source.get("media_type", "image/png")
+                    data = source.get("data", "")
+                    mcp_contents.append(
+                        ImageContent(type="image", data=data, mimeType=media_type)
+                    )
+
+    return PromptMessageMultipart(role=role, content=mcp_contents)
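
For reference, the converter added above can be driven directly with an Anthropic-style message dict. A short usage sketch with illustrative values (not part of the package):

from mcp_agent.workflows.llm.anthropic_utils import (
    anthropic_message_param_to_prompt_message_multipart,
)

# A minimal user message containing a single text block
message = {
    "role": "user",
    "content": [{"type": "text", "text": "Describe the attached image."}],
}

multipart = anthropic_message_param_to_prompt_message_multipart(message)
# multipart.role == "user"; multipart.content holds one TextContent item
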