fast-agent-mcp 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,127 @@
+ """
+ Proxy classes for agent interactions.
+ These proxies provide a consistent interface for interacting with different types of agents.
+ """
+
+ from typing import List, Optional, Dict, TYPE_CHECKING
+
+ from mcp_agent.agents.agent import Agent
+ from mcp_agent.app import MCPApp
+
+ # Handle circular imports
+ if TYPE_CHECKING:
+     from mcp_agent.core.types import WorkflowType, ProxyDict
+ else:
+     # Define minimal versions for runtime
+     from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
+     from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
+     from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import EvaluatorOptimizerLLM
+     from mcp_agent.workflows.router.router_llm import LLMRouter
+     from typing import Union
+     WorkflowType = Union[Orchestrator, ParallelLLM, EvaluatorOptimizerLLM, LLMRouter]
+     ProxyDict = Dict[str, "BaseAgentProxy"]
+
+
+ class BaseAgentProxy:
+     """Base class for all proxy types"""
+
+     def __init__(self, app: MCPApp, name: str):
+         self._app = app
+         self._name = name
+
+     async def __call__(self, message: Optional[str] = None) -> str:
+         """Allow: agent.researcher('message')"""
+         return await self.send(message)
+
+     async def send(self, message: Optional[str] = None) -> str:
+         """Allow: agent.researcher.send('message')"""
+         if message is None:
+             return await self.prompt()
+         return await self.generate_str(message)
+
+     async def prompt(self, default_prompt: str = "") -> str:
+         """Allow: agent.researcher.prompt()"""
+         return await self._app.prompt(self._name, default_prompt)
+
+     async def generate_str(self, message: str) -> str:
+         """Generate response for a message - must be implemented by subclasses"""
+         raise NotImplementedError("Subclasses must implement generate_str")
+
+
+ class AgentProxy(BaseAgentProxy):
+     """Legacy proxy for individual agent operations"""
+
+     async def generate_str(self, message: str) -> str:
+         return await self._app.send(self._name, message)
+
+
+ class LLMAgentProxy(BaseAgentProxy):
+     """Proxy for regular agents that use _llm.generate_str()"""
+
+     def __init__(self, app: MCPApp, name: str, agent: Agent):
+         super().__init__(app, name)
+         self._agent = agent
+
+     async def generate_str(self, message: str) -> str:
+         return await self._agent._llm.generate_str(message)
+
+
+ class WorkflowProxy(BaseAgentProxy):
+     """Proxy for workflow types that implement generate_str() directly"""
+
+     def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
+         super().__init__(app, name)
+         self._workflow = workflow
+
+     async def generate_str(self, message: str) -> str:
+         return await self._workflow.generate_str(message)
+
+
+ class RouterProxy(BaseAgentProxy):
+     """Proxy for LLM Routers"""
+
+     def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
+         super().__init__(app, name)
+         self._workflow = workflow
+
+     async def generate_str(self, message: str) -> str:
+         results = await self._workflow.route(message)
+         if not results:
+             return "No appropriate route found for the request."
+
+         # Get the top result
+         top_result = results[0]
+         if isinstance(top_result.result, Agent):
+             # Agent route - delegate to the agent
+             agent = top_result.result
+
+             return await agent._llm.generate_str(message)
+         elif isinstance(top_result.result, str):
+             # Server route - use the router directly
+             return "Tool call requested by router - not yet supported"
+
+         return f"Routed to: {top_result.result} ({top_result.confidence}): {top_result.reasoning}"
+
+
+ class ChainProxy(BaseAgentProxy):
+     """Proxy for chained agent operations"""
+
+     def __init__(
+         self, app: MCPApp, name: str, sequence: List[str], agent_proxies: ProxyDict
+     ):
+         super().__init__(app, name)
+         self._sequence = sequence
+         self._agent_proxies = agent_proxies
+         self._continue_with_final = True  # Default behavior
+
+     async def generate_str(self, message: str) -> str:
+         """Chain message through a sequence of agents"""
+         current_message = message
+
+         for agent_name in self._sequence:
+             proxy = self._agent_proxies[agent_name]
+             current_message = await proxy.generate_str(current_message)
+
+         return current_message
+
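The proxy layer above gives every registered agent or workflow the same three entry points: calling the proxy directly, send(), and prompt(). A minimal sketch of how this reads in application code, using names that appear in the examples further down (the URL is illustrative):

    # inside `async with fast.run() as agent:` each registered name resolves to a proxy
    summary = await agent.url_fetcher("https://example.com")       # __call__ delegates to send()
    summary = await agent.url_fetcher.send("https://example.com")  # explicit send()
    await agent.url_fetcher.prompt()                                # interactive prompt via MCPApp.prompt()
    await agent.url_fetcher.send()                                  # send() without a message falls through to prompt()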
@@ -0,0 +1,22 @@
+ """
+ Type definitions for fast-agent core module.
+ """
+
+ from typing import Dict, Union, TypeAlias, TYPE_CHECKING
+
+ from mcp_agent.agents.agent import Agent
+ from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
+ from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
+ from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import EvaluatorOptimizerLLM
+ from mcp_agent.workflows.router.router_llm import LLMRouter
+
+ # Avoid circular imports
+ if TYPE_CHECKING:
+     from mcp_agent.core.proxies import BaseAgentProxy
+
+ # Type aliases for better readability
+ WorkflowType: TypeAlias = Union[
+     Orchestrator, ParallelLLM, EvaluatorOptimizerLLM, LLMRouter
+ ]
+ AgentOrWorkflow: TypeAlias = Union[Agent, WorkflowType]
+ ProxyDict: TypeAlias = Dict[str, "BaseAgentProxy"]  # Forward reference as string
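These aliases are what the proxies module imports under TYPE_CHECKING above; keeping "BaseAgentProxy" as a string is what breaks the import cycle at runtime. A minimal sketch of consuming the aliases the same way (the lookup helper below is hypothetical, not part of the package):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # resolved by the type checker only, so no circular import at runtime
        from mcp_agent.core.types import ProxyDict


    def lookup(proxies: "ProxyDict", name: str):
        """Hypothetical helper: fetch a proxy by its registered name."""
        return proxies[name]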
@@ -0,0 +1,66 @@
+ import asyncio
+ from mcp_agent.core.fastagent import FastAgent
+
+ # Create the application
+ fast = FastAgent("Social Media Manager")
+
+
+ @fast.agent(
+     "url_fetcher",
+     "Given a URL, provide a complete and comprehensive summary",
+     servers=["fetch"],
+ )
+ @fast.agent(
+     "post_author",
+     """
+     Write a 280 character social media post for any given text.
+     Respond only with the post, never use hashtags.
+     """,
+ )
+ @fast.agent("translate_fr", "Translate the text to French.")
+ @fast.agent("translate_de", "Translate the text to German.")
+ @fast.agent(
+     "review",
+     """
+     Cleanly format the original content and translations for review by a Social Media manager.
+     Highlight any cultural sensitivities.
+     """,
+     model="sonnet",
+ )
+ @fast.parallel(
+     "translated_plan",
+     fan_out=["translate_fr", "translate_de"],
+ )
+ @fast.agent(
+     "human_review_and_post",
+     """
+     - You can send a social media post by saving it to a file named 'post-<lang>.md'.
+     - NEVER POST TO SOCIAL MEDIA UNLESS THE HUMAN HAS REVIEWED AND APPROVED.
+
+     Present the Social Media report to the Human, and then provide direct, actionable questions to assist
+     the Human in posting the content.
+
+     You are being connected to a Human now; the first message you receive will be a
+     Social Media report ready to review with the Human.
+
+     """,
+     human_input=True,
+     servers=["filesystem"],
+ )
+ @fast.chain(
+     "post_writer",
+     sequence=[
+         "url_fetcher",
+         "post_author",
+         "translated_plan",
+         "human_review_and_post",
+     ],
+ )
+ async def main():
+     async with fast.run() as agent:
+         # using chain workflow
+         await agent.post_writer.prompt()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
@@ -17,17 +17,27 @@ fast = FastAgent("Agent Chaining")
      Respond only with the post, never use hashtags.
      """,
  )
+ @fast.chain(
+     name="post_writer",
+     sequence=["url_fetcher", "social_media"],
+ )
  async def main():
      async with fast.run() as agent:
-         await agent.social_media(
-             await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
-         )
+         # using chain workflow
+         await agent.post_writer.prompt()
+
+         # calling directly
+         # await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
+         # await agent.social_media(
+         #     await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
+         # )

+         # agents can also be accessed like dictionaries:
+         # await agent["post_writer"].prompt()

-         # uncomment below to interact with agents
-         # await agent()

-         # alternative syntax for above is agent["social_media"].send(message)
+         # alternative syntax for above is result = agent["post_writer"].send(message)
+         # alternative syntax for above is result = agent["post_writer"].prompt()


  if __name__ == "__main__":
@@ -60,6 +60,10 @@ and whispers of a hidden agenda linger among the villagers.
      and give an overall grade based on the feedback.""",
      model="o3-mini.low",
  )
+ @fast.agent(
+     name="cats-to-dogs",
+     instruction="you should take any text, and change references about cats to dogs",
+ )
  @fast.parallel(
      fan_out=["proofreader", "fact_checker", "style_enforcer"],
      fan_in="grader",
@@ -1,6 +1,16 @@
  from abc import abstractmethod

- from typing import Generic, List, Optional, Protocol, Type, TypeVar, TYPE_CHECKING
+ from typing import (
+     Any,
+     Generic,
+     List,
+     Optional,
+     Protocol,
+     Type,
+     TypeVar,
+     TYPE_CHECKING,
+     Union,
+ )

  from pydantic import Field

@@ -657,3 +667,67 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
              "agent_name": self.name,
          }
          self.logger.debug("Chat finished", data=data)
+
+
+ class PassthroughLLM(AugmentedLLM):
+     """
+     A specialized LLM implementation that simply passes through input messages without modification.
+
+     This is useful for cases where you need an object with the AugmentedLLM interface
+     but want to preserve the original message without any processing, such as in a
+     parallel workflow where no fan-in aggregation is needed.
+     """
+
+     def __init__(self, name: str = "Passthrough", context=None, **kwargs):
+         super().__init__(name=name, context=context, **kwargs)
+
+     async def generate(
+         self,
+         message: Union[str, MessageParamT, List[MessageParamT]],
+         request_params: Optional[RequestParams] = None,
+     ) -> Union[List[MessageT], Any]:
+         """Simply return the input message as is."""
+         # Return in the format expected by the caller
+         return [message] if isinstance(message, list) else message
+
+     async def generate_str(
+         self,
+         message: Union[str, MessageParamT, List[MessageParamT]],
+         request_params: Optional[RequestParams] = None,
+     ) -> str:
+         """Return the input message as a string."""
+         self.show_user_message(message, model="fastagent-passthrough", chat_turn=0)
+         await self.show_assistant_message(message, title="ASSISTANT/PASSTHROUGH")
+
+         return str(message)
+
+     async def generate_structured(
+         self,
+         message: Union[str, MessageParamT, List[MessageParamT]],
+         response_model: Type[ModelT],
+         request_params: Optional[RequestParams] = None,
+     ) -> ModelT:
+         """
+         Return the input message as the requested model type.
+         This is a best-effort implementation - it may fail if the
+         message cannot be converted to the requested model.
+         """
+         if isinstance(message, response_model):
+             return message
+         elif isinstance(message, dict):
+             return response_model(**message)
+         elif isinstance(message, str):
+             try:
+                 # Try to parse as JSON if it's a string
+                 import json
+
+                 data = json.loads(message)
+                 return response_model(**data)
+             except:  # noqa: E722
+                 raise ValueError(
+                     f"Cannot convert message of type {type(message)} to {response_model}"
+                 )
+         else:
+             raise ValueError(
+                 f"Cannot convert message of type {type(message)} to {response_model}"
+             )
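PassthroughLLM acts as an identity element for workflows: generate() and generate_str() echo their input (generate_str also drives the user/assistant console displays), and generate_structured() only converts. A minimal usage sketch based on the methods above; the Reply model and the inputs are illustrative:

    from pydantic import BaseModel


    class Reply(BaseModel):
        text: str


    llm = PassthroughLLM(name="fan-in-noop")
    echoed = await llm.generate_str("hello")                         # -> "hello"
    parsed = await llm.generate_structured('{"text": "hi"}', Reply)  # JSON string -> Reply(text="hi")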
@@ -39,8 +39,9 @@ from mcp_agent.workflows.llm.augmented_llm import (
  )
  from mcp_agent.core.exceptions import ProviderKeyError
  from mcp_agent.logging.logger import get_logger
+ from rich.text import Text

- DEFAULT_ANTHROPIC_MODEL = "claude-3-5-sonnet-latest"
+ DEFAULT_ANTHROPIC_MODEL = "claude-3-7-sonnet-latest"


  class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
@@ -149,9 +150,23 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                  "Please check that your API key is valid and not expired.",
              ) from response
          elif isinstance(response, BaseException):
-             self.logger.error(f"Error: {executor_result}")
+             error_details = str(response)
+             self.logger.error(f"Error: {error_details}", data=executor_result)
+
+             # Try to extract more useful information for API errors
+             if hasattr(response, "status_code") and hasattr(response, "response"):
+                 try:
+                     error_json = response.response.json()
+                     error_details = (
+                         f"Error code: {response.status_code} - {error_json}"
+                     )
+                 except:  # noqa: E722
+                     error_details = (
+                         f"Error code: {response.status_code} - {str(response)}"
+                     )
+
              # Convert other errors to text response
-             error_message = f"Error during generation: {str(response)}"
+             error_message = f"Error during generation: {error_details}"
              response = Message(
                  id="error",  # Required field
                  model="error",  # Required field
@@ -207,14 +222,25 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                  message_text += block.text

          # response.stop_reason == "tool_use":
-         for content in response.content:
-             if content.type == "tool_use":
+         # First, collect all tool uses in this turn
+         tool_uses = [c for c in response.content if c.type == "tool_use"]
+
+         if tool_uses:
+             if message_text == "":
+                 message_text = Text(
+                     "the assistant requested tool calls",
+                     style="dim green italic",
+                 )
+
+             await self.show_assistant_message(message_text)
+
+             # Process all tool calls and collect results
+             tool_results = []
+             for content in tool_uses:
                  tool_name = content.name
                  tool_args = content.input
                  tool_use_id = content.id

-                 await self.show_assistant_message(message_text, tool_name)
-
                  self.show_tool_call(available_tools, tool_name, tool_args)
                  tool_call_request = CallToolRequest(
                      method="tools/call",
@@ -227,20 +253,25 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                      request=tool_call_request, tool_call_id=tool_use_id
                  )
                  self.show_tool_result(result)
-                 messages.append(
-                     MessageParam(
-                         role="user",
-                         content=[
-                             ToolResultBlockParam(
-                                 type="tool_result",
-                                 tool_use_id=tool_use_id,
-                                 content=result.content,
-                                 is_error=result.isError,
-                             )
-                         ],
+
+                 # Add each result to our collection
+                 tool_results.append(
+                     ToolResultBlockParam(
+                         type="tool_result",
+                         tool_use_id=tool_use_id,
+                         content=result.content,
+                         is_error=result.isError,
                      )
                  )

+             # Add all tool results in a single message
+             messages.append(
+                 MessageParam(
+                     role="user",
+                     content=tool_results,
+                 )
+             )
+
          if params.use_history:
              self.history.set(messages)

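The net effect of the two hunks above is that an assistant turn containing several tool_use blocks now yields one user message carrying all of the tool_result blocks, instead of one user message per tool call. The appended message has roughly this shape (IDs and content illustrative):

    {
        "role": "user",
        "content": [
            {"type": "tool_result", "tool_use_id": "toolu_01", "content": [...], "is_error": False},
            {"type": "tool_result", "tool_use_id": "toolu_02", "content": [...], "is_error": False},
        ],
    }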
@@ -1,4 +1,4 @@
- from typing import Any, Callable, List, Optional, Type, TYPE_CHECKING
+ from typing import Any, Callable, List, Optional, Type, TYPE_CHECKING, Union
  import asyncio

  from mcp_agent.agents.agent import Agent
@@ -28,15 +28,17 @@ class ParallelLLM(AugmentedLLM[MessageParamT, MessageT]):
          fan_out_agents: List[Agent | AugmentedLLM],
          llm_factory: Callable[[Agent], AugmentedLLM] = None,
          context: Optional["Context"] = None,
+         include_request: bool = True,
          **kwargs,
      ):
          super().__init__(context=context, **kwargs)
          self.fan_in_agent = fan_in_agent
          self.fan_out_agents = fan_out_agents
          self.llm_factory = llm_factory
+         self.include_request = include_request
          self.history = None  # History tracking is complex in this workflow

-     async def ensure_llm(self, agent: Agent | AugmentedLLM) -> AugmentedLLM:
+     async def ensure_llm(self, agent: Union[Agent, AugmentedLLM]) -> AugmentedLLM:
          """Ensure an agent has an LLM attached, using existing or creating new."""
          if isinstance(agent, AugmentedLLM):
              return agent
@@ -65,9 +67,14 @@ class ParallelLLM(AugmentedLLM[MessageParamT, MessageT]):
              *[llm.generate(message, request_params) for llm in fan_out_llms]
          )

+         # Get message string for inclusion in formatted output
+         message_str = (
+             str(message) if isinstance(message, (str, MessageParamT)) else None
+         )
+
          # Run fan-in to aggregate results
          result = await fan_in_llm.generate(
-             self._format_responses(responses),
+             self._format_responses(responses, message_str),
              request_params=request_params,
          )

@@ -92,9 +99,14 @@ class ParallelLLM(AugmentedLLM[MessageParamT, MessageT]):
              *[llm.generate_str(message, request_params) for llm in fan_out_llms]
          )

+         # Get message string for inclusion in formatted output
+         message_str = (
+             str(message) if isinstance(message, (str, MessageParamT)) else None
+         )
+
          # Run fan-in to aggregate results
          result = await fan_in_llm.generate_str(
-             self._format_responses(responses),
+             self._format_responses(responses, message_str),
              request_params=request_params,
          )

@@ -123,19 +135,32 @@ class ParallelLLM(AugmentedLLM[MessageParamT, MessageT]):
              ]
          )

+         # Get message string for inclusion in formatted output
+         message_str = (
+             str(message) if isinstance(message, (str, MessageParamT)) else None
+         )
+
          # Run fan-in to aggregate results
          result = await fan_in_llm.generate_structured(
-             self._format_responses(responses),
+             self._format_responses(responses, message_str),
              response_model=response_model,
              request_params=request_params,
          )

          return result

-     def _format_responses(self, responses: List[Any]) -> str:
+     def _format_responses(self, responses: List[Any], message: str = None) -> str:
          """Format a list of responses for the fan-in agent."""
          formatted = []
+
+         # Include the original message if specified
+         if self.include_request and message:
+             formatted.append("The following request was sent to the agents:")
+             formatted.append(f"<fastagent:request>\n{message}\n</fastagent:request>")
+
          for i, response in enumerate(responses):
              agent_name = self.fan_out_agents[i].name
-             formatted.append(f"Response from {agent_name}:\n{response}")
+             formatted.append(
+                 f'<fastagent:response agent="{agent_name}">\n{response}\n</fastagent:response>'
+             )
          return "\n\n".join(formatted)
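With include_request left at its default of True, the fan-in LLM now receives the original request followed by each fan-out response wrapped in XML-style tags. For a two-agent fan-out the aggregated string looks roughly like this (content illustrative, agent names taken from the examples above):

    The following request was sent to the agents:
    <fastagent:request>
    Summarise https://example.com
    </fastagent:request>

    <fastagent:response agent="translate_fr">
    ...French translation...
    </fastagent:response>

    <fastagent:response agent="translate_de">
    ...German translation...
    </fastagent:response>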