fast-agent-mcp 0.1.12__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
- fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
- mcp_agent/__init__.py +75 -0
- mcp_agent/agents/agent.py +61 -415
- mcp_agent/agents/base_agent.py +522 -0
- mcp_agent/agents/workflow/__init__.py +1 -0
- mcp_agent/agents/workflow/chain_agent.py +173 -0
- mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
- mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
- mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +11 -21
- mcp_agent/agents/workflow/parallel_agent.py +182 -0
- mcp_agent/agents/workflow/router_agent.py +307 -0
- mcp_agent/app.py +15 -19
- mcp_agent/cli/commands/bootstrap.py +19 -38
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +7 -14
- mcp_agent/cli/main.py +7 -10
- mcp_agent/cli/terminal.py +3 -3
- mcp_agent/config.py +25 -40
- mcp_agent/context.py +12 -21
- mcp_agent/context_dependent.py +3 -5
- mcp_agent/core/agent_types.py +10 -7
- mcp_agent/core/direct_agent_app.py +179 -0
- mcp_agent/core/direct_decorators.py +443 -0
- mcp_agent/core/direct_factory.py +476 -0
- mcp_agent/core/enhanced_prompt.py +23 -55
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/fastagent.py +145 -371
- mcp_agent/core/interactive_prompt.py +424 -0
- mcp_agent/core/mcp_content.py +17 -17
- mcp_agent/core/prompt.py +6 -9
- mcp_agent/core/request_params.py +6 -3
- mcp_agent/core/validation.py +92 -18
- mcp_agent/executor/decorator_registry.py +9 -17
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +19 -41
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +15 -21
- mcp_agent/human_input/handler.py +4 -7
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/llm/__init__.py +2 -0
- mcp_agent/llm/augmented_llm.py +450 -0
- mcp_agent/llm/augmented_llm_passthrough.py +162 -0
- mcp_agent/llm/augmented_llm_playback.py +83 -0
- mcp_agent/llm/memory.py +103 -0
- mcp_agent/{workflows/llm → llm}/model_factory.py +22 -16
- mcp_agent/{workflows/llm → llm}/prompt_utils.py +1 -3
- mcp_agent/llm/providers/__init__.py +8 -0
- mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +8 -25
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +56 -194
- mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +99 -190
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +72 -71
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +65 -71
- mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +16 -44
- mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +4 -4
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +9 -11
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +8 -12
- mcp_agent/{workflows/llm → llm}/sampling_converter.py +3 -31
- mcp_agent/llm/sampling_format_converter.py +37 -0
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +17 -19
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +22 -22
- mcp_agent/mcp/gen_client.py +1 -3
- mcp_agent/mcp/interfaces.py +117 -110
- mcp_agent/mcp/logger_textio.py +97 -0
- mcp_agent/mcp/mcp_agent_client_session.py +7 -7
- mcp_agent/mcp/mcp_agent_server.py +8 -8
- mcp_agent/mcp/mcp_aggregator.py +102 -143
- mcp_agent/mcp/mcp_connection_manager.py +20 -27
- mcp_agent/mcp/prompt_message_multipart.py +68 -16
- mcp_agent/mcp/prompt_render.py +77 -0
- mcp_agent/mcp/prompt_serialization.py +30 -48
- mcp_agent/mcp/prompts/prompt_constants.py +18 -0
- mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
- mcp_agent/mcp/prompts/prompt_load.py +109 -0
- mcp_agent/mcp/prompts/prompt_server.py +155 -195
- mcp_agent/mcp/prompts/prompt_template.py +35 -66
- mcp_agent/mcp/resource_utils.py +7 -14
- mcp_agent/mcp/sampling.py +17 -17
- mcp_agent/mcp_server/agent_server.py +13 -17
- mcp_agent/mcp_server_registry.py +13 -22
- mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +3 -2
- mcp_agent/resources/examples/in_dev/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +6 -3
- mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +4 -8
- mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
- mcp_agent/ui/console_display.py +16 -20
- fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
- mcp_agent/core/agent_app.py +0 -646
- mcp_agent/core/agent_utils.py +0 -71
- mcp_agent/core/decorators.py +0 -455
- mcp_agent/core/factory.py +0 -463
- mcp_agent/core/proxies.py +0 -269
- mcp_agent/core/types.py +0 -24
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/mcp/stdio.py +0 -111
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
- mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
- mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
- mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/researcher-imp.py +0 -190
- mcp_agent/resources/examples/researcher/researcher.py +0 -38
- mcp_agent/resources/examples/workflows/chaining.py +0 -44
- mcp_agent/resources/examples/workflows/evaluator.py +0 -78
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
- mcp_agent/resources/examples/workflows/human_input.py +0 -25
- mcp_agent/resources/examples/workflows/orchestrator.py +0 -73
- mcp_agent/resources/examples/workflows/parallel.py +0 -78
- mcp_agent/resources/examples/workflows/router.py +0 -53
- mcp_agent/resources/examples/workflows/sse.py +0 -23
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +0 -18
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +0 -61
- mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
- mcp_agent/workflows/embedding/embedding_openai.py +0 -46
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -481
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -120
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -134
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -161
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -60
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm.py +0 -753
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +0 -241
- mcp_agent/workflows/llm/augmented_llm_playback.py +0 -109
- mcp_agent/workflows/llm/providers/__init__.py +0 -8
- mcp_agent/workflows/llm/sampling_format_converter.py +0 -22
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +0 -578
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +0 -350
- mcp_agent/workflows/parallel/fan_out.py +0 -187
- mcp_agent/workflows/parallel/parallel_llm.py +0 -166
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +0 -368
- mcp_agent/workflows/router/router_embedding.py +0 -240
- mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
- mcp_agent/workflows/router/router_embedding_openai.py +0 -59
- mcp_agent/workflows/router/router_llm.py +0 -320
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +0 -320
- mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
- mcp_agent/workflows/swarm/swarm_openai.py +0 -41
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
- /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
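The headline change in 0.2.0 is the restructure visible in the renames above: LLM plumbing moves out of `mcp_agent.workflows` into a top-level `mcp_agent.llm` package (provider code under `llm/providers`), and workflow patterns are rebuilt as agents under `mcp_agent.agents.workflow`. A minimal migration sketch, assuming the public import paths track the file moves listed above; the imported class names are illustrative assumptions, not confirmed exports:

```python
# Hypothetical compatibility shim for code written against 0.1.12. Module paths
# follow the renames in the file list above; the names are assumptions.
try:
    # 0.2.0 layout: top-level llm package, providers grouped underneath it
    from mcp_agent.llm.model_factory import ModelFactory
    from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
except ImportError:
    # 0.1.12 layout: everything under mcp_agent.workflows.llm
    from mcp_agent.workflows.llm.model_factory import ModelFactory
    from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
```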
mcp_agent/workflows/parallel/fan_in.py
@@ -1,350 +0,0 @@
-import contextlib
-from typing import Callable, Dict, List, Optional, Type, TYPE_CHECKING
-
-from mcp_agent.agents.agent import Agent
-from mcp_agent.context_dependent import ContextDependent
-from mcp_agent.workflows.llm.augmented_llm import (
-    AugmentedLLM,
-    MessageParamT,
-    MessageT,
-    ModelT,
-    RequestParams,
-)
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-FanInInput = (
-    # Dict of agent/source name to list of messages generated by that agent
-    Dict[str, List[MessageT] | List[MessageParamT]]
-    # Dict of agent/source name to string generated by that agent
-    | Dict[str, str]
-    # List of lists of messages generated by each agent
-    | List[List[MessageT] | List[MessageParamT]]
-    # List of strings generated by each agent
-    | List[str]
-)
-
-
-class FanIn(ContextDependent):
-    """
-    Aggregate results from multiple parallel tasks into a single result.
-
-    This is a building block of the Parallel workflow, which can be used to fan out
-    work to multiple agents or other parallel tasks, and then aggregate the results.
-
-    For example, you can use FanIn to combine the results of multiple agents into a single response,
-    such as a Summarization Fan-In agent that combines the outputs of multiple language models.
-    """
-
-    def __init__(
-        self,
-        aggregator_agent: Agent | AugmentedLLM[MessageParamT, MessageT],
-        llm_factory: Callable[[Agent], AugmentedLLM[MessageParamT, MessageT]] = None,
-        context: Optional["Context"] = None,
-        **kwargs,
-    ):
-        """
-        Initialize the FanIn with an Agent responsible for processing multiple responses into a single aggregated one.
-        """
-
-        super().__init__(context=context, **kwargs)
-
-        self.executor = self.context.executor
-        self.llm_factory = llm_factory
-        self.aggregator_agent = aggregator_agent
-
-        if not isinstance(self.aggregator_agent, AugmentedLLM):
-            if not self.llm_factory:
-                raise ValueError("llm_factory is required when using an Agent")
-
-    async def generate(
-        self,
-        messages: FanInInput,
-        request_params: RequestParams | None = None,
-    ) -> List[MessageT]:
-        """
-        Request fan-in agent generation from a list of messages from multiple sources/agents.
-        Internally aggregates the messages and then calls the aggregator agent to generate a response.
-        """
-        message: (
-            str | MessageParamT | List[MessageParamT]
-        ) = await self.aggregate_messages(messages)
-
-        async with contextlib.AsyncExitStack() as stack:
-            if isinstance(self.aggregator_agent, AugmentedLLM):
-                llm = self.aggregator_agent
-            else:
-                # Enter agent context
-                ctx_agent = await stack.enter_async_context(self.aggregator_agent)
-                llm = await ctx_agent.attach_llm(self.llm_factory)
-
-            return await llm.generate(
-                message=message,
-                request_params=request_params,
-            )
-
-    async def generate_str(
-        self,
-        messages: FanInInput,
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """
-        Request fan-in agent generation from a list of messages from multiple sources/agents.
-        Internally aggregates the messages and then calls the aggregator agent to generate a
-        response, which is returned as a string.
-        """
-
-        message: (
-            str | MessageParamT | List[MessageParamT]
-        ) = await self.aggregate_messages(messages)
-
-        async with contextlib.AsyncExitStack() as stack:
-            if isinstance(self.aggregator_agent, AugmentedLLM):
-                llm = self.aggregator_agent
-            else:
-                # Enter agent context
-                ctx_agent = await stack.enter_async_context(self.aggregator_agent)
-                llm = await ctx_agent.attach_llm(self.llm_factory)
-
-            return await llm.generate_str(
-                message=message, request_params=request_params
-            )
-
-    async def generate_structured(
-        self,
-        messages: FanInInput,
-        response_model: Type[ModelT],
-        request_params: RequestParams | None = None,
-    ) -> ModelT:
-        """
-        Request a structured fan-in agent generation from a list of messages
-        from multiple sources/agents. Internally aggregates the messages and then calls
-        the aggregator agent to generate a response, which is returned as a Pydantic model.
-        """
-
-        message: (
-            str | MessageParamT | List[MessageParamT]
-        ) = await self.aggregate_messages(messages)
-
-        async with contextlib.AsyncExitStack() as stack:
-            if isinstance(self.aggregator_agent, AugmentedLLM):
-                llm = self.aggregator_agent
-            else:
-                # Enter agent context
-                ctx_agent = await stack.enter_async_context(self.aggregator_agent)
-                llm = await ctx_agent.attach_llm(self.llm_factory)
-
-            return await llm.generate_structured(
-                message=message,
-                response_model=response_model,
-                request_params=request_params,
-            )
-
-    async def aggregate_messages(
-        self, messages: FanInInput
-    ) -> str | MessageParamT | List[MessageParamT]:
-        """
-        Aggregate messages from multiple sources/agents into a single message to
-        use with the aggregator agent generation.
-
-        The input can be a dictionary of agent/source name to list of messages
-        generated by that agent, or just the unattributed lists of messages to aggregate.
-
-        Args:
-            messages: Can be one of:
-                - Dict[str, List[MessageT] | List[MessageParamT]]: Dict of agent names to messages
-                - Dict[str, str]: Dict of agent names to message strings
-                - List[List[MessageT] | List[MessageParamT]]: List of message lists from agents
-                - List[str]: List of message strings from agents
-
-        Returns:
-            Aggregated message as string, MessageParamT or List[MessageParamT]
-
-        Raises:
-            ValueError: If input is empty or contains empty/invalid elements
-        """
-        # Handle dictionary inputs
-        if isinstance(messages, dict):
-            # Check for empty dict
-            if not messages:
-                raise ValueError("Input dictionary cannot be empty")
-
-            first_value = next(iter(messages.values()))
-
-            # Dict[str, List[MessageT] | List[MessageParamT]]
-            if isinstance(first_value, list):
-                if any(not isinstance(v, list) for v in messages.values()):
-                    raise ValueError("All dictionary values must be lists of messages")
-                # Process list of messages for each agent
-                return await self.aggregate_agent_messages(messages)
-
-            # Dict[str, str]
-            elif isinstance(first_value, str):
-                if any(not isinstance(v, str) for v in messages.values()):
-                    raise ValueError("All dictionary values must be strings")
-                # Process string outputs from each agent
-                return await self.aggregate_agent_message_strings(messages)
-
-            else:
-                raise ValueError(
-                    "Dictionary values must be either lists of messages or strings"
-                )
-
-        # Handle list inputs
-        elif isinstance(messages, list):
-            # Check for empty list
-            if not messages:
-                raise ValueError("Input list cannot be empty")
-
-            first_item = messages[0]
-
-            # List[List[MessageT] | List[MessageParamT]]
-            if isinstance(first_item, list):
-                if any(not isinstance(item, list) for item in messages):
-                    raise ValueError("All list items must be lists of messages")
-                # Process list of message lists
-                return await self.aggregate_message_lists(messages)
-
-            # List[str]
-            elif isinstance(first_item, str):
-                if any(not isinstance(item, str) for item in messages):
-                    raise ValueError("All list items must be strings")
-                # Process list of strings
-                return await self.aggregate_message_strings(messages)
-
-            else:
-                raise ValueError(
-                    "List items must be either lists of messages or strings"
-                )
-
-        else:
-            raise ValueError(
-                "Input must be either a dictionary of agent messages or a list of messages"
-            )
-
-    # Helper methods for processing different types of inputs
-    async def aggregate_agent_messages(
-        self, messages: Dict[str, List[MessageT] | List[MessageParamT]]
-    ) -> str | MessageParamT | List[MessageParamT]:
-        """
-        Aggregate message lists with agent names.
-
-        Args:
-            messages: Dictionary mapping agent names to their message lists
-
-        Returns:
-            str | List[MessageParamT]: Messages formatted with agent attribution
-
-        """
-
-        # In the default implementation, we'll just convert the messages to a
-        # single string with agent attribution
-        aggregated_messages = []
-
-        if not messages:
-            return ""
-
-        # Format each agent's messages with attribution
-        for agent_name, agent_messages in messages.items():
-            agent_message_strings = []
-            for msg in agent_messages or []:
-                if isinstance(msg, str):
-                    agent_message_strings.append(f"Agent {agent_name}: {msg}")
-                else:
-                    # Assume it's a Message/MessageParamT and add attribution
-                    # TODO -- this should really unpack the text from the message
-                    agent_message_strings.append(
-                        f"Agent {agent_name}: {str(msg.content[0])}"
-                    )
-
-            aggregated_messages.append("\n".join(agent_message_strings))
-
-        # Combine all messages with clear separation
-        final_message = "\n\n".join(aggregated_messages)
-        final_message = f"Aggregated responses from multiple Agents:\n\n{final_message}"
-        return final_message
-
-    async def aggregate_agent_message_strings(self, messages: Dict[str, str]) -> str:
-        """
-        Aggregate string outputs with agent names.
-
-        Args:
-            messages: Dictionary mapping agent names to their string outputs
-
-        Returns:
-            str: Combined string with agent attributions
-        """
-        if not messages:
-            return ""
-
-        # Format each agent's message with agent attribution
-        aggregated_messages = [
-            f"Agent {agent_name}: {message}" for agent_name, message in messages.items()
-        ]
-
-        # Combine all messages with clear separation
-        final_message = "\n\n".join(aggregated_messages)
-        final_message = f"Aggregated responses from multiple Agents:\n\n{final_message}"
-        return final_message
-
-    async def aggregate_message_lists(
-        self, messages: List[List[MessageT] | List[MessageParamT]]
-    ) -> str | MessageParamT | List[MessageParamT]:
-        """
-        Aggregate message lists without agent names.
-
-        Args:
-            messages: List of message lists from different agents
-
-        Returns:
-            List[MessageParamT]: List of formatted messages
-        """
-        aggregated_messages = []
-
-        if not messages:
-            return ""
-
-        # Format each source's messages
-        for i, source_messages in enumerate(messages, 1):
-            source_message_strings = []
-            for msg in source_messages or []:
-                if isinstance(msg, str):
-                    source_message_strings.append(f"Source {i}: {msg}")
-                else:
-                    # Assume it's a MessageParamT or MessageT and add source attribution
-                    source_message_strings.append(f"Source {i}: {str(msg)}")
-
-            aggregated_messages.append("\n".join(source_messages))
-
-        # Combine all messages with clear separation
-        final_message = "\n\n".join(aggregated_messages)
-        final_message = (
-            f"Aggregated responses from multiple sources:\n\n{final_message}"
-        )
-        return final_message
-
-    async def aggregate_message_strings(self, messages: List[str]) -> str:
-        """
-        Aggregate string outputs without agent names.
-
-        Args:
-            messages: List of string outputs from different agents
-
-        Returns:
-            str: Combined string with source attributions
-        """
-        if not messages:
-            return ""
-
-        # Format each source's message with attribution
-        aggregated_messages = [
-            f"Source {i}: {message}" for i, message in enumerate(messages, 1)
-        ]
-
-        # Combine all messages with clear separation
-        final_message = "\n\n".join(aggregated_messages)
-        final_message = (
-            f"Aggregated responses from multiple sources:\n\n{final_message}"
-        )
-        return final_message
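The deleted FanIn collapsed its heterogeneous inputs into one attributed prompt string before handing it to the aggregator agent. A standalone sketch of that attribution format, lifted from `aggregate_agent_message_strings` in the hunk above (no mcp_agent imports required):

```python
# Reproduction of the attribution format the deleted
# FanIn.aggregate_agent_message_strings produced.
def aggregate_agent_message_strings(messages: dict[str, str]) -> str:
    if not messages:
        return ""
    # One attributed line per agent, blank-line separated, with a fixed preamble
    aggregated = [f"Agent {name}: {message}" for name, message in messages.items()]
    return "Aggregated responses from multiple Agents:\n\n" + "\n\n".join(aggregated)


print(aggregate_agent_message_strings(
    {"researcher": "Found three sources.", "critic": "One source is stale."}
))
# Aggregated responses from multiple Agents:
#
# Agent researcher: Found three sources.
# Agent critic: One source is stale.
```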
mcp_agent/workflows/parallel/fan_out.py
@@ -1,187 +0,0 @@
-import contextlib
-import functools
-from typing import Any, Callable, Coroutine, Dict, List, Optional, Type, TYPE_CHECKING
-
-from mcp_agent.agents.agent import Agent
-from mcp_agent.context_dependent import ContextDependent
-from mcp_agent.workflows.llm.augmented_llm import (
-    AugmentedLLM,
-    MessageParamT,
-    MessageT,
-    ModelT,
-    RequestParams,
-)
-from mcp_agent.logging.logger import get_logger
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-logger = get_logger(__name__)
-
-
-class FanOut(ContextDependent):
-    """
-    Distribute work to multiple parallel tasks.
-
-    This is a building block of the Parallel workflow, which can be used to fan out
-    work to multiple agents or other parallel tasks, and then aggregate the results.
-    """
-
-    def __init__(
-        self,
-        agents: List[Agent | AugmentedLLM[MessageParamT, MessageT]] | None = None,
-        functions: List[Callable[[MessageParamT], List[MessageT]]] | None = None,
-        llm_factory: Callable[[Agent], AugmentedLLM[MessageParamT, MessageT]] = None,
-        context: Optional["Context"] = None,
-        **kwargs,
-    ):
-        """
-        Initialize the FanOut with a list of agents, functions, or LLMs.
-        If agents are provided, they will be wrapped in an AugmentedLLM using llm_factory if not already done so.
-        If functions are provided, they will be invoked in parallel directly.
-        """
-        super().__init__(context=context, **kwargs)
-        self.executor = self.context.executor
-        self.llm_factory = llm_factory
-        self.agents = agents or []
-        self.functions: List[Callable[[MessageParamT], MessageT]] = functions or []
-
-        if not self.agents and not self.functions:
-            raise ValueError(
-                "At least one agent or function must be provided for fan-out to work"
-            )
-
-        if not self.llm_factory:
-            for agent in self.agents:
-                if not isinstance(agent, AugmentedLLM):
-                    raise ValueError("llm_factory is required when using an Agent")
-
-    async def generate(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> Dict[str, List[MessageT]]:
-        """
-        Request fan-out agent/function generations, and return the results as a dictionary.
-        The keys are the names of the agents or functions that generated the results.
-        """
-        tasks: List[
-            Callable[..., List[MessageT]] | Coroutine[Any, Any, List[MessageT]]
-        ] = []
-        task_names: List[str] = []
-        task_results = []
-
-        async with contextlib.AsyncExitStack() as stack:
-            for agent in self.agents:
-                if isinstance(agent, AugmentedLLM):
-                    llm = agent
-                else:
-                    # Enter agent context
-                    ctx_agent = await stack.enter_async_context(agent)
-                    llm = await ctx_agent.attach_llm(self.llm_factory)
-
-                tasks.append(
-                    llm.generate(
-                        message=message,
-                        request_params=request_params,
-                    )
-                )
-                task_names.append(agent.name)
-
-            # Create bound methods for regular functions
-            for function in self.functions:
-                tasks.append(functools.partial(function, message))
-                task_names.append(function.__name__ or id(function))
-
-            # Wait for all tasks to complete
-            logger.debug("Running fan-out tasks:", data=task_names)
-            task_results = await self.executor.execute(*tasks)
-
-        logger.debug(
-            "Fan-out tasks completed:", data=dict(zip(task_names, task_results))
-        )
-        return dict(zip(task_names, task_results))
-
-    async def generate_str(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> Dict[str, str]:
-        """
-        Request fan-out agent/function generations and return the string results as a dictionary.
-        The keys are the names of the agents or functions that generated the results.
-        """
-
-        def fn_result_to_string(fn, message):
-            return str(fn(message))
-
-        tasks: List[Callable[..., str] | Coroutine[Any, Any, str]] = []
-        task_names: List[str] = []
-        task_results = []
-
-        async with contextlib.AsyncExitStack() as stack:
-            for agent in self.agents:
-                if isinstance(agent, AugmentedLLM):
-                    llm = agent
-                else:
-                    # Enter agent context
-                    ctx_agent = await stack.enter_async_context(agent)
-                    llm = await ctx_agent.attach_llm(self.llm_factory)
-
-                tasks.append(
-                    llm.generate_str(
-                        message=message,
-                        request_params=request_params,
-                    )
-                )
-                task_names.append(agent.name)
-
-            # Create bound methods for regular functions
-            for function in self.functions:
-                tasks.append(functools.partial(fn_result_to_string, function, message))
-                task_names.append(function.__name__ or id(function))
-
-            task_results = await self.executor.execute(*tasks)
-
-        return dict(zip(task_names, task_results))
-
-    async def generate_structured(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        response_model: Type[ModelT],
-        request_params: RequestParams | None = None,
-    ) -> Dict[str, ModelT]:
-        """
-        Request a structured fan-out agent/function generation and return the result as a Pydantic model.
-        The keys are the names of the agents or functions that generated the results.
-        """
-        tasks = []
-        task_names = []
-        task_results = []
-
-        async with contextlib.AsyncExitStack() as stack:
-            for agent in self.agents:
-                if isinstance(agent, AugmentedLLM):
-                    llm = agent
-                else:
-                    # Enter agent context
-                    ctx_agent = await stack.enter_async_context(agent)
-                    llm = await ctx_agent.attach_llm(self.llm_factory)
-
-                tasks.append(
-                    llm.generate_structured(
-                        message=message,
-                        response_model=response_model,
-                        request_params=request_params,
-                    )
-                )
-                task_names.append(agent.name)
-
-            # Create bound methods for regular functions
-            for function in self.functions:
-                tasks.append(functools.partial(function, message))
-                task_names.append(function.__name__ or id(function))
-
-            task_results = await self.executor.execute(*tasks)
-
-        return dict(zip(task_names, task_results))
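The deleted FanOut dispatched one generation per agent plus one `functools.partial` per plain function, then zipped the results back to their names. A self-contained sketch of that dispatch pattern, using `asyncio.gather` and stub agents in place of the context's executor and real agents:

```python
# Sketch of the fan-out dispatch pattern from the deleted FanOut.generate_str.
# asyncio.gather stands in for the executor; the agents are stubs.
import asyncio
import functools


async def stub_agent(name: str, message: str) -> str:
    return f"{name} handled: {message}"


def shout(message: str) -> str:
    return message.upper()


async def fan_out(message: str) -> dict[str, str]:
    task_names = ["summarizer", "critic"]
    tasks = [stub_agent(name, message) for name in task_names]
    results = list(await asyncio.gather(*tasks))
    # Plain functions are wrapped with functools.partial, as in the deleted code
    fn = functools.partial(shout, message)
    task_names.append(shout.__name__)
    results.append(fn())
    return dict(zip(task_names, results))


print(asyncio.run(fan_out("review the draft")))
# {'summarizer': 'summarizer handled: review the draft',
#  'critic': 'critic handled: review the draft',
#  'shout': 'REVIEW THE DRAFT'}
```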
mcp_agent/workflows/parallel/parallel_llm.py
@@ -1,166 +0,0 @@
-from typing import Any, Callable, List, Optional, Type, TYPE_CHECKING, Union
-import asyncio
-
-from mcp_agent.agents.agent import Agent
-from mcp_agent.workflows.llm.augmented_llm import (
-    AugmentedLLM,
-    MessageParamT,
-    MessageT,
-    ModelT,
-    RequestParams,
-)
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-
-class ParallelLLM(AugmentedLLM[MessageParamT, MessageT]):
-    """
-    LLMs can sometimes work simultaneously on a task (fan-out)
-    and have their outputs aggregated programmatically (fan-in).
-    This workflow performs both the fan-out and fan-in operations using LLMs.
-    From the user's perspective, an input is specified and the output is returned.
-    """
-
-    def __init__(
-        self,
-        fan_in_agent: Agent | AugmentedLLM,
-        fan_out_agents: List[Agent | AugmentedLLM],
-        llm_factory: Callable[[Agent], AugmentedLLM] = None,
-        context: Optional["Context"] = None,
-        include_request: bool = True,
-        **kwargs,
-    ):
-        super().__init__(context=context, **kwargs)
-        self.fan_in_agent = fan_in_agent
-        self.fan_out_agents = fan_out_agents
-        self.llm_factory = llm_factory
-        self.include_request = include_request
-        self.history = None  # History tracking is complex in this workflow
-
-    async def ensure_llm(self, agent: Union[Agent, AugmentedLLM]) -> AugmentedLLM:
-        """Ensure an agent has an LLM attached, using existing or creating new."""
-        if isinstance(agent, AugmentedLLM):
-            return agent
-
-        if not hasattr(agent, "_llm") or agent._llm is None:
-            return await agent.attach_llm(self.llm_factory)
-
-        return agent._llm
-
-    async def generate(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> List[MessageT] | Any:
-        """Generate responses using parallel fan-out and fan-in."""
-        # Ensure all agents have LLMs
-        fan_out_llms = []
-        for agent in self.fan_out_agents:
-            llm = await self.ensure_llm(agent)
-            fan_out_llms.append(llm)
-
-        fan_in_llm = await self.ensure_llm(self.fan_in_agent)
-
-        # Run fan-out operations in parallel
-        responses = await asyncio.gather(
-            *[llm.generate(message, request_params) for llm in fan_out_llms]
-        )
-
-        # Get message string for inclusion in formatted output
-        message_str = (
-            str(message) if isinstance(message, (str, MessageParamT)) else None
-        )
-
-        # Run fan-in to aggregate results
-        result = await fan_in_llm.generate(
-            self._format_responses(responses, message_str),
-            request_params=request_params,
-        )
-
-        return result
-
-    async def generate_str(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """Generate string responses using parallel fan-out and fan-in."""
-        # Ensure all agents have LLMs
-        fan_out_llms = []
-        for agent in self.fan_out_agents:
-            llm = await self.ensure_llm(agent)
-            fan_out_llms.append(llm)
-
-        fan_in_llm = await self.ensure_llm(self.fan_in_agent)
-
-        # Run fan-out operations in parallel
-        responses = await asyncio.gather(
-            *[llm.generate_str(message, request_params) for llm in fan_out_llms]
-        )
-
-        # Get message string for inclusion in formatted output
-        message_str = (
-            str(message) if isinstance(message, (str, MessageParamT)) else None
-        )
-
-        # Run fan-in to aggregate results
-        result = await fan_in_llm.generate_str(
-            self._format_responses(responses, message_str),
-            request_params=request_params,
-        )
-
-        return result
-
-    async def generate_structured(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        response_model: Type[ModelT],
-        request_params: RequestParams | None = None,
-    ) -> ModelT:
-        """Generate structured responses using parallel fan-out and fan-in."""
-        # Ensure all agents have LLMs
-        fan_out_llms = []
-        for agent in self.fan_out_agents:
-            llm = await self.ensure_llm(agent)
-            fan_out_llms.append(llm)
-
-        fan_in_llm = await self.ensure_llm(self.fan_in_agent)
-
-        # Run fan-out operations in parallel
-        responses = await asyncio.gather(
-            *[
-                llm.generate_structured(message, response_model, request_params)
-                for llm in fan_out_llms
-            ]
-        )
-
-        # Get message string for inclusion in formatted output
-        message_str = (
-            str(message) if isinstance(message, (str, MessageParamT)) else None
-        )
-
-        # Run fan-in to aggregate results
-        result = await fan_in_llm.generate_structured(
-            self._format_responses(responses, message_str),
-            response_model=response_model,
-            request_params=request_params,
-        )
-
-        return result
-
-    def _format_responses(self, responses: List[Any], message: str = None) -> str:
-        """Format a list of responses for the fan-in agent."""
-        formatted = []
-
-        # Include the original message if specified
-        if self.include_request and message:
-            formatted.append("The following request was sent to the agents:")
-            formatted.append(f"<fastagent:request>\n{message}\n</fastagent:request>")
-
-        for i, response in enumerate(responses):
-            agent_name = self.fan_out_agents[i].name
-            formatted.append(
-                f'<fastagent:response agent="{agent_name}">\n{response}\n</fastagent:response>'
-            )
-        return "\n\n".join(formatted)
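ParallelLLM (apparently superseded by agents/workflow/parallel_agent.py in the file list above) wrapped the fan-out results in fastagent-namespaced tags before handing them to the fan-in agent. A standalone reproduction of that formatting, following `_format_responses` in the hunk above:

```python
# Reproduction of the deleted ParallelLLM._format_responses, showing the
# <fastagent:...> delimiters the fan-in agent received.
def format_responses(
    responses: list[str], agent_names: list[str], request: str | None = None
) -> str:
    formatted = []
    # The original request is included when include_request is set
    if request:
        formatted.append("The following request was sent to the agents:")
        formatted.append(f"<fastagent:request>\n{request}\n</fastagent:request>")
    # Each fan-out response is tagged with the agent that produced it
    for name, response in zip(agent_names, responses):
        formatted.append(
            f'<fastagent:response agent="{name}">\n{response}\n</fastagent:response>'
        )
    return "\n\n".join(formatted)


print(format_responses(["Draft A", "Draft B"], ["writer_1", "writer_2"],
                       request="Write a tagline"))
```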