fast-agent-mcp 0.1.2__tar.gz → 0.1.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/.gitignore +2 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/PKG-INFO +4 -3
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/README.md +3 -2
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/pyproject.toml +1 -1
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/agent_utils.py +10 -4
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/enhanced_prompt.py +27 -16
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/fastagent.py +27 -12
- fast_agent_mcp-0.1.3/src/mcp_agent/core/proxies.py +170 -0
- fast_agent_mcp-0.1.3/src/mcp_agent/core/simulator_registry.py +22 -0
- fast_agent_mcp-0.1.3/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
- fast_agent_mcp-0.1.3/src/mcp_agent/resources/examples/researcher/researcher-imp.py +190 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +16 -1
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/llm/augmented_llm_anthropic.py +12 -6
- fast_agent_mcp-0.1.3/src/mcp_agent/workflows/llm/enhanced_passthrough.py +70 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/llm/model_factory.py +3 -0
- fast_agent_mcp-0.1.2/src/mcp_agent/core/proxies.py +0 -127
- fast_agent_mcp-0.1.2/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -22
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/LICENSE +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/agents/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/agents/agent.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/app.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/cli/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/cli/__main__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/cli/commands/bootstrap.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/cli/commands/config.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/cli/commands/setup.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/cli/main.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/cli/terminal.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/config.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/console.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/context.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/context_dependent.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/agent_app.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/agent_types.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/error_handling.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/exceptions.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/server_validation.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/core/types.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/eval/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/event_progress.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/executor/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/executor/decorator_registry.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/executor/executor.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/executor/task_registry.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/executor/temporal.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/executor/workflow.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/executor/workflow_signal.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/human_input/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/human_input/handler.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/human_input/types.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/logging/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/logging/events.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/logging/json_serializer.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/logging/listeners.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/logging/logger.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/logging/rich_progress.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/logging/tracing.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/logging/transport.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/mcp/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/mcp/gen_client.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/mcp/mcp_activity.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/mcp/mcp_agent_server.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/mcp/mcp_aggregator.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/mcp/stdio.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/mcp_server_registry.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/progress_display.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/internal/agent.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/internal/job.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/internal/social.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/workflows/agent_build.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/telemetry/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/telemetry/usage_tracking.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/embedding/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/embedding/embedding_base.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/embedding/embedding_cohere.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/embedding/embedding_openai.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/llm/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/llm/augmented_llm.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/llm/augmented_llm_openai.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/llm/llm_selector.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/llm/prompt_utils.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/orchestrator/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/orchestrator/orchestrator.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/orchestrator/orchestrator_models.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/orchestrator/orchestrator_prompts.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/parallel/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/parallel/fan_in.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/parallel/fan_out.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/parallel/parallel_llm.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/router/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/router/router_base.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/router/router_embedding.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/router/router_embedding_cohere.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/router/router_embedding_openai.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/router/router_llm.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/swarm/__init__.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/swarm/swarm.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/swarm/swarm_anthropic.py +0 -0
- {fast_agent_mcp-0.1.2 → fast_agent_mcp-0.1.3}/src/mcp_agent/workflows/swarm/swarm_openai.py +0 -0
--- fast_agent_mcp-0.1.2/PKG-INFO
+++ fast_agent_mcp-0.1.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.1.2
+Version: 0.1.3
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -366,7 +366,7 @@ Saved as `social.py` we can now run this workflow from the command line with:
 uv run social.py --agent social_media --message "<url>"
 ```
 
-Add the `--quiet` switch to
+Add the `--quiet` switch to disable progress and message display and return only the final response - useful for simple automations.
 
 ## Workflows
 
@@ -426,7 +426,7 @@ The Parallel Workflow sends the same message to multiple Agents simultaneously (
 )
 ```
 
-
+If you don't specify a `fan-in` agent, the `parallel` returns the combined Agent results verbatim.
 
 `parallel` is also useful to ensemble ideas from different LLMs.
 
@@ -526,6 +526,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
     name="chain", # name of the chain
     sequence=["agent1", "agent2", ...], # list of agents in execution order
     instruction="instruction", # instruction to describe the chain for other workflows
+    cumulative=False # whether to accumulate messages through the chain
     continue_with_final=True, # open chat with agent at end of chain after prompting
 )
 ```
--- fast_agent_mcp-0.1.2/README.md
+++ fast_agent_mcp-0.1.3/README.md
@@ -129,7 +129,7 @@ Saved as `social.py` we can now run this workflow from the command line with:
 uv run social.py --agent social_media --message "<url>"
 ```
 
-Add the `--quiet` switch to
+Add the `--quiet` switch to disable progress and message display and return only the final response - useful for simple automations.
 
 ## Workflows
 
@@ -189,7 +189,7 @@ The Parallel Workflow sends the same message to multiple Agents simultaneously (
 )
 ```
 
-
+If you don't specify a `fan-in` agent, the `parallel` returns the combined Agent results verbatim.
 
 `parallel` is also useful to ensemble ideas from different LLMs.
 
@@ -289,6 +289,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
     name="chain", # name of the chain
     sequence=["agent1", "agent2", ...], # list of agents in execution order
     instruction="instruction", # instruction to describe the chain for other workflows
+    cumulative=False # whether to accumulate messages through the chain
    continue_with_final=True, # open chat with agent at end of chain after prompting
 )
 ```
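The `cumulative` flag documented above is new in 0.1.3. As a minimal sketch of how a chain might declare it, following the README's decorator pattern (the agent names, instructions, and URL below are invented for illustration):

```python
# Hypothetical chain using the new cumulative flag; names are illustrative only.
import asyncio
from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("post-writer-example")

@fast.agent(name="url_fetcher", instruction="Summarise the page at the given URL")
@fast.agent(name="social_media", instruction="Write a 280 character social post")
@fast.chain(
    name="post_writer",
    sequence=["url_fetcher", "social_media"],
    cumulative=False,  # True feeds every prior response to each later agent
)
async def main():
    async with fast.run() as agent:
        print(await agent.post_writer("https://example.com"))

asyncio.run(main())
```

With `cumulative=True`, the new `ChainProxy` (added in this release, see below) wraps the original request and every agent's response in `<fastagent:request>`/`<fastagent:response>` tags instead of passing only the previous output forward.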
--- fast_agent_mcp-0.1.2/src/mcp_agent/core/agent_utils.py
+++ fast_agent_mcp-0.1.3/src/mcp_agent/core/agent_utils.py
@@ -8,10 +8,11 @@ from mcp_agent.event_progress import ProgressAction
 
 # Handle circular imports
 if TYPE_CHECKING:
-    from mcp_agent.core.proxies import BaseAgentProxy
+    from mcp_agent.core.proxies import BaseAgentProxy
     from mcp_agent.core.types import AgentOrWorkflow, ProxyDict
 else:
-    from mcp_agent.core.proxies import BaseAgentProxy
+    from mcp_agent.core.proxies import BaseAgentProxy
+
     # Define minimal types for runtime
     AgentOrWorkflow = object  # Simple placeholder
     ProxyDict = dict  # Simple placeholder
@@ -27,8 +28,13 @@ def unwrap_proxy(proxy: BaseAgentProxy) -> AgentOrWorkflow:
     Returns:
         The underlying Agent or workflow instance
     """
+    from mcp_agent.core.proxies import LLMAgentProxy, ChainProxy
+
     if isinstance(proxy, LLMAgentProxy):
         return proxy._agent
+    elif isinstance(proxy, ChainProxy):
+        # Return the ChainProxy itself as the workflow
+        return proxy
     return proxy._workflow
 
 
@@ -51,7 +57,7 @@ def get_agent_instances(
 def log_agent_load(app, agent_name: str) -> None:
     """
     Log agent loading event to application logger.
-
+
     Args:
         app: The application instance
         agent_name: Name of the agent being loaded
@@ -62,4 +68,4 @@ def log_agent_load(app, agent_name: str) -> None:
             "progress_action": ProgressAction.LOADED,
             "agent_name": agent_name,
         },
-    )
+    )
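The new `ChainProxy` branch changes what `unwrap_proxy` returns for chains: they now come back as-is rather than unwrapped. A rough sketch of the dispatch, using bare `object()` stand-ins where a real caller would pass `MCPApp` and `Agent` instances:

```python
# Sketch of unwrap_proxy dispatch; object() stand-ins replace real MCPApp/Agent.
from mcp_agent.core.agent_utils import unwrap_proxy
from mcp_agent.core.proxies import LLMAgentProxy, ChainProxy

app, agent = object(), object()
llm_proxy = LLMAgentProxy(app, "researcher", agent)
chain_proxy = ChainProxy(app, "chain", ["first", "second"], {})

assert unwrap_proxy(llm_proxy) is agent          # unwrapped to the Agent
assert unwrap_proxy(chain_proxy) is chain_proxy  # chains are returned as-is
```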
--- fast_agent_mcp-0.1.2/src/mcp_agent/core/enhanced_prompt.py
+++ fast_agent_mcp-0.1.3/src/mcp_agent/core/enhanced_prompt.py
@@ -11,6 +11,7 @@ from prompt_toolkit.key_binding import KeyBindings
 from prompt_toolkit.completion import Completer, Completion
 from prompt_toolkit.lexers import PygmentsLexer
 from prompt_toolkit.filters import Condition
+from prompt_toolkit.styles import Style
 from pygments.lexers.python import PythonLexer
 from rich import print as rich_print
 
@@ -31,8 +32,8 @@ available_agents = set()
 # Keep track of multi-line mode state
 in_multiline_mode = False
 
-# Track
-
+# Track whether help text has been shown globally
+help_message_shown = False
 
 
 class AgentCompleter(Completer):
@@ -87,7 +88,7 @@ class AgentCompleter(Completer):
                 start_position=-len(agent_name),
                 display=agent,
                 display_meta=agent_type,
-                style="bg:ansiblack fg:ansiblue",
+                # style="bg:ansiblack fg:ansiblue",
             )
 
 
@@ -168,7 +169,7 @@ async def get_enhanced_input(
     Returns:
         User input string
     """
-    global in_multiline_mode, available_agents
+    global in_multiline_mode, available_agents, help_message_shown
 
     # Update global state
     in_multiline_mode = multiline
@@ -210,10 +211,21 @@
         shortcuts = [(k, v) for k, v in shortcuts if v]
 
         shortcut_text = " | ".join(f"{key}:{action}" for key, action in shortcuts)
+
        return HTML(
-            f" <{toolbar_color}> {agent_name} </
+            f" <style fg='{toolbar_color}' bg='ansiblack'> {agent_name} </style> Mode: <style fg='{mode_style}' bg='ansiblack'> {mode_text} </style> {newline} | {shortcut_text} | v{app_version}"
        )
 
+    # A more terminal-agnostic style that should work across themes
+    custom_style = Style.from_dict(
+        {
+            "completion-menu.completion": "bg:#ansiblack #ansigreen",
+            "completion-menu.completion.current": "bg:#ansiblack bold #ansigreen",
+            "completion-menu.meta.completion": "bg:#ansiblack #ansiblue",
+            "completion-menu.meta.completion.current": "bg:#ansibrightblack #ansiblue",
+            "bottom-toolbar": "#ansiblack bg:#ansigray",
+        }
+    )
     # Create session with history and completions
     session = PromptSession(
         history=agent_histories[agent_name],
@@ -227,7 +239,8 @@
         multiline=Condition(lambda: in_multiline_mode),
         complete_in_thread=True,
         mouse_support=False,
-        bottom_toolbar=get_toolbar,
+        bottom_toolbar=get_toolbar,
+        style=custom_style,
     )
 
     # Create key bindings with a reference to the app
@@ -237,7 +250,7 @@
     session.app.key_bindings = bindings
 
     # Create formatted prompt text
-    prompt_text = f"<
+    prompt_text = f"<ansibrightblue>{agent_name}</ansibrightblue> > "
 
     # Add default value display if requested
     if show_default and default and default != "STOP":
@@ -246,25 +259,24 @@
     # Only show hints at startup if requested
     if show_stop_hint:
         if default == "STOP":
-            rich_print("[
-        else:
-            rich_print("Enter a prompt, or [red]STOP[/red] to finish")
+            rich_print("Enter a prompt, [red]STOP[/red] to finish")
         if default:
             rich_print(
                 f"Press <ENTER> to use the default prompt:\n[cyan]{default}[/cyan]"
             )
 
-    # Mention available features but only on first usage
-    if
+    # Mention available features but only on first usage globally
+    if not help_message_shown:
         if is_human_input:
             rich_print(
-                "[dim]
+                "[dim]Type /help for commands. Ctrl+T toggles multiline mode.[/dim]"
             )
         else:
             rich_print(
-                "[dim]
+                "[dim]Type /help for commands, @agent to switch agent. Ctrl+T toggles multiline mode. [/dim]"
             )
-
+        rich_print()
+        help_message_shown = True
 
     # Process special commands
     def pre_process_input(text):
@@ -325,7 +337,6 @@ async def handle_special_commands(command, agent_app=None):
         rich_print(
             " Enter - Submit (normal mode) / New line (multiline mode)"
         )
-        rich_print(" \\ + Enter - Insert new line in normal mode")
         rich_print(" Ctrl+Enter - Always submit (in any mode)")
         rich_print(" Ctrl+T - Toggle multiline mode")
         rich_print(" Ctrl+L - Clear input")
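The `custom_style` and toolbar changes above are standard prompt_toolkit usage. As a standalone sketch of the same idea, with simplified color strings and an invented toolbar text:

```python
# Simplified standalone sketch of the styled session (prompt_toolkit API).
from prompt_toolkit import PromptSession
from prompt_toolkit.styles import Style

custom_style = Style.from_dict(
    {
        "completion-menu.completion": "bg:ansiblack ansigreen",
        "bottom-toolbar": "ansiblack bg:ansigray",
    }
)

session = PromptSession(
    bottom_toolbar=lambda: " demo-agent | Mode: normal | v0.1.3",
    style=custom_style,
)
print(session.prompt("demo-agent > "))  # blocks until the user submits a line
```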
--- fast_agent_mcp-0.1.2/src/mcp_agent/core/fastagent.py
+++ fast_agent_mcp-0.1.3/src/mcp_agent/core/fastagent.py
@@ -696,6 +696,7 @@ class FastAgent(ContextDependent):
         use_history: bool = True,
         request_params: Optional[Dict] = None,
         continue_with_final: bool = True,
+        cumulative: bool = False,
     ) -> Callable:
         """
         Decorator to create and register a chain of agents.
@@ -709,6 +710,8 @@ class FastAgent(ContextDependent):
             use_history: Whether to maintain conversation history
             request_params: Additional request parameters
             continue_with_final: When using prompt(), whether to continue with the final agent after processing chain (default: True)
+            cumulative: When True, each agent receives all previous agent responses concatenated (default: False)
+                        When False, each agent only gets the output of the previous agent (default behavior)
         """
         # Support both parameter names
         agent_sequence = sequence or agents
@@ -717,8 +720,11 @@ class FastAgent(ContextDependent):
 
         # Auto-generate instruction if not provided
         if instruction is None:
-            #
-
+            # Generate an appropriate instruction based on mode
+            if cumulative:
+                instruction = f"Cumulative chain of agents: {', '.join(agent_sequence)}"
+            else:
+                instruction = f"Chain of agents: {', '.join(agent_sequence)}"
 
         decorator = self._create_decorator(
             AgentType.CHAIN,
@@ -734,6 +740,7 @@ class FastAgent(ContextDependent):
             use_history=use_history,
             request_params=request_params,
             continue_with_final=continue_with_final,
+            cumulative=cumulative,
         )
         return decorator
 
@@ -912,9 +919,13 @@
                     f"evaluator={agent_data['evaluator']}"
                 )
 
-
-
-            )
+            # Get model from generator if it's an Agent, or from config otherwise
+            optimizer_model = None
+            if isinstance(generator, Agent):
+                optimizer_model = generator.config.model
+            elif hasattr(generator, '_sequence') and hasattr(generator, '_agent_proxies'):
+                # For ChainProxy, use the config model directly
+                optimizer_model = config.model
 
             instance = EvaluatorOptimizerLLM(
                 name=config.name,  # Pass name from config
@@ -993,6 +1004,10 @@ class FastAgent(ContextDependent):
                 instance._continue_with_final = agent_data.get(
                     "continue_with_final", True
                 )
+                # Set cumulative behavior from configuration
+                instance._cumulative = agent_data.get(
+                    "cumulative", False
+                )
 
             # We removed the AgentType.PASSTHROUGH case
             # Passthrough agents are now created as BASIC agents with a special LLM
@@ -1307,12 +1322,6 @@ class FastAgent(ContextDependent):
         # First create basic agents
         active_agents = await self._create_basic_agents(agent_app)
 
-        # Create workflow types that don't depend on other workflows first
-        evaluator_optimizers = await self._create_evaluator_optimizers(
-            agent_app, active_agents
-        )
-        active_agents.update(evaluator_optimizers)
-
         # Create parallel agents next as they might be dependencies
         parallel_agents = await self._create_parallel_agents(
             agent_app, active_agents
@@ -1323,11 +1332,17 @@
         routers = await self._create_routers(agent_app, active_agents)
         active_agents.update(routers)
 
-        # Create chains next
+        # Create chains next - MOVED UP because evaluator-optimizers might depend on chains
         chains = await self._create_agents_in_dependency_order(
             agent_app, active_agents, AgentType.CHAIN
         )
         active_agents.update(chains)
+
+        # Now create evaluator-optimizers AFTER chains are available
+        evaluator_optimizers = await self._create_evaluator_optimizers(
+            agent_app, active_agents
+        )
+        active_agents.update(evaluator_optimizers)
 
         # Create orchestrators last as they might depend on any other agent type
         orchestrators = await self._create_orchestrators(
--- /dev/null
+++ fast_agent_mcp-0.1.3/src/mcp_agent/core/proxies.py
@@ -0,0 +1,170 @@
+"""
+Proxy classes for agent interactions.
+These proxies provide a consistent interface for interacting with different types of agents.
+"""
+
+from typing import List, Optional, Dict, TYPE_CHECKING
+
+from mcp_agent.agents.agent import Agent
+from mcp_agent.app import MCPApp
+
+# Handle circular imports
+if TYPE_CHECKING:
+    from mcp_agent.core.types import WorkflowType, ProxyDict
+else:
+    # Define minimal versions for runtime
+    from typing import Any
+
+    # Use Any for runtime to avoid circular imports
+    WorkflowType = Any
+    ProxyDict = Dict[str, "BaseAgentProxy"]
+
+
+class BaseAgentProxy:
+    """Base class for all proxy types"""
+
+    def __init__(self, app: MCPApp, name: str):
+        self._app = app
+        self._name = name
+
+    async def __call__(self, message: Optional[str] = None) -> str:
+        """Allow: agent.researcher('message')"""
+        return await self.send(message)
+
+    async def send(self, message: Optional[str] = None) -> str:
+        """Allow: agent.researcher.send('message')"""
+        if message is None:
+            return await self.prompt()
+        return await self.generate_str(message)
+
+    async def prompt(self, default_prompt: str = "") -> str:
+        """Allow: agent.researcher.prompt()"""
+        return await self._app.prompt(self._name, default_prompt)
+
+    async def generate_str(self, message: str) -> str:
+        """Generate response for a message - must be implemented by subclasses"""
+        raise NotImplementedError("Subclasses must implement generate_str")
+
+
+class AgentProxy(BaseAgentProxy):
+    """Legacy proxy for individual agent operations"""
+
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """Forward only the message to app.send, ignoring kwargs for legacy compatibility"""
+        return await self._app.send(self._name, message)
+
+
+class LLMAgentProxy(BaseAgentProxy):
+    """Proxy for regular agents that use _llm.generate_str()"""
+
+    def __init__(self, app: MCPApp, name: str, agent: Agent):
+        super().__init__(app, name)
+        self._agent = agent
+
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """Forward message and all kwargs to the agent's LLM"""
+        return await self._agent._llm.generate_str(message, **kwargs)
+
+
+class WorkflowProxy(BaseAgentProxy):
+    """Proxy for workflow types that implement generate_str() directly"""
+
+    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
+        super().__init__(app, name)
+        self._workflow = workflow
+
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """Forward message and all kwargs to the underlying workflow"""
+        return await self._workflow.generate_str(message, **kwargs)
+
+
+class RouterProxy(BaseAgentProxy):
+    """Proxy for LLM Routers"""
+
+    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
+        super().__init__(app, name)
+        self._workflow = workflow
+
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """
+        Route the message and forward kwargs to the resulting agent if applicable.
+        Note: For now, route() itself doesn't accept kwargs.
+        """
+        results = await self._workflow.route(message)
+        if not results:
+            return "No appropriate route found for the request."
+
+        # Get the top result
+        top_result = results[0]
+        if isinstance(top_result.result, Agent):
+            # Agent route - delegate to the agent, passing along kwargs
+            agent = top_result.result
+            return await agent._llm.generate_str(message, **kwargs)
+        elif isinstance(top_result.result, str):
+            # Server route - use the router directly
+            return "Tool call requested by router - not yet supported"
+
+        return f"Routed to: {top_result.result} ({top_result.confidence}): {top_result.reasoning}"
+
+
+class ChainProxy(BaseAgentProxy):
+    """Proxy for chained agent operations"""
+
+    def __init__(
+        self, app: MCPApp, name: str, sequence: List[str], agent_proxies: ProxyDict
+    ):
+        super().__init__(app, name)
+        self._sequence = sequence
+        self._agent_proxies = agent_proxies
+        self._continue_with_final = True  # Default behavior
+        self._cumulative = False  # Default to sequential chaining
+
+    async def generate_str(self, message: str) -> str:
+        """Chain message through a sequence of agents with optional cumulative behavior"""
+        if not self._cumulative:
+            # Original sequential behavior
+            current_message = message
+            for agent_name in self._sequence:
+                proxy = self._agent_proxies[agent_name]
+                current_message = await proxy.generate_str(current_message)
+            return current_message
+        else:
+            # Cumulative behavior
+            original_message = message
+            agent_responses = {}
+
+            for agent_name in self._sequence:
+                proxy = self._agent_proxies[agent_name]
+
+                if not agent_responses:  # First agent
+                    response = await proxy.generate_str(original_message)
+                else:
+                    # Construct context with previous responses
+                    context_message = "The following request was sent to the agents:\n"
+                    context_message += f"<fastagent:request>\n{original_message}\n</fastagent:request>\n\n"
+
+                    context_message += "Previous agent responses:\n"
+
+                    for prev_name in self._sequence:
+                        if prev_name in agent_responses:
+                            prev_response = agent_responses[prev_name]
+                            context_message += f'<fastagent:response agent="{prev_name}">\n{prev_response}\n</fastagent:response>\n\n'
+
+                    context_message += f"Your task is to build upon this work to address: {original_message}"
+
+                    response = await proxy.generate_str(context_message)
+
+                agent_responses[agent_name] = response
+
+            # Format final output with ALL responses in XML format
+            final_output = "The following request was sent to the agents:\n"
+            final_output += (
+                f"<fastagent:request>\n{original_message}\n</fastagent:request>\n\n"
+            )
+
+            for agent_name in self._sequence:
+                response = agent_responses[agent_name]
+                final_output += f'<fastagent:response agent="{agent_name}">\n{response}\n</fastagent:response>\n\n'
+
+            # Return the XML-structured combination of all responses
+            return final_output.strip()
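A rough sketch of the cumulative path in `ChainProxy.generate_str`, driving it with stub proxies instead of real agents (every name below is invented; only the stored attributes of `BaseAgentProxy` are relied on):

```python
# Stub-driven demo of ChainProxy's cumulative mode; no LLMs involved.
import asyncio
from mcp_agent.core.proxies import BaseAgentProxy, ChainProxy

class EchoProxy(BaseAgentProxy):
    async def generate_str(self, message: str) -> str:
        return f"{self._name} saw {len(message)} chars"

app = object()  # stand-in for MCPApp; ChainProxy only stores it here
proxies = {name: EchoProxy(app, name) for name in ("draft", "review")}
chain = ChainProxy(app, "chain", ["draft", "review"], proxies)
chain._cumulative = True  # mirrors the instance._cumulative wiring in fastagent.py

print(asyncio.run(chain.generate_str("Write a haiku")))
# Output is the <fastagent:request>/<fastagent:response> structured combination.
```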
--- /dev/null
+++ fast_agent_mcp-0.1.3/src/mcp_agent/core/simulator_registry.py
@@ -0,0 +1,22 @@
+from typing import Optional, Any
+
+
+class SimulatorRegistry:
+    """Registry to access simulator instances for testing assertions"""
+
+    _instances = {}
+
+    @classmethod
+    def register(cls, name: str, simulator: "Any"):
+        """Register a simulator instance"""
+        cls._instances[name] = simulator
+
+    @classmethod
+    def get(cls, name: str) -> Optional["Any"]:
+        """Get a simulator by name"""
+        return cls._instances.get(name)
+
+    @classmethod
+    def clear(cls):
+        """Clear registry (useful between tests)"""
+        cls._instances.clear()
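`SimulatorRegistry` is a plain class-level dict; a short sketch of the intended test flow (the simulator object here is just a placeholder):

```python
# Minimal test-flow sketch for SimulatorRegistry; FakeSimulator is a placeholder.
from mcp_agent.core.simulator_registry import SimulatorRegistry

class FakeSimulator:
    def __init__(self):
        self.calls = []

SimulatorRegistry.register("passthrough", FakeSimulator())
assert SimulatorRegistry.get("passthrough").calls == []

SimulatorRegistry.clear()  # reset between tests
assert SimulatorRegistry.get("passthrough") is None
```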
--- /dev/null
+++ fast_agent_mcp-0.1.3/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml
@@ -0,0 +1,41 @@
+default_model: sonnet
+
+# on windows, adjust the mount point to be the full path e.g. x:/temp/data-analysis/mount-point:/mnt/data/
+
+mcp:
+  servers:
+    interpreter:
+      command: "docker"
+      args:
+        [
+          "run",
+          "-i",
+          "--rm",
+          "--pull=always",
+          "-v",
+          "./mount-point:/mnt/data/",
+          "ghcr.io/evalstate/mcp-py-repl:latest",
+        ]
+      roots:
+        - uri: "file://./mount-point/"
+          name: "test_data"
+          server_uri_alias: "file:///mnt/data/"
+    filesystem:
+      # On windows update the command and arguments to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-filesystem", "./mount-point/"]
+    fetch:
+      command: "uvx"
+      args: ["mcp-server-fetch"]
+    brave:
+      # On windows replace the command and args line to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-brave-search"]