fast-agent-mcp 0.0.11__tar.gz → 0.0.12__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/PKG-INFO +3 -1
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/README.md +1 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/pyproject.toml +2 -1
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/cli/commands/bootstrap.py +1 -5
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/cli/main.py +4 -4
- fast_agent_mcp-0.0.12/src/mcp_agent/core/enhanced_prompt.py +315 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/core/fastagent.py +66 -28
- fast_agent_mcp-0.0.12/src/mcp_agent/resources/examples/internal/agent.py +17 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/internal/job.py +1 -1
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
- fast_agent_mcp-0.0.12/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +53 -0
- fast_agent_mcp-0.0.12/src/mcp_agent/resources/examples/researcher/researcher-eval.py +53 -0
- fast_agent_mcp-0.0.12/src/mcp_agent/resources/examples/researcher/researcher.py +38 -0
- fast_agent_mcp-0.0.12/src/mcp_agent/resources/examples/workflows/agent.py +17 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/workflows/evaluator.py +6 -3
- fast_agent_mcp-0.0.12/src/mcp_agent/resources/examples/workflows/fastagent.py +22 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +31 -30
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/llm/augmented_llm.py +5 -2
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/llm/augmented_llm_anthropic.py +3 -1
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/llm/augmented_llm_openai.py +20 -9
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/.gitignore +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/LICENSE +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/agents/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/agents/agent.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/app.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/cli/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/cli/__main__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/cli/commands/config.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/cli/commands/setup.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/cli/terminal.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/config.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/console.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/context.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/context_dependent.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/core/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/core/exceptions.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/core/server_validation.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/eval/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/event_progress.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/executor/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/executor/decorator_registry.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/executor/executor.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/executor/task_registry.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/executor/temporal.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/executor/workflow.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/executor/workflow_signal.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/human_input/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/human_input/handler.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/human_input/types.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/logging/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/logging/events.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/logging/json_serializer.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/logging/listeners.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/logging/logger.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/logging/rich_progress.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/logging/tracing.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/logging/transport.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/mcp/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/mcp/gen_client.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/mcp/mcp_activity.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/mcp/mcp_agent_server.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/mcp/mcp_aggregator.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/mcp/stdio.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/mcp_server_registry.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/progress_display.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/mcp_researcher/researcher.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/workflows/agent_build.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/workflows/chaining.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/telemetry/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/telemetry/usage_tracking.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/embedding/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/embedding/embedding_base.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/embedding/embedding_cohere.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/embedding/embedding_openai.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/llm/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/llm/llm_selector.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/llm/model_factory.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/orchestrator/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/orchestrator/orchestrator.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/orchestrator/orchestrator_models.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/orchestrator/orchestrator_prompts.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/parallel/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/parallel/fan_in.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/parallel/fan_out.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/parallel/parallel_llm.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/router/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/router/router_base.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/router/router_embedding.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/router/router_embedding_cohere.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/router/router_embedding_openai.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/router/router_llm.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/swarm/__init__.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/swarm/swarm.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/swarm/swarm_anthropic.py +0 -0
- {fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/workflows/swarm/swarm_openai.py +0 -0

{fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.0.11
+Version: 0.0.12
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -217,6 +217,7 @@ Requires-Dist: numpy>=2.2.1
 Requires-Dist: openai>=1.63.2
 Requires-Dist: opentelemetry-distro>=0.50b0
 Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
+Requires-Dist: prompt-toolkit>=3.0.50
 Requires-Dist: pydantic-settings>=2.7.0
 Requires-Dist: pydantic>=2.10.4
 Requires-Dist: pyyaml>=6.0.2
@@ -271,6 +272,7 @@ Other bootstrap examples include a Researcher (with Evaluator-Optimizer workflow
 ### llmindset.co.uk fork:
 
 - "FastAgent" style prototyping, with per-agent models
+- Api keys through Environment Variables
 - Warm-up / Post-Workflow Agent Interactions
 - Quick Setup
 - Interactive Prompt Mode

{fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/README.md
@@ -35,6 +35,7 @@ Other bootstrap examples include a Researcher (with Evaluator-Optimizer workflow
 ### llmindset.co.uk fork:
 
 - "FastAgent" style prototyping, with per-agent models
+- Api keys through Environment Variables
 - Warm-up / Post-Workflow Agent Interactions
 - Quick Setup
 - Interactive Prompt Mode

{fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "fast-agent-mcp"
-version = "0.0.11"
+version = "0.0.12"
 description = "Define, Prompt and Test MCP enabled Agents and Workflows"
 readme = "README.md"
 license = { file = "LICENSE" }
@@ -29,6 +29,7 @@ dependencies = [
     "scikit-learn>=1.6.0",
     "anthropic>=0.42.0",
     "openai>=1.63.2",
+    "prompt-toolkit>=3.0.50",
 ]
 
 [project.optional-dependencies]

{fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/cli/commands/bootstrap.py
@@ -33,11 +33,7 @@ EXAMPLE_TYPES = {
         "description": "Research agent example with additional evaluation/optimization\n"
         "example. Uses Brave Search and Docker MCP Servers.\n"
         "Creates examples in a 'researcher' subdirectory.",
-        "files": [
-            "researcher.py",
-            "researcher-eval.py",
-            "mcp_agent.secrets.yaml.example",
-        ],
+        "files": ["researcher.py", "researcher-eval.py", "fastagent.config.yaml"],
         "create_subdir": True,
     },
     "data-analysis": {

{fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/cli/main.py
@@ -40,11 +40,11 @@ def show_welcome():
 
     console.print("\n[bold]Getting Started:[/bold]")
     console.print("1. Set up a new project:")
-    console.print("
+    console.print("  fastagent setup")
     console.print("\n2. Try an example:")
-    console.print("
+    console.print("  fastagent bootstrap create decorator")
     console.print("\nUse --help with any command for more information")
-    console.print("Example:
+    console.print("Example: fastagent bootstrap --help")
 
 
 @app.callback(invoke_without_command=True)
@@ -56,7 +56,7 @@ def main(
         True, "--color/--no-color", help="Enable/disable color output"
     ),
 ):
-    """
+    """FastAgent CLI - Build effective agents using Model Context Protocol (MCP).
 
     Use --help with any command for detailed usage information.
     """

fast_agent_mcp-0.0.12/src/mcp_agent/core/enhanced_prompt.py
@@ -0,0 +1,315 @@
+"""
+Enhanced prompt functionality with advanced prompt_toolkit features.
+"""
+
+from typing import List
+from prompt_toolkit import PromptSession
+from prompt_toolkit.formatted_text import HTML
+from prompt_toolkit.history import InMemoryHistory
+from prompt_toolkit.key_binding import KeyBindings
+from prompt_toolkit.completion import Completer, Completion
+from prompt_toolkit.lexers import PygmentsLexer
+from prompt_toolkit.filters import Condition
+from pygments.lexers.python import PythonLexer
+from rich import print as rich_print
+
+# Map of agent names to their history
+agent_histories = {}
+
+# Store available agents for auto-completion
+available_agents = set()
+
+# Keep track of multi-line mode state
+in_multiline_mode = False
+
+# Track which agents have already shown welcome messages
+agent_messages_shown = set()
+
+
+class AgentCompleter(Completer):
+    """Provide completion for agent names and common commands."""
+
+    def __init__(self, agents: List[str], commands: List[str] = None, agent_types: dict = None):
+        self.agents = agents
+        self.commands = commands or ["help", "clear", "STOP"]
+        self.agent_types = agent_types or {}
+
+    def get_completions(self, document, complete_event):
+        """Synchronous completions method - this is what prompt_toolkit expects by default"""
+        text = document.text_before_cursor.lower()
+
+        # Complete commands
+        if text.startswith("/"):
+            cmd = text[1:]
+            for command in self.commands:
+                if command.lower().startswith(cmd):
+                    yield Completion(
+                        command,
+                        start_position=-len(cmd),
+                        display=command,
+                        display_meta="Command",
+                    )
+
+        # Complete agent names for agent-related commands
+        elif text.startswith("@"):
+            agent_name = text[1:]
+            for agent in self.agents:
+                if agent.lower().startswith(agent_name.lower()):
+                    # Get agent type or default to "Agent"
+                    agent_type = self.agent_types.get(agent, "Agent")
+                    yield Completion(
+                        agent,
+                        start_position=-len(agent_name),
+                        display=agent,
+                        display_meta=agent_type,
+                    )
+
+
+def create_keybindings(on_toggle_multiline=None, app=None):
+    """Create custom key bindings."""
+    kb = KeyBindings()
+
+    @kb.add("c-m", filter=Condition(lambda: not in_multiline_mode))
+    def _(event):
+        """Enter: accept input when not in multiline mode."""
+        event.current_buffer.validate_and_handle()
+
+    @kb.add("c-m", filter=Condition(lambda: in_multiline_mode))
+    def _(event):
+        """Enter: insert newline when in multiline mode."""
+        event.current_buffer.insert_text("\n")
+
+    @kb.add("escape", "enter")
+    def _(event):
+        """Alt+Enter: always submit even in multiline mode."""
+        event.current_buffer.validate_and_handle()
+
+    @kb.add("c-t")
+    def _(event):
+        """Ctrl+T: Toggle multiline mode."""
+        global in_multiline_mode
+        in_multiline_mode = not in_multiline_mode
+
+        # Force redraw the app to update toolbar
+        if event.app:
+            event.app.invalidate()
+        elif app:
+            app.invalidate()
+
+        # Call the toggle callback if provided
+        if on_toggle_multiline:
+            on_toggle_multiline(in_multiline_mode)
+
+        # Instead of printing, we'll just update the toolbar
+        # The toolbar will show the current mode
+
+    @kb.add("c-l")
+    def _(event):
+        """Ctrl+L: Clear input."""
+        event.current_buffer.text = ""
+
+    return kb
+
+
+async def get_enhanced_input(
+    agent_name: str,
+    default: str = "",
+    show_default: bool = False,
+    show_stop_hint: bool = False,
+    multiline: bool = False,
+    available_agent_names: List[str] = None,
+    syntax: str = None,
+    agent_types: dict = None,
+) -> str:
+    """
+    Enhanced input with advanced prompt_toolkit features.
+
+    Args:
+        agent_name: Name of the agent (used for prompt and history)
+        default: Default value if user presses enter
+        show_default: Whether to show the default value in the prompt
+        show_stop_hint: Whether to show the STOP hint
+        multiline: Start in multiline mode
+        available_agent_names: List of agent names for auto-completion
+        syntax: Syntax highlighting (e.g., 'python', 'sql')
+        agent_types: Dictionary mapping agent names to their types for display
+
+    Returns:
+        User input string
+    """
+    global in_multiline_mode, available_agents
+
+    # Update global state
+    in_multiline_mode = multiline
+    if available_agent_names:
+        available_agents = set(available_agent_names)
+
+    # Get or create history object for this agent
+    if agent_name not in agent_histories:
+        agent_histories[agent_name] = InMemoryHistory()
+
+    # Define callback for multiline toggle
+    def on_multiline_toggle(enabled):
+        nonlocal session
+        if hasattr(session, "app") and session.app:
+            session.app.invalidate()
+
+    # Define toolbar function that will update dynamically
+    def get_toolbar():
+        if in_multiline_mode:
+            mode_style = "ansired"  # More noticeable for multiline mode
+            mode_text = "MULTILINE"
+            toggle_text = "Normal Editing"
+        else:
+            mode_style = "ansigreen"
+            mode_text = "NORMAL"
+            toggle_text = "Multiline Editing"
+
+        shortcuts = [
+            ("Ctrl+T", toggle_text),
+            ("Alt+Enter", "Submit" if in_multiline_mode else ""),
+            ("Ctrl+L", "Clear"),
+            ("↑/↓", "History"),
+        ]
+        # Only show relevant shortcuts based on mode
+        shortcuts = [(k, v) for k, v in shortcuts if v]
+
+        shortcut_text = " | ".join(f"{key}:{action}" for key, action in shortcuts)
+        return HTML(
+            f" <b>Agent:</b> <ansiblue> {agent_name} </ansiblue> | <b>Mode:</b> <{mode_style}> {mode_text} </{mode_style}> | {shortcut_text}"
+        )
+
+    # Create session with history and completions
+    session = PromptSession(
+        history=agent_histories[agent_name],
+        completer=AgentCompleter(
+            agents=list(available_agents) if available_agents else [],
+            agent_types=agent_types or {},
+        ),
+        complete_while_typing=True,
+        lexer=PygmentsLexer(PythonLexer) if syntax == "python" else None,
+        multiline=Condition(lambda: in_multiline_mode),
+        complete_in_thread=True,
+        mouse_support=True,
+        bottom_toolbar=get_toolbar,  # Pass the function here
+    )
+
+    # Create key bindings with a reference to the app
+    bindings = create_keybindings(
+        on_toggle_multiline=on_multiline_toggle, app=session.app
+    )
+    session.app.key_bindings = bindings
+
+    # Create formatted prompt text
+    prompt_text = f"<ansicyan>{agent_name}</ansicyan> > "
+
+    # Add default value display if requested
+    if show_default and default and default != "STOP":
+        prompt_text = f"{prompt_text} [<ansigreen>{default}</ansigreen>] "
+
+    # Only show hints at startup if requested
+    if show_stop_hint:
+        if default == "STOP":
+            rich_print("[yellow]Press <ENTER> to finish.[/yellow]")
+        else:
+            rich_print("Enter a prompt, or [red]STOP[/red] to finish")
+            if default:
+                rich_print(
+                    f"Press <ENTER> to use the default prompt:\n[cyan]{default}[/cyan]"
+                )
+
+    # Mention available features but only on first usage for this agent
+    if agent_name not in agent_messages_shown:
+        rich_print(
+            "[dim]Tip: Type /help for commands, press F1 for keyboard shortcuts. Ctrl+T toggles multiline mode. @Agent to switch agent[/dim]"
+        )
+        agent_messages_shown.add(agent_name)
+
+    # Process special commands
+    def pre_process_input(text):
+        # Command processing
+        if text and text.startswith("/"):
+            cmd = text[1:].strip().lower()
+            if cmd == "help":
+                return "HELP"
+            elif cmd == "clear":
+                return "CLEAR"
+            elif cmd == "agents":
+                return "LIST_AGENTS"
+
+        # Agent switching
+        if text and text.startswith("@"):
+            return f"SWITCH:{text[1:].strip()}"
+
+        return text
+
+    # Get the input - using async version
+    try:
+        result = await session.prompt_async(HTML(prompt_text), default=default)
+        return pre_process_input(result)
+    except KeyboardInterrupt:
+        # Handle Ctrl+C gracefully
+        return "STOP"
+    except EOFError:
+        # Handle Ctrl+D gracefully
+        return "STOP"
+    except Exception as e:
+        # Log and gracefully handle other exceptions
+        print(f"\nInput error: {type(e).__name__}: {e}")
+        return "STOP"
+
+
+async def handle_special_commands(command, agent_app=None):
+    """Handle special input commands."""
+    # Quick guard for empty or None commands
+    if not command:
+        return False
+
+    # Check for special commands
+    if command == "HELP":
+        rich_print("\n[bold]Available Commands:[/bold]")
+        rich_print("  /help - Show this help")
+        rich_print("  /clear - Clear screen")
+        rich_print("  /agents - List available agents")
+        rich_print("  @agent_name - Switch to agent")
+        rich_print("  STOP - End session")
+        rich_print("\n[bold]Keyboard Shortcuts:[/bold]")
+        rich_print(
+            "  Enter - Submit (normal mode) / New line (multiline mode)"
+        )
+        rich_print("  Alt+Enter - Always submit (even in multiline mode)")
+        rich_print("  Ctrl+T - Toggle multiline mode")
+        rich_print("  Ctrl+L - Clear input")
+        rich_print("  Up/Down - Navigate history")
+        rich_print("  F1 - Show help")
+        return True
+
+    elif command == "CLEAR":
+        # Clear screen (ANSI escape sequence)
+        print("\033c", end="")
+        return True
+
+    elif command == "LIST_AGENTS":
+        if available_agents:
+            rich_print("\n[bold]Available Agents:[/bold]")
+            for agent in sorted(available_agents):
+                rich_print(f"  @{agent}")
+        else:
+            rich_print("[yellow]No agents available[/yellow]")
+        return True
+
+    elif isinstance(command, str) and command.startswith("SWITCH:"):
+        agent_name = command.split(":", 1)[1]
+        if agent_name in available_agents:
+            if agent_app:
+                rich_print(f"[green]Switching to agent: {agent_name}[/green]")
+                return {"switch_agent": agent_name}
+            else:
+                rich_print(
+                    "[yellow]Agent switching not available in this context[/yellow]"
+                )
+        else:
+            rich_print(f"[red]Unknown agent: {agent_name}[/red]")
+        return True
+
+    return False
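
Note: a minimal standalone sketch (not part of this release) of how the new enhanced_prompt helpers can be driven in a loop. The agent names "researcher" and "evaluator" and the truthy agent_app placeholder are illustrative assumptions; inside the package, AgentApp.prompt() does this wiring, as the fastagent.py hunks below show.

import asyncio

from mcp_agent.core.enhanced_prompt import get_enhanced_input, handle_special_commands


async def demo() -> None:
    agent = "researcher"  # hypothetical starting agent
    while True:
        user_input = await get_enhanced_input(
            agent_name=agent,
            show_stop_hint=True,
            available_agent_names=["researcher", "evaluator"],
            agent_types={"researcher": "Agent", "evaluator": "Evaluator"},
        )
        # "/help", "/clear", "/agents" are pre-processed into HELP/CLEAR/LIST_AGENTS;
        # "@name" becomes "SWITCH:name". handle_special_commands only checks agent_app
        # for truthiness, so any placeholder enables the switch path in this sketch.
        result = await handle_special_commands(user_input, agent_app=object())
        if isinstance(result, dict) and "switch_agent" in result:
            agent = result["switch_agent"]
            continue
        if result:  # command was handled (help, clear, agent list)
            continue
        if user_input.upper() == "STOP":  # typed STOP, Ctrl+C or Ctrl+D
            break
        print(f"[{agent}] received: {user_input}")


if __name__ == "__main__":
    asyncio.run(demo())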

{fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/core/fastagent.py
@@ -28,7 +28,6 @@ from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
 )
 from mcp_agent.workflows.router.router_llm import LLMRouter
 from mcp_agent.config import Settings
-from rich.prompt import Prompt
 from rich import print
 from mcp_agent.progress_display import progress_display
 from mcp_agent.workflows.llm.model_factory import ModelFactory
@@ -161,34 +160,73 @@ class AgentApp:
 
     async def prompt(self, agent_name: Optional[str] = None, default: str = "") -> str:
         """
-        Interactive prompt for sending messages.
+        Interactive prompt for sending messages with advanced features.
 
         Args:
             agent_name: Optional target agent name (uses default if not specified)
-
+            default: Default message to use when user presses enter
         """
+        from .enhanced_prompt import get_enhanced_input, handle_special_commands
 
         agent = agent_name or self._default
 
         if agent not in self._agents:
             raise ValueError(f"No agent named '{agent}'")
+
+        # Pass all available agent names for auto-completion
+        available_agents = list(self._agents.keys())
+
+        # Create agent_types dictionary mapping agent names to their types
+        agent_types = {}
+        for name, proxy in self._agents.items():
+            # Determine agent type based on the proxy type
+            if isinstance(proxy, LLMAgentProxy):
+                # Convert AgentType.BASIC.value ("agent") to "Agent"
+                agent_types[name] = "Agent"
+            elif isinstance(proxy, RouterProxy):
+                agent_types[name] = "Router"
+            elif isinstance(proxy, WorkflowProxy):
+                # For workflow proxies, check the workflow type
+                workflow = proxy._workflow
+                if isinstance(workflow, Orchestrator):
+                    agent_types[name] = "Orchestrator"
+                elif isinstance(workflow, ParallelLLM):
+                    agent_types[name] = "Parallel"
+                elif isinstance(workflow, EvaluatorOptimizerLLM):
+                    agent_types[name] = "Evaluator"
+                else:
+                    agent_types[name] = "Workflow"
+
         result = ""
         while True:
             with progress_display.paused():
-
-
-
-
-
-
-
-
-
-
-                prompt_text = f"[blue]{agent}[/blue] >"
-                user_input = Prompt.ask(
-                    prompt=prompt_text, default=default, show_default=False
+                # Use the enhanced input method with advanced features
+                user_input = await get_enhanced_input(
+                    agent_name=agent,
+                    default=default,
+                    show_default=(default != ""),
+                    show_stop_hint=True,
+                    multiline=False,  # Default to single-line mode
+                    available_agent_names=available_agents,
+                    syntax=None,  # Can enable syntax highlighting for code input
+                    agent_types=agent_types,  # Pass agent types for display
                 )
+
+                # Handle special commands
+                command_result = await handle_special_commands(user_input, self)
+
+                # Check if we should switch agents
+                if (
+                    isinstance(command_result, dict)
+                    and "switch_agent" in command_result
+                ):
+                    agent = command_result["switch_agent"]
+                    continue
+
+                # Skip further processing if command was handled
+                if command_result:
+                    continue
+
             if user_input.upper() == "STOP":
                 return
             if user_input == "":
@@ -400,12 +438,12 @@ class FastAgent(ContextDependent):
         elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
             # Check both evaluator and optimizer exist
             evaluator = agent_data["evaluator"]
-
+            generator = agent_data["generator"]
             missing = []
             if evaluator not in available_components:
                 missing.append(f"evaluator: {evaluator}")
-            if
-                missing.append(f"
+            if generator not in available_components:
+                missing.append(f"generator: {generator}")
             if missing:
                 raise AgentConfigError(
                     f"Evaluator-Optimizer '{name}' references non-existent components: {', '.join(missing)}"
@@ -672,7 +710,7 @@ class FastAgent(ContextDependent):
     def evaluator_optimizer(
         self,
        name: str,
-
+        generator: str,
        evaluator: str,
        min_rating: str = "GOOD",
        max_refinements: int = 3,
@@ -684,7 +722,7 @@ class FastAgent(ContextDependent):
 
         Args:
             name: Name of the workflow
-
+            generator: Name of the generator agent
             evaluator: Name of the evaluator agent
             min_rating: Minimum acceptable quality rating (EXCELLENT, GOOD, FAIR, POOR)
             max_refinements: Maximum number of refinement iterations
@@ -699,7 +737,7 @@ class FastAgent(ContextDependent):
             wrapper_needed=True,
         )(
             name=name,
-
+            generator=generator,
             evaluator=evaluator,
             min_rating=min_rating,
             max_refinements=max_refinements,
@@ -853,27 +891,27 @@ class FastAgent(ContextDependent):
 
                 elif agent_type == AgentType.EVALUATOR_OPTIMIZER:
                     # Get the referenced agents - unwrap from proxies
-
-                        active_agents[agent_data["
+                    generator = self._unwrap_proxy(
+                        active_agents[agent_data["generator"]]
                     )
                     evaluator = self._unwrap_proxy(
                         active_agents[agent_data["evaluator"]]
                    )
 
-                    if not
+                    if not generator or not evaluator:
                         raise ValueError(
                             f"Missing agents for workflow {name}: "
-                            f"
+                            f"generator={agent_data['generator']}, "
                             f"evaluator={agent_data['evaluator']}"
                         )
 
                     # TODO: Remove legacy - factory usage is only needed for str evaluators
                     # Later this should only be passed when evaluator is a string
                     optimizer_model = (
-
+                        generator.config.model if isinstance(generator, Agent) else None
                     )
                     instance = EvaluatorOptimizerLLM(
-
+                        generator=generator,
                         evaluator=evaluator,
                         min_rating=QualityRating[agent_data["min_rating"]],
                         max_refinements=agent_data["max_refinements"],
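
Note: a minimal sketch (not taken from the diff) of declaring an evaluator-optimizer workflow against the renamed generator parameter shown above. The @fast.agent keywords name and instruction, the stacked-decorator layout, and the agent names are assumptions modeled on the package's bundled examples; only the evaluator_optimizer signature comes from this diff.

import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("Evaluator-Optimizer Example")


# The two agents and their keyword names are assumed for illustration.
@fast.agent(name="writer", instruction="Draft the requested content.")
@fast.agent(name="reviewer", instruction="Rate the draft and suggest improvements.")
@fast.evaluator_optimizer(
    name="quality_writer",
    generator="writer",    # parameter renamed to "generator" in 0.0.12
    evaluator="reviewer",
    min_rating="GOOD",     # EXCELLENT, GOOD, FAIR or POOR
    max_refinements=3,
)
async def main():
    async with fast.run() as agent:
        await agent()  # drop into the interactive prompt


if __name__ == "__main__":
    asyncio.run(main())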

fast_agent_mcp-0.0.12/src/mcp_agent/resources/examples/internal/agent.py
@@ -0,0 +1,17 @@
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("FastAgent Example")
+
+
+# Define the agent
+@fast.agent(servers=["fetch"])
+async def main():
+    # use the --model command line switch or agent arguments to change model
+    async with fast.run() as agent:
+        await agent()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())

{fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/internal/job.py
RENAMED
@@ -52,7 +52,7 @@ fast = FastAgent("PMO Job Description Generator")
 )
 @fast.evaluator_optimizer(
     name="job_description_writer",
-
+    generator="content_generator",
     evaluator="consistency_checker",
     min_rating="EXCELLENT",
     max_refinements=2,

{fast_agent_mcp-0.0.11 → fast_agent_mcp-0.0.12}/src/mcp_agent/resources/examples/mcp_researcher/researcher-eval.py
@@ -35,7 +35,7 @@ Summarize your evaluation as a structured response with:
 - Specific feedback and areas for improvement.""",
 )
 @agents.evaluator_optimizer(
-
+    generator="Researcher",
     evaluator="Evaluator",
     max_refinements=5,
     min_rating="EXCELLENT",

fast_agent_mcp-0.0.12/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml
@@ -0,0 +1,53 @@
+#
+# Please edit this configuration file to match your environment (on Windows).
+# Examples in comments below - check/change the paths.
+#
+#
+
+execution_engine: asyncio
+logger:
+  type: file
+  level: error
+  truncate_tools: true
+
+mcp:
+  servers:
+    brave:
+      # On windows replace the command and args line to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.`
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-brave-search"]
+      env:
+        # You can also place your BRAVE_API_KEY in the fastagent.secrets.yaml file.
+        BRAVE_API_KEY: <your_brave_api_key>
+    filesystem:
+      # On windows update the command and arguments to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.`
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","./agent_folder"]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-filesystem", "./agent_folder/"]
+    interpreter:
+      command: "docker"
+      args: [
+        "run",
+        "-i",
+        "--rm",
+        "--pull=always",
+        "-v",
+        "./agent_folder:/mnt/data/",
+        # Docker needs the absolute path on Windows (e.g. "x:/fastagent/agent_folder:/mnt/data/")
+        # "./agent_folder:/mnt/data/",
+        "ghcr.io/evalstate/mcp-py-repl:latest",
+      ]
+      roots:
+        - uri: "file://./agent_folder/"
+          name: "agent_folder"
+          server_uri_alias: "file:///mnt/data/"
+    fetch:
+      command: "uvx"
+      args: ["mcp-server-fetch"]