fast-agent-mcp 0.1.12__py3-none-any.whl → 0.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.1.13.dist-info}/METADATA +1 -1
- fast_agent_mcp-0.1.13.dist-info/RECORD +164 -0
- mcp_agent/agents/agent.py +37 -79
- mcp_agent/app.py +16 -22
- mcp_agent/cli/commands/bootstrap.py +22 -52
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +11 -26
- mcp_agent/cli/main.py +6 -9
- mcp_agent/cli/terminal.py +2 -2
- mcp_agent/config.py +1 -5
- mcp_agent/context.py +13 -24
- mcp_agent/context_dependent.py +3 -7
- mcp_agent/core/agent_app.py +45 -121
- mcp_agent/core/agent_utils.py +3 -5
- mcp_agent/core/decorators.py +5 -12
- mcp_agent/core/enhanced_prompt.py +25 -52
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/factory.py +29 -70
- mcp_agent/core/fastagent.py +48 -88
- mcp_agent/core/mcp_content.py +8 -16
- mcp_agent/core/prompt.py +8 -15
- mcp_agent/core/proxies.py +34 -25
- mcp_agent/core/request_params.py +6 -3
- mcp_agent/core/types.py +4 -6
- mcp_agent/core/validation.py +4 -3
- mcp_agent/executor/decorator_registry.py +11 -23
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +28 -74
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +17 -29
- mcp_agent/human_input/handler.py +4 -9
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +15 -17
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +22 -22
- mcp_agent/mcp/gen_client.py +4 -12
- mcp_agent/mcp/interfaces.py +71 -86
- mcp_agent/mcp/mcp_agent_client_session.py +11 -19
- mcp_agent/mcp/mcp_agent_server.py +8 -10
- mcp_agent/mcp/mcp_aggregator.py +45 -117
- mcp_agent/mcp/mcp_connection_manager.py +16 -37
- mcp_agent/mcp/prompt_message_multipart.py +12 -18
- mcp_agent/mcp/prompt_serialization.py +13 -38
- mcp_agent/mcp/prompts/prompt_load.py +99 -0
- mcp_agent/mcp/prompts/prompt_server.py +21 -128
- mcp_agent/mcp/prompts/prompt_template.py +20 -42
- mcp_agent/mcp/resource_utils.py +8 -17
- mcp_agent/mcp/sampling.py +5 -14
- mcp_agent/mcp/stdio.py +11 -8
- mcp_agent/mcp_server/agent_server.py +10 -17
- mcp_agent/mcp_server_registry.py +13 -35
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +1 -1
- mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
- mcp_agent/resources/examples/data-analysis/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +2 -1
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +5 -11
- mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/researcher/researcher-imp.py +3 -4
- mcp_agent/resources/examples/researcher/researcher.py +2 -1
- mcp_agent/resources/examples/workflows/agent_build.py +2 -1
- mcp_agent/resources/examples/workflows/chaining.py +2 -1
- mcp_agent/resources/examples/workflows/evaluator.py +2 -1
- mcp_agent/resources/examples/workflows/human_input.py +2 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +2 -1
- mcp_agent/resources/examples/workflows/parallel.py +2 -1
- mcp_agent/resources/examples/workflows/router.py +2 -1
- mcp_agent/resources/examples/workflows/sse.py +1 -1
- mcp_agent/telemetry/usage_tracking.py +2 -1
- mcp_agent/ui/console_display.py +15 -39
- mcp_agent/workflows/embedding/embedding_base.py +1 -4
- mcp_agent/workflows/embedding/embedding_cohere.py +2 -2
- mcp_agent/workflows/embedding/embedding_openai.py +4 -13
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +23 -57
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +5 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +7 -11
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +11 -22
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +3 -3
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +4 -6
- mcp_agent/workflows/llm/anthropic_utils.py +8 -29
- mcp_agent/workflows/llm/augmented_llm.py +69 -247
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +39 -73
- mcp_agent/workflows/llm/augmented_llm_openai.py +42 -97
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +13 -20
- mcp_agent/workflows/llm/augmented_llm_playback.py +8 -6
- mcp_agent/workflows/llm/memory.py +103 -0
- mcp_agent/workflows/llm/model_factory.py +8 -20
- mcp_agent/workflows/llm/openai_utils.py +1 -1
- mcp_agent/workflows/llm/prompt_utils.py +1 -3
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +47 -89
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +20 -55
- mcp_agent/workflows/llm/providers/openai_multipart.py +19 -61
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +10 -12
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +7 -11
- mcp_agent/workflows/llm/sampling_converter.py +4 -11
- mcp_agent/workflows/llm/sampling_format_converter.py +12 -12
- mcp_agent/workflows/orchestrator/orchestrator.py +24 -67
- mcp_agent/workflows/orchestrator/orchestrator_models.py +14 -40
- mcp_agent/workflows/parallel/fan_in.py +17 -47
- mcp_agent/workflows/parallel/fan_out.py +6 -12
- mcp_agent/workflows/parallel/parallel_llm.py +9 -26
- mcp_agent/workflows/router/router_base.py +19 -49
- mcp_agent/workflows/router/router_embedding.py +11 -25
- mcp_agent/workflows/router/router_embedding_cohere.py +2 -2
- mcp_agent/workflows/router/router_embedding_openai.py +2 -2
- mcp_agent/workflows/router/router_llm.py +12 -28
- mcp_agent/workflows/swarm/swarm.py +20 -48
- mcp_agent/workflows/swarm/swarm_anthropic.py +2 -2
- mcp_agent/workflows/swarm/swarm_openai.py +2 -2
- fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.1.13.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.1.13.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.1.13.dist-info}/licenses/LICENSE +0 -0
@@ -1,4 +1,5 @@
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 agents = FastAgent(name="Enhanced Researcher")
@@ -162,7 +163,7 @@ The researcher should be able to understand exactly why they received their rating
     min_rating="EXCELLENT",
     name="EnhancedResearcher",
 )
-async def main():
+async def main() -> None:
     async with agents.run() as agent:
         # Start with a warm-up to set expectations and explain the research approach
         await agent.Researcher.send(
@@ -180,9 +181,7 @@ async def main():
         # Start the main research workflow
         await agent.prompt("EnhancedResearcher")
 
-        print(
-            "\nWould you like to ask follow-up questions to the Researcher? (Type 'STOP' to end)"
-        )
+        print("\nWould you like to ask follow-up questions to the Researcher? (Type 'STOP' to end)")
         await agent.prompt("Researcher", default="STOP")
 
 
@@ -1,6 +1,7 @@
 import asyncio
 
 from mcp_agent.core.fastagent import FastAgent
+
 # from rich import print
 
 agents = FastAgent(name="Researcher Agent")
@@ -16,7 +17,7 @@ The interpreter has numpy, pandas, matplotlib and seaborn already installed
     """,
     servers=["brave", "interpreter", "filesystem", "fetch"],
 )
-async def main():
+async def main() -> None:
     research_prompt = """
 Produce an investment report for the company Eutelsat. The final report should be saved in the filesystem in markdown format, and
 contain at least the following:
@@ -3,6 +3,7 @@ This demonstrates creating multiple agents and an orchestrator to coordinate them
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 from mcp_agent.workflows.llm.augmented_llm import RequestParams
 
@@ -62,7 +63,7 @@ if needed. Remind the Human of this.
     request_params=RequestParams(maxTokens=8192),
     max_iterations=5,
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         CODER_WARMUP = """
 - Read this paper: https://www.anthropic.com/research/building-effective-agents" to understand how
@@ -1,4 +1,5 @@
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -21,7 +22,7 @@ fast = FastAgent("Agent Chaining")
     name="post_writer",
     sequence=["url_fetcher", "social_media"],
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         # using chain workflow
         await agent.post_writer.prompt()
@@ -3,6 +3,7 @@ This demonstrates creating an optimizer and evaluator to iteratively improve content
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -49,7 +50,7 @@ fast = FastAgent("Evaluator-Optimizer")
     min_rating="EXCELLENT",  # Strive for excellence
     max_refinements=3,  # Maximum iterations
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         job_posting = (
             "Software Engineer at LastMile AI. Responsibilities include developing AI systems, "
@@ -3,6 +3,7 @@ Agent which demonstrates Human Input tool
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -14,7 +15,7 @@ fast = FastAgent("Human Input")
     instruction="An AI agent that assists with basic tasks. Request Human Input when needed.",
     human_input=True,
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         # this usually causes the LLM to request the Human Input Tool
         await agent("print the next number in the sequence")
@@ -3,6 +3,7 @@ This demonstrates creating multiple agents and an orchestrator to coordinate them
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -47,7 +48,7 @@ fast = FastAgent("Orchestrator-Workers")
     agents=["finder", "writer", "proofreader"],
     plan_type="iterative",
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         await agent()
         await agent.author(
@@ -3,6 +3,7 @@ Parallel Workflow showing Fan Out and Fan In agents, using different models
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -65,7 +66,7 @@ and whispers of a hidden agenda linger among the villagers.
     fan_in="grader",
     name="parallel",
 )
-async def main():
+async def main() -> None:
     # Use the app's context manager
     async with fast.run() as agent:
         await agent.parallel(f"student short story submission: {SHORT_STORY}")
@@ -6,6 +6,7 @@ Demonstrates router's ability to either:
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -43,7 +44,7 @@ SAMPLE_REQUESTS = [
     model="sonnet",
     agents=["code_expert", "general_assistant", "fetcher"],
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         for request in SAMPLE_REQUESTS:
             await agent.route(request)
@@ -10,7 +10,7 @@ fa = FastAgent("My Application")
 @fa.agent("analyst", "hello, world", servers=["fetch"])
 
 # Run the application with MCP server
-async def main():
+async def main() -> None:
     await fa.run_with_mcp_server(
         transport="sse",  # Use "sse" for web server, "stdio" for command line
         port=8000,
@@ -1,10 +1,11 @@
 import logging
+
 from mcp_agent.config import get_settings
 
 logger = logging.getLogger(__name__)
 
 
-def send_usage_data():
+def send_usage_data() -> None:
     config = get_settings()
     if not config.usage_telemetry.enabled:
         logger.info("Usage tracking is disabled")
mcp_agent/ui/console_display.py CHANGED
@@ -17,7 +17,7 @@ class ConsoleDisplay:
     This centralizes the UI display logic used by LLM implementations.
     """
 
-    def __init__(self, config=None):
+    def __init__(self, config=None) -> None:
         """
         Initialize the console display handler.
 
@@ -26,7 +26,7 @@ class ConsoleDisplay:
         """
         self.config = config
 
-    def show_tool_result(self, result: CallToolResult):
+    def show_tool_result(self, result: CallToolResult) -> None:
         """Display a tool result in a formatted panel."""
         if not self.config or not self.config.logger.show_tools:
             return
@@ -49,7 +49,7 @@ class ConsoleDisplay:
         console.console.print(panel)
         console.console.print("\n")
 
-    def show_oai_tool_result(self, result):
+    def show_oai_tool_result(self, result) -> None:
         """Display an OpenAI tool result in a formatted panel."""
         if not self.config or not self.config.logger.show_tools:
             return
@@ -70,7 +70,7 @@ class ConsoleDisplay:
         console.console.print(panel)
         console.console.print("\n")
 
-    def show_tool_call(self, available_tools, tool_name, tool_args):
+    def show_tool_call(self, available_tools, tool_name, tool_args) -> None:
         """Display a tool call in a formatted panel."""
         if not self.config or not self.config.logger.show_tools:
             return
@@ -109,25 +109,13 @@ class ConsoleDisplay:
                 tool_call_name = display_tool["name"]
             else:
                 # Handle potential object format (e.g., Pydantic models)
-                tool_call_name = (
-                    display_tool.function.name
-                    if hasattr(display_tool, "function")
-                    else display_tool.name
-                )
-
-                parts = (
-                    tool_call_name.split(SEP)
-                    if SEP in tool_call_name
-                    else [tool_call_name, tool_call_name]
-                )
+                tool_call_name = display_tool.function.name if hasattr(display_tool, "function") else display_tool.name
+
+                parts = tool_call_name.split(SEP) if SEP in tool_call_name else [tool_call_name, tool_call_name]
 
             if selected_tool_name.split(SEP)[0] == parts[0]:
-                style = (
-                    "magenta" if tool_call_name == selected_tool_name else "dim white"
-                )
-                shortened_name = (
-                    parts[1] if len(parts[1]) <= 12 else parts[1][:11] + "…"
-                )
+                style = "magenta" if tool_call_name == selected_tool_name else "dim white"
+                shortened_name = parts[1] if len(parts[1]) <= 12 else parts[1][:11] + "…"
                 display_tool_list.append(f"[{shortened_name}] ", style)
 
         return display_tool_list
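The reflowed ternaries above keep the original behavior: a namespaced tool name is split on SEP into server and tool parts, and names longer than twelve characters are truncated to eleven plus an ellipsis. A standalone sketch of that logic (the value of SEP is an assumption; this diff only shows that it is the namespace separator):

# Sketch of the tool-name shortening in show_tool_call; SEP = "-" is assumed.
SEP = "-"


def shorten(tool_call_name: str) -> str:
    # Un-namespaced names map to themselves, so parts[1] is always valid.
    parts = tool_call_name.split(SEP) if SEP in tool_call_name else [tool_call_name, tool_call_name]
    # Keep names of 12 characters or fewer; otherwise truncate to 11 plus "…".
    return parts[1] if len(parts[1]) <= 12 else parts[1][:11] + "…"


print(shorten("filesystem-read_file"))          # read_file
print(shorten("fetch-fetch_url_with_headers"))  # fetch_url_w…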
@@ -139,7 +127,7 @@ class ConsoleDisplay:
         highlight_namespaced_tool: str = "",
         title: str = "ASSISTANT",
         name: Optional[str] = None,
-    ):
+    ) -> None:
         """Display an assistant message in a formatted panel."""
         if not self.config or not self.config.logger.show_chat:
             return
@@ -150,19 +138,11 @@ class ConsoleDisplay:
         # Add human input tool if available
         tools = await aggregator.list_tools()
         if any(tool.name == HUMAN_INPUT_TOOL_NAME for tool in tools.tools):
-            style = (
-                "green"
-                if highlight_namespaced_tool == HUMAN_INPUT_TOOL_NAME
-                else "dim white"
-            )
+            style = "green" if highlight_namespaced_tool == HUMAN_INPUT_TOOL_NAME else "dim white"
             display_server_list.append("[human] ", style)
 
         # Add all available servers
-        mcp_server_name = (
-            highlight_namespaced_tool.split(SEP)[0]
-            if SEP in highlight_namespaced_tool
-            else highlight_namespaced_tool
-        )
+        mcp_server_name = highlight_namespaced_tool.split(SEP)[0] if SEP in highlight_namespaced_tool else highlight_namespaced_tool
 
         for server_name in await aggregator.list_servers():
             style = "green" if server_name == mcp_server_name else "dim white"
@@ -181,9 +161,7 @@ class ConsoleDisplay:
         console.console.print(panel)
         console.console.print("\n")
 
-    def show_user_message(
-        self, message, model: Optional[str], chat_turn: int, name: Optional[str] = None
-    ):
+    def show_user_message(self, message, model: Optional[str], chat_turn: int, name: Optional[str] = None) -> None:
         """Display a user message in a formatted panel."""
         if not self.config or not self.config.logger.show_chat:
             return
@@ -209,7 +187,7 @@ class ConsoleDisplay:
         agent_name: Optional[str] = None,
         aggregator=None,
         arguments: Optional[dict[str, str]] = None,
-    ):
+    ) -> None:
         """
         Display information about a loaded prompt template.
 
@@ -242,9 +220,7 @@ class ConsoleDisplay:
 
         # Create content text
         content = Text()
-        messages_phrase = (
-            f"Loaded {message_count} message{'s' if message_count != 1 else ''}"
-        )
+        messages_phrase = f"Loaded {message_count} message{'s' if message_count != 1 else ''}"
         content.append(f"{messages_phrase} from template ", style="cyan italic")
         content.append(f"'{prompt_name}'", style="cyan bold italic")
 
@@ -7,7 +7,6 @@ from sklearn.metrics.pairwise import cosine_similarity
 
 from mcp_agent.context_dependent import ContextDependent
 
-
 FloatArray = NDArray[float32]
 
 
@@ -32,9 +31,7 @@ class EmbeddingModel(ABC, ContextDependent):
         """Return the dimensionality of the embeddings"""
 
 
-def compute_similarity_scores(
-    embedding_a: FloatArray, embedding_b: FloatArray
-) -> Dict[str, float]:
+def compute_similarity_scores(embedding_a: FloatArray, embedding_b: FloatArray) -> Dict[str, float]:
     """
     Compute different similarity metrics between embeddings
     """
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
 from cohere import Client
 from numpy import array, float32
@@ -17,7 +17,7 @@ class CohereEmbeddingModel(EmbeddingModel):
         model: str = "embed-multilingual-v3.0",
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         super().__init__(context=context, **kwargs)
         self.client = Client(api_key=self.context.config.cohere.api_key)
         self.model = model
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
 from numpy import array, float32, stack
 from openai import OpenAI
@@ -12,9 +12,7 @@ if TYPE_CHECKING:
 class OpenAIEmbeddingModel(EmbeddingModel):
     """OpenAI embedding model implementation"""
 
-    def __init__(
-        self, model: str = "text-embedding-3-small", context: Optional["Context"] = None
-    ):
+    def __init__(self, model: str = "text-embedding-3-small", context: Optional["Context"] = None) -> None:
         super().__init__(context=context)
         self.client = OpenAI(api_key=self.context.config.openai.api_key)
         self.model = model
|
@@ -25,20 +23,13 @@ class OpenAIEmbeddingModel(EmbeddingModel):
|
|
25
23
|
}[model]
|
26
24
|
|
27
25
|
async def embed(self, data: List[str]) -> FloatArray:
|
28
|
-
response = self.client.embeddings.create(
|
29
|
-
model=self.model, input=data, encoding_format="float"
|
30
|
-
)
|
26
|
+
response = self.client.embeddings.create(model=self.model, input=data, encoding_format="float")
|
31
27
|
|
32
28
|
# Sort the embeddings by their index to ensure correct order
|
33
29
|
sorted_embeddings = sorted(response.data, key=lambda x: x["index"])
|
34
30
|
|
35
31
|
# Stack all embeddings into a single array
|
36
|
-
embeddings = stack(
|
37
|
-
[
|
38
|
-
array(embedding["embedding"], dtype=float32)
|
39
|
-
for embedding in sorted_embeddings
|
40
|
-
]
|
41
|
-
)
|
32
|
+
embeddings = stack([array(embedding["embedding"], dtype=float32) for embedding in sorted_embeddings])
|
42
33
|
return embeddings
|
43
34
|
|
44
35
|
@property
|
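The collapsed embed() above keeps the same three-step shape: request the embeddings, re-sort the response items by index, and stack them into one array. A self-contained sketch of the sort-and-stack step with plain numpy (the dicts below are stand-ins for the OpenAI response items, which the real code accesses the same way):

from numpy import array, float32, stack

# Stand-ins for response.data items, deliberately out of order.
data = [
    {"index": 1, "embedding": [0.4, 0.5, 0.6]},
    {"index": 0, "embedding": [0.1, 0.2, 0.3]},
]

# Sort the embeddings by their index to ensure correct order
sorted_embeddings = sorted(data, key=lambda x: x["index"])

# Stack all embeddings into a single (n, dim) float32 array
embeddings = stack([array(e["embedding"], dtype=float32) for e in sorted_embeddings])
print(embeddings.shape)  # (2, 3)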
@@ -1,8 +1,12 @@
 import contextlib
 from enum import Enum
-from typing import Callable, List, Optional, Type
+from typing import TYPE_CHECKING, Callable, List, Optional, Type
+
 from pydantic import BaseModel, Field
 
+from mcp_agent.agents.agent import Agent
+from mcp_agent.core.agent_types import AgentConfig
+from mcp_agent.logging.logger import get_logger
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     MessageParamT,
@@ -10,9 +14,6 @@ from mcp_agent.workflows.llm.augmented_llm import (
     ModelT,
     RequestParams,
 )
-from mcp_agent.agents.agent import Agent
-from mcp_agent.core.agent_types import AgentConfig
-from mcp_agent.logging.logger import get_logger
 from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM
 
 if TYPE_CHECKING:
@@ -34,15 +35,9 @@ class EvaluationResult(BaseModel):
     """Model representing the evaluation result from the evaluator LLM"""
 
     rating: QualityRating = Field(description="Quality rating of the response")
-    feedback: str = Field(
-        description="Specific feedback and suggestions for improvement"
-    )
-    needs_improvement: bool = Field(
-        description="Whether the output needs further improvement"
-    )
-    focus_areas: List[str] = Field(
-        default_factory=list, description="Specific areas to focus on in next iteration"
-    )
+    feedback: str = Field(description="Specific feedback and suggestions for improvement")
+    needs_improvement: bool = Field(description="Whether the output needs further improvement")
+    focus_areas: List[str] = Field(default_factory=list, description="Specific areas to focus on in next iteration")
 
 
 class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
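The reflowed EvaluationResult declarations are behavior-identical to the multi-line originals; in particular, focus_areas still gets a fresh list via default_factory. A quick sketch (the QualityRating members and their integer values are an assumption, inferred from the min_rating="EXCELLENT" strings and the .value comparisons elsewhere in this diff):

from enum import Enum
from typing import List

from pydantic import BaseModel, Field


class QualityRating(Enum):
    # Integer values assumed; the workflow compares ratings via .value.
    POOR = 0
    FAIR = 1
    GOOD = 2
    EXCELLENT = 3


class EvaluationResult(BaseModel):
    rating: QualityRating = Field(description="Quality rating of the response")
    feedback: str = Field(description="Specific feedback and suggestions for improvement")
    needs_improvement: bool = Field(description="Whether the output needs further improvement")
    focus_areas: List[str] = Field(default_factory=list, description="Specific areas to focus on in next iteration")


result = EvaluationResult(rating=QualityRating.GOOD, feedback="Tighten the summary.", needs_improvement=True)
print(result.focus_areas)  # [] -- default_factory still applies after the reflow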
@@ -75,7 +70,7 @@
             use_history=self.generator_use_history,  # Use generator's history setting
         )
 
-    def _init_request_params(self):
+    def _init_request_params(self) -> None:
         """Initialize request parameters for both generator and evaluator components."""
         # Set up workflow's default params based on generator's history setting
         self.default_request_params = self._initialize_default_params({})
@@ -94,7 +89,7 @@
         context: Optional["Context"] = None,
         name: Optional[str] = None,
         instruction: Optional[str] = None,
-    ):
+    ) -> None:
         """
         Initialize the evaluator-optimizer workflow.
 
@@ -120,14 +115,10 @@
         if isinstance(generator, Agent):
             self.generator_use_history = generator.config.use_history
         elif isinstance(generator, AugmentedLLM):
-            if hasattr(generator, "aggregator") and isinstance(
-                generator.aggregator, Agent
-            ):
+            if hasattr(generator, "aggregator") and isinstance(generator.aggregator, Agent):
                 self.generator_use_history = generator.aggregator.config.use_history
             elif hasattr(generator, "default_request_params"):
-                self.generator_use_history = getattr(
-                    generator.default_request_params, "use_history", False
-                )
+                self.generator_use_history = getattr(generator.default_request_params, "use_history", False)
         # All other types default to False
 
         # Initialize parent class
@@ -140,20 +131,12 @@
         # Set up the generator based on type
         if isinstance(generator, Agent):
             if not llm_factory:
-                raise ValueError(
-                    "llm_factory is required when using an Agent generator"
-                )
+                raise ValueError("llm_factory is required when using an Agent generator")
 
             # Use existing LLM if available, otherwise create new one
-            self.generator_llm = getattr(generator, "_llm", None) or llm_factory(
-                agent=generator
-            )
+            self.generator_llm = getattr(generator, "_llm", None) or llm_factory(agent=generator)
             self.aggregator = generator
-            self.instruction = instruction or (
-                generator.instruction
-                if isinstance(generator.instruction, str)
-                else None
-            )
+            self.instruction = instruction or (generator.instruction if isinstance(generator.instruction, str) else None)
         elif isinstance(generator, AugmentedLLM):
             self.generator_llm = generator
             self.aggregator = getattr(generator, "aggregator", None)
@@ -162,16 +145,12 @@
             # ChainProxy-like object
             self.generator_llm = generator
             self.aggregator = None
-            self.instruction = (
-                instruction or f"Chain of agents: {', '.join(generator._sequence)}"
-            )
+            self.instruction = instruction or f"Chain of agents: {', '.join(generator._sequence)}"
 
         # Set up the evaluator - always disable history
         if isinstance(evaluator, str):
             if not llm_factory:
-                raise ValueError(
-                    "llm_factory is required when using a string evaluator"
-                )
+                raise ValueError("llm_factory is required when using a string evaluator")
 
             evaluator_agent = Agent(
                 name="Evaluator",
@@ -186,15 +165,11 @@
             self.evaluator_llm = llm_factory(agent=evaluator_agent)
         elif isinstance(evaluator, Agent):
             if not llm_factory:
-                raise ValueError(
-                    "llm_factory is required when using an Agent evaluator"
-                )
+                raise ValueError("llm_factory is required when using an Agent evaluator")
 
             # Disable history and use/create LLM
             evaluator.config.use_history = False
-            self.evaluator_llm = getattr(evaluator, "_llm", None) or llm_factory(
-                agent=evaluator
-            )
+            self.evaluator_llm = getattr(evaluator, "_llm", None) or llm_factory(agent=evaluator)
         elif isinstance(evaluator, AugmentedLLM):
             self.evaluator_llm = evaluator
             # Ensure history is disabled
@@ -282,10 +257,7 @@
             )
 
             # Check if we've reached acceptable quality
-            if (
-                evaluation_result.rating.value >= self.min_rating.value
-                or not evaluation_result.needs_improvement
-            ):
+            if evaluation_result.rating.value >= self.min_rating.value or not evaluation_result.needs_improvement:
                 logger.debug(
                     f"Acceptable quality {evaluation_result.rating.value} reached",
                     data={
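The collapsed condition above is the refinement loop's exit test: stop once the rating meets the configured floor, or once the evaluator reports no further improvement is needed. A small sketch of the comparison (again assuming integer-valued QualityRating members, which is what makes the .value >= comparison meaningful):

from enum import Enum


class QualityRating(Enum):
    POOR = 0
    FAIR = 1
    GOOD = 2
    EXCELLENT = 3


def acceptable(rating: QualityRating, needs_improvement: bool, min_rating: QualityRating) -> bool:
    # Mirrors the reflowed condition in the refinement loop.
    return rating.value >= min_rating.value or not needs_improvement


print(acceptable(QualityRating.GOOD, True, QualityRating.EXCELLENT))       # False: keep refining
print(acceptable(QualityRating.EXCELLENT, True, QualityRating.EXCELLENT))  # True: floor reached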
@@ -338,9 +310,7 @@
         request_params: RequestParams | None = None,
     ) -> ModelT:
         """Generate an optimized structured response"""
-        response_str = await self.generate_str(
-            message=message, request_params=request_params
-        )
+        response_str = await self.generate_str(message=message, request_params=request_params)
 
         return await self.generator.generate_structured(
             message=response_str,
@@ -348,9 +318,7 @@
             request_params=request_params,
         )
 
-    def _build_eval_prompt(
-        self, original_request: str, current_response: str, iteration: int
-    ) -> str:
+    def _build_eval_prompt(self, original_request: str, current_response: str, iteration: int) -> str:
         """Build the evaluation prompt for the evaluator"""
         return f"""
 You are an expert evaluator for content quality. Your task is to evaluate a response against the user's original request.
@@ -409,9 +377,7 @@ Be concrete and actionable in your recommendations.
         """Build the refinement prompt for the optimizer"""
         # Get the correct history setting - use param if provided, otherwise class default
         if use_history is None:
-            use_history = (
-                self.generator_use_history
-            )  # Use generator's setting as default
+            use_history = self.generator_use_history  # Use generator's setting as default
 
         # Start with clear non-delimited instructions
         prompt = f"""
@@ -1,5 +1,6 @@
 from abc import ABC, abstractmethod
-from typing import Dict, List, Optional
+from typing import TYPE_CHECKING, Dict, List, Optional
+
 from pydantic import BaseModel, Field
 
 if TYPE_CHECKING:
@@ -54,9 +55,7 @@ class IntentClassifier(ABC):
     - Determining the type of analysis requested for a dataset
     """
 
-    def __init__(
-        self, intents: List[Intent], context: Optional["Context"] = None, **kwargs
-    ):
+    def __init__(self, intents: List[Intent], context: Optional["Context"] = None, **kwargs) -> None:
         super().__init__(context=context, **kwargs)
         self.intents = {intent.name: intent for intent in intents}
         self.initialized: bool = False
@@ -65,9 +64,7 @@ class IntentClassifier(ABC):
             raise ValueError("At least one intent must be provided")
 
     @abstractmethod
-    async def classify(
-        self, request: str, top_k: int = 1
-    ) -> List[IntentClassificationResult]:
+    async def classify(self, request: str, top_k: int = 1) -> List[IntentClassificationResult]:
         """
         Classify the input request into one or more intents.
 
@@ -79,7 +76,7 @@ class IntentClassifier(ABC):
             List of classification results, ordered by confidence
         """
 
-    async def initialize(self):
+    async def initialize(self) -> None:
         """Initialize the classifier. Override this method if needed."""
         self.initialized = True
 
@@ -1,17 +1,17 @@
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
 from numpy import mean
 
 from mcp_agent.workflows.embedding.embedding_base import (
-    FloatArray,
     EmbeddingModel,
+    FloatArray,
     compute_confidence,
     compute_similarity_scores,
 )
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
     Intent,
-    IntentClassifier,
     IntentClassificationResult,
+    IntentClassifier,
 )
 
 if TYPE_CHECKING:
@@ -43,7 +43,7 @@ class EmbeddingIntentClassifier(IntentClassifier):
         embedding_model: EmbeddingModel,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         super().__init__(intents=intents, context=context, **kwargs)
         self.embedding_model = embedding_model
         self.initialized = False
@@ -65,7 +65,7 @@
         await instance.initialize()
         return instance
 
-    async def initialize(self):
+    async def initialize(self) -> None:
         """
         Precompute embeddings for all intents by combining their
         descriptions and examples
@@ -91,9 +91,7 @@
 
         self.initialized = True
 
-    async def classify(
-        self, request: str, top_k: int = 1
-    ) -> List[IntentClassificationResult]:
+    async def classify(self, request: str, top_k: int = 1) -> List[IntentClassificationResult]:
         """
         Classify the input text into one or more intents
 
@@ -116,9 +114,7 @@
             if intent.embedding is None:
                 continue
 
-            similarity_scores = compute_similarity_scores(
-                request_embedding, intent.embedding
-            )
+            similarity_scores = compute_similarity_scores(request_embedding, intent.embedding)
 
             # Compute overall confidence score
             confidence = compute_confidence(similarity_scores)
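The classify() loop above reduces to: embed the request once, score it against each precomputed intent embedding, and fold the similarity scores into a confidence. compute_similarity_scores and compute_confidence live in embedding_base.py, whose internals are not part of this diff, so the sketch below substitutes plain cosine similarity as an illustrative stand-in:

from numpy import array, dot, float32
from numpy.linalg import norm


def cosine(a, b) -> float:
    # Cosine similarity between two embedding vectors.
    return float(dot(a, b) / (norm(a) * norm(b)))


request_embedding = array([0.9, 0.1, 0.0], dtype=float32)
intent_embeddings = {
    "greeting": array([1.0, 0.0, 0.0], dtype=float32),
    "farewell": array([0.0, 1.0, 0.0], dtype=float32),
}

# Score every intent against the request, then rank by confidence.
scores = {name: cosine(request_embedding, emb) for name, emb in intent_embeddings.items()}
for name, confidence in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
    print(f"{name}: {confidence:.3f}")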