fast-agent-mcp 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the versions exactly as they appear in their public registries.
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/METADATA +1 -1
- fast_agent_mcp-0.1.13.dist-info/RECORD +164 -0
- mcp_agent/agents/agent.py +37 -102
- mcp_agent/app.py +16 -27
- mcp_agent/cli/commands/bootstrap.py +22 -52
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +11 -26
- mcp_agent/cli/main.py +6 -9
- mcp_agent/cli/terminal.py +2 -2
- mcp_agent/config.py +1 -5
- mcp_agent/context.py +13 -26
- mcp_agent/context_dependent.py +3 -7
- mcp_agent/core/agent_app.py +46 -122
- mcp_agent/core/agent_types.py +29 -2
- mcp_agent/core/agent_utils.py +3 -5
- mcp_agent/core/decorators.py +6 -14
- mcp_agent/core/enhanced_prompt.py +25 -52
- mcp_agent/core/error_handling.py +1 -1
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/factory.py +30 -72
- mcp_agent/core/fastagent.py +48 -88
- mcp_agent/core/mcp_content.py +10 -19
- mcp_agent/core/prompt.py +8 -15
- mcp_agent/core/proxies.py +34 -25
- mcp_agent/core/request_params.py +46 -0
- mcp_agent/core/types.py +6 -6
- mcp_agent/core/validation.py +16 -16
- mcp_agent/executor/decorator_registry.py +11 -23
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +28 -74
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +17 -29
- mcp_agent/human_input/handler.py +4 -9
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +15 -17
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +24 -24
- mcp_agent/mcp/gen_client.py +4 -12
- mcp_agent/mcp/interfaces.py +107 -88
- mcp_agent/mcp/mcp_agent_client_session.py +11 -19
- mcp_agent/mcp/mcp_agent_server.py +8 -10
- mcp_agent/mcp/mcp_aggregator.py +49 -122
- mcp_agent/mcp/mcp_connection_manager.py +16 -37
- mcp_agent/mcp/prompt_message_multipart.py +12 -18
- mcp_agent/mcp/prompt_serialization.py +13 -38
- mcp_agent/mcp/prompts/prompt_load.py +99 -0
- mcp_agent/mcp/prompts/prompt_server.py +21 -128
- mcp_agent/mcp/prompts/prompt_template.py +20 -42
- mcp_agent/mcp/resource_utils.py +8 -17
- mcp_agent/mcp/sampling.py +62 -64
- mcp_agent/mcp/stdio.py +11 -8
- mcp_agent/mcp_server/__init__.py +1 -1
- mcp_agent/mcp_server/agent_server.py +10 -17
- mcp_agent/mcp_server_registry.py +13 -35
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +1 -1
- mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
- mcp_agent/resources/examples/data-analysis/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +2 -1
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/prompting/__init__.py +1 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +5 -11
- mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/researcher/researcher-imp.py +3 -4
- mcp_agent/resources/examples/researcher/researcher.py +2 -1
- mcp_agent/resources/examples/workflows/agent_build.py +2 -1
- mcp_agent/resources/examples/workflows/chaining.py +2 -1
- mcp_agent/resources/examples/workflows/evaluator.py +2 -1
- mcp_agent/resources/examples/workflows/human_input.py +2 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +2 -1
- mcp_agent/resources/examples/workflows/parallel.py +2 -1
- mcp_agent/resources/examples/workflows/router.py +2 -1
- mcp_agent/resources/examples/workflows/sse.py +1 -1
- mcp_agent/telemetry/usage_tracking.py +2 -1
- mcp_agent/ui/console_display.py +17 -41
- mcp_agent/workflows/embedding/embedding_base.py +1 -4
- mcp_agent/workflows/embedding/embedding_cohere.py +2 -2
- mcp_agent/workflows/embedding/embedding_openai.py +4 -13
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +23 -57
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +5 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +7 -11
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +11 -22
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +3 -3
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +4 -6
- mcp_agent/workflows/llm/anthropic_utils.py +8 -29
- mcp_agent/workflows/llm/augmented_llm.py +94 -332
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +43 -76
- mcp_agent/workflows/llm/augmented_llm_openai.py +46 -100
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +42 -20
- mcp_agent/workflows/llm/augmented_llm_playback.py +8 -6
- mcp_agent/workflows/llm/memory.py +103 -0
- mcp_agent/workflows/llm/model_factory.py +9 -21
- mcp_agent/workflows/llm/openai_utils.py +1 -1
- mcp_agent/workflows/llm/prompt_utils.py +39 -27
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +246 -184
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +212 -202
- mcp_agent/workflows/llm/providers/openai_multipart.py +19 -61
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +11 -212
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +13 -215
- mcp_agent/workflows/llm/sampling_converter.py +117 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +12 -29
- mcp_agent/workflows/orchestrator/orchestrator.py +24 -67
- mcp_agent/workflows/orchestrator/orchestrator_models.py +14 -40
- mcp_agent/workflows/parallel/fan_in.py +17 -47
- mcp_agent/workflows/parallel/fan_out.py +6 -12
- mcp_agent/workflows/parallel/parallel_llm.py +9 -26
- mcp_agent/workflows/router/router_base.py +29 -59
- mcp_agent/workflows/router/router_embedding.py +11 -25
- mcp_agent/workflows/router/router_embedding_cohere.py +2 -2
- mcp_agent/workflows/router/router_embedding_openai.py +2 -2
- mcp_agent/workflows/router/router_llm.py +12 -28
- mcp_agent/workflows/swarm/swarm.py +20 -48
- mcp_agent/workflows/swarm/swarm_anthropic.py +2 -2
- mcp_agent/workflows/swarm/swarm_openai.py +2 -2
- fast_agent_mcp-0.1.11.dist-info/RECORD +0 -160
- mcp_agent/workflows/llm/llm_selector.py +0 -345
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/licenses/LICENSE +0 -0
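Two entries above are new modules rather than reformats: `mcp_agent/core/request_params.py` (+46) and `mcp_agent/workflows/llm/memory.py` (+103). The hunks below show `RequestParams` only at its call site, so here is a minimal usage sketch pieced together from the `agent_build.py` hunk further down; the decorator name and agent list are illustrative assumptions, while the import path, `maxTokens=8192`, and `max_iterations=5` appear verbatim in that hunk:

```python
from mcp_agent.core.fastagent import FastAgent
from mcp_agent.workflows.llm.augmented_llm import RequestParams

fast = FastAgent("Example")


# request_params tunes the underlying LLM call for this workflow
@fast.orchestrator(  # illustrative decorator; the hunk does not show which one agent_build.py uses
    name="builder",  # illustrative
    agents=["finder", "writer"],  # illustrative
    request_params=RequestParams(maxTokens=8192),
    max_iterations=5,
)
async def main() -> None:
    async with fast.run() as agent:
        await agent()
```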
mcp_agent/resources/examples/internal/prompt_sizing.py
CHANGED
@@ -1,6 +1,6 @@
-from pydantic import Field
 from mcp.server.fastmcp import FastMCP
-from mcp.server.fastmcp.prompts.base import
+from mcp.server.fastmcp.prompts.base import AssistantMessage, UserMessage
+from pydantic import Field
 
 mcp = FastMCP("MCP Prompt Tester")
 
@@ -23,9 +23,7 @@ def sizing_prompt():
     description="set up the sizing protocol with metric or imperial units",
 )
 def sizing_prompt_units(
-    metric: bool = Field(
-        description="Set to True for Metric, False for Imperial", default=True
-    ),
+    metric: bool = Field(description="Set to True for Metric, False for Imperial", default=True),
 ):
     if metric:
         return [
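The reformatted parameter above is the FastMCP idiom of using a pydantic `Field` to attach a description and default to a typed prompt argument. A self-contained sketch of the idiom; the decorator's description and the `Field(...)` line come from the hunk, while the prompt name and the message texts are illustrative assumptions:

```python
from mcp.server.fastmcp import FastMCP
from mcp.server.fastmcp.prompts.base import AssistantMessage, UserMessage
from pydantic import Field

mcp = FastMCP("MCP Prompt Tester")


@mcp.prompt(
    name="sizing_units",  # illustrative name
    description="set up the sizing protocol with metric or imperial units",
)
def sizing_prompt_units(
    metric: bool = Field(description="Set to True for Metric, False for Imperial", default=True),
):
    # Return a scripted user/assistant exchange for the chosen unit system
    if metric:
        return [
            UserMessage("Please use metric units for all sizing."),  # illustrative text
            AssistantMessage("Understood, metric it is."),  # illustrative text
        ]
    return [
        UserMessage("Please use imperial units for all sizing."),
        AssistantMessage("Understood, imperial it is."),
    ]
```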
mcp_agent/resources/examples/internal/sizer.py
CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 fast = FastAgent("Sizer Prompt Test")
@@ -10,7 +11,7 @@ fast = FastAgent("Sizer Prompt Test")
     servers=["sizer", "category"],
     use_history=True,
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         await agent()
 
mcp_agent/resources/examples/internal/social.py
CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -56,7 +57,7 @@ Social Media report ready to review with the Human.
         "human_review_and_post",
     ],
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         # using chain workflow
         await agent.post_writer.prompt()
mcp_agent/resources/examples/prompting/agent.py
CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -13,7 +14,7 @@ fast = FastAgent("FastAgent Example")
     # model="gpt-4o",
     # instruction="You are a helpful AI Agent", servers=["prompts","basic_memory"], model="haiku"
 )
-async def main():
+async def main() -> None:
     # use the --model command line switch or agent arguments to change model
     async with fast.run() as agent:
         await agent()
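This hunk shows the pattern repeated across nearly all of the bundled examples: a blank line separating the stdlib import from the package import, and an explicit `-> None` annotation on `main()`. Reassembled from the context lines, such a script has roughly the following shape; the decorator line and the `asyncio.run` entry point are not visible in the hunk and are assumptions here:

```python
import asyncio

from mcp_agent.core.fastagent import FastAgent

# Create the application
fast = FastAgent("FastAgent Example")


@fast.agent(instruction="You are a helpful AI Agent")  # assumed decorator form
async def main() -> None:
    # use the --model command line switch or agent arguments to change model
    async with fast.run() as agent:
        await agent()


if __name__ == "__main__":
    asyncio.run(main())
```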
mcp_agent/resources/examples/prompting/image_server.py
CHANGED
@@ -6,8 +6,8 @@ Simple MCP server that responds to tool calls with text and image content.
 import logging
 from pathlib import Path
 
-from mcp.server.fastmcp import
-from mcp.types import
+from mcp.server.fastmcp import Context, FastMCP, Image
+from mcp.types import ImageContent, TextContent
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
@@ -18,9 +18,7 @@ app = FastMCP(name="ImageToolServer", debug=True)
 
 
 @app.tool(name="get_image", description="Returns an image with a descriptive text")
-async def get_image(
-    image_name: str = "default", ctx: Context = None
-) -> list[TextContent | ImageContent]:
+async def get_image(image_name: str = "default", ctx: Context = None) -> list[TextContent | ImageContent]:
     """
     Returns an image file along with a descriptive text.
 
@@ -45,12 +43,8 @@ async def get_image(
 if __name__ == "__main__":
     # Check if the default image exists
     if not Path("image.jpg").exists():
-        logger.warning(
-            "Default image file 'image.jpg' not found in the current directory"
-        )
-        logger.warning(
-            "Please add an image file named 'image.jpg' to the current directory"
-        )
+        logger.warning("Default image file 'image.jpg' not found in the current directory")
+        logger.warning("Please add an image file named 'image.jpg' to the current directory")
 
     # Run the server using stdio transport
     app.run(transport="stdio")
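For reference, a tool with the signature above returns a list mixing `TextContent` and `ImageContent` items. A minimal sketch of such a return value; the file handling and message text are illustrative, not the package's actual implementation:

```python
import base64
from pathlib import Path

from mcp.types import ImageContent, TextContent


def image_response(path: str = "image.jpg") -> list[TextContent | ImageContent]:
    # ImageContent carries inline base64 data plus a MIME type
    data = base64.b64encode(Path(path).read_bytes()).decode("ascii")
    return [
        TextContent(type="text", text=f"Here is the image '{path}'"),
        ImageContent(type="image", data=data, mimeType="image/jpeg"),
    ]
```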
mcp_agent/resources/examples/researcher/researcher-imp.py
CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 agents = FastAgent(name="Enhanced Researcher")
@@ -162,7 +163,7 @@ The researcher should be able to understand exactly why they received their rating
     min_rating="EXCELLENT",
     name="EnhancedResearcher",
 )
-async def main():
+async def main() -> None:
     async with agents.run() as agent:
         # Start with a warm-up to set expectations and explain the research approach
         await agent.Researcher.send(
@@ -180,9 +181,7 @@ async def main():
         # Start the main research workflow
         await agent.prompt("EnhancedResearcher")
 
-        print(
-            "\nWould you like to ask follow-up questions to the Researcher? (Type 'STOP' to end)"
-        )
+        print("\nWould you like to ask follow-up questions to the Researcher? (Type 'STOP' to end)")
         await agent.prompt("Researcher", default="STOP")
 
 
mcp_agent/resources/examples/researcher/researcher.py
CHANGED
@@ -1,6 +1,7 @@
 import asyncio
 
 from mcp_agent.core.fastagent import FastAgent
+
 # from rich import print
 
 agents = FastAgent(name="Researcher Agent")
@@ -16,7 +17,7 @@ The interpreter has numpy, pandas, matplotlib and seaborn already installed
     """,
     servers=["brave", "interpreter", "filesystem", "fetch"],
 )
-async def main():
+async def main() -> None:
     research_prompt = """
 Produce an investment report for the company Eutelsat. The final report should be saved in the filesystem in markdown format, and
 contain at least the following:
mcp_agent/resources/examples/workflows/agent_build.py
CHANGED
@@ -3,6 +3,7 @@ This demonstrates creating multiple agents and an orchestrator to coordinate them.
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 from mcp_agent.workflows.llm.augmented_llm import RequestParams
 
@@ -62,7 +63,7 @@ if needed. Remind the Human of this.
     request_params=RequestParams(maxTokens=8192),
     max_iterations=5,
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         CODER_WARMUP = """
 - Read this paper: https://www.anthropic.com/research/building-effective-agents" to understand how
mcp_agent/resources/examples/workflows/chaining.py
CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -21,7 +22,7 @@ fast = FastAgent("Agent Chaining")
     name="post_writer",
     sequence=["url_fetcher", "social_media"],
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         # using chain workflow
         await agent.post_writer.prompt()
mcp_agent/resources/examples/workflows/evaluator.py
CHANGED
@@ -3,6 +3,7 @@ This demonstrates creating an optimizer and evaluator to iteratively improve content
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -49,7 +50,7 @@ fast = FastAgent("Evaluator-Optimizer")
     min_rating="EXCELLENT",  # Strive for excellence
     max_refinements=3,  # Maximum iterations
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         job_posting = (
             "Software Engineer at LastMile AI. Responsibilities include developing AI systems, "
mcp_agent/resources/examples/workflows/human_input.py
CHANGED
@@ -3,6 +3,7 @@ Agent which demonstrates Human Input tool
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -14,7 +15,7 @@ fast = FastAgent("Human Input")
     instruction="An AI agent that assists with basic tasks. Request Human Input when needed.",
     human_input=True,
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         # this usually causes the LLM to request the Human Input Tool
         await agent("print the next number in the sequence")
mcp_agent/resources/examples/workflows/orchestrator.py
CHANGED
@@ -3,6 +3,7 @@ This demonstrates creating multiple agents and an orchestrator to coordinate them.
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -47,7 +48,7 @@ fast = FastAgent("Orchestrator-Workers")
     agents=["finder", "writer", "proofreader"],
     plan_type="iterative",
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         await agent()
         await agent.author(
mcp_agent/resources/examples/workflows/parallel.py
CHANGED
@@ -3,6 +3,7 @@ Parallel Workflow showing Fan Out and Fan In agents, using different models
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -65,7 +66,7 @@ and whispers of a hidden agenda linger among the villagers.
     fan_in="grader",
     name="parallel",
 )
-async def main():
+async def main() -> None:
     # Use the app's context manager
     async with fast.run() as agent:
         await agent.parallel(f"student short story submission: {SHORT_STORY}")
mcp_agent/resources/examples/workflows/router.py
CHANGED
@@ -6,6 +6,7 @@ Demonstrates router's ability to either:
 """
 
 import asyncio
+
 from mcp_agent.core.fastagent import FastAgent
 
 # Create the application
@@ -43,7 +44,7 @@ SAMPLE_REQUESTS = [
     model="sonnet",
     agents=["code_expert", "general_assistant", "fetcher"],
 )
-async def main():
+async def main() -> None:
     async with fast.run() as agent:
         for request in SAMPLE_REQUESTS:
             await agent.route(request)
mcp_agent/resources/examples/workflows/sse.py
CHANGED
@@ -10,7 +10,7 @@ fa = FastAgent("My Application")
 @fa.agent("analyst", "hello, world", servers=["fetch"])
 
 # Run the application with MCP server
-async def main():
+async def main() -> None:
     await fa.run_with_mcp_server(
         transport="sse",  # Use "sse" for web server, "stdio" for command line
         port=8000,
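The hunk ends mid-call; completing it into a runnable script only takes the closing parenthesis and the conventional entry point, both assumptions here:

```python
import asyncio

from mcp_agent.core.fastagent import FastAgent

fa = FastAgent("My Application")


@fa.agent("analyst", "hello, world", servers=["fetch"])
# Run the application with MCP server
async def main() -> None:
    await fa.run_with_mcp_server(
        transport="sse",  # Use "sse" for web server, "stdio" for command line
        port=8000,
    )


if __name__ == "__main__":
    asyncio.run(main())
```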
mcp_agent/telemetry/usage_tracking.py
CHANGED
@@ -1,10 +1,11 @@
 import logging
+
 from mcp_agent.config import get_settings
 
 logger = logging.getLogger(__name__)
 
 
-def send_usage_data():
+def send_usage_data() -> None:
     config = get_settings()
     if not config.usage_telemetry.enabled:
         logger.info("Usage tracking is disabled")
mcp_agent/ui/console_display.py
CHANGED
@@ -17,7 +17,7 @@ class ConsoleDisplay:
     This centralizes the UI display logic used by LLM implementations.
     """
 
-    def __init__(self, config=None):
+    def __init__(self, config=None) -> None:
         """
         Initialize the console display handler.
 
@@ -26,7 +26,7 @@ class ConsoleDisplay:
         """
         self.config = config
 
-    def show_tool_result(self, result: CallToolResult):
+    def show_tool_result(self, result: CallToolResult) -> None:
         """Display a tool result in a formatted panel."""
         if not self.config or not self.config.logger.show_tools:
             return
@@ -49,7 +49,7 @@ class ConsoleDisplay:
         console.console.print(panel)
         console.console.print("\n")
 
-    def show_oai_tool_result(self, result):
+    def show_oai_tool_result(self, result) -> None:
         """Display an OpenAI tool result in a formatted panel."""
         if not self.config or not self.config.logger.show_tools:
             return
@@ -70,7 +70,7 @@ class ConsoleDisplay:
         console.console.print(panel)
         console.console.print("\n")
 
-    def show_tool_call(self, available_tools, tool_name, tool_args):
+    def show_tool_call(self, available_tools, tool_name, tool_args) -> None:
         """Display a tool call in a formatted panel."""
         if not self.config or not self.config.logger.show_tools:
             return
@@ -109,25 +109,13 @@ class ConsoleDisplay:
                 tool_call_name = display_tool["name"]
             else:
                 # Handle potential object format (e.g., Pydantic models)
-                tool_call_name = (
-                    display_tool.function.name
-                    if hasattr(display_tool, "function")
-                    else display_tool.name
-                )
-
-            parts = (
-                tool_call_name.split(SEP)
-                if SEP in tool_call_name
-                else [tool_call_name, tool_call_name]
-            )
+                tool_call_name = display_tool.function.name if hasattr(display_tool, "function") else display_tool.name
+
+            parts = tool_call_name.split(SEP) if SEP in tool_call_name else [tool_call_name, tool_call_name]
 
             if selected_tool_name.split(SEP)[0] == parts[0]:
-                style = (
-                    "magenta" if tool_call_name == selected_tool_name else "dim white"
-                )
-                shortened_name = (
-                    parts[1] if len(parts[1]) <= 12 else parts[1][:11] + "…"
-                )
+                style = "magenta" if tool_call_name == selected_tool_name else "dim white"
+                shortened_name = parts[1] if len(parts[1]) <= 12 else parts[1][:11] + "…"
                 display_tool_list.append(f"[{shortened_name}] ", style)
 
         return display_tool_list
@@ -139,7 +127,7 @@ class ConsoleDisplay:
         highlight_namespaced_tool: str = "",
         title: str = "ASSISTANT",
         name: Optional[str] = None,
-    ):
+    ) -> None:
         """Display an assistant message in a formatted panel."""
         if not self.config or not self.config.logger.show_chat:
             return
@@ -150,19 +138,11 @@ class ConsoleDisplay:
         # Add human input tool if available
         tools = await aggregator.list_tools()
         if any(tool.name == HUMAN_INPUT_TOOL_NAME for tool in tools.tools):
-            style = (
-                "green"
-                if highlight_namespaced_tool == HUMAN_INPUT_TOOL_NAME
-                else "dim white"
-            )
+            style = "green" if highlight_namespaced_tool == HUMAN_INPUT_TOOL_NAME else "dim white"
             display_server_list.append("[human] ", style)
 
         # Add all available servers
-        mcp_server_name = (
-            highlight_namespaced_tool.split(SEP)[0]
-            if SEP in highlight_namespaced_tool
-            else highlight_namespaced_tool
-        )
+        mcp_server_name = highlight_namespaced_tool.split(SEP)[0] if SEP in highlight_namespaced_tool else highlight_namespaced_tool
 
         for server_name in await aggregator.list_servers():
             style = "green" if server_name == mcp_server_name else "dim white"
@@ -181,9 +161,7 @@ class ConsoleDisplay:
         console.console.print(panel)
         console.console.print("\n")
 
-    def show_user_message(
-        self, message, model: Optional[str], chat_turn: int, name: Optional[str] = None
-    ):
+    def show_user_message(self, message, model: Optional[str], chat_turn: int, name: Optional[str] = None) -> None:
         """Display a user message in a formatted panel."""
         if not self.config or not self.config.logger.show_chat:
             return
@@ -209,7 +187,7 @@ class ConsoleDisplay:
         agent_name: Optional[str] = None,
         aggregator=None,
         arguments: Optional[dict[str, str]] = None,
-    ):
+    ) -> None:
         """
         Display information about a loaded prompt template.
 
@@ -242,22 +220,20 @@ class ConsoleDisplay:
 
         # Create content text
         content = Text()
-        messages_phrase = (
-            f"Loaded {message_count} message{'s' if message_count != 1 else ''}"
-        )
+        messages_phrase = f"Loaded {message_count} message{'s' if message_count != 1 else ''}"
         content.append(f"{messages_phrase} from template ", style="cyan italic")
         content.append(f"'{prompt_name}'", style="cyan bold italic")
 
         if agent_name:
             content.append(f" for {agent_name}", style="cyan italic")
-
+
         # Add template arguments if provided
         if arguments:
             content.append("\n\nArguments:", style="cyan")
             for key, value in arguments.items():
                 content.append(f"\n  {key}: ", style="cyan bold")
                 content.append(value, style="white")
-
+
         if description:
             content.append("\n\n", style="default")
             content.append(description, style="dim white")
mcp_agent/workflows/embedding/embedding_base.py
CHANGED
@@ -7,7 +7,6 @@ from sklearn.metrics.pairwise import cosine_similarity
 
 from mcp_agent.context_dependent import ContextDependent
 
-
 FloatArray = NDArray[float32]
 
 
@@ -32,9 +31,7 @@ class EmbeddingModel(ABC, ContextDependent):
         """Return the dimensionality of the embeddings"""
 
 
-def compute_similarity_scores(
-    embedding_a: FloatArray, embedding_b: FloatArray
-) -> Dict[str, float]:
+def compute_similarity_scores(embedding_a: FloatArray, embedding_b: FloatArray) -> Dict[str, float]:
     """
     Compute different similarity metrics between embeddings
     """
mcp_agent/workflows/embedding/embedding_cohere.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
 from cohere import Client
 from numpy import array, float32
@@ -17,7 +17,7 @@ class CohereEmbeddingModel(EmbeddingModel):
         model: str = "embed-multilingual-v3.0",
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         super().__init__(context=context, **kwargs)
         self.client = Client(api_key=self.context.config.cohere.api_key)
         self.model = model
mcp_agent/workflows/embedding/embedding_openai.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
 from numpy import array, float32, stack
 from openai import OpenAI
@@ -12,9 +12,7 @@ if TYPE_CHECKING:
 class OpenAIEmbeddingModel(EmbeddingModel):
     """OpenAI embedding model implementation"""
 
-    def __init__(
-        self, model: str = "text-embedding-3-small", context: Optional["Context"] = None
-    ):
+    def __init__(self, model: str = "text-embedding-3-small", context: Optional["Context"] = None) -> None:
         super().__init__(context=context)
         self.client = OpenAI(api_key=self.context.config.openai.api_key)
         self.model = model
@@ -25,20 +23,13 @@ class OpenAIEmbeddingModel(EmbeddingModel):
         }[model]
 
     async def embed(self, data: List[str]) -> FloatArray:
-        response = self.client.embeddings.create(
-            model=self.model, input=data, encoding_format="float"
-        )
+        response = self.client.embeddings.create(model=self.model, input=data, encoding_format="float")
 
         # Sort the embeddings by their index to ensure correct order
         sorted_embeddings = sorted(response.data, key=lambda x: x["index"])
 
         # Stack all embeddings into a single array
-        embeddings = stack(
-            [
-                array(embedding["embedding"], dtype=float32)
-                for embedding in sorted_embeddings
-            ]
-        )
+        embeddings = stack([array(embedding["embedding"], dtype=float32) for embedding in sorted_embeddings])
         return embeddings
 
     @property