fast-agent-mcp 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/METADATA +1 -1
- fast_agent_mcp-0.1.13.dist-info/RECORD +164 -0
- mcp_agent/agents/agent.py +37 -102
- mcp_agent/app.py +16 -27
- mcp_agent/cli/commands/bootstrap.py +22 -52
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +11 -26
- mcp_agent/cli/main.py +6 -9
- mcp_agent/cli/terminal.py +2 -2
- mcp_agent/config.py +1 -5
- mcp_agent/context.py +13 -26
- mcp_agent/context_dependent.py +3 -7
- mcp_agent/core/agent_app.py +46 -122
- mcp_agent/core/agent_types.py +29 -2
- mcp_agent/core/agent_utils.py +3 -5
- mcp_agent/core/decorators.py +6 -14
- mcp_agent/core/enhanced_prompt.py +25 -52
- mcp_agent/core/error_handling.py +1 -1
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/factory.py +30 -72
- mcp_agent/core/fastagent.py +48 -88
- mcp_agent/core/mcp_content.py +10 -19
- mcp_agent/core/prompt.py +8 -15
- mcp_agent/core/proxies.py +34 -25
- mcp_agent/core/request_params.py +46 -0
- mcp_agent/core/types.py +6 -6
- mcp_agent/core/validation.py +16 -16
- mcp_agent/executor/decorator_registry.py +11 -23
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +28 -74
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +17 -29
- mcp_agent/human_input/handler.py +4 -9
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +15 -17
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +24 -24
- mcp_agent/mcp/gen_client.py +4 -12
- mcp_agent/mcp/interfaces.py +107 -88
- mcp_agent/mcp/mcp_agent_client_session.py +11 -19
- mcp_agent/mcp/mcp_agent_server.py +8 -10
- mcp_agent/mcp/mcp_aggregator.py +49 -122
- mcp_agent/mcp/mcp_connection_manager.py +16 -37
- mcp_agent/mcp/prompt_message_multipart.py +12 -18
- mcp_agent/mcp/prompt_serialization.py +13 -38
- mcp_agent/mcp/prompts/prompt_load.py +99 -0
- mcp_agent/mcp/prompts/prompt_server.py +21 -128
- mcp_agent/mcp/prompts/prompt_template.py +20 -42
- mcp_agent/mcp/resource_utils.py +8 -17
- mcp_agent/mcp/sampling.py +62 -64
- mcp_agent/mcp/stdio.py +11 -8
- mcp_agent/mcp_server/__init__.py +1 -1
- mcp_agent/mcp_server/agent_server.py +10 -17
- mcp_agent/mcp_server_registry.py +13 -35
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +1 -1
- mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
- mcp_agent/resources/examples/data-analysis/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +2 -1
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/prompting/__init__.py +1 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +5 -11
- mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/researcher/researcher-imp.py +3 -4
- mcp_agent/resources/examples/researcher/researcher.py +2 -1
- mcp_agent/resources/examples/workflows/agent_build.py +2 -1
- mcp_agent/resources/examples/workflows/chaining.py +2 -1
- mcp_agent/resources/examples/workflows/evaluator.py +2 -1
- mcp_agent/resources/examples/workflows/human_input.py +2 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +2 -1
- mcp_agent/resources/examples/workflows/parallel.py +2 -1
- mcp_agent/resources/examples/workflows/router.py +2 -1
- mcp_agent/resources/examples/workflows/sse.py +1 -1
- mcp_agent/telemetry/usage_tracking.py +2 -1
- mcp_agent/ui/console_display.py +17 -41
- mcp_agent/workflows/embedding/embedding_base.py +1 -4
- mcp_agent/workflows/embedding/embedding_cohere.py +2 -2
- mcp_agent/workflows/embedding/embedding_openai.py +4 -13
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +23 -57
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +5 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +7 -11
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +11 -22
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +3 -3
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +4 -6
- mcp_agent/workflows/llm/anthropic_utils.py +8 -29
- mcp_agent/workflows/llm/augmented_llm.py +94 -332
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +43 -76
- mcp_agent/workflows/llm/augmented_llm_openai.py +46 -100
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +42 -20
- mcp_agent/workflows/llm/augmented_llm_playback.py +8 -6
- mcp_agent/workflows/llm/memory.py +103 -0
- mcp_agent/workflows/llm/model_factory.py +9 -21
- mcp_agent/workflows/llm/openai_utils.py +1 -1
- mcp_agent/workflows/llm/prompt_utils.py +39 -27
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +246 -184
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +212 -202
- mcp_agent/workflows/llm/providers/openai_multipart.py +19 -61
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +11 -212
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +13 -215
- mcp_agent/workflows/llm/sampling_converter.py +117 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +12 -29
- mcp_agent/workflows/orchestrator/orchestrator.py +24 -67
- mcp_agent/workflows/orchestrator/orchestrator_models.py +14 -40
- mcp_agent/workflows/parallel/fan_in.py +17 -47
- mcp_agent/workflows/parallel/fan_out.py +6 -12
- mcp_agent/workflows/parallel/parallel_llm.py +9 -26
- mcp_agent/workflows/router/router_base.py +29 -59
- mcp_agent/workflows/router/router_embedding.py +11 -25
- mcp_agent/workflows/router/router_embedding_cohere.py +2 -2
- mcp_agent/workflows/router/router_embedding_openai.py +2 -2
- mcp_agent/workflows/router/router_llm.py +12 -28
- mcp_agent/workflows/swarm/swarm.py +20 -48
- mcp_agent/workflows/swarm/swarm_anthropic.py +2 -2
- mcp_agent/workflows/swarm/swarm_openai.py +2 -2
- fast_agent_mcp-0.1.11.dist-info/RECORD +0 -160
- mcp_agent/workflows/llm/llm_selector.py +0 -345
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/licenses/LICENSE +0 -0
mcp_agent/logging/transport.py
CHANGED
@@ -6,12 +6,14 @@ Transports for the Logger module for MCP Agent, including:
 
 import asyncio
 import json
+import traceback
 from abc import ABC, abstractmethod
-from typing import Dict, List, Protocol
 from pathlib import Path
+from typing import Dict, List, Protocol
 
 import aiohttp
 from opentelemetry import trace
+from rich import print
 from rich.json import JSON
 from rich.text import Text
 
@@ -20,8 +22,6 @@ from mcp_agent.console import console
 from mcp_agent.logging.events import Event, EventFilter
 from mcp_agent.logging.json_serializer import JSONSerializer
 from mcp_agent.logging.listeners import EventListener, LifecycleAwareListener
-from rich import print
-import traceback
 
 
 class EventTransport(Protocol):
@@ -30,7 +30,7 @@ class EventTransport(Protocol):
     (Kafka, RabbitMQ, REST, etc.).
     """
 
-    async def send_event(self, event: Event):
+    async def send_event(self, event: Event) -> None:
         """
         Send an event to the external system.
         Args:
@@ -44,10 +44,10 @@ class FilteredEventTransport(EventTransport, ABC):
     Event transport that filters events based on a filter before sending.
     """
 
-    def __init__(self, event_filter: EventFilter | None = None):
+    def __init__(self, event_filter: EventFilter | None = None) -> None:
         self.filter = event_filter
 
-    async def send_event(self, event: Event):
+    async def send_event(self, event: Event) -> None:
         if not self.filter or self.filter.matches(event):
             await self.send_matched_event(event)
 
@@ -59,7 +59,7 @@ class FilteredEventTransport(EventTransport, ABC):
 class NoOpTransport(FilteredEventTransport):
     """Default transport that does nothing (purely local)."""
 
-    async def send_matched_event(self, event):
+    async def send_matched_event(self, event) -> None:
         """Do nothing."""
         pass
 
@@ -67,7 +67,7 @@ class NoOpTransport(FilteredEventTransport):
 class ConsoleTransport(FilteredEventTransport):
     """Simple transport that prints events to console."""
 
-    def __init__(self, event_filter: EventFilter | None = None):
+    def __init__(self, event_filter: EventFilter | None = None) -> None:
         super().__init__(event_filter=event_filter)
         # Use shared console instances
         self._serializer = JSONSerializer()
@@ -78,7 +78,7 @@ class ConsoleTransport(FilteredEventTransport):
         "error": "bold red",
     }
 
-    async def send_matched_event(self, event: Event):
+    async def send_matched_event(self, event: Event) -> None:
         # Map log levels to styles
         style = self.log_level_styles.get(event.type, "white")
 
@@ -114,7 +114,7 @@ class FileTransport(FilteredEventTransport):
         event_filter: EventFilter | None = None,
         mode: str = "a",
         encoding: str = "utf-8",
-    ):
+    ) -> None:
         """Initialize FileTransport.
 
         Args:
@@ -186,7 +186,7 @@ class HTTPTransport(FilteredEventTransport):
         batch_size: int = 100,
         timeout: float = 5.0,
         event_filter: EventFilter | None = None,
-    ):
+    ) -> None:
         super().__init__(event_filter=event_filter)
         self.endpoint = endpoint
         self.headers = headers or {}
@@ -198,14 +198,14 @@ class HTTPTransport(FilteredEventTransport):
         self._session: aiohttp.ClientSession | None = None
         self._serializer = JSONSerializer()
 
-    async def start(self):
+    async def start(self) -> None:
         """Initialize HTTP session."""
         if not self._session:
             self._session = aiohttp.ClientSession(
                 headers=self.headers, timeout=aiohttp.ClientTimeout(total=self.timeout)
             )
 
-    async def stop(self):
+    async def stop(self) -> None:
         """Close HTTP session and flush any remaining events."""
         if self.batch:
             await self._flush()
@@ -213,14 +213,14 @@ class HTTPTransport(FilteredEventTransport):
             await self._session.close()
             self._session = None
 
-    async def send_matched_event(self, event: Event):
+    async def send_matched_event(self, event: Event) -> None:
         """Add event to batch, flush if batch is full."""
         async with self.lock:
             self.batch.append(event)
             if len(self.batch) >= self.batch_size:
                 await self._flush()
 
-    async def _flush(self):
+    async def _flush(self) -> None:
         """Send batch of events to HTTP endpoint."""
         if not self.batch:
             return
@@ -266,7 +266,7 @@ class AsyncEventBus:
 
     _instance = None
 
-    def __init__(self, transport: EventTransport | None = None):
+    def __init__(self, transport: EventTransport | None = None) -> None:
         self.transport: EventTransport = transport or NoOpTransport()
         self.listeners: Dict[str, EventListener] = {}
         self._queue = asyncio.Queue()
@@ -290,7 +290,7 @@ class AsyncEventBus:
         # Update transport if provided
         cls._instance.transport = transport
         return cls._instance
-
+
     @classmethod
     def reset(cls) -> None:
         """
@@ -302,11 +302,11 @@ class AsyncEventBus:
         # Signal shutdown
         cls._instance._running = False
         cls._instance._stop_event.set()
-
+
         # Clear the singleton instance
         cls._instance = None
 
-    async def start(self):
+    async def start(self) -> None:
         """Start the event bus and all lifecycle-aware listeners."""
         if self._running:
             return
@@ -321,7 +321,7 @@ class AsyncEventBus:
         self._running = True
         self._task = asyncio.create_task(self._process_events())
 
-    async def stop(self):
+    async def stop(self) -> None:
         """Stop the event bus and all lifecycle-aware listeners."""
         if not self._running:
             return
@@ -369,7 +369,7 @@ class AsyncEventBus:
         except Exception as e:
             print(f"Error stopping listener: {e}")
 
-    async def emit(self, event: Event):
+    async def emit(self, event: Event) -> None:
         """Emit an event to all listeners and transport."""
         # Inject current tracing info if available
         span = trace.get_current_span()
@@ -387,15 +387,15 @@ class AsyncEventBus:
         # Then queue for listeners
         await self._queue.put(event)
 
-    def add_listener(self, name: str, listener: EventListener):
+    def add_listener(self, name: str, listener: EventListener) -> None:
         """Add a listener to the event bus."""
         self.listeners[name] = listener
 
-    def remove_listener(self, name: str):
+    def remove_listener(self, name: str) -> None:
         """Remove a listener from the event bus."""
         self.listeners.pop(name, None)
 
-    async def _process_events(self):
+    async def _process_events(self) -> None:
         """Process events from the queue until stopped."""
         while self._running:
             event = None
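For context on the `-> None` annotations added above, here is a minimal, hypothetical sketch of a custom transport written against the 0.1.13 signatures. It assumes `FilteredEventTransport` is importable from `mcp_agent.logging.transport` and `Event`/`EventFilter` from `mcp_agent.logging.events` (the paths shown in this diff); the `StderrTransport` class itself is not part of the package.

```python
import sys

from mcp_agent.logging.events import Event, EventFilter
from mcp_agent.logging.transport import FilteredEventTransport


class StderrTransport(FilteredEventTransport):
    """Hypothetical transport: write each matched event to stderr as one line."""

    def __init__(self, event_filter: EventFilter | None = None) -> None:
        super().__init__(event_filter=event_filter)

    async def send_matched_event(self, event: Event) -> None:
        # `event.type` is used here the same way ConsoleTransport uses it above.
        print(f"[{event.type}] {event}", file=sys.stderr)
```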
mcp_agent/mcp/gen_client.py
CHANGED
@@ -28,9 +28,7 @@ async def gen_client(
     For persistent connections, use connect() or MCPConnectionManager instead.
     """
     if not server_registry:
-        raise ValueError(
-            "Server registry not found in the context. Please specify one either on this method, or in the context."
-        )
+        raise ValueError("Server registry not found in the context. Please specify one either on this method, or in the context.")
 
     async with server_registry.initialize_server(
         server_name=server_name,
@@ -53,9 +51,7 @@ async def connect(
     If required, callers can specify their own message receive loop and ClientSession class constructor to customize further.
     """
     if not server_registry:
-        raise ValueError(
-            "Server registry not found in the context. Please specify one either on this method, or in the context."
-        )
+        raise ValueError("Server registry not found in the context. Please specify one either on this method, or in the context.")
 
     server_connection = await server_registry.connection_manager.get_server(
         server_name=server_name,
@@ -73,13 +69,9 @@ async def disconnect(
     Disconnect from the specified server. If server_name is None, disconnect from all servers.
     """
     if not server_registry:
-        raise ValueError(
-            "Server registry not found in the context. Please specify one either on this method, or in the context."
-        )
+        raise ValueError("Server registry not found in the context. Please specify one either on this method, or in the context.")
 
     if server_name:
-        await server_registry.connection_manager.disconnect_server(
-            server_name=server_name
-        )
+        await server_registry.connection_manager.disconnect_server(server_name=server_name)
     else:
         await server_registry.connection_manager.disconnect_all_servers()
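The `gen_client` helper above is meant for ephemeral connections (its docstring points to `connect()`/`MCPConnectionManager` for persistent ones). A hedged usage sketch, assuming `gen_client` is an async context manager that yields an MCP `ClientSession` and that a `ServerRegistry` instance is available as `registry`; the `"fetch"` server name is illustrative only.

```python
from mcp_agent.mcp.gen_client import gen_client


async def list_server_tools(registry) -> None:
    # Ephemeral connection: opened for the duration of the block, then torn down.
    async with gen_client("fetch", server_registry=registry) as session:
        result = await session.list_tools()
        print([tool.name for tool in result.tools])
```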
mcp_agent/mcp/interfaces.py
CHANGED
@@ -3,130 +3,149 @@ Interface definitions to prevent circular imports.
 This module defines protocols (interfaces) that can be used to break circular dependencies.
 """
 
-from
-from typing import
-
+from datetime import timedelta
+from typing import (
+    Any,
+    AsyncContextManager,
+    Callable,
+    Generic,
+    List,
+    Optional,
+    Protocol,
+    Type,
+    TypeVar,
+    Union,
+    runtime_checkable,
+)
+
+from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
 from mcp import ClientSession
-from mcp.types import
-from pydantic import Field
+from mcp.types import PromptMessage
 
+from mcp_agent.core.request_params import RequestParams
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
-class ServerRegistryProtocol(Protocol):
-    """
-    Protocol defining the minimal interface of ServerRegistry needed by gen_client.
-    This allows gen_client to depend on this protocol rather than the full ServerRegistry class.
-    """
 
-
-
+@runtime_checkable
+class MCPConnectionManagerProtocol(Protocol):
+    """Protocol for MCPConnectionManager functionality needed by ServerRegistry."""
+
+    async def get_server(
         self,
         server_name: str,
-        client_session_factory
-
-
-
-
+        client_session_factory: Optional[
+            Callable[
+                [
+                    MemoryObjectReceiveStream,
+                    MemoryObjectSendStream,
+                    Optional[timedelta],
+                ],
+                ClientSession,
+            ]
+        ] = None,
+    ) -> "ServerConnection": ...
 
-
-    def connection_manager(self) -> "ConnectionManagerProtocol":
-        """Get the connection manager."""
-        ...
+    async def disconnect_server(self, server_name: str) -> None: ...
 
+    async def disconnect_all_servers(self) -> None: ...
 
-class ConnectionManagerProtocol(Protocol):
-    """
-    Protocol defining the minimal interface of ConnectionManager needed.
-    """
 
-
+@runtime_checkable
+class ServerRegistryProtocol(Protocol):
+    """Protocol defining the minimal interface of ServerRegistry needed by gen_client."""
+
+    @property
+    def connection_manager(self) -> MCPConnectionManagerProtocol: ...
+
+    def initialize_server(
         self,
         server_name: str,
-        client_session_factory
-
-
+        client_session_factory: Optional[
+            Callable[
+                [
+                    MemoryObjectReceiveStream,
+                    MemoryObjectSendStream,
+                    Optional[timedelta],
+                ],
+                ClientSession,
+            ]
+        ] = None,
+        init_hook: Optional[Callable] = None,
+    ) -> AsyncContextManager[ClientSession]:
+        """Initialize a server and yield a client session."""
         ...
 
-    async def disconnect_server(self, server_name: str) -> None:
-        """Disconnect from a server."""
-        ...
 
-
-
-        ...
+class ServerConnection(Protocol):
+    """Protocol for server connection objects returned by MCPConnectionManager."""
 
+    @property
+    def session(self) -> ClientSession: ...
 
-# Type variables for generic protocols
-MessageParamT = TypeVar("MessageParamT")
-"""A type representing an input message to an LLM."""
 
+# Regular invariant type variables
+MessageParamT = TypeVar("MessageParamT")
 MessageT = TypeVar("MessageT")
-"""A type representing an output message from an LLM."""
-
 ModelT = TypeVar("ModelT")
-"""A type representing a structured output message from an LLM."""
-
-
-class RequestParams(CreateMessageRequestParams):
-    """
-    Parameters to configure the AugmentedLLM 'generate' requests.
-    """
-
-    messages: None = Field(exclude=True, default=None)
-    """
-    Ignored. 'messages' are removed from CreateMessageRequestParams
-    to avoid confusion with the 'message' parameter on 'generate' method.
-    """
-
-    maxTokens: int = 2048
-    """The maximum number of tokens to sample, as requested by the server."""
 
-
-
-
-    If specified, this overrides the 'modelPreferences' selection criteria.
-    """
-
-    use_history: bool = True
-    """
-    Include the message history in the generate request.
-    """
-
-    max_iterations: int = 10
-    """
-    The maximum number of iterations to run the LLM for.
-    """
+# Variance-annotated type variables
+MessageParamT_co = TypeVar("MessageParamT_co", contravariant=True)
+MessageT_co = TypeVar("MessageT_co")
 
-    parallel_tool_calls: bool = True
-    """
-    Whether to allow multiple tool calls per iteration.
-    Also known as multi-step tool use.
-    """
 
-
-class AugmentedLLMProtocol(Protocol, Generic[MessageParamT, MessageT]):
+class AugmentedLLMProtocol(Protocol, Generic[MessageParamT_co, MessageT_co]):
     """Protocol defining the interface for augmented LLMs"""
 
     async def generate(
         self,
-        message: str
+        message: Union[str, MessageParamT_co, List[MessageParamT_co]],
         request_params: RequestParams | None = None,
-    ) -> List[
+    ) -> List[MessageT_co]:
         """Request an LLM generation, which may run multiple iterations, and return the result"""
+        ...
 
     async def generate_str(
         self,
-        message: str
+        message: Union[str, MessageParamT_co, List[MessageParamT_co]],
         request_params: RequestParams | None = None,
     ) -> str:
         """Request an LLM generation and return the string representation of the result"""
+        ...
 
-    async def
+    async def structured(
         self,
-
-
-        request_params: RequestParams | None
+        prompt: Union[str, PromptMessage, PromptMessageMultipart, List[str]],
+        model: Type[ModelT],
+        request_params: RequestParams | None,
     ) -> ModelT:
-        """
+        """Apply the prompt and return the result as a Pydantic model, or None if coercion fails"""
+        ...
+
+    async def generate_prompt(
+        self,
+        prompt: Union[str, PromptMessage, PromptMessageMultipart, List[str]],
+        request_params: RequestParams | None,
+    ) -> str:
+        """Request an LLM generation and return a string representation of the result"""
+        ...
+
+    async def apply_prompt(
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: RequestParams | None = None,
+    ) -> str:
+        """
+        Apply a list of PromptMessageMultipart messages directly to the LLM.
+        This is a cleaner interface to _apply_prompt_template_provider_specific.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects
+            request_params: Optional parameters to configure the LLM request
+
+        Returns:
+            String representation of the assistant's response
+        """
+        ...
 
 
 class ModelFactoryClassProtocol(Protocol):
@@ -134,7 +153,7 @@ class ModelFactoryClassProtocol(Protocol):
     Protocol defining the minimal interface of the ModelFactory class needed by sampling.
     This allows sampling.py to depend on this protocol rather than the concrete ModelFactory class.
    """
-
+
    @classmethod
    def create_factory(
        cls, model_string: str, request_params: Optional[RequestParams] = None
@@ -149,4 +168,4 @@ class ModelFactoryClassProtocol(Protocol):
        Returns:
            A factory function that can create an LLM instance
        """
-        ...
+        ...
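Because `AugmentedLLMProtocol` now declares `generate`, `generate_str`, `structured`, `generate_prompt`, and `apply_prompt`, calling code can be typed against the protocol rather than a concrete LLM class. An illustrative sketch only: the `summarize` helper is hypothetical, while the import path and the `generate_str` signature are those shown in the diff above.

```python
from mcp_agent.mcp.interfaces import AugmentedLLMProtocol


async def summarize(llm: AugmentedLLMProtocol, text: str) -> str:
    # generate_str accepts a plain string message and returns the response text,
    # per the protocol declared in interfaces.py.
    return await llm.generate_str(f"Summarize in one sentence:\n\n{text}")
```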
mcp_agent/mcp/mcp_agent_client_session.py
CHANGED
@@ -3,12 +3,12 @@ A derived client session for the MCP Agent framework.
 It adds logging and supports sampling requests.
 """
 
-from typing import Optional
+from typing import TYPE_CHECKING, Optional
 
 from mcp import ClientSession
 from mcp.shared.session import (
-    ReceiveResultT,
     ReceiveNotificationT,
+    ReceiveResultT,
     RequestId,
     SendNotificationT,
     SendRequestT,
@@ -21,11 +21,13 @@ from mcp.types import (
 )
 from pydantic import AnyUrl
 
-from mcp_agent.config import MCPServerSettings
 from mcp_agent.context_dependent import ContextDependent
 from mcp_agent.logging.logger import get_logger
 from mcp_agent.mcp.sampling import sample
 
+if TYPE_CHECKING:
+    from mcp_agent.config import MCPServerSettings
+
 logger = get_logger(__name__)
 
 
@@ -63,10 +65,8 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
     Developers can extend this class to add more custom functionality as needed
     """
 
-    def __init__(self, *args, **kwargs):
-        super().__init__(
-            *args, **kwargs, list_roots_callback=list_roots, sampling_callback=sample
-        )
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, **kwargs, list_roots_callback=list_roots, sampling_callback=sample)
         self.server_config: Optional[MCPServerSettings] = None
 
     async def send_request(
@@ -91,9 +91,7 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
             logger.error("send_notification failed", data=e)
             raise
 
-    async def _send_response(
-        self, request_id: RequestId, response: SendResultT | ErrorData
-    ) -> None:
+    async def _send_response(self, request_id: RequestId, response: SendResultT | ErrorData) -> None:
         logger.debug(
             f"send_response: request_id={request_id}, response=",
             data=response.model_dump(),
@@ -111,16 +109,10 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
         )
         return await super()._received_notification(notification)
 
-    async def send_progress_notification(
-        self, progress_token: str | int, progress: float, total: float | None = None
-    ) -> None:
+    async def send_progress_notification(self, progress_token: str | int, progress: float, total: float | None = None) -> None:
         """
         Sends a progress notification for a request that is currently being
         processed.
         """
-        logger.debug(
-
-        )
-        return await super().send_progress_notification(
-            progress_token=progress_token, progress=progress, total=total
-        )
+        logger.debug("send_progress_notification: progress_token={progress_token}, progress={progress}, total={total}")
+        return await super().send_progress_notification(progress_token=progress_token, progress=progress, total=total)
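The import change above defers `MCPServerSettings` behind `TYPE_CHECKING`, a standard way to keep an annotation-only import out of the runtime import graph. A generic sketch of the pattern, not project code; note that an annotation relying on a deferred import must be a string (or the module must use `from __future__ import annotations`):

```python
from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # Imported for type checkers only; never executed at runtime.
    from mcp_agent.config import MCPServerSettings


class Holder:
    def __init__(self) -> None:
        # String annotation, so the deferred import is not needed at runtime.
        self.server_config: Optional["MCPServerSettings"] = None
```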
mcp_agent/mcp/mcp_agent_server.py
CHANGED
@@ -1,7 +1,9 @@
 import asyncio
+
 from mcp.server import NotificationOptions
 from mcp.server.fastmcp import FastMCP
 from mcp.server.stdio import stdio_server
+
 from mcp_agent.executor.temporal import get_temporal_client
 from mcp_agent.telemetry.tracing import setup_tracing
 
@@ -10,27 +12,23 @@ app = FastMCP("mcp-agent-server")
 setup_tracing("mcp-agent-server")
 
 
-async def run():
+async def run() -> None:
     async with stdio_server() as (read_stream, write_stream):
         await app._mcp_server.run(
             read_stream,
             write_stream,
-            app._mcp_server.create_initialization_options(
-                notification_options=NotificationOptions(
-                    tools_changed=True, resources_changed=True
-                )
-            ),
+            app._mcp_server.create_initialization_options(notification_options=NotificationOptions(tools_changed=True, resources_changed=True)),
         )
 
 
 @app.tool
-async def run_workflow(query: str):
+async def run_workflow(query: str) -> None:
     """Run the workflow given its name or id"""
     pass
 
 
 @app.tool
-async def pause_workflow(workflow_id: str):
+async def pause_workflow(workflow_id: str) -> None:
     """Pause a running workflow."""
     temporal_client = await get_temporal_client()
     handle = temporal_client.get_workflow_handle(workflow_id)
@@ -38,14 +36,14 @@ async def pause_workflow(workflow_id: str):
 
 
 @app.tool
-async def resume_workflow(workflow_id: str):
+async def resume_workflow(workflow_id: str) -> None:
     """Resume a paused workflow."""
     temporal_client = await get_temporal_client()
     handle = temporal_client.get_workflow_handle(workflow_id)
     await handle.signal("resume")
 
 
-async def provide_user_input(workflow_id: str, input_data: str):
+async def provide_user_input(workflow_id: str, input_data: str) -> None:
     """Provide user/human input to a waiting workflow step."""
     temporal_client = await get_temporal_client()
     handle = temporal_client.get_workflow_handle(workflow_id)