fast-agent-mcp 0.1.12__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
  2. fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
  3. mcp_agent/__init__.py +75 -0
  4. mcp_agent/agents/agent.py +61 -415
  5. mcp_agent/agents/base_agent.py +522 -0
  6. mcp_agent/agents/workflow/__init__.py +1 -0
  7. mcp_agent/agents/workflow/chain_agent.py +173 -0
  8. mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
  9. mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
  10. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +11 -21
  11. mcp_agent/agents/workflow/parallel_agent.py +182 -0
  12. mcp_agent/agents/workflow/router_agent.py +307 -0
  13. mcp_agent/app.py +15 -19
  14. mcp_agent/cli/commands/bootstrap.py +19 -38
  15. mcp_agent/cli/commands/config.py +4 -4
  16. mcp_agent/cli/commands/setup.py +7 -14
  17. mcp_agent/cli/main.py +7 -10
  18. mcp_agent/cli/terminal.py +3 -3
  19. mcp_agent/config.py +25 -40
  20. mcp_agent/context.py +12 -21
  21. mcp_agent/context_dependent.py +3 -5
  22. mcp_agent/core/agent_types.py +10 -7
  23. mcp_agent/core/direct_agent_app.py +179 -0
  24. mcp_agent/core/direct_decorators.py +443 -0
  25. mcp_agent/core/direct_factory.py +476 -0
  26. mcp_agent/core/enhanced_prompt.py +23 -55
  27. mcp_agent/core/exceptions.py +8 -8
  28. mcp_agent/core/fastagent.py +145 -371
  29. mcp_agent/core/interactive_prompt.py +424 -0
  30. mcp_agent/core/mcp_content.py +17 -17
  31. mcp_agent/core/prompt.py +6 -9
  32. mcp_agent/core/request_params.py +6 -3
  33. mcp_agent/core/validation.py +92 -18
  34. mcp_agent/executor/decorator_registry.py +9 -17
  35. mcp_agent/executor/executor.py +8 -17
  36. mcp_agent/executor/task_registry.py +2 -4
  37. mcp_agent/executor/temporal.py +19 -41
  38. mcp_agent/executor/workflow.py +3 -5
  39. mcp_agent/executor/workflow_signal.py +15 -21
  40. mcp_agent/human_input/handler.py +4 -7
  41. mcp_agent/human_input/types.py +2 -3
  42. mcp_agent/llm/__init__.py +2 -0
  43. mcp_agent/llm/augmented_llm.py +450 -0
  44. mcp_agent/llm/augmented_llm_passthrough.py +162 -0
  45. mcp_agent/llm/augmented_llm_playback.py +83 -0
  46. mcp_agent/llm/memory.py +103 -0
  47. mcp_agent/{workflows/llm → llm}/model_factory.py +22 -16
  48. mcp_agent/{workflows/llm → llm}/prompt_utils.py +1 -3
  49. mcp_agent/llm/providers/__init__.py +8 -0
  50. mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +8 -25
  51. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +56 -194
  52. mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
  53. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +99 -190
  54. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +72 -71
  55. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +65 -71
  56. mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +16 -44
  57. mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +4 -4
  58. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +9 -11
  59. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +8 -12
  60. mcp_agent/{workflows/llm → llm}/sampling_converter.py +3 -31
  61. mcp_agent/llm/sampling_format_converter.py +37 -0
  62. mcp_agent/logging/events.py +1 -5
  63. mcp_agent/logging/json_serializer.py +7 -6
  64. mcp_agent/logging/listeners.py +20 -23
  65. mcp_agent/logging/logger.py +17 -19
  66. mcp_agent/logging/rich_progress.py +10 -8
  67. mcp_agent/logging/tracing.py +4 -6
  68. mcp_agent/logging/transport.py +22 -22
  69. mcp_agent/mcp/gen_client.py +1 -3
  70. mcp_agent/mcp/interfaces.py +117 -110
  71. mcp_agent/mcp/logger_textio.py +97 -0
  72. mcp_agent/mcp/mcp_agent_client_session.py +7 -7
  73. mcp_agent/mcp/mcp_agent_server.py +8 -8
  74. mcp_agent/mcp/mcp_aggregator.py +102 -143
  75. mcp_agent/mcp/mcp_connection_manager.py +20 -27
  76. mcp_agent/mcp/prompt_message_multipart.py +68 -16
  77. mcp_agent/mcp/prompt_render.py +77 -0
  78. mcp_agent/mcp/prompt_serialization.py +30 -48
  79. mcp_agent/mcp/prompts/prompt_constants.py +18 -0
  80. mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
  81. mcp_agent/mcp/prompts/prompt_load.py +109 -0
  82. mcp_agent/mcp/prompts/prompt_server.py +155 -195
  83. mcp_agent/mcp/prompts/prompt_template.py +35 -66
  84. mcp_agent/mcp/resource_utils.py +7 -14
  85. mcp_agent/mcp/sampling.py +17 -17
  86. mcp_agent/mcp_server/agent_server.py +13 -17
  87. mcp_agent/mcp_server_registry.py +13 -22
  88. mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +3 -2
  89. mcp_agent/resources/examples/in_dev/slides.py +110 -0
  90. mcp_agent/resources/examples/internal/agent.py +6 -3
  91. mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
  92. mcp_agent/resources/examples/internal/job.py +2 -1
  93. mcp_agent/resources/examples/internal/prompt_category.py +1 -1
  94. mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
  95. mcp_agent/resources/examples/internal/sizer.py +2 -1
  96. mcp_agent/resources/examples/internal/social.py +2 -1
  97. mcp_agent/resources/examples/prompting/agent.py +2 -1
  98. mcp_agent/resources/examples/prompting/image_server.py +4 -8
  99. mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
  100. mcp_agent/ui/console_display.py +16 -20
  101. fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
  102. mcp_agent/core/agent_app.py +0 -646
  103. mcp_agent/core/agent_utils.py +0 -71
  104. mcp_agent/core/decorators.py +0 -455
  105. mcp_agent/core/factory.py +0 -463
  106. mcp_agent/core/proxies.py +0 -269
  107. mcp_agent/core/types.py +0 -24
  108. mcp_agent/eval/__init__.py +0 -0
  109. mcp_agent/mcp/stdio.py +0 -111
  110. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
  111. mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
  112. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
  113. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
  114. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
  115. mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
  116. mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
  117. mcp_agent/resources/examples/researcher/researcher-imp.py +0 -190
  118. mcp_agent/resources/examples/researcher/researcher.py +0 -38
  119. mcp_agent/resources/examples/workflows/chaining.py +0 -44
  120. mcp_agent/resources/examples/workflows/evaluator.py +0 -78
  121. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
  122. mcp_agent/resources/examples/workflows/human_input.py +0 -25
  123. mcp_agent/resources/examples/workflows/orchestrator.py +0 -73
  124. mcp_agent/resources/examples/workflows/parallel.py +0 -78
  125. mcp_agent/resources/examples/workflows/router.py +0 -53
  126. mcp_agent/resources/examples/workflows/sse.py +0 -23
  127. mcp_agent/telemetry/__init__.py +0 -0
  128. mcp_agent/telemetry/usage_tracking.py +0 -18
  129. mcp_agent/workflows/__init__.py +0 -0
  130. mcp_agent/workflows/embedding/__init__.py +0 -0
  131. mcp_agent/workflows/embedding/embedding_base.py +0 -61
  132. mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
  133. mcp_agent/workflows/embedding/embedding_openai.py +0 -46
  134. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  135. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -481
  136. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  137. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -120
  138. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -134
  139. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -45
  140. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -45
  141. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -161
  142. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
  143. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -60
  144. mcp_agent/workflows/llm/__init__.py +0 -0
  145. mcp_agent/workflows/llm/augmented_llm.py +0 -753
  146. mcp_agent/workflows/llm/augmented_llm_passthrough.py +0 -241
  147. mcp_agent/workflows/llm/augmented_llm_playback.py +0 -109
  148. mcp_agent/workflows/llm/providers/__init__.py +0 -8
  149. mcp_agent/workflows/llm/sampling_format_converter.py +0 -22
  150. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  151. mcp_agent/workflows/orchestrator/orchestrator.py +0 -578
  152. mcp_agent/workflows/parallel/__init__.py +0 -0
  153. mcp_agent/workflows/parallel/fan_in.py +0 -350
  154. mcp_agent/workflows/parallel/fan_out.py +0 -187
  155. mcp_agent/workflows/parallel/parallel_llm.py +0 -166
  156. mcp_agent/workflows/router/__init__.py +0 -0
  157. mcp_agent/workflows/router/router_base.py +0 -368
  158. mcp_agent/workflows/router/router_embedding.py +0 -240
  159. mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
  160. mcp_agent/workflows/router/router_embedding_openai.py +0 -59
  161. mcp_agent/workflows/router/router_llm.py +0 -320
  162. mcp_agent/workflows/swarm/__init__.py +0 -0
  163. mcp_agent/workflows/swarm/swarm.py +0 -320
  164. mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
  165. mcp_agent/workflows/swarm/swarm_openai.py +0 -41
  166. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
  167. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
  168. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
  169. /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
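Most of the renames above move the LLM layer from mcp_agent.workflows.llm to mcp_agent.llm (provider-specific modules under mcp_agent.llm.providers) and the workflow agents under mcp_agent.agents.workflow, while the old mcp_agent.workflows tree (embedding, intent_classifier, router, swarm, parallel, orchestrator) is deleted. A minimal import-migration sketch, using only module paths that appear in the list above; illustrative, not an exhaustive migration guide:

# 0.1.12 layout (modules removed or renamed in 0.2.0):
#   from mcp_agent.workflows.llm import model_factory
#   from mcp_agent.workflows.llm.providers import multipart_converter_openai

# 0.2.0 layout:
from mcp_agent.llm import model_factory
from mcp_agent.llm.providers import multipart_converter_openai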
--- a/mcp_agent/logging/transport.py
+++ b/mcp_agent/logging/transport.py
@@ -6,12 +6,14 @@ Transports for the Logger module for MCP Agent, including:
 
 import asyncio
 import json
+import traceback
 from abc import ABC, abstractmethod
-from typing import Dict, List, Protocol
 from pathlib import Path
+from typing import Dict, List, Protocol
 
 import aiohttp
 from opentelemetry import trace
+from rich import print
 from rich.json import JSON
 from rich.text import Text
 
@@ -20,8 +22,6 @@ from mcp_agent.console import console
 from mcp_agent.logging.events import Event, EventFilter
 from mcp_agent.logging.json_serializer import JSONSerializer
 from mcp_agent.logging.listeners import EventListener, LifecycleAwareListener
-from rich import print
-import traceback
 
 
 class EventTransport(Protocol):
@@ -30,7 +30,7 @@ class EventTransport(Protocol):
     (Kafka, RabbitMQ, REST, etc.).
     """
 
-    async def send_event(self, event: Event):
+    async def send_event(self, event: Event) -> None:
         """
         Send an event to the external system.
         Args:
@@ -44,10 +44,10 @@ class FilteredEventTransport(EventTransport, ABC):
     Event transport that filters events based on a filter before sending.
     """
 
-    def __init__(self, event_filter: EventFilter | None = None):
+    def __init__(self, event_filter: EventFilter | None = None) -> None:
         self.filter = event_filter
 
-    async def send_event(self, event: Event):
+    async def send_event(self, event: Event) -> None:
         if not self.filter or self.filter.matches(event):
             await self.send_matched_event(event)
 
@@ -59,7 +59,7 @@ class FilteredEventTransport(EventTransport, ABC):
 class NoOpTransport(FilteredEventTransport):
     """Default transport that does nothing (purely local)."""
 
-    async def send_matched_event(self, event):
+    async def send_matched_event(self, event) -> None:
         """Do nothing."""
         pass
 
@@ -67,7 +67,7 @@ class NoOpTransport(FilteredEventTransport):
 class ConsoleTransport(FilteredEventTransport):
     """Simple transport that prints events to console."""
 
-    def __init__(self, event_filter: EventFilter | None = None):
+    def __init__(self, event_filter: EventFilter | None = None) -> None:
         super().__init__(event_filter=event_filter)
         # Use shared console instances
         self._serializer = JSONSerializer()
@@ -78,7 +78,7 @@ class ConsoleTransport(FilteredEventTransport):
            "error": "bold red",
        }
 
-    async def send_matched_event(self, event: Event):
+    async def send_matched_event(self, event: Event) -> None:
        # Map log levels to styles
        style = self.log_level_styles.get(event.type, "white")
 
@@ -114,7 +114,7 @@ class FileTransport(FilteredEventTransport):
         event_filter: EventFilter | None = None,
         mode: str = "a",
         encoding: str = "utf-8",
-    ):
+    ) -> None:
         """Initialize FileTransport.
 
         Args:
@@ -186,7 +186,7 @@ class HTTPTransport(FilteredEventTransport):
         batch_size: int = 100,
         timeout: float = 5.0,
         event_filter: EventFilter | None = None,
-    ):
+    ) -> None:
         super().__init__(event_filter=event_filter)
         self.endpoint = endpoint
         self.headers = headers or {}
@@ -198,14 +198,14 @@
         self._session: aiohttp.ClientSession | None = None
         self._serializer = JSONSerializer()
 
-    async def start(self):
+    async def start(self) -> None:
         """Initialize HTTP session."""
         if not self._session:
             self._session = aiohttp.ClientSession(
                 headers=self.headers, timeout=aiohttp.ClientTimeout(total=self.timeout)
             )
 
-    async def stop(self):
+    async def stop(self) -> None:
         """Close HTTP session and flush any remaining events."""
         if self.batch:
             await self._flush()
@@ -213,14 +213,14 @@
             await self._session.close()
             self._session = None
 
-    async def send_matched_event(self, event: Event):
+    async def send_matched_event(self, event: Event) -> None:
         """Add event to batch, flush if batch is full."""
         async with self.lock:
             self.batch.append(event)
             if len(self.batch) >= self.batch_size:
                 await self._flush()
 
-    async def _flush(self):
+    async def _flush(self) -> None:
         """Send batch of events to HTTP endpoint."""
         if not self.batch:
             return
@@ -266,7 +266,7 @@ class AsyncEventBus:
 
     _instance = None
 
-    def __init__(self, transport: EventTransport | None = None):
+    def __init__(self, transport: EventTransport | None = None) -> None:
         self.transport: EventTransport = transport or NoOpTransport()
         self.listeners: Dict[str, EventListener] = {}
         self._queue = asyncio.Queue()
@@ -306,7 +306,7 @@
         # Clear the singleton instance
         cls._instance = None
 
-    async def start(self):
+    async def start(self) -> None:
         """Start the event bus and all lifecycle-aware listeners."""
         if self._running:
             return
@@ -321,7 +321,7 @@
         self._running = True
         self._task = asyncio.create_task(self._process_events())
 
-    async def stop(self):
+    async def stop(self) -> None:
         """Stop the event bus and all lifecycle-aware listeners."""
         if not self._running:
             return
@@ -369,7 +369,7 @@
                 except Exception as e:
                     print(f"Error stopping listener: {e}")
 
-    async def emit(self, event: Event):
+    async def emit(self, event: Event) -> None:
         """Emit an event to all listeners and transport."""
         # Inject current tracing info if available
         span = trace.get_current_span()
@@ -387,15 +387,15 @@
         # Then queue for listeners
         await self._queue.put(event)
 
-    def add_listener(self, name: str, listener: EventListener):
+    def add_listener(self, name: str, listener: EventListener) -> None:
         """Add a listener to the event bus."""
         self.listeners[name] = listener
 
-    def remove_listener(self, name: str):
+    def remove_listener(self, name: str) -> None:
         """Remove a listener from the event bus."""
         self.listeners.pop(name, None)
 
-    async def _process_events(self):
+    async def _process_events(self) -> None:
         """Process events from the queue until stopped."""
         while self._running:
             event = None
--- a/mcp_agent/mcp/gen_client.py
+++ b/mcp_agent/mcp/gen_client.py
@@ -78,8 +78,6 @@ async def disconnect(
     )
 
     if server_name:
-        await server_registry.connection_manager.disconnect_server(
-            server_name=server_name
-        )
+        await server_registry.connection_manager.disconnect_server(server_name=server_name)
     else:
         await server_registry.connection_manager.disconnect_all_servers()
--- a/mcp_agent/mcp/interfaces.py
+++ b/mcp_agent/mcp/interfaces.py
@@ -3,164 +3,171 @@ Interface definitions to prevent circular imports.
 This module defines protocols (interfaces) that can be used to break circular dependencies.
 """
 
-from contextlib import asynccontextmanager
+from datetime import timedelta
 from typing import (
     Any,
-    AsyncGenerator,
+    AsyncContextManager,
     Callable,
-    Generic,
+    Dict,
     List,
     Optional,
     Protocol,
+    Tuple,
     Type,
     TypeVar,
+    Union,
+    runtime_checkable,
 )
 
-from mcp import ClientSession
-from mcp.types import CreateMessageRequestParams
-from pydantic import Field
+from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
+from deprecated import deprecated
+from mcp import ClientSession, GetPromptResult, ReadResourceResult
+from pydantic import BaseModel
 
+from mcp_agent.core.prompt import Prompt
+from mcp_agent.core.request_params import RequestParams
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 
-class ServerRegistryProtocol(Protocol):
-    """
-    Protocol defining the minimal interface of ServerRegistry needed by gen_client.
-    This allows gen_client to depend on this protocol rather than the full ServerRegistry class.
-    """
+@runtime_checkable
+class MCPConnectionManagerProtocol(Protocol):
+    """Protocol for MCPConnectionManager functionality needed by ServerRegistry."""
 
-    @asynccontextmanager
-    async def initialize_server(
+    async def get_server(
         self,
         server_name: str,
-        client_session_factory=None,
-        init_hook=None,
-    ) -> AsyncGenerator[ClientSession, None]:
-        """Initialize a server and yield a client session."""
-        ...
+        client_session_factory: Optional[
+            Callable[
+                [
+                    MemoryObjectReceiveStream,
+                    MemoryObjectSendStream,
+                    Optional[timedelta],
+                ],
+                ClientSession,
+            ]
+        ] = None,
+    ) -> "ServerConnection": ...
 
-    @property
-    def connection_manager(self) -> "ConnectionManagerProtocol":
-        """Get the connection manager."""
-        ...
+    async def disconnect_server(self, server_name: str) -> None: ...
 
+    async def disconnect_all_servers(self) -> None: ...
 
-class ConnectionManagerProtocol(Protocol):
-    """
-    Protocol defining the minimal interface of ConnectionManager needed.
-    """
 
-    async def get_server(
+@runtime_checkable
+class ServerRegistryProtocol(Protocol):
+    """Protocol defining the minimal interface of ServerRegistry needed by gen_client."""
+
+    @property
+    def connection_manager(self) -> MCPConnectionManagerProtocol: ...
+
+    def initialize_server(
         self,
         server_name: str,
-        client_session_factory=None,
-    ):
-        """Get a server connection."""
+        client_session_factory: Optional[
+            Callable[
+                [
+                    MemoryObjectReceiveStream,
+                    MemoryObjectSendStream,
+                    Optional[timedelta],
+                ],
+                ClientSession,
+            ]
+        ] = None,
+        init_hook: Optional[Callable] = None,
+    ) -> AsyncContextManager[ClientSession]:
+        """Initialize a server and yield a client session."""
         ...
 
-    async def disconnect_server(self, server_name: str) -> None:
-        """Disconnect from a server."""
-        ...
 
-    async def disconnect_all_servers(self) -> None:
-        """Disconnect from all servers."""
-        ...
+class ServerConnection(Protocol):
+    """Protocol for server connection objects returned by MCPConnectionManager."""
 
+    @property
+    def session(self) -> ClientSession: ...
 
-# Type variables for generic protocols
-MessageParamT = TypeVar("MessageParamT")
-"""A type representing an input message to an LLM."""
 
-MessageT = TypeVar("MessageT")
-"""A type representing an output message from an LLM."""
+ModelT = TypeVar("ModelT", bound=BaseModel)
 
-ModelT = TypeVar("ModelT")
-"""A type representing a structured output message from an LLM."""
 
+class AugmentedLLMProtocol(Protocol):
+    """Protocol defining the interface for augmented LLMs"""
 
-class RequestParams(CreateMessageRequestParams):
-    """
-    Parameters to configure the AugmentedLLM 'generate' requests.
-    """
+    async def structured(
+        self,
+        prompt: List[PromptMessageMultipart],
+        model: Type[ModelT],
+        request_params: RequestParams | None = None,
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
+        """Apply the prompt and return the result as a Pydantic model, or None if coercion fails"""
+        ...
 
-    messages: None = Field(exclude=True, default=None)
-    """
-    Ignored. 'messages' are removed from CreateMessageRequestParams
-    to avoid confusion with the 'message' parameter on 'generate' method.
-    """
+    async def generate(
+        self,
+        multipart_messages: List[PromptMessageMultipart],
+        request_params: RequestParams | None = None,
+    ) -> PromptMessageMultipart:
+        """
+        Apply a list of PromptMessageMultipart messages directly to the LLM.
 
-    maxTokens: int = 2048
-    """The maximum number of tokens to sample, as requested by the server."""
 
-    model: str | None = None
-    """
-    The model to use for the LLM generation.
-    If specified, this overrides the 'modelPreferences' selection criteria.
-    """
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects
+            request_params: Optional parameters to configure the LLM request
 
-    use_history: bool = True
-    """
-    Include the message history in the generate request.
-    """
+        Returns:
+            A PromptMessageMultipart containing the Assistant response, including Tool Content
+        """
+        ...
 
-    max_iterations: int = 10
-    """
-    The maximum number of iterations to run the LLM for.
-    """
 
-    parallel_tool_calls: bool = True
-    """
-    Whether to allow multiple tool calls per iteration.
-    Also known as multi-step tool use.
-    """
+class AgentProtocol(AugmentedLLMProtocol, Protocol):
+    """Protocol defining the standard agent interface"""
 
+    name: str
 
-class AugmentedLLMProtocol(Protocol, Generic[MessageParamT, MessageT]):
-    """Protocol defining the interface for augmented LLMs"""
+    async def __call__(self, message: Union[str, PromptMessageMultipart] | None = None) -> str:
+        """Make the agent callable for sending messages directly."""
+        ...
 
-    async def generate(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> List[MessageT]:
-        """Request an LLM generation, which may run multiple iterations, and return the result"""
+    async def send(self, message: Union[str, PromptMessageMultipart]) -> str:
+        """Send a message to the agent and get a response"""
+        ...
 
-    async def generate_str(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """Request an LLM generation and return the string representation of the result"""
+    async def prompt(self, default_prompt: str = "") -> str:
+        """Start an interactive prompt session with the agent"""
+        ...
 
-    async def generate_structured(
-        self,
-        message: str | MessageParamT | List[MessageParamT],
-        response_model: Type[ModelT],
-        request_params: RequestParams | None = None,
-    ) -> ModelT:
-        """Request a structured LLM generation and return the result as a Pydantic model."""
+    async def apply_prompt(self, prompt_name: str, arguments: Dict[str, str] | None = None) -> str:
+        """Apply an MCP prompt template by name"""
+        ...
 
-    async def generate_prompt(
-        self, prompt: PromptMessageMultipart, request_params: RequestParams | None
-    ) -> str:
-        """Request an LLM generation and return a string representation of the result"""
+    async def get_prompt(self, prompt_name: str) -> GetPromptResult: ...
+
+    async def list_prompts(self, server_name: str | None) -> Dict[str, List[Prompt]]: ...
+
+    async def get_resource(self, server_name: str, resource_uri: str) -> ReadResourceResult: ...
 
-    async def apply_prompt(
+    @deprecated
+    async def generate_str(self, message: str, request_params: RequestParams | None) -> str:
+        """Generate a response. Deprecated: please use send instead"""
+        ...
+
+    async def with_resource(
         self,
-        multipart_messages: List["PromptMessageMultipart"],
-        request_params: RequestParams | None = None,
+        prompt_content: Union[str, PromptMessageMultipart],
+        server_name: str,
+        resource_name: str,
     ) -> str:
-        """
-        Apply a list of PromptMessageMultipart messages directly to the LLM.
-        This is a cleaner interface to _apply_prompt_template_provider_specific.
+        """Send a message with an attached MCP resource"""
+        ...
 
-        Args:
-            multipart_messages: List of PromptMessageMultipart objects
-            request_params: Optional parameters to configure the LLM request
+    async def initialize(self) -> None:
+        """Initialize the agent and connect to MCP servers"""
+        ...
 
-        Returns:
-            String representation of the assistant's response
-        """
+    async def shutdown(self) -> None:
+        """Shut down the agent and close connections"""
+        ...
 
 
 class ModelFactoryClassProtocol(Protocol):
@@ -172,7 +179,7 @@ class ModelFactoryClassProtocol(Protocol):
     @classmethod
     def create_factory(
         cls, model_string: str, request_params: Optional[RequestParams] = None
-    ) -> Callable[..., AugmentedLLMProtocol[Any, Any]]:
+    ) -> Callable[..., Any]:
         """
         Creates a factory function that can be used to construct an LLM instance.
 
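The interfaces.py rewrite above replaces the old generate/generate_str/generate_structured surface with a two-method AugmentedLLMProtocol (generate and structured) plus an AgentProtocol that standardises send, prompt, apply_prompt and resource access. A minimal sketch, not taken from the package, of caller code written against the new protocol; the agent object, prompt name and arguments are hypothetical:

from mcp_agent.mcp.interfaces import AgentProtocol


async def summarise(agent: AgentProtocol) -> str:
    # send() is the plain string round-trip; generate_str() is now deprecated in its favour
    reply = await agent.send("Summarise the latest changes in two sentences.")
    # MCP prompt templates are applied by name, with optional string arguments
    templated = await agent.apply_prompt("status-report", {"project": "fast-agent"})
    return reply + "\n" + templated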
--- /dev/null
+++ b/mcp_agent/mcp/logger_textio.py
@@ -0,0 +1,97 @@
+"""
+Utilities for MCP stdio client integration with our logging system.
+"""
+
+import io
+import sys
+from typing import TextIO
+
+from mcp_agent.logging.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+class LoggerTextIO(TextIO):
+    """
+    A TextIO implementation that logs to our application logger.
+    This implements the full TextIO interface as specified by Python.
+
+    Args:
+        server_name: The name of the server to include in logs
+    """
+
+    def __init__(self, server_name: str) -> None:
+        super().__init__()
+        self.server_name = server_name
+        # Use a StringIO for buffering
+        self._buffer = io.StringIO()
+        # Keep track of complete and partial lines
+        self._line_buffer = ""
+
+    def write(self, s: str) -> int:
+        """
+        Write data to our buffer and log any complete lines.
+        """
+        if not s:
+            return 0
+
+        # Handle line buffering for clean log output
+        text = self._line_buffer + s
+        lines = text.split("\n")
+
+        # If the text ends with a newline, the last line is complete
+        if text.endswith("\n"):
+            complete_lines = lines
+            self._line_buffer = ""
+        else:
+            # Otherwise, the last line is incomplete
+            complete_lines = lines[:-1]
+            self._line_buffer = lines[-1]
+
+        # Log complete lines but at debug level instead of info to prevent console spam
+        for line in complete_lines:
+            if line.strip():  # Only log non-empty lines
+                logger.debug(f"{self.server_name} (stderr): {line}")
+
+        # Always write to the underlying buffer
+        return self._buffer.write(s)
+
+    def flush(self) -> None:
+        """Flush the internal buffer."""
+        self._buffer.flush()
+
+    def close(self) -> None:
+        """Close the stream."""
+        # Log any remaining content in the line buffer
+        if self._line_buffer and self._line_buffer.strip():
+            logger.debug(f"{self.server_name} (stderr): {self._line_buffer}")
+        self._buffer.close()
+
+    def readable(self) -> bool:
+        return False
+
+    def writable(self) -> bool:
+        return True
+
+    def seekable(self) -> bool:
+        return False
+
+    def fileno(self) -> int:
+        """
+        Return a file descriptor for this stream.
+        We use sys.stderr's fileno since TextIO is expected to return a real file descriptor.
+        """
+        return sys.stderr.fileno()
+
+
+def get_stderr_handler(server_name: str) -> TextIO:
+    """
+    Get a stderr handler that routes MCP server errors to our logger.
+
+    Args:
+        server_name: The name of the server to include in logs
+
+    Returns:
+        A TextIO object that can be used as stderr by MCP
+    """
+    return LoggerTextIO(server_name)
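logger_textio.py is new in 0.2.0: it wraps stderr-style output from spawned MCP servers and forwards complete lines to the application logger. A small usage sketch, not part of the diff; the server name is illustrative:

from mcp_agent.mcp.logger_textio import get_stderr_handler

stderr_io = get_stderr_handler("filesystem")
stderr_io.write("starting up")    # partial line: buffered, nothing logged yet
stderr_io.write(" ... done\n")    # newline completes the line, logged at DEBUG as
                                  # "filesystem (stderr): starting up ... done"
stderr_io.close()                 # logs any remaining partial line, then closes the buffer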
--- a/mcp_agent/mcp/mcp_agent_client_session.py
+++ b/mcp_agent/mcp/mcp_agent_client_session.py
@@ -3,12 +3,12 @@ A derived client session for the MCP Agent framework.
 It adds logging and supports sampling requests.
 """
 
-from typing import Optional
+from typing import TYPE_CHECKING, Optional
 
 from mcp import ClientSession
 from mcp.shared.session import (
-    ReceiveResultT,
     ReceiveNotificationT,
+    ReceiveResultT,
     RequestId,
     SendNotificationT,
     SendRequestT,
@@ -21,11 +21,13 @@ from mcp.types import (
 )
 from pydantic import AnyUrl
 
-from mcp_agent.config import MCPServerSettings
 from mcp_agent.context_dependent import ContextDependent
 from mcp_agent.logging.logger import get_logger
 from mcp_agent.mcp.sampling import sample
 
+if TYPE_CHECKING:
+    from mcp_agent.config import MCPServerSettings
+
 logger = get_logger(__name__)
 
 
@@ -63,10 +65,8 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
     Developers can extend this class to add more custom functionality as needed
     """
 
-    def __init__(self, *args, **kwargs):
-        super().__init__(
-            *args, **kwargs, list_roots_callback=list_roots, sampling_callback=sample
-        )
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, **kwargs, list_roots_callback=list_roots, sampling_callback=sample)
         self.server_config: Optional[MCPServerSettings] = None
 
     async def send_request(
--- a/mcp_agent/mcp/mcp_agent_server.py
+++ b/mcp_agent/mcp/mcp_agent_server.py
@@ -1,7 +1,9 @@
 import asyncio
+
 from mcp.server import NotificationOptions
 from mcp.server.fastmcp import FastMCP
 from mcp.server.stdio import stdio_server
+
 from mcp_agent.executor.temporal import get_temporal_client
 from mcp_agent.telemetry.tracing import setup_tracing
 
@@ -10,27 +12,25 @@ app = FastMCP("mcp-agent-server")
 setup_tracing("mcp-agent-server")
 
 
-async def run():
+async def run() -> None:
     async with stdio_server() as (read_stream, write_stream):
         await app._mcp_server.run(
             read_stream,
             write_stream,
             app._mcp_server.create_initialization_options(
-                notification_options=NotificationOptions(
-                    tools_changed=True, resources_changed=True
-                )
+                notification_options=NotificationOptions(tools_changed=True, resources_changed=True)
             ),
         )
 
 
 @app.tool
-async def run_workflow(query: str):
+async def run_workflow(query: str) -> None:
     """Run the workflow given its name or id"""
     pass
 
 
 @app.tool
-async def pause_workflow(workflow_id: str):
+async def pause_workflow(workflow_id: str) -> None:
     """Pause a running workflow."""
     temporal_client = await get_temporal_client()
     handle = temporal_client.get_workflow_handle(workflow_id)
@@ -38,14 +38,14 @@ async def pause_workflow(workflow_id: str):
 
 
 @app.tool
-async def resume_workflow(workflow_id: str):
+async def resume_workflow(workflow_id: str) -> None:
     """Resume a paused workflow."""
     temporal_client = await get_temporal_client()
     handle = temporal_client.get_workflow_handle(workflow_id)
     await handle.signal("resume")
 
 
-async def provide_user_input(workflow_id: str, input_data: str):
+async def provide_user_input(workflow_id: str, input_data: str) -> None:
     """Provide user/human input to a waiting workflow step."""
     temporal_client = await get_temporal_client()
     handle = temporal_client.get_workflow_handle(workflow_id)