fast-agent-mcp 0.2.13__py3-none-any.whl → 0.2.16__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in their public registries.
- {fast_agent_mcp-0.2.13.dist-info → fast_agent_mcp-0.2.16.dist-info}/METADATA +1 -1
- {fast_agent_mcp-0.2.13.dist-info → fast_agent_mcp-0.2.16.dist-info}/RECORD +36 -36
- mcp_agent/agents/agent.py +2 -2
- mcp_agent/agents/base_agent.py +3 -3
- mcp_agent/agents/workflow/chain_agent.py +2 -2
- mcp_agent/agents/workflow/evaluator_optimizer.py +3 -3
- mcp_agent/agents/workflow/orchestrator_agent.py +3 -3
- mcp_agent/agents/workflow/parallel_agent.py +2 -2
- mcp_agent/agents/workflow/router_agent.py +2 -2
- mcp_agent/cli/commands/check_config.py +450 -0
- mcp_agent/cli/commands/setup.py +1 -1
- mcp_agent/cli/main.py +8 -15
- mcp_agent/config.py +4 -7
- mcp_agent/core/agent_types.py +8 -8
- mcp_agent/core/direct_decorators.py +10 -8
- mcp_agent/core/direct_factory.py +4 -1
- mcp_agent/core/enhanced_prompt.py +6 -5
- mcp_agent/core/interactive_prompt.py +70 -50
- mcp_agent/core/validation.py +6 -4
- mcp_agent/event_progress.py +6 -6
- mcp_agent/llm/augmented_llm.py +10 -2
- mcp_agent/llm/augmented_llm_passthrough.py +5 -3
- mcp_agent/llm/augmented_llm_playback.py +2 -1
- mcp_agent/llm/model_factory.py +7 -27
- mcp_agent/llm/provider_key_manager.py +83 -0
- mcp_agent/llm/provider_types.py +16 -0
- mcp_agent/llm/providers/augmented_llm_anthropic.py +5 -26
- mcp_agent/llm/providers/augmented_llm_deepseek.py +5 -24
- mcp_agent/llm/providers/augmented_llm_generic.py +4 -16
- mcp_agent/llm/providers/augmented_llm_openai.py +4 -26
- mcp_agent/llm/providers/augmented_llm_openrouter.py +17 -45
- mcp_agent/mcp/interfaces.py +2 -1
- mcp_agent/mcp_server/agent_server.py +120 -38
- mcp_agent/cli/commands/config.py +0 -11
- mcp_agent/executor/temporal.py +0 -383
- mcp_agent/executor/workflow.py +0 -195
- {fast_agent_mcp-0.2.13.dist-info → fast_agent_mcp-0.2.16.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.13.dist-info → fast_agent_mcp-0.2.16.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.13.dist-info → fast_agent_mcp-0.2.16.dist-info}/licenses/LICENSE +0 -0
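
The dominant change across the provider modules below is the removal of each provider's hand-rolled `_api_key()` method in favour of two new modules listed above: mcp_agent/llm/provider_types.py (a `Provider` enum) and mcp_agent/llm/provider_key_manager.py (centralized key resolution). The new modules themselves are not shown in this diff; the following is a minimal sketch of the enum, assuming only the members actually referenced in the hunks below.

# Hypothetical sketch of mcp_agent/llm/provider_types.py. Only the five members
# referenced in the hunks below are certain to exist; the string values are assumptions.
from enum import Enum


class Provider(Enum):
    """Identifies an LLM provider for configuration and API-key lookup."""

    ANTHROPIC = "anthropic"
    DEEPSEEK = "deepseek"
    GENERIC = "generic"
    OPENAI = "openai"
    OPENROUTER = "openrouter"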
mcp_agent/llm/providers/augmented_llm_anthropic.py CHANGED

@@ -1,9 +1,9 @@
-import os
 from typing import TYPE_CHECKING, List
 
 from mcp.types import EmbeddedResource, ImageContent, TextContent
 
 from mcp_agent.core.prompt import Prompt
+from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.providers.multipart_converter_anthropic import (
     AnthropicConverter,
 )

@@ -51,12 +51,12 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
     """
 
     def __init__(self, *args, **kwargs) -> None:
-        self.provider = "Anthropic"
         # Initialize logger - keep it simple without name reference
         self.logger = get_logger(__name__)
 
-
-
+        super().__init__(
+            *args, provider=Provider.ANTHROPIC, type_converter=AnthropicSamplingConverter, **kwargs
+        )
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize Anthropic-specific default parameters"""

@@ -83,7 +83,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         Override this method to use a different LLM.
         """
 
-        api_key = self._api_key(
+        api_key = self._api_key()
         base_url = self._base_url()
         if base_url and base_url.endswith("/v1"):
             base_url = base_url.rstrip("/v1")

@@ -277,27 +277,6 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         return responses
 
-    def _api_key(self, config):
-        api_key = None
-
-        if hasattr(config, "anthropic") and config.anthropic:
-            api_key = config.anthropic.api_key
-            if api_key == "<your-api-key-here>":
-                api_key = None
-
-        if api_key is None:
-            api_key = os.getenv("ANTHROPIC_API_KEY")
-
-        if not api_key:
-            raise ProviderKeyError(
-                "Anthropic API key not configured",
-                "The Anthropic API key is required but not set.\n"
-                "Add it to your configuration file under anthropic.api_key "
-                "or set the ANTHROPIC_API_KEY environment variable.",
-            )
-
-        return api_key
-
     async def generate_messages(
         self,
         message_param,
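
The `_api_key()` method deleted here, and the near-identical copies deleted from the DeepSeek, generic, OpenAI, and OpenRouter providers below, all followed the same config-then-environment lookup. The replacement lives in the new mcp_agent/llm/provider_key_manager.py, which this diff does not display; the sketch below reconstructs the shared pattern, with the function name and placeholder constant as illustrative assumptions.

# Sketch of the lookup logic shared by the removed per-provider _api_key() methods,
# presumably generalized in provider_key_manager.py. get_api_key and PLACEHOLDER
# are illustrative names, not confirmed API.
import os

from mcp_agent.core.exceptions import ProviderKeyError

PLACEHOLDER = "<your-api-key-here>"


def get_api_key(provider_name: str, config) -> str:
    """Resolve a key from config, falling back to the <PROVIDER>_API_KEY env var."""
    settings = getattr(config, provider_name, None)
    api_key = getattr(settings, "api_key", None) if settings else None
    if api_key == PLACEHOLDER:
        api_key = None  # Treat the config-template placeholder as unset
    if api_key is None:
        api_key = os.getenv(f"{provider_name.upper()}_API_KEY")
    if not api_key:
        raise ProviderKeyError(
            f"{provider_name} API key not configured",
            f"Add it to your configuration file under {provider_name}.api_key "
            f"or set the {provider_name.upper()}_API_KEY environment variable.",
        )
    return api_key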
mcp_agent/llm/providers/augmented_llm_deepseek.py CHANGED

@@ -1,7 +1,6 @@
-import os
 
-from mcp_agent.core.exceptions import ProviderKeyError
 from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
 
 DEEPSEEK_BASE_URL = "https://api.deepseek.com"

@@ -11,7 +10,9 @@ DEFAULT_DEEPSEEK_MODEL = "deepseekchat"  # current Deepseek only has two type mo
 class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
     def __init__(self, *args, **kwargs) -> None:
         kwargs["provider_name"] = "Deepseek"  # Set provider name in kwargs
-        super().__init__(
+        super().__init__(
+            *args, provider=Provider.DEEPSEEK, **kwargs
+        )  # Properly pass args and kwargs to parent
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize Deepseek-specific default parameters"""

@@ -25,28 +26,8 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
             use_history=True,
         )
 
-    def _api_key(self) -> str:
-        config = self.context.config
-        api_key = None
-
-        if config and config.deepseek:
-            api_key = config.deepseek.api_key
-            if api_key == "<your-api-key-here>":
-                api_key = None
-
-        if api_key is None:
-            api_key = os.getenv("DEEPSEEK_API_KEY")
-
-        if not api_key:
-            raise ProviderKeyError(
-                "DEEPSEEK API key not configured",
-                "The DEEKSEEK API key is required but not set.\n"
-                "Add it to your configuration file under deepseek.api_key\n"
-                "Or set the DEEPSEEK_API_KEY environment variable",
-            )
-        return api_key
-
     def _base_url(self) -> str:
+        base_url = None
         if self.context.config and self.context.config.deepseek:
             base_url = self.context.config.deepseek.base_url
 
mcp_agent/llm/providers/augmented_llm_generic.py CHANGED

@@ -1,6 +1,7 @@
 import os
 
 from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
 
 DEFAULT_OLLAMA_BASE_URL = "http://localhost:11434/v1"

@@ -10,8 +11,9 @@ DEFAULT_OLLAMA_API_KEY = "ollama"
 
 class GenericAugmentedLLM(OpenAIAugmentedLLM):
     def __init__(self, *args, **kwargs) -> None:
-
-
+        super().__init__(
+            *args, provider=Provider.GENERIC, **kwargs
+        )  # Properly pass args and kwargs to parent
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize Generic parameters"""

@@ -25,20 +27,6 @@ class GenericAugmentedLLM(OpenAIAugmentedLLM):
             use_history=True,
         )
 
-    def _api_key(self) -> str:
-        config = self.context.config
-        api_key = None
-
-        if config and config.generic:
-            api_key = config.generic.api_key
-            if api_key == "<your-api-key-here>":
-                api_key = None
-
-        if api_key is None:
-            api_key = os.getenv("GENERIC_API_KEY")
-
-        return api_key or "ollama"
-
     def _base_url(self) -> str:
         base_url = os.getenv("GENERIC_BASE_URL", DEFAULT_OLLAMA_BASE_URL)
         if self.context.config and self.context.config.generic:
mcp_agent/llm/providers/augmented_llm_openai.py CHANGED

@@ -1,4 +1,3 @@
-import os
 from typing import List, Tuple, Type
 
 from mcp.types import (

@@ -29,6 +28,7 @@ from mcp_agent.llm.augmented_llm import (
     ModelT,
     RequestParams,
 )
+from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.providers.multipart_converter_openai import OpenAIConverter
 from mcp_agent.llm.providers.sampling_converter_openai import (
     OpenAISamplingConverter,

@@ -49,14 +49,13 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
     This implementation uses OpenAI's ChatCompletion as the LLM.
     """
 
-    def __init__(self,
+    def __init__(self, provider: Provider = Provider.OPENAI, *args, **kwargs) -> None:
         # Set type_converter before calling super().__init__
         if "type_converter" not in kwargs:
             kwargs["type_converter"] = OpenAISamplingConverter
 
-        super().__init__(*args, **kwargs)
+        super().__init__(*args, provider=provider, **kwargs)
 
-        self.provider = provider_name
         # Initialize logger with name if available
         self.logger = get_logger(f"{__name__}.{self.name}" if self.name else __name__)
 

@@ -90,27 +89,6 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             use_history=True,
         )
 
-    def _api_key(self) -> str:
-        config = self.context.config
-        api_key = None
-
-        if hasattr(config, "openai") and config.openai:
-            api_key = config.openai.api_key
-            if api_key == "<your-api-key-here>":
-                api_key = None
-
-        if api_key is None:
-            api_key = os.getenv("OPENAI_API_KEY")
-
-        if not api_key:
-            raise ProviderKeyError(
-                "OpenAI API key not configured",
-                "The OpenAI API key is required but not set.\n"
-                "Add it to your configuration file under openai.api_key\n"
-                "Or set the OPENAI_API_KEY environment variable",
-            )
-        return api_key
-
     def _base_url(self) -> str:
         return self.context.config.openai.base_url if self.context.config.openai else None
 

@@ -371,7 +349,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         The parsed response as a Pydantic model, or None if parsing fails
         """
 
-        if not
+        if not Provider.OPENAI == self.provider:
             return await super().structured(prompt, model, request_params)
 
         logger = get_logger(__name__)
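
With the base class now accepting `provider: Provider = Provider.OPENAI` and resolving keys itself, an OpenAI-compatible subclass reduces to passing its `Provider` member up and overriding `_base_url()`, as the DeepSeek, generic, and OpenRouter hunks show. A condensed, hypothetical subclass for illustration (the class name and URL are invented; `Provider.GENERIC` is the member this codebase uses for OpenAI-compatible local endpoints):

from mcp_agent.llm.provider_types import Provider
from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM


class ExampleAugmentedLLM(OpenAIAugmentedLLM):
    """Invented subclass showing the post-refactor constructor contract."""

    def __init__(self, *args, **kwargs) -> None:
        # Key lookup, logging, and type conversion now happen in the base class.
        super().__init__(*args, provider=Provider.GENERIC, **kwargs)

    def _base_url(self) -> str:
        # Point the OpenAI-compatible client at a custom endpoint.
        return "https://llm.example.invalid/v1"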
mcp_agent/llm/providers/augmented_llm_openrouter.py CHANGED

@@ -1,19 +1,19 @@
 import os
 
-from mcp_agent.core.exceptions import ProviderKeyError
 from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
 
 DEFAULT_OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
 # No single default model for OpenRouter, users must specify full path
-DEFAULT_OPENROUTER_MODEL = None
+DEFAULT_OPENROUTER_MODEL = None
 
 
 class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):
     """Augmented LLM provider for OpenRouter, using an OpenAI-compatible API."""
+
     def __init__(self, *args, **kwargs) -> None:
-
-        super().__init__(*args, **kwargs)
+        super().__init__(*args, provider=Provider.OPENROUTER, **kwargs)
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize OpenRouter-specific default parameters."""

@@ -21,58 +21,30 @@ class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):
         # The model should be passed in the 'model' kwarg during factory creation.
         chosen_model = kwargs.get("model", DEFAULT_OPENROUTER_MODEL)
         if not chosen_model:
-
-
-
-
-
-
-
+            # Unlike Deepseek, OpenRouter *requires* a model path in the identifier.
+            # The factory should extract this before calling the constructor.
+            # We rely on the model being passed correctly via kwargs.
+            # If it's still None here, it indicates an issue upstream (factory or user input).
+            # However, the base class _get_model handles the error if model is None.
+            pass
 
         return RequestParams(
-            model=chosen_model,
+            model=chosen_model,  # Will be validated by base class
             systemPrompt=self.instruction,
-            parallel_tool_calls=True,
-            max_iterations=10,
-            use_history=True,
+            parallel_tool_calls=True,  # Default based on OpenAI provider
+            max_iterations=10,  # Default based on OpenAI provider
+            use_history=True,  # Default based on OpenAI provider
         )
 
-    def _api_key(self) -> str:
-        """Retrieve the OpenRouter API key from config or environment variables."""
-        config = self.context.config
-        api_key = None
-
-        # Check config file first
-        if config and hasattr(config, 'openrouter') and config.openrouter:
-            api_key = getattr(config.openrouter, 'api_key', None)
-            if api_key == "<your-openrouter-api-key-here>" or not api_key:
-                api_key = None
-
-        # Fallback to environment variable
-        if api_key is None:
-            api_key = os.getenv("OPENROUTER_API_KEY")
-
-        if not api_key:
-            raise ProviderKeyError(
-                "OpenRouter API key not configured",
-                "The OpenRouter API key is required but not set.\n"
-                "Add it to your configuration file under openrouter.api_key\n"
-                "Or set the OPENROUTER_API_KEY environment variable.",
-            )
-        return api_key
-
     def _base_url(self) -> str:
         """Retrieve the OpenRouter base URL from config or use the default."""
         base_url = os.getenv("OPENROUTER_BASE_URL", DEFAULT_OPENROUTER_BASE_URL)  # Default
         config = self.context.config
-
+
         # Check config file for override
-        if config and hasattr(config,
-            config_base_url = getattr(config.openrouter,
+        if config and hasattr(config, "openrouter") and config.openrouter:
+            config_base_url = getattr(config.openrouter, "base_url", None)
             if config_base_url:
                 base_url = config_base_url
 
         return base_url
-
-    # Other methods like _get_model, _send_request etc., are inherited from OpenAIAugmentedLLM
-    # We may override them later if OpenRouter deviates significantly or offers unique features.
mcp_agent/mcp/interfaces.py CHANGED

@@ -26,6 +26,7 @@ from mcp import ClientSession
 from mcp.types import GetPromptResult, Prompt, PromptMessage, ReadResourceResult
 from pydantic import BaseModel
 
+from mcp_agent.core.agent_types import AgentType
 from mcp_agent.core.request_params import RequestParams
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 

@@ -137,7 +138,7 @@ class AgentProtocol(AugmentedLLMProtocol, Protocol):
     name: str
 
     @property
-    def agent_type(self) ->
+    def agent_type(self) -> AgentType:
        """Return the type of this agent"""
        ...
 
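
The `agent_type` property is now annotated to return the `AgentType` enum from mcp_agent/core/agent_types.py (a file this diff touches but does not display). A minimal conforming implementation might look as follows; `AgentType.BASIC` is an assumed member name.

from mcp_agent.core.agent_types import AgentType


class MyAgent:
    """Minimal sketch of an AgentProtocol implementer under the new annotation."""

    name: str = "my_agent"

    @property
    def agent_type(self) -> AgentType:
        """Return the type of this agent"""
        return AgentType.BASIC  # Assumed enum member, for illustration only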
mcp_agent/mcp_server/agent_server.py CHANGED

@@ -143,13 +143,38 @@ class AgentMCPServer:
             self.mcp_server.settings.host = host
             self.mcp_server.settings.port = port
 
-
-
-
-
-
-
-
+            # For synchronous run, we can use the simpler approach
+            try:
+                # Add any server attributes that might help with shutdown
+                if not hasattr(self.mcp_server, "_server_should_exit"):
+                    self.mcp_server._server_should_exit = False
+
+                # Run the server
+                self.mcp_server.run(transport=transport)
+            except KeyboardInterrupt:
+                print("\nServer stopped by user (CTRL+C)")
+            except SystemExit as e:
+                # Handle normal exit
+                print(f"\nServer exiting with code {e.code}")
+                # Re-raise to allow normal exit process
+                raise
+            except Exception as e:
+                print(f"\nServer error: {e}")
+            finally:
+                # Run an async cleanup in a new event loop
+                try:
+                    asyncio.run(self.shutdown())
+                except (SystemExit, KeyboardInterrupt):
+                    # These are expected during shutdown
+                    pass
+        else:  # stdio
+            try:
+                self.mcp_server.run(transport=transport)
+            except KeyboardInterrupt:
+                print("\nServer stopped by user (CTRL+C)")
+            finally:
+                # Minimal cleanup for stdio
+                asyncio.run(self._cleanup_stdio())
 
     async def run_async(
         self, transport: str = "sse", host: str = "0.0.0.0", port: int = 8000

@@ -169,20 +194,26 @@ class AgentMCPServer:
             try:
                 # Wait for the server task to complete
                 await self._server_task
-            except asyncio.CancelledError:
-
-
+            except (asyncio.CancelledError, KeyboardInterrupt):
+                # Both cancellation and KeyboardInterrupt are expected during shutdown
+                logger.info("Server stopped via cancellation or interrupt")
+                print("\nServer stopped")
+            except SystemExit as e:
+                # Handle normal exit cleanly
+                logger.info(f"Server exiting with code {e.code}")
+                print(f"\nServer exiting with code {e.code}")
+                # If this is exit code 0, let it propagate for normal exit
+                if e.code == 0:
+                    raise
             except Exception as e:
                 logger.error(f"Server error: {e}", exc_info=True)
                 print(f"\nServer error: {e}")
             finally:
-                #
-                await self.
-                logger.info("Server shutdown complete.")
+                # Only do minimal cleanup - don't try to be too clever
+                await self._cleanup_stdio()
                 print("\nServer shutdown complete.")
         else:  # stdio
             # For STDIO, use simpler approach that respects STDIO lifecycle
-            # STDIO will naturally terminate when streams close
             try:
                 # Run directly without extra monitoring or signal handlers
                 # This preserves the natural lifecycle of STDIO connections

@@ -190,9 +221,14 @@ class AgentMCPServer:
             except (asyncio.CancelledError, KeyboardInterrupt):
                 logger.info("Server stopped (CTRL+C)")
                 print("\nServer stopped (CTRL+C)")
-
+            except SystemExit as e:
+                # Handle normal exit cleanly
+                logger.info(f"Server exiting with code {e.code}")
+                print(f"\nServer exiting with code {e.code}")
+                # If this is exit code 0, let it propagate for normal exit
+                if e.code == 0:
+                    raise
             # Only perform minimal cleanup needed for STDIO
-            # Don't use our full shutdown procedure which could keep process alive
             await self._cleanup_stdio()
 
     async def _run_server_with_shutdown(self, transport: str):

@@ -246,7 +282,6 @@ class AgentMCPServer:
             force_shutdown_task = asyncio.create_task(self._force_shutdown_event.wait())
             timeout_task = asyncio.create_task(asyncio.sleep(self._shutdown_timeout))
 
-            # Wait for either force shutdown or timeout
             done, pending = await asyncio.wait(
                 [force_shutdown_task, timeout_task], return_when=asyncio.FIRST_COMPLETED
             )

@@ -255,21 +290,16 @@ class AgentMCPServer:
             for task in pending:
                 task.cancel()
 
-            # Determine
+            # Determine shutdown reason
             if force_shutdown_task in done:
-                logger.info("Force shutdown requested")
-                print("\
+                logger.info("Force shutdown requested by user")
+                print("\nForce shutdown initiated...")
             else:
                 logger.info(f"Graceful shutdown timed out after {self._shutdown_timeout} seconds")
                 print(f"\nGraceful shutdown timed out after {self._shutdown_timeout} seconds")
 
-
-            await self._close_sse_connections()
+            os._exit(0)
 
-            # Cancel the server task if running
-            if self._server_task and not self._server_task.done():
-                logger.info("Cancelling server task")
-                self._server_task.cancel()
         except asyncio.CancelledError:
             # Monitor was cancelled - clean exit
             pass

@@ -302,11 +332,36 @@ class AgentMCPServer:
             for session_id, writer in writers:
                 try:
                     logger.debug(f"Closing SSE connection: {session_id}")
+                    # Instead of aclose, try to close more gracefully
+                    # Send a special event to notify client, then close
+                    try:
+                        if hasattr(writer, "send") and not getattr(writer, "_closed", False):
+                            try:
+                                # Try to send a close event if possible
+                                await writer.send(Exception("Server shutting down"))
+                            except (AttributeError, asyncio.CancelledError):
+                                pass
+                    except Exception:
+                        pass
+
+                    # Now close the stream
                     await writer.aclose()
                     sse._read_stream_writers.pop(session_id, None)
                 except Exception as e:
                     logger.error(f"Error closing SSE connection {session_id}: {e}")
 
+        # If we have a ASGI lifespan hook, try to signal closure
+        if (
+            hasattr(self.mcp_server, "_lifespan_state")
+            and self.mcp_server._lifespan_state == "started"
+        ):
+            logger.debug("Attempting to signal ASGI lifespan shutdown")
+            try:
+                if hasattr(self.mcp_server, "_on_shutdown"):
+                    await self.mcp_server._on_shutdown()
+            except Exception as e:
+                logger.error(f"Error during ASGI lifespan shutdown: {e}")
+
     async def with_bridged_context(self, agent_context, mcp_context, func, *args, **kwargs):
         """
         Execute a function with bridged context between MCP and agent

@@ -374,18 +429,45 @@ class AgentMCPServer:
         # Signal shutdown
         self._graceful_shutdown_event.set()
 
-
-
+        try:
+            # Close SSE connections
+            await self._close_sse_connections()
 
-
-
+            # Close any resources in the exit stack
+            await self._exit_stack.aclose()
 
-
-
-
-
-
-
-
+            # Shutdown any agent resources
+            for agent_name, agent in self.agent_app._agents.items():
+                try:
+                    if hasattr(agent, "shutdown"):
+                        await agent.shutdown()
+                except Exception as e:
+                    logger.error(f"Error shutting down agent {agent_name}: {e}")
+        except Exception as e:
+            # Log any errors but don't let them prevent shutdown
+            logger.error(f"Error during shutdown: {e}", exc_info=True)
+        finally:
+            logger.info("Full shutdown complete")
+
+    async def _cleanup_minimal(self):
+        """Perform minimal cleanup before simulating a KeyboardInterrupt."""
+        logger.info("Performing minimal cleanup before interrupt")
+
+        # Only close SSE connection writers directly
+        if (
+            hasattr(self.mcp_server, "_sse_transport")
+            and self.mcp_server._sse_transport is not None
+        ):
+            sse = self.mcp_server._sse_transport
+
+            # Close all read stream writers
+            if hasattr(sse, "_read_stream_writers"):
+                for session_id, writer in list(sse._read_stream_writers.items()):
+                    try:
+                        await writer.aclose()
+                    except Exception:
+                        # Ignore errors during cleanup
+                        pass
 
-
+        # Clear active connections set to prevent further operations
+        self._active_connections.clear()
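
The shutdown monitor above now races a force-shutdown event against a timeout and then calls `os._exit(0)` rather than cancelling the server task. Condensed into a standalone, runnable sketch (not the full AgentMCPServer implementation):

import asyncio
import os


async def shutdown_watchdog(force_event: asyncio.Event, timeout: float) -> None:
    """Race a force-shutdown signal against a graceful-shutdown timeout."""
    force_task = asyncio.create_task(force_event.wait())
    timer_task = asyncio.create_task(asyncio.sleep(timeout))

    done, pending = await asyncio.wait(
        [force_task, timer_task], return_when=asyncio.FIRST_COMPLETED
    )
    for task in pending:
        task.cancel()

    if force_task in done:
        print("\nForce shutdown initiated...")
    else:
        print(f"\nGraceful shutdown timed out after {timeout} seconds")

    # Hard exit guarantees the process dies even if a transport still holds
    # the event loop open; pending finalizers are deliberately skipped.
    os._exit(0)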
mcp_agent/cli/commands/config.py DELETED