autobyteus 1.1.2__py3-none-any.whl → 1.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autobyteus/agent/bootstrap_steps/__init__.py +2 -0
- autobyteus/agent/bootstrap_steps/agent_bootstrapper.py +2 -0
- autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py +71 -0
- autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +41 -12
- autobyteus/agent/runtime/agent_worker.py +24 -34
- autobyteus/agent/shutdown_steps/__init__.py +17 -0
- autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py +63 -0
- autobyteus/agent/shutdown_steps/base_shutdown_step.py +33 -0
- autobyteus/agent/shutdown_steps/llm_instance_cleanup_step.py +45 -0
- autobyteus/agent/shutdown_steps/mcp_server_cleanup_step.py +32 -0
- autobyteus/tools/base_tool.py +2 -0
- autobyteus/tools/mcp/__init__.py +10 -7
- autobyteus/tools/mcp/call_handlers/__init__.py +0 -2
- autobyteus/tools/mcp/config_service.py +1 -6
- autobyteus/tools/mcp/factory.py +12 -26
- autobyteus/tools/mcp/registrar.py +57 -178
- autobyteus/tools/mcp/server/__init__.py +16 -0
- autobyteus/tools/mcp/server/base_managed_mcp_server.py +139 -0
- autobyteus/tools/mcp/server/http_managed_mcp_server.py +29 -0
- autobyteus/tools/mcp/server/proxy.py +36 -0
- autobyteus/tools/mcp/server/stdio_managed_mcp_server.py +33 -0
- autobyteus/tools/mcp/server_instance_manager.py +93 -0
- autobyteus/tools/mcp/tool.py +28 -46
- autobyteus/tools/mcp/tool_registrar.py +177 -0
- autobyteus/tools/mcp/types.py +10 -21
- autobyteus/tools/registry/tool_definition.py +11 -2
- autobyteus/tools/registry/tool_registry.py +27 -28
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.3.dist-info}/METADATA +2 -1
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.3.dist-info}/RECORD +32 -20
- autobyteus/tools/mcp/call_handlers/sse_handler.py +0 -22
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.3.dist-info}/WHEEL +0 -0
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.3.dist-info}/licenses/LICENSE +0 -0
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.3.dist-info}/top_level.txt +0 -0
autobyteus/agent/bootstrap_steps/__init__.py
CHANGED

@@ -9,6 +9,7 @@ from .agent_runtime_queue_initialization_step import AgentRuntimeQueueInitializa
 from .workspace_context_initialization_step import WorkspaceContextInitializationStep
 # ToolInitializationStep is no longer a bootstrap step.
 from .system_prompt_processing_step import SystemPromptProcessingStep
+from .mcp_server_prewarming_step import McpServerPrewarmingStep
 # LLMConfigFinalizationStep and LLMInstanceCreationStep removed.
 
 __all__ = [
@@ -16,4 +17,5 @@ __all__ = [
     "AgentRuntimeQueueInitializationStep", # UPDATED
     "WorkspaceContextInitializationStep",
     "SystemPromptProcessingStep",
+    "McpServerPrewarmingStep",
 ]
autobyteus/agent/bootstrap_steps/agent_bootstrapper.py
CHANGED

@@ -6,6 +6,7 @@ from .base_bootstrap_step import BaseBootstrapStep
 from .agent_runtime_queue_initialization_step import AgentRuntimeQueueInitializationStep
 from .workspace_context_initialization_step import WorkspaceContextInitializationStep
 from .system_prompt_processing_step import SystemPromptProcessingStep
+from .mcp_server_prewarming_step import McpServerPrewarmingStep
 from autobyteus.agent.events import AgentReadyEvent
 
 if TYPE_CHECKING:
@@ -31,6 +32,7 @@ class AgentBootstrapper:
         self.bootstrap_steps: List[BaseBootstrapStep] = [
             AgentRuntimeQueueInitializationStep(),
             WorkspaceContextInitializationStep(),
+            McpServerPrewarmingStep(),
             SystemPromptProcessingStep(),
         ]
         logger.debug("AgentBootstrapper initialized with default steps.")
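The step list is ordered: McpServerPrewarmingStep runs after queue and workspace setup but before system-prompt processing, so servers are already up when the prompt is finalized. The loop that executes these steps is not part of this diff; below is a minimal sketch of the sequential, fail-fast pattern such a bootstrapper typically uses (the `Step` alias and `run_steps` helper are illustrative, not the package's actual API).

```python
# Minimal sketch of a sequential, fail-fast step runner, mirroring the
# bootstrap pattern implied by this diff. Names here are hypothetical.
import asyncio
from typing import Awaitable, Callable, List

Step = Callable[[], Awaitable[bool]]  # each step reports success/failure

async def run_steps(steps: List[Step]) -> bool:
    for step in steps:
        if not await step():  # halt on the first failing step
            return False
    return True

async def main() -> None:
    async def ok() -> bool:
        return True
    print(await run_steps([ok, ok]))  # True

asyncio.run(main())
```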
autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py
ADDED

@@ -0,0 +1,71 @@
+# file: autobyteus/autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py
+import logging
+from typing import TYPE_CHECKING, Set
+
+from .base_bootstrap_step import BaseBootstrapStep
+from autobyteus.tools.mcp.config_service import McpConfigService
+from autobyteus.tools.mcp.server_instance_manager import McpServerInstanceManager
+from autobyteus.tools.tool_category import ToolCategory
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+    from autobyteus.agent.phases import AgentPhaseManager
+
+logger = logging.getLogger(__name__)
+
+class McpServerPrewarmingStep(BaseBootstrapStep):
+    """
+    Bootstrap step to eagerly start all MCP servers associated with an agent's tools.
+    This ensures servers are running and ready before the agent becomes idle.
+    """
+
+    def __init__(self):
+        self._config_service = McpConfigService()
+        self._instance_manager = McpServerInstanceManager()
+        logger.debug("McpServerPrewarmingStep initialized.")
+
+    async def execute(self,
+                      context: 'AgentContext',
+                      phase_manager: 'AgentPhaseManager') -> bool:
+        agent_id = context.agent_id
+        logger.info(f"Agent '{agent_id}': Executing McpServerPrewarmingStep.")
+
+        # 1. Find all unique server IDs by inspecting tool definitions.
+        mcp_server_ids: Set[str] = set()
+        for tool in context.config.tools:
+            # This is the new, superior check. It relies on abstract metadata, not concrete types.
+            if tool.definition and tool.definition.category == ToolCategory.MCP:
+                # This is the new, superior way to get the server_id.
+                # It does not rely on private attributes of the tool instance.
+                server_id = tool.definition.metadata.get("mcp_server_id")
+                if server_id:
+                    mcp_server_ids.add(server_id)
+
+        if not mcp_server_ids:
+            logger.debug(f"Agent '{agent_id}': No MCP tools found. Skipping MCP server pre-warming.")
+            return True
+
+        logger.info(f"Agent '{agent_id}': Found {len(mcp_server_ids)} unique MCP server IDs to pre-warm: {mcp_server_ids}")
+
+        # 2. For each server ID, unconditionally start its server instance for this agent.
+        for server_id in mcp_server_ids:
+            try:
+                config = self._config_service.get_config(server_id)
+                if not config:
+                    logger.warning(f"Agent '{agent_id}': Could not find config for server_id '{server_id}' used by a tool. Cannot pre-warm.")
+                    continue
+
+                logger.info(f"Agent '{agent_id}': Pre-warming MCP server '{server_id}'.")
+                # Get the instance for this agent, which creates it if it doesn't exist.
+                server_instance = self._instance_manager.get_server_instance(agent_id, server_id)
+                # Explicitly connect to start the server process.
+                await server_instance.connect()
+                logger.info(f"Agent '{agent_id}': Successfully connected to pre-warmed MCP server '{server_id}'.")
+
+            except Exception as e:
+                error_message = f"Agent '{agent_id}': Failed to pre-warm MCP server '{server_id}': {e}"
+                logger.error(error_message, exc_info=True)
+                # A failure to pre-warm a server is a critical bootstrap failure.
+                return False
+
+        return True
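The discovery pass depends only on each tool's attached definition (its category plus an "mcp_server_id" metadata entry), so it works for any tool type. Below is a standalone sketch of just that collection logic, using stand-in dataclasses in place of the package's real ToolDefinition/BaseTool types.

```python
# Standalone sketch of the server-ID discovery pass above, with stand-in
# dataclasses instead of the real ToolDefinition/BaseTool types.
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Set

@dataclass
class FakeDefinition:
    category: str                      # e.g. "mcp"
    metadata: Dict[str, str] = field(default_factory=dict)

@dataclass
class FakeTool:
    definition: Optional[FakeDefinition] = None

def collect_mcp_server_ids(tools: List[FakeTool]) -> Set[str]:
    server_ids: Set[str] = set()
    for tool in tools:
        # Same shape as the step: check the abstract category, then read
        # the server id from definition metadata.
        if tool.definition and tool.definition.category == "mcp":
            server_id = tool.definition.metadata.get("mcp_server_id")
            if server_id:
                server_ids.add(server_id)
    return server_ids

tools = [
    FakeTool(FakeDefinition("mcp", {"mcp_server_id": "files"})),
    FakeTool(FakeDefinition("mcp", {"mcp_server_id": "files"})),  # duplicate collapses
    FakeTool(FakeDefinition("local")),
    FakeTool(),  # no definition attached
]
print(collect_mcp_server_ids(tools))  # {'files'}
```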
autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py
CHANGED

@@ -1,11 +1,12 @@
 # file: autobyteus/autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py
 import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, List
 
 from .base_processor import BaseLLMResponseProcessor
+from autobyteus.agent.events import PendingToolInvocationEvent
+from autobyteus.agent.tool_invocation import ToolInvocation
 from autobyteus.tools.usage.parsers import ProviderAwareToolUsageParser
 from autobyteus.tools.usage.parsers.exceptions import ToolUsageParseException
-from autobyteus.agent.events import PendingToolInvocationEvent
 
 if TYPE_CHECKING:
     from autobyteus.agent.context import AgentContext
@@ -17,21 +18,20 @@ logger = logging.getLogger(__name__)
 class ProviderAwareToolUsageProcessor(BaseLLMResponseProcessor):
     """
     A "master" tool usage processor that uses a high-level parser from the
-    `tools` module to extract tool invocations
-
+    `tools` module to extract tool invocations. It then ensures each invocation
+    has a session-unique ID before enqueuing the necessary agent events.
     """
+    INVOCATION_COUNTS_KEY = "agent_tool_invocation_counts"
+
     def __init__(self):
         self._parser = ProviderAwareToolUsageParser()
         logger.debug("ProviderAwareToolUsageProcessor initialized.")
 
-    @classmethod
-    def get_name(cls) -> str:
-        return "provider_aware_tool_usage"
-
     async def process_response(self, response: 'CompleteResponse', context: 'AgentContext', triggering_event: 'LLMCompleteResponseReceivedEvent') -> bool:
         """
-        Uses a ProviderAwareToolUsageParser to get
-        and then enqueues a
+        Uses a ProviderAwareToolUsageParser to get tool invocations, makes their
+        IDs unique within the agent's session, and then enqueues a
+        PendingToolInvocationEvent for each one.
         Propagates ToolUsageParseException if parsing fails.
         """
         try:
@@ -44,9 +44,38 @@ class ProviderAwareToolUsageProcessor(BaseLLMResponseProcessor):
         if not tool_invocations:
             return False
 
-
+        # --- NEW LOGIC FOR UNIQUE ID GENERATION ---
+
+        # Ensure the counter map exists in the agent's state's custom data
+        if self.INVOCATION_COUNTS_KEY not in context.custom_data:
+            context.custom_data[self.INVOCATION_COUNTS_KEY] = {}
+
+        invocation_counts = context.custom_data[self.INVOCATION_COUNTS_KEY]
+
+        processed_invocations: List[ToolInvocation] = []
+
         for invocation in tool_invocations:
-
+            base_id = invocation.id
+
+            # Get the current count for this base ID, default to 0
+            count = invocation_counts.get(base_id, 0)
+
+            # Create the new session-unique ID
+            unique_id = f"{base_id}_{count}"
+
+            # Update the invocation's ID in-place
+            invocation.id = unique_id
+
+            # Increment the counter for the next time this base ID is seen
+            invocation_counts[base_id] = count + 1
+
+            processed_invocations.append(invocation)
+
+        # --- END NEW LOGIC ---
+
+        logger.info(f"Agent '{context.agent_id}': Parsed {len(processed_invocations)} tool invocations. Enqueuing events with unique IDs.")
+        for invocation in processed_invocations:
+            logger.info(f"Agent '{context.agent_id}' ({self.get_name()}) identified tool invocation: {invocation.name} with unique ID {invocation.id}. Enqueuing event.")
             await context.input_event_queues.enqueue_tool_invocation_request(
                 PendingToolInvocationEvent(tool_invocation=invocation)
             )
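The uniqueness scheme is a simple per-session counter: the first invocation whose parser-assigned base ID is, say, `call_abc` becomes `call_abc_0`, the next becomes `call_abc_1`, and so on, with the counter map persisted across responses in the agent's custom data. A self-contained sketch of just that transformation:

```python
# Self-contained sketch of the counter-based unique-ID scheme used above.
from typing import Dict, List

def make_session_unique(ids: List[str], counts: Dict[str, int]) -> List[str]:
    unique: List[str] = []
    for base_id in ids:
        count = counts.get(base_id, 0)       # first occurrence -> suffix _0
        unique.append(f"{base_id}_{count}")
        counts[base_id] = count + 1          # next occurrence gets _1, _2, ...
    return unique

counts: Dict[str, int] = {}  # persisted across responses, like custom_data
print(make_session_unique(["call_abc", "call_abc", "call_xyz"], counts))
# ['call_abc_0', 'call_abc_1', 'call_xyz_0']
print(make_session_unique(["call_abc"], counts))
# ['call_abc_2']  -- the counter survives across calls in the same session
```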
autobyteus/agent/runtime/agent_worker.py
CHANGED

@@ -15,6 +15,7 @@ from autobyteus.agent.events import (
 from autobyteus.agent.events import WorkerEventDispatcher
 from autobyteus.agent.runtime.agent_thread_pool_manager import AgentThreadPoolManager
 from autobyteus.agent.bootstrap_steps.agent_bootstrapper import AgentBootstrapper
+from autobyteus.agent.shutdown_steps import AgentShutdownOrchestrator
 
 if TYPE_CHECKING:
     from autobyteus.agent.context import AgentContext
@@ -176,41 +177,22 @@ class AgentWorker:
             logger.error(f"Fatal error in AgentWorker '{agent_id}' async_run() loop: {e}", exc_info=True)
         finally:
             logger.info(f"AgentWorker '{agent_id}' async_run() loop has finished.")
+            # --- Shutdown sequence moved here, inside the original task's finally block ---
+            logger.info(f"AgentWorker '{agent_id}': Running shutdown sequence on worker loop.")
+            orchestrator = AgentShutdownOrchestrator()
+            cleanup_successful = await orchestrator.run(self.context)
 
-
-
-
-
-
+            if not cleanup_successful:
+                logger.critical(f"AgentWorker '{agent_id}': Shutdown resource cleanup failed. The agent may not have shut down cleanly.")
+            else:
+                logger.info(f"AgentWorker '{agent_id}': Shutdown resource cleanup completed successfully.")
+            logger.info(f"AgentWorker '{agent_id}': Shutdown sequence completed.")
 
-    async def _shutdown_sequence(self):
-        """
-        The explicit, ordered shutdown sequence for the worker, executed on its own event loop.
-        """
-        agent_id = self.context.agent_id
-        logger.info(f"AgentWorker '{agent_id}': Running shutdown sequence on worker loop.")
-
-        # 1. Clean up resources like the LLM instance.
-        if self.context.llm_instance and hasattr(self.context.llm_instance, 'cleanup'):
-            logger.info(f"AgentWorker '{agent_id}': Running LLM instance cleanup.")
-            try:
-                cleanup_func = self.context.llm_instance.cleanup
-                if asyncio.iscoroutinefunction(cleanup_func):
-                    await cleanup_func()
-                else:
-                    cleanup_func()
-                logger.info(f"AgentWorker '{agent_id}': LLM instance cleanup completed.")
-            except Exception as e:
-                logger.error(f"AgentWorker '{agent_id}': Error during LLM instance cleanup: {e}", exc_info=True)
-
-        # 2. Signal the main event loop to stop.
-        await self._signal_internal_stop()
-        logger.info(f"AgentWorker '{agent_id}': Shutdown sequence completed.")
 
     async def stop(self, timeout: float = 10.0) -> None:
         """
-        Gracefully stops the worker by
-
+        Gracefully stops the worker by signaling its event loop to terminate,
+        then waiting for the thread to complete its cleanup and exit.
         """
         if not self._is_active or self._stop_initiated:
             return
@@ -219,14 +201,22 @@ class AgentWorker:
         logger.info(f"AgentWorker '{agent_id}': Stop requested.")
         self._stop_initiated = True
 
-        # Schedule
+        # Schedule a coroutine on the worker's loop to set the stop event.
         if self.get_worker_loop():
-
+            def _coro_factory():
+                async def _signal_coro():
+                    if self._async_stop_event and not self._async_stop_event.is_set():
+                        self._async_stop_event.set()
+                    if self.context.state.input_event_queues:
+                        await self.context.state.input_event_queues.enqueue_internal_system_event(AgentStoppedEvent())
+                return _signal_coro()
+
+            future = self.schedule_coroutine_on_worker_loop(_coro_factory)
             try:
-                # Wait for the
+                # Wait for the signal to be processed.
                 future.result(timeout=max(1.0, timeout-1))
            except Exception as e:
-                logger.error(f"AgentWorker '{agent_id}': Error
+                logger.error(f"AgentWorker '{agent_id}': Error signaling stop event: {e}", exc_info=True)
 
         # Wait for the main thread future to complete.
         if self._thread_future:
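`stop()` runs on a caller thread while the worker owns its own event loop, so the stop signal has to be marshalled across threads. The diff does not show `schedule_coroutine_on_worker_loop` itself; the standard-library primitive behind this pattern is `asyncio.run_coroutine_threadsafe`, sketched below with illustrative names.

```python
# Sketch of the cross-thread signaling pattern behind stop(), built on the
# standard asyncio.run_coroutine_threadsafe primitive. Names are illustrative.
import asyncio
import threading
import time

loop = asyncio.new_event_loop()
stop_event = None  # created on the worker loop, like _async_stop_event

def worker() -> None:
    async def main() -> None:
        global stop_event
        stop_event = asyncio.Event()
        await stop_event.wait()  # worker idles until signaled
        print("worker: stop observed, running cleanup")
    asyncio.set_event_loop(loop)
    loop.run_until_complete(main())

t = threading.Thread(target=worker)
t.start()
while stop_event is None:  # wait for the worker to create its event
    time.sleep(0.01)

async def signal_stop() -> None:
    stop_event.set()  # executes on the worker's own loop

# Marshal the coroutine onto the worker loop from the caller thread and
# block until it completes, mirroring future.result(timeout=...) in stop().
asyncio.run_coroutine_threadsafe(signal_stop(), loop).result(timeout=5)
t.join()
```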
autobyteus/agent/shutdown_steps/__init__.py
ADDED

@@ -0,0 +1,17 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/__init__.py
+"""
+Defines individual, self-contained steps for the agent shutdown process.
+These steps are orchestrated by the AgentShutdownOrchestrator.
+"""
+
+from .base_shutdown_step import BaseShutdownStep
+from .llm_instance_cleanup_step import LLMInstanceCleanupStep
+from .mcp_server_cleanup_step import McpServerCleanupStep
+from .agent_shutdown_orchestrator import AgentShutdownOrchestrator
+
+__all__ = [
+    "BaseShutdownStep",
+    "LLMInstanceCleanupStep",
+    "McpServerCleanupStep",
+    "AgentShutdownOrchestrator",
+]
autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py
ADDED

@@ -0,0 +1,63 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py
+import logging
+from typing import TYPE_CHECKING, List, Optional
+
+from .base_shutdown_step import BaseShutdownStep
+from .llm_instance_cleanup_step import LLMInstanceCleanupStep
+from .mcp_server_cleanup_step import McpServerCleanupStep
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+
+logger = logging.getLogger(__name__)
+
+class AgentShutdownOrchestrator:
+    """
+    Orchestrates the agent's shutdown process by executing a sequence of
+    self-contained cleanup steps.
+    """
+    def __init__(self, steps: Optional[List[BaseShutdownStep]] = None):
+        """
+        Initializes the AgentShutdownOrchestrator.
+
+        Args:
+            steps: An optional list of shutdown steps to execute. If not provided,
+                   a default sequence will be used.
+        """
+        if steps is None:
+            self.shutdown_steps: List[BaseShutdownStep] = [
+                LLMInstanceCleanupStep(),
+                McpServerCleanupStep(),
+            ]
+            logger.debug("AgentShutdownOrchestrator initialized with default steps.")
+        else:
+            self.shutdown_steps = steps
+            logger.debug(f"AgentShutdownOrchestrator initialized with {len(steps)} custom steps.")
+
+    async def run(self, context: 'AgentContext') -> bool:
+        """
+        Executes the configured sequence of shutdown steps.
+
+        Args:
+            context: The agent's context.
+
+        Returns:
+            True if all steps completed successfully, False otherwise.
+        """
+        agent_id = context.agent_id
+        logger.info(f"Agent '{agent_id}': AgentShutdownOrchestrator starting execution.")
+
+        for step_index, step_instance in enumerate(self.shutdown_steps):
+            step_name = step_instance.__class__.__name__
+            logger.debug(f"Agent '{agent_id}': Executing shutdown step {step_index + 1}/{len(self.shutdown_steps)}: {step_name}")
+
+            success = await step_instance.execute(context)
+
+            if not success:
+                error_message = f"Shutdown step {step_name} failed."
+                logger.error(f"Agent '{agent_id}': {error_message} Halting shutdown orchestration.")
+                # The step itself is responsible for detailed error logging.
+                return False
+
+        logger.info(f"Agent '{agent_id}': All shutdown steps completed successfully.")
+        return True
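Because the step list is injected at construction, callers can reorder or extend the shutdown sequence without touching the orchestrator. A hedged usage sketch, where `FlushMetricsStep` and the `context` object are illustrative only:

```python
# Hypothetical usage sketch: extending the default shutdown sequence with a
# custom step. FlushMetricsStep and the agent context are illustrative only.
from autobyteus.agent.shutdown_steps import (
    AgentShutdownOrchestrator,
    BaseShutdownStep,
    LLMInstanceCleanupStep,
    McpServerCleanupStep,
)

class FlushMetricsStep(BaseShutdownStep):
    async def execute(self, context) -> bool:
        # Flush any buffered telemetry before resources are torn down.
        return True

orchestrator = AgentShutdownOrchestrator(steps=[
    FlushMetricsStep(),        # runs first
    LLMInstanceCleanupStep(),
    McpServerCleanupStep(),
])
# ok = await orchestrator.run(context)  # False if any step fails
```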
autobyteus/agent/shutdown_steps/base_shutdown_step.py
ADDED

@@ -0,0 +1,33 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/base_shutdown_step.py
+import logging
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+
+logger = logging.getLogger(__name__)
+
+class BaseShutdownStep(ABC):
+    """
+    Abstract base class for individual steps in the agent shutdown process.
+    Each step is responsible for a specific part of the cleanup and
+    for reporting its success or failure.
+    """
+
+    @abstractmethod
+    async def execute(self, context: 'AgentContext') -> bool:
+        """
+        Executes the shutdown step.
+
+        Args:
+            context: The agent's context, providing access to state and resources.
+
+        Returns:
+            True if the step completed successfully, False otherwise.
+            If False, the step is expected to have handled its own detailed logging.
+        """
+        raise NotImplementedError("Subclasses must implement the 'execute' method.")
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}>"
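The return-value contract is the important part: a step should never let an exception escape `execute`; it logs its own failure and returns False so the orchestrator can halt cleanly. A minimal conforming subclass, where the `connection` resource on the context is a hypothetical stand-in:

```python
# Minimal conforming subclass sketch: exceptions are caught, logged by the
# step itself, and converted into a False return. The 'connection' attribute
# on the context is a hypothetical stand-in for a real resource.
import logging

from autobyteus.agent.shutdown_steps import BaseShutdownStep

logger = logging.getLogger(__name__)

class ConnectionCleanupStep(BaseShutdownStep):
    async def execute(self, context) -> bool:
        try:
            connection = getattr(context, "connection", None)
            if connection is not None:
                await connection.close()
            return True
        except Exception as e:
            # Per the base-class contract, do the detailed logging here...
            logger.error(f"Connection cleanup failed: {e}", exc_info=True)
            # ...and report failure via the return value instead of raising.
            return False
```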
autobyteus/agent/shutdown_steps/llm_instance_cleanup_step.py
ADDED

@@ -0,0 +1,45 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/llm_instance_cleanup_step.py
+import asyncio
+import logging
+from typing import TYPE_CHECKING
+
+from .base_shutdown_step import BaseShutdownStep
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+
+logger = logging.getLogger(__name__)
+
+class LLMInstanceCleanupStep(BaseShutdownStep):
+    """
+    Shutdown step for cleaning up the agent's LLM instance.
+    """
+    def __init__(self):
+        logger.debug("LLMInstanceCleanupStep initialized.")
+
+    async def execute(self, context: 'AgentContext') -> bool:
+        agent_id = context.agent_id
+        logger.info(f"Agent '{agent_id}': Executing LLMInstanceCleanupStep.")
+
+        llm_instance = context.llm_instance
+        if not llm_instance:
+            logger.debug(f"Agent '{agent_id}': No LLM instance found in context. Skipping cleanup.")
+            return True
+
+        if hasattr(llm_instance, 'cleanup') and callable(getattr(llm_instance, 'cleanup')):
+            try:
+                logger.info(f"Agent '{agent_id}': Running LLM instance cleanup for '{llm_instance.__class__.__name__}'.")
+                cleanup_func = llm_instance.cleanup
+                if asyncio.iscoroutinefunction(cleanup_func):
+                    await cleanup_func()
+                else:
+                    cleanup_func()
+                logger.info(f"Agent '{agent_id}': LLM instance cleanup completed successfully.")
+                return True
+            except Exception as e:
+                error_message = f"Agent '{agent_id}': Error during LLM instance cleanup: {e}"
+                logger.error(error_message, exc_info=True)
+                return False
+        else:
+            logger.debug(f"Agent '{agent_id}': LLM instance of type '{llm_instance.__class__.__name__}' does not have a 'cleanup' method. Skipping.")
+            return True
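The step tolerates both synchronous and asynchronous `cleanup` implementations by dispatching on `asyncio.iscoroutinefunction`. That duck-typed dispatch in isolation:

```python
# Isolated sketch of the sync/async dispatch used by LLMInstanceCleanupStep:
# the same call site handles both 'def cleanup' and 'async def cleanup'.
import asyncio

class SyncLLM:
    def cleanup(self):
        print("sync cleanup")

class AsyncLLM:
    async def cleanup(self):
        print("async cleanup")

async def run_cleanup(llm) -> None:
    if hasattr(llm, "cleanup") and callable(getattr(llm, "cleanup")):
        cleanup_func = llm.cleanup
        if asyncio.iscoroutinefunction(cleanup_func):
            await cleanup_func()   # coroutine function: must be awaited
        else:
            cleanup_func()         # plain function: call directly

async def main() -> None:
    await run_cleanup(SyncLLM())
    await run_cleanup(AsyncLLM())

asyncio.run(main())
```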
autobyteus/agent/shutdown_steps/mcp_server_cleanup_step.py
ADDED

@@ -0,0 +1,32 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/mcp_server_cleanup_step.py
+import logging
+from typing import TYPE_CHECKING
+
+from .base_shutdown_step import BaseShutdownStep
+from autobyteus.tools.mcp.server_instance_manager import McpServerInstanceManager
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+
+logger = logging.getLogger(__name__)
+
+class McpServerCleanupStep(BaseShutdownStep):
+    """
+    Shutdown step for cleaning up all MCP server instances associated with an agent.
+    """
+    def __init__(self):
+        self._instance_manager = McpServerInstanceManager()
+        logger.debug("McpServerCleanupStep initialized.")
+
+    async def execute(self, context: 'AgentContext') -> bool:
+        agent_id = context.agent_id
+        logger.info(f"Agent '{agent_id}': Executing McpServerCleanupStep.")
+
+        try:
+            await self._instance_manager.cleanup_mcp_server_instances_for_agent(agent_id)
+            logger.info(f"Agent '{agent_id}': MCP server instance cleanup completed successfully.")
+            return True
+        except Exception as e:
+            error_message = f"Agent '{agent_id}': Critical failure during McpServerCleanupStep: {e}"
+            logger.error(error_message, exc_info=True)
+            return False
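The step delegates to McpServerInstanceManager, which, judging by the new `McpServerInstanceKey` type and `server_instance_manager.py` in this release, keys server instances per agent so that one agent's shutdown cannot tear down another agent's servers. A simplified sketch of that keying idea; the real manager's internals are not shown in this excerpt:

```python
# Sketch of per-agent instance keying, assuming (agent_id, server_id) keys as
# suggested by McpServerInstanceKey in this release. Simplified illustration;
# the real McpServerInstanceManager internals are not shown in this diff.
from typing import Dict, Tuple

class FakeServer:
    async def disconnect(self) -> None:
        pass

class SimpleInstanceManager:
    def __init__(self) -> None:
        self._instances: Dict[Tuple[str, str], FakeServer] = {}

    def get_server_instance(self, agent_id: str, server_id: str) -> FakeServer:
        # Create on first use, reuse afterwards -- per (agent, server) pair.
        return self._instances.setdefault((agent_id, server_id), FakeServer())

    async def cleanup_for_agent(self, agent_id: str) -> None:
        # Tear down only this agent's servers; other agents are untouched.
        keys = [k for k in self._instances if k[0] == agent_id]
        for key in keys:
            await self._instances.pop(key).disconnect()
```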
autobyteus/tools/base_tool.py
CHANGED

@@ -16,6 +16,7 @@ if TYPE_CHECKING:
     from autobyteus.tools.parameter_schema import ParameterSchema
     from autobyteus.tools.tool_config import ToolConfig
     from .tool_state import ToolState
+    from autobyteus.tools.registry import ToolDefinition
 
 logger = logging.getLogger('autobyteus')
 
@@ -26,6 +27,7 @@ class BaseTool(ABC, EventEmitter, metaclass=ToolMeta):
     def __init__(self, config: Optional['ToolConfig'] = None):
         super().__init__()
         self.agent_id: Optional[str] = None
+        self.definition: Optional['ToolDefinition'] = None  # Link back to its definition
         # The config is stored primarily for potential use by subclasses or future base features.
         self._config = config
         # Add a dedicated state dictionary for the tool instance
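The new `definition` attribute gives every tool instance a backlink to its registry ToolDefinition, which is what lets McpServerPrewarmingStep check category and metadata without knowing concrete tool types. This diff does not show where the registry assigns the attribute; below is a hedged sketch of how instantiation code might populate it, with illustrative names throughout.

```python
# Hedged sketch of how a registry might populate the new backlink when it
# instantiates a tool. Where autobyteus actually assigns tool.definition is
# not shown in this excerpt, so all names here are illustrative.
from dataclasses import dataclass, field
from typing import Dict, Optional

@dataclass
class Definition:
    name: str
    category: str
    metadata: Dict[str, str] = field(default_factory=dict)

class Tool:
    def __init__(self) -> None:
        self.definition: Optional[Definition] = None  # mirrors BaseTool.__init__

def create_tool(definition: Definition) -> Tool:
    tool = Tool()
    tool.definition = definition  # link the instance back to its definition
    return tool

tool = create_tool(Definition("fs_read", "mcp", {"mcp_server_id": "files"}))
assert tool.definition.metadata["mcp_server_id"] == "files"
```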
autobyteus/tools/mcp/__init__.py
CHANGED

@@ -2,14 +2,15 @@
 """
 This package implements the Model Context Protocol (MCP) integration for AutoByteUs.
 It allows AutoByteUs to connect to external MCP servers, discover tools,
-and register them as standard AutoByteUs tools using a
+and register them as standard AutoByteUs tools using a stateful, server-centric
+architecture with per-agent isolation.
 """
 import logging
 
 logger = logging.getLogger(__name__)
 
 # The actual 'mcp' library and its components are expected to be installed
-# in the environment and are used by the internal
+# in the environment and are used by the internal components.
 
 logger.info("AutoByteUs MCP integration package initialized. Expects 'mcp' library to be available.")
 
@@ -17,9 +18,9 @@ logger.info("AutoByteUs MCP integration package initialized. Expects 'mcp' libra
 from .types import (
     BaseMcpConfig,
     StdioMcpServerConfig,
-    SseMcpServerConfig,
     StreamableHttpMcpServerConfig,
-    McpTransportType
+    McpTransportType,
+    McpServerInstanceKey,
 )
 # Import McpConfigService from config_service.py
 from .config_service import McpConfigService
@@ -28,17 +29,19 @@ from .config_service import McpConfigService
 from .schema_mapper import McpSchemaMapper
 from .tool import GenericMcpTool
 from .factory import McpToolFactory
-from .
+from .tool_registrar import McpToolRegistrar
+from .server_instance_manager import McpServerInstanceManager
 
 __all__ = [
     # Types from types.py
     "BaseMcpConfig",
     "StdioMcpServerConfig",
-    "SseMcpServerConfig",
     "StreamableHttpMcpServerConfig",
     "McpTransportType",
-
+    "McpServerInstanceKey",
+    # Services and Managers
     "McpConfigService",
+    "McpServerInstanceManager",
     # Other public components
     "McpSchemaMapper",
     "GenericMcpTool",
autobyteus/tools/mcp/call_handlers/__init__.py
CHANGED

@@ -8,11 +8,9 @@ for a specific transport protocol (e.g., STDIO, Streamable HTTP).
 from .base_handler import McpCallHandler
 from .stdio_handler import StdioMcpCallHandler
 from .streamable_http_handler import StreamableHttpMcpCallHandler
-from .sse_handler import SseMcpCallHandler
 
 __all__ = [
     "McpCallHandler",
     "StdioMcpCallHandler",
     "StreamableHttpMcpCallHandler",
-    "SseMcpCallHandler",
 ]
autobyteus/tools/mcp/config_service.py
CHANGED

@@ -8,7 +8,6 @@ from typing import List, Dict, Any, Optional, Union, Type
 from .types import (
     BaseMcpConfig,
     StdioMcpServerConfig,
-    SseMcpServerConfig,
     StreamableHttpMcpServerConfig,
     McpTransportType
 )
@@ -38,7 +37,7 @@ class McpConfigService(metaclass=SingletonMeta):
     @staticmethod
     def _create_specific_config(server_id: str, transport_type: McpTransportType, config_data: Dict[str, Any]) -> BaseMcpConfig:
         """
-        Creates a specific McpServerConfig (Stdio,
+        Creates a specific McpServerConfig (Stdio, StreamableHttp) based on transport_type.
         The 'server_id' is injected.
         Parameters from nested structures like 'stdio_params' are un-nested.
         """
@@ -50,7 +49,6 @@ class McpConfigService(metaclass=SingletonMeta):
 
         transport_specific_params_key_map = {
             McpTransportType.STDIO: "stdio_params",
-            McpTransportType.SSE: "sse_params",
             McpTransportType.STREAMABLE_HTTP: "streamable_http_params"
         }
 
@@ -62,7 +60,6 @@ class McpConfigService(metaclass=SingletonMeta):
         constructor_params.update(specific_params_dict)
 
         constructor_params.pop(transport_specific_params_key_map.get(McpTransportType.STDIO), None)
-        constructor_params.pop(transport_specific_params_key_map.get(McpTransportType.SSE), None)
         constructor_params.pop(transport_specific_params_key_map.get(McpTransportType.STREAMABLE_HTTP), None)
         constructor_params.pop('transport_type', None)
 
@@ -75,8 +72,6 @@ class McpConfigService(metaclass=SingletonMeta):
         try:
             if transport_type == McpTransportType.STDIO:
                 return StdioMcpServerConfig(**constructor_params)
-            elif transport_type == McpTransportType.SSE:
-                return SseMcpServerConfig(**constructor_params)
             elif transport_type == McpTransportType.STREAMABLE_HTTP:
                 return StreamableHttpMcpServerConfig(**constructor_params)
             else: