autobyteus 1.1.1__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (49)
  1. autobyteus/agent/bootstrap_steps/__init__.py +2 -0
  2. autobyteus/agent/bootstrap_steps/agent_bootstrapper.py +2 -0
  3. autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py +71 -0
  4. autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +41 -12
  5. autobyteus/agent/runtime/agent_runtime.py +1 -4
  6. autobyteus/agent/runtime/agent_worker.py +56 -23
  7. autobyteus/agent/shutdown_steps/__init__.py +17 -0
  8. autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py +63 -0
  9. autobyteus/agent/shutdown_steps/base_shutdown_step.py +33 -0
  10. autobyteus/agent/shutdown_steps/llm_instance_cleanup_step.py +45 -0
  11. autobyteus/agent/shutdown_steps/mcp_server_cleanup_step.py +32 -0
  12. autobyteus/llm/api/deepseek_llm.py +10 -172
  13. autobyteus/llm/api/grok_llm.py +10 -171
  14. autobyteus/llm/api/kimi_llm.py +24 -0
  15. autobyteus/llm/api/openai_compatible_llm.py +193 -0
  16. autobyteus/llm/api/openai_llm.py +11 -139
  17. autobyteus/llm/llm_factory.py +62 -0
  18. autobyteus/llm/providers.py +1 -0
  19. autobyteus/llm/token_counter/kimi_token_counter.py +24 -0
  20. autobyteus/llm/token_counter/token_counter_factory.py +3 -0
  21. autobyteus/llm/utils/messages.py +3 -3
  22. autobyteus/tools/base_tool.py +2 -0
  23. autobyteus/tools/mcp/__init__.py +10 -7
  24. autobyteus/tools/mcp/call_handlers/__init__.py +0 -2
  25. autobyteus/tools/mcp/config_service.py +1 -6
  26. autobyteus/tools/mcp/factory.py +12 -26
  27. autobyteus/tools/mcp/registrar.py +57 -178
  28. autobyteus/tools/mcp/server/__init__.py +16 -0
  29. autobyteus/tools/mcp/server/base_managed_mcp_server.py +139 -0
  30. autobyteus/tools/mcp/server/http_managed_mcp_server.py +29 -0
  31. autobyteus/tools/mcp/server/proxy.py +36 -0
  32. autobyteus/tools/mcp/server/stdio_managed_mcp_server.py +33 -0
  33. autobyteus/tools/mcp/server_instance_manager.py +93 -0
  34. autobyteus/tools/mcp/tool.py +28 -46
  35. autobyteus/tools/mcp/tool_registrar.py +177 -0
  36. autobyteus/tools/mcp/types.py +10 -21
  37. autobyteus/tools/registry/tool_definition.py +11 -2
  38. autobyteus/tools/registry/tool_registry.py +27 -28
  39. autobyteus/tools/usage/parsers/_json_extractor.py +99 -0
  40. autobyteus/tools/usage/parsers/default_json_tool_usage_parser.py +46 -77
  41. autobyteus/tools/usage/parsers/default_xml_tool_usage_parser.py +87 -97
  42. autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +38 -46
  43. autobyteus/tools/usage/parsers/openai_json_tool_usage_parser.py +104 -154
  44. {autobyteus-1.1.1.dist-info → autobyteus-1.1.3.dist-info}/METADATA +4 -2
  45. {autobyteus-1.1.1.dist-info → autobyteus-1.1.3.dist-info}/RECORD +48 -32
  46. autobyteus/tools/mcp/call_handlers/sse_handler.py +0 -22
  47. {autobyteus-1.1.1.dist-info → autobyteus-1.1.3.dist-info}/WHEEL +0 -0
  48. {autobyteus-1.1.1.dist-info → autobyteus-1.1.3.dist-info}/licenses/LICENSE +0 -0
  49. {autobyteus-1.1.1.dist-info → autobyteus-1.1.3.dist-info}/top_level.txt +0 -0
autobyteus/agent/bootstrap_steps/__init__.py
@@ -9,6 +9,7 @@ from .agent_runtime_queue_initialization_step import AgentRuntimeQueueInitializa
 from .workspace_context_initialization_step import WorkspaceContextInitializationStep
 # ToolInitializationStep is no longer a bootstrap step.
 from .system_prompt_processing_step import SystemPromptProcessingStep
+from .mcp_server_prewarming_step import McpServerPrewarmingStep
 # LLMConfigFinalizationStep and LLMInstanceCreationStep removed.
 
 __all__ = [
@@ -16,4 +17,5 @@ __all__ = [
     "AgentRuntimeQueueInitializationStep", # UPDATED
     "WorkspaceContextInitializationStep",
     "SystemPromptProcessingStep",
+    "McpServerPrewarmingStep",
 ]
autobyteus/agent/bootstrap_steps/agent_bootstrapper.py
@@ -6,6 +6,7 @@ from .base_bootstrap_step import BaseBootstrapStep
 from .agent_runtime_queue_initialization_step import AgentRuntimeQueueInitializationStep
 from .workspace_context_initialization_step import WorkspaceContextInitializationStep
 from .system_prompt_processing_step import SystemPromptProcessingStep
+from .mcp_server_prewarming_step import McpServerPrewarmingStep
 from autobyteus.agent.events import AgentReadyEvent
 
 if TYPE_CHECKING:
@@ -31,6 +32,7 @@ class AgentBootstrapper:
         self.bootstrap_steps: List[BaseBootstrapStep] = [
             AgentRuntimeQueueInitializationStep(),
             WorkspaceContextInitializationStep(),
+            McpServerPrewarmingStep(),
             SystemPromptProcessingStep(),
         ]
         logger.debug("AgentBootstrapper initialized with default steps.")
autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py
@@ -0,0 +1,71 @@
+# file: autobyteus/autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py
+import logging
+from typing import TYPE_CHECKING, Set
+
+from .base_bootstrap_step import BaseBootstrapStep
+from autobyteus.tools.mcp.config_service import McpConfigService
+from autobyteus.tools.mcp.server_instance_manager import McpServerInstanceManager
+from autobyteus.tools.tool_category import ToolCategory
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+    from autobyteus.agent.phases import AgentPhaseManager
+
+logger = logging.getLogger(__name__)
+
+class McpServerPrewarmingStep(BaseBootstrapStep):
+    """
+    Bootstrap step to eagerly start all MCP servers associated with an agent's tools.
+    This ensures servers are running and ready before the agent becomes idle.
+    """
+
+    def __init__(self):
+        self._config_service = McpConfigService()
+        self._instance_manager = McpServerInstanceManager()
+        logger.debug("McpServerPrewarmingStep initialized.")
+
+    async def execute(self,
+                      context: 'AgentContext',
+                      phase_manager: 'AgentPhaseManager') -> bool:
+        agent_id = context.agent_id
+        logger.info(f"Agent '{agent_id}': Executing McpServerPrewarmingStep.")
+
+        # 1. Find all unique server IDs by inspecting tool definitions.
+        mcp_server_ids: Set[str] = set()
+        for tool in context.config.tools:
+            # This is the new, superior check. It relies on abstract metadata, not concrete types.
+            if tool.definition and tool.definition.category == ToolCategory.MCP:
+                # This is the new, superior way to get the server_id.
+                # It does not rely on private attributes of the tool instance.
+                server_id = tool.definition.metadata.get("mcp_server_id")
+                if server_id:
+                    mcp_server_ids.add(server_id)
+
+        if not mcp_server_ids:
+            logger.debug(f"Agent '{agent_id}': No MCP tools found. Skipping MCP server pre-warming.")
+            return True
+
+        logger.info(f"Agent '{agent_id}': Found {len(mcp_server_ids)} unique MCP server IDs to pre-warm: {mcp_server_ids}")
+
+        # 2. For each server ID, unconditionally start its server instance for this agent.
+        for server_id in mcp_server_ids:
+            try:
+                config = self._config_service.get_config(server_id)
+                if not config:
+                    logger.warning(f"Agent '{agent_id}': Could not find config for server_id '{server_id}' used by a tool. Cannot pre-warm.")
+                    continue
+
+                logger.info(f"Agent '{agent_id}': Pre-warming MCP server '{server_id}'.")
+                # Get the instance for this agent, which creates it if it doesn't exist.
+                server_instance = self._instance_manager.get_server_instance(agent_id, server_id)
+                # Explicitly connect to start the server process.
+                await server_instance.connect()
+                logger.info(f"Agent '{agent_id}': Successfully connected to pre-warmed MCP server '{server_id}'.")
+
+            except Exception as e:
+                error_message = f"Agent '{agent_id}': Failed to pre-warm MCP server '{server_id}': {e}"
+                logger.error(error_message, exc_info=True)
+                # A failure to pre-warm a server is a critical bootstrap failure.
+                return False
+
+        return True
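The discovery loop above only assumes that an MCP tool's definition reports `category == ToolCategory.MCP` and carries an `mcp_server_id` entry in its `metadata` dict. A standalone sketch of that contract, using stand-in objects rather than real autobyteus tool definitions:

    # Stand-in objects to illustrate the metadata contract the step reads;
    # constructing real autobyteus ToolDefinition objects may look different.
    from types import SimpleNamespace

    MCP = "mcp"  # stands in for ToolCategory.MCP

    tools = [
        SimpleNamespace(definition=SimpleNamespace(category=MCP, metadata={"mcp_server_id": "files"})),
        SimpleNamespace(definition=SimpleNamespace(category=MCP, metadata={"mcp_server_id": "files"})),
        SimpleNamespace(definition=SimpleNamespace(category="local", metadata={})),
    ]

    server_ids = {
        t.definition.metadata.get("mcp_server_id")
        for t in tools
        if t.definition and t.definition.category == MCP and t.definition.metadata.get("mcp_server_id")
    }
    print(server_ids)  # {'files'} -- duplicates collapse, non-MCP tools are ignored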
autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py
@@ -1,11 +1,12 @@
 # file: autobyteus/autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py
 import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, List
 
 from .base_processor import BaseLLMResponseProcessor
+from autobyteus.agent.events import PendingToolInvocationEvent
+from autobyteus.agent.tool_invocation import ToolInvocation
 from autobyteus.tools.usage.parsers import ProviderAwareToolUsageParser
 from autobyteus.tools.usage.parsers.exceptions import ToolUsageParseException
-from autobyteus.agent.events import PendingToolInvocationEvent
 
 if TYPE_CHECKING:
     from autobyteus.agent.context import AgentContext
@@ -17,21 +18,20 @@ logger = logging.getLogger(__name__)
 class ProviderAwareToolUsageProcessor(BaseLLMResponseProcessor):
     """
     A "master" tool usage processor that uses a high-level parser from the
-    `tools` module to extract tool invocations, and then enqueues the
-    necessary agent events based on the parsed results.
+    `tools` module to extract tool invocations. It then ensures each invocation
+    has a session-unique ID before enqueuing the necessary agent events.
     """
+    INVOCATION_COUNTS_KEY = "agent_tool_invocation_counts"
+
     def __init__(self):
         self._parser = ProviderAwareToolUsageParser()
         logger.debug("ProviderAwareToolUsageProcessor initialized.")
 
-    @classmethod
-    def get_name(cls) -> str:
-        return "provider_aware_tool_usage"
-
     async def process_response(self, response: 'CompleteResponse', context: 'AgentContext', triggering_event: 'LLMCompleteResponseReceivedEvent') -> bool:
         """
-        Uses a ProviderAwareToolUsageParser to get a list of tool invocations,
-        and then enqueues a PendingToolInvocationEvent for each one.
+        Uses a ProviderAwareToolUsageParser to get tool invocations, makes their
+        IDs unique within the agent's session, and then enqueues a
+        PendingToolInvocationEvent for each one.
         Propagates ToolUsageParseException if parsing fails.
         """
         try:
@@ -44,9 +44,38 @@ class ProviderAwareToolUsageProcessor(BaseLLMResponseProcessor):
         if not tool_invocations:
             return False
 
-        logger.info(f"Agent '{context.agent_id}': Parsed {len(tool_invocations)} tool invocations. Enqueuing events.")
+        # --- NEW LOGIC FOR UNIQUE ID GENERATION ---
+
+        # Ensure the counter map exists in the agent's state's custom data
+        if self.INVOCATION_COUNTS_KEY not in context.custom_data:
+            context.custom_data[self.INVOCATION_COUNTS_KEY] = {}
+
+        invocation_counts = context.custom_data[self.INVOCATION_COUNTS_KEY]
+
+        processed_invocations: List[ToolInvocation] = []
+
         for invocation in tool_invocations:
-            logger.info(f"Agent '{context.agent_id}' ({self.get_name()}) identified tool invocation: {invocation.name}. Enqueuing event.")
+            base_id = invocation.id
+
+            # Get the current count for this base ID, default to 0
+            count = invocation_counts.get(base_id, 0)
+
+            # Create the new session-unique ID
+            unique_id = f"{base_id}_{count}"
+
+            # Update the invocation's ID in-place
+            invocation.id = unique_id
+
+            # Increment the counter for the next time this base ID is seen
+            invocation_counts[base_id] = count + 1
+
+            processed_invocations.append(invocation)
+
+        # --- END NEW LOGIC ---
+
+        logger.info(f"Agent '{context.agent_id}': Parsed {len(processed_invocations)} tool invocations. Enqueuing events with unique IDs.")
+        for invocation in processed_invocations:
+            logger.info(f"Agent '{context.agent_id}' ({self.get_name()}) identified tool invocation: {invocation.name} with unique ID {invocation.id}. Enqueuing event.")
             await context.input_event_queues.enqueue_tool_invocation_request(
                 PendingToolInvocationEvent(tool_invocation=invocation)
             )
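The effect of the new ID-uniquification logic can be seen in a small standalone sketch; the `FakeInvocation` class and counter dict below are simplified stand-ins for autobyteus's `ToolInvocation` and `context.custom_data`, not the real types:

    # Minimal sketch of the per-session ID suffixing shown in the diff above:
    # each base ID gets a counter, so repeats become "<base>_0", "<base>_1", ...
    from dataclasses import dataclass

    @dataclass
    class FakeInvocation:  # hypothetical stand-in for ToolInvocation
        id: str
        name: str

    invocation_counts = {}  # plays the role of context.custom_data[INVOCATION_COUNTS_KEY]

    def make_session_unique(invocation):
        base_id = invocation.id
        count = invocation_counts.get(base_id, 0)
        invocation.id = f"{base_id}_{count}"
        invocation_counts[base_id] = count + 1
        return invocation

    calls = [FakeInvocation("call_abc", "search"), FakeInvocation("call_abc", "search")]
    print([make_session_unique(c).id for c in calls])  # ['call_abc_0', 'call_abc_1']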
autobyteus/agent/runtime/agent_runtime.py
@@ -120,10 +120,7 @@ class AgentRuntime:
         await self.phase_manager.notify_shutdown_initiated()
         await self._worker.stop(timeout=timeout)
 
-        if self.context.llm_instance and hasattr(self.context.llm_instance, 'cleanup'):
-            cleanup_func = self.context.llm_instance.cleanup
-            if asyncio.iscoroutinefunction(cleanup_func): await cleanup_func()
-            else: cleanup_func()
+        # LLM instance cleanup is now handled by the AgentWorker before its loop closes.
 
         await self.phase_manager.notify_final_shutdown_complete()
         logger.info(f"AgentRuntime for '{agent_id}' stop() method completed.")
autobyteus/agent/runtime/agent_worker.py
@@ -15,6 +15,7 @@ from autobyteus.agent.events import (
 from autobyteus.agent.events import WorkerEventDispatcher
 from autobyteus.agent.runtime.agent_thread_pool_manager import AgentThreadPoolManager
 from autobyteus.agent.bootstrap_steps.agent_bootstrapper import AgentBootstrapper
+from autobyteus.agent.shutdown_steps import AgentShutdownOrchestrator
 
 if TYPE_CHECKING:
     from autobyteus.agent.context import AgentContext
@@ -140,19 +141,19 @@ class AgentWorker:
 
     async def async_run(self) -> None:
         agent_id = self.context.agent_id
-        logger.info(f"AgentWorker '{agent_id}' async_run(): Starting.")
-
-        # --- Direct Initialization ---
-        initialization_successful = await self._initialize()
-        if not initialization_successful:
-            logger.critical(f"AgentWorker '{agent_id}' failed to initialize. Worker is shutting down.")
-            if self._async_stop_event and not self._async_stop_event.is_set():
-                self._async_stop_event.set()
-            return
-
-        # --- Main Event Loop ---
-        logger.info(f"AgentWorker '{agent_id}' initialized successfully. Entering main event loop.")
         try:
+            logger.info(f"AgentWorker '{agent_id}' async_run(): Starting.")
+
+            # --- Direct Initialization ---
+            initialization_successful = await self._initialize()
+            if not initialization_successful:
+                logger.critical(f"AgentWorker '{agent_id}' failed to initialize. Worker is shutting down.")
+                if self._async_stop_event and not self._async_stop_event.is_set():
+                    self._async_stop_event.set()
+                return
+
+            # --- Main Event Loop ---
+            logger.info(f"AgentWorker '{agent_id}' initialized successfully. Entering main event loop.")
             while not self._async_stop_event.is_set():
                 try:
                     queue_event_tuple = await asyncio.wait_for(
@@ -176,25 +177,57 @@ class AgentWorker:
                 logger.error(f"Fatal error in AgentWorker '{agent_id}' async_run() loop: {e}", exc_info=True)
         finally:
             logger.info(f"AgentWorker '{agent_id}' async_run() loop has finished.")
+            # --- Shutdown sequence moved here, inside the original task's finally block ---
+            logger.info(f"AgentWorker '{agent_id}': Running shutdown sequence on worker loop.")
+            orchestrator = AgentShutdownOrchestrator()
+            cleanup_successful = await orchestrator.run(self.context)
+
+            if not cleanup_successful:
+                logger.critical(f"AgentWorker '{agent_id}': Shutdown resource cleanup failed. The agent may not have shut down cleanly.")
+            else:
+                logger.info(f"AgentWorker '{agent_id}': Shutdown resource cleanup completed successfully.")
+            logger.info(f"AgentWorker '{agent_id}': Shutdown sequence completed.")
 
-    async def _signal_internal_stop(self):
-        if self._async_stop_event and not self._async_stop_event.is_set():
-            self._async_stop_event.set()
-        if self.context.state.input_event_queues:
-            await self.context.state.input_event_queues.enqueue_internal_system_event(AgentStoppedEvent())
 
     async def stop(self, timeout: float = 10.0) -> None:
+        """
+        Gracefully stops the worker by signaling its event loop to terminate,
+        then waiting for the thread to complete its cleanup and exit.
+        """
         if not self._is_active or self._stop_initiated:
            return
+
+        agent_id = self.context.agent_id
+        logger.info(f"AgentWorker '{agent_id}': Stop requested.")
        self._stop_initiated = True
-        if self.get_worker_loop() and self._async_stop_event:
-            future = asyncio.run_coroutine_threadsafe(self._signal_internal_stop(), self.get_worker_loop())
-            try: future.result(timeout=1.0)
-            except Exception: pass
+
+        # Schedule a coroutine on the worker's loop to set the stop event.
+        if self.get_worker_loop():
+            def _coro_factory():
+                async def _signal_coro():
+                    if self._async_stop_event and not self._async_stop_event.is_set():
+                        self._async_stop_event.set()
+                    if self.context.state.input_event_queues:
+                        await self.context.state.input_event_queues.enqueue_internal_system_event(AgentStoppedEvent())
+                return _signal_coro()
+
+            future = self.schedule_coroutine_on_worker_loop(_coro_factory)
+            try:
+                # Wait for the signal to be processed.
+                future.result(timeout=max(1.0, timeout-1))
+            except Exception as e:
+                logger.error(f"AgentWorker '{agent_id}': Error signaling stop event: {e}", exc_info=True)
+
+        # Wait for the main thread future to complete.
         if self._thread_future:
-            try: await asyncio.wait_for(asyncio.wrap_future(self._thread_future), timeout=timeout)
-            except asyncio.TimeoutError: logger.warning(f"Timeout waiting for worker thread of '{self.context.agent_id}'.")
+            try:
+                await asyncio.wait_for(asyncio.wrap_future(self._thread_future), timeout=timeout)
+                logger.info(f"AgentWorker '{agent_id}': Worker thread has terminated.")
+            except asyncio.TimeoutError:
+                logger.warning(f"AgentWorker '{agent_id}': Timeout waiting for worker thread to terminate.")
+
         self._is_active = False
 
+
     def is_alive(self) -> bool:
         return self._thread_future is not None and not self._thread_future.done()
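The reworked `stop()` relies on a standard asyncio pattern: schedule a coroutine onto the event loop owned by the worker thread and block on the returned `concurrent.futures.Future`. A generic, self-contained sketch of that pattern (the names here are illustrative, not AgentWorker APIs):

    # Signaling an event loop that runs in another thread, as stop() does.
    import asyncio
    import threading

    ready = threading.Event()
    worker_loop = None
    stop_event = None

    def worker():
        # Worker thread: owns its own loop and waits until it is told to stop.
        global worker_loop, stop_event
        worker_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(worker_loop)
        stop_event = asyncio.Event()
        ready.set()
        worker_loop.run_until_complete(stop_event.wait())
        worker_loop.close()

    async def signal_stop():
        # Runs on the worker loop, like the _signal_coro in the diff.
        stop_event.set()

    t = threading.Thread(target=worker)
    t.start()
    ready.wait()

    # Caller's thread: schedule the coroutine on the worker loop, block on the
    # concurrent future it returns, then join the thread.
    future = asyncio.run_coroutine_threadsafe(signal_stop(), worker_loop)
    future.result(timeout=5.0)
    t.join()
    print("worker loop terminated")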
autobyteus/agent/shutdown_steps/__init__.py
@@ -0,0 +1,17 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/__init__.py
+"""
+Defines individual, self-contained steps for the agent shutdown process.
+These steps are orchestrated by the AgentShutdownOrchestrator.
+"""
+
+from .base_shutdown_step import BaseShutdownStep
+from .llm_instance_cleanup_step import LLMInstanceCleanupStep
+from .mcp_server_cleanup_step import McpServerCleanupStep
+from .agent_shutdown_orchestrator import AgentShutdownOrchestrator
+
+__all__ = [
+    "BaseShutdownStep",
+    "LLMInstanceCleanupStep",
+    "McpServerCleanupStep",
+    "AgentShutdownOrchestrator",
+]
autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py
@@ -0,0 +1,63 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py
+import logging
+from typing import TYPE_CHECKING, List, Optional
+
+from .base_shutdown_step import BaseShutdownStep
+from .llm_instance_cleanup_step import LLMInstanceCleanupStep
+from .mcp_server_cleanup_step import McpServerCleanupStep
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+
+logger = logging.getLogger(__name__)
+
+class AgentShutdownOrchestrator:
+    """
+    Orchestrates the agent's shutdown process by executing a sequence of
+    self-contained cleanup steps.
+    """
+    def __init__(self, steps: Optional[List[BaseShutdownStep]] = None):
+        """
+        Initializes the AgentShutdownOrchestrator.
+
+        Args:
+            steps: An optional list of shutdown steps to execute. If not provided,
+                   a default sequence will be used.
+        """
+        if steps is None:
+            self.shutdown_steps: List[BaseShutdownStep] = [
+                LLMInstanceCleanupStep(),
+                McpServerCleanupStep(),
+            ]
+            logger.debug("AgentShutdownOrchestrator initialized with default steps.")
+        else:
+            self.shutdown_steps = steps
+            logger.debug(f"AgentShutdownOrchestrator initialized with {len(steps)} custom steps.")
+
+    async def run(self, context: 'AgentContext') -> bool:
+        """
+        Executes the configured sequence of shutdown steps.
+
+        Args:
+            context: The agent's context.
+
+        Returns:
+            True if all steps completed successfully, False otherwise.
+        """
+        agent_id = context.agent_id
+        logger.info(f"Agent '{agent_id}': AgentShutdownOrchestrator starting execution.")
+
+        for step_index, step_instance in enumerate(self.shutdown_steps):
+            step_name = step_instance.__class__.__name__
+            logger.debug(f"Agent '{agent_id}': Executing shutdown step {step_index + 1}/{len(self.shutdown_steps)}: {step_name}")
+
+            success = await step_instance.execute(context)
+
+            if not success:
+                error_message = f"Shutdown step {step_name} failed."
+                logger.error(f"Agent '{agent_id}': {error_message} Halting shutdown orchestration.")
+                # The step itself is responsible for detailed error logging.
+                return False
+
+        logger.info(f"Agent '{agent_id}': All shutdown steps completed successfully.")
+        return True
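As a usage sketch (hedged: `context` is assumed to be an already-built `AgentContext` from a running agent; autobyteus normally drives this from `AgentWorker` rather than user code):

    # Hypothetical direct use of the orchestrator added above.
    from autobyteus.agent.shutdown_steps import (
        AgentShutdownOrchestrator,
        LLMInstanceCleanupStep,
        McpServerCleanupStep,
    )

    async def shut_down(context) -> None:
        # Default order: LLM instance cleanup first, then MCP server cleanup.
        orchestrator = AgentShutdownOrchestrator()
        ok = await orchestrator.run(context)
        if not ok:
            # Each step logs its own failure; the orchestrator halts at the first False.
            print(f"Agent '{context.agent_id}': shutdown cleanup did not complete cleanly.")

    # A custom sequence can be supplied instead of the defaults:
    custom = AgentShutdownOrchestrator(steps=[McpServerCleanupStep(), LLMInstanceCleanupStep()])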
autobyteus/agent/shutdown_steps/base_shutdown_step.py
@@ -0,0 +1,33 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/base_shutdown_step.py
+import logging
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+
+logger = logging.getLogger(__name__)
+
+class BaseShutdownStep(ABC):
+    """
+    Abstract base class for individual steps in the agent shutdown process.
+    Each step is responsible for a specific part of the cleanup and
+    for reporting its success or failure.
+    """
+
+    @abstractmethod
+    async def execute(self, context: 'AgentContext') -> bool:
+        """
+        Executes the shutdown step.
+
+        Args:
+            context: The agent's context, providing access to state and resources.
+
+        Returns:
+            True if the step completed successfully, False otherwise.
+            If False, the step is expected to have handled its own detailed logging.
+        """
+        raise NotImplementedError("Subclasses must implement the 'execute' method.")
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}>"
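Because `execute(context) -> bool` is the whole contract, adding a project-specific cleanup step is straightforward. A sketch of a hypothetical custom step (the scratch-directory cleanup and its default path are illustrative, not part of autobyteus):

    # Hypothetical custom shutdown step built on the BaseShutdownStep ABC above.
    import logging
    import shutil
    from pathlib import Path
    from typing import TYPE_CHECKING

    from autobyteus.agent.shutdown_steps import BaseShutdownStep

    if TYPE_CHECKING:
        from autobyteus.agent.context import AgentContext

    logger = logging.getLogger(__name__)

    class ScratchDirCleanupStep(BaseShutdownStep):
        """Removes a per-agent scratch directory (illustrative example)."""

        def __init__(self, root: str = "/tmp/agent_scratch"):  # assumed layout
            self._root = Path(root)

        async def execute(self, context: 'AgentContext') -> bool:
            target = self._root / context.agent_id
            try:
                shutil.rmtree(target, ignore_errors=True)
                logger.info(f"Agent '{context.agent_id}': removed scratch dir {target}.")
                return True
            except Exception:
                logger.error(f"Agent '{context.agent_id}': scratch cleanup failed.", exc_info=True)
                return False

Such a step could then be passed to the orchestrator via its `steps` argument alongside the built-in cleanup steps.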
autobyteus/agent/shutdown_steps/llm_instance_cleanup_step.py
@@ -0,0 +1,45 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/llm_instance_cleanup_step.py
+import asyncio
+import logging
+from typing import TYPE_CHECKING
+
+from .base_shutdown_step import BaseShutdownStep
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+
+logger = logging.getLogger(__name__)
+
+class LLMInstanceCleanupStep(BaseShutdownStep):
+    """
+    Shutdown step for cleaning up the agent's LLM instance.
+    """
+    def __init__(self):
+        logger.debug("LLMInstanceCleanupStep initialized.")
+
+    async def execute(self, context: 'AgentContext') -> bool:
+        agent_id = context.agent_id
+        logger.info(f"Agent '{agent_id}': Executing LLMInstanceCleanupStep.")
+
+        llm_instance = context.llm_instance
+        if not llm_instance:
+            logger.debug(f"Agent '{agent_id}': No LLM instance found in context. Skipping cleanup.")
+            return True
+
+        if hasattr(llm_instance, 'cleanup') and callable(getattr(llm_instance, 'cleanup')):
+            try:
+                logger.info(f"Agent '{agent_id}': Running LLM instance cleanup for '{llm_instance.__class__.__name__}'.")
+                cleanup_func = llm_instance.cleanup
+                if asyncio.iscoroutinefunction(cleanup_func):
+                    await cleanup_func()
+                else:
+                    cleanup_func()
+                logger.info(f"Agent '{agent_id}': LLM instance cleanup completed successfully.")
+                return True
+            except Exception as e:
+                error_message = f"Agent '{agent_id}': Error during LLM instance cleanup: {e}"
+                logger.error(error_message, exc_info=True)
+                return False
+        else:
+            logger.debug(f"Agent '{agent_id}': LLM instance of type '{llm_instance.__class__.__name__}' does not have a 'cleanup' method. Skipping.")
+            return True
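The step above duck-types `cleanup` and dispatches on whether it is a coroutine function; the same pattern in isolation (the LLM classes here are made up for the sketch) looks like:

    # Dispatching a cleanup hook that may be sync or async, as the step does.
    import asyncio

    class SyncLLM:
        def cleanup(self):
            print("sync cleanup")

    class AsyncLLM:
        async def cleanup(self):
            print("async cleanup")

    async def dispose(llm):
        fn = getattr(llm, "cleanup", None)
        if not callable(fn):
            return  # nothing to clean up
        if asyncio.iscoroutinefunction(fn):
            await fn()
        else:
            fn()

    asyncio.run(dispose(SyncLLM()))   # prints "sync cleanup"
    asyncio.run(dispose(AsyncLLM()))  # prints "async cleanup"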
autobyteus/agent/shutdown_steps/mcp_server_cleanup_step.py
@@ -0,0 +1,32 @@
+# file: autobyteus/autobyteus/agent/shutdown_steps/mcp_server_cleanup_step.py
+import logging
+from typing import TYPE_CHECKING
+
+from .base_shutdown_step import BaseShutdownStep
+from autobyteus.tools.mcp.server_instance_manager import McpServerInstanceManager
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+
+logger = logging.getLogger(__name__)
+
+class McpServerCleanupStep(BaseShutdownStep):
+    """
+    Shutdown step for cleaning up all MCP server instances associated with an agent.
+    """
+    def __init__(self):
+        self._instance_manager = McpServerInstanceManager()
+        logger.debug("McpServerCleanupStep initialized.")
+
+    async def execute(self, context: 'AgentContext') -> bool:
+        agent_id = context.agent_id
+        logger.info(f"Agent '{agent_id}': Executing McpServerCleanupStep.")
+
+        try:
+            await self._instance_manager.cleanup_mcp_server_instances_for_agent(agent_id)
+            logger.info(f"Agent '{agent_id}': MCP server instance cleanup completed successfully.")
+            return True
+        except Exception as e:
+            error_message = f"Agent '{agent_id}': Critical failure during McpServerCleanupStep: {e}"
+            logger.error(error_message, exc_info=True)
+            return False