monoco-toolkit 0.3.11__py3-none-any.whl → 0.3.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monoco/core/automation/__init__.py +51 -0
- monoco/core/automation/config.py +338 -0
- monoco/core/automation/field_watcher.py +296 -0
- monoco/core/automation/handlers.py +723 -0
- monoco/core/config.py +1 -1
- monoco/core/executor/__init__.py +38 -0
- monoco/core/executor/agent_action.py +254 -0
- monoco/core/executor/git_action.py +303 -0
- monoco/core/executor/im_action.py +309 -0
- monoco/core/executor/pytest_action.py +218 -0
- monoco/core/git.py +15 -0
- monoco/core/hooks/context.py +74 -13
- monoco/core/router/__init__.py +55 -0
- monoco/core/router/action.py +341 -0
- monoco/core/router/router.py +392 -0
- monoco/core/scheduler/__init__.py +63 -0
- monoco/core/scheduler/base.py +152 -0
- monoco/core/scheduler/engines.py +175 -0
- monoco/core/scheduler/events.py +171 -0
- monoco/core/scheduler/local.py +377 -0
- monoco/core/watcher/__init__.py +57 -0
- monoco/core/watcher/base.py +365 -0
- monoco/core/watcher/dropzone.py +152 -0
- monoco/core/watcher/issue.py +303 -0
- monoco/core/watcher/memo.py +200 -0
- monoco/core/watcher/task.py +238 -0
- monoco/daemon/events.py +34 -0
- monoco/daemon/scheduler.py +172 -201
- monoco/daemon/services.py +27 -243
- monoco/features/agent/__init__.py +25 -7
- monoco/features/agent/cli.py +91 -57
- monoco/features/agent/engines.py +31 -170
- monoco/features/agent/worker.py +1 -1
- monoco/features/issue/commands.py +90 -32
- monoco/features/issue/core.py +249 -4
- monoco/features/spike/commands.py +5 -3
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/METADATA +1 -1
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/RECORD +41 -20
- monoco/features/agent/apoptosis.py +0 -44
- monoco/features/agent/manager.py +0 -127
- monoco/features/agent/session.py +0 -169
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/WHEEL +0 -0
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/entry_points.txt +0 -0
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
"""
|
|
2
|
+
TaskWatcher - Monitors task files for changes.
|
|
3
|
+
|
|
4
|
+
Part of Layer 1 (File Watcher) in the event automation framework.
|
|
5
|
+
Emits events for task status changes and completion.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import asyncio
|
|
11
|
+
import logging
|
|
12
|
+
import re
|
|
13
|
+
from dataclasses import dataclass
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import Any, Dict, List, Optional, Set
|
|
16
|
+
|
|
17
|
+
from monoco.core.scheduler import AgentEventType, EventBus, event_bus
|
|
18
|
+
|
|
19
|
+
from .base import (
|
|
20
|
+
ChangeType,
|
|
21
|
+
FieldChange,
|
|
22
|
+
FileEvent,
|
|
23
|
+
FilesystemWatcher,
|
|
24
|
+
WatchConfig,
|
|
25
|
+
PollingWatcher,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
logger = logging.getLogger(__name__)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class TaskFileEvent(FileEvent):
    """FileEvent specialized for task files.

    Carries the list of per-task change dicts detected by TaskWatcher in
    addition to the base file-event fields.
    """

    def __init__(
        self,
        path: Path,
        change_type: ChangeType,
        task_changes: Optional[List[Dict[str, Any]]] = None,
        **kwargs,
    ):
        super().__init__(
            path=path,
            change_type=change_type,
            watcher_name="TaskWatcher",
            **kwargs,
        )
        # Falsy input (None or an empty list) is normalized to a new list.
        self.task_changes = task_changes if task_changes else []

    def to_agent_event_type(self) -> Optional[AgentEventType]:
        """Tasks map to issue updates for now."""
        return AgentEventType.ISSUE_UPDATED

    def to_payload(self) -> Dict[str, Any]:
        """Return the base payload augmented with Task-specific fields."""
        payload = super().to_payload()
        payload["task_changes"] = self.task_changes
        return payload
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@dataclass
class TaskItem:
    """A single markdown checkbox task."""

    # Task text with the checkbox marker stripped.
    content: str
    # Single-character checkbox state: " ", "x", "X", "-", or "/".
    state: str
    # 1-based line number within the source file.
    line_number: int
    # Nesting depth derived from leading indentation.
    level: int = 0

    @property
    def is_completed(self) -> bool:
        """True when the checkbox is ticked ("x" or "X")."""
        return self.state in ("x", "X")

    @property
    def is_in_progress(self) -> bool:
        """True for the in-progress markers "-" and "/"."""
        return self.state == "-" or self.state == "/"
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
class TaskWatcher(PollingWatcher):
    """
    Watcher for markdown task files.

    Polls a task file (e.g. tasks.md, TODO.md) and emits a TaskFileEvent
    whenever checkbox items are:
    - created
    - deleted
    - changed in state (todo -> doing -> done)

    Example:
        >>> config = WatchConfig(
        ...     path=Path("./tasks.md"),
        ...     patterns=["*.md"],
        ... )
        >>> watcher = TaskWatcher(config)
        >>> await watcher.start()
    """

    # Matches markdown checkbox lines: "- [x] text", "  - [ ] text", ...
    # Groups: (1) leading indent, (2) state character, (3) task text.
    # re.MULTILINE is harmless here: the pattern is applied line-by-line
    # via match() in _parse_tasks, never against the whole document.
    TASK_PATTERN = re.compile(
        r"^(\s*)-\s*\[([ xX\-/])\]\s*(.+)$",
        re.MULTILINE,
    )

    def __init__(
        self,
        config: WatchConfig,
        event_bus: Optional[EventBus] = None,
        name: str = "TaskWatcher",
    ):
        super().__init__(config, event_bus, name)
        # Snapshot of the last parsed file: task_id -> TaskItem.
        self._task_cache: Dict[str, TaskItem] = {}

    async def _check_changes(self) -> None:
        """Poll the task file once and emit events for any detected changes."""
        if not self.config.path.exists():
            return

        try:
            content = self._read_file_content(self.config.path) or ""
            current_tasks = self._parse_tasks(content)

            task_changes = self._detect_task_changes(current_tasks)
            if task_changes:
                await self._emit_task_changes(task_changes)

            # Refresh the cache even when nothing changed so the next
            # diff runs against the latest snapshot.
            self._task_cache = current_tasks

        except Exception as e:
            # Best-effort polling loop: log and retry on the next tick
            # rather than killing the watcher.
            logger.error(f"Error checking task file: {e}")

    def _parse_tasks(self, content: str) -> Dict[str, TaskItem]:
        """Parse checkbox task items out of *content*.

        Returns a mapping of task_id -> TaskItem. The id is an md5 prefix
        of "{line_number}:{text}", so a task that merely shifts lines is
        reported as deleted + created rather than modified.
        """
        import hashlib  # hoisted: was re-imported inside the per-line loop

        tasks: Dict[str, TaskItem] = {}
        for line_num, line in enumerate(content.split("\n"), 1):
            match = self.TASK_PATTERN.match(line)
            if not match:
                continue

            indent, state, raw_text = match.group(1), match.group(2), match.group(3)
            task_content = raw_text.strip()

            # md5 is a cheap stable fingerprint here, not a security measure.
            task_id = hashlib.md5(
                f"{line_num}:{task_content}".encode()
            ).hexdigest()[:12]

            tasks[task_id] = TaskItem(
                content=task_content,
                state=state,
                line_number=line_num,
                level=len(indent) // 2,  # assumes 2-space nesting indents
            )

        return tasks

    def _detect_task_changes(
        self,
        current_tasks: Dict[str, TaskItem],
    ) -> List[Dict[str, Any]]:
        """Diff *current_tasks* against the cached snapshot.

        Returns a list of change dicts whose "type" is one of
        "created", "deleted", or "state_changed".
        """
        changes: List[Dict[str, Any]] = []
        current_ids = set(current_tasks.keys())
        cached_ids = set(self._task_cache.keys())

        # New tasks
        for task_id in current_ids - cached_ids:
            task = current_tasks[task_id]
            changes.append({
                "type": "created",
                "task_id": task_id,
                "content": task.content,
                "state": task.state,
            })

        # Deleted tasks
        for task_id in cached_ids - current_ids:
            task = self._task_cache[task_id]
            changes.append({
                "type": "deleted",
                "task_id": task_id,
                "content": task.content,
            })

        # Tasks present in both snapshots: report state transitions only.
        for task_id in current_ids & cached_ids:
            current = current_tasks[task_id]
            cached = self._task_cache[task_id]

            if current.state != cached.state:
                changes.append({
                    "type": "state_changed",
                    "task_id": task_id,
                    "content": current.content,
                    "old_state": cached.state,
                    "new_state": current.state,
                    "is_completed": current.is_completed,
                })

        return changes

    async def _emit_task_changes(self, changes: List[Dict[str, Any]]) -> None:
        """Emit a single TaskFileEvent carrying all detected changes."""
        event = TaskFileEvent(
            path=self.config.path,
            change_type=ChangeType.MODIFIED,
            task_changes=changes,
            metadata={
                "total_changes": len(changes),
                # Only "state_changed" entries carry "is_completed".
                "completed_tasks": sum(1 for c in changes if c.get("is_completed")),
            },
        )
        await self.emit(event)

        # Log summary
        created = sum(1 for c in changes if c["type"] == "created")
        completed = sum(1 for c in changes if c["type"] == "state_changed" and c.get("is_completed"))
        logger.debug(f"Task changes: {created} created, {completed} completed")

    def get_task_stats(self) -> Dict[str, int]:
        """Return counts of total/completed/in-progress/pending cached tasks."""
        total = len(self._task_cache)
        completed = sum(1 for t in self._task_cache.values() if t.is_completed)
        in_progress = sum(1 for t in self._task_cache.values() if t.is_in_progress)

        return {
            "total": total,
            "completed": completed,
            "in_progress": in_progress,
            "pending": total - completed - in_progress,
        }

    def get_stats(self) -> Dict[str, Any]:
        """Return base watcher statistics merged with task counts."""
        stats = super().get_stats()
        stats.update(self.get_task_stats())
        return stats
|
monoco/daemon/events.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""
|
|
2
|
+
EventBus - Central event system for Agent scheduling.
|
|
3
|
+
|
|
4
|
+
DEPRECATED: This module has been moved to monoco.core.scheduler.
|
|
5
|
+
This file is kept for backward compatibility and re-exports from the new location.
|
|
6
|
+
|
|
7
|
+
Migration:
|
|
8
|
+
Old: from monoco.daemon.events import AgentEventType, event_bus
|
|
9
|
+
New: from monoco.core.scheduler import AgentEventType, event_bus
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import warnings
|
|
13
|
+
from monoco.core.scheduler import (
|
|
14
|
+
AgentEventType,
|
|
15
|
+
AgentEvent,
|
|
16
|
+
EventBus,
|
|
17
|
+
event_bus,
|
|
18
|
+
EventHandler,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
warnings.warn(
|
|
22
|
+
"monoco.daemon.events is deprecated. "
|
|
23
|
+
"Use monoco.core.scheduler instead.",
|
|
24
|
+
DeprecationWarning,
|
|
25
|
+
stacklevel=2
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
__all__ = [
|
|
29
|
+
"AgentEventType",
|
|
30
|
+
"AgentEvent",
|
|
31
|
+
"EventBus",
|
|
32
|
+
"event_bus",
|
|
33
|
+
"EventHandler",
|
|
34
|
+
]
|
monoco/daemon/scheduler.py
CHANGED
|
@@ -1,236 +1,207 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Scheduler Service - Unified event-driven architecture (FEAT-0164).
|
|
3
|
+
|
|
4
|
+
This module implements a unified event-driven scheduler service that:
|
|
5
|
+
1. Uses AgentScheduler for agent lifecycle management (FEAT-0160)
|
|
6
|
+
2. Integrates Watcher framework for file system events (FEAT-0161)
|
|
7
|
+
3. Uses ActionRouter for event routing (FEAT-0161)
|
|
8
|
+
4. Uses new Handler framework from core.automation (FEAT-0162)
|
|
9
|
+
|
|
10
|
+
Replaces the old architecture based on SessionManager + SemaphoreManager + polling loops.
|
|
11
|
+
"""
|
|
12
|
+
|
|
1
13
|
import asyncio
|
|
2
14
|
import logging
|
|
3
15
|
import os
|
|
4
|
-
from typing import Dict, Optional, List, Any
|
|
16
|
+
from typing import Dict, Optional, List, Any
|
|
5
17
|
from pathlib import Path
|
|
6
18
|
|
|
7
|
-
from monoco.daemon.services import ProjectManager
|
|
8
|
-
from monoco.
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
19
|
+
from monoco.daemon.services import ProjectManager
|
|
20
|
+
from monoco.core.scheduler import (
|
|
21
|
+
AgentEventType,
|
|
22
|
+
event_bus,
|
|
23
|
+
AgentScheduler,
|
|
24
|
+
LocalProcessScheduler,
|
|
25
|
+
)
|
|
26
|
+
from monoco.core.router import ActionRouter
|
|
27
|
+
from monoco.core.watcher import WatchConfig, IssueWatcher, MemoWatcher, TaskWatcher
|
|
28
|
+
from monoco.core.automation.handlers import start_all_handlers, stop_all_handlers
|
|
14
29
|
from monoco.core.config import get_config
|
|
15
30
|
|
|
16
31
|
logger = logging.getLogger("monoco.daemon.scheduler")
|
|
17
32
|
|
|
33
|
+
|
|
18
34
|
class SchedulerService:
    """
    Unified event-driven scheduler service.

    Responsibilities:
    - Initialize and manage AgentScheduler
    - Setup and manage Watchers for file system events
    - Configure ActionRouter for event routing
    - Start/stop all handlers

    Architecture:
    ```
    SchedulerService
    ├── AgentScheduler (LocalProcessScheduler)
    │   └── Manages agent process lifecycle
    ├── Watchers
    │   ├── IssueWatcher -> EventBus
    │   ├── MemoWatcher -> EventBus
    │   └── TaskWatcher -> EventBus
    ├── ActionRouter
    │   └── Routes events to Actions
    └── Handlers (from core.automation)
    ```
    """

    def __init__(self, project_manager: ProjectManager):
        self.project_manager = project_manager

        # AgentScheduler (FEAT-0160)
        scheduler_config = self._load_scheduler_config()
        self.agent_scheduler: AgentScheduler = LocalProcessScheduler(
            max_concurrent=scheduler_config.get("max_concurrent", 5),
            project_root=Path.cwd(),
        )

        # ActionRouter (FEAT-0161)
        self.action_router = ActionRouter(event_bus)

        # Watchers (FEAT-0161)
        self.watchers: List[Any] = []

        # Handlers (FEAT-0162)
        self.handlers: List[Any] = []

        # Background tasks
        self._tasks: List[asyncio.Task] = []
        self._running = False

    def _load_scheduler_config(self) -> Dict[str, Any]:
        """Load scheduler configuration from config files and env vars.

        Precedence: MONOCO_MAX_AGENTS env var > agent.concurrency.global_max
        from config > built-in default of 5. Any failure falls back to the
        defaults rather than raising.
        """
        config = {"max_concurrent": 5}

        try:
            settings = get_config()

            # Config-file value, read defensively via hasattr probes.
            if hasattr(settings, "agent") and hasattr(settings.agent, "concurrency"):
                concurrency_config = settings.agent.concurrency
                if hasattr(concurrency_config, "global_max"):
                    config["max_concurrent"] = concurrency_config.global_max

            # Environment variable override wins over config.
            env_max_agents = os.environ.get("MONOCO_MAX_AGENTS")
            if env_max_agents:
                try:
                    config["max_concurrent"] = int(env_max_agents)
                    logger.info(f"Overriding max_concurrent from environment: {env_max_agents}")
                except ValueError:
                    logger.warning(f"Invalid MONOCO_MAX_AGENTS value: {env_max_agents}")

            return config
        except Exception as e:
            logger.warning(f"Failed to load scheduler config: {e}. Using defaults.")
            return config

    async def start(self):
        """Start the scheduler service."""
        logger.info("Starting Scheduler Service (unified event-driven architecture)...")
        self._running = True

        # 1. Start EventBus
        await event_bus.start()

        # 2. Start AgentScheduler
        await self.agent_scheduler.start()

        # 3. Setup and start Watchers
        self._setup_watchers()
        for watcher in self.watchers:
            await watcher.start()

        # 4. Start Handlers (FEAT-0162)
        self.handlers = start_all_handlers(self.agent_scheduler)

        # 5. Start ActionRouter
        await self.action_router.start()

        logger.info("Scheduler Service started with unified event-driven architecture")

    def stop(self):
        """Stop the scheduler service.

        NOTE(review): this is a sync method that fires async shutdowns via
        asyncio.create_task without retaining or awaiting them — it assumes
        a running event loop and does not guarantee shutdown completes
        before return; confirm callers invoke it from within the loop.
        """
        logger.info("Stopping Scheduler Service...")
        self._running = False

        # Cancel background tasks
        for task in self._tasks:
            task.cancel()

        # Stop ActionRouter
        asyncio.create_task(self.action_router.stop())

        # Stop Handlers
        stop_all_handlers(self.handlers)
        self.handlers = []

        # Stop Watchers
        for watcher in self.watchers:
            asyncio.create_task(watcher.stop())
        self.watchers = []

        # Stop AgentScheduler
        asyncio.create_task(self.agent_scheduler.stop())

        # Stop EventBus
        asyncio.create_task(event_bus.stop())

        logger.info("Scheduler Service stopped")

    def _setup_watchers(self):
        """Initialize all filesystem watchers, one set per managed project."""
        for project_ctx in self.project_manager.projects.values():
            # IssueWatcher: always attached, watches the issues tree.
            issue_config = WatchConfig(
                path=project_ctx.issues_root,
                patterns=["*.md"],
                recursive=True,
            )
            self.watchers.append(IssueWatcher(issue_config, event_bus))

            # MemoWatcher: only when the memo inbox file exists.
            memo_path = project_ctx.path / "Memos" / "inbox.md"
            if memo_path.exists():
                memo_config = WatchConfig(
                    path=memo_path,
                    patterns=["*.md"],
                )
                self.watchers.append(MemoWatcher(memo_config, event_bus))

            # TaskWatcher: only when tasks.md exists.
            task_path = project_ctx.path / "tasks.md"
            if task_path.exists():
                task_config = WatchConfig(
                    path=task_path,
                    patterns=["*.md"],
                )
                self.watchers.append(TaskWatcher(task_config, event_bus))

        logger.info(f"Setup {len(self.watchers)} watchers")

    def get_stats(self) -> Dict[str, Any]:
        """Get scheduler service statistics."""
        return {
            "running": self._running,
            "event_bus": event_bus.get_stats(),
            "agent_scheduler": self.agent_scheduler.get_stats(),
            "watchers": len(self.watchers),
            "handlers": len(self.handlers),
            "action_router": self.action_router.get_stats(),
            "projects": len(self.project_manager.projects),
        }