monoco-toolkit 0.3.11__py3-none-any.whl → 0.3.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monoco/core/automation/__init__.py +51 -0
- monoco/core/automation/config.py +338 -0
- monoco/core/automation/field_watcher.py +296 -0
- monoco/core/automation/handlers.py +723 -0
- monoco/core/config.py +1 -1
- monoco/core/executor/__init__.py +38 -0
- monoco/core/executor/agent_action.py +254 -0
- monoco/core/executor/git_action.py +303 -0
- monoco/core/executor/im_action.py +309 -0
- monoco/core/executor/pytest_action.py +218 -0
- monoco/core/git.py +15 -0
- monoco/core/hooks/context.py +74 -13
- monoco/core/router/__init__.py +55 -0
- monoco/core/router/action.py +341 -0
- monoco/core/router/router.py +392 -0
- monoco/core/scheduler/__init__.py +63 -0
- monoco/core/scheduler/base.py +152 -0
- monoco/core/scheduler/engines.py +175 -0
- monoco/core/scheduler/events.py +171 -0
- monoco/core/scheduler/local.py +377 -0
- monoco/core/watcher/__init__.py +57 -0
- monoco/core/watcher/base.py +365 -0
- monoco/core/watcher/dropzone.py +152 -0
- monoco/core/watcher/issue.py +303 -0
- monoco/core/watcher/memo.py +200 -0
- monoco/core/watcher/task.py +238 -0
- monoco/daemon/events.py +34 -0
- monoco/daemon/scheduler.py +172 -201
- monoco/daemon/services.py +27 -243
- monoco/features/agent/__init__.py +25 -7
- monoco/features/agent/cli.py +91 -57
- monoco/features/agent/engines.py +31 -170
- monoco/features/agent/worker.py +1 -1
- monoco/features/issue/commands.py +90 -32
- monoco/features/issue/core.py +249 -4
- monoco/features/spike/commands.py +5 -3
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/METADATA +1 -1
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/RECORD +41 -20
- monoco/features/agent/apoptosis.py +0 -44
- monoco/features/agent/manager.py +0 -127
- monoco/features/agent/session.py +0 -169
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/WHEEL +0 -0
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/entry_points.txt +0 -0
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,377 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LocalProcessScheduler - Local process-based agent scheduler.
|
|
3
|
+
|
|
4
|
+
Implements the AgentScheduler ABC using local subprocess execution.
|
|
5
|
+
Integrates with SessionManager and Worker for process lifecycle management.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import logging
|
|
10
|
+
import subprocess
|
|
11
|
+
import sys
|
|
12
|
+
import time
|
|
13
|
+
import uuid
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import Dict, Optional, Any
|
|
16
|
+
|
|
17
|
+
from .base import AgentScheduler, AgentTask, AgentStatus
|
|
18
|
+
from .engines import EngineFactory
|
|
19
|
+
from .events import AgentEventType, event_bus
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger("monoco.core.scheduler.local")
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class LocalProcessScheduler(AgentScheduler):
    """
    Local process-based scheduler for agent execution.

    This scheduler manages agent tasks as local subprocesses, providing:
    - Process lifecycle management (spawn, monitor, terminate)
    - Concurrency quota control via semaphore
    - Timeout handling
    - Session tracking and status reporting

    Attributes:
        max_concurrent: Maximum number of concurrent agent processes
        project_root: Root path of the Monoco project

    Example:
        >>> scheduler = LocalProcessScheduler(max_concurrent=5)
        >>> session_id = await scheduler.schedule(task)
        >>> status = scheduler.get_status(session_id)
    """

    # Statuses a session can never leave. A session in one of these states
    # has already released its semaphore slot (in _handle_completion,
    # _handle_failure, _handle_timeout, or terminate), so terminate() must
    # never release it again.
    _TERMINAL_STATES = (
        AgentStatus.COMPLETED,
        AgentStatus.FAILED,
        AgentStatus.TERMINATED,
        AgentStatus.TIMEOUT,
    )

    def __init__(
        self,
        max_concurrent: int = 5,
        project_root: Optional[Path] = None,
    ):
        self.max_concurrent = max_concurrent
        self.project_root = project_root or Path.cwd()

        # Session tracking: session_id -> process info
        self._sessions: Dict[str, Dict[str, Any]] = {}

        # Concurrency control
        self._semaphore = asyncio.Semaphore(max_concurrent)

        # Background monitoring task
        self._monitor_task: Optional[asyncio.Task] = None
        self._running = False

    async def start(self):
        """Start the scheduler and monitoring loop."""
        if self._running:
            return
        self._running = True
        self._monitor_task = asyncio.create_task(self._monitor_loop())
        logger.info(f"LocalProcessScheduler started (max_concurrent={self.max_concurrent})")

    async def stop(self):
        """Stop the scheduler and terminate all sessions."""
        if not self._running:
            return
        self._running = False

        # Cancel monitor loop
        if self._monitor_task:
            self._monitor_task.cancel()
            try:
                await self._monitor_task
            except asyncio.CancelledError:
                pass

        # Terminate all active sessions. terminate() skips sessions that are
        # already in a terminal state, so finished sessions do not get their
        # semaphore slot released a second time here.
        for session_id in list(self._sessions.keys()):
            await self.terminate(session_id)

        logger.info("LocalProcessScheduler stopped")

    async def schedule(self, task: AgentTask) -> str:
        """
        Schedule a task for execution as a local subprocess.

        Args:
            task: The task to schedule

        Returns:
            session_id: Unique identifier for the scheduled session

        Raises:
            RuntimeError: If scheduling fails or engine is not supported
        """
        session_id = str(uuid.uuid4())

        # Acquire semaphore slot. asyncio.Semaphore.acquire() suspends until
        # a slot is free and always returns True, so there is no failure
        # result to check (the previous dead `if not acquired` branch was
        # unreachable and has been removed).
        await self._semaphore.acquire()

        try:
            # Get engine adapter
            adapter = EngineFactory.create(task.engine)
            command = adapter.build_command(task.prompt)

            logger.info(f"[{session_id}] Starting {task.role_name} with {task.engine} engine")

            # Start subprocess.
            # NOTE(review): Popen is a blocking call inside a coroutine; it is
            # fast in practice, but could be switched to
            # asyncio.create_subprocess_exec if spawn latency ever matters.
            process = subprocess.Popen(
                command,
                stdout=sys.stdout,
                stderr=sys.stderr,
                text=True,
                cwd=self.project_root,
            )

            # Track session
            self._sessions[session_id] = {
                "task": task,
                "process": process,
                "status": AgentStatus.RUNNING,
                "started_at": time.time(),
                "role_name": task.role_name,
                "issue_id": task.issue_id,
            }

            # Publish session started event
            await event_bus.publish(
                AgentEventType.SESSION_STARTED,
                {
                    "session_id": session_id,
                    "issue_id": task.issue_id,
                    "role_name": task.role_name,
                    "engine": task.engine,
                },
                source="LocalProcessScheduler"
            )

            return session_id

        except Exception as e:
            # Release semaphore on failure so the slot is not leaked
            self._semaphore.release()
            logger.error(f"[{session_id}] Failed to start task: {e}")
            raise RuntimeError(f"Failed to schedule task: {e}") from e

    async def terminate(self, session_id: str) -> bool:
        """
        Terminate a running or pending session.

        Args:
            session_id: The session ID to terminate

        Returns:
            True if termination was successful, False otherwise
        """
        session = self._sessions.get(session_id)
        if not session:
            logger.warning(f"[{session_id}] Session not found for termination")
            return False

        # Bug fix: a session already in a terminal state has had its
        # semaphore slot released by the handler that finished it. Releasing
        # again would silently grow the concurrency quota, because
        # asyncio.Semaphore places no upper bound on release(). Bail out.
        if session["status"] in self._TERMINAL_STATES:
            logger.debug(f"[{session_id}] Session already finished; nothing to terminate")
            return False

        process = session.get("process")
        if not process:
            return False

        try:
            # Try graceful termination
            process.terminate()

            # Wait a bit for graceful shutdown
            try:
                process.wait(timeout=2)
            except subprocess.TimeoutExpired:
                # Force kill if still running
                process.kill()
                process.wait()

            session["status"] = AgentStatus.TERMINATED

            # Publish session terminated event
            await event_bus.publish(
                AgentEventType.SESSION_TERMINATED,
                {
                    "session_id": session_id,
                    "issue_id": session.get("issue_id"),
                    "role_name": session.get("role_name"),
                },
                source="LocalProcessScheduler"
            )

            # Release semaphore (exactly once: the terminal-state guard above
            # ensures no other handler has released it for this session)
            self._semaphore.release()

            logger.info(f"[{session_id}] Session terminated")
            return True

        except Exception as e:
            logger.error(f"[{session_id}] Error terminating session: {e}")
            return False

    def get_status(self, session_id: str) -> Optional[AgentStatus]:
        """
        Get the current status of a session.

        Args:
            session_id: The session ID to query

        Returns:
            The current AgentStatus, or None if session not found
        """
        session = self._sessions.get(session_id)
        if not session:
            return None
        return session.get("status")

    def list_active(self) -> Dict[str, AgentStatus]:
        """
        List all active (pending or running) sessions.

        Returns:
            Dictionary mapping session_id to AgentStatus
        """
        return {
            session_id: session["status"]
            for session_id, session in self._sessions.items()
            if session["status"] in (AgentStatus.PENDING, AgentStatus.RUNNING)
        }

    def get_stats(self) -> Dict[str, Any]:
        """
        Get scheduler statistics.

        Returns:
            Dictionary containing scheduler metrics
        """
        active_count = len(self.list_active())
        total_count = len(self._sessions)

        return {
            "running": self._running,
            "max_concurrent": self.max_concurrent,
            "active_sessions": active_count,
            "total_sessions": total_count,
            "available_slots": self.max_concurrent - active_count,
        }

    async def _monitor_loop(self):
        """Background loop to monitor session statuses."""
        logger.info("Starting session monitor loop")

        while self._running:
            try:
                await self._check_sessions()
                await asyncio.sleep(2)  # Check every 2 seconds
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error in monitor loop: {e}")
                await asyncio.sleep(2)

    async def _check_sessions(self):
        """Check all sessions and update statuses."""
        # Iterate over a snapshot: handlers may mutate session dicts while
        # we walk them.
        for session_id, session in list(self._sessions.items()):
            process = session.get("process")
            if not process:
                continue

            # Skip if already in terminal state
            if session["status"] in self._TERMINAL_STATES:
                continue

            # Check timeout
            task = session.get("task")
            started_at = session.get("started_at", 0)
            if task and task.timeout and (time.time() - started_at) > task.timeout:
                logger.warning(f"[{session_id}] Task timeout exceeded ({task.timeout}s)")
                await self._handle_timeout(session_id, session)
                continue

            # Check process status
            returncode = process.poll()
            if returncode is None:
                # Still running
                continue

            # Process finished
            if returncode == 0:
                await self._handle_completion(session_id, session)
            else:
                await self._handle_failure(session_id, session, returncode)

    async def _handle_completion(self, session_id: str, session: Dict[str, Any]):
        """Handle successful session completion."""
        session["status"] = AgentStatus.COMPLETED

        # Publish completion event
        await event_bus.publish(
            AgentEventType.SESSION_COMPLETED,
            {
                "session_id": session_id,
                "issue_id": session.get("issue_id"),
                "role_name": session.get("role_name"),
            },
            source="LocalProcessScheduler"
        )

        # Release semaphore
        self._semaphore.release()

        logger.info(f"[{session_id}] Session completed successfully")

    async def _handle_failure(self, session_id: str, session: Dict[str, Any], returncode: int):
        """Handle session failure."""
        session["status"] = AgentStatus.FAILED

        # Publish failure event
        await event_bus.publish(
            AgentEventType.SESSION_FAILED,
            {
                "session_id": session_id,
                "issue_id": session.get("issue_id"),
                "role_name": session.get("role_name"),
                "reason": f"Process exited with code {returncode}",
            },
            source="LocalProcessScheduler"
        )

        # Release semaphore
        self._semaphore.release()

        logger.error(f"[{session_id}] Session failed with exit code {returncode}")

    async def _handle_timeout(self, session_id: str, session: Dict[str, Any]):
        """Handle session timeout."""
        process = session.get("process")

        # Kill the process
        if process:
            try:
                process.kill()
                process.wait()
            except Exception as e:
                logger.error(f"[{session_id}] Error killing timed out process: {e}")

        session["status"] = AgentStatus.TIMEOUT

        # Publish failure event (timeout is a type of failure)
        await event_bus.publish(
            AgentEventType.SESSION_FAILED,
            {
                "session_id": session_id,
                "issue_id": session.get("issue_id"),
                "role_name": session.get("role_name"),
                "reason": "Timeout exceeded",
            },
            source="LocalProcessScheduler"
        )

        # Release semaphore
        self._semaphore.release()
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Watcher Module - Layer 1 of the Event Automation Framework.
|
|
3
|
+
|
|
4
|
+
This module provides file system watching capabilities with event emission.
|
|
5
|
+
It is part of the three-layer architecture:
|
|
6
|
+
- Layer 1: File Watcher (this module)
|
|
7
|
+
- Layer 2: Action Router
|
|
8
|
+
- Layer 3: Action Executor
|
|
9
|
+
|
|
10
|
+
Example Usage:
|
|
11
|
+
>>> from monoco.core.watcher import IssueWatcher, WatchConfig
|
|
12
|
+
>>> from pathlib import Path
|
|
13
|
+
>>>
|
|
14
|
+
>>> config = WatchConfig(
|
|
15
|
+
... path=Path("./Issues"),
|
|
16
|
+
... patterns=["*.md"],
|
|
17
|
+
... recursive=True,
|
|
18
|
+
... )
|
|
19
|
+
>>> watcher = IssueWatcher(config)
|
|
20
|
+
>>> await watcher.start()
|
|
21
|
+
>>> # Events are automatically emitted to EventBus
|
|
22
|
+
>>> await watcher.stop()
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
from .base import (
|
|
26
|
+
ChangeType,
|
|
27
|
+
FieldChange,
|
|
28
|
+
FileEvent,
|
|
29
|
+
FilesystemWatcher,
|
|
30
|
+
PollingWatcher,
|
|
31
|
+
WatchdogWatcher,
|
|
32
|
+
WatchConfig,
|
|
33
|
+
)
|
|
34
|
+
from .issue import IssueWatcher, IssueFileEvent
|
|
35
|
+
from .memo import MemoWatcher, MemoFileEvent
|
|
36
|
+
from .task import TaskWatcher, TaskFileEvent
|
|
37
|
+
from .dropzone import DropzoneWatcher, DropzoneFileEvent
|
|
38
|
+
|
|
39
|
+
# Explicit public API for ``monoco.core.watcher``: controls ``import *`` and
# documents which names are supported for external use.
__all__ = [
    # Base classes
    "ChangeType",
    "FieldChange",
    "FileEvent",
    "FilesystemWatcher",
    "PollingWatcher",
    "WatchdogWatcher",
    "WatchConfig",
    # Concrete watchers
    "IssueWatcher",
    "IssueFileEvent",
    "MemoWatcher",
    "MemoFileEvent",
    "TaskWatcher",
    "TaskFileEvent",
    "DropzoneWatcher",
    "DropzoneFileEvent",
]
|