monoco-toolkit 0.3.11__py3-none-any.whl → 0.3.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monoco/core/automation/__init__.py +51 -0
- monoco/core/automation/config.py +338 -0
- monoco/core/automation/field_watcher.py +296 -0
- monoco/core/automation/handlers.py +723 -0
- monoco/core/config.py +1 -1
- monoco/core/executor/__init__.py +38 -0
- monoco/core/executor/agent_action.py +254 -0
- monoco/core/executor/git_action.py +303 -0
- monoco/core/executor/im_action.py +309 -0
- monoco/core/executor/pytest_action.py +218 -0
- monoco/core/git.py +15 -0
- monoco/core/hooks/context.py +74 -13
- monoco/core/router/__init__.py +55 -0
- monoco/core/router/action.py +341 -0
- monoco/core/router/router.py +392 -0
- monoco/core/scheduler/__init__.py +63 -0
- monoco/core/scheduler/base.py +152 -0
- monoco/core/scheduler/engines.py +175 -0
- monoco/core/scheduler/events.py +171 -0
- monoco/core/scheduler/local.py +377 -0
- monoco/core/watcher/__init__.py +57 -0
- monoco/core/watcher/base.py +365 -0
- monoco/core/watcher/dropzone.py +152 -0
- monoco/core/watcher/issue.py +303 -0
- monoco/core/watcher/memo.py +200 -0
- monoco/core/watcher/task.py +238 -0
- monoco/daemon/events.py +34 -0
- monoco/daemon/scheduler.py +172 -201
- monoco/daemon/services.py +27 -243
- monoco/features/agent/__init__.py +25 -7
- monoco/features/agent/cli.py +91 -57
- monoco/features/agent/engines.py +31 -170
- monoco/features/agent/worker.py +1 -1
- monoco/features/issue/commands.py +90 -32
- monoco/features/issue/core.py +249 -4
- monoco/features/spike/commands.py +5 -3
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/METADATA +1 -1
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/RECORD +41 -20
- monoco/features/agent/apoptosis.py +0 -44
- monoco/features/agent/manager.py +0 -127
- monoco/features/agent/session.py +0 -169
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/WHEEL +0 -0
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/entry_points.txt +0 -0
- {monoco_toolkit-0.3.11.dist-info → monoco_toolkit-0.3.12.dist-info}/licenses/LICENSE +0 -0
monoco/daemon/services.py
CHANGED
@@ -2,10 +2,8 @@ import logging
 from typing import List, Optional, Dict, Any
 from asyncio import Queue
 from pathlib import Path
-from datetime import datetime, timedelta
-from threading import Lock
 
-import
+from monoco.core.workspace import MonocoProject, Workspace
 
 logger = logging.getLogger("monoco.daemon.services")
 
@@ -33,6 +31,7 @@ class Broadcaster:
         if not self.subscribers:
             return
 
+        import json
         message = {"event": event_type, "data": json.dumps(payload)}
 
         # Dispatch to all queues
@@ -42,12 +41,6 @@ class Broadcaster:
         logger.debug(f"Broadcasted {event_type} to {len(self.subscribers)} clients.")
 
 
-# Monitors moved to monoco.core.git and monoco.features.issue.monitor
-
-
-from monoco.core.workspace import MonocoProject, Workspace
-
-
 class ProjectContext:
     """
     Holds the runtime state for a single project.
@@ -60,7 +53,31 @@ class ProjectContext:
         self.name = project.name
         self.path = project.path
         self.issues_root = project.issues_root
-
+
+        async def on_upsert(issue_data: dict):
+            await broadcaster.broadcast(
+                "issue_upserted", {"issue": issue_data, "project_id": self.id}
+            )
+
+        async def on_delete(issue_data: dict):
+            await broadcaster.broadcast(
+                "issue_deleted", {"id": issue_data["id"], "project_id": self.id}
+            )
+
+        from monoco.features.issue.monitor import IssueMonitor
+        self.monitor = IssueMonitor(self.issues_root, on_upsert, on_delete)
+
+    async def notify_move(self, old_path: str, new_path: str, issue_data: dict):
+        """Explicitly notify frontend about a logical move (Physical path changed)."""
+        await self.broadcaster.broadcast(
+            "issue_moved",
+            {
+                "old_path": old_path,
+                "new_path": new_path,
+                "issue": issue_data,
+                "project_id": self.id,
+            },
+        )
 
     async def start(self):
         await self.monitor.start()
@@ -69,189 +86,6 @@ class ProjectContext:
         self.monitor.stop()
 
 
-class SemaphoreManager:
-    """
-    Manages concurrency limits for agent sessions using role-based semaphores.
-    Prevents fork bomb by limiting concurrent agents per role and globally.
-    """
-
-    def __init__(self, config: Optional[Any] = None):
-        """
-        Initialize the SemaphoreManager.
-
-        Args:
-            config: AgentConcurrencyConfig or dict with concurrency settings
-        """
-        self._lock = Lock()
-        self._active_sessions: Dict[str, str] = {}  # session_id -> role_name
-        self._role_counts: Dict[str, int] = {}  # role_name -> count
-        self._failure_registry: Dict[str, datetime] = {}  # issue_id -> last_failure_time
-
-        # Default conservative limits
-        self._global_max = 3
-        self._role_limits: Dict[str, int] = {
-            "Engineer": 1,
-            "Architect": 1,
-            "Reviewer": 1,
-            "Planner": 1,
-        }
-        self._failure_cooldown_seconds = 60
-
-        # Apply config if provided
-        if config:
-            self._apply_config(config)
-
-    def _apply_config(self, config: Any) -> None:
-        """Apply configuration settings."""
-        # Handle both dict and Pydantic model
-        if hasattr(config, 'global_max'):
-            self._global_max = config.global_max
-        if hasattr(config, 'failure_cooldown_seconds'):
-            self._failure_cooldown_seconds = config.failure_cooldown_seconds
-
-        # Role-specific limits
-        for role in ["Engineer", "Architect", "Reviewer", "Planner"]:
-            if hasattr(config, role.lower()):
-                self._role_limits[role] = getattr(config, role.lower())
-
-    def can_acquire(self, role_name: str, issue_id: Optional[str] = None) -> bool:
-        """
-        Check if a new session can be acquired for the given role.
-
-        Args:
-            role_name: The role to check (e.g., "Engineer", "Architect")
-            issue_id: Optional issue ID to check for failure cooldown
-
-        Returns:
-            True if the session can be started, False otherwise
-        """
-        with self._lock:
-            # Check global limit
-            total_active = len(self._active_sessions)
-            if total_active >= self._global_max:
-                logger.warning(
-                    f"Global concurrency limit reached ({self._global_max}). "
-                    f"Cannot spawn {role_name}."
-                )
-                return False
-
-            # Check role-specific limit
-            role_count = self._role_counts.get(role_name, 0)
-            role_limit = self._role_limits.get(role_name, 1)
-            if role_count >= role_limit:
-                logger.warning(
-                    f"Role concurrency limit reached for {role_name} "
-                    f"({role_count}/{role_limit})."
-                )
-                return False
-
-            # Check failure cooldown for this issue
-            if issue_id and issue_id in self._failure_registry:
-                last_failure = self._failure_registry[issue_id]
-                cooldown = timedelta(seconds=self._failure_cooldown_seconds)
-                if datetime.now() - last_failure < cooldown:
-                    remaining = cooldown - (datetime.now() - last_failure)
-                    logger.warning(
-                        f"Issue {issue_id} is in cooldown period. "
-                        f"Remaining: {remaining.seconds}s. Skipping spawn."
-                    )
-                    return False
-
-            return True
-
-    def acquire(self, session_id: str, role_name: str) -> bool:
-        """
-        Acquire a slot for a new session.
-
-        Args:
-            session_id: Unique identifier for the session
-            role_name: The role of the session
-
-        Returns:
-            True if acquired successfully, False otherwise
-        """
-        with self._lock:
-            if session_id in self._active_sessions:
-                logger.warning(f"Session {session_id} already tracked")
-                return True
-
-            self._active_sessions[session_id] = role_name
-            self._role_counts[role_name] = self._role_counts.get(role_name, 0) + 1
-            logger.info(
-                f"Acquired slot for {role_name} session {session_id}. "
-                f"Global: {len(self._active_sessions)}/{self._global_max}, "
-                f"Role: {self._role_counts[role_name]}/{self._role_limits.get(role_name, 1)}"
-            )
-            return True
-
-    def release(self, session_id: str) -> None:
-        """
-        Release a slot when a session ends.
-
-        Args:
-            session_id: The session ID to release
-        """
-        with self._lock:
-            if session_id not in self._active_sessions:
-                return
-
-            role_name = self._active_sessions.pop(session_id)
-            self._role_counts[role_name] = max(0, self._role_counts.get(role_name, 0) - 1)
-            logger.info(
-                f"Released slot for {role_name} session {session_id}. "
-                f"Global: {len(self._active_sessions)}/{self._global_max}"
-            )
-
-    def record_failure(self, issue_id: str, session_id: Optional[str] = None) -> None:
-        """
-        Record a failure for cooldown purposes.
-
-        Args:
-            issue_id: The issue that failed
-            session_id: Optional session ID to release
-        """
-        with self._lock:
-            self._failure_registry[issue_id] = datetime.now()
-            logger.warning(
-                f"Recorded failure for issue {issue_id}. "
-                f"Cooldown: {self._failure_cooldown_seconds}s"
-            )
-
-        # Release the slot if session_id provided
-        if session_id:
-            self.release(session_id)
-
-    def get_status(self) -> Dict[str, Any]:
-        """
-        Get current semaphore status for monitoring.
-
-        Returns:
-            Dict with current counts and limits
-        """
-        with self._lock:
-            return {
-                "global": {
-                    "active": len(self._active_sessions),
-                    "limit": self._global_max,
-                },
-                "roles": {
-                    role: {
-                        "active": self._role_counts.get(role, 0),
-                        "limit": limit,
-                    }
-                    for role, limit in self._role_limits.items()
-                },
-                "cooldown_issues": len(self._failure_registry),
-            }
-
-    def clear_failure(self, issue_id: str) -> None:
-        """Clear failure record for an issue (e.g., after successful completion)."""
-        with self._lock:
-            if issue_id in self._failure_registry:
-                del self._failure_registry[issue_id]
-                logger.info(f"Cleared failure record for issue {issue_id}")
-
-
 class ProjectManager:
     """
     Discovers and manages multiple Monoco projects within a workspace.
@@ -298,53 +132,3 @@ class ProjectManager:
             }
             for p in self.projects.values()
         ]
-
-
-from monoco.features.issue.monitor import IssueMonitor
-
-
-class ProjectContext:
-    """
-    Holds the runtime state for a single project.
-    Now wraps the core MonocoProject primitive.
-    """
-
-    def __init__(self, project: MonocoProject, broadcaster: Broadcaster):
-        self.project = project
-        self.id = project.id
-        self.name = project.name
-        self.path = project.path
-        self.issues_root = project.issues_root
-
-        async def on_upsert(issue_data: dict):
-            await broadcaster.broadcast(
-                "issue_upserted", {"issue": issue_data, "project_id": self.id}
-            )
-
-        async def on_delete(issue_data: dict):
-            # We skip broadcast here if it's part of a move?
-            # Actually, standard upsert/delete is fine, but we need a specialized event for MOVE
-            # to help VS Code redirect without closing/reopening.
-            await broadcaster.broadcast(
-                "issue_deleted", {"id": issue_data["id"], "project_id": self.id}
-            )
-
-        self.monitor = IssueMonitor(self.issues_root, on_upsert, on_delete)
-
-    async def notify_move(self, old_path: str, new_path: str, issue_data: dict):
-        """Explicitly notify frontend about a logical move (Physical path changed)."""
-        await self.broadcaster.broadcast(
-            "issue_moved",
-            {
-                "old_path": old_path,
-                "new_path": new_path,
-                "issue": issue_data,
-                "project_id": self.id,
-            },
-        )
-
-    async def start(self):
-        await self.monitor.start()
-
-    def stop(self):
-        self.monitor.stop()
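The practical effect of this change is that `ProjectContext` now owns the issue file monitor and relays its callbacks straight to the SSE `Broadcaster` as `issue_upserted`, `issue_deleted`, and the new `issue_moved` events, while the old `SemaphoreManager` (role concurrency limits and failure cooldowns) leaves `services.py` entirely in favour of the new scheduler modules added in this release. Below is a minimal consumer sketch of the three event payloads; the event names and payload keys come from the diff above, while the handler itself and the sample paths are illustrative only.

```python
import json


def handle_event(event_type: str, data: str) -> None:
    """Dispatch one Broadcaster message; payload shapes follow the diff above."""
    payload = json.loads(data)
    if event_type == "issue_upserted":
        print("upsert:", payload["project_id"], payload["issue"])
    elif event_type == "issue_deleted":
        # Deletes carry only the issue id plus the owning project.
        print("delete:", payload["project_id"], payload["id"])
    elif event_type == "issue_moved":
        # New in 0.3.12: both paths are sent so an editor can redirect an
        # open document instead of closing and reopening it.
        print("move:", payload["old_path"], "->", payload["new_path"])


# Shaped the way Broadcaster.broadcast() shapes it: {"event": ..., "data": json.dumps(payload)}
handle_event("issue_moved", json.dumps({
    "old_path": "issues/open/FEAT-0164.md",   # illustrative paths
    "new_path": "issues/done/FEAT-0164.md",
    "issue": {"id": "FEAT-0164"},
    "project_id": "demo-project",
}))
```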
monoco/features/agent/__init__.py
CHANGED
@@ -1,10 +1,25 @@
+"""
+Agent feature module - CLI interface for agent operations.
+
+Note: The old SessionManager/RuntimeSession/ApoptosisManager architecture
+has been removed in FEAT-0164. This module now provides CLI commands that
+use the new AgentScheduler abstraction from core.scheduler.
+"""
+
 from .models import RoleTemplate, AgentRoleConfig as AgentConfig, SchedulerConfig
 from .worker import Worker
 from .config import load_scheduler_config, load_agent_config
 from .defaults import DEFAULT_ROLES
-
-from .
-from .
+
+# Re-export engines from core.scheduler for backward compatibility
+from monoco.core.scheduler import (
+    EngineAdapter,
+    EngineFactory,
+    GeminiAdapter,
+    ClaudeAdapter,
+    QwenAdapter,
+    KimiAdapter,
+)
 
 __all__ = [
     "RoleTemplate",
@@ -14,8 +29,11 @@ __all__ = [
     "Worker",
     "load_scheduler_config",
     "DEFAULT_ROLES",
-
-    "
-    "
-    "
+    # Re-exported from core.scheduler
+    "EngineAdapter",
+    "EngineFactory",
+    "GeminiAdapter",
+    "ClaudeAdapter",
+    "QwenAdapter",
+    "KimiAdapter",
 ]
monoco/features/agent/cli.py
CHANGED
@@ -1,10 +1,12 @@
 import typer
 import time
+import asyncio
 from pathlib import Path
 from typing import Optional
 from monoco.core.output import print_output, print_error
 from monoco.core.config import get_config
-from monoco.features.agent import
+from monoco.features.agent import load_scheduler_config
+from monoco.core.scheduler import AgentTask, LocalProcessScheduler
 
 app = typer.Typer(name="agent", help="Manage agent sessions and roles")
 session_app = typer.Typer(name="session", help="Manage active agent sessions")
@@ -50,12 +52,10 @@ def list_providers():
     """
     from monoco.core.integrations import get_all_integrations
 
-    # Ideally we'd pass project-specific integrations here if they existed in config objects
     integrations = get_all_integrations(enabled_only=False)
 
     output = []
     for key, integration in integrations.items():
-        # Perform health check
         health = integration.check_health()
         status_icon = "✅" if health.available else "❌"
 
@@ -127,14 +127,9 @@ def run(
     full_prompt = " ".join(prompt) if prompt else ""
 
     if issue:
-        # User explicitly linked an issue
         issue_id = issue.upper()
         description = full_prompt or None
     else:
-        # Ad-hoc task check
-        import re
-        # Heuristic: if prompt looks like an ID and is short, maybe they meant ID?
-        # But explicit is better. Let's assume everything in prompt is instructions.
         issue_id = "NEW_TASK"
         description = full_prompt
 
@@ -156,13 +151,11 @@ def run(
 
     integration = get_integration(target_engine)
 
-    # If integration is found, check health
     is_available = False
     if integration:
         health = integration.check_health()
         is_available = health.available
     if not is_available and provider:
-        # If user explicitly requested this provider, fail hard
         print_error(f"Requested provider '{target_engine}' is not available.")
         print_error(f"Error: {health.error}")
         raise typer.Exit(code=1)
@@ -173,23 +166,18 @@ def run(
 
         all_integrations = get_all_integrations(enabled_only=True)
         fallback_found = None
-
-        # Priority list for fallback
         priority = ["cursor", "claude", "gemini", "qwen", "kimi"]
 
-        # Try priority matches first
         for key in priority:
             if key in all_integrations:
                 if all_integrations[key].check_health().available:
                     fallback_found = key
                     break
 
-        # Determine strict fallback
         if fallback_found:
             print_output(f"🔄 Falling back to available provider: [bold green]{fallback_found}[/bold green]")
             selected_role.engine = fallback_found
         else:
-            # If NO CLI tools available, maybe generic agent?
             if "agent" in all_integrations:
                 print_output("🔄 Falling back to Generic Agent (No CLI execution).", style="yellow")
                 selected_role.engine = "agent"
@@ -198,7 +186,6 @@ def run(
             print_error("Please install Cursor, Claude Code, or Gemini CLI.")
             raise typer.Exit(code=1)
     elif provider:
-        # If available and user overrode it
         print_output(f"Overriding provider: {selected_role.engine} -> {provider}")
         selected_role.engine = provider
 
@@ -208,89 +195,136 @@ def run(
         title="Agent Framework",
     )
 
-    # 4. Initialize
-
-
+    # 4. Initialize AgentScheduler and schedule task
+    scheduler = LocalProcessScheduler(
+        max_concurrent=5,
+        project_root=project_root,
+    )
 
+    task = AgentTask(
+        task_id=f"cli-{issue_id}-{int(time.time())}",
+        role_name=selected_role.name,
+        issue_id=issue_id,
+        prompt=description or "Execute task",
+        engine=selected_role.engine,
+        timeout=selected_role.timeout or 900,
+        metadata={
+            "role_description": selected_role.description,
+            "role_goal": selected_role.goal,
+        },
+    )
 
     try:
-        #
-
-
+        # Run async scheduler in sync context
+        asyncio.run(scheduler.start())
+        session_id = asyncio.run(scheduler.schedule(task))
+
+        print_output(f"Session {session_id} started.")
 
         if detach:
             print_output(
-                f"Session {
+                f"Session {session_id} running in background (detached)."
             )
             return
 
-        # Monitoring Loop
-        while
+        # Monitoring Loop - poll for task status
+        while True:
+            status = scheduler.get_task_status(session_id)
+            if status in ["completed", "failed", "crashed"]:
+                break
             time.sleep(1)
 
-
+        final_status = scheduler.get_task_status(session_id)
+        if final_status == "failed":
            print_error(
-                f"Session {
+                f"Session {session_id} FAILED. Review logs for details."
            )
        else:
            print_output(
-                f"Session finished with status: {
+                f"Session finished with status: {final_status}",
                title="Agent Framework",
            )
 
    except KeyboardInterrupt:
        print("\nStopping...")
-
+        asyncio.run(scheduler.cancel_task(session_id))
        print_output("Session terminated.")
+    finally:
+        asyncio.run(scheduler.stop())
 
 
 @session_app.command(name="kill")
 def kill_session(session_id: str):
     """
     Terminate a specific session.
+
+    Note: Uses AgentScheduler to cancel the task.
     """
-
-
-
-
+    settings = get_config()
+    project_root = Path(settings.paths.root).resolve()
+
+    scheduler = LocalProcessScheduler(
+        max_concurrent=5,
+        project_root=project_root,
+    )
+
+    try:
+        asyncio.run(scheduler.start())
+        asyncio.run(scheduler.cancel_task(session_id))
         print_output(f"Session {session_id} terminated.")
-
-
+    except Exception as e:
+        print_error(f"Failed to terminate session: {e}")
+    finally:
+        asyncio.run(scheduler.stop())
 
 
 @session_app.command(name="list")
 def list_sessions():
     """
     List active agent sessions.
+
+    Note: Shows tasks from AgentScheduler.
     """
-
-
-
-
-
-
-                {
-                    "id": s.model.id,
-                    "issue": s.model.issue_id,
-                    "role": s.model.role_name,
-                    "status": s.model.status,
-                    "branch": s.model.branch_name,
-                }
-            )
-
-    print_output(
-        output
-        or "No active sessions found (Note: Persistence not implemented in CLI list yet).",
-        title="Active Sessions",
+    settings = get_config()
+    project_root = Path(settings.paths.root).resolve()
+
+    scheduler = LocalProcessScheduler(
+        max_concurrent=5,
+        project_root=project_root,
     )
+
+    try:
+        asyncio.run(scheduler.start())
+        stats = scheduler.get_stats()
+
+        output = {
+            "scheduler_status": "running" if stats.get("running") else "stopped",
+            "active_tasks": stats.get("active_tasks", 0),
+            "completed_tasks": stats.get("completed_tasks", 0),
+            "failed_tasks": stats.get("failed_tasks", 0),
+        }
+
+        print_output(output, title="Agent Scheduler Status")
+    finally:
+        asyncio.run(scheduler.stop())
 
 
 @session_app.command(name="logs")
 def session_logs(session_id: str):
     """
     Stream logs for a session.
+
+    Note: Logs are stored in .monoco/sessions/{session_id}.log
     """
+    settings = get_config()
+    project_root = Path(settings.paths.root).resolve()
+    log_path = project_root / ".monoco" / "sessions" / f"{session_id}.log"
+
     print_output(f"Streaming logs for {session_id}...", title="Session Logs")
-
-
-
+
+    if log_path.exists():
+        print(log_path.read_text())
+    else:
+        print(f"[12:00:00] Session {session_id} started")
+        print("[12:00:01] Worker initialized")
+        print("(Log file not found - showing placeholder)")