monoco-toolkit 0.3.10__py3-none-any.whl → 0.3.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monoco/__main__.py +8 -0
- monoco/core/artifacts/__init__.py +16 -0
- monoco/core/artifacts/manager.py +575 -0
- monoco/core/artifacts/models.py +161 -0
- monoco/core/config.py +31 -4
- monoco/core/git.py +23 -0
- monoco/core/ingestion/__init__.py +20 -0
- monoco/core/ingestion/discovery.py +248 -0
- monoco/core/ingestion/watcher.py +343 -0
- monoco/core/ingestion/worker.py +436 -0
- monoco/core/loader.py +633 -0
- monoco/core/registry.py +34 -25
- monoco/core/skills.py +119 -80
- monoco/daemon/app.py +77 -1
- monoco/daemon/commands.py +10 -0
- monoco/daemon/mailroom_service.py +196 -0
- monoco/daemon/models.py +1 -0
- monoco/daemon/scheduler.py +236 -0
- monoco/daemon/services.py +185 -0
- monoco/daemon/triggers.py +55 -0
- monoco/features/agent/adapter.py +17 -7
- monoco/features/agent/apoptosis.py +4 -4
- monoco/features/agent/manager.py +41 -5
- monoco/{core/resources/en/skills/monoco_core → features/agent/resources/en/skills/monoco_atom_core}/SKILL.md +2 -2
- monoco/features/agent/resources/en/skills/{flow_engineer → monoco_workflow_agent_engineer}/SKILL.md +2 -2
- monoco/features/agent/resources/en/skills/{flow_manager → monoco_workflow_agent_manager}/SKILL.md +2 -2
- monoco/features/agent/resources/en/skills/{flow_planner → monoco_workflow_agent_planner}/SKILL.md +2 -2
- monoco/features/agent/resources/en/skills/{flow_reviewer → monoco_workflow_agent_reviewer}/SKILL.md +2 -2
- monoco/features/agent/resources/{roles/role-engineer.yaml → zh/roles/monoco_role_engineer.yaml} +3 -3
- monoco/features/agent/resources/{roles/role-manager.yaml → zh/roles/monoco_role_manager.yaml} +8 -8
- monoco/features/agent/resources/{roles/role-planner.yaml → zh/roles/monoco_role_planner.yaml} +8 -8
- monoco/features/agent/resources/{roles/role-reviewer.yaml → zh/roles/monoco_role_reviewer.yaml} +8 -8
- monoco/{core/resources/zh/skills/monoco_core → features/agent/resources/zh/skills/monoco_atom_core}/SKILL.md +2 -2
- monoco/features/agent/resources/zh/skills/{flow_engineer → monoco_workflow_agent_engineer}/SKILL.md +2 -2
- monoco/features/agent/resources/zh/skills/{flow_manager → monoco_workflow_agent_manager}/SKILL.md +2 -2
- monoco/features/agent/resources/zh/skills/{flow_planner → monoco_workflow_agent_planner}/SKILL.md +2 -2
- monoco/features/agent/resources/zh/skills/{flow_reviewer → monoco_workflow_agent_reviewer}/SKILL.md +2 -2
- monoco/features/agent/session.py +59 -11
- monoco/features/artifact/__init__.py +0 -0
- monoco/features/artifact/adapter.py +33 -0
- monoco/features/artifact/resources/zh/AGENTS.md +14 -0
- monoco/features/artifact/resources/zh/skills/monoco_atom_artifact/SKILL.md +278 -0
- monoco/features/glossary/adapter.py +18 -7
- monoco/features/glossary/resources/en/skills/{monoco_glossary → monoco_atom_glossary}/SKILL.md +2 -2
- monoco/features/glossary/resources/zh/skills/{monoco_glossary → monoco_atom_glossary}/SKILL.md +2 -2
- monoco/features/hooks/__init__.py +11 -0
- monoco/features/hooks/adapter.py +67 -0
- monoco/features/hooks/commands.py +309 -0
- monoco/features/hooks/core.py +441 -0
- monoco/features/hooks/resources/ADDING_HOOKS.md +234 -0
- monoco/features/i18n/adapter.py +18 -5
- monoco/features/i18n/core.py +482 -17
- monoco/features/i18n/resources/en/skills/{monoco_i18n → monoco_atom_i18n}/SKILL.md +2 -2
- monoco/features/i18n/resources/en/skills/{i18n_scan_workflow → monoco_workflow_i18n_scan}/SKILL.md +2 -2
- monoco/features/i18n/resources/zh/skills/{monoco_i18n → monoco_atom_i18n}/SKILL.md +2 -2
- monoco/features/i18n/resources/zh/skills/{i18n_scan_workflow → monoco_workflow_i18n_scan}/SKILL.md +2 -2
- monoco/features/issue/adapter.py +19 -6
- monoco/features/issue/commands.py +281 -7
- monoco/features/issue/core.py +227 -13
- monoco/features/issue/engine/machine.py +114 -4
- monoco/features/issue/linter.py +60 -5
- monoco/features/issue/models.py +2 -2
- monoco/features/issue/resources/en/AGENTS.md +109 -0
- monoco/features/issue/resources/en/skills/{monoco_issue → monoco_atom_issue}/SKILL.md +2 -2
- monoco/features/issue/resources/en/skills/{issue_create_workflow → monoco_workflow_issue_creation}/SKILL.md +2 -2
- monoco/features/issue/resources/en/skills/{issue_develop_workflow → monoco_workflow_issue_development}/SKILL.md +2 -2
- monoco/features/issue/resources/en/skills/{issue_lifecycle_workflow → monoco_workflow_issue_management}/SKILL.md +2 -2
- monoco/features/issue/resources/en/skills/{issue_refine_workflow → monoco_workflow_issue_refinement}/SKILL.md +2 -2
- monoco/features/issue/resources/hooks/post-checkout.sh +39 -0
- monoco/features/issue/resources/hooks/pre-commit.sh +41 -0
- monoco/features/issue/resources/hooks/pre-push.sh +35 -0
- monoco/features/issue/resources/zh/AGENTS.md +109 -0
- monoco/features/issue/resources/zh/skills/{monoco_issue → monoco_atom_issue_lifecycle}/SKILL.md +2 -2
- monoco/features/issue/resources/zh/skills/{issue_create_workflow → monoco_workflow_issue_creation}/SKILL.md +2 -2
- monoco/features/issue/resources/zh/skills/{issue_develop_workflow → monoco_workflow_issue_development}/SKILL.md +2 -2
- monoco/features/issue/resources/zh/skills/{issue_lifecycle_workflow → monoco_workflow_issue_management}/SKILL.md +2 -2
- monoco/features/issue/resources/zh/skills/{issue_refine_workflow → monoco_workflow_issue_refinement}/SKILL.md +2 -2
- monoco/features/issue/validator.py +101 -1
- monoco/features/memo/adapter.py +21 -8
- monoco/features/memo/cli.py +103 -10
- monoco/features/memo/core.py +178 -92
- monoco/features/memo/models.py +53 -0
- monoco/features/memo/resources/en/skills/{monoco_memo → monoco_atom_memo}/SKILL.md +2 -2
- monoco/features/memo/resources/en/skills/{note_processing_workflow → monoco_workflow_note_processing}/SKILL.md +2 -2
- monoco/features/memo/resources/zh/skills/{monoco_memo → monoco_atom_memo}/SKILL.md +2 -2
- monoco/features/memo/resources/zh/skills/{note_processing_workflow → monoco_workflow_note_processing}/SKILL.md +2 -2
- monoco/features/spike/adapter.py +18 -5
- monoco/features/spike/resources/en/skills/{monoco_spike → monoco_atom_spike}/SKILL.md +2 -2
- monoco/features/spike/resources/en/skills/{research_workflow → monoco_workflow_research}/SKILL.md +2 -2
- monoco/features/spike/resources/zh/skills/{monoco_spike → monoco_atom_spike}/SKILL.md +2 -2
- monoco/features/spike/resources/zh/skills/{research_workflow → monoco_workflow_research}/SKILL.md +2 -2
- monoco/main.py +38 -1
- {monoco_toolkit-0.3.10.dist-info → monoco_toolkit-0.3.11.dist-info}/METADATA +7 -1
- monoco_toolkit-0.3.11.dist-info/RECORD +181 -0
- monoco_toolkit-0.3.10.dist-info/RECORD +0 -156
- /monoco/{core → features/agent}/resources/en/AGENTS.md +0 -0
- /monoco/{core → features/agent}/resources/zh/AGENTS.md +0 -0
- {monoco_toolkit-0.3.10.dist-info → monoco_toolkit-0.3.11.dist-info}/WHEEL +0 -0
- {monoco_toolkit-0.3.10.dist-info → monoco_toolkit-0.3.11.dist-info}/entry_points.txt +0 -0
- {monoco_toolkit-0.3.10.dist-info → monoco_toolkit-0.3.11.dist-info}/licenses/LICENSE +0 -0
monoco/daemon/mailroom_service.py
ADDED
@@ -0,0 +1,196 @@
+"""
+Mailroom Service for Monoco Daemon.
+
+Manages automated document ingestion with concurrent processing,
+environment discovery, and artifact registration.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+from pathlib import Path
+from typing import Optional, Dict, Any
+
+from monoco.core.ingestion.discovery import EnvironmentDiscovery
+from monoco.core.ingestion.worker import ConversionWorker
+from monoco.core.ingestion.watcher import DropzoneWatcher, IngestionEvent
+from monoco.core.artifacts.manager import ArtifactManager
+
+logger = logging.getLogger(__name__)
+
+
+class MailroomService:
+    """
+    Service for automated document ingestion in Monoco Daemon.
+
+    Features:
+    - Environment discovery for conversion tools
+    - Dropzone monitoring for new files
+    - Concurrent conversion processing
+    - Artifact registration
+    - SSE event broadcasting
+    """
+
+    def __init__(
+        self,
+        workspace_root: Path,
+        broadcaster: Optional[Any] = None,
+        dropzone_path: Optional[Path] = None,
+        max_concurrent: int = 4,
+    ):
+        """
+        Initialize the Mailroom service.
+
+        Args:
+            workspace_root: Root directory of the workspace
+            broadcaster: SSE broadcaster for events
+            dropzone_path: Path to dropzone directory (default: workspace/.monoco/dropzone)
+            max_concurrent: Maximum concurrent conversion tasks
+        """
+        self.workspace_root = Path(workspace_root)
+        self.broadcaster = broadcaster
+
+        # Default dropzone location
+        self.dropzone_path = dropzone_path or (self.workspace_root / ".monoco" / "dropzone")
+
+        # Initialize components
+        self.discovery = EnvironmentDiscovery()
+        self.conversion_worker = ConversionWorker(
+            discovery=self.discovery,
+            max_concurrent=max_concurrent,
+        )
+
+        # Artifact manager (lazy init)
+        self._artifact_manager: Optional[ArtifactManager] = None
+
+        # Watcher (lazy init)
+        self._watcher: Optional[DropzoneWatcher] = None
+
+        # State
+        self._running = False
+        self._stats: Dict[str, Any] = {
+            "files_detected": 0,
+            "conversions_success": 0,
+            "conversions_failed": 0,
+            "artifacts_registered": 0,
+        }
+
+    @property
+    def artifact_manager(self) -> ArtifactManager:
+        """Get or create the artifact manager."""
+        if self._artifact_manager is None:
+            self._artifact_manager = ArtifactManager(self.workspace_root)
+        return self._artifact_manager
+
+    async def start(self) -> None:
+        """Start the Mailroom service."""
+        if self._running:
+            return
+
+        logger.info("Starting Mailroom service...")
+
+        # Perform environment discovery
+        tools = self.discovery.discover()
+        total_tools = sum(len(t) for t in tools.values())
+        logger.info(f"Discovered {total_tools} conversion tools")
+
+        # Log discovered capabilities
+        capabilities = self.discovery.get_capabilities_summary()
+        logger.info(f"Capabilities: {capabilities}")
+
+        # Initialize and start dropzone watcher
+        self._watcher = DropzoneWatcher(
+            dropzone_path=self.dropzone_path,
+            artifact_manager=self.artifact_manager,
+            conversion_worker=self.conversion_worker,
+            process_existing=False,  # Don't process existing files on startup
+        )
+
+        # Set up event callback
+        self._watcher.set_event_callback(self._on_ingestion_event)
+
+        # Start watching (this is synchronous, runs in background thread)
+        self._watcher.start()
+
+        self._running = True
+        logger.info(f"Mailroom service started. Dropzone: {self.dropzone_path}")
+
+    async def stop(self) -> None:
+        """Stop the Mailroom service."""
+        if not self._running:
+            return
+
+        logger.info("Stopping Mailroom service...")
+
+        if self._watcher:
+            self._watcher.stop()
+            self._watcher = None
+
+        self._running = False
+        logger.info("Mailroom service stopped")
+
+    def _on_ingestion_event(self, event: IngestionEvent) -> None:
+        """Handle ingestion events from the watcher."""
+        # Update stats
+        if event.event_type.value == "file_detected":
+            self._stats["files_detected"] += 1
+        elif event.event_type.value == "conversion_completed":
+            self._stats["conversions_success"] += 1
+        elif event.event_type.value == "conversion_failed":
+            self._stats["conversions_failed"] += 1
+        elif event.event_type.value == "artifact_registered":
+            self._stats["artifacts_registered"] += 1
+
+        # Broadcast via SSE if broadcaster available
+        if self.broadcaster:
+            asyncio.create_task(self._broadcast_event(event))
+
+    async def _broadcast_event(self, event: IngestionEvent) -> None:
+        """Broadcast ingestion event to SSE clients."""
+        try:
+            payload = {
+                "type": event.event_type.value,
+                "file_path": str(event.file_path),
+                "task_id": event.task_id,
+                "artifact_id": event.artifact_id,
+                "error_message": event.error_message,
+                "metadata": event.metadata,
+                "timestamp": event.timestamp.isoformat(),
+            }
+            await self.broadcaster.broadcast("MAILROOM_EVENT", payload)
+        except Exception as e:
+            logger.error(f"Failed to broadcast mailroom event: {e}")
+
+    def get_status(self) -> Dict[str, Any]:
+        """Get current service status and statistics."""
+        capabilities = self.discovery.get_capabilities_summary()
+
+        return {
+            "running": self._running,
+            "dropzone_path": str(self.dropzone_path),
+            "capabilities": capabilities,
+            "supported_extensions": self.conversion_worker.get_supported_extensions(),
+            "stats": self._stats.copy(),
+            "tools": [
+                {
+                    "name": tool.name,
+                    "type": tool.tool_type.value,
+                    "version": tool.version,
+                    "capabilities": [c.value for c in tool.capabilities],
+                }
+                for tool in self.discovery.get_all_tools()
+            ],
+        }
+
+    def is_running(self) -> bool:
+        """Check if the service is running."""
+        return self._running
+
+    def get_discovery(self) -> EnvironmentDiscovery:
+        """Get the environment discovery instance."""
+        return self.discovery
+
+    def get_worker(self) -> ConversionWorker:
+        """Get the conversion worker instance."""
+        return self.conversion_worker
monoco/daemon/models.py
CHANGED
@@ -18,6 +18,7 @@ class CreateIssueRequest(BaseModel):
     related: List[str] = []
     subdir: Optional[str] = None
     project_id: Optional[str] = None  # Added for multi-project support
+    from_memos: List[str] = []  # Memo IDs to link to this issue
 
 
 class UpdateIssueRequest(BaseModel):
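
With this field, clients of the daemon API can record which memos motivated an issue at creation time. A sketch of a request payload; the memo IDs and project ID are illustrative, and required fields of CreateIssueRequest outside this hunk are omitted:

    # Hypothetical client-side payload for the daemon's create-issue request.
    payload = {
        "project_id": "alpha",
        "from_memos": ["memo-001", "memo-002"],  # new in 0.3.11
    }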

monoco/daemon/scheduler.py
ADDED
@@ -0,0 +1,236 @@
+import asyncio
+import logging
+import os
+from typing import Dict, Optional, List, Any, Tuple
+from pathlib import Path
+
+from monoco.daemon.services import ProjectManager, SemaphoreManager
+from monoco.daemon.triggers import MemoAccumulationPolicy, HandoverPolicy
+from monoco.features.agent.manager import SessionManager
+from monoco.features.agent.models import RoleTemplate
+from monoco.features.agent.session import RuntimeSession
+from monoco.features.agent.apoptosis import ApoptosisManager
+from monoco.features.issue.core import list_issues
+from monoco.core.config import get_config
+
+logger = logging.getLogger("monoco.daemon.scheduler")
+
+class SchedulerService:
+    def __init__(self, project_manager: ProjectManager):
+        self.project_manager = project_manager
+        self.session_managers: Dict[str, SessionManager] = {}
+        self._monitoring_task: Optional[asyncio.Task] = None
+        self.apoptosis_managers: Dict[str, ApoptosisManager] = {}
+
+        # Initialize SemaphoreManager with config
+        config = self._load_concurrency_config()
+        self.semaphore_manager = SemaphoreManager(config)
+
+    def _load_concurrency_config(self) -> Optional[Any]:
+        """Load concurrency configuration from config files and env vars."""
+        try:
+            settings = get_config()
+            concurrency_config = settings.agent.concurrency
+
+            # Check for environment variable override
+            env_max_agents = os.environ.get("MONOCO_MAX_AGENTS")
+            if env_max_agents:
+                try:
+                    concurrency_config.global_max = int(env_max_agents)
+                    logger.info(f"Overriding global_max from environment: {env_max_agents}")
+                except ValueError:
+                    logger.warning(f"Invalid MONOCO_MAX_AGENTS value: {env_max_agents}")
+
+            return concurrency_config
+        except Exception as e:
+            logger.warning(f"Failed to load concurrency config: {e}. Using defaults.")
+            return None
+
+    def get_managers(self, project_path: Path) -> Tuple[SessionManager, ApoptosisManager]:
+        key = str(project_path)
+        if key not in self.session_managers:
+            sm = SessionManager(project_root=project_path)
+            self.session_managers[key] = sm
+            self.apoptosis_managers[key] = ApoptosisManager(sm)
+        return self.session_managers[key], self.apoptosis_managers[key]
+
+    async def start(self):
+        logger.info("Starting Scheduler Service...")
+        self._monitoring_task = asyncio.create_task(self.monitor_loop())
+
+    def stop(self):
+        logger.info("Stopping Scheduler Service...")
+        if self._monitoring_task:
+            self._monitoring_task.cancel()
+
+        # Terminate all sessions
+        for sm in self.session_managers.values():
+            filtered_sessions = sm.list_sessions()
+            for session in filtered_sessions:
+                session.terminate()
+
+    async def monitor_loop(self):
+        try:
+            while True:
+                await self.tick()
+                await asyncio.sleep(5)
+        except asyncio.CancelledError:
+            pass
+        except Exception as e:
+            logger.error(f"Scheduler loop crashed: {e}", exc_info=True)
+
+    async def tick(self):
+        # We iterate over keys to avoid modification during iteration issues if new projects added
+        projects = list(self.project_manager.projects.values())
+        for project_ctx in projects:
+            await self.process_project(project_ctx)
+
+    async def process_project(self, project_context):
+        sm, am = self.get_managers(project_context.path)
+
+        # 1. Trigger Check: Architect
+        await self.check_inbox_trigger(sm, project_context)
+
+        # 1.5 Handover Trigger: Architect -> Engineer
+        await self.check_handover_trigger(sm, project_context)
+
+        # 2. Monitor Active Sessions (Supervisor)
+        active_sessions = sm.list_sessions()
+        for session in active_sessions:
+            if session.model.status in ["running", "pending"]:
+                status = session.refresh_status()  # Updates model.status
+
+                # Check for timeout/failure
+                if status == "timeout" or status == "failed":
+                    if session.model.status != "crashed":
+                        logger.warning(f"Session {session.model.id} led to {status}. Triggering Autopsy.")
+                        # Record failure for cooldown
+                        self.semaphore_manager.record_failure(
+                            issue_id=session.model.issue_id,
+                            session_id=session.model.id
+                        )
+                        am.trigger_apoptosis(session.model.id, failure_reason=f"Session status became {status}")
+                else:
+                    # Track active session in semaphore manager
+                    self.semaphore_manager.acquire(session.model.id, session.model.role_name)
+
+                # Daemon Logic for Chained Execution
+                if status == "completed":
+                    # Clear failure record on success
+                    self.semaphore_manager.clear_failure(session.model.issue_id)
+                    self.handle_completion(session, sm)
+
+    async def check_inbox_trigger(self, sm: SessionManager, project_context):
+        # Checking existing Architect sessions
+        existing_architects = [s for s in sm.list_sessions() if s.model.role_name == "Architect" and s.model.status == "running"]
+
+        if not existing_architects:
+            # Check semaphore before spawning
+            if not self.semaphore_manager.can_acquire("Architect"):
+                logger.warning("Cannot spawn Architect: concurrency limit reached")
+                return
+
+            trigger_policy = MemoAccumulationPolicy(count_threshold=5)
+            if trigger_policy.evaluate({"issues_root": project_context.issues_root}):
+                logger.info(f"Triggering Architect for project {project_context.id}")
+                self.spawn_architect(sm, project_context)
+
+    async def check_handover_trigger(self, sm: SessionManager, project_context):
+        # Scan for OPEN + DOING issues with no active worker
+        try:
+            all_issues = list_issues(project_context.issues_root)
+            handover_policy = HandoverPolicy(target_status="open", target_stage="doing")
+
+            for issue in all_issues:
+                if handover_policy.evaluate({"issue": issue}):
+                    # Check if session exists
+                    active = [s for s in sm.list_sessions(issue_id=issue.id) if s.model.status in ["running", "pending"]]
+                    if not active:
+                        # Check semaphore before spawning (including cooldown check)
+                        if not self.semaphore_manager.can_acquire("Engineer", issue_id=issue.id):
+                            logger.warning(f"Cannot spawn Engineer for {issue.id}: concurrency limit or cooldown active")
+                            continue
+
+                        logger.info(f"Handover trigger: Spawning Engineer for {issue.id}")
+                        self.spawn_engineer(sm, issue)
+        except Exception as e:
+            logger.error(f"Error in Handover trigger: {e}")
+
+    def spawn_engineer(self, sm: SessionManager, issue):
+        role = RoleTemplate(
+            name="Engineer",
+            description="Software Engineer",
+            trigger="handover",
+            goal=f"Implement feature: {issue.title}",
+            system_prompt="You are a Software Engineer. Read the issue and implement requirements.",
+            engine="gemini"
+        )
+        session = sm.create_session(issue_id=issue.id, role=role)
+
+        # Acquire semaphore slot
+        self.semaphore_manager.acquire(session.model.id, "Engineer")
+
+        try:
+            session.start()
+        except Exception as e:
+            # Release slot on spawn failure
+            self.semaphore_manager.release(session.model.id)
+            self.semaphore_manager.record_failure(issue.id, session.model.id)
+            logger.error(f"Failed to start Engineer session for {issue.id}: {e}")
+            raise
+
+    def spawn_architect(self, sm: SessionManager, project_context):
+        # Create Architect Session
+        role = RoleTemplate(
+            name="Architect",
+            description="System Architect",
+            trigger="memo.accumulation",
+            goal="Process memo inbox and create issues.",
+            system_prompt="You are the Architect. Process the Memo inbox.",
+            engine="gemini"  # Default or from config?
+        )
+        session = sm.create_session(issue_id="architecture-review", role=role)
+
+        # Acquire semaphore slot
+        self.semaphore_manager.acquire(session.model.id, "Architect")
+
+        try:
+            session.start()
+        except Exception as e:
+            # Release slot on spawn failure
+            self.semaphore_manager.release(session.model.id)
+            logger.error(f"Failed to start Architect session: {e}")
+            raise
+
+    def handle_completion(self, session: RuntimeSession, sm: SessionManager):
+        # Chained Execution: Engineer -> Reviewer
+        if session.model.role_name == "Engineer":
+            # Check semaphore before spawning Reviewer
+            if not self.semaphore_manager.can_acquire("Reviewer", issue_id=session.model.issue_id):
+                logger.warning(
+                    f"Cannot spawn Reviewer for {session.model.issue_id}: "
+                    f"concurrency limit reached. Review will be deferred."
+                )
+                return
+
+            logger.info(f"Engineer finished for {session.model.issue_id}. Spawning Reviewer.")
+            reviewer_role = RoleTemplate(
+                name="Reviewer",
+                description="Code Reviewer",
+                trigger="engineer.completion",
+                goal=f"Review work on {session.model.issue_id}",
+                system_prompt="You are a Code Reviewer. Review the code changes.",
+                engine="gemini"
+            )
+            rs = sm.create_session(issue_id=session.model.issue_id, role=reviewer_role)
+
+            # Acquire semaphore slot
+            self.semaphore_manager.acquire(rs.model.id, "Reviewer")
+
+            try:
+                rs.start()
+            except Exception as e:
+                # Release slot on spawn failure
+                self.semaphore_manager.release(rs.model.id)
+                logger.error(f"Failed to start Reviewer session for {session.model.issue_id}: {e}")
+                raise
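
A minimal sketch of the scheduler's lifecycle as the diff defines it, assuming a ProjectManager has already been constructed elsewhere in the daemon:

    import asyncio

    from monoco.daemon.scheduler import SchedulerService
    from monoco.daemon.services import ProjectManager

    async def run_scheduler(project_manager: ProjectManager) -> None:
        scheduler = SchedulerService(project_manager)
        await scheduler.start()           # launches monitor_loop(): tick() every 5s
        try:
            await asyncio.Event().wait()  # keep serving until cancelled
        finally:
            scheduler.stop()              # cancels the loop, terminates sessions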
monoco/daemon/services.py
CHANGED
@@ -2,6 +2,8 @@ import logging
 from typing import List, Optional, Dict, Any
 from asyncio import Queue
 from pathlib import Path
+from datetime import datetime, timedelta
+from threading import Lock
 
 import json
 
@@ -67,6 +69,189 @@ class ProjectContext:
         self.monitor.stop()
 
 
+class SemaphoreManager:
+    """
+    Manages concurrency limits for agent sessions using role-based semaphores.
+    Prevents fork bomb by limiting concurrent agents per role and globally.
+    """
+
+    def __init__(self, config: Optional[Any] = None):
+        """
+        Initialize the SemaphoreManager.
+
+        Args:
+            config: AgentConcurrencyConfig or dict with concurrency settings
+        """
+        self._lock = Lock()
+        self._active_sessions: Dict[str, str] = {}  # session_id -> role_name
+        self._role_counts: Dict[str, int] = {}  # role_name -> count
+        self._failure_registry: Dict[str, datetime] = {}  # issue_id -> last_failure_time
+
+        # Default conservative limits
+        self._global_max = 3
+        self._role_limits: Dict[str, int] = {
+            "Engineer": 1,
+            "Architect": 1,
+            "Reviewer": 1,
+            "Planner": 1,
+        }
+        self._failure_cooldown_seconds = 60
+
+        # Apply config if provided
+        if config:
+            self._apply_config(config)
+
+    def _apply_config(self, config: Any) -> None:
+        """Apply configuration settings."""
+        # Handle both dict and Pydantic model
+        if hasattr(config, 'global_max'):
+            self._global_max = config.global_max
+        if hasattr(config, 'failure_cooldown_seconds'):
+            self._failure_cooldown_seconds = config.failure_cooldown_seconds
+
+        # Role-specific limits
+        for role in ["Engineer", "Architect", "Reviewer", "Planner"]:
+            if hasattr(config, role.lower()):
+                self._role_limits[role] = getattr(config, role.lower())
+
+    def can_acquire(self, role_name: str, issue_id: Optional[str] = None) -> bool:
+        """
+        Check if a new session can be acquired for the given role.
+
+        Args:
+            role_name: The role to check (e.g., "Engineer", "Architect")
+            issue_id: Optional issue ID to check for failure cooldown
+
+        Returns:
+            True if the session can be started, False otherwise
+        """
+        with self._lock:
+            # Check global limit
+            total_active = len(self._active_sessions)
+            if total_active >= self._global_max:
+                logger.warning(
+                    f"Global concurrency limit reached ({self._global_max}). "
+                    f"Cannot spawn {role_name}."
+                )
+                return False
+
+            # Check role-specific limit
+            role_count = self._role_counts.get(role_name, 0)
+            role_limit = self._role_limits.get(role_name, 1)
+            if role_count >= role_limit:
+                logger.warning(
+                    f"Role concurrency limit reached for {role_name} "
+                    f"({role_count}/{role_limit})."
+                )
+                return False
+
+            # Check failure cooldown for this issue
+            if issue_id and issue_id in self._failure_registry:
+                last_failure = self._failure_registry[issue_id]
+                cooldown = timedelta(seconds=self._failure_cooldown_seconds)
+                if datetime.now() - last_failure < cooldown:
+                    remaining = cooldown - (datetime.now() - last_failure)
+                    logger.warning(
+                        f"Issue {issue_id} is in cooldown period. "
+                        f"Remaining: {remaining.seconds}s. Skipping spawn."
+                    )
+                    return False
+
+            return True
+
+    def acquire(self, session_id: str, role_name: str) -> bool:
+        """
+        Acquire a slot for a new session.
+
+        Args:
+            session_id: Unique identifier for the session
+            role_name: The role of the session
+
+        Returns:
+            True if acquired successfully, False otherwise
+        """
+        with self._lock:
+            if session_id in self._active_sessions:
+                logger.warning(f"Session {session_id} already tracked")
+                return True
+
+            self._active_sessions[session_id] = role_name
+            self._role_counts[role_name] = self._role_counts.get(role_name, 0) + 1
+            logger.info(
+                f"Acquired slot for {role_name} session {session_id}. "
+                f"Global: {len(self._active_sessions)}/{self._global_max}, "
+                f"Role: {self._role_counts[role_name]}/{self._role_limits.get(role_name, 1)}"
+            )
+            return True
+
+    def release(self, session_id: str) -> None:
+        """
+        Release a slot when a session ends.
+
+        Args:
+            session_id: The session ID to release
+        """
+        with self._lock:
+            if session_id not in self._active_sessions:
+                return
+
+            role_name = self._active_sessions.pop(session_id)
+            self._role_counts[role_name] = max(0, self._role_counts.get(role_name, 0) - 1)
+            logger.info(
+                f"Released slot for {role_name} session {session_id}. "
+                f"Global: {len(self._active_sessions)}/{self._global_max}"
+            )
+
+    def record_failure(self, issue_id: str, session_id: Optional[str] = None) -> None:
+        """
+        Record a failure for cooldown purposes.
+
+        Args:
+            issue_id: The issue that failed
+            session_id: Optional session ID to release
+        """
+        with self._lock:
+            self._failure_registry[issue_id] = datetime.now()
+            logger.warning(
+                f"Recorded failure for issue {issue_id}. "
+                f"Cooldown: {self._failure_cooldown_seconds}s"
+            )
+
+        # Release the slot if session_id provided
+        if session_id:
+            self.release(session_id)
+
+    def get_status(self) -> Dict[str, Any]:
+        """
+        Get current semaphore status for monitoring.
+
+        Returns:
+            Dict with current counts and limits
+        """
+        with self._lock:
+            return {
+                "global": {
+                    "active": len(self._active_sessions),
+                    "limit": self._global_max,
+                },
+                "roles": {
+                    role: {
+                        "active": self._role_counts.get(role, 0),
+                        "limit": limit,
+                    }
+                    for role, limit in self._role_limits.items()
+                },
+                "cooldown_issues": len(self._failure_registry),
+            }
+
+    def clear_failure(self, issue_id: str) -> None:
+        """Clear failure record for an issue (e.g., after successful completion)."""
+        with self._lock:
+            if issue_id in self._failure_registry:
+                del self._failure_registry[issue_id]
+                logger.info(f"Cleared failure record for issue {issue_id}")
+
+
 class ProjectManager:
     """
     Discovers and manages multiple Monoco projects within a workspace.
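
Because SemaphoreManager is self-contained, its check-then-acquire protocol can be exercised directly; a sketch with illustrative session and issue IDs:

    from monoco.daemon.services import SemaphoreManager

    sem = SemaphoreManager()  # defaults: global_max=3, 1 slot per role, 60s cooldown

    def run_session() -> None:
        ...  # placeholder for actual session work

    if sem.can_acquire("Engineer", issue_id="ISSUE-42"):
        sem.acquire("session-1", "Engineer")
        try:
            run_session()
        except Exception:
            # Starts the 60s cooldown for ISSUE-42 and releases session-1.
            sem.record_failure("ISSUE-42", session_id="session-1")

    print(sem.get_status())  # global/role counts vs. limits, cooldown_issues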
monoco/daemon/triggers.py
ADDED
@@ -0,0 +1,55 @@
+from typing import TYPE_CHECKING, Optional, List, Any
+from pathlib import Path
+from monoco.features.memo.core import load_memos
+from monoco.features.issue.models import IssueMetadata, IssueStatus, IssueStage
+
+if TYPE_CHECKING:
+    from monoco.features.issue.models import IssueMetadata
+
+class TriggerPolicy:
+    """
+    Base class for trigger policies.
+    """
+    def evaluate(self, context: dict) -> bool:
+        raise NotImplementedError
+
+class MemoAccumulationPolicy(TriggerPolicy):
+    """
+    Trigger when pending memos exceed a threshold.
+    """
+    def __init__(self, count_threshold: int = 5):
+        self.count_threshold = count_threshold
+
+    def evaluate(self, context: dict) -> bool:
+        issues_root = context.get("issues_root")
+        if not issues_root:
+            return False
+
+        if isinstance(issues_root, str):
+            issues_root = Path(issues_root)
+
+        try:
+            memos = load_memos(issues_root)
+            pending_memos = [m for m in memos if m.status == "pending"]
+            return len(pending_memos) >= self.count_threshold
+        except Exception as e:
+            print(f"Error evaluating MemoAccumulationPolicy: {e}")
+            return False
+
+class HandoverPolicy(TriggerPolicy):
+    """
+    Trigger when an issue enters a specific state (e.g. Open/Doing for Engineer).
+    """
+    def __init__(self, target_status: IssueStatus, target_stage: IssueStage):
+        self.target_status = target_status
+        self.target_stage = target_stage
+
+    def evaluate(self, context: dict) -> bool:
+        issue: Optional[IssueMetadata] = context.get("issue")
+        if not issue:
+            return False
+
+        return (
+            issue.status == self.target_status
+            and issue.stage == self.target_stage
+        )