monoco-toolkit 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monoco/core/agent/adapters.py +24 -1
- monoco/core/config.py +77 -17
- monoco/core/integrations.py +8 -0
- monoco/core/lsp.py +7 -0
- monoco/core/output.py +8 -1
- monoco/core/resources/zh/SKILL.md +6 -7
- monoco/core/setup.py +8 -0
- monoco/features/i18n/resources/zh/SKILL.md +5 -5
- monoco/features/issue/commands.py +135 -55
- monoco/features/issue/core.py +157 -122
- monoco/features/issue/domain/__init__.py +0 -0
- monoco/features/issue/domain/lifecycle.py +126 -0
- monoco/features/issue/domain/models.py +170 -0
- monoco/features/issue/domain/parser.py +223 -0
- monoco/features/issue/domain/workspace.py +104 -0
- monoco/features/issue/engine/__init__.py +22 -0
- monoco/features/issue/engine/config.py +172 -0
- monoco/features/issue/engine/machine.py +185 -0
- monoco/features/issue/engine/models.py +18 -0
- monoco/features/issue/linter.py +32 -11
- monoco/features/issue/lsp/__init__.py +3 -0
- monoco/features/issue/lsp/definition.py +72 -0
- monoco/features/issue/models.py +26 -9
- monoco/features/issue/resources/zh/SKILL.md +8 -9
- monoco/features/issue/validator.py +181 -65
- monoco/features/spike/core.py +5 -22
- monoco/features/spike/resources/zh/SKILL.md +2 -2
- monoco/main.py +2 -26
- monoco_toolkit-0.2.7.dist-info/METADATA +129 -0
- {monoco_toolkit-0.2.5.dist-info → monoco_toolkit-0.2.7.dist-info}/RECORD +33 -27
- monoco/features/agent/commands.py +0 -166
- monoco/features/agent/doctor.py +0 -30
- monoco/features/pty/core.py +0 -185
- monoco/features/pty/router.py +0 -138
- monoco/features/pty/server.py +0 -56
- monoco_toolkit-0.2.5.dist-info/METADATA +0 -93
- {monoco_toolkit-0.2.5.dist-info → monoco_toolkit-0.2.7.dist-info}/WHEEL +0 -0
- {monoco_toolkit-0.2.5.dist-info → monoco_toolkit-0.2.7.dist-info}/entry_points.txt +0 -0
- {monoco_toolkit-0.2.5.dist-info → monoco_toolkit-0.2.7.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
from typing import List, Optional, Any, Dict
|
|
2
|
+
from pydantic import BaseModel, Field, model_validator
|
|
3
|
+
from datetime import datetime
|
|
4
|
+
from ..models import IssueType, IssueStatus, IssueStage, IssueSolution, IssueIsolation, IssueID, current_time
|
|
5
|
+
from monoco.core.lsp import Range, Position
|
|
6
|
+
|
|
7
|
+
class Span(BaseModel):
    """
    Represents a fine-grained location inside a ContentBlock.

    A Span pins one semantic token (a wikilink, a checkbox state char,
    a raw issue ID, ...) to an exact LSP-style text range so tooling can
    navigate to it or edit it in place.
    """
    type: str # 'wikilink', 'issue_id', 'checkbox', 'yaml_key', 'plain_text'
    # LSP Range (start/end Positions) of the token; line numbers are
    # absolute, 0-based indices into the source file.
    range: Range
    # Raw matched text of the token.
    content: str
    # Token-kind specific extras, e.g. {"issue_id": ...} or {"state": ...}.
    metadata: Dict[str, Any] = Field(default_factory=dict)
|
|
15
|
+
|
|
16
|
+
class ContentBlock(BaseModel):
    """
    Represents a block of content in the markdown body.

    Blocks are line-oriented: `line_start` is inclusive and `line_end`
    exclusive (line_start + number of lines), both absolute 0-based line
    indices into the source file, as produced by MarkdownParser.
    """
    type: str # e.g., 'heading', 'task_list', 'paragraph', 'empty'
    # Raw markdown text of the block (joined lines, no trailing newline).
    content: str
    line_start: int
    line_end: int
    # Fine-grained tokens (checkboxes, wikilinks, issue IDs) found inside.
    spans: List[Span] = Field(default_factory=list)
    metadata: Dict[str, Any] = Field(default_factory=dict)

    def to_string(self) -> str:
        # Used by IssueBody.to_markdown to reconstruct the body verbatim.
        return self.content
|
|
29
|
+
|
|
30
|
+
from enum import Enum  # NOTE(review): mid-file import; conventionally belongs with the imports at the top of the module

class TaskState(str, Enum):
    """
    Canonical checkbox characters for markdown task-list items.

    The parser additionally accepts '/' (doing) and '~' (cancelled) as
    "new standard" spellings and normalizes them onto these members.
    """
    TODO = " "
    DONE = "x"
    DOING = "-"
    CANCELLED = "+"
|
|
37
|
+
|
|
38
|
+
class TaskItem(ContentBlock):
    """A single markdown task-list entry (``- [ ] ...``) in the body."""

    type: str = "task_item"  # fixed discriminator overriding ContentBlock.type
    state: TaskState = TaskState.TODO
    level: int = 0  # nesting depth derived from leading indentation
    parent_index: Optional[int] = None  # index of the enclosing task, if any

    @property
    def is_completed(self) -> bool:
        """True once the task needs no further work (done or cancelled)."""
        terminal_states = (TaskState.DONE, TaskState.CANCELLED)
        return self.state in terminal_states
|
|
47
|
+
|
|
48
|
+
class IssueBody(BaseModel):
    """
    The parsed markdown body of an issue, kept as an ordered block list
    so the original text can be reconstructed verbatim.
    """
    blocks: List[ContentBlock] = Field(default_factory=list)

    def to_markdown(self) -> str:
        """Reassemble the body text from its blocks."""
        rendered = [block.to_string() for block in self.blocks]
        return "\n".join(rendered)

    @property
    def raw(self) -> str:
        """Alias for :meth:`to_markdown`."""
        return self.to_markdown()

    @property
    def tasks(self) -> List[TaskItem]:
        """All task-list blocks (TaskItem instances or blocks tagged 'task_item')."""
        def _is_task(block: ContentBlock) -> bool:
            return isinstance(block, TaskItem) or block.type == 'task_item'
        return [block for block in self.blocks if _is_task(block)]

    @property
    def progress(self) -> str:
        """Task completion as a 'completed/total' string ('0/0' when no tasks)."""
        items = self.tasks
        total = len(items)
        if total == 0:
            return "0/0"
        # Plain ContentBlocks tagged 'task_item' carry no state, so only
        # real TaskItem instances can count as completed.
        completed = sum(1 for item in items
                        if isinstance(item, TaskItem) and item.is_completed)
        return f"{completed}/{total}"
|
|
72
|
+
|
|
73
|
+
class IssueFrontmatter(BaseModel):
    """
    The YAML frontmatter of an issue file.

    Holds identity, classification, timestamps, graph links and lifecycle
    metadata. Unknown keys are preserved (extra="allow") so user-defined
    fields survive a parse/serialize round trip.
    """
    # Identity
    id: str
    uid: Optional[str] = None
    # Classification
    type: IssueType
    status: IssueStatus = IssueStatus.OPEN
    stage: Optional[IssueStage] = None
    # Content
    title: str
    # Timestamps
    created_at: datetime = Field(default_factory=current_time)
    opened_at: Optional[datetime] = None
    updated_at: datetime = Field(default_factory=current_time)
    closed_at: Optional[datetime] = None
    # Graph links to other issues
    parent: Optional[str] = None
    dependencies: List[str] = Field(default_factory=list)
    related: List[str] = Field(default_factory=list)
    tags: List[str] = Field(default_factory=list)
    # Lifecycle outcome
    solution: Optional[IssueSolution] = None
    isolation: Optional[IssueIsolation] = None

    model_config = {"extra": "allow"}

    @model_validator(mode='before')
    @classmethod
    def normalize_fields(cls, v: Any) -> Any:
        """Lowercase enum-backed string fields so e.g. 'FEATURE' parses as 'feature'."""
        if not isinstance(v, dict):
            return v
        for key in ('type', 'status'):
            raw = v.get(key)
            if isinstance(raw, str):
                v[key] = raw.lower()
        return v
|
|
107
|
+
|
|
108
|
+
class Issue(BaseModel):
    """
    The Aggregate Root for an Issue in the Domain Layer.

    Combines the YAML frontmatter with the parsed markdown body and can
    serialize itself back to the canonical on-disk file format.
    """
    path: Optional[str] = None  # source file path, when known
    frontmatter: IssueFrontmatter
    body: IssueBody

    @property
    def id(self) -> str:
        """Shortcut for the frontmatter issue ID."""
        return self.frontmatter.id

    @property
    def status(self) -> IssueStatus:
        """Shortcut for the frontmatter status."""
        return self.frontmatter.status

    def to_file_content(self) -> str:
        """
        Reconstruct the full file content (frontmatter + body).

        Key order is fixed, and some empty keys (parent, dependencies,
        related, tags) are emitted on purpose so the file prompts users
        to fill them in.
        """
        import yaml

        data = self.frontmatter.model_dump(mode='json')
        ordered_dump = {}

        # Scalar fields in canonical order; keys in `skip_if_empty` are
        # only written when they hold a truthy value.
        skip_if_empty = {'uid', 'stage', 'opened_at', 'closed_at'}
        for key in ('id', 'uid', 'type', 'status', 'stage', 'title',
                    'created_at', 'opened_at', 'updated_at', 'closed_at'):
            if key not in skip_if_empty:
                ordered_dump[key] = data[key]
            elif data.get(key):
                ordered_dump[key] = data[key]

        # Graph fields are always present (even null/empty) to advertise
        # their existence to the user.
        ordered_dump['parent'] = data.get('parent')
        ordered_dump['dependencies'] = data.get('dependencies', [])
        ordered_dump['related'] = data.get('related', [])
        ordered_dump['tags'] = data.get('tags', [])

        # Lifecycle fields appear only once set.
        for key in ('solution', 'isolation'):
            if data.get(key):
                ordered_dump[key] = data[key]

        fm_str = yaml.dump(ordered_dump, sort_keys=False, allow_unicode=True).strip()
        body_str = self.body.to_markdown()
        return f"---\n{fm_str}\n---\n\n{body_str}"
|
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
import yaml
|
|
2
|
+
import re
|
|
3
|
+
from typing import List, Optional, Tuple, Any
|
|
4
|
+
from .models import Issue, IssueFrontmatter, IssueBody, ContentBlock, Span
|
|
5
|
+
from monoco.core.lsp import Range, Position
|
|
6
|
+
|
|
7
|
+
class MarkdownParser:
    """
    Parses markdown issue content into Domain Models.

    The parser is line-oriented: it splits the file into a YAML
    frontmatter section and a body, segments the body into ContentBlocks
    (headings, task items, empty lines, paragraphs), and annotates each
    block with fine-grained Spans (checkboxes, wikilinks, raw issue IDs).
    """

    # Regex for standard Issue IDs and cross-project IDs ("proj::FEAT-0001").
    ISSUE_ID_PATTERN = r"\b((?:[a-zA-Z0-9_]+::)?(?:EPIC|FEAT|CHORE|FIX)-\d{4})\b"
    # Regex for Wikilinks [[Project::IssueID]] or [[IssueID]].
    WIKILINK_PATTERN = r"\[\[((?:[a-zA-Z0-9_]+::)?(?:EPIC|FEAT|CHORE|FIX)-\d{4})\]\]"

    @staticmethod
    def parse(content: str, path: Optional[str] = None) -> Issue:
        """
        Parse a full issue file into an Issue aggregate.

        Missing or incomplete frontmatter surfaces as a pydantic
        validation error from IssueFrontmatter (id/type/title required).
        """
        lines = content.splitlines()

        # 1. Extract the YAML frontmatter and locate where the body starts.
        frontmatter_dict, body_start_line = MarkdownParser._extract_frontmatter(lines)

        # 2. Build the frontmatter model; an empty dict yields a clear
        # validation error rather than a TypeError downstream.
        frontmatter = IssueFrontmatter(**frontmatter_dict)

        # 3. Parse the body; line numbers stay absolute w.r.t. the file.
        body_lines = lines[body_start_line:]
        blocks = MarkdownParser._parse_blocks(body_lines, start_line_offset=body_start_line)

        body = IssueBody(blocks=blocks)

        return Issue(path=path, frontmatter=frontmatter, body=body)

    @staticmethod
    def _extract_frontmatter(lines: List[str]) -> Tuple[dict, int]:
        """
        Extract YAML frontmatter delimited by '---' fences.

        Returns (frontmatter_dict, body_start_line_index); ({}, 0) when
        there is no frontmatter or the opening fence is never closed.
        """
        if not lines or lines[0].strip() != "---":
            return {}, 0

        fm_lines = []
        i = 1
        while i < len(lines):
            line = lines[i]
            if line.strip() == "---":
                # FIX: yaml.safe_load returns None for an empty document
                # (and may return a scalar for degenerate input); coerce to
                # a dict so the declared return type always holds and the
                # caller never does IssueFrontmatter(**None).
                data = yaml.safe_load("\n".join(fm_lines))
                return (data if isinstance(data, dict) else {}), i + 1
            fm_lines.append(line)
            i += 1

        return {}, 0  # malformed: opening '---' never closed

    @staticmethod
    def _parse_blocks(lines: List[str], start_line_offset: int) -> List[ContentBlock]:
        """
        Segment body lines into ContentBlocks.

        Headings, task items and empty lines each become a single-line
        block; consecutive other lines accumulate into paragraph blocks.
        """
        # Hoisted out of the per-line loop (previously re-imported and the
        # state map rebuilt for every task line).
        from .models import TaskState, TaskItem

        # Maps every accepted checkbox character to a canonical TaskState.
        state_map = {
            " ": TaskState.TODO,
            "x": TaskState.DONE,
            "-": TaskState.DOING,      # Legacy
            "/": TaskState.DOING,      # New Standard
            "+": TaskState.CANCELLED,  # Legacy
            "~": TaskState.CANCELLED,  # New Standard
        }

        blocks = []
        current_block_lines = []
        current_block_type = "paragraph"  # NOTE: never changes today; kept for future block kinds
        current_start_line = start_line_offset

        def flush_block():
            # Emit accumulated paragraph lines (if any) as a single block.
            nonlocal current_block_lines, current_start_line
            if current_block_lines:
                content = "\n".join(current_block_lines)
                block = ContentBlock(
                    type=current_block_type,
                    content=content,
                    line_start=current_start_line,
                    line_end=current_start_line + len(current_block_lines)
                )
                block.spans = MarkdownParser._parse_spans(current_block_lines, current_start_line)
                blocks.append(block)
                current_block_lines = []

        for i, line in enumerate(lines):
            abs_line_idx = start_line_offset + i

            # 1. Heading: always its own single-line block.
            if re.match(r"^#{1,6}\s", line):
                flush_block()

                block = ContentBlock(
                    type="heading",
                    content=line,
                    line_start=abs_line_idx,
                    line_end=abs_line_idx + 1
                )
                block.spans = MarkdownParser._parse_spans([line], abs_line_idx)
                blocks.append(block)
                current_start_line = abs_line_idx + 1
                current_block_type = "paragraph"  # reset
                continue

            # 2. Task list item: capture indent and state character.
            task_match = re.match(r"^(\s*)-\s*\[([ xX\-\+~/])\]", line)
            if task_match:
                flush_block()

                indent_str = task_match.group(1)
                state_char = task_match.group(2).lower()  # normalizes 'X' -> 'x'

                # Nesting level assumes 2-space indentation per level.
                level = len(indent_str) // 2

                # (A dead "fallback for 'X'" branch was removed here: the
                # lower() above already handles it, and its condition
                # `not in state_map and == 'x'` could never be true.)
                block = TaskItem(
                    content=line,
                    line_start=abs_line_idx,
                    line_end=abs_line_idx + 1,
                    state=state_map.get(state_char, TaskState.TODO),
                    level=level,
                    # FIX: '~' (cancelled, new standard) now counts as
                    # checked, consistent with legacy '+' in state_map and
                    # with TaskItem.is_completed.
                    metadata={"checked": state_char in ('x', '+', '~')}
                )
                block.spans = MarkdownParser._parse_spans([line], abs_line_idx)
                blocks.append(block)
                current_start_line = abs_line_idx + 1
                current_block_type = "paragraph"
                continue

            # 3. Empty line: kept as its own block so the body can be
            # reconstructed verbatim.
            if not line.strip():
                flush_block()

                blocks.append(ContentBlock(
                    type="empty",
                    content="",
                    line_start=abs_line_idx,
                    line_end=abs_line_idx + 1
                ))
                current_start_line = abs_line_idx + 1
                current_block_type = "paragraph"
                continue

            # Default: accumulate into the current paragraph.
            if not current_block_lines:
                current_start_line = abs_line_idx

            current_block_lines.append(line)

        # Flush any trailing paragraph.
        flush_block()

        return blocks

    @staticmethod
    def _parse_spans(lines: List[str], line_offset: int) -> List[Span]:
        """
        Scan lines for fine-grained tokens (checkboxes, wikilinks, raw
        issue IDs) and return them as Spans with absolute positions.
        """
        spans = []
        for i, line in enumerate(lines):
            abs_line_idx = line_offset + i

            # 1. Checkbox: only recognized at the start of a line; the span
            # covers just the state character between the brackets.
            checkbox_match = re.match(r"^(\s*-\s*\[)([ xX\-\+~/])(\])", line)
            if checkbox_match:
                start_char = len(checkbox_match.group(1))
                end_char = start_char + 1
                spans.append(Span(
                    type="checkbox",
                    range=Range(
                        start=Position(line=abs_line_idx, character=start_char),
                        end=Position(line=abs_line_idx, character=end_char)
                    ),
                    content=checkbox_match.group(2),
                    metadata={"state": checkbox_match.group(2)}
                ))

            # 2. Wikilinks: [[FEAT-0001]] / [[proj::FEAT-0001]].
            for match in re.finditer(MarkdownParser.WIKILINK_PATTERN, line):
                spans.append(Span(
                    type="wikilink",
                    range=Range(
                        start=Position(line=abs_line_idx, character=match.start()),
                        end=Position(line=abs_line_idx, character=match.end())
                    ),
                    content=match.group(0),
                    metadata={"issue_id": match.group(1)}
                ))

            # 3. Raw issue IDs, skipping matches already covered by a
            # wikilink span on the same line.
            wikilink_ranges = [(s.range.start.character, s.range.end.character)
                               for s in spans
                               if s.type == "wikilink" and s.range.start.line == abs_line_idx]

            for match in re.finditer(MarkdownParser.ISSUE_ID_PATTERN, line):
                is_inside = any(r[0] <= match.start() and match.end() <= r[1] for r in wikilink_ranges)
                if not is_inside:
                    spans.append(Span(
                        type="issue_id",
                        range=Range(
                            start=Position(line=abs_line_idx, character=match.start()),
                            end=Position(line=abs_line_idx, character=match.end())
                        ),
                        content=match.group(0),
                        metadata={"issue_id": match.group(1)}
                    ))

        return spans
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import re
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import Dict, List, Optional, Set
|
|
4
|
+
from pydantic import BaseModel
|
|
5
|
+
from .parser import MarkdownParser
|
|
6
|
+
from .models import Issue
|
|
7
|
+
from monoco.core.config import get_config, MonocoConfig
|
|
8
|
+
from monoco.core.lsp import Location, Range, Position
|
|
9
|
+
|
|
10
|
+
class IssueLocation(BaseModel):
    """Resolved location of one issue file within the workspace."""
    # Lowercased project name (workspace member or local project).
    project_id: str
    # Absolute path to the issue's markdown file.
    file_path: str
    # Bare issue ID extracted from the filename, e.g. "FEAT-0001".
    issue_id: str
|
|
14
|
+
|
|
15
|
+
class WorkspaceSymbolIndex:
    """
    Maintains a global index of all issues in the Monoco Workspace.

    Maps both namespaced IDs ("project::FEAT-0001") and bare IDs
    ("FEAT-0001") to file locations so links can be resolved from any
    project context.
    """

    # Compiled once: extracts the ID prefix from a filename such as
    # "EPIC-0016-some-title.md" -> "EPIC-0016". (Previously re.match was
    # called with the raw pattern inside the per-file loop.)
    _ID_FILE_RE = re.compile(r"^((?:EPIC|FEAT|CHORE|FIX)-\d{4})")

    def __init__(self, root_path: Path):
        self.root_path = root_path
        self.index: Dict[str, IssueLocation] = {}  # Map<FullID or bare ID, Location>
        # Map<LocalID, FullID>; NOTE(review): currently never populated or
        # read — kept for interface compatibility.
        self.local_map: Dict[str, str] = {}
        self._is_indexed = False

    def build_index(self, recursive: bool = True):
        """
        Scan the workspace (and optionally its member subprojects) and
        rebuild the ID -> location index from scratch.
        """
        self.index.clear()

        # 1. Index the local project under its configured name.
        project_name = "local"
        conf = get_config(str(self.root_path))
        if conf and conf.project and conf.project.name:
            project_name = conf.project.name.lower()

        self._index_project(self.root_path, project_name)

        # 2. Index workspace members. Guard conf explicitly instead of
        # relying on the except below to swallow the AttributeError; the
        # try stays so a single broken member config cannot abort
        # indexing of the rest (deliberate best-effort).
        if recursive and conf and conf.project:
            try:
                for member_name, rel_path in conf.project.members.items():
                    member_root = (self.root_path / rel_path).resolve()
                    if member_root.exists():
                        self._index_project(member_root, member_name.lower())
            except Exception:
                pass

        self._is_indexed = True

    def _index_project(self, project_root: Path, project_name: str):
        """Index every issue file found under <project_root>/Issues."""
        issues_dir = project_root / "Issues"
        if not issues_dir.exists():
            return

        # Scan the per-type subfolders.
        for subdir in ["Epics", "Features", "Chores", "Fixes"]:
            d = issues_dir / subdir
            if not d.exists():
                continue
            for f in d.rglob("*.md"):
                # Filename format: {ID}-{slug}.md, e.g. EPIC-0016-title.md
                match = self._ID_FILE_RE.match(f.name)
                if not match:
                    continue
                issue_id = match.group(1)
                loc = IssueLocation(
                    project_id=project_name,
                    file_path=str(f.absolute()),
                    issue_id=issue_id
                )
                self.index[f"{project_name}::{issue_id}"] = loc
                # Bare-ID alias for local lookups. NOTE: a later-indexed
                # project with the same bare ID overwrites this alias.
                self.index[issue_id] = loc

    def resolve(self, issue_id: str, context_project: Optional[str] = None) -> Optional[IssueLocation]:
        """
        Resolve an issue ID ('Project::ID' or bare 'ID') to its location.

        Lookup is case-insensitive (project lowercased, local ID
        uppercased). Returns None when the ID is unknown.
        """
        if not self._is_indexed:
            self.build_index()

        # Normalize the lookup key.
        if "::" in issue_id:
            proj, lid = issue_id.split("::", 1)
            issue_id = f"{proj.lower()}::{lid.upper()}"
        else:
            issue_id = issue_id.upper()
        if context_project:
            context_project = context_project.lower()

        # 1. Exact match (namespaced key or bare-ID alias).
        if issue_id in self.index:
            return self.index[issue_id]

        # 2. Bare ID qualified with the caller's project context.
        if "::" not in issue_id and context_project:
            full_id = f"{context_project}::{issue_id}"
            if full_id in self.index:
                return self.index[full_id]

        return None
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
from .models import Transition
|
|
3
|
+
|
|
4
|
+
from .machine import StateMachine
|
|
5
|
+
from .config import DEFAULT_ISSUE_CONFIG
|
|
6
|
+
from monoco.core.config import get_config
|
|
7
|
+
|
|
8
|
+
def get_engine(project_root: Optional[str] = None) -> StateMachine:
    """
    Build the issue StateMachine for a project.

    Starts from the built-in DEFAULT_ISSUE_CONFIG and layers any
    user-supplied `issue` overrides from the merged core config on top.
    """
    # Deep-copy so merging never mutates the shared default instance.
    effective = DEFAULT_ISSUE_CONFIG.model_copy(deep=True)

    # Core config already merges workspace & project yamls; its `issue`
    # section (when present) is a validated IssueSchemaConfig.
    core_config = get_config(project_root)
    overrides = core_config.issue
    if overrides:
        effective.merge(overrides)

    return StateMachine(effective)
|
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
from monoco.core.config import IssueSchemaConfig, IssueTypeConfig, TransitionConfig, StateMachineConfig
|
|
2
|
+
|
|
3
|
+
# Built-in issue schema: types, statuses, stages, solutions and the state
# machine's transitions. get_engine() deep-copies this and merges any user
# overrides on top, so this instance must never be mutated directly.
#
# Transitions that omit `from_stage` apply from any stage of `from_status`.
DEFAULT_ISSUE_CONFIG = IssueSchemaConfig(
    types=[
        IssueTypeConfig(name="epic", label="Epic", prefix="EPIC", folder="Epics"),
        IssueTypeConfig(name="feature", label="Feature", prefix="FEAT", folder="Features"),
        IssueTypeConfig(name="chore", label="Chore", prefix="CHORE", folder="Chores"),
        IssueTypeConfig(name="fix", label="Fix", prefix="FIX", folder="Fixes"),
    ],
    statuses=["open", "closed", "backlog"],
    # NOTE(review): "freezed" is kept as-is — it is a persisted runtime
    # value; renaming it would break existing issue files.
    stages=["draft", "doing", "review", "done", "freezed"],
    solutions=["implemented", "cancelled", "wontfix", "duplicate"],
    workflows=[
        # --- UNIVERSAL AGENT ACTIONS ---


        # --- OPEN -> OPEN Transitions (Stage changes) ---
        TransitionConfig(
            name="start",
            label="Start",
            icon="$(play)",
            from_status="open",
            from_stage="draft",
            to_status="open",
            to_stage="doing",
            command_template="monoco issue start {id}",
            description="Start working on the issue"
        ),



        TransitionConfig(
            name="stop",
            label="Stop",
            icon="$(stop)",
            from_status="open",
            from_stage="doing",
            to_status="open",
            to_stage="draft",
            command_template="monoco issue stop {id}",
            description="Stop working and return to draft"
        ),
        TransitionConfig(
            name="submit",
            label="Submit",
            icon="$(check)",
            from_status="open",
            from_stage="doing",
            to_status="open",
            to_stage="review",
            command_template="monoco issue submit {id}",
            description="Submit for review"
        ),
        TransitionConfig(
            name="reject",
            label="Reject",
            icon="$(error)",
            from_status="open",
            from_stage="review",
            to_status="open",
            to_stage="doing",
            command_template="monoco issue update {id} --stage doing",
            description="Reject review and return to doing"
        ),

        # --- OPEN -> CLOSED Transitions ---
        TransitionConfig(
            name="accept",
            label="Accept",
            icon="$(pass-filled)",
            from_status="open",
            from_stage="review",
            to_status="closed",
            to_stage="done",
            required_solution="implemented",
            command_template="monoco issue close {id} --solution implemented",
            description="Accept and close issue"
        ),
        TransitionConfig(
            name="close_done",
            label="Close",
            icon="$(close)",
            from_status="open",
            from_stage="done",
            to_status="closed",
            to_stage="done",
            required_solution="implemented",
            command_template="monoco issue close {id} --solution implemented",
            description="Close completed issue"
        ),
        TransitionConfig(
            name="cancel",
            label="Cancel",
            icon="$(trash)",
            from_status="open",
            # Allowed from any stage except DONE (though core.py had a check for it)
            to_status="closed",
            to_stage="done",
            required_solution="cancelled",
            command_template="monoco issue cancel {id}",
            description="Cancel the issue"
        ),
        TransitionConfig(
            name="wontfix",
            label="Won't Fix",
            icon="$(circle-slash)",
            from_status="open",
            to_status="closed",
            to_stage="done",
            required_solution="wontfix",
            command_template="monoco issue close {id} --solution wontfix",
            description="Mark as won't fix"
        ),

        # --- BACKLOG Transitions ---
        TransitionConfig(
            name="push",
            label="Push to Backlog",
            icon="$(archive)",
            from_status="open",
            to_status="backlog",
            to_stage="freezed",
            command_template="monoco issue backlog push {id}",
            description="Move issue to backlog"
        ),

        TransitionConfig(
            name="pull",
            label="Pull",
            icon="$(arrow-up)",
            from_status="backlog",
            to_status="open",
            to_stage="draft",
            command_template="monoco issue backlog pull {id}",
            description="Restore issue from backlog"
        ),
        TransitionConfig(
            name="cancel_backlog",
            label="Cancel",
            icon="$(trash)",
            from_status="backlog",
            to_status="closed",
            to_stage="done",
            required_solution="cancelled",
            command_template="monoco issue cancel {id}",
            description="Cancel backlog issue"
        ),

        # --- CLOSED Transitions ---
        TransitionConfig(
            name="reopen",
            label="Reopen",
            icon="$(refresh)",
            from_status="closed",
            to_status="open",
            to_stage="draft",
            command_template="monoco issue open {id}",
            description="Reopen a closed issue"
        ),
        # Despite the section header above, this starts from open/done:
        # it reopens an issue that was marked done but never closed.
        TransitionConfig(
            name="reopen_from_done",
            label="Reopen",
            icon="$(refresh)",
            from_status="open",
            from_stage="done",
            to_status="open",
            to_stage="draft",
            command_template="monoco issue open {id}",
            description="Reopen a done issue"
        ),
    ]
)
|