monoco-toolkit 0.1.1__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monoco/cli/__init__.py +0 -0
- monoco/cli/project.py +87 -0
- monoco/cli/workspace.py +46 -0
- monoco/core/agent/__init__.py +5 -0
- monoco/core/agent/action.py +144 -0
- monoco/core/agent/adapters.py +129 -0
- monoco/core/agent/protocol.py +31 -0
- monoco/core/agent/state.py +106 -0
- monoco/core/config.py +212 -17
- monoco/core/execution.py +62 -0
- monoco/core/feature.py +58 -0
- monoco/core/git.py +51 -2
- monoco/core/injection.py +196 -0
- monoco/core/integrations.py +242 -0
- monoco/core/lsp.py +68 -0
- monoco/core/output.py +21 -3
- monoco/core/registry.py +36 -0
- monoco/core/resources/en/AGENTS.md +8 -0
- monoco/core/resources/en/SKILL.md +66 -0
- monoco/core/resources/zh/AGENTS.md +8 -0
- monoco/core/resources/zh/SKILL.md +65 -0
- monoco/core/setup.py +96 -110
- monoco/core/skills.py +444 -0
- monoco/core/state.py +53 -0
- monoco/core/sync.py +224 -0
- monoco/core/telemetry.py +4 -1
- monoco/core/workspace.py +85 -20
- monoco/daemon/app.py +127 -58
- monoco/daemon/models.py +4 -0
- monoco/daemon/services.py +56 -155
- monoco/features/config/commands.py +125 -44
- monoco/features/i18n/adapter.py +29 -0
- monoco/features/i18n/commands.py +89 -10
- monoco/features/i18n/core.py +113 -27
- monoco/features/i18n/resources/en/AGENTS.md +8 -0
- monoco/features/i18n/resources/en/SKILL.md +94 -0
- monoco/features/i18n/resources/zh/AGENTS.md +8 -0
- monoco/features/i18n/resources/zh/SKILL.md +94 -0
- monoco/features/issue/adapter.py +34 -0
- monoco/features/issue/commands.py +343 -101
- monoco/features/issue/core.py +384 -150
- monoco/features/issue/domain/__init__.py +0 -0
- monoco/features/issue/domain/lifecycle.py +126 -0
- monoco/features/issue/domain/models.py +170 -0
- monoco/features/issue/domain/parser.py +223 -0
- monoco/features/issue/domain/workspace.py +104 -0
- monoco/features/issue/engine/__init__.py +22 -0
- monoco/features/issue/engine/config.py +172 -0
- monoco/features/issue/engine/machine.py +185 -0
- monoco/features/issue/engine/models.py +18 -0
- monoco/features/issue/linter.py +325 -120
- monoco/features/issue/lsp/__init__.py +3 -0
- monoco/features/issue/lsp/definition.py +72 -0
- monoco/features/issue/migration.py +134 -0
- monoco/features/issue/models.py +46 -24
- monoco/features/issue/monitor.py +94 -0
- monoco/features/issue/resources/en/AGENTS.md +20 -0
- monoco/features/issue/resources/en/SKILL.md +111 -0
- monoco/features/issue/resources/zh/AGENTS.md +20 -0
- monoco/features/issue/resources/zh/SKILL.md +138 -0
- monoco/features/issue/validator.py +455 -0
- monoco/features/spike/adapter.py +30 -0
- monoco/features/spike/commands.py +45 -24
- monoco/features/spike/core.py +6 -40
- monoco/features/spike/resources/en/AGENTS.md +7 -0
- monoco/features/spike/resources/en/SKILL.md +74 -0
- monoco/features/spike/resources/zh/AGENTS.md +7 -0
- monoco/features/spike/resources/zh/SKILL.md +74 -0
- monoco/main.py +91 -2
- monoco_toolkit-0.2.8.dist-info/METADATA +136 -0
- monoco_toolkit-0.2.8.dist-info/RECORD +83 -0
- monoco_toolkit-0.1.1.dist-info/METADATA +0 -93
- monoco_toolkit-0.1.1.dist-info/RECORD +0 -33
- {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.8.dist-info}/WHEEL +0 -0
- {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.8.dist-info}/entry_points.txt +0 -0
- {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.8.dist-info}/licenses/LICENSE +0 -0
|
File without changes
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
from typing import List, Optional, Callable
|
|
2
|
+
from pydantic import BaseModel
|
|
3
|
+
from ..models import IssueStatus, IssueStage, IssueSolution, current_time
|
|
4
|
+
from .models import Issue
|
|
5
|
+
|
|
6
|
+
class Transition(BaseModel):
    """One allowed move in the issue lifecycle state machine.

    A ``None`` precondition (``from_status`` / ``from_stage``) acts as a
    wildcard: the transition is applicable from any value of that field.
    """

    name: str
    from_status: Optional[IssueStatus] = None  # None means any
    from_stage: Optional[IssueStage] = None  # None means any
    to_status: IssueStatus
    to_stage: Optional[IssueStage] = None
    required_solution: Optional[IssueSolution] = None
    description: str = ""

    def is_allowed(self, issue: Issue) -> bool:
        """Return True when *issue* currently satisfies this transition's preconditions."""
        status_ok = not self.from_status or issue.status == self.from_status
        stage_ok = not self.from_stage or issue.frontmatter.stage == self.from_stage
        return status_ok and stage_ok
|
|
21
|
+
|
|
22
|
+
class TransitionService:
    """Owns the catalogue of lifecycle transitions and applies them to issues."""

    def __init__(self):
        self.transitions: List[Transition] = [
            # Open -> Backlog
            Transition(
                name="freeze",
                from_status=IssueStatus.OPEN,
                to_status=IssueStatus.BACKLOG,
                to_stage=IssueStage.FREEZED,
                description="Move open issue to backlog"
            ),
            # Backlog -> Open
            Transition(
                name="activate",
                from_status=IssueStatus.BACKLOG,
                to_status=IssueStatus.OPEN,
                to_stage=IssueStage.DRAFT,  # Reset to draft?
                description="Restore issue from backlog"
            ),
            # Open (Draft) -> Open (Doing)
            Transition(
                name="start",
                from_status=IssueStatus.OPEN,
                from_stage=IssueStage.DRAFT,
                to_status=IssueStatus.OPEN,
                to_stage=IssueStage.DOING,
                description="Start working on the issue"
            ),
            # Open (Doing) -> Open (Review)
            Transition(
                name="submit",
                from_status=IssueStatus.OPEN,
                from_stage=IssueStage.DOING,
                to_status=IssueStatus.OPEN,
                to_stage=IssueStage.REVIEW,
                description="Submit for review"
            ),
            # Open (Review) -> Open (Doing) - reject
            Transition(
                name="reject",
                from_status=IssueStatus.OPEN,
                from_stage=IssueStage.REVIEW,
                to_status=IssueStatus.OPEN,
                to_stage=IssueStage.DOING,
                description="Reject review and return to doing"
            ),
            # Open (Review) -> Closed (Implemented)
            Transition(
                name="accept",
                from_status=IssueStatus.OPEN,
                from_stage=IssueStage.REVIEW,
                to_status=IssueStatus.CLOSED,
                to_stage=IssueStage.DONE,
                required_solution=IssueSolution.IMPLEMENTED,
                description="Accept and close issue"
            ),
            # Direct close paths (no status/stage preconditions).
            Transition(
                name="cancel",
                to_status=IssueStatus.CLOSED,
                to_stage=IssueStage.DONE,
                required_solution=IssueSolution.CANCELLED,
                description="Cancel the issue"
            ),
            Transition(
                name="wontfix",
                to_status=IssueStatus.CLOSED,
                to_stage=IssueStage.DONE,
                required_solution=IssueSolution.WONTFIX,
                description="Mark as wontfix"
            ),
        ]

    def get_available_transitions(self, issue: Issue) -> List[Transition]:
        """List every transition whose preconditions the issue currently meets."""
        return [tr for tr in self.transitions if tr.is_allowed(issue)]

    def apply_transition(self, issue: Issue, transition_name: str) -> Issue:
        """Mutate *issue* in place according to the named transition.

        Raises:
            ValueError: when no transition with that name is allowed from the
                issue's current status/stage.
        """
        # First transition matching by name AND allowed from the current state.
        chosen = next(
            (tr for tr in self.transitions
             if tr.name == transition_name and tr.is_allowed(issue)),
            None,
        )
        if not chosen:
            raise ValueError(f"Transition '{transition_name}' is not allowed for current state.")

        # Apply the target state; stage/solution only when the transition sets them.
        issue.frontmatter.status = chosen.to_status
        if chosen.to_stage:
            issue.frontmatter.stage = chosen.to_stage
        if chosen.required_solution:
            issue.frontmatter.solution = chosen.required_solution

        issue.frontmatter.updated_at = current_time()

        # Stamp lifecycle timestamps only the first time each state is reached.
        if chosen.to_status == IssueStatus.CLOSED and issue.frontmatter.closed_at is None:
            issue.frontmatter.closed_at = current_time()

        if chosen.to_status == IssueStatus.OPEN and issue.frontmatter.opened_at is None:
            issue.frontmatter.opened_at = current_time()

        return issue
|
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
from typing import List, Optional, Any, Dict
|
|
2
|
+
from pydantic import BaseModel, Field, model_validator
|
|
3
|
+
from datetime import datetime
|
|
4
|
+
from ..models import IssueType, IssueStatus, IssueStage, IssueSolution, IssueIsolation, IssueID, current_time
|
|
5
|
+
from monoco.core.lsp import Range, Position
|
|
6
|
+
|
|
7
|
+
class Span(BaseModel):
    """
    Represents a fine-grained location inside a ContentBlock.

    A span pins one syntactic element (wikilink, issue ID, checkbox, ...) to
    an exact character range so tooling can target it precisely.
    """
    type: str  # 'wikilink', 'issue_id', 'checkbox', 'yaml_key', 'plain_text'
    range: Range  # LSP-style position of the element in the source document
    content: str  # raw matched text
    metadata: Dict[str, Any] = Field(default_factory=dict)  # extra parser data, e.g. resolved issue_id
|
|
15
|
+
|
|
16
|
+
class ContentBlock(BaseModel):
    """
    Represents a block of content in the markdown body.

    As produced by MarkdownParser, `line_start` is inclusive and `line_end`
    exclusive, both absolute within the original file.
    """
    type: str  # e.g., 'heading', 'task_list', 'paragraph', 'empty'
    content: str  # raw text of the block (no trailing newline)
    line_start: int  # first line of the block (inclusive)
    line_end: int  # one past the last line (exclusive)
    spans: List[Span] = Field(default_factory=list)  # fine-grained elements found inside the block
    metadata: Dict[str, Any] = Field(default_factory=dict)

    def to_string(self) -> str:
        """Return the block's raw markdown text."""
        return self.content
|
|
29
|
+
|
|
30
|
+
from enum import Enum
|
|
31
|
+
|
|
32
|
+
class TaskState(str, Enum):
    """Checkbox marker characters for markdown task items.

    NOTE: the parser additionally accepts '/' (doing) and '~' (cancelled)
    as new-standard aliases; these enum values are the canonical markers.
    """
    TODO = " "
    DONE = "x"
    DOING = "-"
    CANCELLED = "+"
|
|
37
|
+
|
|
38
|
+
class TaskItem(ContentBlock):
    """A single markdown checkbox line, specialized from ContentBlock."""

    type: str = "task_item"  # fixed discriminator for this block kind
    state: TaskState = TaskState.TODO
    level: int = 0  # nesting depth (the parser derives one level per two spaces of indent)
    parent_index: Optional[int] = None  # index of the enclosing task, when nested

    @property
    def is_completed(self) -> bool:
        """A task counts as completed once it is done or cancelled."""
        return self.state in (TaskState.DONE, TaskState.CANCELLED)
|
|
47
|
+
|
|
48
|
+
class IssueBody(BaseModel):
    """
    Represents the parsed body of the issue as an ordered list of blocks.
    """
    blocks: List[ContentBlock] = Field(default_factory=list)

    def to_markdown(self) -> str:
        """Re-render the body by joining each block's raw text with newlines."""
        rendered = [block.to_string() for block in self.blocks]
        return "\n".join(rendered)

    @property
    def raw(self) -> str:
        """Alias for to_markdown()."""
        return self.to_markdown()

    @property
    def tasks(self) -> List[TaskItem]:
        """All task blocks, whether typed TaskItem or plain blocks tagged 'task_item'."""
        def _is_task(block: ContentBlock) -> bool:
            if isinstance(block, TaskItem):
                return True
            return isinstance(block, ContentBlock) and block.type == 'task_item'

        return [block for block in self.blocks if _is_task(block)]

    @property
    def progress(self) -> str:
        """Completion summary as 'done/total' (done includes cancelled tasks)."""
        items = self.tasks
        if not items:
            return "0/0"
        done = len([item for item in items if isinstance(item, TaskItem) and item.is_completed])
        return f"{done}/{len(items)}"
|
|
72
|
+
|
|
73
|
+
class IssueFrontmatter(BaseModel):
    """
    Represents the YAML frontmatter of the issue.
    Contains metadata and validation logic.
    """
    id: str
    uid: Optional[str] = None
    type: IssueType
    status: IssueStatus = IssueStatus.OPEN
    stage: Optional[IssueStage] = None
    title: str
    created_at: datetime = Field(default_factory=current_time)
    opened_at: Optional[datetime] = None
    updated_at: datetime = Field(default_factory=current_time)
    closed_at: Optional[datetime] = None
    parent: Optional[str] = None
    dependencies: List[str] = Field(default_factory=list)
    related: List[str] = Field(default_factory=list)
    tags: List[str] = Field(default_factory=list)
    solution: Optional[IssueSolution] = None
    isolation: Optional[IssueIsolation] = None

    # Unknown keys are preserved so user-defined frontmatter survives round-trips.
    model_config = {"extra": "allow"}

    @model_validator(mode='before')
    @classmethod
    def normalize_fields(cls, v: Any) -> Any:
        """Lower-case the enum-backed string fields before validation."""
        if isinstance(v, dict):
            for key in ("type", "status"):
                raw = v.get(key)
                if isinstance(raw, str):
                    v[key] = raw.lower()
        return v
|
|
107
|
+
|
|
108
|
+
class Issue(BaseModel):
    """
    The Aggregate Root for an Issue in the Domain Layer.
    """
    path: Optional[str] = None
    frontmatter: IssueFrontmatter
    body: IssueBody

    @property
    def id(self) -> str:
        """Shortcut to the frontmatter's issue ID."""
        return self.frontmatter.id

    @property
    def status(self) -> IssueStatus:
        """Shortcut to the frontmatter's status."""
        return self.frontmatter.status

    def to_file_content(self) -> str:
        """
        Reconstruct the full file content (YAML frontmatter + markdown body).

        Keys are emitted in a fixed, human-friendly order; some keys are kept
        even when empty/null to serve as prompts for manual editing.
        """
        import yaml

        data = self.frontmatter.model_dump(mode='json')

        # Assemble a fresh dict so we control both ordering and key presence.
        fm: Dict[str, Any] = {}

        # 1. Identity
        fm['id'] = data['id']
        if data.get('uid'):
            fm['uid'] = data['uid']

        # 2. Classifier
        fm['type'] = data['type']
        fm['status'] = data['status']
        if data.get('stage'):
            fm['stage'] = data['stage']

        # 3. Content
        fm['title'] = data['title']

        # 4. Dates (created/updated always kept; the others only when set)
        fm['created_at'] = data['created_at']
        if data.get('opened_at'):
            fm['opened_at'] = data['opened_at']
        fm['updated_at'] = data['updated_at']
        if data.get('closed_at'):
            fm['closed_at'] = data['closed_at']

        # 5. Graph (always emitted — even null/empty — to prompt usage)
        fm['parent'] = data.get('parent')
        fm['dependencies'] = data.get('dependencies', [])
        fm['related'] = data.get('related', [])
        fm['tags'] = data.get('tags', [])

        # 6. Lifecycle (optional)
        if data.get('solution'):
            fm['solution'] = data['solution']
        if data.get('isolation'):
            fm['isolation'] = data['isolation']

        fm_str = yaml.dump(fm, sort_keys=False, allow_unicode=True).strip()
        body_str = self.body.to_markdown()

        return f"---\n{fm_str}\n---\n\n{body_str}"
|
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
import yaml
|
|
2
|
+
import re
|
|
3
|
+
from typing import List, Optional, Tuple, Any
|
|
4
|
+
from .models import Issue, IssueFrontmatter, IssueBody, ContentBlock, Span
|
|
5
|
+
from monoco.core.lsp import Range, Position
|
|
6
|
+
|
|
7
|
+
class MarkdownParser:
    """
    Parses markdown content into Domain Models.

    Two-phase: the YAML frontmatter is split off first, then the remaining
    body is segmented into line-oriented ContentBlocks, each of which is
    scanned for fine-grained Spans (checkboxes, wikilinks, issue IDs).
    """

    # Regex for standard Issue IDs and cross-project IDs
    ISSUE_ID_PATTERN = r"\b((?:[a-zA-Z0-9_]+::)?(?:EPIC|FEAT|CHORE|FIX)-\d{4})\b"
    # Regex for Wikilinks [[Project::IssueID]] or [[IssueID]]
    WIKILINK_PATTERN = r"\[\[((?:[a-zA-Z0-9_]+::)?(?:EPIC|FEAT|CHORE|FIX)-\d{4})\]\]"

    @staticmethod
    def parse(content: str, path: Optional[str] = None) -> Issue:
        """Parse a full issue file (frontmatter + markdown body) into an Issue."""
        lines = content.splitlines()

        # 1. Parse Frontmatter
        frontmatter_dict, body_start_line = MarkdownParser._extract_frontmatter(lines)

        # 2. Create Frontmatter Object. Missing/empty frontmatter yields {},
        #    which surfaces as a pydantic ValidationError on the required
        #    fields — clearer than the TypeError a None mapping would cause.
        frontmatter = IssueFrontmatter(**frontmatter_dict)

        # 3. Parse Body, keeping line numbers absolute to the original file.
        body_lines = lines[body_start_line:]
        blocks = MarkdownParser._parse_blocks(body_lines, start_line_offset=body_start_line)

        body = IssueBody(blocks=blocks)

        return Issue(path=path, frontmatter=frontmatter, body=body)

    @staticmethod
    def _extract_frontmatter(lines: List[str]) -> Tuple[dict, int]:
        """
        Extracts YAML frontmatter. Returns (dict, body_start_line_index).

        Returns ({}, 0) when there is no opening '---' fence or the fence is
        never closed (malformed). BUGFIX: yaml.safe_load returns None for an
        empty document and may return a scalar for degenerate input; both are
        normalized to {} so callers can always treat the result as a mapping.
        """
        if not lines or lines[0].strip() != "---":
            return {}, 0

        fm_lines = []
        i = 1
        while i < len(lines):
            line = lines[i]
            if line.strip() == "---":
                parsed = yaml.safe_load("\n".join(fm_lines))
                return (parsed if isinstance(parsed, dict) else {}), i + 1
            fm_lines.append(line)
            i += 1

        return {}, 0  # malformed: opening fence without a closing one

    @staticmethod
    def _parse_blocks(lines: List[str], start_line_offset: int) -> List[ContentBlock]:
        """Segment body lines into heading / task_item / empty / paragraph blocks."""
        blocks = []
        current_block_lines = []
        current_block_type = "paragraph"
        current_start_line = start_line_offset

        def flush_block():
            # Emit the accumulated paragraph (if any) and reset the buffer.
            nonlocal current_block_lines, current_start_line
            if current_block_lines:
                content = "\n".join(current_block_lines)
                block = ContentBlock(
                    type=current_block_type,
                    content=content,
                    line_start=current_start_line,
                    line_end=current_start_line + len(current_block_lines)
                )
                block.spans = MarkdownParser._parse_spans(current_block_lines, current_start_line)
                blocks.append(block)
                current_block_lines = []

        for i, line in enumerate(lines):
            abs_line_idx = start_line_offset + i

            # 1. Heading: always a standalone single-line block.
            if re.match(r"^#{1,6}\s", line):
                flush_block()

                block = ContentBlock(
                    type="heading",
                    content=line,
                    line_start=abs_line_idx,
                    line_end=abs_line_idx + 1
                )
                block.spans = MarkdownParser._parse_spans([line], abs_line_idx)
                blocks.append(block)
                current_start_line = abs_line_idx + 1
                current_block_type = "paragraph"  # reset
                continue

            # 2. Task list item: "- [<marker>] ..." with optional indent.
            task_match = re.match(r"^(\s*)-\s*\[([ xX\-\+~/])\]", line)
            if task_match:
                flush_block()

                indent_str = task_match.group(1)
                # Lower-casing also folds the 'X' marker into 'x', so no
                # separate fallback branch is needed.
                state_char = task_match.group(2).lower()

                # Nesting level assumes 2 spaces per indent step.
                level = len(indent_str) // 2

                from .models import TaskState, TaskItem
                state_map = {
                    " ": TaskState.TODO,
                    "x": TaskState.DONE,
                    "-": TaskState.DOING,      # Legacy
                    "/": TaskState.DOING,      # New Standard
                    "+": TaskState.CANCELLED,  # Legacy
                    "~": TaskState.CANCELLED,  # New Standard
                }

                block = TaskItem(
                    content=line,
                    line_start=abs_line_idx,
                    line_end=abs_line_idx + 1,
                    state=state_map.get(state_char, TaskState.TODO),
                    level=level,
                    # BUGFIX: '~' is the new-standard cancelled marker and must
                    # count as checked, exactly like its legacy alias '+'.
                    metadata={"checked": state_char in ['x', '+', '~']}
                )
                block.spans = MarkdownParser._parse_spans([line], abs_line_idx)
                blocks.append(block)
                current_start_line = abs_line_idx + 1
                current_block_type = "paragraph"
                continue

            # 3. Empty line: flush the paragraph and record an 'empty' block.
            if not line.strip():
                flush_block()

                blocks.append(ContentBlock(
                    type="empty",
                    content="",
                    line_start=abs_line_idx,
                    line_end=abs_line_idx + 1
                ))
                current_start_line = abs_line_idx + 1
                current_block_type = "paragraph"
                continue

            # Default: accumulate lines into the current paragraph.
            if not current_block_lines:
                current_start_line = abs_line_idx

            current_block_lines.append(line)

        # Flush any trailing paragraph.
        flush_block()

        return blocks

    @staticmethod
    def _parse_spans(lines: List[str], line_offset: int) -> List[Span]:
        """
        Parses a list of lines into Spans (checkboxes, wikilinks, raw issue IDs).
        """
        spans = []
        for i, line in enumerate(lines):
            abs_line_idx = line_offset + i

            # 1. Parse Checkboxes (only at start of line)
            checkbox_match = re.match(r"^(\s*-\s*\[)([ xX\-\+~/])(\])", line)
            if checkbox_match:
                start_char = len(checkbox_match.group(1))
                end_char = start_char + 1
                spans.append(Span(
                    type="checkbox",
                    range=Range(
                        start=Position(line=abs_line_idx, character=start_char),
                        end=Position(line=abs_line_idx, character=end_char)
                    ),
                    content=checkbox_match.group(2),
                    metadata={"state": checkbox_match.group(2)}
                ))

            # 2. Parse Wikilinks
            for match in re.finditer(MarkdownParser.WIKILINK_PATTERN, line):
                spans.append(Span(
                    type="wikilink",
                    range=Range(
                        start=Position(line=abs_line_idx, character=match.start()),
                        end=Position(line=abs_line_idx, character=match.end())
                    ),
                    content=match.group(0),
                    metadata={"issue_id": match.group(1)}
                ))

            # 3. Parse Raw Issue IDs (not inside wikilinks).
            # Exclusion logic: skip an ID match when its character range is
            # fully contained in a wikilink span found on this same line.
            wikilink_ranges = [(s.range.start.character, s.range.end.character) for s in spans if s.type == "wikilink" and s.range.start.line == abs_line_idx]

            for match in re.finditer(MarkdownParser.ISSUE_ID_PATTERN, line):
                is_inside = any(r[0] <= match.start() and match.end() <= r[1] for r in wikilink_ranges)
                if not is_inside:
                    spans.append(Span(
                        type="issue_id",
                        range=Range(
                            start=Position(line=abs_line_idx, character=match.start()),
                            end=Position(line=abs_line_idx, character=match.end())
                        ),
                        content=match.group(0),
                        metadata={"issue_id": match.group(1)}
                    ))

        return spans
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import re
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import Dict, List, Optional, Set
|
|
4
|
+
from pydantic import BaseModel
|
|
5
|
+
from .parser import MarkdownParser
|
|
6
|
+
from .models import Issue
|
|
7
|
+
from monoco.core.config import get_config, MonocoConfig
|
|
8
|
+
from monoco.core.lsp import Location, Range, Position
|
|
9
|
+
|
|
10
|
+
class IssueLocation(BaseModel):
    """Resolved location of a single issue file within the workspace."""
    project_id: str  # lower-cased project/namespace name, e.g. 'local'
    file_path: str  # absolute path to the issue's markdown file
    issue_id: str  # bare issue ID, e.g. 'FEAT-0012' (no project prefix)
|
|
14
|
+
|
|
15
|
+
class WorkspaceSymbolIndex:
    """
    Maintains a global index of all issues in the Monoco Workspace.
    Allows resolving Issue IDs (local or namespaced) to file locations.
    """

    def __init__(self, root_path: Path):
        self.root_path = root_path
        # Map<FullID, Location> plus bare-ID aliases; a bare-ID alias is
        # overwritten by whichever project is indexed last.
        self.index: Dict[str, IssueLocation] = {}
        self.local_map: Dict[str, str] = {}  # Map<LocalID, FullID> for current context project
        self._is_indexed = False

    def build_index(self, recursive: bool = True):
        """
        Scans the workspace and subprojects to build the index.
        """
        self.index.clear()

        # 1. Index local project
        project_name = "local"
        conf = get_config(str(self.root_path))
        if conf and conf.project and conf.project.name:
            project_name = conf.project.name.lower()

        self._index_project(self.root_path, project_name)

        # 2. Index workspace members.
        # BUGFIX: guard conf/conf.project before dereferencing `members` —
        # previously a missing config raised AttributeError that was only
        # masked by the blanket `except` below.
        if recursive and conf and conf.project and getattr(conf.project, "members", None):
            try:
                for member_name, rel_path in conf.project.members.items():
                    member_root = (self.root_path / rel_path).resolve()
                    if member_root.exists():
                        self._index_project(member_root, member_name.lower())
            except Exception:
                # Deliberate best-effort: a broken member must not prevent
                # the local index from being usable.
                pass

        self._is_indexed = True

    def _index_project(self, project_root: Path, project_name: str):
        """Index every issue file found under <project_root>/Issues."""
        issues_dir = project_root / "Issues"
        if not issues_dir.exists():
            return

        # Scan Epics, Features, Chores, Fixes
        for subdir in ["Epics", "Features", "Chores", "Fixes"]:
            d = issues_dir / subdir
            if d.exists():
                for f in d.rglob("*.md"):
                    # Filename format: {ID}-{slug}.md
                    # e.g. EPIC-0016-title.md -> EPIC-0016
                    match = re.match(r"^((?:EPIC|FEAT|CHORE|FIX)-\d{4})", f.name)
                    if match:
                        issue_id = match.group(1)
                        full_id = f"{project_name}::{issue_id}"
                        loc = IssueLocation(
                            project_id=project_name,
                            file_path=str(f.absolute()),
                            issue_id=issue_id
                        )
                        self.index[full_id] = loc
                        self.index[issue_id] = loc  # bare-ID alias for local lookup

    def resolve(self, issue_id: str, context_project: Optional[str] = None) -> Optional[IssueLocation]:
        """
        Resolves an issue ID to its location, or None when unknown.
        Supports 'Project::ID' and bare 'ID' (optionally qualified by
        *context_project*). Lazily builds the index on first use.
        """
        if not self._is_indexed:
            self.build_index()

        # Normalize: project part lower-case, local part upper-case.
        if "::" in issue_id:
            proj, lid = issue_id.split("::", 1)
            issue_id = f"{proj.lower()}::{lid.upper()}"
        else:
            issue_id = issue_id.upper()
            if context_project:
                context_project = context_project.lower()

        # 1. Try exact match
        if issue_id in self.index:
            return self.index[issue_id]

        # 2. Try contextual resolution if it's a local ID
        if "::" not in issue_id and context_project:
            full_id = f"{context_project}::{issue_id}"
            if full_id in self.index:
                return self.index[full_id]

        return None
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
from .models import Transition
|
|
3
|
+
|
|
4
|
+
from .machine import StateMachine
|
|
5
|
+
from .config import DEFAULT_ISSUE_CONFIG
|
|
6
|
+
from monoco.core.config import get_config
|
|
7
|
+
|
|
8
|
+
def get_engine(project_root: Optional[str] = None) -> StateMachine:
    """Build the issue lifecycle StateMachine for a project.

    Merges the built-in default issue schema with any user overrides found
    in the workspace/project configuration.

    Args:
        project_root: Directory whose config should be loaded; when None,
            get_config falls back to its own default resolution.

    Returns:
        A StateMachine driven by the merged configuration.
    """
    # 1. Load Core Config (merges workspace & project yamls)
    core_config = get_config(project_root)

    # 2. Start with Defaults.
    # Use model_copy to avoid mutating the global default instance.
    final_config = DEFAULT_ISSUE_CONFIG.model_copy(deep=True)

    # 3. Merge User Overrides
    if core_config.issue:
        # core_config.issue is already an IssueSchemaConfig (parsed/validated
        # by Pydantic); we only need to merge it over the defaults.
        final_config.merge(core_config.issue)

    return StateMachine(final_config)
|