monoco-toolkit 0.2.8-py3-none-any.whl → 0.3.0-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- monoco/cli/project.py +35 -31
- monoco/cli/workspace.py +26 -16
- monoco/core/agent/__init__.py +0 -2
- monoco/core/agent/action.py +44 -20
- monoco/core/agent/adapters.py +20 -16
- monoco/core/agent/protocol.py +5 -4
- monoco/core/agent/state.py +21 -21
- monoco/core/config.py +90 -33
- monoco/core/execution.py +21 -16
- monoco/core/feature.py +8 -5
- monoco/core/git.py +61 -30
- monoco/core/hooks.py +57 -0
- monoco/core/injection.py +47 -44
- monoco/core/integrations.py +50 -35
- monoco/core/lsp.py +12 -1
- monoco/core/output.py +35 -16
- monoco/core/registry.py +3 -2
- monoco/core/setup.py +190 -124
- monoco/core/skills.py +121 -107
- monoco/core/state.py +12 -10
- monoco/core/sync.py +85 -56
- monoco/core/telemetry.py +10 -6
- monoco/core/workspace.py +26 -19
- monoco/daemon/app.py +123 -79
- monoco/daemon/commands.py +14 -13
- monoco/daemon/models.py +11 -3
- monoco/daemon/reproduce_stats.py +8 -8
- monoco/daemon/services.py +32 -33
- monoco/daemon/stats.py +59 -40
- monoco/features/config/commands.py +38 -25
- monoco/features/i18n/adapter.py +4 -5
- monoco/features/i18n/commands.py +83 -49
- monoco/features/i18n/core.py +94 -54
- monoco/features/issue/adapter.py +6 -7
- monoco/features/issue/commands.py +468 -272
- monoco/features/issue/core.py +419 -312
- monoco/features/issue/domain/lifecycle.py +33 -23
- monoco/features/issue/domain/models.py +71 -38
- monoco/features/issue/domain/parser.py +92 -69
- monoco/features/issue/domain/workspace.py +19 -16
- monoco/features/issue/engine/__init__.py +3 -3
- monoco/features/issue/engine/config.py +18 -25
- monoco/features/issue/engine/machine.py +72 -39
- monoco/features/issue/engine/models.py +4 -2
- monoco/features/issue/linter.py +287 -157
- monoco/features/issue/lsp/definition.py +26 -19
- monoco/features/issue/migration.py +45 -34
- monoco/features/issue/models.py +29 -13
- monoco/features/issue/monitor.py +24 -8
- monoco/features/issue/resources/en/SKILL.md +6 -2
- monoco/features/issue/validator.py +383 -208
- monoco/features/skills/__init__.py +0 -1
- monoco/features/skills/core.py +24 -18
- monoco/features/spike/adapter.py +4 -5
- monoco/features/spike/commands.py +51 -38
- monoco/features/spike/core.py +24 -16
- monoco/main.py +34 -21
- {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.0.dist-info}/METADATA +1 -1
- monoco_toolkit-0.3.0.dist-info/RECORD +84 -0
- monoco_toolkit-0.2.8.dist-info/RECORD +0 -83
- {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.0.dist-info}/WHEEL +0 -0
- {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.0.dist-info}/entry_points.txt +0 -0
- {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.0.dist-info}/licenses/LICENSE +0 -0
monoco/features/issue/linter.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List, Optional
 from pathlib import Path
 from rich.console import Console
 from rich.table import Table
@@ -11,6 +11,7 @@ from monoco.core.lsp import Diagnostic, DiagnosticSeverity
 
 console = Console()
 
+
 def check_environment_policy(project_root: Path):
     """
     Guardrail: Prevent direct modifications on protected branches (main/master).
@@ -26,28 +27,33 @@ def check_environment_policy(project_root: Path):
         # Check if dirty (uncommitted changes)
         changed_files = git.get_git_status(project_root)
         if changed_files:
-            console.print(
-            console.print(
+            console.print("\n[bold red]🛑 Environment Policy Violation[/bold red]")
+            console.print(
+                f"You are modifying code directly on protected branch: [bold cyan]{current_branch}[/bold cyan]"
+            )
             console.print(f"Found {len(changed_files)} uncommitted changes.")
-            console.print(
-
-
-            console.print(
+            console.print(
+                "[yellow]Action Required:[/yellow] Please stash your changes and switch to a feature branch."
+            )
+            console.print(" > git stash")
+            console.print(" > monoco issue start <ID> --branch")
+            console.print(" > git stash pop")
             raise typer.Exit(code=1)
     except Exception:
         # Fail safe: Do not block linting if git check fails unexpectedly
         pass
 
+
 def check_integrity(issues_root: Path, recursive: bool = False) -> List[Diagnostic]:
     """
     Verify the integrity of the Issues directory using LSP Validator.
     """
     diagnostics = []
     validator = IssueValidator(issues_root)
-
+
     all_issue_ids = set()
     all_issues = []
-
+
     # 1. Collection Phase (Build Index)
     # Helper to collect issues from a project
     def collect_project_issues(project_issues_root: Path, project_name: str = "local"):
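The expanded guardrail above now names the violation, counts the uncommitted changes, and spells out a stash → branch → pop recovery path before exiting with code 1. As a self-contained sketch of the same pattern — using plain `git` subprocess calls, since monoco's internal `git` helpers (`get_git_status`, and whatever populates `current_branch`) are only hinted at in this diff:

```python
# Minimal sketch of the guardrail pattern, not monoco's actual implementation.
import subprocess
from pathlib import Path


def _git(root: Path, *args: str) -> str:
    out = subprocess.run(
        ["git", "-C", str(root), *args],
        capture_output=True, text=True, check=True,
    )
    return out.stdout.strip()


def check_environment_policy(root: Path) -> bool:
    """Return False when linting should be blocked (dirty protected branch)."""
    try:
        branch = _git(root, "rev-parse", "--abbrev-ref", "HEAD")
        dirty = [line for line in _git(root, "status", "--porcelain").splitlines() if line]
        if branch in {"main", "master"} and dirty:
            return False  # the real code prints guidance and raises typer.Exit(1)
    except Exception:
        pass  # fail open: a broken git context must never block linting
    return True
```

The bare `except Exception: pass` mirrors the original's deliberate fail-open choice.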
@@ -66,16 +72,17 @@ def check_integrity(issues_root: Path, recursive: bool = False) -> List[Diagnostic]:
             if meta:
                 local_id = meta.id
                 full_id = f"{project_name}::{local_id}"
-
+
                 all_issue_ids.add(local_id)
                 all_issue_ids.add(full_id)
-
+
                 project_issues.append((f, meta))
         return project_issues
 
     from monoco.core.config import get_config
+
     conf = get_config(str(issues_root.parent))
-
+
     # Identify local project name
     local_project_name = "local"
     if conf and conf.project and conf.project.name:
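Note the dual indexing in the hunk above: each issue is registered under both its bare ID and a namespaced `{project}::{id}` form, so references resolve with or without a project qualifier. A tiny sketch of the convention (the IDs are invented for illustration):

```python
# Sketch of the two-form ID index inferred from the collection phase above.
all_issue_ids: set[str] = set()

for project_name, local_id in [("local", "FEAT-001"), ("webapp", "FIX-042")]:
    all_issue_ids.add(local_id)                       # bare form
    all_issue_ids.add(f"{project_name}::{local_id}")  # namespaced form

# A reference in either spelling now validates against the same index.
assert "FIX-042" in all_issue_ids
assert "webapp::FIX-042" in all_issue_ids
```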
@@ -84,183 +91,217 @@ def check_integrity(issues_root: Path, recursive: bool = False) -> List[Diagnostic]:
     # Find Topmost Workspace Root
     workspace_root = issues_root.parent
     for parent in [workspace_root] + list(workspace_root.parents):
-        if (parent / ".monoco" / "workspace.yaml").exists() or (
+        if (parent / ".monoco" / "workspace.yaml").exists() or (
+            parent / ".monoco" / "project.yaml"
+        ).exists():
             workspace_root = parent
-
+
     # Collect from local issues_root
     all_issues.extend(collect_project_issues(issues_root, local_project_name))
-
+
     if recursive:
         try:
             # Re-read config from workspace root to get all members
             ws_conf = get_config(str(workspace_root))
-
+
             # Index Root project if different from current
             if workspace_root != issues_root.parent:
                 root_issues_dir = workspace_root / "Issues"
                 if root_issues_dir.exists():
-                    all_issues.extend(
+                    all_issues.extend(
+                        collect_project_issues(
+                            root_issues_dir, ws_conf.project.name.lower()
+                        )
+                    )
 
             # Index all members
             for member_name, rel_path in ws_conf.project.members.items():
                 member_root = (workspace_root / rel_path).resolve()
                 member_issues_dir = member_root / "Issues"
                 if member_issues_dir.exists() and member_issues_dir != issues_root:
-                    all_issues.extend(
+                    all_issues.extend(
+                        collect_project_issues(member_issues_dir, member_name.lower())
+                    )
         except Exception:
             pass
 
     # 2. Validation Phase
     for path, meta in all_issues:
-        content = path.read_text()
-
+        content = path.read_text()  # Re-read content for validation
+
         # A. Run Core Validator
         file_diagnostics = validator.validate(meta, content, all_issue_ids)
-
+
         # Add context to diagnostics (Path)
         for d in file_diagnostics:
-
-
-
-
+            d.source = f"{meta.id}"  # Use ID as source context
+            d.data = {"path": path}  # Attach path for potential fixers
+            diagnostics.append(d)
+
     return diagnostics
 
 
-def run_lint(issues_root: Path, recursive: bool = False, fix: bool = False, format: str = "table"):
+def run_lint(
+    issues_root: Path,
+    recursive: bool = False,
+    fix: bool = False,
+    format: str = "table",
+    file_paths: Optional[List[str]] = None,
+):
     """
     Run lint with optional auto-fix and format selection.
-
+
     Args:
         issues_root: Root directory of issues
         recursive: Recursively scan workspace members
         fix: Apply auto-fixes
         format: Output format (table, json)
-
+        file_paths: Optional list of paths to files to validate (LSP/Pre-commit mode)
     """
     # 0. Environment Policy Check (Guardrail)
     # We assume issues_root.parent is the project root or close enough for git context
     check_environment_policy(issues_root.parent)
 
-  … (44 removed lines; their content is not rendered in the source diff)
+    diagnostics = []
+
+    # File list mode (for LSP integration or pre-commit)
+    if file_paths:
+        # Pre-scan entire workspace to get all issue IDs for reference validation
+        # We need this context even when validating a single file
+        all_issue_ids = set()
+        for subdir in ["Epics", "Features", "Chores", "Fixes"]:
+            d = issues_root / subdir
+            if d.exists():
+                for status in ["open", "closed", "backlog"]:
+                    status_dir = d / status
+                    if status_dir.exists():
+                        for f in status_dir.rglob("*.md"):
+                            try:
+                                m = core.parse_issue(f)
+                                if m:
+                                    all_issue_ids.add(m.id)
+                            except Exception:
+                                pass
+
+        validator = IssueValidator(issues_root)
+
+        for file_path in file_paths:
+            file = Path(file_path).resolve()
+            if not file.exists():
+                console.print(f"[red]Error:[/red] File not found: {file_path}")
+                continue  # Skip missing files but continue linting others
+
+            # Parse and validate file
+            try:
+                meta = core.parse_issue(file)
+                if not meta:
+                    console.print(
+                        f"[yellow]Warning:[/yellow] Failed to parse issue metadata from {file_path}. Skipping."
+                    )
+                    continue
+
+                content = file.read_text()
+                file_diagnostics = validator.validate(meta, content, all_issue_ids)
+
+                # Add context
+                for d in file_diagnostics:
+                    d.source = meta.id
+                    d.data = {"path": file}
+                    diagnostics.append(d)
+
+            except Exception as e:
+                console.print(
+                    f"[red]Error:[/red] Validation failed for {file_path}: {e}"
+                )
+                # We don't exit here, we collect errors
     else:
         # Full workspace scan mode
         diagnostics = check_integrity(issues_root, recursive)
-
+
     # Filter only Warnings and Errors
     issues = [d for d in diagnostics if d.severity <= DiagnosticSeverity.Warning]
-
+
     if fix:
         fixed_count = 0
         console.print("[dim]Attempting auto-fixes...[/dim]")
-
+
         # We must track processed paths to avoid redundant writes if multiple errors exist
         processed_paths = set()
-
-        for d in issues:
-            path = d.data.get('path')
-            if not path: continue
-
-            # Read fresh content iteration
-            pass
 
         # Group diagnostics by file path
        from collections import defaultdict
+
        file_diags = defaultdict(list)
        for d in issues:
-            if d.data.get(
-                file_diags[d.data[
-
+            if d.data.get("path"):
+                file_diags[d.data["path"]].append(d)
+
        for path, diags in file_diags.items():
            try:
                content = path.read_text()
                new_content = content
                has_changes = False
-
+
                # Parse meta once for the file
                try:
                    meta = core.parse_issue(path)
                except Exception:
-                    console.print(
+                    console.print(
+                        f"[yellow]Skipping fix for {path.name}: Cannot parse metadata[/yellow]"
+                    )
                    continue
 
                # Apply fixes for this file
                for d in diags:
                    if "Structure Error" in d.message:
                        expected_header = f"## {meta.id}: {meta.title}"
-
+
                        # Check if strictly present
                        if expected_header in new_content:
                            continue
-
+
                        # Strategy: Look for existing heading with same ID to replace
                        # Matches: "## ID..." or "## ID ..."
                        # Regex: ^##\s+ID\b.*$
                        # We use meta.id which is safe.
-                        heading_regex = re.compile(
-
+                        heading_regex = re.compile(
+                            rf"^##\s+{re.escape(meta.id)}.*$", re.MULTILINE
+                        )
+
                        match_existing = heading_regex.search(new_content)
-
+
                        if match_existing:
                            # Replace existing incorrect heading
                            # We use sub to replace just the first occurrence
-                            new_content = heading_regex.sub(
+                            new_content = heading_regex.sub(
+                                expected_header, new_content, count=1
+                            )
                            has_changes = True
                        else:
                            # Insert after frontmatter
-                            fm_match = re.search(
+                            fm_match = re.search(
+                                r"^---(.*?)---", new_content, re.DOTALL | re.MULTILINE
+                            )
                            if fm_match:
                                end_pos = fm_match.end()
                                header_block = f"\n\n{expected_header}\n"
-                                new_content =
+                                new_content = (
+                                    new_content[:end_pos]
+                                    + header_block
+                                    + new_content[end_pos:].lstrip()
+                                )
                                has_changes = True
 
-                    if
-
-
-
+                    if (
+                        "Review Requirement: Missing '## Review Comments' section"
+                        in d.message
+                    ):
+                        if "## Review Comments" not in new_content:
+                            new_content = (
+                                new_content.rstrip()
+                                + "\n\n## Review Comments\n\n- [ ] Self-Review\n"
+                            )
+                            has_changes = True
 
                    if "Malformed ID" in d.message:
                        lines = new_content.splitlines()
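The centerpiece of the hunk above is the new `file_paths` parameter: `run_lint` gains a file-list mode for LSP and pre-commit use, while still pre-scanning the whole workspace so that cross-references in a single file can be validated. A hedged sketch of a caller, assuming the import path implied by this file:

```python
# Hypothetical pre-commit style caller for the new file-list mode.
# The import path follows the file shown in this diff; adjust as needed.
from pathlib import Path

from monoco.features.issue.linter import run_lint

run_lint(
    issues_root=Path("Issues"),
    file_paths=["Issues/Features/open/FEAT-001.md"],  # only the staged file
    format="json",  # machine-readable output for tooling
)  # raises typer.Exit(code=1) if errors are found
```

Missing or unparseable files are reported and skipped rather than aborting the run, so a hook can surface every problem in one pass.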
@@ -273,44 +314,56 @@ def run_lint(issues_root: Path, recursive: bool = False, fix: bool = False, format: str = "table"):
                         lines[line_idx] = new_line
                         new_content = "\n".join(lines) + "\n"
                         has_changes = True
-
+
                     if "Tag Check: Missing required context tags" in d.message:
-  … (36 removed lines; their content is not rendered in the source diff)
+                        # Extract missing tags from message
+                        # Message format: "Tag Check: Missing required context tags: #TAG1, #TAG2"
+                        try:
+                            parts = d.message.split(": ")
+                            if len(parts) >= 3:
+                                tags_str = parts[-1]
+                                missing_tags = [t.strip() for t in tags_str.split(",")]
+
+                                # We need to update content via core.update_issue logic effectively
+                                # But we are in a loop potentially with other string edits.
+                                # IMPORTANT: Mixed strategy (Regex vs Object Update) is risky.
+                                # However, tags are in YAML frontmatter.
+                                # Since we might have modified new_content already (string), using core.update_issue on file is dangerous (race condition with memory).
+                                # Better to append to tags list in YAML via regex or yaml parser on new_content.
+
+                                # Parsing Frontmatter from new_content
+                                fm_match = re.search(
+                                    r"^---(.*?)---",
+                                    new_content,
+                                    re.DOTALL | re.MULTILINE,
+                                )
+                                if fm_match:
+                                    import yaml
+
+                                    fm_text = fm_match.group(1)
+                                    data = yaml.safe_load(fm_text) or {}
+                                    current_tags = data.get("tags", [])
+                                    if not isinstance(current_tags, list):
+                                        current_tags = []
+
+                                    # Add missing
+                                    updated_tags = sorted(
+                                        list(set(current_tags) | set(missing_tags))
+                                    )
+                                    data["tags"] = updated_tags
+
+                                    # Dump back
+                                    new_fm_text = yaml.dump(
+                                        data, sort_keys=False, allow_unicode=True
+                                    )
+
+                                    # Replace FM block
+                                    new_content = new_content.replace(
+                                        fm_match.group(1), "\n" + new_fm_text
+                                    )
+                                    has_changes = True
+                        except Exception as ex:
+                            console.print(f"[red]Failed to fix tags: {ex}[/red]")
 
                 if has_changes:
                     path.write_text(new_content)
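The tag fixer added above round-trips the YAML frontmatter instead of string-patching it: parse with `yaml.safe_load`, union in the missing tags, `yaml.dump`, and splice the block back. The same approach in isolation (a sketch with invented frontmatter; the `#TAG` shapes follow the message format quoted in the hunk):

```python
import re

import yaml  # PyYAML

doc = """---
id: FEAT-001
tags: ["#CLI"]
---

## FEAT-001: Example
"""
missing_tags = ["#LINT", "#CLI"]

fm_match = re.search(r"^---(.*?)---", doc, re.DOTALL | re.MULTILINE)
if fm_match:
    data = yaml.safe_load(fm_match.group(1)) or {}
    tags = data.get("tags", [])
    data["tags"] = sorted(set(tags) | set(missing_tags))  # dedupe + merge
    new_fm = yaml.dump(data, sort_keys=False, allow_unicode=True)
    # Splice the regenerated body back between the --- fences (first match only).
    doc = doc.replace(fm_match.group(1), "\n" + new_fm, 1)

print(doc)
```

The trade-off the inline comments acknowledge: a YAML round-trip normalizes formatting and drops comments, which is why the domains fix further down prefers plain text insertion.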
@@ -319,27 +372,94 @@ def run_lint(issues_root: Path, recursive: bool = False, fix: bool = False, format: str = "table"):
             except Exception as e:
                 console.print(f"[red]Failed to fix {path.name}: {e}[/red]")
 
+            # Separate Try-Block for Domains Fix to avoid nesting logic too deep
+            try:
+                content = path.read_text()
+                new_content = content
+                has_changes = False
+
+                # Check diagnostics again for this file
+                current_file_diags = file_diags.get(path, [])
+
+                needs_domain_fix = any(
+                    "Missing 'domains' field" in d.message for d in current_file_diags
+                )
+
+                if needs_domain_fix:
+                    # Add 'domains: []' to frontmatter
+                    # We insert it before 'tags:' if possible, or at end of keys
+                    fm_match = re.search(
+                        r"^---(.*?)---", new_content, re.DOTALL | re.MULTILINE
+                    )
+                    if fm_match:
+                        import yaml
+
+                        fm_text = fm_match.group(1)
+                        # We prefer to edit text directly to preserve comments if possible,
+                        # but for adding a key, robust way is ensuring it's in.
+                        pass
+
+                        # Simple Regex Insertion: find "tags:" and insert before it
+                        if "tags:" in fm_text:
+                            new_fm_text = fm_text.replace("tags:", "domains: []\ntags:")
+                            new_content = new_content.replace(
+                                fm_match.group(1), new_fm_text
+                            )
+                            has_changes = True
+                        else:
+                            # Append to end
+                            new_fm_text = fm_text.rstrip() + "\ndomains: []\n"
+                            new_content = new_content.replace(
+                                fm_match.group(1), new_fm_text
+                            )
+                            has_changes = True
+
+                if has_changes:
+                    path.write_text(new_content)
+                    if not any(
+                        path == p for p in processed_paths
+                    ):  # count once per file
+                        fixed_count += 1
+                    processed_paths.add(path)
+                    console.print(f"[dim]Fixed (Domains): {path.name}[/dim]")
+
+            except Exception as e:
+                console.print(f"[red]Failed to fix domains for {path.name}: {e}[/red]")
+
         console.print(f"[green]Applied auto-fixes to {fixed_count} files.[/green]")
-
+
         # Re-run validation to verify
-        if
-
-        file
-        meta = core.parse_issue(file)
-        content = file.read_text()
+        if file_paths:
+            diagnostics = []  # Reset
+            # Re-validate file list
             validator = IssueValidator(issues_root)
-
-
-
-
+            # We assume all_issue_ids is already populated from the first pass if it was needed
+            # But let's be safe and assume we might need to re-scan if IDs changed (unlikely during lint)
+            # For simplicity, we reuse the validator instance but might need fresh content
+
+            for file_path in file_paths:
+                file = Path(file_path).resolve()
+                if not file.exists():
+                    continue
+
+                try:
+                    meta = core.parse_issue(file)
+                    content = file.read_text()
+                    file_diagnostics = validator.validate(meta, content, all_issue_ids)
+                    for d in file_diagnostics:
+                        d.source = meta.id
+                        d.data = {"path": file}
+                        diagnostics.append(d)
+                except Exception:
+                    pass
         else:
             diagnostics = check_integrity(issues_root, recursive)
         issues = [d for d in diagnostics if d.severity <= DiagnosticSeverity.Warning]
 
     # Output formatting
     if format == "json":
-        import json
         from pydantic import RootModel
+
         # Use RootModel to export a list of models
         print(RootModel(issues).model_dump_json(indent=2))
         if any(d.severity == DiagnosticSeverity.Error for d in issues):
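For the missing `domains` field, the fix above deliberately stays at the text level — inserting `domains: []` before `tags:` when present, appending otherwise — precisely to preserve frontmatter comments that a YAML round-trip would discard. The insertion rule in isolation:

```python
# Sketch of the text-level key insertion used by the domains fix above.
fm_text = "id: FEAT-001\ntags: []\n"

if "tags:" in fm_text:
    # Insert before the tags key (first occurrence only).
    fm_text = fm_text.replace("tags:", "domains: []\ntags:", 1)
else:
    # No tags key: append at the end of the frontmatter body.
    fm_text = fm_text.rstrip() + "\ndomains: []\n"

assert fm_text == "id: FEAT-001\ndomains: []\ntags: []\n"
```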
@@ -347,14 +467,21 @@ def run_lint(issues_root: Path, recursive: bool = False, fix: bool = False, format: str = "table"):
         return
 
     if not issues:
-        console.print(
+        console.print(
+            "[green]✔[/green] Issue integrity check passed. No integrity errors found."
+        )
     else:
-        table = Table(
+        table = Table(
+            title="Issue Integrity Report",
+            show_header=True,
+            header_style="bold magenta",
+            border_style="red",
+        )
         table.add_column("Issue", style="cyan")
         table.add_column("Severity", justify="center")
         table.add_column("Line", justify="right", style="dim")
         table.add_column("Message")
-
+
         for d in issues:
             sev_style = "red" if d.severity == DiagnosticSeverity.Error else "yellow"
             sev_label = "ERROR" if d.severity == DiagnosticSeverity.Error else "WARN"
@@ -363,15 +490,18 @@ def run_lint(issues_root: Path, recursive: bool = False, fix: bool = False, format: str = "table"):
                 d.source or "Unknown",
                 f"[{sev_style}]{sev_label}[/{sev_style}]",
                 line_str,
-                d.message
+                d.message,
             )
-
+
         console.print(table)
-
+
         if any(d.severity == DiagnosticSeverity.Error for d in issues):
-            console.print(
+            console.print(
+                "\n[yellow]Tip: Run 'monoco issue lint --fix' to attempt automatic repairs.[/yellow]"
+            )
             raise typer.Exit(code=1)
-
-        if issues:
-            console.print("\n[yellow]Tip: Run 'monoco issue lint --fix' to attempt automatic repairs.[/yellow]")
 
+    if issues:
+        console.print(
+            "\n[yellow]Tip: Run 'monoco issue lint --fix' to attempt automatic repairs.[/yellow]"
+        )
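Taken together, `run_lint` now covers three modes: a full workspace scan, an auto-fix pass, and file-list linting for editor and hook integration. A hedged end-to-end sketch of driving them programmatically — the import path follows this file, and the `typer.Exit` handling mirrors the exits raised in the hunks above:

```python
# Hypothetical programmatic use of the three lint modes this diff touches.
from pathlib import Path

import typer

from monoco.features.issue.linter import run_lint

issues_root = Path("Issues")

try:
    run_lint(issues_root)            # 1. full scan (monoco issue lint)
    run_lint(issues_root, fix=True)  # 2. auto-fix  (monoco issue lint --fix)
    run_lint(issues_root, file_paths=["Issues/Fixes/open/FIX-042.md"])  # 3. file mode
except typer.Exit as exc:
    # run_lint raises typer.Exit(code=1) when errors remain after linting
    print(f"lint failed with exit code {exc.exit_code}")
```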