monoco-toolkit 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monoco/cli/project.py +35 -31
- monoco/cli/workspace.py +26 -16
- monoco/core/agent/__init__.py +0 -2
- monoco/core/agent/action.py +44 -20
- monoco/core/agent/adapters.py +20 -16
- monoco/core/agent/protocol.py +5 -4
- monoco/core/agent/state.py +21 -21
- monoco/core/config.py +90 -33
- monoco/core/execution.py +21 -16
- monoco/core/feature.py +8 -5
- monoco/core/git.py +61 -30
- monoco/core/hooks.py +57 -0
- monoco/core/injection.py +47 -44
- monoco/core/integrations.py +50 -35
- monoco/core/lsp.py +12 -1
- monoco/core/output.py +35 -16
- monoco/core/registry.py +3 -2
- monoco/core/setup.py +190 -124
- monoco/core/skills.py +121 -107
- monoco/core/state.py +12 -10
- monoco/core/sync.py +85 -56
- monoco/core/telemetry.py +10 -6
- monoco/core/workspace.py +26 -19
- monoco/daemon/app.py +123 -79
- monoco/daemon/commands.py +14 -13
- monoco/daemon/models.py +11 -3
- monoco/daemon/reproduce_stats.py +8 -8
- monoco/daemon/services.py +32 -33
- monoco/daemon/stats.py +59 -40
- monoco/features/config/commands.py +38 -25
- monoco/features/i18n/adapter.py +4 -5
- monoco/features/i18n/commands.py +83 -49
- monoco/features/i18n/core.py +94 -54
- monoco/features/issue/adapter.py +6 -7
- monoco/features/issue/commands.py +500 -260
- monoco/features/issue/core.py +504 -293
- monoco/features/issue/domain/lifecycle.py +33 -23
- monoco/features/issue/domain/models.py +71 -38
- monoco/features/issue/domain/parser.py +92 -69
- monoco/features/issue/domain/workspace.py +19 -16
- monoco/features/issue/engine/__init__.py +3 -3
- monoco/features/issue/engine/config.py +18 -25
- monoco/features/issue/engine/machine.py +72 -39
- monoco/features/issue/engine/models.py +4 -2
- monoco/features/issue/linter.py +326 -111
- monoco/features/issue/lsp/definition.py +26 -19
- monoco/features/issue/migration.py +45 -34
- monoco/features/issue/models.py +30 -13
- monoco/features/issue/monitor.py +24 -8
- monoco/features/issue/resources/en/AGENTS.md +5 -0
- monoco/features/issue/resources/en/SKILL.md +30 -2
- monoco/features/issue/resources/zh/AGENTS.md +5 -0
- monoco/features/issue/resources/zh/SKILL.md +26 -1
- monoco/features/issue/validator.py +417 -172
- monoco/features/skills/__init__.py +0 -1
- monoco/features/skills/core.py +24 -18
- monoco/features/spike/adapter.py +4 -5
- monoco/features/spike/commands.py +51 -38
- monoco/features/spike/core.py +24 -16
- monoco/main.py +34 -21
- {monoco_toolkit-0.2.7.dist-info → monoco_toolkit-0.3.0.dist-info}/METADATA +10 -3
- monoco_toolkit-0.3.0.dist-info/RECORD +84 -0
- monoco_toolkit-0.2.7.dist-info/RECORD +0 -83
- {monoco_toolkit-0.2.7.dist-info → monoco_toolkit-0.3.0.dist-info}/WHEEL +0 -0
- {monoco_toolkit-0.2.7.dist-info → monoco_toolkit-0.3.0.dist-info}/entry_points.txt +0 -0
- {monoco_toolkit-0.2.7.dist-info → monoco_toolkit-0.3.0.dist-info}/licenses/LICENSE +0 -0
monoco/features/issue/linter.py
CHANGED
@@ -1,26 +1,59 @@
-from typing import List, Optional
+from typing import List, Optional
 from pathlib import Path
 from rich.console import Console
 from rich.table import Table
 import typer
 import re
-
+from monoco.core import git
 from . import core
 from .validator import IssueValidator
 from monoco.core.lsp import Diagnostic, DiagnosticSeverity

 console = Console()

+
+def check_environment_policy(project_root: Path):
+    """
+    Guardrail: Prevent direct modifications on protected branches (main/master).
+    """
+    # Only enforce if it is a git repo
+    try:
+        if not git.is_git_repo(project_root):
+            return
+
+        current_branch = git.get_current_branch(project_root)
+        # Standard protected branches
+        if current_branch in ["main", "master", "production"]:
+            # Check if dirty (uncommitted changes)
+            changed_files = git.get_git_status(project_root)
+            if changed_files:
+                console.print("\n[bold red]🛑 Environment Policy Violation[/bold red]")
+                console.print(
+                    f"You are modifying code directly on protected branch: [bold cyan]{current_branch}[/bold cyan]"
+                )
+                console.print(f"Found {len(changed_files)} uncommitted changes.")
+                console.print(
+                    "[yellow]Action Required:[/yellow] Please stash your changes and switch to a feature branch."
+                )
+                console.print("  > git stash")
+                console.print("  > monoco issue start <ID> --branch")
+                console.print("  > git stash pop")
+                raise typer.Exit(code=1)
+    except Exception:
+        # Fail safe: Do not block linting if git check fails unexpectedly
+        pass
+
+
 def check_integrity(issues_root: Path, recursive: bool = False) -> List[Diagnostic]:
     """
     Verify the integrity of the Issues directory using LSP Validator.
     """
     diagnostics = []
     validator = IssueValidator(issues_root)
-
+
     all_issue_ids = set()
     all_issues = []
-
+
     # 1. Collection Phase (Build Index)
     # Helper to collect issues from a project
     def collect_project_issues(project_issues_root: Path, project_name: str = "local"):
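The guardrail above is the headline change in this file: linting now refuses to run on a dirty `main`, `master`, or `production` checkout. A minimal sketch of how a caller would observe it, assuming the wheel is installed and the working tree is on `main` with uncommitted changes (`typer.Exit` subclasses click's `Exit`, which carries an `exit_code` attribute):

from pathlib import Path

import typer

from monoco.features.issue.linter import check_environment_policy

try:
    # On a clean tree or a feature branch this returns silently.
    check_environment_policy(Path("."))
except typer.Exit as exc:
    print(f"blocked by environment policy (exit code {exc.exit_code})")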
@@ -39,16 +72,17 @@ def check_integrity(issues_root: Path, recursive: bool = False) -> List[Diagnost
             if meta:
                 local_id = meta.id
                 full_id = f"{project_name}::{local_id}"
-
+
                 all_issue_ids.add(local_id)
                 all_issue_ids.add(full_id)
-
+
                 project_issues.append((f, meta))
         return project_issues

     from monoco.core.config import get_config
+
     conf = get_config(str(issues_root.parent))
-
+
     # Identify local project name
     local_project_name = "local"
     if conf and conf.project and conf.project.name:
@@ -57,179 +91,279 @@ def check_integrity(issues_root: Path, recursive: bool = False) -> List[Diagnost
     # Find Topmost Workspace Root
     workspace_root = issues_root.parent
     for parent in [workspace_root] + list(workspace_root.parents):
-        if (parent / ".monoco" / "workspace.yaml").exists() or (parent / ".monoco" / "project.yaml").exists():
+        if (parent / ".monoco" / "workspace.yaml").exists() or (
+            parent / ".monoco" / "project.yaml"
+        ).exists():
             workspace_root = parent
-
+
     # Collect from local issues_root
     all_issues.extend(collect_project_issues(issues_root, local_project_name))
-
+
     if recursive:
         try:
             # Re-read config from workspace root to get all members
            ws_conf = get_config(str(workspace_root))
-
+
             # Index Root project if different from current
             if workspace_root != issues_root.parent:
                 root_issues_dir = workspace_root / "Issues"
                 if root_issues_dir.exists():
-                    all_issues.extend(collect_project_issues(root_issues_dir, ws_conf.project.name.lower()))
+                    all_issues.extend(
+                        collect_project_issues(
+                            root_issues_dir, ws_conf.project.name.lower()
+                        )
+                    )

             # Index all members
             for member_name, rel_path in ws_conf.project.members.items():
                 member_root = (workspace_root / rel_path).resolve()
                 member_issues_dir = member_root / "Issues"
                 if member_issues_dir.exists() and member_issues_dir != issues_root:
-                    all_issues.extend(collect_project_issues(member_issues_dir, member_name.lower()))
+                    all_issues.extend(
+                        collect_project_issues(member_issues_dir, member_name.lower())
+                    )
         except Exception:
             pass

     # 2. Validation Phase
     for path, meta in all_issues:
-        content = path.read_text()
-
+        content = path.read_text()  # Re-read content for validation
+
         # A. Run Core Validator
         file_diagnostics = validator.validate(meta, content, all_issue_ids)
-
+
         # Add context to diagnostics (Path)
         for d in file_diagnostics:
-            d.source = f"{meta.id}"
-            d.data = {"path": path}
-            diagnostics.append(d)
-
+            d.source = f"{meta.id}"  # Use ID as source context
+            d.data = {"path": path}  # Attach path for potential fixers
+            diagnostics.append(d)
+
     return diagnostics


-def run_lint(issues_root: Path, recursive: bool = False, fix: bool = False, format: str = "table"):
+def run_lint(
+    issues_root: Path,
+    recursive: bool = False,
+    fix: bool = False,
+    format: str = "table",
+    file_paths: Optional[List[str]] = None,
+):
     """
     Run lint with optional auto-fix and format selection.
-
+
     Args:
         issues_root: Root directory of issues
         recursive: Recursively scan workspace members
         fix: Apply auto-fixes
         format: Output format (table, json)
-
+        file_paths: Optional list of paths to files to validate (LSP/Pre-commit mode)
     """
-    # [old lines 113-156, the previous implementation; only a leading "#" survived the extracted view]
+    # 0. Environment Policy Check (Guardrail)
+    # We assume issues_root.parent is the project root or close enough for git context
+    check_environment_policy(issues_root.parent)
+
+    diagnostics = []
+
+    # File list mode (for LSP integration or pre-commit)
+    if file_paths:
+        # Pre-scan entire workspace to get all issue IDs for reference validation
+        # We need this context even when validating a single file
+        all_issue_ids = set()
+        for subdir in ["Epics", "Features", "Chores", "Fixes"]:
+            d = issues_root / subdir
+            if d.exists():
+                for status in ["open", "closed", "backlog"]:
+                    status_dir = d / status
+                    if status_dir.exists():
+                        for f in status_dir.rglob("*.md"):
+                            try:
+                                m = core.parse_issue(f)
+                                if m:
+                                    all_issue_ids.add(m.id)
+                            except Exception:
+                                pass
+
+        validator = IssueValidator(issues_root)
+
+        for file_path in file_paths:
+            file = Path(file_path).resolve()
+            if not file.exists():
+                console.print(f"[red]Error:[/red] File not found: {file_path}")
+                continue  # Skip missing files but continue linting others
+
+            # Parse and validate file
+            try:
+                meta = core.parse_issue(file)
+                if not meta:
+                    console.print(
+                        f"[yellow]Warning:[/yellow] Failed to parse issue metadata from {file_path}. Skipping."
+                    )
+                    continue
+
+                content = file.read_text()
+                file_diagnostics = validator.validate(meta, content, all_issue_ids)
+
+                # Add context
+                for d in file_diagnostics:
+                    d.source = meta.id
+                    d.data = {"path": file}
+                    diagnostics.append(d)
+
+            except Exception as e:
+                console.print(
+                    f"[red]Error:[/red] Validation failed for {file_path}: {e}"
+                )
+                # We don't exit here, we collect errors
     else:
         # Full workspace scan mode
         diagnostics = check_integrity(issues_root, recursive)
-
+
     # Filter only Warnings and Errors
     issues = [d for d in diagnostics if d.severity <= DiagnosticSeverity.Warning]
-
+
     if fix:
         fixed_count = 0
         console.print("[dim]Attempting auto-fixes...[/dim]")
-
+
         # We must track processed paths to avoid redundant writes if multiple errors exist
         processed_paths = set()
-
-        for d in issues:
-            path = d.data.get('path')
-            if not path: continue
-
-            # Read fresh content iteration
-            pass

         # Group diagnostics by file path
         from collections import defaultdict
+
         file_diags = defaultdict(list)
         for d in issues:
-            if d.data.get('path'):
-                file_diags[d.data['path']].append(d)
-
+            if d.data.get("path"):
+                file_diags[d.data["path"]].append(d)
+
         for path, diags in file_diags.items():
             try:
                 content = path.read_text()
                 new_content = content
                 has_changes = False
-
+
                 # Parse meta once for the file
                 try:
                     meta = core.parse_issue(path)
                 except Exception:
-                    console.print(f"[yellow]Skipping fix for {path.name}: Cannot parse metadata[/yellow]")
+                    console.print(
+                        f"[yellow]Skipping fix for {path.name}: Cannot parse metadata[/yellow]"
+                    )
                     continue

                 # Apply fixes for this file
                 for d in diags:
                     if "Structure Error" in d.message:
                         expected_header = f"## {meta.id}: {meta.title}"
-
+
                         # Check if strictly present
                         if expected_header in new_content:
                             continue
-
+
                         # Strategy: Look for existing heading with same ID to replace
                         # Matches: "## ID..." or "## ID ..."
                         # Regex: ^##\s+ID\b.*$
                         # We use meta.id which is safe.
-                        heading_regex = re.compile(
-                            rf"^##\s+{re.escape(meta.id)}.*$", re.MULTILINE)
+                        heading_regex = re.compile(
+                            rf"^##\s+{re.escape(meta.id)}.*$", re.MULTILINE
+                        )
+
                         match_existing = heading_regex.search(new_content)
-
+
                         if match_existing:
                             # Replace existing incorrect heading
                             # We use sub to replace just the first occurrence
-                            new_content = heading_regex.sub(expected_header, new_content, count=1)
+                            new_content = heading_regex.sub(
+                                expected_header, new_content, count=1
+                            )
                             has_changes = True
                         else:
                             # Insert after frontmatter
-                            fm_match = re.search(r"^---(.*?)---", new_content, re.DOTALL | re.MULTILINE)
+                            fm_match = re.search(
+                                r"^---(.*?)---", new_content, re.DOTALL | re.MULTILINE
+                            )
                             if fm_match:
                                 end_pos = fm_match.end()
                                 header_block = f"\n\n{expected_header}\n"
-                                new_content = new_content[:end_pos] + header_block + new_content[end_pos:].lstrip()
+                                new_content = (
+                                    new_content[:end_pos]
+                                    + header_block
+                                    + new_content[end_pos:].lstrip()
+                                )
+                                has_changes = True
+
+                    if (
+                        "Review Requirement: Missing '## Review Comments' section"
+                        in d.message
+                    ):
+                        if "## Review Comments" not in new_content:
+                            new_content = (
+                                new_content.rstrip()
+                                + "\n\n## Review Comments\n\n- [ ] Self-Review\n"
+                            )
+                            has_changes = True
+
+                    if "Malformed ID" in d.message:
+                        lines = new_content.splitlines()
+                        if d.range and d.range.start.line < len(lines):
+                            line_idx = d.range.start.line
+                            line = lines[line_idx]
+                            # Remove # from quoted strings or raw values
+                            new_line = line.replace("'#", "'").replace('"#', '"')
+                            if new_line != line:
+                                lines[line_idx] = new_line
+                                new_content = "\n".join(lines) + "\n"
                                 has_changes = True

-                    if "Review Requirement: Missing '## Review Comments' section" in d.message:
-                        if "## Review Comments" not in new_content:
-                            new_content = new_content.rstrip() + "\n\n## Review Comments\n\n- [ ] Self-Review\n"
-                            has_changes = True
+                    if "Tag Check: Missing required context tags" in d.message:
+                        # Extract missing tags from message
+                        # Message format: "Tag Check: Missing required context tags: #TAG1, #TAG2"
+                        try:
+                            parts = d.message.split(": ")
+                            if len(parts) >= 3:
+                                tags_str = parts[-1]
+                                missing_tags = [t.strip() for t in tags_str.split(",")]
+
+                                # We need to update content via core.update_issue logic effectively
+                                # But we are in a loop potentially with other string edits.
+                                # IMPORTANT: Mixed strategy (Regex vs Object Update) is risky.
+                                # However, tags are in YAML frontmatter.
+                                # Since we might have modified new_content already (string), using core.update_issue on file is dangerous (race condition with memory).
+                                # Better to append to tags list in YAML via regex or yaml parser on new_content.
+
+                                # Parsing Frontmatter from new_content
+                                fm_match = re.search(
+                                    r"^---(.*?)---",
+                                    new_content,
+                                    re.DOTALL | re.MULTILINE,
+                                )
+                                if fm_match:
+                                    import yaml
+
+                                    fm_text = fm_match.group(1)
+                                    data = yaml.safe_load(fm_text) or {}
+                                    current_tags = data.get("tags", [])
+                                    if not isinstance(current_tags, list):
+                                        current_tags = []
+
+                                    # Add missing
+                                    updated_tags = sorted(
+                                        list(set(current_tags) | set(missing_tags))
+                                    )
+                                    data["tags"] = updated_tags
+
+                                    # Dump back
+                                    new_fm_text = yaml.dump(
+                                        data, sort_keys=False, allow_unicode=True
+                                    )
+
+                                    # Replace FM block
+                                    new_content = new_content.replace(
+                                        fm_match.group(1), "\n" + new_fm_text
+                                    )
+                                    has_changes = True
+                        except Exception as ex:
+                            console.print(f"[red]Failed to fix tags: {ex}[/red]")

                 if has_changes:
                     path.write_text(new_content)
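The tag auto-fix above deliberately edits the YAML frontmatter inside the in-memory new_content string rather than calling core.update_issue against the file, since earlier fixes in the same loop may already have modified the string. A self-contained sketch of that merge strategy using PyYAML; the issue body and tag names below are made-up placeholders, not from the package:

import re

import yaml

new_content = """---
id: FEAT-001
tags: [cli]
---

## FEAT-001: Example
"""
missing_tags = ["#SCOPE", "#TEAM"]

fm_match = re.search(r"^---(.*?)---", new_content, re.DOTALL | re.MULTILINE)
if fm_match:
    data = yaml.safe_load(fm_match.group(1)) or {}
    tags = data.get("tags", [])
    if not isinstance(tags, list):
        tags = []
    # Union the existing tags with the missing ones, as the fixer does.
    data["tags"] = sorted(set(tags) | set(missing_tags))
    new_fm = yaml.dump(data, sort_keys=False, allow_unicode=True)
    new_content = new_content.replace(fm_match.group(1), "\n" + new_fm)

print(new_content)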
@@ -238,27 +372,94 @@ def run_lint(issues_root: Path, recursive: bool = False, fix: bool = False, form
             except Exception as e:
                 console.print(f"[red]Failed to fix {path.name}: {e}[/red]")

+            # Separate Try-Block for Domains Fix to avoid nesting logic too deep
+            try:
+                content = path.read_text()
+                new_content = content
+                has_changes = False
+
+                # Check diagnostics again for this file
+                current_file_diags = file_diags.get(path, [])
+
+                needs_domain_fix = any(
+                    "Missing 'domains' field" in d.message for d in current_file_diags
+                )
+
+                if needs_domain_fix:
+                    # Add 'domains: []' to frontmatter
+                    # We insert it before 'tags:' if possible, or at end of keys
+                    fm_match = re.search(
+                        r"^---(.*?)---", new_content, re.DOTALL | re.MULTILINE
+                    )
+                    if fm_match:
+                        import yaml
+
+                        fm_text = fm_match.group(1)
+                        # We prefer to edit text directly to preserve comments if possible,
+                        # but for adding a key, robust way is ensuring it's in.
+                        pass
+
+                        # Simple Regex Insertion: find "tags:" and insert before it
+                        if "tags:" in fm_text:
+                            new_fm_text = fm_text.replace("tags:", "domains: []\ntags:")
+                            new_content = new_content.replace(
+                                fm_match.group(1), new_fm_text
+                            )
+                            has_changes = True
+                        else:
+                            # Append to end
+                            new_fm_text = fm_text.rstrip() + "\ndomains: []\n"
+                            new_content = new_content.replace(
+                                fm_match.group(1), new_fm_text
+                            )
+                            has_changes = True
+
+                if has_changes:
+                    path.write_text(new_content)
+                    if not any(
+                        path == p for p in processed_paths
+                    ):  # count once per file
+                        fixed_count += 1
+                    processed_paths.add(path)
+                    console.print(f"[dim]Fixed (Domains): {path.name}[/dim]")
+
+            except Exception as e:
+                console.print(f"[red]Failed to fix domains for {path.name}: {e}[/red]")
+
         console.print(f"[green]Applied auto-fixes to {fixed_count} files.[/green]")
-
+
         # Re-run validation to verify
-        if … [old line 244, truncated in the extracted view]
-        [old line 245, not captured in the extracted view]
-        file … [old line 246, truncated in the extracted view]
-        meta = core.parse_issue(file)
-        content = file.read_text()
+        if file_paths:
+            diagnostics = []  # Reset
+            # Re-validate file list
             validator = IssueValidator(issues_root)
-            [old lines 250-253, not captured in the extracted view]
+            # We assume all_issue_ids is already populated from the first pass if it was needed
+            # But let's be safe and assume we might need to re-scan if IDs changed (unlikely during lint)
+            # For simplicity, we reuse the validator instance but might need fresh content
+
+            for file_path in file_paths:
+                file = Path(file_path).resolve()
+                if not file.exists():
+                    continue
+
+                try:
+                    meta = core.parse_issue(file)
+                    content = file.read_text()
+                    file_diagnostics = validator.validate(meta, content, all_issue_ids)
+                    for d in file_diagnostics:
+                        d.source = meta.id
+                        d.data = {"path": file}
+                        diagnostics.append(d)
+                except Exception:
+                    pass
         else:
             diagnostics = check_integrity(issues_root, recursive)
         issues = [d for d in diagnostics if d.severity <= DiagnosticSeverity.Warning]

         # Output formatting
         if format == "json":
-            import json
             from pydantic import RootModel
+
             # Use RootModel to export a list of models
             print(RootModel(issues).model_dump_json(indent=2))
             if any(d.severity == DiagnosticSeverity.Error for d in issues):
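The JSON branch above drops an unused json import and serialises diagnostics through pydantic's RootModel. A small sketch of the same pattern with a stand-in model; the Diag class below is illustrative, while the real code serialises monoco.core.lsp.Diagnostic objects:

from typing import List

from pydantic import BaseModel, RootModel


class Diag(BaseModel):  # stand-in for the package's Diagnostic model
    source: str
    severity: int
    message: str


issues = [Diag(source="FEAT-001", severity=2, message="Missing 'domains' field")]

# RootModel wraps the bare list so the whole collection dumps as one JSON array.
print(RootModel[List[Diag]](issues).model_dump_json(indent=2))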
@@ -266,14 +467,21 @@ def run_lint(issues_root: Path, recursive: bool = False, fix: bool = False, form
             return

     if not issues:
-        console.print("[green]✔[/green] Issue integrity check passed. No integrity errors found.")
+        console.print(
+            "[green]✔[/green] Issue integrity check passed. No integrity errors found."
+        )
     else:
-        table = Table(title="Issue Integrity Report", show_header=True, header_style="bold magenta", border_style="red")
+        table = Table(
+            title="Issue Integrity Report",
+            show_header=True,
+            header_style="bold magenta",
+            border_style="red",
+        )
         table.add_column("Issue", style="cyan")
         table.add_column("Severity", justify="center")
         table.add_column("Line", justify="right", style="dim")
         table.add_column("Message")
-
+
         for d in issues:
             sev_style = "red" if d.severity == DiagnosticSeverity.Error else "yellow"
             sev_label = "ERROR" if d.severity == DiagnosticSeverity.Error else "WARN"
@@ -282,11 +490,18 @@ def run_lint(issues_root: Path, recursive: bool = False, fix: bool = False, form
                 d.source or "Unknown",
                 f"[{sev_style}]{sev_label}[/{sev_style}]",
                 line_str,
-                d.message
+                d.message,
             )
-
+
         console.print(table)
-
+
         if any(d.severity == DiagnosticSeverity.Error for d in issues):
+            console.print(
+                "\n[yellow]Tip: Run 'monoco issue lint --fix' to attempt automatic repairs.[/yellow]"
+            )
             raise typer.Exit(code=1)

+    if issues:
+        console.print(
+            "\n[yellow]Tip: Run 'monoco issue lint --fix' to attempt automatic repairs.[/yellow]"
+        )
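Taken together, the new file_paths parameter makes run_lint callable per file from a pre-commit hook or an LSP server rather than forcing a full workspace scan. A hypothetical invocation, assuming an Issues/ tree laid out the way the scan loop above expects; the concrete file path is invented:

from pathlib import Path

from monoco.features.issue.linter import run_lint

# Validate just the staged issue file instead of the whole workspace.
# Errors raise typer.Exit(code=1), which pre-commit treats as a failure.
run_lint(
    Path("Issues"),
    fix=False,
    format="table",
    file_paths=["Issues/Features/open/FEAT-001.md"],
)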