monoco-toolkit 0.2.8__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. monoco/cli/project.py +35 -31
  2. monoco/cli/workspace.py +26 -16
  3. monoco/core/agent/__init__.py +0 -2
  4. monoco/core/agent/action.py +44 -20
  5. monoco/core/agent/adapters.py +20 -16
  6. monoco/core/agent/protocol.py +5 -4
  7. monoco/core/agent/state.py +21 -21
  8. monoco/core/config.py +90 -33
  9. monoco/core/execution.py +21 -16
  10. monoco/core/feature.py +8 -5
  11. monoco/core/git.py +61 -30
  12. monoco/core/hooks.py +57 -0
  13. monoco/core/injection.py +47 -44
  14. monoco/core/integrations.py +50 -35
  15. monoco/core/lsp.py +12 -1
  16. monoco/core/output.py +35 -16
  17. monoco/core/registry.py +3 -2
  18. monoco/core/setup.py +190 -124
  19. monoco/core/skills.py +121 -107
  20. monoco/core/state.py +12 -10
  21. monoco/core/sync.py +85 -56
  22. monoco/core/telemetry.py +10 -6
  23. monoco/core/workspace.py +26 -19
  24. monoco/daemon/app.py +123 -79
  25. monoco/daemon/commands.py +14 -13
  26. monoco/daemon/models.py +11 -3
  27. monoco/daemon/reproduce_stats.py +8 -8
  28. monoco/daemon/services.py +32 -33
  29. monoco/daemon/stats.py +59 -40
  30. monoco/features/config/commands.py +38 -25
  31. monoco/features/i18n/adapter.py +4 -5
  32. monoco/features/i18n/commands.py +83 -49
  33. monoco/features/i18n/core.py +94 -54
  34. monoco/features/issue/adapter.py +6 -7
  35. monoco/features/issue/commands.py +468 -272
  36. monoco/features/issue/core.py +419 -312
  37. monoco/features/issue/domain/lifecycle.py +33 -23
  38. monoco/features/issue/domain/models.py +71 -38
  39. monoco/features/issue/domain/parser.py +92 -69
  40. monoco/features/issue/domain/workspace.py +19 -16
  41. monoco/features/issue/engine/__init__.py +3 -3
  42. monoco/features/issue/engine/config.py +18 -25
  43. monoco/features/issue/engine/machine.py +72 -39
  44. monoco/features/issue/engine/models.py +4 -2
  45. monoco/features/issue/linter.py +287 -157
  46. monoco/features/issue/lsp/definition.py +26 -19
  47. monoco/features/issue/migration.py +45 -34
  48. monoco/features/issue/models.py +29 -13
  49. monoco/features/issue/monitor.py +24 -8
  50. monoco/features/issue/resources/en/SKILL.md +6 -2
  51. monoco/features/issue/validator.py +395 -208
  52. monoco/features/skills/__init__.py +0 -1
  53. monoco/features/skills/core.py +24 -18
  54. monoco/features/spike/adapter.py +4 -5
  55. monoco/features/spike/commands.py +51 -38
  56. monoco/features/spike/core.py +24 -16
  57. monoco/main.py +34 -21
  58. {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.1.dist-info}/METADATA +1 -1
  59. monoco_toolkit-0.3.1.dist-info/RECORD +84 -0
  60. monoco_toolkit-0.2.8.dist-info/RECORD +0 -83
  61. {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.1.dist-info}/WHEEL +0 -0
  62. {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.1.dist-info}/entry_points.txt +0 -0
  63. {monoco_toolkit-0.2.8.dist-info → monoco_toolkit-0.3.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,34 +1,35 @@
  import re
- import yaml
- from typing import List, Set, Optional, Dict
+ from typing import List, Set, Optional
  from pathlib import Path

  from monoco.core.lsp import Diagnostic, DiagnosticSeverity, Range, Position
  from monoco.core.config import get_config
  from monoco.features.i18n.core import detect_language
- from .models import IssueMetadata, IssueType
+ from .models import IssueMetadata
  from .domain.parser import MarkdownParser
  from .domain.models import ContentBlock

+
  class IssueValidator:
      """
      Centralized validation logic for Issue Tickets.
      Returns LSP-compatible Diagnostics.
      """
-
+
      def __init__(self, issue_root: Optional[Path] = None):
          self.issue_root = issue_root

-     def validate(self, meta: IssueMetadata, content: str, all_issue_ids: Set[str] = set()) -> List[Diagnostic]:
+     def validate(
+         self, meta: IssueMetadata, content: str, all_issue_ids: Set[str] = set()
+     ) -> List[Diagnostic]:
          diagnostics = []
-
+
          # Parse Content into Blocks (Domain Layer)
          # Handle case where content might be just body (from update_issue) or full file
          if content.startswith("---"):
              try:
                  issue_domain = MarkdownParser.parse(content)
                  blocks = issue_domain.body.blocks
-                 has_frontmatter = True
              except Exception:
                  # Fallback if parser fails (e.g. invalid YAML)
                  # We continue with empty blocks or try partial parsing?
@@ -41,72 +42,81 @@ class IssueValidator:
                      if lines[i].strip() == "---":
                          start_line = i + 1
                          break
-                 blocks = MarkdownParser._parse_blocks(lines[start_line:], start_line_offset=start_line)
-                 has_frontmatter = True
+                 blocks = MarkdownParser._parse_blocks(
+                     lines[start_line:], start_line_offset=start_line
+                 )
          else:
              # Assume content is just body
              lines = content.splitlines()
              blocks = MarkdownParser._parse_blocks(lines, start_line_offset=0)
-             has_frontmatter = False

          # 1. State Matrix Validation
          diagnostics.extend(self._validate_state_matrix(meta, content))
-
+
          # 2. State Requirements (Strict Verification)
          diagnostics.extend(self._validate_state_requirements(meta, blocks))
-
+
          # 3. Structure Consistency (Headings) - Using Blocks
          diagnostics.extend(self._validate_structure_blocks(meta, blocks))
-
+
          # 4. Lifecycle/Integrity (Solution, etc.)
          diagnostics.extend(self._validate_integrity(meta, content))
-
+
          # 5. Reference Integrity
          diagnostics.extend(self._validate_references(meta, content, all_issue_ids))

+         # 5.5 Domain Integrity
+         diagnostics.extend(self._validate_domains(meta, content, all_issue_ids))
+
          # 6. Time Consistency
          diagnostics.extend(self._validate_time_consistency(meta, content))

          # 7. Checkbox Syntax - Using Blocks
          diagnostics.extend(self._validate_checkbox_logic_blocks(blocks))
-
+
          # 8. Language Consistency
          diagnostics.extend(self._validate_language_consistency(meta, content))

          return diagnostics

-     def _validate_language_consistency(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
+     def _validate_language_consistency(
+         self, meta: IssueMetadata, content: str
+     ) -> List[Diagnostic]:
          diagnostics = []
          try:
              config = get_config()
              source_lang = config.i18n.source_lang
-
+
              # Check for language mismatch (specifically zh vs en)
-             if source_lang.lower() == 'zh':
+             if source_lang.lower() == "zh":
                  detected = detect_language(content)
-                 if detected == 'en':
-                     diagnostics.append(self._create_diagnostic(
-                         "Language Mismatch: Project source language is 'zh' but content appears to be 'en'.",
-                         DiagnosticSeverity.Warning
-                     ))
+                 if detected == "en":
+                     diagnostics.append(
+                         self._create_diagnostic(
+                             "Language Mismatch: Project source language is 'zh' but content appears to be 'en'.",
+                             DiagnosticSeverity.Warning,
+                         )
+                     )
          except Exception:
              pass
          return diagnostics

-     def _create_diagnostic(self, message: str, severity: DiagnosticSeverity, line: int = 0) -> Diagnostic:
+     def _create_diagnostic(
+         self, message: str, severity: DiagnosticSeverity, line: int = 0
+     ) -> Diagnostic:
          """Helper to create a diagnostic object."""
          return Diagnostic(
              range=Range(
                  start=Position(line=line, character=0),
-                 end=Position(line=line, character=100) # Arbitrary end
+                 end=Position(line=line, character=100),  # Arbitrary end
              ),
              severity=severity,
-             message=message
+             message=message,
          )

      def _get_field_line(self, content: str, field_name: str) -> int:
          """Helper to find the line number of a field in the front matter."""
-         lines = content.split('\n')
+         lines = content.split("\n")
          in_fm = False
          for i, line in enumerate(lines):
              stripped = line.strip()
@@ -115,51 +125,97 @@ class IssueValidator:
                      in_fm = True
                      continue
                  else:
-                     break # End of FM
+                     break  # End of FM
              if in_fm:
                  # Match "field:", "field :", or "field: value"
                  if re.match(rf"^{re.escape(field_name)}\s*:", stripped):
                      return i
          return 0

-     def _validate_state_matrix(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
+     def _validate_state_matrix(
+         self, meta: IssueMetadata, content: str
+     ) -> List[Diagnostic]:
          diagnostics = []
-
+
          # Check based on parsed metadata (now that auto-correction is disabled)
          if meta.status == "closed" and meta.stage != "done":
              line = self._get_field_line(content, "status")
-             diagnostics.append(self._create_diagnostic(
-                 f"State Mismatch: Closed issues must be in 'Done' stage (found: {meta.stage if meta.stage else 'None'})",
-                 DiagnosticSeverity.Error,
-                 line=line
-             ))
-
+             diagnostics.append(
+                 self._create_diagnostic(
+                     f"State Mismatch: Closed issues must be in 'Done' stage (found: {meta.stage if meta.stage else 'None'})",
+                     DiagnosticSeverity.Error,
+                     line=line,
+                 )
+             )
+
          if meta.status == "backlog" and meta.stage != "freezed":
              line = self._get_field_line(content, "status")
-             diagnostics.append(self._create_diagnostic(
-                 f"State Mismatch: Backlog issues must be in 'Freezed' stage (found: {meta.stage if meta.stage else 'None'})",
-                 DiagnosticSeverity.Error,
-                 line=line
-             ))
+             diagnostics.append(
+                 self._create_diagnostic(
+                     f"State Mismatch: Backlog issues must be in 'Freezed' stage (found: {meta.stage if meta.stage else 'None'})",
+                     DiagnosticSeverity.Error,
+                     line=line,
+                 )
+             )

          return diagnostics

-     def _validate_state_requirements(self, meta: IssueMetadata, blocks: List[ContentBlock]) -> List[Diagnostic]:
+     def _validate_state_requirements(
+         self, meta: IssueMetadata, blocks: List[ContentBlock]
+     ) -> List[Diagnostic]:
          diagnostics = []
-
+
          # 1. Map Blocks to Sections
          sections = {"tasks": [], "ac": [], "review": []}
          current_section = None
-
+
          for block in blocks:
              if block.type == "heading":
                  title = block.content.strip().lower()
                  # Parse title to identify sections (supporting Chinese and English synonyms)
-                 if any(kw in title for kw in ["technical tasks", "工作包", "技术任务", "key deliverables", "关键交付", "重点工作", "子功能", "子故事", "child features", "stories", "需求", "requirements", "implementation", "实现", "交付", "delivery", "规划", "plan", "tasks", "任务"]):
+                 if any(
+                     kw in title
+                     for kw in [
+                         "technical tasks",
+                         "工作包",
+                         "技术任务",
+                         "key deliverables",
+                         "关键交付",
+                         "重点工作",
+                         "子功能",
+                         "子故事",
+                         "child features",
+                         "stories",
+                         "需求",
+                         "requirements",
+                         "implementation",
+                         "实现",
+                         "交付",
+                         "delivery",
+                         "规划",
+                         "plan",
+                         "tasks",
+                         "任务",
+                     ]
+                 ):
                      current_section = "tasks"
-                 elif any(kw in title for kw in ["acceptance criteria", "验收标准", "交付目标", "验收"]):
+                 elif any(
+                     kw in title
+                     for kw in ["acceptance criteria", "验收标准", "交付目标", "验收"]
+                 ):
                      current_section = "ac"
-                 elif any(kw in title for kw in ["review comments", "确认事项", "评审记录", "复盘记录", "review", "评审", "确认"]):
+                 elif any(
+                     kw in title
+                     for kw in [
+                         "review comments",
+                         "确认事项",
+                         "评审记录",
+                         "复盘记录",
+                         "review",
+                         "评审",
+                         "确认",
+                     ]
+                 ):
                      current_section = "review"
                  elif title.startswith("###"):
                      # Subheading: allow continued collection for the current section
@@ -172,284 +228,415 @@ class IssueValidator:

          # 2. Logic: DOING -> Must have defined tasks
          if meta.stage in ["doing", "review", "done"]:
-             if not sections["tasks"]:
-                 # We can't strictly point to a line if section missing, but we can point to top/bottom
-                 # Or just a general error.
-                 diagnostics.append(self._create_diagnostic(
-                     "State Requirement (DOING+): Must define 'Technical Tasks' (at least 1 checkbox).",
-                     DiagnosticSeverity.Warning
-                 ))
+             if not sections["tasks"]:
+                 # We can't strictly point to a line if section missing, but we can point to top/bottom
+                 # Or just a general error.
+                 diagnostics.append(
+                     self._create_diagnostic(
+                         "State Requirement (DOING+): Must define 'Technical Tasks' (at least 1 checkbox).",
+                         DiagnosticSeverity.Warning,
+                     )
+                 )

          # 3. Logic: REVIEW -> Tasks must be Completed ([x]) or Cancelled ([~], [+])
          # No [ ] (ToDo) or [-]/[/] (Doing) allowed.
          if meta.stage in ["review", "done"]:
              for block in sections["tasks"]:
-                 content = block.content.strip()
-                 # Check for explicit illegal states
-                 if re.search(r"-\s*\[\s+\]", content):
-                     diagnostics.append(self._create_diagnostic(
-                         f"State Requirement ({meta.stage.upper()}): Technical Tasks must be resolved. Found Todo [ ]: '{content}'",
-                         DiagnosticSeverity.Error,
-                         line=block.line_start
-                     ))
-                 elif re.search(r"-\s*\[[-\/]]", content):
-                     diagnostics.append(self._create_diagnostic(
-                         f"State Requirement ({meta.stage.upper()}): Technical Tasks must be finished (not Doing). Found Doing [-]: '{content}'",
-                         DiagnosticSeverity.Error,
-                         line=block.line_start
-                     ))
+                 content = block.content.strip()
+                 # Check for explicit illegal states
+                 if re.search(r"-\s*\[\s+\]", content):
+                     diagnostics.append(
+                         self._create_diagnostic(
+                             f"State Requirement ({meta.stage.upper()}): Technical Tasks must be resolved. Found Todo [ ]: '{content}'",
+                             DiagnosticSeverity.Error,
+                             line=block.line_start,
+                         )
+                     )
+                 elif re.search(r"-\s*\[[-\/]]", content):
+                     diagnostics.append(
+                         self._create_diagnostic(
+                             f"State Requirement ({meta.stage.upper()}): Technical Tasks must be finished (not Doing). Found Doing [-]: '{content}'",
+                             DiagnosticSeverity.Error,
+                             line=block.line_start,
+                         )
+                     )

          # 4. Logic: DONE -> AC must be Verified ([x])
          if meta.stage == "done":
-             for block in sections["ac"]:
-                 content = block.content.strip()
-                 if not re.search(r"-\s*\[[xX]\]", content):
-                     diagnostics.append(self._create_diagnostic(
-                         f"State Requirement (DONE): Acceptance Criteria must be passed ([x]). Found: '{content}'",
-                         DiagnosticSeverity.Error,
-                         line=block.line_start
-                     ))
-
-             # 5. Logic: DONE -> Review Checkboxes (if any) must be Resolved ([x] or [~])
-             for block in sections["review"]:
-                 content = block.content.strip()
-                 # Must be [x], [X], [~], [+]
-                 # Therefore [ ], [-], [/] are invalid blocking states
-                 if re.search(r"-\s*\[[\s\-\/]\]", content):
-                     diagnostics.append(self._create_diagnostic(
-                         f"State Requirement (DONE): Actionable Review Comments must be resolved ([x] or [~]). Found: '{content}'",
-                         DiagnosticSeverity.Error,
-                         line=block.line_start
-                     ))
-
+             for block in sections["ac"]:
+                 content = block.content.strip()
+                 if not re.search(r"-\s*\[[xX]\]", content):
+                     diagnostics.append(
+                         self._create_diagnostic(
+                             f"State Requirement (DONE): Acceptance Criteria must be passed ([x]). Found: '{content}'",
+                             DiagnosticSeverity.Error,
+                             line=block.line_start,
+                         )
+                     )
+
+             # 5. Logic: DONE -> Review Checkboxes (if any) must be Resolved ([x] or [~])
+             for block in sections["review"]:
+                 content = block.content.strip()
+                 # Must be [x], [X], [~], [+]
+                 # Therefore [ ], [-], [/] are invalid blocking states
+                 if re.search(r"-\s*\[[\s\-\/]\]", content):
+                     diagnostics.append(
+                         self._create_diagnostic(
+                             f"State Requirement (DONE): Actionable Review Comments must be resolved ([x] or [~]). Found: '{content}'",
+                             DiagnosticSeverity.Error,
+                             line=block.line_start,
+                         )
+                     )
+
          return diagnostics

-     def _validate_structure_blocks(self, meta: IssueMetadata, blocks: List[ContentBlock]) -> List[Diagnostic]:
+     def _validate_structure_blocks(
+         self, meta: IssueMetadata, blocks: List[ContentBlock]
+     ) -> List[Diagnostic]:
          diagnostics = []
-
+
          # 1. Heading check: ## {issue-id}: {issue-title}
          expected_header = f"## {meta.id}: {meta.title}"
          header_found = False
-
+
          # 2. Review Comments Check
          review_header_found = False
          review_content_found = False
-
+
          review_header_index = -1
-
+
          for i, block in enumerate(blocks):
-             if block.type == 'heading':
+             if block.type == "heading":
                  stripped = block.content.strip()
                  if stripped == expected_header:
                      header_found = True
-
+
                  if stripped == "## Review Comments":
                      review_header_found = True
                      review_header_index = i
-
+
          # Check content after review header
          if review_header_found:
              # Check if there are blocks after review_header_index that are NOT empty
              for j in range(review_header_index + 1, len(blocks)):
-                 if blocks[j].type != 'empty':
+                 if blocks[j].type != "empty":
                      review_content_found = True
                      break

          if not header_found:
-             diagnostics.append(self._create_diagnostic(
-                 f"Structure Error: Missing Level 2 Heading '{expected_header}'",
-                 DiagnosticSeverity.Warning
-             ))
-
+             diagnostics.append(
+                 self._create_diagnostic(
+                     f"Structure Error: Missing Level 2 Heading '{expected_header}'",
+                     DiagnosticSeverity.Warning,
+                 )
+             )
+
          if meta.stage in ["review", "done"]:
              if not review_header_found:
-                 diagnostics.append(self._create_diagnostic(
-                     "Review Requirement: Missing '## Review Comments' section.",
-                     DiagnosticSeverity.Error
-                 ))
+                 diagnostics.append(
+                     self._create_diagnostic(
+                         "Review Requirement: Missing '## Review Comments' section.",
+                         DiagnosticSeverity.Error,
+                     )
+                 )
              elif not review_content_found:
-                 diagnostics.append(self._create_diagnostic(
-                     "Review Requirement: '## Review Comments' section is empty.",
-                     DiagnosticSeverity.Error
-                 ))
+                 diagnostics.append(
+                     self._create_diagnostic(
+                         "Review Requirement: '## Review Comments' section is empty.",
+                         DiagnosticSeverity.Error,
+                     )
+                 )
          return diagnostics

-     def _validate_integrity(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
+     def _validate_integrity(
+         self, meta: IssueMetadata, content: str
+     ) -> List[Diagnostic]:
          diagnostics = []
          if meta.status == "closed" and not meta.solution:
              line = self._get_field_line(content, "status")
-             diagnostics.append(self._create_diagnostic(
-                 f"Data Integrity: Closed issue {meta.id} missing 'solution' field.",
-                 DiagnosticSeverity.Error,
-                 line=line
-             ))
-
+             diagnostics.append(
+                 self._create_diagnostic(
+                     f"Data Integrity: Closed issue {meta.id} missing 'solution' field.",
+                     DiagnosticSeverity.Error,
+                     line=line,
+                 )
+             )
+
          # Tags Integrity Check
          # Requirement: tags field must carry parent dependencies and related issue id
          required_tags = set()
-
+
          # Self ID
          required_tags.add(f"#{meta.id}")
-
+
          if meta.parent:
              # Strip potential user # if accidentally added in models, though core stripped it
              # But here we want the tag TO HAVE #
-             p = meta.parent if not meta.parent.startswith("#") else meta.parent[1:]
-             required_tags.add(f"#{p}")
-
+             p = meta.parent if not meta.parent.startswith("#") else meta.parent[1:]
+             required_tags.add(f"#{p}")
+
          for d in meta.dependencies:
-             _d = d if not d.startswith("#") else d[1:]
-             required_tags.add(f"#{_d}")
-
+             _d = d if not d.startswith("#") else d[1:]
+             required_tags.add(f"#{_d}")
+
          for r in meta.related:
-             _r = r if not r.startswith("#") else r[1:]
-             required_tags.add(f"#{_r}")
-
+             _r = r if not r.startswith("#") else r[1:]
+             required_tags.add(f"#{_r}")
+
          current_tags = set(meta.tags) if meta.tags else set()
          missing_tags = required_tags - current_tags
-
+
          if missing_tags:
              line = self._get_field_line(content, "tags")
              # If tags field doesn't exist, line is 0, which is fine
              # We join them for display
              missing_str = ", ".join(sorted(missing_tags))
-             diagnostics.append(self._create_diagnostic(
-                 f"Tag Check: Missing required context tags: {missing_str}",
-                 DiagnosticSeverity.Warning,
-                 line=line
-             ))
-
+             diagnostics.append(
+                 self._create_diagnostic(
+                     f"Tag Check: Missing required context tags: {missing_str}",
+                     DiagnosticSeverity.Warning,
+                     line=line,
+                 )
+             )
+
          return diagnostics
-
-     def _validate_references(self, meta: IssueMetadata, content: str, all_ids: Set[str]) -> List[Diagnostic]:
+
+     def _validate_references(
+         self, meta: IssueMetadata, content: str, all_ids: Set[str]
+     ) -> List[Diagnostic]:
          diagnostics = []
-
+
          # Malformed ID Check
          if meta.parent and meta.parent.startswith("#"):
-             line = self._get_field_line(content, "parent")
-             diagnostics.append(self._create_diagnostic(
-                 f"Malformed ID: Parent '{meta.parent}' should not start with '#'.",
-                 DiagnosticSeverity.Warning,
-                 line=line
-             ))
+             line = self._get_field_line(content, "parent")
+             diagnostics.append(
+                 self._create_diagnostic(
+                     f"Malformed ID: Parent '{meta.parent}' should not start with '#'.",
+                     DiagnosticSeverity.Warning,
+                     line=line,
+                 )
+             )

          if meta.dependencies:
              for dep in meta.dependencies:
                  if dep.startswith("#"):
                      line = self._get_field_line(content, "dependencies")
-                     diagnostics.append(self._create_diagnostic(
-                         f"Malformed ID: Dependency '{dep}' should not start with '#'.",
-                         DiagnosticSeverity.Warning,
-                         line=line
-                     ))
+                     diagnostics.append(
+                         self._create_diagnostic(
+                             f"Malformed ID: Dependency '{dep}' should not start with '#'.",
+                             DiagnosticSeverity.Warning,
+                             line=line,
+                         )
+                     )

          if meta.related:
              for rel in meta.related:
                  if rel.startswith("#"):
                      line = self._get_field_line(content, "related")
-                     diagnostics.append(self._create_diagnostic(
-                         f"Malformed ID: Related '{rel}' should not start with '#'.",
-                         DiagnosticSeverity.Warning,
-                         line=line
-                     ))
+                     diagnostics.append(
+                         self._create_diagnostic(
+                             f"Malformed ID: Related '{rel}' should not start with '#'.",
+                             DiagnosticSeverity.Warning,
+                             line=line,
+                         )
+                     )

          if not all_ids:
              return diagnostics
-
-         if meta.parent and meta.parent not in all_ids and not meta.parent.startswith("#"):
-             line = self._get_field_line(content, "parent")
-             diagnostics.append(self._create_diagnostic(
-                 f"Broken Reference: Parent '{meta.parent}' not found.",
-                 DiagnosticSeverity.Error,
-                 line=line
-             ))
-
+
+         if (
+             meta.parent
+             and meta.parent not in all_ids
+             and not meta.parent.startswith("#")
+         ):
+             line = self._get_field_line(content, "parent")
+             diagnostics.append(
+                 self._create_diagnostic(
+                     f"Broken Reference: Parent '{meta.parent}' not found.",
+                     DiagnosticSeverity.Error,
+                     line=line,
+                 )
+             )
+
          for dep in meta.dependencies:
              if dep not in all_ids:
                  line = self._get_field_line(content, "dependencies")
-                 diagnostics.append(self._create_diagnostic(
-                     f"Broken Reference: Dependency '{dep}' not found.",
-                     DiagnosticSeverity.Error,
-                     line=line
-                 ))
-
+                 diagnostics.append(
+                     self._create_diagnostic(
+                         f"Broken Reference: Dependency '{dep}' not found.",
+                         DiagnosticSeverity.Error,
+                         line=line,
+                     )
+                 )
+
          # Body Reference Check
          # Regex for generic issue ID: (EPIC|FEAT|CHORE|FIX)-\d{4}
          # We scan line by line to get line numbers
-         lines = content.split('\n')
+         lines = content.split("\n")
          # Skip frontmatter for body check to avoid double counting (handled above)
          in_fm = False
          fm_end = 0
          for i, line in enumerate(lines):
-             if line.strip() == '---':
-                 if not in_fm: in_fm = True
-                 else:
+             if line.strip() == "---":
+                 if not in_fm:
+                     in_fm = True
+                 else:
                      fm_end = i
                      break
-
+
          for i, line in enumerate(lines):
-             if i <= fm_end: continue # Skip frontmatter
-
+             if i <= fm_end:
+                 continue  # Skip frontmatter
+
              # Find all matches
              matches = re.finditer(r"\b((?:EPIC|FEAT|CHORE|FIX)-\d{4})\b", line)
              for match in matches:
                  ref_id = match.group(1)
                  if ref_id != meta.id and ref_id not in all_ids:
-                     # Check if it's a namespaced ID? The regex only catches local IDs.
-                     # If users use MON::FEAT-0001, the regex might catch FEAT-0001.
-                     # But all_ids contains full IDs (potentially namespaced).
-                     # Simple logic: if ref_id isn't in all_ids, check if any id ENDS with ref_id
-
-                     found_namespaced = any(known.endswith(f"::{ref_id}") for known in all_ids)
-
-                     if not found_namespaced:
-                         diagnostics.append(self._create_diagnostic(
-                             f"Broken Reference: Issue '{ref_id}' not found.",
-                             DiagnosticSeverity.Warning,
-                             line=i
-                         ))
+                     # Check if it's a namespaced ID? The regex only catches local IDs.
+                     # If users use MON::FEAT-0001, the regex might catch FEAT-0001.
+                     # But all_ids contains full IDs (potentially namespaced).
+                     # Simple logic: if ref_id isn't in all_ids, check if any id ENDS with ref_id
+
+                     found_namespaced = any(
+                         known.endswith(f"::{ref_id}") for known in all_ids
+                     )
+
+                     if not found_namespaced:
+                         diagnostics.append(
+                             self._create_diagnostic(
+                                 f"Broken Reference: Issue '{ref_id}' not found.",
+                                 DiagnosticSeverity.Warning,
+                                 line=i,
+                             )
+                         )
          return diagnostics

-     def _validate_time_consistency(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
+     def _validate_time_consistency(
+         self, meta: IssueMetadata, content: str
+     ) -> List[Diagnostic]:
          diagnostics = []
          c = meta.created_at
          o = meta.opened_at
          u = meta.updated_at
          cl = meta.closed_at
-
+
          created_line = self._get_field_line(content, "created_at")
          opened_line = self._get_field_line(content, "opened_at")
-         updated_line = self._get_field_line(content, "updated_at")
-         closed_line = self._get_field_line(content, "closed_at")

          if o and c > o:
-             diagnostics.append(self._create_diagnostic("Time Travel: created_at > opened_at", DiagnosticSeverity.Warning, line=created_line))
-
+             diagnostics.append(
+                 self._create_diagnostic(
+                     "Time Travel: created_at > opened_at",
+                     DiagnosticSeverity.Warning,
+                     line=created_line,
+                 )
+             )
+
          if u and c > u:
-             diagnostics.append(self._create_diagnostic("Time Travel: created_at > updated_at", DiagnosticSeverity.Warning, line=created_line))
-
+             diagnostics.append(
+                 self._create_diagnostic(
+                     "Time Travel: created_at > updated_at",
+                     DiagnosticSeverity.Warning,
+                     line=created_line,
+                 )
+             )
+
          if cl:
              if c > cl:
-                 diagnostics.append(self._create_diagnostic("Time Travel: created_at > closed_at", DiagnosticSeverity.Error, line=created_line))
+                 diagnostics.append(
+                     self._create_diagnostic(
+                         "Time Travel: created_at > closed_at",
+                         DiagnosticSeverity.Error,
+                         line=created_line,
+                     )
+                 )
              if o and o > cl:
-                 diagnostics.append(self._create_diagnostic("Time Travel: opened_at > closed_at", DiagnosticSeverity.Error, line=opened_line))
+                 diagnostics.append(
+                     self._create_diagnostic(
+                         "Time Travel: opened_at > closed_at",
+                         DiagnosticSeverity.Error,
+                         line=opened_line,
+                     )
+                 )
+
+         return diagnostics
+
+     def _validate_domains(
+         self, meta: IssueMetadata, content: str, all_ids: Set[str] = set()
+     ) -> List[Diagnostic]:
+         diagnostics = []
+         # Check if 'domains' field exists in frontmatter text
+         # We rely on text parsing because Pydantic defaults 'domains' to [] if missing.
+
+         # If line is 0, it might be the first line (rare) or missing.
+         # _get_field_line returns 0 if not found, but also if found at line 0?
+         # Let's check if the field actually exists in text.
+         has_domains_field = False
+         lines = content.splitlines()
+         in_fm = False
+         for i, line_content in enumerate(lines):
+             stripped = line_content.strip()
+             if stripped == "---":
+                 if not in_fm:
+                     in_fm = True
+                 else:
+                     break
+             elif in_fm:
+                 if stripped.startswith("domains:"):
+                     has_domains_field = True
+                     break
+
+         # Governance Maturity Check
+         # Rule: If Epics > 8 or Issues > 50, enforce Domain usage
+         num_issues = len(all_ids)
+         num_epics = len(
+             [i for i in all_ids if "EPIC-" in i]
+         )  # Simple heuristic, ideally check type
+
+         is_mature = num_issues > 50 or num_epics > 8
+
+         if not has_domains_field:
+             if is_mature:
+                 # We report it on line 0 (start of file) or line 1
+                 diagnostics.append(
+                     self._create_diagnostic(
+                         "Governance Maturity: Project scale (Epics>8 or Issues>50) requires 'domains' field in frontmatter.",
+                         DiagnosticSeverity.Warning,
+                         line=0,
+                     )
+                 )

          return diagnostics

-     def _validate_checkbox_logic_blocks(self, blocks: List[ContentBlock]) -> List[Diagnostic]:
+     def _validate_checkbox_logic_blocks(
+         self, blocks: List[ContentBlock]
+     ) -> List[Diagnostic]:
          diagnostics = []
-
+
          for block in blocks:
-             if block.type == 'task_item':
+             if block.type == "task_item":
                  content = block.content.strip()
                  # Syntax Check: - [?]
                  # Added supported chars: /, ~, +
                  match = re.match(r"- \[([ x\-/~+])\]", content)
                  if not match:
                      # Check for Common errors
-                     if re.match(r"- \[.{2,}\]", content): # [xx] or [ ]
-                         diagnostics.append(self._create_diagnostic("Invalid Checkbox: Use single character [ ], [x], [-], [/]", DiagnosticSeverity.Error, block.line_start))
-                     elif re.match(r"- \[([^ x\-/~+])\]", content): # [v], [o]
-                         diagnostics.append(self._create_diagnostic("Invalid Checkbox Status: Use [ ], [x], [/], [~]", DiagnosticSeverity.Error, block.line_start))
-
+                     if re.match(r"- \[.{2,}\]", content):  # [xx] or [ ]
+                         diagnostics.append(
+                             self._create_diagnostic(
+                                 "Invalid Checkbox: Use single character [ ], [x], [-], [/]",
+                                 DiagnosticSeverity.Error,
+                                 block.line_start,
+                             )
+                         )
+                     elif re.match(r"- \[([^ x\-/~+])\]", content):  # [v], [o]
+                         diagnostics.append(
+                             self._create_diagnostic(
+                                 "Invalid Checkbox Status: Use [ ], [x], [/], [~]",
+                                 DiagnosticSeverity.Error,
+                                 block.line_start,
+                             )
+                         )
+
          return diagnostics