monoco-toolkit 0.2.7-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. monoco/cli/project.py +35 -31
  2. monoco/cli/workspace.py +26 -16
  3. monoco/core/agent/__init__.py +0 -2
  4. monoco/core/agent/action.py +44 -20
  5. monoco/core/agent/adapters.py +20 -16
  6. monoco/core/agent/protocol.py +5 -4
  7. monoco/core/agent/state.py +21 -21
  8. monoco/core/config.py +90 -33
  9. monoco/core/execution.py +21 -16
  10. monoco/core/feature.py +8 -5
  11. monoco/core/git.py +61 -30
  12. monoco/core/hooks.py +57 -0
  13. monoco/core/injection.py +47 -44
  14. monoco/core/integrations.py +50 -35
  15. monoco/core/lsp.py +12 -1
  16. monoco/core/output.py +35 -16
  17. monoco/core/registry.py +3 -2
  18. monoco/core/setup.py +190 -124
  19. monoco/core/skills.py +121 -107
  20. monoco/core/state.py +12 -10
  21. monoco/core/sync.py +85 -56
  22. monoco/core/telemetry.py +10 -6
  23. monoco/core/workspace.py +26 -19
  24. monoco/daemon/app.py +123 -79
  25. monoco/daemon/commands.py +14 -13
  26. monoco/daemon/models.py +11 -3
  27. monoco/daemon/reproduce_stats.py +8 -8
  28. monoco/daemon/services.py +32 -33
  29. monoco/daemon/stats.py +59 -40
  30. monoco/features/config/commands.py +38 -25
  31. monoco/features/i18n/adapter.py +4 -5
  32. monoco/features/i18n/commands.py +83 -49
  33. monoco/features/i18n/core.py +94 -54
  34. monoco/features/issue/adapter.py +6 -7
  35. monoco/features/issue/commands.py +500 -260
  36. monoco/features/issue/core.py +504 -293
  37. monoco/features/issue/domain/lifecycle.py +33 -23
  38. monoco/features/issue/domain/models.py +71 -38
  39. monoco/features/issue/domain/parser.py +92 -69
  40. monoco/features/issue/domain/workspace.py +19 -16
  41. monoco/features/issue/engine/__init__.py +3 -3
  42. monoco/features/issue/engine/config.py +18 -25
  43. monoco/features/issue/engine/machine.py +72 -39
  44. monoco/features/issue/engine/models.py +4 -2
  45. monoco/features/issue/linter.py +326 -111
  46. monoco/features/issue/lsp/definition.py +26 -19
  47. monoco/features/issue/migration.py +45 -34
  48. monoco/features/issue/models.py +30 -13
  49. monoco/features/issue/monitor.py +24 -8
  50. monoco/features/issue/resources/en/AGENTS.md +5 -0
  51. monoco/features/issue/resources/en/SKILL.md +30 -2
  52. monoco/features/issue/resources/zh/AGENTS.md +5 -0
  53. monoco/features/issue/resources/zh/SKILL.md +26 -1
  54. monoco/features/issue/validator.py +417 -172
  55. monoco/features/skills/__init__.py +0 -1
  56. monoco/features/skills/core.py +24 -18
  57. monoco/features/spike/adapter.py +4 -5
  58. monoco/features/spike/commands.py +51 -38
  59. monoco/features/spike/core.py +24 -16
  60. monoco/main.py +34 -21
  61. {monoco_toolkit-0.2.7.dist-info → monoco_toolkit-0.3.0.dist-info}/METADATA +10 -3
  62. monoco_toolkit-0.3.0.dist-info/RECORD +84 -0
  63. monoco_toolkit-0.2.7.dist-info/RECORD +0 -83
  64. {monoco_toolkit-0.2.7.dist-info → monoco_toolkit-0.3.0.dist-info}/WHEEL +0 -0
  65. {monoco_toolkit-0.2.7.dist-info → monoco_toolkit-0.3.0.dist-info}/entry_points.txt +0 -0
  66. {monoco_toolkit-0.2.7.dist-info → monoco_toolkit-0.3.0.dist-info}/licenses/LICENSE +0 -0
monoco/features/issue/validator.py
@@ -1,34 +1,35 @@
 import re
-import yaml
-from typing import List, Set, Optional, Dict
+from typing import List, Set, Optional
 from pathlib import Path

 from monoco.core.lsp import Diagnostic, DiagnosticSeverity, Range, Position
 from monoco.core.config import get_config
 from monoco.features.i18n.core import detect_language
-from .models import IssueMetadata, IssueType
+from .models import IssueMetadata
 from .domain.parser import MarkdownParser
 from .domain.models import ContentBlock

+
 class IssueValidator:
     """
     Centralized validation logic for Issue Tickets.
     Returns LSP-compatible Diagnostics.
     """
-
+
     def __init__(self, issue_root: Optional[Path] = None):
         self.issue_root = issue_root

-    def validate(self, meta: IssueMetadata, content: str, all_issue_ids: Set[str] = set()) -> List[Diagnostic]:
+    def validate(
+        self, meta: IssueMetadata, content: str, all_issue_ids: Set[str] = set()
+    ) -> List[Diagnostic]:
         diagnostics = []
-
+
         # Parse Content into Blocks (Domain Layer)
         # Handle case where content might be just body (from update_issue) or full file
         if content.startswith("---"):
             try:
                 issue_domain = MarkdownParser.parse(content)
                 blocks = issue_domain.body.blocks
-                has_frontmatter = True
             except Exception:
                 # Fallback if parser fails (e.g. invalid YAML)
                 # We continue with empty blocks or try partial parsing?
@@ -41,72 +42,81 @@ class IssueValidator:
                     if lines[i].strip() == "---":
                         start_line = i + 1
                         break
-                blocks = MarkdownParser._parse_blocks(lines[start_line:], start_line_offset=start_line)
-                has_frontmatter = True
+                blocks = MarkdownParser._parse_blocks(
+                    lines[start_line:], start_line_offset=start_line
+                )
         else:
             # Assume content is just body
             lines = content.splitlines()
             blocks = MarkdownParser._parse_blocks(lines, start_line_offset=0)
-            has_frontmatter = False

         # 1. State Matrix Validation
         diagnostics.extend(self._validate_state_matrix(meta, content))
-
+
         # 2. State Requirements (Strict Verification)
         diagnostics.extend(self._validate_state_requirements(meta, blocks))
-
+
         # 3. Structure Consistency (Headings) - Using Blocks
         diagnostics.extend(self._validate_structure_blocks(meta, blocks))
-
+
         # 4. Lifecycle/Integrity (Solution, etc.)
         diagnostics.extend(self._validate_integrity(meta, content))
-
+
         # 5. Reference Integrity
         diagnostics.extend(self._validate_references(meta, content, all_issue_ids))

+        # 5.5 Domain Integrity
+        diagnostics.extend(self._validate_domains(meta, content))
+
         # 6. Time Consistency
         diagnostics.extend(self._validate_time_consistency(meta, content))

         # 7. Checkbox Syntax - Using Blocks
         diagnostics.extend(self._validate_checkbox_logic_blocks(blocks))
-
+
         # 8. Language Consistency
         diagnostics.extend(self._validate_language_consistency(meta, content))

         return diagnostics

-    def _validate_language_consistency(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
+    def _validate_language_consistency(
+        self, meta: IssueMetadata, content: str
+    ) -> List[Diagnostic]:
         diagnostics = []
         try:
             config = get_config()
             source_lang = config.i18n.source_lang
-
+
             # Check for language mismatch (specifically zh vs en)
-            if source_lang.lower() == 'zh':
+            if source_lang.lower() == "zh":
                 detected = detect_language(content)
-                if detected == 'en':
-                    diagnostics.append(self._create_diagnostic(
-                        "Language Mismatch: Project source language is 'zh' but content appears to be 'en'.",
-                        DiagnosticSeverity.Warning
-                    ))
+                if detected == "en":
+                    diagnostics.append(
+                        self._create_diagnostic(
+                            "Language Mismatch: Project source language is 'zh' but content appears to be 'en'.",
+                            DiagnosticSeverity.Warning,
+                        )
+                    )
         except Exception:
             pass
         return diagnostics

-    def _create_diagnostic(self, message: str, severity: DiagnosticSeverity, line: int = 0) -> Diagnostic:
+    def _create_diagnostic(
+        self, message: str, severity: DiagnosticSeverity, line: int = 0
+    ) -> Diagnostic:
         """Helper to create a diagnostic object."""
         return Diagnostic(
             range=Range(
                 start=Position(line=line, character=0),
-                end=Position(line=line, character=100) # Arbitrary end
+                end=Position(line=line, character=100), # Arbitrary end
             ),
             severity=severity,
-            message=message
+            message=message,
         )

     def _get_field_line(self, content: str, field_name: str) -> int:
         """Helper to find the line number of a field in the front matter."""
-        lines = content.split('\n')
+        lines = content.split("\n")
         in_fm = False
         for i, line in enumerate(lines):
             stripped = line.strip()
@@ -115,51 +125,101 @@
                     in_fm = True
                     continue
                 else:
-                    break # End of FM
+                    break  # End of FM
             if in_fm:
                 # Match "field:", "field :", or "field: value"
                 if re.match(rf"^{re.escape(field_name)}\s*:", stripped):
                     return i
         return 0

-    def _validate_state_matrix(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
+    def _validate_state_matrix(
+        self, meta: IssueMetadata, content: str
+    ) -> List[Diagnostic]:
         diagnostics = []
-
+
         # Check based on parsed metadata (now that auto-correction is disabled)
         if meta.status == "closed" and meta.stage != "done":
             line = self._get_field_line(content, "status")
-            diagnostics.append(self._create_diagnostic(
-                f"State Mismatch: Closed issues must be in 'Done' stage (found: {meta.stage if meta.stage else 'None'})",
-                DiagnosticSeverity.Error,
-                line=line
-            ))
-
+            diagnostics.append(
+                self._create_diagnostic(
+                    f"State Mismatch: Closed issues must be in 'Done' stage (found: {meta.stage if meta.stage else 'None'})",
+                    DiagnosticSeverity.Error,
+                    line=line,
+                )
+            )
+
         if meta.status == "backlog" and meta.stage != "freezed":
             line = self._get_field_line(content, "status")
-            diagnostics.append(self._create_diagnostic(
-                f"State Mismatch: Backlog issues must be in 'Freezed' stage (found: {meta.stage if meta.stage else 'None'})",
-                DiagnosticSeverity.Error,
-                line=line
-            ))
+            diagnostics.append(
+                self._create_diagnostic(
+                    f"State Mismatch: Backlog issues must be in 'Freezed' stage (found: {meta.stage if meta.stage else 'None'})",
+                    DiagnosticSeverity.Error,
+                    line=line,
+                )
+            )

         return diagnostics

-    def _validate_state_requirements(self, meta: IssueMetadata, blocks: List[ContentBlock]) -> List[Diagnostic]:
+    def _validate_state_requirements(
+        self, meta: IssueMetadata, blocks: List[ContentBlock]
+    ) -> List[Diagnostic]:
         diagnostics = []
-
+
         # 1. Map Blocks to Sections
         sections = {"tasks": [], "ac": [], "review": []}
         current_section = None
-
+
         for block in blocks:
             if block.type == "heading":
                 title = block.content.strip().lower()
-                if "technical tasks" in title:
+                # Parse title to identify sections (supporting Chinese and English synonyms)
+                if any(
+                    kw in title
+                    for kw in [
+                        "technical tasks",
+                        "工作包",
+                        "技术任务",
+                        "key deliverables",
+                        "关键交付",
+                        "重点工作",
+                        "子功能",
+                        "子故事",
+                        "child features",
+                        "stories",
+                        "需求",
+                        "requirements",
+                        "implementation",
+                        "实现",
+                        "交付",
+                        "delivery",
+                        "规划",
+                        "plan",
+                        "tasks",
+                        "任务",
+                    ]
+                ):
                     current_section = "tasks"
-                elif "acceptance criteria" in title:
+                elif any(
+                    kw in title
+                    for kw in ["acceptance criteria", "验收标准", "交付目标", "验收"]
+                ):
                     current_section = "ac"
-                elif "review comments" in title:
+                elif any(
+                    kw in title
+                    for kw in [
+                        "review comments",
+                        "确认事项",
+                        "评审记录",
+                        "复盘记录",
+                        "review",
+                        "评审",
+                        "确认",
+                    ]
+                ):
                     current_section = "review"
+                elif title.startswith("###"):
+                    # Subheading: allow continued collection for the current section
+                    pass
                 else:
                     current_section = None
            elif block.type == "task_item":
@@ -168,218 +228,403 @@

         # 2. Logic: DOING -> Must have defined tasks
         if meta.stage in ["doing", "review", "done"]:
-            if not sections["tasks"]:
-                # We can't strictly point to a line if section missing, but we can point to top/bottom
-                # Or just a general error.
-                diagnostics.append(self._create_diagnostic(
-                    "State Requirement (DOING+): Must define 'Technical Tasks' (at least 1 checkbox).",
-                    DiagnosticSeverity.Warning
-                ))
+            if not sections["tasks"]:
+                # We can't strictly point to a line if section missing, but we can point to top/bottom
+                # Or just a general error.
+                diagnostics.append(
+                    self._create_diagnostic(
+                        "State Requirement (DOING+): Must define 'Technical Tasks' (at least 1 checkbox).",
+                        DiagnosticSeverity.Warning,
+                    )
+                )

         # 3. Logic: REVIEW -> Tasks must be Completed ([x]) or Cancelled ([~], [+])
         # No [ ] (ToDo) or [-]/[/] (Doing) allowed.
         if meta.stage in ["review", "done"]:
             for block in sections["tasks"]:
-                content = block.content.strip()
-                # Check for explicit illegal states
-                if re.search(r"-\s*\[\s+\]", content):
-                    diagnostics.append(self._create_diagnostic(
-                        f"State Requirement ({meta.stage.upper()}): Technical Tasks must be resolved. Found Todo [ ]: '{content}'",
-                        DiagnosticSeverity.Error,
-                        line=block.line_start
-                    ))
-                elif re.search(r"-\s*\[[-\/]]", content):
-                    diagnostics.append(self._create_diagnostic(
-                        f"State Requirement ({meta.stage.upper()}): Technical Tasks must be finished (not Doing). Found Doing [-]: '{content}'",
-                        DiagnosticSeverity.Error,
-                        line=block.line_start
-                    ))
+                content = block.content.strip()
+                # Check for explicit illegal states
+                if re.search(r"-\s*\[\s+\]", content):
+                    diagnostics.append(
+                        self._create_diagnostic(
+                            f"State Requirement ({meta.stage.upper()}): Technical Tasks must be resolved. Found Todo [ ]: '{content}'",
+                            DiagnosticSeverity.Error,
+                            line=block.line_start,
+                        )
+                    )
+                elif re.search(r"-\s*\[[-\/]]", content):
+                    diagnostics.append(
+                        self._create_diagnostic(
+                            f"State Requirement ({meta.stage.upper()}): Technical Tasks must be finished (not Doing). Found Doing [-]: '{content}'",
+                            DiagnosticSeverity.Error,
+                            line=block.line_start,
+                        )
+                    )

         # 4. Logic: DONE -> AC must be Verified ([x])
         if meta.stage == "done":
-            for block in sections["ac"]:
-                content = block.content.strip()
-                if not re.search(r"-\s*\[[xX]\]", content):
-                    diagnostics.append(self._create_diagnostic(
-                        f"State Requirement (DONE): Acceptance Criteria must be passed ([x]). Found: '{content}'",
-                        DiagnosticSeverity.Error,
-                        line=block.line_start
-                    ))
-
-            # 5. Logic: DONE -> Review Checkboxes (if any) must be Resolved ([x] or [~])
-            for block in sections["review"]:
-                content = block.content.strip()
-                # Must be [x], [X], [~], [+]
-                # Therefore [ ], [-], [/] are invalid blocking states
-                if re.search(r"-\s*\[[\s\-\/]\]", content):
-                    diagnostics.append(self._create_diagnostic(
-                        f"State Requirement (DONE): Actionable Review Comments must be resolved ([x] or [~]). Found: '{content}'",
-                        DiagnosticSeverity.Error,
-                        line=block.line_start
-                    ))
-
+            for block in sections["ac"]:
+                content = block.content.strip()
+                if not re.search(r"-\s*\[[xX]\]", content):
+                    diagnostics.append(
+                        self._create_diagnostic(
+                            f"State Requirement (DONE): Acceptance Criteria must be passed ([x]). Found: '{content}'",
+                            DiagnosticSeverity.Error,
+                            line=block.line_start,
+                        )
+                    )
+
+            # 5. Logic: DONE -> Review Checkboxes (if any) must be Resolved ([x] or [~])
+            for block in sections["review"]:
+                content = block.content.strip()
+                # Must be [x], [X], [~], [+]
+                # Therefore [ ], [-], [/] are invalid blocking states
+                if re.search(r"-\s*\[[\s\-\/]\]", content):
+                    diagnostics.append(
+                        self._create_diagnostic(
+                            f"State Requirement (DONE): Actionable Review Comments must be resolved ([x] or [~]). Found: '{content}'",
+                            DiagnosticSeverity.Error,
+                            line=block.line_start,
+                        )
+                    )
+
         return diagnostics

-    def _validate_structure_blocks(self, meta: IssueMetadata, blocks: List[ContentBlock]) -> List[Diagnostic]:
+    def _validate_structure_blocks(
+        self, meta: IssueMetadata, blocks: List[ContentBlock]
+    ) -> List[Diagnostic]:
         diagnostics = []
-
+
         # 1. Heading check: ## {issue-id}: {issue-title}
         expected_header = f"## {meta.id}: {meta.title}"
         header_found = False
-
+
         # 2. Review Comments Check
         review_header_found = False
         review_content_found = False
-
+
         review_header_index = -1
-
+
         for i, block in enumerate(blocks):
-            if block.type == 'heading':
+            if block.type == "heading":
                 stripped = block.content.strip()
                 if stripped == expected_header:
                     header_found = True
-
+
                 if stripped == "## Review Comments":
                     review_header_found = True
                     review_header_index = i
-
+
         # Check content after review header
         if review_header_found:
             # Check if there are blocks after review_header_index that are NOT empty
             for j in range(review_header_index + 1, len(blocks)):
-                if blocks[j].type != 'empty':
+                if blocks[j].type != "empty":
                     review_content_found = True
                     break

         if not header_found:
-            diagnostics.append(self._create_diagnostic(
-                f"Structure Error: Missing Level 2 Heading '{expected_header}'",
-                DiagnosticSeverity.Warning
-            ))
-
+            diagnostics.append(
+                self._create_diagnostic(
+                    f"Structure Error: Missing Level 2 Heading '{expected_header}'",
+                    DiagnosticSeverity.Warning,
+                )
+            )
+
         if meta.stage in ["review", "done"]:
             if not review_header_found:
-                diagnostics.append(self._create_diagnostic(
-                    "Review Requirement: Missing '## Review Comments' section.",
-                    DiagnosticSeverity.Error
-                ))
+                diagnostics.append(
+                    self._create_diagnostic(
+                        "Review Requirement: Missing '## Review Comments' section.",
+                        DiagnosticSeverity.Error,
+                    )
+                )
             elif not review_content_found:
-                diagnostics.append(self._create_diagnostic(
-                    "Review Requirement: '## Review Comments' section is empty.",
-                    DiagnosticSeverity.Error
-                ))
+                diagnostics.append(
+                    self._create_diagnostic(
+                        "Review Requirement: '## Review Comments' section is empty.",
+                        DiagnosticSeverity.Error,
+                    )
+                )
         return diagnostics

-    def _validate_integrity(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
+    def _validate_integrity(
+        self, meta: IssueMetadata, content: str
+    ) -> List[Diagnostic]:
         diagnostics = []
         if meta.status == "closed" and not meta.solution:
             line = self._get_field_line(content, "status")
-            diagnostics.append(self._create_diagnostic(
-                f"Data Integrity: Closed issue {meta.id} missing 'solution' field.",
-                DiagnosticSeverity.Error,
-                line=line
-            ))
+            diagnostics.append(
+                self._create_diagnostic(
+                    f"Data Integrity: Closed issue {meta.id} missing 'solution' field.",
+                    DiagnosticSeverity.Error,
+                    line=line,
+                )
+            )
+
+        # Tags Integrity Check
+        # Requirement: tags field must carry parent dependencies and related issue id
+        required_tags = set()
+
+        # Self ID
+        required_tags.add(f"#{meta.id}")
+
+        if meta.parent:
+            # Strip potential user # if accidentally added in models, though core stripped it
+            # But here we want the tag TO HAVE #
+            p = meta.parent if not meta.parent.startswith("#") else meta.parent[1:]
+            required_tags.add(f"#{p}")
+
+        for d in meta.dependencies:
+            _d = d if not d.startswith("#") else d[1:]
+            required_tags.add(f"#{_d}")
+
+        for r in meta.related:
+            _r = r if not r.startswith("#") else r[1:]
+            required_tags.add(f"#{_r}")
+
+        current_tags = set(meta.tags) if meta.tags else set()
+        missing_tags = required_tags - current_tags
+
+        if missing_tags:
+            line = self._get_field_line(content, "tags")
+            # If tags field doesn't exist, line is 0, which is fine
+            # We join them for display
+            missing_str = ", ".join(sorted(missing_tags))
+            diagnostics.append(
+                self._create_diagnostic(
+                    f"Tag Check: Missing required context tags: {missing_str}",
+                    DiagnosticSeverity.Warning,
+                    line=line,
+                )
+            )
+
         return diagnostics
-
-    def _validate_references(self, meta: IssueMetadata, content: str, all_ids: Set[str]) -> List[Diagnostic]:
+
+    def _validate_references(
+        self, meta: IssueMetadata, content: str, all_ids: Set[str]
+    ) -> List[Diagnostic]:
         diagnostics = []
+
+        # Malformed ID Check
+        if meta.parent and meta.parent.startswith("#"):
+            line = self._get_field_line(content, "parent")
+            diagnostics.append(
+                self._create_diagnostic(
+                    f"Malformed ID: Parent '{meta.parent}' should not start with '#'.",
+                    DiagnosticSeverity.Warning,
+                    line=line,
+                )
+            )
+
+        if meta.dependencies:
+            for dep in meta.dependencies:
+                if dep.startswith("#"):
+                    line = self._get_field_line(content, "dependencies")
+                    diagnostics.append(
+                        self._create_diagnostic(
+                            f"Malformed ID: Dependency '{dep}' should not start with '#'.",
+                            DiagnosticSeverity.Warning,
+                            line=line,
+                        )
+                    )
+
+        if meta.related:
+            for rel in meta.related:
+                if rel.startswith("#"):
+                    line = self._get_field_line(content, "related")
+                    diagnostics.append(
+                        self._create_diagnostic(
+                            f"Malformed ID: Related '{rel}' should not start with '#'.",
+                            DiagnosticSeverity.Warning,
+                            line=line,
+                        )
+                    )
+
         if not all_ids:
             return diagnostics
-
-        if meta.parent and meta.parent not in all_ids:
-            line = self._get_field_line(content, "parent")
-            diagnostics.append(self._create_diagnostic(
-                f"Broken Reference: Parent '{meta.parent}' not found.",
-                DiagnosticSeverity.Error,
-                line=line
-            ))
-
+
+        if (
+            meta.parent
+            and meta.parent not in all_ids
+            and not meta.parent.startswith("#")
+        ):
+            line = self._get_field_line(content, "parent")
+            diagnostics.append(
+                self._create_diagnostic(
+                    f"Broken Reference: Parent '{meta.parent}' not found.",
+                    DiagnosticSeverity.Error,
+                    line=line,
+                )
+            )
+
         for dep in meta.dependencies:
             if dep not in all_ids:
                 line = self._get_field_line(content, "dependencies")
-                diagnostics.append(self._create_diagnostic(
-                    f"Broken Reference: Dependency '{dep}' not found.",
-                    DiagnosticSeverity.Error,
-                    line=line
-                ))
-
+                diagnostics.append(
+                    self._create_diagnostic(
+                        f"Broken Reference: Dependency '{dep}' not found.",
+                        DiagnosticSeverity.Error,
+                        line=line,
+                    )
+                )
+
         # Body Reference Check
         # Regex for generic issue ID: (EPIC|FEAT|CHORE|FIX)-\d{4}
         # We scan line by line to get line numbers
-        lines = content.split('\n')
+        lines = content.split("\n")
         # Skip frontmatter for body check to avoid double counting (handled above)
         in_fm = False
         fm_end = 0
         for i, line in enumerate(lines):
-            if line.strip() == '---':
-                if not in_fm: in_fm = True
-                else:
+            if line.strip() == "---":
+                if not in_fm:
+                    in_fm = True
+                else:
                     fm_end = i
                     break
-
+
         for i, line in enumerate(lines):
-            if i <= fm_end: continue # Skip frontmatter
-
+            if i <= fm_end:
+                continue # Skip frontmatter
+
             # Find all matches
             matches = re.finditer(r"\b((?:EPIC|FEAT|CHORE|FIX)-\d{4})\b", line)
             for match in matches:
                 ref_id = match.group(1)
                 if ref_id != meta.id and ref_id not in all_ids:
-                    # Check if it's a namespaced ID? The regex only catches local IDs.
-                    # If users use MON::FEAT-0001, the regex might catch FEAT-0001.
-                    # But all_ids contains full IDs (potentially namespaced).
-                    # Simple logic: if ref_id isn't in all_ids, check if any id ENDS with ref_id
-
-                    found_namespaced = any(known.endswith(f"::{ref_id}") for known in all_ids)
-
-                    if not found_namespaced:
-                        diagnostics.append(self._create_diagnostic(
-                            f"Broken Reference: Issue '{ref_id}' not found.",
-                            DiagnosticSeverity.Warning,
-                            line=i
-                        ))
+                    # Check if it's a namespaced ID? The regex only catches local IDs.
+                    # If users use MON::FEAT-0001, the regex might catch FEAT-0001.
+                    # But all_ids contains full IDs (potentially namespaced).
+                    # Simple logic: if ref_id isn't in all_ids, check if any id ENDS with ref_id
+
+                    found_namespaced = any(
+                        known.endswith(f"::{ref_id}") for known in all_ids
+                    )
+
+                    if not found_namespaced:
+                        diagnostics.append(
+                            self._create_diagnostic(
+                                f"Broken Reference: Issue '{ref_id}' not found.",
+                                DiagnosticSeverity.Warning,
+                                line=i,
+                            )
+                        )
         return diagnostics

-    def _validate_time_consistency(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
+    def _validate_time_consistency(
+        self, meta: IssueMetadata, content: str
+    ) -> List[Diagnostic]:
         diagnostics = []
         c = meta.created_at
         o = meta.opened_at
         u = meta.updated_at
         cl = meta.closed_at
-
+
         created_line = self._get_field_line(content, "created_at")
         opened_line = self._get_field_line(content, "opened_at")
-        updated_line = self._get_field_line(content, "updated_at")
-        closed_line = self._get_field_line(content, "closed_at")

         if o and c > o:
-            diagnostics.append(self._create_diagnostic("Time Travel: created_at > opened_at", DiagnosticSeverity.Warning, line=created_line))
-
+            diagnostics.append(
+                self._create_diagnostic(
+                    "Time Travel: created_at > opened_at",
+                    DiagnosticSeverity.Warning,
+                    line=created_line,
+                )
+            )
+
         if u and c > u:
-            diagnostics.append(self._create_diagnostic("Time Travel: created_at > updated_at", DiagnosticSeverity.Warning, line=created_line))
-
+            diagnostics.append(
+                self._create_diagnostic(
+                    "Time Travel: created_at > updated_at",
+                    DiagnosticSeverity.Warning,
+                    line=created_line,
+                )
+            )
+
         if cl:
             if c > cl:
-                diagnostics.append(self._create_diagnostic("Time Travel: created_at > closed_at", DiagnosticSeverity.Error, line=created_line))
+                diagnostics.append(
+                    self._create_diagnostic(
+                        "Time Travel: created_at > closed_at",
+                        DiagnosticSeverity.Error,
+                        line=created_line,
+                    )
+                )
             if o and o > cl:
-                diagnostics.append(self._create_diagnostic("Time Travel: opened_at > closed_at", DiagnosticSeverity.Error, line=opened_line))
+                diagnostics.append(
+                    self._create_diagnostic(
+                        "Time Travel: opened_at > closed_at",
+                        DiagnosticSeverity.Error,
+                        line=opened_line,
+                    )
+                )

         return diagnostics

-    def _validate_checkbox_logic_blocks(self, blocks: List[ContentBlock]) -> List[Diagnostic]:
+    def _validate_domains(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
         diagnostics = []
-
+        # Check if 'domains' field exists in frontmatter text
+        # We rely on text parsing because Pydantic defaults 'domains' to [] if missing.
+
+        # If line is 0, it might be the first line (rare) or missing.
+        # _get_field_line returns 0 if not found, but also if found at line 0?
+        # Let's check if the field actually exists in text.
+        has_domains_field = False
+        lines = content.splitlines()
+        in_fm = False
+        for i, line_content in enumerate(lines):
+            stripped = line_content.strip()
+            if stripped == "---":
+                if not in_fm:
+                    in_fm = True
+                else:
+                    break
+            elif in_fm:
+                if stripped.startswith("domains:"):
+                    has_domains_field = True
+                    break
+
+        if not has_domains_field:
+            # We report it on line 0 (start of file) or line 1
+            diagnostics.append(
+                self._create_diagnostic(
+                    "Structure Error: Missing 'domains' field in frontmatter.",
+                    DiagnosticSeverity.Warning,
+                    line=0,
+                )
+            )
+
+        return diagnostics
+
+    def _validate_checkbox_logic_blocks(
+        self, blocks: List[ContentBlock]
+    ) -> List[Diagnostic]:
+        diagnostics = []
+
         for block in blocks:
-            if block.type == 'task_item':
+            if block.type == "task_item":
                 content = block.content.strip()
                 # Syntax Check: - [?]
                 # Added supported chars: /, ~, +
                 match = re.match(r"- \[([ x\-/~+])\]", content)
                 if not match:
                     # Check for Common errors
-                    if re.match(r"- \[.{2,}\]", content): # [xx] or [ ]
-                        diagnostics.append(self._create_diagnostic("Invalid Checkbox: Use single character [ ], [x], [-], [/]", DiagnosticSeverity.Error, block.line_start))
-                    elif re.match(r"- \[([^ x\-/~+])\]", content): # [v], [o]
-                        diagnostics.append(self._create_diagnostic("Invalid Checkbox Status: Use [ ], [x], [/], [~]", DiagnosticSeverity.Error, block.line_start))
-
+                    if re.match(r"- \[.{2,}\]", content): # [xx] or [ ]
+                        diagnostics.append(
+                            self._create_diagnostic(
+                                "Invalid Checkbox: Use single character [ ], [x], [-], [/]",
+                                DiagnosticSeverity.Error,
+                                block.line_start,
+                            )
+                        )
+                    elif re.match(r"- \[([^ x\-/~+])\]", content): # [v], [o]
+                        diagnostics.append(
+                            self._create_diagnostic(
+                                "Invalid Checkbox Status: Use [ ], [x], [/], [~]",
+                                DiagnosticSeverity.Error,
+                                block.line_start,
+                            )
+                        )
+
         return diagnostics
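
The hunks above tighten two mechanical rules that can be checked in isolation: the checkbox marker set accepted by _validate_checkbox_logic_blocks and the required context tags computed in _validate_integrity. The standalone snippet below is a sketch of those two rules, written against the regex and set logic visible in the diff; it is not part of the monoco_toolkit package, and the helper names checkbox_marker and missing_context_tags are illustrative only.

import re

# Accepted single-character markers, per the regex in the diff:
# " " (todo), "x" (done), "-" or "/" (doing), "~" or "+" (cancelled/deferred).
CHECKBOX = re.compile(r"- \[([ x\-/~+])\]")


def checkbox_marker(line: str):
    """Return the marker character for a well-formed task item, else None."""
    match = CHECKBOX.match(line.strip())
    return match.group(1) if match else None


def missing_context_tags(issue_id, parent, dependencies, related, tags):
    """Reproduce the 'Tag Check' set arithmetic from _validate_integrity."""
    required = {f"#{issue_id}"}
    for ref in [parent, *dependencies, *related]:
        if ref:
            # Normalize to a single leading '#', as the diff does
            required.add(ref if ref.startswith("#") else f"#{ref}")
    return required - set(tags or [])


if __name__ == "__main__":
    print(checkbox_marker("- [x] ship 0.3.0"))    # 'x'  -> valid, resolved
    print(checkbox_marker("- [v] wrong marker"))  # None -> flagged as invalid status
    print(missing_context_tags(
        "FEAT-0001", "EPIC-0001", ["CHORE-0002"], [], ["#FEAT-0001"]
    ))  # {'#EPIC-0001', '#CHORE-0002'} would be reported as missing tags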