monoco-toolkit 0.1.1__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. monoco/cli/__init__.py +0 -0
  2. monoco/cli/project.py +87 -0
  3. monoco/cli/workspace.py +46 -0
  4. monoco/core/agent/__init__.py +5 -0
  5. monoco/core/agent/action.py +144 -0
  6. monoco/core/agent/adapters.py +129 -0
  7. monoco/core/agent/protocol.py +31 -0
  8. monoco/core/agent/state.py +106 -0
  9. monoco/core/config.py +212 -17
  10. monoco/core/execution.py +62 -0
  11. monoco/core/feature.py +58 -0
  12. monoco/core/git.py +51 -2
  13. monoco/core/injection.py +196 -0
  14. monoco/core/integrations.py +242 -0
  15. monoco/core/lsp.py +68 -0
  16. monoco/core/output.py +21 -3
  17. monoco/core/registry.py +36 -0
  18. monoco/core/resources/en/AGENTS.md +8 -0
  19. monoco/core/resources/en/SKILL.md +66 -0
  20. monoco/core/resources/zh/AGENTS.md +8 -0
  21. monoco/core/resources/zh/SKILL.md +65 -0
  22. monoco/core/setup.py +96 -110
  23. monoco/core/skills.py +444 -0
  24. monoco/core/state.py +53 -0
  25. monoco/core/sync.py +224 -0
  26. monoco/core/telemetry.py +4 -1
  27. monoco/core/workspace.py +85 -20
  28. monoco/daemon/app.py +127 -58
  29. monoco/daemon/models.py +4 -0
  30. monoco/daemon/services.py +56 -155
  31. monoco/features/config/commands.py +125 -44
  32. monoco/features/i18n/adapter.py +29 -0
  33. monoco/features/i18n/commands.py +89 -10
  34. monoco/features/i18n/core.py +113 -27
  35. monoco/features/i18n/resources/en/AGENTS.md +8 -0
  36. monoco/features/i18n/resources/en/SKILL.md +94 -0
  37. monoco/features/i18n/resources/zh/AGENTS.md +8 -0
  38. monoco/features/i18n/resources/zh/SKILL.md +94 -0
  39. monoco/features/issue/adapter.py +34 -0
  40. monoco/features/issue/commands.py +343 -101
  41. monoco/features/issue/core.py +384 -150
  42. monoco/features/issue/domain/__init__.py +0 -0
  43. monoco/features/issue/domain/lifecycle.py +126 -0
  44. monoco/features/issue/domain/models.py +170 -0
  45. monoco/features/issue/domain/parser.py +223 -0
  46. monoco/features/issue/domain/workspace.py +104 -0
  47. monoco/features/issue/engine/__init__.py +22 -0
  48. monoco/features/issue/engine/config.py +172 -0
  49. monoco/features/issue/engine/machine.py +185 -0
  50. monoco/features/issue/engine/models.py +18 -0
  51. monoco/features/issue/linter.py +325 -120
  52. monoco/features/issue/lsp/__init__.py +3 -0
  53. monoco/features/issue/lsp/definition.py +72 -0
  54. monoco/features/issue/migration.py +134 -0
  55. monoco/features/issue/models.py +46 -24
  56. monoco/features/issue/monitor.py +94 -0
  57. monoco/features/issue/resources/en/AGENTS.md +20 -0
  58. monoco/features/issue/resources/en/SKILL.md +111 -0
  59. monoco/features/issue/resources/zh/AGENTS.md +20 -0
  60. monoco/features/issue/resources/zh/SKILL.md +138 -0
  61. monoco/features/issue/validator.py +455 -0
  62. monoco/features/spike/adapter.py +30 -0
  63. monoco/features/spike/commands.py +45 -24
  64. monoco/features/spike/core.py +6 -40
  65. monoco/features/spike/resources/en/AGENTS.md +7 -0
  66. monoco/features/spike/resources/en/SKILL.md +74 -0
  67. monoco/features/spike/resources/zh/AGENTS.md +7 -0
  68. monoco/features/spike/resources/zh/SKILL.md +74 -0
  69. monoco/main.py +91 -2
  70. monoco_toolkit-0.2.8.dist-info/METADATA +136 -0
  71. monoco_toolkit-0.2.8.dist-info/RECORD +83 -0
  72. monoco_toolkit-0.1.1.dist-info/METADATA +0 -93
  73. monoco_toolkit-0.1.1.dist-info/RECORD +0 -33
  74. {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.8.dist-info}/WHEEL +0 -0
  75. {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.8.dist-info}/entry_points.txt +0 -0
  76. {monoco_toolkit-0.1.1.dist-info → monoco_toolkit-0.2.8.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,455 @@
1
+ import re
2
+ import yaml
3
+ from typing import List, Set, Optional, Dict
4
+ from pathlib import Path
5
+
6
+ from monoco.core.lsp import Diagnostic, DiagnosticSeverity, Range, Position
7
+ from monoco.core.config import get_config
8
+ from monoco.features.i18n.core import detect_language
9
+ from .models import IssueMetadata, IssueType
10
+ from .domain.parser import MarkdownParser
11
+ from .domain.models import ContentBlock
12
+
13
class IssueValidator:
    """
    Centralized validation logic for Issue Tickets.
    Returns LSP-compatible Diagnostics.
    """

    def __init__(self, issue_root: Optional[Path] = None):
        # Root directory of the issue tree. Not read by the validation
        # methods below; kept for future file-system-level checks.
        self.issue_root = issue_root
22
+ def validate(self, meta: IssueMetadata, content: str, all_issue_ids: Set[str] = set()) -> List[Diagnostic]:
23
+ diagnostics = []
24
+
25
+ # Parse Content into Blocks (Domain Layer)
26
+ # Handle case where content might be just body (from update_issue) or full file
27
+ if content.startswith("---"):
28
+ try:
29
+ issue_domain = MarkdownParser.parse(content)
30
+ blocks = issue_domain.body.blocks
31
+ has_frontmatter = True
32
+ except Exception:
33
+ # Fallback if parser fails (e.g. invalid YAML)
34
+ # We continue with empty blocks or try partial parsing?
35
+ # For now, let's try to parse blocks ignoring FM
36
+ lines = content.splitlines()
37
+ # Find end of FM
38
+ start_line = 0
39
+ if lines[0].strip() == "---":
40
+ for i in range(1, len(lines)):
41
+ if lines[i].strip() == "---":
42
+ start_line = i + 1
43
+ break
44
+ blocks = MarkdownParser._parse_blocks(lines[start_line:], start_line_offset=start_line)
45
+ has_frontmatter = True
46
+ else:
47
+ # Assume content is just body
48
+ lines = content.splitlines()
49
+ blocks = MarkdownParser._parse_blocks(lines, start_line_offset=0)
50
+ has_frontmatter = False
51
+
52
+ # 1. State Matrix Validation
53
+ diagnostics.extend(self._validate_state_matrix(meta, content))
54
+
55
+ # 2. State Requirements (Strict Verification)
56
+ diagnostics.extend(self._validate_state_requirements(meta, blocks))
57
+
58
+ # 3. Structure Consistency (Headings) - Using Blocks
59
+ diagnostics.extend(self._validate_structure_blocks(meta, blocks))
60
+
61
+ # 4. Lifecycle/Integrity (Solution, etc.)
62
+ diagnostics.extend(self._validate_integrity(meta, content))
63
+
64
+ # 5. Reference Integrity
65
+ diagnostics.extend(self._validate_references(meta, content, all_issue_ids))
66
+
67
+ # 6. Time Consistency
68
+ diagnostics.extend(self._validate_time_consistency(meta, content))
69
+
70
+ # 7. Checkbox Syntax - Using Blocks
71
+ diagnostics.extend(self._validate_checkbox_logic_blocks(blocks))
72
+
73
+ # 8. Language Consistency
74
+ diagnostics.extend(self._validate_language_consistency(meta, content))
75
+
76
+ return diagnostics
77
+
78
    def _validate_language_consistency(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
        """Warn when the issue body language contradicts the project source language.

        Only the zh-project / en-content mismatch is currently detected.
        Best-effort: any config or detection failure is swallowed so this
        advisory check can never crash validation.
        """
        diagnostics = []
        try:
            config = get_config()
            source_lang = config.i18n.source_lang

            # Check for language mismatch (specifically zh vs en)
            if source_lang.lower() == 'zh':
                detected = detect_language(content)
                if detected == 'en':
                    diagnostics.append(self._create_diagnostic(
                        "Language Mismatch: Project source language is 'zh' but content appears to be 'en'.",
                        DiagnosticSeverity.Warning
                    ))
        except Exception:
            # Deliberate best-effort: this check is advisory only.
            pass
        return diagnostics
95
+
96
+ def _create_diagnostic(self, message: str, severity: DiagnosticSeverity, line: int = 0) -> Diagnostic:
97
+ """Helper to create a diagnostic object."""
98
+ return Diagnostic(
99
+ range=Range(
100
+ start=Position(line=line, character=0),
101
+ end=Position(line=line, character=100) # Arbitrary end
102
+ ),
103
+ severity=severity,
104
+ message=message
105
+ )
106
+
107
+ def _get_field_line(self, content: str, field_name: str) -> int:
108
+ """Helper to find the line number of a field in the front matter."""
109
+ lines = content.split('\n')
110
+ in_fm = False
111
+ for i, line in enumerate(lines):
112
+ stripped = line.strip()
113
+ if stripped == "---":
114
+ if not in_fm:
115
+ in_fm = True
116
+ continue
117
+ else:
118
+ break # End of FM
119
+ if in_fm:
120
+ # Match "field:", "field :", or "field: value"
121
+ if re.match(rf"^{re.escape(field_name)}\s*:", stripped):
122
+ return i
123
+ return 0
124
+
125
+ def _validate_state_matrix(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
126
+ diagnostics = []
127
+
128
+ # Check based on parsed metadata (now that auto-correction is disabled)
129
+ if meta.status == "closed" and meta.stage != "done":
130
+ line = self._get_field_line(content, "status")
131
+ diagnostics.append(self._create_diagnostic(
132
+ f"State Mismatch: Closed issues must be in 'Done' stage (found: {meta.stage if meta.stage else 'None'})",
133
+ DiagnosticSeverity.Error,
134
+ line=line
135
+ ))
136
+
137
+ if meta.status == "backlog" and meta.stage != "freezed":
138
+ line = self._get_field_line(content, "status")
139
+ diagnostics.append(self._create_diagnostic(
140
+ f"State Mismatch: Backlog issues must be in 'Freezed' stage (found: {meta.stage if meta.stage else 'None'})",
141
+ DiagnosticSeverity.Error,
142
+ line=line
143
+ ))
144
+
145
+ return diagnostics
146
+
147
    def _validate_state_requirements(self, meta: IssueMetadata, blocks: List[ContentBlock]) -> List[Diagnostic]:
        """Enforce stage-dependent content requirements on tasks/AC/review items.

        Checkbox vocabulary: [ ] todo, [-]/[/] doing, [x] done, [~]/[+] cancelled.
        """
        diagnostics = []

        # 1. Map blocks to logical sections by scanning headings.
        sections = {"tasks": [], "ac": [], "review": []}
        current_section = None

        for block in blocks:
            if block.type == "heading":
                title = block.content.strip().lower()
                # Parse title to identify sections (supporting Chinese and English synonyms)
                if any(kw in title for kw in ["technical tasks", "工作包", "技术任务", "key deliverables", "关键交付", "重点工作", "子功能", "子故事", "child features", "stories", "需求", "requirements", "implementation", "实现", "交付", "delivery", "规划", "plan", "tasks", "任务"]):
                    current_section = "tasks"
                elif any(kw in title for kw in ["acceptance criteria", "验收标准", "交付目标", "验收"]):
                    current_section = "ac"
                elif any(kw in title for kw in ["review comments", "确认事项", "评审记录", "复盘记录", "review", "评审", "确认"]):
                    current_section = "review"
                elif title.startswith("###"):
                    # Subheading: allow continued collection for the current section
                    pass
                else:
                    # Unrecognized top-level heading ends the current section.
                    current_section = None
            elif block.type == "task_item":
                if current_section and current_section in sections:
                    sections[current_section].append(block)

        # 2. Logic: DOING (and later) -> must have at least one defined task.
        if meta.stage in ["doing", "review", "done"]:
            if not sections["tasks"]:
                # No precise line to point at when the section is missing,
                # so emit a file-level (line 0) diagnostic.
                diagnostics.append(self._create_diagnostic(
                    "State Requirement (DOING+): Must define 'Technical Tasks' (at least 1 checkbox).",
                    DiagnosticSeverity.Warning
                ))

        # 3. Logic: REVIEW -> tasks must be Completed ([x]) or Cancelled ([~], [+]).
        # No [ ] (ToDo) or [-]/[/] (Doing) allowed.
        if meta.stage in ["review", "done"]:
            for block in sections["tasks"]:
                content = block.content.strip()
                # Check for explicit illegal states
                if re.search(r"-\s*\[\s+\]", content):
                    diagnostics.append(self._create_diagnostic(
                        f"State Requirement ({meta.stage.upper()}): Technical Tasks must be resolved. Found Todo [ ]: '{content}'",
                        DiagnosticSeverity.Error,
                        line=block.line_start
                    ))
                elif re.search(r"-\s*\[[-\/]]", content):
                    diagnostics.append(self._create_diagnostic(
                        f"State Requirement ({meta.stage.upper()}): Technical Tasks must be finished (not Doing). Found Doing [-]: '{content}'",
                        DiagnosticSeverity.Error,
                        line=block.line_start
                    ))

        # 4. Logic: DONE -> every Acceptance Criterion must be verified ([x]).
        if meta.stage == "done":
            for block in sections["ac"]:
                content = block.content.strip()
                if not re.search(r"-\s*\[[xX]\]", content):
                    diagnostics.append(self._create_diagnostic(
                        f"State Requirement (DONE): Acceptance Criteria must be passed ([x]). Found: '{content}'",
                        DiagnosticSeverity.Error,
                        line=block.line_start
                    ))

            # 5. Logic: DONE -> Review checkboxes (if any) must be resolved
            # ([x], [X], [~], [+]); [ ], [-], [/] are blocking states.
            # NOTE(review): reconstructed as sharing the DONE guard above,
            # matching the "(DONE)" wording in the message — confirm against
            # the original indentation.
            for block in sections["review"]:
                content = block.content.strip()
                if re.search(r"-\s*\[[\s\-\/]\]", content):
                    diagnostics.append(self._create_diagnostic(
                        f"State Requirement (DONE): Actionable Review Comments must be resolved ([x] or [~]). Found: '{content}'",
                        DiagnosticSeverity.Error,
                        line=block.line_start
                    ))

        return diagnostics
226
+
227
    def _validate_structure_blocks(self, meta: IssueMetadata, blocks: List[ContentBlock]) -> List[Diagnostic]:
        """Check required document structure.

        Requires the title heading "## {id}: {title}", and for review/done
        stages a non-empty '## Review Comments' section.
        """
        diagnostics = []

        # 1. Heading check: ## {issue-id}: {issue-title}
        expected_header = f"## {meta.id}: {meta.title}"
        header_found = False

        # 2. Review Comments section tracking
        review_header_found = False
        review_content_found = False
        review_header_index = -1

        for i, block in enumerate(blocks):
            if block.type == 'heading':
                stripped = block.content.strip()
                if stripped == expected_header:
                    header_found = True

                if stripped == "## Review Comments":
                    review_header_found = True
                    review_header_index = i

        # Any non-empty block after the review heading counts as content.
        if review_header_found:
            for j in range(review_header_index + 1, len(blocks)):
                if blocks[j].type != 'empty':
                    review_content_found = True
                    break

        if not header_found:
            diagnostics.append(self._create_diagnostic(
                f"Structure Error: Missing Level 2 Heading '{expected_header}'",
                DiagnosticSeverity.Warning
            ))

        # The review section is only mandatory once the issue reaches review.
        if meta.stage in ["review", "done"]:
            if not review_header_found:
                diagnostics.append(self._create_diagnostic(
                    "Review Requirement: Missing '## Review Comments' section.",
                    DiagnosticSeverity.Error
                ))
            elif not review_content_found:
                diagnostics.append(self._create_diagnostic(
                    "Review Requirement: '## Review Comments' section is empty.",
                    DiagnosticSeverity.Error
                ))
        return diagnostics
276
+
277
+ def _validate_integrity(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
278
+ diagnostics = []
279
+ if meta.status == "closed" and not meta.solution:
280
+ line = self._get_field_line(content, "status")
281
+ diagnostics.append(self._create_diagnostic(
282
+ f"Data Integrity: Closed issue {meta.id} missing 'solution' field.",
283
+ DiagnosticSeverity.Error,
284
+ line=line
285
+ ))
286
+
287
+ # Tags Integrity Check
288
+ # Requirement: tags field must carry parent dependencies and related issue id
289
+ required_tags = set()
290
+
291
+ # Self ID
292
+ required_tags.add(f"#{meta.id}")
293
+
294
+ if meta.parent:
295
+ # Strip potential user # if accidentally added in models, though core stripped it
296
+ # But here we want the tag TO HAVE #
297
+ p = meta.parent if not meta.parent.startswith("#") else meta.parent[1:]
298
+ required_tags.add(f"#{p}")
299
+
300
+ for d in meta.dependencies:
301
+ _d = d if not d.startswith("#") else d[1:]
302
+ required_tags.add(f"#{_d}")
303
+
304
+ for r in meta.related:
305
+ _r = r if not r.startswith("#") else r[1:]
306
+ required_tags.add(f"#{_r}")
307
+
308
+ current_tags = set(meta.tags) if meta.tags else set()
309
+ missing_tags = required_tags - current_tags
310
+
311
+ if missing_tags:
312
+ line = self._get_field_line(content, "tags")
313
+ # If tags field doesn't exist, line is 0, which is fine
314
+ # We join them for display
315
+ missing_str = ", ".join(sorted(missing_tags))
316
+ diagnostics.append(self._create_diagnostic(
317
+ f"Tag Check: Missing required context tags: {missing_str}",
318
+ DiagnosticSeverity.Warning,
319
+ line=line
320
+ ))
321
+
322
+ return diagnostics
323
+
324
+ def _validate_references(self, meta: IssueMetadata, content: str, all_ids: Set[str]) -> List[Diagnostic]:
325
+ diagnostics = []
326
+
327
+ # Malformed ID Check
328
+ if meta.parent and meta.parent.startswith("#"):
329
+ line = self._get_field_line(content, "parent")
330
+ diagnostics.append(self._create_diagnostic(
331
+ f"Malformed ID: Parent '{meta.parent}' should not start with '#'.",
332
+ DiagnosticSeverity.Warning,
333
+ line=line
334
+ ))
335
+
336
+ if meta.dependencies:
337
+ for dep in meta.dependencies:
338
+ if dep.startswith("#"):
339
+ line = self._get_field_line(content, "dependencies")
340
+ diagnostics.append(self._create_diagnostic(
341
+ f"Malformed ID: Dependency '{dep}' should not start with '#'.",
342
+ DiagnosticSeverity.Warning,
343
+ line=line
344
+ ))
345
+
346
+ if meta.related:
347
+ for rel in meta.related:
348
+ if rel.startswith("#"):
349
+ line = self._get_field_line(content, "related")
350
+ diagnostics.append(self._create_diagnostic(
351
+ f"Malformed ID: Related '{rel}' should not start with '#'.",
352
+ DiagnosticSeverity.Warning,
353
+ line=line
354
+ ))
355
+
356
+ if not all_ids:
357
+ return diagnostics
358
+
359
+ if meta.parent and meta.parent not in all_ids and not meta.parent.startswith("#"):
360
+ line = self._get_field_line(content, "parent")
361
+ diagnostics.append(self._create_diagnostic(
362
+ f"Broken Reference: Parent '{meta.parent}' not found.",
363
+ DiagnosticSeverity.Error,
364
+ line=line
365
+ ))
366
+
367
+ for dep in meta.dependencies:
368
+ if dep not in all_ids:
369
+ line = self._get_field_line(content, "dependencies")
370
+ diagnostics.append(self._create_diagnostic(
371
+ f"Broken Reference: Dependency '{dep}' not found.",
372
+ DiagnosticSeverity.Error,
373
+ line=line
374
+ ))
375
+
376
+ # Body Reference Check
377
+ # Regex for generic issue ID: (EPIC|FEAT|CHORE|FIX)-\d{4}
378
+ # We scan line by line to get line numbers
379
+ lines = content.split('\n')
380
+ # Skip frontmatter for body check to avoid double counting (handled above)
381
+ in_fm = False
382
+ fm_end = 0
383
+ for i, line in enumerate(lines):
384
+ if line.strip() == '---':
385
+ if not in_fm: in_fm = True
386
+ else:
387
+ fm_end = i
388
+ break
389
+
390
+ for i, line in enumerate(lines):
391
+ if i <= fm_end: continue # Skip frontmatter
392
+
393
+ # Find all matches
394
+ matches = re.finditer(r"\b((?:EPIC|FEAT|CHORE|FIX)-\d{4})\b", line)
395
+ for match in matches:
396
+ ref_id = match.group(1)
397
+ if ref_id != meta.id and ref_id not in all_ids:
398
+ # Check if it's a namespaced ID? The regex only catches local IDs.
399
+ # If users use MON::FEAT-0001, the regex might catch FEAT-0001.
400
+ # But all_ids contains full IDs (potentially namespaced).
401
+ # Simple logic: if ref_id isn't in all_ids, check if any id ENDS with ref_id
402
+
403
+ found_namespaced = any(known.endswith(f"::{ref_id}") for known in all_ids)
404
+
405
+ if not found_namespaced:
406
+ diagnostics.append(self._create_diagnostic(
407
+ f"Broken Reference: Issue '{ref_id}' not found.",
408
+ DiagnosticSeverity.Warning,
409
+ line=i
410
+ ))
411
+ return diagnostics
412
+
413
+ def _validate_time_consistency(self, meta: IssueMetadata, content: str) -> List[Diagnostic]:
414
+ diagnostics = []
415
+ c = meta.created_at
416
+ o = meta.opened_at
417
+ u = meta.updated_at
418
+ cl = meta.closed_at
419
+
420
+ created_line = self._get_field_line(content, "created_at")
421
+ opened_line = self._get_field_line(content, "opened_at")
422
+ updated_line = self._get_field_line(content, "updated_at")
423
+ closed_line = self._get_field_line(content, "closed_at")
424
+
425
+ if o and c > o:
426
+ diagnostics.append(self._create_diagnostic("Time Travel: created_at > opened_at", DiagnosticSeverity.Warning, line=created_line))
427
+
428
+ if u and c > u:
429
+ diagnostics.append(self._create_diagnostic("Time Travel: created_at > updated_at", DiagnosticSeverity.Warning, line=created_line))
430
+
431
+ if cl:
432
+ if c > cl:
433
+ diagnostics.append(self._create_diagnostic("Time Travel: created_at > closed_at", DiagnosticSeverity.Error, line=created_line))
434
+ if o and o > cl:
435
+ diagnostics.append(self._create_diagnostic("Time Travel: opened_at > closed_at", DiagnosticSeverity.Error, line=opened_line))
436
+
437
+ return diagnostics
438
+
439
+ def _validate_checkbox_logic_blocks(self, blocks: List[ContentBlock]) -> List[Diagnostic]:
440
+ diagnostics = []
441
+
442
+ for block in blocks:
443
+ if block.type == 'task_item':
444
+ content = block.content.strip()
445
+ # Syntax Check: - [?]
446
+ # Added supported chars: /, ~, +
447
+ match = re.match(r"- \[([ x\-/~+])\]", content)
448
+ if not match:
449
+ # Check for Common errors
450
+ if re.match(r"- \[.{2,}\]", content): # [xx] or [ ]
451
+ diagnostics.append(self._create_diagnostic("Invalid Checkbox: Use single character [ ], [x], [-], [/]", DiagnosticSeverity.Error, block.line_start))
452
+ elif re.match(r"- \[([^ x\-/~+])\]", content): # [v], [o]
453
+ diagnostics.append(self._create_diagnostic("Invalid Checkbox Status: Use [ ], [x], [/], [~]", DiagnosticSeverity.Error, block.line_start))
454
+
455
+ return diagnostics
@@ -0,0 +1,30 @@
1
+ from pathlib import Path
2
+ from typing import Dict
3
+ from monoco.core.feature import MonocoFeature, IntegrationData
4
+ from monoco.features.spike import core
5
+
6
class SpikeFeature(MonocoFeature):
    """Feature plugin that wires the 'spike' (research repos) capability into Monoco."""

    @property
    def name(self) -> str:
        # Registry key identifying this feature.
        return "spike"

    def initialize(self, root: Path, config: Dict) -> None:
        """Create the spikes directory under *root* (default '.references')."""
        spikes_name = config.get("paths", {}).get("spikes", ".references")
        core.init(root, spikes_name)

    def integrate(self, root: Path, config: Dict) -> IntegrationData:
        """Return this feature's system-prompt contribution.

        Reads resources/<lang>/AGENTS.md, falling back to the English
        version when no localized file exists; an empty string is used if
        neither file is present.
        """
        # Determine language from config, default to 'en'
        lang = config.get("i18n", {}).get("source_lang", "en")
        base_dir = Path(__file__).parent / "resources"

        prompt_file = base_dir / lang / "AGENTS.md"
        if not prompt_file.exists():
            prompt_file = base_dir / "en" / "AGENTS.md"

        content = ""
        if prompt_file.exists():
            content = prompt_file.read_text(encoding="utf-8").strip()

        return IntegrationData(
            system_prompts={"Spike (Research)": content}
        )
@@ -1,15 +1,18 @@
1
1
  import typer
2
2
  from pathlib import Path
3
3
  from rich.console import Console
4
+ from typing import Annotated
4
5
 
5
6
  from monoco.core.config import get_config
7
+ from monoco.core.output import AgentOutput, OutputManager
6
8
  from . import core
7
9
 
8
10
  app = typer.Typer(help="Spike & Repo Management.")
9
- console = Console()
10
11
 
11
12
  @app.command("init")
12
- def init():
13
+ def init(
14
+ json: AgentOutput = False,
15
+ ):
13
16
  """Initialize the Spike environment (gitignore setup)."""
14
17
  config = get_config()
15
18
  root_dir = Path(config.paths.root)
@@ -20,11 +23,16 @@ def init():
20
23
  # Create the directory
21
24
  (root_dir / spikes_dir_name).mkdir(exist_ok=True)
22
25
 
23
- console.print(f"[green]✔[/green] Initialized Spike environment. Added '{spikes_dir_name}/' to .gitignore.")
26
+ OutputManager.print({
27
+ "status": "initialized",
28
+ "directory": spikes_dir_name,
29
+ "gitignore_updated": True
30
+ })
24
31
 
25
32
  @app.command("add")
26
33
  def add_repo(
27
34
  url: str = typer.Argument(..., help="Git Repository URL"),
35
+ json: AgentOutput = False,
28
36
  ):
29
37
  """Add a new research repository."""
30
38
  config = get_config()
@@ -38,13 +46,18 @@ def add_repo(
38
46
  name = name[:-4]
39
47
 
40
48
  core.update_config_repos(root_dir, name, url)
41
- console.print(f"[green]✔[/green] Added repo [bold]{name}[/bold] ({url}) to configuration.")
42
- console.print("Run [bold]monoco spike sync[/bold] to download content.")
49
+ OutputManager.print({
50
+ "status": "added",
51
+ "name": name,
52
+ "url": url,
53
+ "message": f"Run 'monoco spike sync' to download content."
54
+ })
43
55
 
44
56
  @app.command("remove")
45
57
  def remove_repo(
46
58
  name: str = typer.Argument(..., help="Repository Name"),
47
59
  force: bool = typer.Option(False, "--force", "-f", help="Force delete physical directory without asking"),
60
+ json: AgentOutput = False,
48
61
  ):
49
62
  """Remove a repository from configuration."""
50
63
  config = get_config()
@@ -52,30 +65,34 @@ def remove_repo(
52
65
  spikes_dir = root_dir / config.paths.spikes
53
66
 
54
67
  if name not in config.project.spike_repos:
55
- console.print(f"[yellow]![/yellow] Repo [bold]{name}[/bold] not found in configuration.")
68
+ OutputManager.error(f"Repo {name} not found in configuration.")
56
69
  return
57
70
 
58
71
  # Remove from config
59
72
  core.update_config_repos(root_dir, name, "", remove=True)
60
- console.print(f"[green]✔[/green] Removed [bold]{name}[/bold] from configuration.")
61
73
 
62
74
  target_path = spikes_dir / name
75
+ deleted = False
63
76
  if target_path.exists():
64
77
  if force or typer.confirm(f"Do you want to delete the directory {target_path}?", default=False):
65
78
  core.remove_repo_dir(spikes_dir, name)
66
- console.print(f"[gray]✔[/gray] Deleted directory {target_path}.")
79
+ deleted = True
67
80
  else:
68
- console.print(f"[gray]ℹ[/gray] Directory {target_path} kept.")
81
+ deleted = False
82
+
83
+ OutputManager.print({
84
+ "status": "removed",
85
+ "name": name,
86
+ "directory_deleted": deleted
87
+ })
69
88
 
70
89
  @app.command("sync")
71
- def sync_repos():
90
+ def sync_repos(
91
+ json: AgentOutput = False,
92
+ ):
72
93
  """Sync (Clone/Pull) all configured repositories."""
73
94
  # Force reload config to get latest updates
74
95
  config = get_config()
75
- # Note: get_config is a singleton, so for 'add' then 'sync' in same process,
76
- # we rely on 'add' writing to disk and us reading from memory?
77
- # Actually, if we run standard CLI "monoco spike add" then "monoco spike sync",
78
- # they are separate processes, so config loads fresh.
79
96
 
80
97
  root_dir = Path(config.paths.root)
81
98
  spikes_dir = root_dir / config.paths.spikes
@@ -84,27 +101,31 @@ def sync_repos():
84
101
  repos = config.project.spike_repos
85
102
 
86
103
  if not repos:
87
- console.print("[yellow]No repositories configured.[/yellow] Use 'monoco spike add <url>' first.")
104
+ OutputManager.print({"status": "empty", "message": "No repositories configured."}, title="Sync")
88
105
  return
89
106
 
90
- console.print(f"Syncing {len(repos)} repositories...")
107
+ results = []
91
108
 
92
109
  for name, url in repos.items():
93
- core.sync_repo(root_dir, spikes_dir, name, url)
110
+ try:
111
+ core.sync_repo(root_dir, spikes_dir, name, url)
112
+ results.append({"name": name, "status": "synced", "url": url})
113
+ except Exception as e:
114
+ results.append({"name": name, "status": "failed", "error": str(e), "url": url})
94
115
 
95
- console.print("[green]✔[/green] Sync complete.")
116
+ OutputManager.print(results, title="Sync Results")
96
117
 
97
- # Alias for list (showing configured repos) could be useful but not strictly asked for.
98
- # Let's add a simple list command to see what we have.
99
118
@app.command("list")
def list_repos(
    json: AgentOutput = False,
):
    """List configured repositories."""
    config = get_config()
    repos = config.project.spike_repos

    # No repositories configured: emit an empty collection and stop.
    if not repos:
        OutputManager.print([], title="Repositories")
        return

    rows = [{"name": repo_name, "url": repo_url} for repo_name, repo_url in repos.items()]
    OutputManager.print(rows, title="Repositories")