moai-adk 0.8.3__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release.

Files changed (27)
  1. moai_adk/templates/.claude/hooks/alfred/core/project.py +750 -0
  2. moai_adk/templates/.claude/hooks/alfred/shared/core/project.py +77 -10
  3. moai_adk/templates/.moai/memory/gitflow-protection-policy.md +140 -30
  4. moai_adk/templates/README.md +256 -0
  5. {moai_adk-0.8.3.dist-info → moai_adk-0.9.0.dist-info}/METADATA +333 -35
  6. {moai_adk-0.8.3.dist-info → moai_adk-0.9.0.dist-info}/RECORD +9 -25
  7. moai_adk/templates/.claude/hooks/alfred/.moai/cache/version-check.json +0 -9
  8. moai_adk/templates/.claude/hooks/alfred/README.md +0 -343
  9. moai_adk/templates/.claude/hooks/alfred/TROUBLESHOOTING.md +0 -471
  10. moai_adk/templates/.github/workflows/tag-report.yml +0 -261
  11. moai_adk/templates/.github/workflows/tag-validation.yml +0 -176
  12. moai_adk/templates/.moai/docs/quick-issue-creation-guide.md +0 -219
  13. moai_adk/templates/.moai/hooks/install.sh +0 -79
  14. moai_adk/templates/.moai/hooks/pre-commit.sh +0 -66
  15. moai_adk/templates/.moai/memory/CONFIG-SCHEMA.md +0 -444
  16. moai_adk/templates/.moai/memory/GITFLOW-PROTECTION-POLICY.md +0 -220
  17. moai_adk/templates/.moai/memory/spec-metadata.md +0 -356
  18. moai_adk/templates/src/moai_adk/core/__init__.py +0 -5
  19. moai_adk/templates/src/moai_adk/core/tags/__init__.py +0 -86
  20. moai_adk/templates/src/moai_adk/core/tags/ci_validator.py +0 -433
  21. moai_adk/templates/src/moai_adk/core/tags/cli.py +0 -283
  22. moai_adk/templates/src/moai_adk/core/tags/pre_commit_validator.py +0 -355
  23. moai_adk/templates/src/moai_adk/core/tags/reporter.py +0 -957
  24. moai_adk/templates/src/moai_adk/core/tags/validator.py +0 -897
  25. {moai_adk-0.8.3.dist-info → moai_adk-0.9.0.dist-info}/WHEEL +0 -0
  26. {moai_adk-0.8.3.dist-info → moai_adk-0.9.0.dist-info}/entry_points.txt +0 -0
  27. {moai_adk-0.8.3.dist-info → moai_adk-0.9.0.dist-info}/licenses/LICENSE +0 -0
moai_adk/templates/src/moai_adk/core/tags/reporter.py (removed)
@@ -1,957 +0,0 @@
1
- #!/usr/bin/env python3
2
- # @CODE:DOC-TAG-004 | Component 4: Documentation & Reporting system
3
- """TAG reporting and documentation generation for MoAI-ADK
4
-
5
- This module provides automated reporting for TAG system health and coverage:
6
- - Generates TAG inventories across entire codebase
7
- - Creates coverage matrices showing SPEC implementation status
8
- - Analyzes SPEC→CODE→TEST→DOC chain completeness
9
- - Produces statistics and metrics in multiple formats
10
- - Formats reports as Markdown, JSON, CSV, and HTML (optional)
11
-
12
- Architecture:
13
- ReportGenerator (orchestrator)
14
- ├── InventoryGenerator (tag-inventory.md)
15
- ├── MatrixGenerator (tag-matrix.md)
16
- ├── CoverageAnalyzer (coverage analysis)
17
- ├── StatisticsGenerator (tag-statistics.json)
18
- └── ReportFormatter (multi-format output)
19
-
20
- Usage:
21
- generator = ReportGenerator()
22
- result = generator.generate_all_reports("/path/to/project", "/path/to/output")
23
- print(f"Generated reports: {result.inventory_path}, {result.matrix_path}")
24
- """
25
-
26
- import json
27
- import re
28
- from dataclasses import dataclass, field
29
- from datetime import datetime
30
- from pathlib import Path
31
- from typing import Dict, List, Set
32
-
33
- # ============================================================================
34
- # Data Models
35
- # ============================================================================
36
-
37
- @dataclass
38
- class TagInventory:
39
- """Single TAG inventory item with metadata
40
-
41
- Attributes:
42
- tag_id: TAG identifier (e.g., "DOC-TAG-001")
43
- file_path: File path where TAG is located
44
- line_number: Line number of TAG
45
- context: Surrounding code snippet
46
- related_tags: List of related TAG strings
47
- last_modified: Last modification timestamp
48
- status: TAG status (active|deprecated|orphan|incomplete)
49
- """
50
- tag_id: str
51
- file_path: str
52
- line_number: int
53
- context: str
54
- related_tags: List[str] = field(default_factory=list)
55
- last_modified: datetime = field(default_factory=datetime.now)
56
- status: str = "active"
57
-
58
-
59
- @dataclass
60
- class TagMatrix:
61
- """Coverage matrix showing implementation status
62
-
63
- Attributes:
64
- rows: Dict mapping SPEC ID to coverage status
65
- {
66
- "AUTH-001": {
67
- "SPEC": True,
68
- "CODE": True,
69
- "TEST": False,
70
- "DOC": False
71
- }
72
- }
73
- completion_percentages: Dict mapping SPEC ID to completion percentage
74
- """
75
- rows: Dict[str, Dict[str, bool]] = field(default_factory=dict)
76
- completion_percentages: Dict[str, float] = field(default_factory=dict)
77
-
78
-
79
- @dataclass
80
- class CoverageMetrics:
81
- """Coverage metrics for a single SPEC
82
-
83
- Attributes:
84
- spec_id: SPEC identifier
85
- has_code: Whether CODE implementation exists
86
- has_test: Whether TEST exists
87
- has_doc: Whether DOC exists
88
- coverage_percentage: Overall completion percentage
89
- """
90
- spec_id: str
91
- has_code: bool = False
92
- has_test: bool = False
93
- has_doc: bool = False
94
- coverage_percentage: float = 0.0
95
-
96
-
97
- @dataclass
98
- class StatisticsReport:
99
- """Overall TAG statistics
100
-
101
- Attributes:
102
- generated_at: Report generation timestamp
103
- total_tags: Total TAG count
104
- by_type: Count by TAG type (SPEC, CODE, TEST, DOC)
105
- by_domain: Count by domain (AUTH, USER, etc.)
106
- coverage: Coverage metrics
107
- issues: Issue counts (orphans, incomplete chains, etc.)
108
- """
109
- generated_at: datetime
110
- total_tags: int
111
- by_type: Dict[str, int] = field(default_factory=dict)
112
- by_domain: Dict[str, int] = field(default_factory=dict)
113
- coverage: Dict[str, float] = field(default_factory=dict)
114
- issues: Dict[str, int] = field(default_factory=dict)
115
-
116
-
117
- @dataclass
118
- class ReportResult:
119
- """Result of report generation
120
-
121
- Attributes:
122
- inventory_path: Path to generated inventory file
123
- matrix_path: Path to generated matrix file
124
- statistics_path: Path to generated statistics file
125
- success: Whether generation succeeded
126
- error_message: Error message if failed
127
- """
128
- inventory_path: Path
129
- matrix_path: Path
130
- statistics_path: Path
131
- success: bool = True
132
- error_message: str = ""
133
-
134
-
135
- # ============================================================================
136
- # Core Generators
137
- # ============================================================================
138
-
139
- class InventoryGenerator:
140
- """Generates TAG inventory across codebase
141
-
142
- Scans entire codebase for TAGs and creates comprehensive inventory
143
- grouped by domain and type.
144
- """
145
-
146
- TAG_PATTERN = re.compile(r"@(SPEC|CODE|TEST|DOC):([A-Z]+(?:-[A-Z]+)*-\d{3})")
147
- IGNORE_PATTERNS = [".git/*", "node_modules/*", "__pycache__/*", "*.pyc", ".venv/*", "venv/*"]
148
-
149
- def generate_inventory(self, root_path: str) -> List[TagInventory]:
150
- """Scan directory and generate TAG inventory
151
-
152
- Args:
153
- root_path: Root directory to scan
154
-
155
- Returns:
156
- List of TagInventory objects
157
- """
158
- inventory = []
159
- root = Path(root_path)
160
-
161
- if not root.exists() or not root.is_dir():
162
- return inventory
163
-
164
- # Scan all files recursively
165
- for filepath in root.rglob("*"):
166
- if not filepath.is_file():
167
- continue
168
-
169
- # Check ignore patterns
170
- if self._should_ignore(filepath, root):
171
- continue
172
-
173
- # Extract TAGs from file
174
- tags = self._extract_tags_from_file(filepath, root)
175
- inventory.extend(tags)
176
-
177
- return inventory
178
-
179
- def _should_ignore(self, filepath: Path, root: Path) -> bool:
180
- """Check if file should be ignored
181
-
182
- Args:
183
- filepath: File path to check
184
- root: Root directory
185
-
186
- Returns:
187
- True if file should be ignored
188
- """
189
- try:
190
- relative = filepath.relative_to(root)
191
- relative_str = str(relative)
192
-
193
- for pattern in self.IGNORE_PATTERNS:
194
- pattern_clean = pattern.replace("/*", "").replace("*", "")
195
- if pattern_clean in relative_str:
196
- return True
197
-
198
- return False
199
-
200
- except ValueError:
201
- return True
202
-
203
- def _extract_tags_from_file(self, filepath: Path, root: Path) -> List[TagInventory]:
204
- """Extract TAGs from a single file
205
-
206
- Args:
207
- filepath: File to scan
208
- root: Root directory for relative paths
209
-
210
- Returns:
211
- List of TagInventory objects
212
- """
213
- inventory = []
214
-
215
- try:
216
- content = filepath.read_text(encoding="utf-8", errors="ignore")
217
- lines = content.splitlines()
218
-
219
- # Get file modification time
220
- last_modified = datetime.fromtimestamp(filepath.stat().st_mtime)
221
-
222
- for line_num, line in enumerate(lines, start=1):
223
- matches = self.TAG_PATTERN.findall(line)
224
-
225
- for tag_type, domain in matches:
226
- tag_id = domain
227
- full_tag = f"@{tag_type}:{domain}"
228
-
229
- # Extract context (±2 lines)
230
- context_lines = []
231
- for i in range(max(0, line_num - 3), min(len(lines), line_num + 2)):
232
- if i < len(lines):
233
- context_lines.append(lines[i])
234
- context = "\n".join(context_lines)
235
-
236
- # Create inventory item
237
- relative_path = str(filepath.relative_to(root))
238
- inventory.append(TagInventory(
239
- tag_id=tag_id,
240
- file_path=relative_path,
241
- line_number=line_num,
242
- context=context,
243
- related_tags=[], # Will be populated later
244
- last_modified=last_modified,
245
- status="active"
246
- ))
247
-
248
- except Exception:
249
- pass
250
-
251
- return inventory
252
-
253
- def group_by_domain(self, inventory: List[TagInventory]) -> Dict[str, List[TagInventory]]:
254
- """Group inventory by domain
255
-
256
- Args:
257
- inventory: List of TagInventory objects
258
-
259
- Returns:
260
- Dict mapping domain prefix to list of tags
261
- """
262
- grouped: Dict[str, List[TagInventory]] = {}
263
-
264
- for item in inventory:
265
- # Extract domain prefix (e.g., "AUTH" from "AUTH-LOGIN-001")
266
- parts = item.tag_id.split("-")
267
- if parts:
268
- domain = parts[0]
269
- if domain not in grouped:
270
- grouped[domain] = []
271
- grouped[domain].append(item)
272
-
273
- return grouped
274
-
275
- def format_as_markdown(self, grouped: Dict[str, List[TagInventory]]) -> str:
276
- """Format grouped inventory as markdown
277
-
278
- Args:
279
- grouped: Grouped inventory dict
280
-
281
- Returns:
282
- Markdown-formatted string
283
- """
284
- lines = []
285
- lines.append("# TAG Inventory")
286
- lines.append("")
287
- lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
288
-
289
- # Calculate totals
290
- total_tags = sum(len(tags) for tags in grouped.values())
291
- lines.append(f"Total TAGs: {total_tags}")
292
- lines.append("")
293
-
294
- # Group by domain
295
- lines.append("## By Domain")
296
- lines.append("")
297
-
298
- for domain in sorted(grouped.keys()):
299
- lines.append(f"### {domain}")
300
- lines.append("")
301
-
302
- for item in sorted(grouped[domain], key=lambda x: x.tag_id):
303
- lines.append(f"- **{item.tag_id}** (`{item.file_path}:{item.line_number}`)")
304
-
305
- lines.append("")
306
-
307
- return "\n".join(lines)
308
-
309
-
310
- class MatrixGenerator:
311
- """Generates TAG coverage matrix
312
-
313
- Creates matrix showing SPEC implementation status across
314
- CODE, TEST, and DOC components.
315
- """
316
-
317
- def generate_matrix(self, tags: Dict[str, Set[str]]) -> TagMatrix:
318
- """Generate coverage matrix from tags
319
-
320
- Args:
321
- tags: Dict mapping type to set of domain IDs
322
- {"SPEC": {"AUTH-001"}, "CODE": {"AUTH-001"}, ...}
323
-
324
- Returns:
325
- TagMatrix object
326
- """
327
- matrix = TagMatrix()
328
-
329
- # Get all unique domains
330
- all_domains = set()
331
- for tag_set in tags.values():
332
- all_domains.update(tag_set)
333
-
334
- # Build matrix rows
335
- for domain in all_domains:
336
- matrix.rows[domain] = {
337
- "SPEC": domain in tags.get("SPEC", set()),
338
- "CODE": domain in tags.get("CODE", set()),
339
- "TEST": domain in tags.get("TEST", set()),
340
- "DOC": domain in tags.get("DOC", set())
341
- }
342
-
343
- # Calculate completion percentage
344
- matrix.completion_percentages[domain] = self.calculate_completion_percentage(domain, tags)
345
-
346
- return matrix
347
-
348
- def calculate_completion_percentage(self, spec_id: str, tags: Dict[str, Set[str]]) -> float:
349
- """Calculate completion percentage for a SPEC
350
-
351
- Args:
352
- spec_id: SPEC domain ID
353
- tags: Tags dict
354
-
355
- Returns:
356
- Completion percentage (0-100)
357
- """
358
- components = ["SPEC", "CODE", "TEST", "DOC"]
359
- present = sum(1 for comp in components if spec_id in tags.get(comp, set()))
360
-
361
- return (present / len(components)) * 100.0
362
-
363
- def format_as_markdown_table(self, matrix: TagMatrix) -> str:
364
- """Format matrix as markdown table
365
-
366
- Args:
367
- matrix: TagMatrix object
368
-
369
- Returns:
370
- Markdown table string
371
- """
372
- lines = []
373
- lines.append("# TAG Coverage Matrix")
374
- lines.append("")
375
- lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
376
- lines.append("")
377
-
378
- # Table header
379
- lines.append("| SPEC | CODE | TEST | DOC | Completion |")
380
- lines.append("|------|------|------|-----|------------|")
381
-
382
- # Table rows
383
- for domain in sorted(matrix.rows.keys()):
384
- row = matrix.rows[domain]
385
- spec_mark = "✅" if row["SPEC"] else "❌"
386
- code_mark = "✅" if row["CODE"] else "❌"
387
- test_mark = "✅" if row["TEST"] else "❌"
388
- doc_mark = "✅" if row["DOC"] else "❌"
389
- completion = f"{matrix.completion_percentages[domain]:.0f}%"
390
-
391
- lines.append(f"| {domain} ({spec_mark}) | {code_mark} | {test_mark} | {doc_mark} | {completion} |")
392
-
393
- lines.append("")
394
-
395
- # Summary
396
- total_specs = len(matrix.rows)
397
- fully_implemented = sum(1 for pct in matrix.completion_percentages.values() if pct == 100.0)
398
-
399
- lines.append("## Summary")
400
- lines.append("")
401
- lines.append(f"- Total SPECs: {total_specs}")
402
- lines.append(f"- Fully Implemented (100%): {fully_implemented}")
403
- lines.append("")
404
-
405
- return "\n".join(lines)
406
-
407
- def format_as_csv(self, matrix: TagMatrix) -> str:
408
- """Format matrix as CSV
409
-
410
- Args:
411
- matrix: TagMatrix object
412
-
413
- Returns:
414
- CSV string
415
- """
416
- lines = []
417
- lines.append("SPEC,CODE,TEST,DOC,Completion")
418
-
419
- for domain in sorted(matrix.rows.keys()):
420
- row = matrix.rows[domain]
421
- spec = "1" if row["SPEC"] else "0"
422
- code = "1" if row["CODE"] else "0"
423
- test = "1" if row["TEST"] else "0"
424
- doc = "1" if row["DOC"] else "0"
425
- completion = f"{matrix.completion_percentages[domain]:.1f}"
426
-
427
- lines.append(f"{domain},{spec},{code},{test},{completion}")
428
-
429
- return "\n".join(lines)
430
-
431
-
432
- class CoverageAnalyzer:
433
- """Analyzes TAG coverage and chain integrity
434
-
435
- Analyzes SPEC→CODE→TEST→DOC chains to identify
436
- coverage gaps and orphan TAGs.
437
- """
438
-
439
- TAG_PATTERN = re.compile(r"@(SPEC|CODE|TEST|DOC):([A-Z]+(?:-[A-Z]+)*-\d{3})")
440
- IGNORE_PATTERNS = [".git/*", "node_modules/*", "__pycache__/*", "*.pyc", ".venv/*", "venv/*"]
441
-
442
- def analyze_spec_coverage(self, spec_id: str, root_path: str) -> CoverageMetrics:
443
- """Analyze coverage for a specific SPEC
444
-
445
- Args:
446
- spec_id: SPEC domain ID
447
- root_path: Root directory to scan
448
-
449
- Returns:
450
- CoverageMetrics object
451
- """
452
- tags = self._collect_tags(root_path)
453
-
454
- metrics = CoverageMetrics(spec_id=spec_id)
455
- metrics.has_code = spec_id in tags.get("CODE", set())
456
- metrics.has_test = spec_id in tags.get("TEST", set())
457
- metrics.has_doc = spec_id in tags.get("DOC", set())
458
-
459
- # Calculate coverage percentage
460
- components = [metrics.has_code, metrics.has_test, metrics.has_doc]
461
- metrics.coverage_percentage = (sum(components) / 3.0) * 100.0
462
-
463
- return metrics
464
-
465
- def get_specs_without_code(self, root_path: str) -> List[str]:
466
- """Find SPECs without CODE implementation
467
-
468
- Args:
469
- root_path: Root directory to scan
470
-
471
- Returns:
472
- List of SPEC IDs without CODE
473
- """
474
- tags = self._collect_tags(root_path)
475
-
476
- specs = tags.get("SPEC", set())
477
- codes = tags.get("CODE", set())
478
-
479
- return list(specs - codes)
480
-
481
- def get_code_without_tests(self, root_path: str) -> List[str]:
482
- """Find CODE without TEST
483
-
484
- Args:
485
- root_path: Root directory to scan
486
-
487
- Returns:
488
- List of CODE IDs without TEST
489
- """
490
- tags = self._collect_tags(root_path)
491
-
492
- codes = tags.get("CODE", set())
493
- tests = tags.get("TEST", set())
494
-
495
- return list(codes - tests)
496
-
497
- def get_code_without_docs(self, root_path: str) -> List[str]:
498
- """Find CODE without DOC
499
-
500
- Args:
501
- root_path: Root directory to scan
502
-
503
- Returns:
504
- List of CODE IDs without DOC
505
- """
506
- tags = self._collect_tags(root_path)
507
-
508
- codes = tags.get("CODE", set())
509
- docs = tags.get("DOC", set())
510
-
511
- return list(codes - docs)
512
-
513
- def calculate_overall_coverage(self, root_path: str) -> float:
514
- """Calculate overall coverage percentage
515
-
516
- Args:
517
- root_path: Root directory to scan
518
-
519
- Returns:
520
- Overall coverage percentage (0-100)
521
- """
522
- tags = self._collect_tags(root_path)
523
-
524
- specs = tags.get("SPEC", set())
525
- if not specs:
526
- return 0.0 if tags.get("CODE", set()) else 100.0
527
-
528
- # Calculate average coverage for all SPECs
529
- total_coverage = 0.0
530
- for spec_id in specs:
531
- metrics = self.analyze_spec_coverage(spec_id, root_path)
532
- total_coverage += metrics.coverage_percentage
533
-
534
- return total_coverage / len(specs)
535
-
536
- def _collect_tags(self, root_path: str) -> Dict[str, Set[str]]:
537
- """Collect all TAGs from directory
538
-
539
- Args:
540
- root_path: Root directory to scan
541
-
542
- Returns:
543
- Dict mapping type to set of domain IDs
544
- """
545
- tags: Dict[str, Set[str]] = {
546
- "SPEC": set(),
547
- "CODE": set(),
548
- "TEST": set(),
549
- "DOC": set()
550
- }
551
-
552
- root = Path(root_path)
553
- if not root.exists():
554
- return tags
555
-
556
- for filepath in root.rglob("*"):
557
- if not filepath.is_file():
558
- continue
559
-
560
- # Check ignore patterns
561
- if self._should_ignore(filepath, root):
562
- continue
563
-
564
- # Extract tags
565
- try:
566
- content = filepath.read_text(encoding="utf-8", errors="ignore")
567
- matches = self.TAG_PATTERN.findall(content)
568
-
569
- for tag_type, domain in matches:
570
- tags[tag_type].add(domain)
571
-
572
- except Exception:
573
- pass
574
-
575
- return tags
576
-
577
- def _should_ignore(self, filepath: Path, root: Path) -> bool:
578
- """Check if file should be ignored
579
-
580
- Args:
581
- filepath: File path
582
- root: Root directory
583
-
584
- Returns:
585
- True if should ignore
586
- """
587
- try:
588
- relative = filepath.relative_to(root)
589
- relative_str = str(relative)
590
-
591
- for pattern in self.IGNORE_PATTERNS:
592
- pattern_clean = pattern.replace("/*", "").replace("*", "")
593
- if pattern_clean in relative_str:
594
- return True
595
-
596
- return False
597
-
598
- except ValueError:
599
- return True
600
-
601
-
602
- class StatisticsGenerator:
603
- """Generates overall TAG statistics
604
-
605
- Produces aggregated statistics and metrics for TAG system health.
606
- """
607
-
608
- TAG_PATTERN = re.compile(r"@(SPEC|CODE|TEST|DOC):([A-Z]+(?:-[A-Z]+)*-\d{3})")
609
-
610
- def generate_statistics(self, tags: Dict[str, Set[str]]) -> StatisticsReport:
611
- """Generate statistics from tags
612
-
613
- Args:
614
- tags: Dict mapping type to set of domain IDs
615
-
616
- Returns:
617
- StatisticsReport object
618
- """
619
- report = StatisticsReport(
620
- generated_at=datetime.now(),
621
- total_tags=0,
622
- by_type={},
623
- by_domain={},
624
- coverage={},
625
- issues={}
626
- )
627
-
628
- # Count by type
629
- for tag_type, domains in tags.items():
630
- report.by_type[tag_type] = len(domains)
631
- report.total_tags += len(domains)
632
-
633
- # Count by domain
634
- all_domains = set()
635
- for domains in tags.values():
636
- all_domains.update(domains)
637
-
638
- for domain in all_domains:
639
- # Extract domain prefix
640
- parts = domain.split("-")
641
- if parts:
642
- domain_prefix = parts[0]
643
- if domain_prefix not in report.by_domain:
644
- report.by_domain[domain_prefix] = 0
645
- report.by_domain[domain_prefix] += 1
646
-
647
- # Calculate coverage metrics
648
- specs = tags.get("SPEC", set())
649
- codes = tags.get("CODE", set())
650
- tests = tags.get("TEST", set())
651
-
652
- if specs:
653
- spec_to_code = len(specs & codes) / len(specs) * 100.0
654
- report.coverage["spec_to_code"] = round(spec_to_code, 2)
655
-
656
- if codes:
657
- code_to_test = len(codes & tests) / len(codes) * 100.0
658
- report.coverage["code_to_test"] = round(code_to_test, 2)
659
-
660
- # Calculate overall coverage
661
- if specs:
662
- total_coverage = 0.0
663
- for spec in specs:
664
- components = 0
665
- if spec in codes:
666
- components += 1
667
- if spec in tests:
668
- components += 1
669
- if spec in tags.get("DOC", set()):
670
- components += 1
671
- total_coverage += (components / 3.0) * 100.0
672
-
673
- report.coverage["overall_percentage"] = round(total_coverage / len(specs), 2)
674
- else:
675
- report.coverage["overall_percentage"] = 0.0
676
-
677
- # Detect issues
678
- orphan_codes = codes - tests
679
- orphan_tests = tests - codes
680
- report.issues["orphan_count"] = len(orphan_codes) + len(orphan_tests)
681
-
682
- incomplete_specs = specs - codes
683
- incomplete_chains = len(incomplete_specs)
684
- for spec in specs & codes:
685
- if spec not in tests:
686
- incomplete_chains += 1
687
-
688
- report.issues["incomplete_chains"] = incomplete_chains
689
- report.issues["deprecated_count"] = 0 # Placeholder
690
-
691
- return report
692
-
693
- def format_as_json(self, stats: StatisticsReport) -> str:
694
- """Format statistics as JSON
695
-
696
- Args:
697
- stats: StatisticsReport object
698
-
699
- Returns:
700
- JSON string
701
- """
702
- data = {
703
- "generated_at": stats.generated_at.isoformat(),
704
- "total_tags": stats.total_tags,
705
- "by_type": stats.by_type,
706
- "by_domain": stats.by_domain,
707
- "coverage": stats.coverage,
708
- "issues": stats.issues
709
- }
710
-
711
- return json.dumps(data, indent=2)
712
-
713
- def format_as_human_readable(self, stats: StatisticsReport) -> str:
714
- """Format statistics as human-readable text
715
-
716
- Args:
717
- stats: StatisticsReport object
718
-
719
- Returns:
720
- Human-readable string
721
- """
722
- lines = []
723
- lines.append("# TAG Statistics")
724
- lines.append("")
725
- lines.append(f"Generated: {stats.generated_at.strftime('%Y-%m-%d %H:%M:%S')}")
726
- lines.append("")
727
-
728
- lines.append(f"Total TAGs: {stats.total_tags}")
729
- lines.append("")
730
-
731
- lines.append("## By Type")
732
- for tag_type, count in sorted(stats.by_type.items()):
733
- lines.append(f"- {tag_type}: {count}")
734
- lines.append("")
735
-
736
- lines.append("## By Domain")
737
- for domain, count in sorted(stats.by_domain.items()):
738
- lines.append(f"- {domain}: {count}")
739
- lines.append("")
740
-
741
- lines.append("## Coverage")
742
- for metric, value in sorted(stats.coverage.items()):
743
- lines.append(f"- {metric}: {value}%")
744
- lines.append("")
745
-
746
- return "\n".join(lines)
747
-
748
-
749
- class ReportFormatter:
750
- """Formats reports in multiple output formats
751
-
752
- Provides formatting utilities for inventory, matrix, and statistics
753
- in Markdown, HTML, CSV, and JSON formats.
754
- """
755
-
756
- def format_inventory_md(self, inventory: List[TagInventory]) -> str:
757
- """Format inventory as markdown
758
-
759
- Args:
760
- inventory: List of TagInventory objects
761
-
762
- Returns:
763
- Markdown string
764
- """
765
- generator = InventoryGenerator()
766
- grouped = generator.group_by_domain(inventory)
767
- return generator.format_as_markdown(grouped)
768
-
769
- def format_matrix_md(self, matrix: TagMatrix) -> str:
770
- """Format matrix as markdown
771
-
772
- Args:
773
- matrix: TagMatrix object
774
-
775
- Returns:
776
- Markdown string
777
- """
778
- generator = MatrixGenerator()
779
- return generator.format_as_markdown_table(matrix)
780
-
781
- def format_table(self, headers: List[str], rows: List[List[str]]) -> str:
782
- """Format data as markdown table
783
-
784
- Args:
785
- headers: Table headers
786
- rows: Table rows
787
-
788
- Returns:
789
- Markdown table string
790
- """
791
- lines = []
792
-
793
- # Header row
794
- lines.append("| " + " | ".join(headers) + " |")
795
-
796
- # Separator row
797
- lines.append("| " + " | ".join(["---"] * len(headers)) + " |")
798
-
799
- # Data rows
800
- for row in rows:
801
- lines.append("| " + " | ".join(row) + " |")
802
-
803
- return "\n".join(lines)
804
-
805
- def format_html_dashboard(self, inventory: List[TagInventory]) -> str:
806
- """Format inventory as HTML dashboard (OPTIONAL)
807
-
808
- Args:
809
- inventory: List of TagInventory objects
810
-
811
- Returns:
812
- HTML string
813
-
814
- Raises:
815
- NotImplementedError: HTML formatting is optional
816
- """
817
- raise NotImplementedError("HTML dashboard formatting is optional")
818
-
819
-
820
- class ReportGenerator:
821
- """Main orchestrator for report generation
822
-
823
- Coordinates all generators to produce complete reporting suite:
824
- - tag-inventory.md
825
- - tag-matrix.md
826
- - tag-statistics.json
827
- """
828
-
829
- def __init__(self):
830
- """Initialize report generator"""
831
- self.inventory_gen = InventoryGenerator()
832
- self.matrix_gen = MatrixGenerator()
833
- self.coverage_analyzer = CoverageAnalyzer()
834
- self.stats_gen = StatisticsGenerator()
835
- self.formatter = ReportFormatter()
836
-
837
- def generate_inventory_report(self, root_path: str) -> str:
838
- """Generate inventory report
839
-
840
- Args:
841
- root_path: Root directory to scan
842
-
843
- Returns:
844
- Markdown inventory report
845
- """
846
- inventory = self.inventory_gen.generate_inventory(root_path)
847
- return self.formatter.format_inventory_md(inventory)
848
-
849
- def generate_matrix_report(self, root_path: str) -> str:
850
- """Generate coverage matrix report
851
-
852
- Args:
853
- root_path: Root directory to scan
854
-
855
- Returns:
856
- Markdown matrix report
857
- """
858
- tags = self.coverage_analyzer._collect_tags(root_path)
859
- matrix = self.matrix_gen.generate_matrix(tags)
860
- return self.formatter.format_matrix_md(matrix)
861
-
862
- def generate_statistics_report(self, root_path: str) -> str:
863
- """Generate statistics report
864
-
865
- Args:
866
- root_path: Root directory to scan
867
-
868
- Returns:
869
- JSON statistics report
870
- """
871
- tags = self.coverage_analyzer._collect_tags(root_path)
872
- stats = self.stats_gen.generate_statistics(tags)
873
- return self.stats_gen.format_as_json(stats)
874
-
875
- def generate_combined_report(self, root_path: str) -> str:
876
- """Generate combined report with all sections
877
-
878
- Args:
879
- root_path: Root directory to scan
880
-
881
- Returns:
882
- Combined markdown report
883
- """
884
- lines = []
885
- lines.append("# MoAI-ADK TAG System Report")
886
- lines.append("")
887
- lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
888
- lines.append("")
889
-
890
- # Inventory section
891
- lines.append("---")
892
- lines.append("")
893
- lines.append(self.generate_inventory_report(root_path))
894
- lines.append("")
895
-
896
- # Matrix section
897
- lines.append("---")
898
- lines.append("")
899
- lines.append(self.generate_matrix_report(root_path))
900
- lines.append("")
901
-
902
- # Statistics section
903
- lines.append("---")
904
- lines.append("")
905
- lines.append("# Statistics")
906
- lines.append("")
907
- lines.append("```json")
908
- lines.append(self.generate_statistics_report(root_path))
909
- lines.append("```")
910
- lines.append("")
911
-
912
- return "\n".join(lines)
913
-
914
- def generate_all_reports(self, root_path: str, output_dir: str) -> ReportResult:
915
- """Generate all reports and save to output directory
916
-
917
- Args:
918
- root_path: Root directory to scan
919
- output_dir: Output directory for reports
920
-
921
- Returns:
922
- ReportResult with file paths
923
- """
924
- output = Path(output_dir)
925
- output.mkdir(parents=True, exist_ok=True)
926
-
927
- try:
928
- # Generate inventory
929
- inventory_path = output / "tag-inventory.md"
930
- inventory_report = self.generate_inventory_report(root_path)
931
- inventory_path.write_text(inventory_report, encoding="utf-8")
932
-
933
- # Generate matrix
934
- matrix_path = output / "tag-matrix.md"
935
- matrix_report = self.generate_matrix_report(root_path)
936
- matrix_path.write_text(matrix_report, encoding="utf-8")
937
-
938
- # Generate statistics
939
- statistics_path = output / "tag-statistics.json"
940
- statistics_report = self.generate_statistics_report(root_path)
941
- statistics_path.write_text(statistics_report, encoding="utf-8")
942
-
943
- return ReportResult(
944
- inventory_path=inventory_path,
945
- matrix_path=matrix_path,
946
- statistics_path=statistics_path,
947
- success=True
948
- )
949
-
950
- except Exception as e:
951
- return ReportResult(
952
- inventory_path=output / "tag-inventory.md",
953
- matrix_path=output / "tag-matrix.md",
954
- statistics_path=output / "tag-statistics.json",
955
- success=False,
956
- error_message=str(e)
957
- )
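
For reference, the removed reporter.py exposed ReportGenerator as its single entry point. Below is a minimal usage sketch based on the module's own docstring; the import path is inferred from the template's src/ layout, and the project/output paths are placeholders:

    from moai_adk.core.tags.reporter import ReportGenerator

    generator = ReportGenerator()
    result = generator.generate_all_reports("/path/to/project", "/path/to/output")

    if result.success:
        # ReportResult carries the paths of the three generated files
        print(f"Inventory:  {result.inventory_path}")
        print(f"Matrix:     {result.matrix_path}")
        print(f"Statistics: {result.statistics_path}")
    else:
        print(f"Report generation failed: {result.error_message}")

Note that 0.9.0 removes this module from the generated template along with the rest of the tags/ validators (validator.py, ci_validator.py, pre_commit_validator.py, cli.py), so templates created with 0.9.0 no longer ship this reporting code.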