doit-toolkit-cli 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of doit-toolkit-cli might be problematic. Click here for more details.
- doit_cli/__init__.py +1356 -0
- doit_cli/cli/__init__.py +26 -0
- doit_cli/cli/analytics_command.py +616 -0
- doit_cli/cli/context_command.py +213 -0
- doit_cli/cli/diagram_command.py +304 -0
- doit_cli/cli/fixit_command.py +641 -0
- doit_cli/cli/hooks_command.py +211 -0
- doit_cli/cli/init_command.py +613 -0
- doit_cli/cli/memory_command.py +293 -0
- doit_cli/cli/roadmapit_command.py +10 -0
- doit_cli/cli/status_command.py +117 -0
- doit_cli/cli/sync_prompts_command.py +248 -0
- doit_cli/cli/validate_command.py +196 -0
- doit_cli/cli/verify_command.py +204 -0
- doit_cli/cli/workflow_mixin.py +224 -0
- doit_cli/cli/xref_command.py +555 -0
- doit_cli/formatters/__init__.py +8 -0
- doit_cli/formatters/base.py +38 -0
- doit_cli/formatters/json_formatter.py +126 -0
- doit_cli/formatters/markdown_formatter.py +97 -0
- doit_cli/formatters/rich_formatter.py +257 -0
- doit_cli/main.py +51 -0
- doit_cli/models/__init__.py +139 -0
- doit_cli/models/agent.py +74 -0
- doit_cli/models/analytics_models.py +384 -0
- doit_cli/models/context_config.py +464 -0
- doit_cli/models/crossref_models.py +182 -0
- doit_cli/models/diagram_models.py +363 -0
- doit_cli/models/fixit_models.py +355 -0
- doit_cli/models/hook_config.py +125 -0
- doit_cli/models/project.py +91 -0
- doit_cli/models/results.py +121 -0
- doit_cli/models/search_models.py +228 -0
- doit_cli/models/status_models.py +195 -0
- doit_cli/models/sync_models.py +146 -0
- doit_cli/models/template.py +77 -0
- doit_cli/models/validation_models.py +175 -0
- doit_cli/models/workflow_models.py +319 -0
- doit_cli/prompts/__init__.py +5 -0
- doit_cli/prompts/fixit_prompts.py +344 -0
- doit_cli/prompts/interactive.py +390 -0
- doit_cli/rules/__init__.py +5 -0
- doit_cli/rules/builtin_rules.py +160 -0
- doit_cli/services/__init__.py +79 -0
- doit_cli/services/agent_detector.py +168 -0
- doit_cli/services/analytics_service.py +218 -0
- doit_cli/services/architecture_generator.py +290 -0
- doit_cli/services/backup_service.py +204 -0
- doit_cli/services/config_loader.py +113 -0
- doit_cli/services/context_loader.py +1123 -0
- doit_cli/services/coverage_calculator.py +142 -0
- doit_cli/services/crossref_service.py +237 -0
- doit_cli/services/cycle_time_calculator.py +134 -0
- doit_cli/services/date_inferrer.py +349 -0
- doit_cli/services/diagram_service.py +337 -0
- doit_cli/services/drift_detector.py +109 -0
- doit_cli/services/entity_parser.py +301 -0
- doit_cli/services/er_diagram_generator.py +197 -0
- doit_cli/services/fixit_service.py +699 -0
- doit_cli/services/github_service.py +192 -0
- doit_cli/services/hook_manager.py +258 -0
- doit_cli/services/hook_validator.py +528 -0
- doit_cli/services/input_validator.py +322 -0
- doit_cli/services/memory_search.py +527 -0
- doit_cli/services/mermaid_validator.py +334 -0
- doit_cli/services/prompt_transformer.py +91 -0
- doit_cli/services/prompt_writer.py +133 -0
- doit_cli/services/query_interpreter.py +428 -0
- doit_cli/services/report_exporter.py +219 -0
- doit_cli/services/report_generator.py +256 -0
- doit_cli/services/requirement_parser.py +112 -0
- doit_cli/services/roadmap_summarizer.py +209 -0
- doit_cli/services/rule_engine.py +443 -0
- doit_cli/services/scaffolder.py +215 -0
- doit_cli/services/score_calculator.py +172 -0
- doit_cli/services/section_parser.py +204 -0
- doit_cli/services/spec_scanner.py +327 -0
- doit_cli/services/state_manager.py +355 -0
- doit_cli/services/status_reporter.py +143 -0
- doit_cli/services/task_parser.py +347 -0
- doit_cli/services/template_manager.py +710 -0
- doit_cli/services/template_reader.py +158 -0
- doit_cli/services/user_journey_generator.py +214 -0
- doit_cli/services/user_story_parser.py +232 -0
- doit_cli/services/validation_service.py +188 -0
- doit_cli/services/validator.py +232 -0
- doit_cli/services/velocity_tracker.py +173 -0
- doit_cli/services/workflow_engine.py +405 -0
- doit_cli/templates/agent-file-template.md +28 -0
- doit_cli/templates/checklist-template.md +39 -0
- doit_cli/templates/commands/doit.checkin.md +363 -0
- doit_cli/templates/commands/doit.constitution.md +187 -0
- doit_cli/templates/commands/doit.documentit.md +485 -0
- doit_cli/templates/commands/doit.fixit.md +181 -0
- doit_cli/templates/commands/doit.implementit.md +265 -0
- doit_cli/templates/commands/doit.planit.md +262 -0
- doit_cli/templates/commands/doit.reviewit.md +355 -0
- doit_cli/templates/commands/doit.roadmapit.md +389 -0
- doit_cli/templates/commands/doit.scaffoldit.md +458 -0
- doit_cli/templates/commands/doit.specit.md +521 -0
- doit_cli/templates/commands/doit.taskit.md +304 -0
- doit_cli/templates/commands/doit.testit.md +277 -0
- doit_cli/templates/config/context.yaml +134 -0
- doit_cli/templates/config/hooks.yaml +93 -0
- doit_cli/templates/config/validation-rules.yaml +64 -0
- doit_cli/templates/github-issue-templates/epic.yml +78 -0
- doit_cli/templates/github-issue-templates/feature.yml +116 -0
- doit_cli/templates/github-issue-templates/task.yml +129 -0
- doit_cli/templates/hooks/.gitkeep +0 -0
- doit_cli/templates/hooks/post-commit.sh +25 -0
- doit_cli/templates/hooks/post-merge.sh +75 -0
- doit_cli/templates/hooks/pre-commit.sh +17 -0
- doit_cli/templates/hooks/pre-push.sh +18 -0
- doit_cli/templates/memory/completed_roadmap.md +50 -0
- doit_cli/templates/memory/constitution.md +125 -0
- doit_cli/templates/memory/roadmap.md +61 -0
- doit_cli/templates/plan-template.md +146 -0
- doit_cli/templates/scripts/bash/check-prerequisites.sh +166 -0
- doit_cli/templates/scripts/bash/common.sh +156 -0
- doit_cli/templates/scripts/bash/create-new-feature.sh +297 -0
- doit_cli/templates/scripts/bash/setup-plan.sh +61 -0
- doit_cli/templates/scripts/bash/update-agent-context.sh +675 -0
- doit_cli/templates/scripts/powershell/check-prerequisites.ps1 +148 -0
- doit_cli/templates/scripts/powershell/common.ps1 +137 -0
- doit_cli/templates/scripts/powershell/create-new-feature.ps1 +283 -0
- doit_cli/templates/scripts/powershell/setup-plan.ps1 +61 -0
- doit_cli/templates/scripts/powershell/update-agent-context.ps1 +406 -0
- doit_cli/templates/spec-template.md +159 -0
- doit_cli/templates/tasks-template.md +313 -0
- doit_cli/templates/vscode-settings.json +14 -0
- doit_toolkit_cli-0.1.10.dist-info/METADATA +324 -0
- doit_toolkit_cli-0.1.10.dist-info/RECORD +135 -0
- doit_toolkit_cli-0.1.10.dist-info/WHEEL +4 -0
- doit_toolkit_cli-0.1.10.dist-info/entry_points.txt +2 -0
- doit_toolkit_cli-0.1.10.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
"""Quality score calculator for spec validation."""
|
|
2
|
+
|
|
3
|
+
from ..models.validation_models import Severity, ValidationIssue
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class ScoreCalculator:
    """Calculates quality scores from validation issues.

    Uses weighted category scoring where each issue deducts points
    based on its category and severity. The score is deterministic -
    the same issues always produce the same score.
    """

    # Category weights (maximum points each category can deduct).
    CATEGORY_WEIGHTS: dict[str, int] = {
        "structure": 20,  # Missing required sections
        "requirements": 15,  # Naming convention violations
        "acceptance": 10,  # Missing scenarios
        "clarity": 5,  # Unresolved markers
        "naming": 5,  # Format violations
    }

    # Severity multipliers applied to the category weight per issue.
    SEVERITY_MULTIPLIERS: dict[Severity, float] = {
        Severity.ERROR: 1.0,  # Full weight deduction
        Severity.WARNING: 0.5,  # Half weight deduction
        Severity.INFO: 0.1,  # Minor deduction
    }

    # Default weight for unknown categories.
    DEFAULT_WEIGHT: int = 5

    def _deductions_by_category(
        self, issues: list[ValidationIssue]
    ) -> dict[str, float]:
        """Accumulate weighted point deductions per category.

        Each issue deducts ``CATEGORY_WEIGHTS[category] *
        SEVERITY_MULTIPLIERS[severity]``; an unknown severity falls back to a
        0.5 multiplier. Multiple issues in the same category are additive but
        capped at the category weight, so one noisy category cannot dominate
        the overall score.

        Args:
            issues: List of validation issues found.

        Returns:
            Dict mapping category name to total points deducted (float).
        """
        deductions: dict[str, float] = {}
        for issue in issues:
            category = self._get_category(issue.rule_id)
            weight = self.CATEGORY_WEIGHTS.get(category, self.DEFAULT_WEIGHT)
            multiplier = self.SEVERITY_MULTIPLIERS.get(issue.severity, 0.5)
            current = deductions.get(category, 0.0)
            # Additive within a category, capped at the category weight.
            deductions[category] = min(current + weight * multiplier, float(weight))
        return deductions

    def calculate(self, issues: list[ValidationIssue]) -> int:
        """Calculate quality score from issues.

        Score = 100 - sum(deductions)

        For each issue:
            deduction = CATEGORY_WEIGHTS[category] * SEVERITY_MULTIPLIERS[severity]

        Multiple issues in same category are additive but capped at category weight.

        Args:
            issues: List of validation issues found.

        Returns:
            Integer score 0-100 (100 = perfect, 0 = many issues).
        """
        if not issues:
            return 100

        total_deduction = sum(self._deductions_by_category(issues).values())

        # Clamp to 0-100; int() truncation keeps the result deterministic.
        return max(0, int(100 - total_deduction))

    def get_breakdown(self, issues: list[ValidationIssue]) -> dict[str, int]:
        """Get score breakdown by category.

        Deductions are accumulated as floats exactly like :meth:`calculate`
        and truncated to int only at the end. (Previously each issue's
        deduction was truncated individually, so e.g. a single INFO issue in
        a weight-5 category — 0.5 points — disappeared from the breakdown
        entirely and the breakdown disagreed with the computed score.)

        Args:
            issues: List of validation issues.

        Returns:
            Dict mapping category to points deducted.
        """
        return {
            category: int(deduction)
            for category, deduction in self._deductions_by_category(issues).items()
        }

    def _get_category(self, rule_id: str) -> str:
        """Determine category from rule ID.

        Known rule IDs map directly; unknown IDs fall back to keyword
        heuristics, and finally to the "naming" category.

        Args:
            rule_id: The rule identifier.

        Returns:
            Category name.
        """
        # Map rule IDs to categories based on naming patterns
        category_patterns = {
            "structure": [
                "missing-user-scenarios",
                "missing-requirements",
                "missing-success-criteria",
            ],
            "requirements": [
                "fr-naming-convention",
                "sc-naming-convention",
            ],
            "acceptance": [
                "missing-acceptance-scenarios",
                "incomplete-given-when-then",
            ],
            "clarity": [
                "unresolved-clarification",
                "todo-in-approved-spec",
            ],
            "naming": [
                "feature-branch-format",
            ],
        }

        for category, rule_ids in category_patterns.items():
            if rule_id in rule_ids:
                return category

        # Default to category based on naming heuristics
        rule_lower = rule_id.lower()
        if "missing" in rule_lower:
            return "structure"
        if "naming" in rule_lower or "convention" in rule_lower:
            return "requirements"
        if "acceptance" in rule_lower or "scenario" in rule_lower:
            return "acceptance"
        if "todo" in rule_lower or "clarification" in rule_lower:
            return "clarity"

        return "naming"  # Default category

    def get_score_interpretation(self, score: int) -> str:
        """Get interpretation text for a score.

        Args:
            score: Quality score 0-100.

        Returns:
            Interpretation string.
        """
        if score >= 90:
            return "Excellent - minor or no issues"
        if score >= 70:
            return "Good - some warnings, no errors"
        if score >= 50:
            return "Fair - has errors or many warnings"
        return "Poor - multiple critical issues"

    def get_status_from_score(self, score: int) -> str:
        """Get status string from score.

        Args:
            score: Quality score 0-100.

        Returns:
            Status string (PASS, WARN, or FAIL).
        """
        if score >= 70:
            return "PASS"
        if score >= 50:
            return "WARN"
        return "FAIL"
|
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
"""Parser for AUTO-GENERATED sections in markdown files."""
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
from ..models.diagram_models import DiagramSection
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class SectionParser:
    """Parses AUTO-GENERATED sections from markdown files.

    Finds and extracts content between BEGIN and END markers:
    <!-- BEGIN:AUTO-GENERATED section="name" -->
    [content]
    <!-- END:AUTO-GENERATED -->
    """

    # Regex pattern for BEGIN marker with section name
    BEGIN_PATTERN = re.compile(
        r'<!--\s*BEGIN:AUTO-GENERATED\s+section="([^"]+)"\s*-->', re.IGNORECASE
    )

    # Regex pattern for END marker
    END_PATTERN = re.compile(r"<!--\s*END:AUTO-GENERATED\s*-->", re.IGNORECASE)

    def find_sections(self, content: str) -> list[DiagramSection]:
        """Find all AUTO-GENERATED sections in content.

        Args:
            content: File content to parse

        Returns:
            List of DiagramSection objects with section details
        """
        sections = []
        lines = content.split("\n")

        current_section: Optional[DiagramSection] = None
        section_content_lines: list[str] = []

        for line_num, line in enumerate(lines, start=1):
            # Check for BEGIN marker
            begin_match = self.BEGIN_PATTERN.search(line)
            if begin_match:
                # Start new section (an unterminated earlier section is
                # discarded: only BEGIN..END pairs are reported).
                section_name = begin_match.group(1)
                current_section = DiagramSection(
                    section_name=section_name,
                    start_line=line_num,
                    end_line=0,  # Will be set when END found
                    content="",
                )
                section_content_lines = []
                continue

            # Check for END marker
            if current_section is not None and self.END_PATTERN.search(line):
                # Complete current section
                current_section.end_line = line_num
                current_section.content = "\n".join(section_content_lines)
                sections.append(current_section)
                current_section = None
                section_content_lines = []
                continue

            # Accumulate content within section
            if current_section is not None:
                section_content_lines.append(line)

        return sections

    def find_section(self, content: str, section_name: str) -> Optional[DiagramSection]:
        """Find a specific AUTO-GENERATED section by name.

        Args:
            content: File content to parse
            section_name: Name of section to find (e.g., "user-journey")

        Returns:
            DiagramSection if found, None otherwise
        """
        sections = self.find_sections(content)
        for section in sections:
            if section.section_name == section_name:
                return section
        return None

    def replace_section_content(
        self, content: str, section_name: str, new_content: str
    ) -> tuple[str, bool]:
        """Replace content within an AUTO-GENERATED section.

        The BEGIN and END marker lines are preserved; only the content
        between them is replaced.

        Args:
            content: Original file content
            section_name: Name of section to update
            new_content: New content to insert (without markers)

        Returns:
            Tuple of (updated content, success boolean)
        """
        section = self.find_section(content, section_name)
        if section is None:
            return content, False

        lines = content.split("\n")

        # Build new content: before + marker + new + marker + after
        before_lines = lines[: section.start_line]  # Includes BEGIN marker line
        after_lines = lines[section.end_line - 1 :]  # Starts from END marker line

        # Ensure new content has proper newlines
        new_content_clean = new_content.strip()

        # Reconstruct
        result_lines = before_lines + [new_content_clean] + after_lines

        return "\n".join(result_lines), True

    def insert_section_markers(
        self,
        content: str,
        section_name: str,
        after_heading: str,
        initial_content: str = "",
    ) -> tuple[str, bool]:
        """Insert new AUTO-GENERATED section markers after a heading.

        Args:
            content: Original file content
            section_name: Name for the new section
            after_heading: Heading text to insert after; accepted with or
                without markdown hashes (e.g., "## User Journey" or
                "User Journey")
            initial_content: Initial content to place between markers

        Returns:
            Tuple of (updated content, success boolean)
        """
        # Check if section already exists
        if self.find_section(content, section_name) is not None:
            return content, False  # Section already exists

        lines = content.split("\n")

        # Fix: callers pass headings either bare ("User Journey") or with
        # markdown hashes ("## User Journey", as this docstring documents).
        # Strip the hashes before escaping — otherwise the escaped literal
        # "##" after the "#+" prefix demands extra hashes and the heading
        # is never matched.
        heading_text = after_heading.lstrip("#").strip()
        heading_pattern = re.compile(
            rf"^#+\s*{re.escape(heading_text)}\s*$", re.IGNORECASE
        )

        insert_index = -1
        for i, line in enumerate(lines):
            if heading_pattern.match(line.strip()):
                insert_index = i + 1
                break

        if insert_index == -1:
            return content, False  # Heading not found

        # Build marker block
        begin_marker = f'<!-- BEGIN:AUTO-GENERATED section="{section_name}" -->'
        end_marker = "<!-- END:AUTO-GENERATED -->"

        marker_block = [
            "",
            begin_marker,
            initial_content if initial_content else "",
            end_marker,
            "",
        ]

        # Insert after heading
        result_lines = lines[:insert_index] + marker_block + lines[insert_index:]

        return "\n".join(result_lines), True

    def extract_mermaid_from_section(self, section: DiagramSection) -> Optional[str]:
        """Extract Mermaid diagram content from a section.

        Args:
            section: DiagramSection to extract from

        Returns:
            Mermaid content without code fences, or None if not found
        """
        content = section.content

        # Look for ```mermaid ... ``` block
        mermaid_pattern = re.compile(
            r"```mermaid\s*\n(.*?)\n```", re.DOTALL | re.IGNORECASE
        )
        match = mermaid_pattern.search(content)

        if match:
            return match.group(1).strip()

        return None

    def has_section(self, content: str, section_name: str) -> bool:
        """Check if a section exists in content.

        Args:
            content: File content to check
            section_name: Name of section to look for

        Returns:
            True if section exists, False otherwise
        """
        return self.find_section(content, section_name) is not None
|
|
@@ -0,0 +1,327 @@
|
|
|
1
|
+
"""Spec scanner service for discovering and parsing spec metadata."""
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
import subprocess
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
from ..models.status_models import SpecState, SpecStatus, StatusReport
|
|
10
|
+
from ..models.validation_models import ValidationResult
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class NotADoitProjectError(Exception):
    """Signals that the given project root is not a doit project (no .doit/ dir)."""
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class SpecNotFoundError(Exception):
    """Signals that a requested spec directory (or its spec.md) does not exist."""
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class SpecScanner:
    """Scans specs directory and parses spec metadata.

    This service discovers all specification directories in a project,
    parses their status from spec.md frontmatter, and returns structured
    SpecStatus objects for each spec found.
    """

    # Regex pattern to extract status from spec.md
    # Matches: **Status**: Draft (or In Progress, Complete, Approved)
    STATUS_PATTERN = re.compile(
        r"\*\*Status\*\*:\s*([A-Za-z]+(?:\s+[A-Za-z]+)?)", re.IGNORECASE
    )

    # Default specs directory name
    SPECS_DIR = "specs"

    def __init__(
        self,
        project_root: Optional[Path] = None,
        validate: bool = True,
    ) -> None:
        """Initialize scanner with project root directory.

        Args:
            project_root: Root directory of the doit project.
                Defaults to current working directory.
            validate: Whether to run validation on specs.

        Raises:
            NotADoitProjectError: If project_root lacks .doit/ directory.
        """
        self.project_root = project_root or Path.cwd()
        self.validate = validate
        self._validator = None

        # Fail fast when this isn't a doit project.
        doit_dir = self.project_root / ".doit"
        if not doit_dir.exists():
            raise NotADoitProjectError(
                f"Not a doit project. Run 'doit init' first. "
                f"(Missing .doit/ directory in {self.project_root})"
            )

    @property
    def validator(self):
        """Lazy-load the ValidationService.

        Best-effort: if the service can't be imported, None is returned and
        the import is re-attempted on the next access.
        """
        if self._validator is None and self.validate:
            try:
                from .validation_service import ValidationService

                self._validator = ValidationService(self.project_root)
            except ImportError:
                # Validation service not available
                self._validator = None
        return self._validator

    def scan(self, include_validation: bool = True) -> list[SpecStatus]:
        """Scan specs/ directory and return all spec statuses.

        Discovers all subdirectories in specs/ that contain a spec.md file
        and parses their metadata.

        Args:
            include_validation: Whether to include validation results.

        Returns:
            List of SpecStatus objects, one per spec directory.
            Sorted by spec name alphabetically.
        """
        specs_dir = self.project_root / self.SPECS_DIR

        if not specs_dir.exists():
            return []

        statuses: list[SpecStatus] = []

        # Find all spec.md files in subdirectories (sorted => deterministic
        # alphabetical output).
        for spec_file in sorted(specs_dir.rglob("spec.md")):
            # Get spec name from parent directory
            spec_name = spec_file.parent.name

            # Skip if this is a nested spec (only want top-level)
            relative_path = spec_file.parent.relative_to(specs_dir)
            if len(relative_path.parts) > 1:
                continue

            status = self._parse_spec(spec_name, spec_file)

            # Add validation if enabled
            if include_validation and self.validate and self.validator:
                status = self._add_validation(status)
                status = self._compute_blocking(status)

            statuses.append(status)

        return statuses

    def scan_single(self, spec_name: str) -> SpecStatus:
        """Parse status for a single spec by name.

        Args:
            spec_name: Directory name of the spec (e.g., "032-status-dashboard")

        Returns:
            SpecStatus for the specified spec.

        Raises:
            SpecNotFoundError: If spec directory doesn't exist.
        """
        spec_dir = self.project_root / self.SPECS_DIR / spec_name
        spec_file = spec_dir / "spec.md"

        if not spec_dir.exists() or not spec_file.exists():
            raise SpecNotFoundError(
                f"Spec not found: {spec_name}. "
                f"Expected spec.md at {spec_file}"
            )

        status = self._parse_spec(spec_name, spec_file)

        if self.validate and self.validator:
            status = self._add_validation(status)
            status = self._compute_blocking(status)

        return status

    def _parse_spec(self, spec_name: str, spec_file: Path) -> SpecStatus:
        """Parse a single spec.md file and extract metadata.

        Args:
            spec_name: Name of the spec (directory name)
            spec_file: Path to the spec.md file

        Returns:
            SpecStatus with parsed metadata or error state
        """
        try:
            content = spec_file.read_text(encoding="utf-8")
            status = self._parse_status(content)
            last_modified = datetime.fromtimestamp(spec_file.stat().st_mtime)

            return SpecStatus(
                name=spec_name,
                path=spec_file,
                status=status,
                last_modified=last_modified,
                validation_result=None,
                is_blocking=False,
                error=None,
            )

        except (OSError, UnicodeDecodeError) as e:
            # File exists but couldn't be read
            return SpecStatus(
                name=spec_name,
                path=spec_file,
                status=SpecState.ERROR,
                last_modified=datetime.now(),
                validation_result=None,
                is_blocking=False,
                error=f"Unable to read file: {e}",
            )

    def _parse_status(self, content: str) -> SpecState:
        """Extract status from spec.md content.

        Looks for pattern: **Status**: <value>

        Args:
            content: Full content of spec.md file

        Returns:
            SpecState enum value, or ERROR if not found/parseable
        """
        match = self.STATUS_PATTERN.search(content)
        if not match:
            return SpecState.ERROR

        status_text = match.group(1).strip()
        return SpecState.from_string(status_text)

    def _add_validation(self, spec_status: SpecStatus) -> SpecStatus:
        """Add validation result to a SpecStatus.

        Args:
            spec_status: SpecStatus to add validation to.

        Returns:
            Updated SpecStatus with validation_result populated.
        """
        if self.validator is None or spec_status.error:
            return spec_status

        try:
            result = self.validator.validate_file(spec_status.path)
            return SpecStatus(
                name=spec_status.name,
                path=spec_status.path,
                status=spec_status.status,
                last_modified=spec_status.last_modified,
                validation_result=result,
                is_blocking=spec_status.is_blocking,
                error=spec_status.error,
            )
        except Exception as e:
            # Validation failed, record the failure rather than crash the scan
            return SpecStatus(
                name=spec_status.name,
                path=spec_status.path,
                status=spec_status.status,
                last_modified=spec_status.last_modified,
                validation_result=None,
                is_blocking=spec_status.is_blocking,
                error=f"Validation error: {e}",
            )

    def _compute_blocking(self, spec_status: SpecStatus) -> SpecStatus:
        """Compute whether a spec is blocking commits.

        A spec is blocking if:
        1. Status is IN_PROGRESS and validation fails, OR
        2. Status is DRAFT and validation fails AND spec is git-staged

        Args:
            spec_status: SpecStatus to check.

        Returns:
            Updated SpecStatus with is_blocking computed.
        """
        is_blocking = False

        # Check if validation failed
        validation_failed = (
            spec_status.validation_result is not None
            and spec_status.validation_result.error_count > 0
        )

        if validation_failed:
            if spec_status.status == SpecState.IN_PROGRESS:
                # In Progress specs always block when validation fails
                is_blocking = True
            elif spec_status.status == SpecState.DRAFT:
                # Draft specs only block if they're staged
                is_blocking = self._is_git_staged(spec_status.path)

        return SpecStatus(
            name=spec_status.name,
            path=spec_status.path,
            status=spec_status.status,
            last_modified=spec_status.last_modified,
            validation_result=spec_status.validation_result,
            is_blocking=is_blocking,
            error=spec_status.error,
        )

    def _is_git_staged(self, spec_path: Path) -> bool:
        """Check if a spec file is staged for commit.

        Args:
            spec_path: Path to the spec file.

        Returns:
            True if the file is in git's staging area.
        """
        try:
            result = subprocess.run(
                ["git", "diff", "--cached", "--name-only"],
                capture_output=True,
                text=True,
                cwd=self.project_root,
            )
            if result.returncode != 0:
                return False

            try:
                relative_path = spec_path.relative_to(self.project_root)
            except ValueError:
                # spec_path is outside the project root
                return False

            # Fix: git prints POSIX-style relative paths, one per line.
            # Compare exact entries using as_posix() instead of the old
            # substring test, which never matched on Windows (backslash
            # separators in str(Path)) and false-positived on look-alike
            # names such as "spec.md" inside "spec.md.bak".
            staged_files = {line.strip() for line in result.stdout.splitlines()}
            return relative_path.as_posix() in staged_files

        except (FileNotFoundError, subprocess.SubprocessError):
            # Git not available or command failed
            return False

    def generate_report(self, include_validation: bool = True) -> StatusReport:
        """Scan all specs and generate a StatusReport.

        Convenience method that combines scan() with report generation.

        Args:
            include_validation: Whether to include validation results.

        Returns:
            StatusReport containing all spec statuses and computed stats.
        """
        specs = self.scan(include_validation=include_validation)
        return StatusReport(
            specs=specs,
            generated_at=datetime.now(),
            project_root=self.project_root,
        )
|