doit-toolkit-cli 0.1.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- doit_cli/__init__.py +1356 -0
- doit_cli/cli/__init__.py +26 -0
- doit_cli/cli/analytics_command.py +616 -0
- doit_cli/cli/context_command.py +213 -0
- doit_cli/cli/diagram_command.py +304 -0
- doit_cli/cli/fixit_command.py +641 -0
- doit_cli/cli/hooks_command.py +211 -0
- doit_cli/cli/init_command.py +613 -0
- doit_cli/cli/memory_command.py +293 -0
- doit_cli/cli/status_command.py +117 -0
- doit_cli/cli/sync_prompts_command.py +248 -0
- doit_cli/cli/validate_command.py +196 -0
- doit_cli/cli/verify_command.py +204 -0
- doit_cli/cli/workflow_mixin.py +224 -0
- doit_cli/cli/xref_command.py +555 -0
- doit_cli/formatters/__init__.py +8 -0
- doit_cli/formatters/base.py +38 -0
- doit_cli/formatters/json_formatter.py +126 -0
- doit_cli/formatters/markdown_formatter.py +97 -0
- doit_cli/formatters/rich_formatter.py +257 -0
- doit_cli/main.py +49 -0
- doit_cli/models/__init__.py +139 -0
- doit_cli/models/agent.py +74 -0
- doit_cli/models/analytics_models.py +384 -0
- doit_cli/models/context_config.py +464 -0
- doit_cli/models/crossref_models.py +182 -0
- doit_cli/models/diagram_models.py +363 -0
- doit_cli/models/fixit_models.py +355 -0
- doit_cli/models/hook_config.py +125 -0
- doit_cli/models/project.py +91 -0
- doit_cli/models/results.py +121 -0
- doit_cli/models/search_models.py +228 -0
- doit_cli/models/status_models.py +195 -0
- doit_cli/models/sync_models.py +146 -0
- doit_cli/models/template.py +77 -0
- doit_cli/models/validation_models.py +175 -0
- doit_cli/models/workflow_models.py +319 -0
- doit_cli/prompts/__init__.py +5 -0
- doit_cli/prompts/fixit_prompts.py +344 -0
- doit_cli/prompts/interactive.py +390 -0
- doit_cli/rules/__init__.py +5 -0
- doit_cli/rules/builtin_rules.py +160 -0
- doit_cli/services/__init__.py +79 -0
- doit_cli/services/agent_detector.py +168 -0
- doit_cli/services/analytics_service.py +218 -0
- doit_cli/services/architecture_generator.py +290 -0
- doit_cli/services/backup_service.py +204 -0
- doit_cli/services/config_loader.py +113 -0
- doit_cli/services/context_loader.py +1121 -0
- doit_cli/services/coverage_calculator.py +142 -0
- doit_cli/services/crossref_service.py +237 -0
- doit_cli/services/cycle_time_calculator.py +134 -0
- doit_cli/services/date_inferrer.py +349 -0
- doit_cli/services/diagram_service.py +337 -0
- doit_cli/services/drift_detector.py +109 -0
- doit_cli/services/entity_parser.py +301 -0
- doit_cli/services/er_diagram_generator.py +197 -0
- doit_cli/services/fixit_service.py +699 -0
- doit_cli/services/github_service.py +192 -0
- doit_cli/services/hook_manager.py +258 -0
- doit_cli/services/hook_validator.py +528 -0
- doit_cli/services/input_validator.py +322 -0
- doit_cli/services/memory_search.py +527 -0
- doit_cli/services/mermaid_validator.py +334 -0
- doit_cli/services/prompt_transformer.py +91 -0
- doit_cli/services/prompt_writer.py +133 -0
- doit_cli/services/query_interpreter.py +428 -0
- doit_cli/services/report_exporter.py +219 -0
- doit_cli/services/report_generator.py +256 -0
- doit_cli/services/requirement_parser.py +112 -0
- doit_cli/services/roadmap_summarizer.py +209 -0
- doit_cli/services/rule_engine.py +443 -0
- doit_cli/services/scaffolder.py +215 -0
- doit_cli/services/score_calculator.py +172 -0
- doit_cli/services/section_parser.py +204 -0
- doit_cli/services/spec_scanner.py +327 -0
- doit_cli/services/state_manager.py +355 -0
- doit_cli/services/status_reporter.py +143 -0
- doit_cli/services/task_parser.py +347 -0
- doit_cli/services/template_manager.py +710 -0
- doit_cli/services/template_reader.py +158 -0
- doit_cli/services/user_journey_generator.py +214 -0
- doit_cli/services/user_story_parser.py +232 -0
- doit_cli/services/validation_service.py +188 -0
- doit_cli/services/validator.py +232 -0
- doit_cli/services/velocity_tracker.py +173 -0
- doit_cli/services/workflow_engine.py +405 -0
- doit_cli/templates/agent-file-template.md +28 -0
- doit_cli/templates/checklist-template.md +39 -0
- doit_cli/templates/commands/doit.checkin.md +363 -0
- doit_cli/templates/commands/doit.constitution.md +187 -0
- doit_cli/templates/commands/doit.documentit.md +485 -0
- doit_cli/templates/commands/doit.fixit.md +181 -0
- doit_cli/templates/commands/doit.implementit.md +265 -0
- doit_cli/templates/commands/doit.planit.md +262 -0
- doit_cli/templates/commands/doit.reviewit.md +355 -0
- doit_cli/templates/commands/doit.roadmapit.md +368 -0
- doit_cli/templates/commands/doit.scaffoldit.md +458 -0
- doit_cli/templates/commands/doit.specit.md +521 -0
- doit_cli/templates/commands/doit.taskit.md +304 -0
- doit_cli/templates/commands/doit.testit.md +277 -0
- doit_cli/templates/config/context.yaml +134 -0
- doit_cli/templates/config/hooks.yaml +93 -0
- doit_cli/templates/config/validation-rules.yaml +64 -0
- doit_cli/templates/github-issue-templates/epic.yml +78 -0
- doit_cli/templates/github-issue-templates/feature.yml +116 -0
- doit_cli/templates/github-issue-templates/task.yml +129 -0
- doit_cli/templates/hooks/.gitkeep +0 -0
- doit_cli/templates/hooks/post-commit.sh +25 -0
- doit_cli/templates/hooks/post-merge.sh +75 -0
- doit_cli/templates/hooks/pre-commit.sh +17 -0
- doit_cli/templates/hooks/pre-push.sh +18 -0
- doit_cli/templates/memory/completed_roadmap.md +50 -0
- doit_cli/templates/memory/constitution.md +125 -0
- doit_cli/templates/memory/roadmap.md +61 -0
- doit_cli/templates/plan-template.md +146 -0
- doit_cli/templates/scripts/bash/check-prerequisites.sh +166 -0
- doit_cli/templates/scripts/bash/common.sh +156 -0
- doit_cli/templates/scripts/bash/create-new-feature.sh +297 -0
- doit_cli/templates/scripts/bash/setup-plan.sh +61 -0
- doit_cli/templates/scripts/bash/update-agent-context.sh +675 -0
- doit_cli/templates/scripts/powershell/check-prerequisites.ps1 +148 -0
- doit_cli/templates/scripts/powershell/common.ps1 +137 -0
- doit_cli/templates/scripts/powershell/create-new-feature.ps1 +283 -0
- doit_cli/templates/scripts/powershell/setup-plan.ps1 +61 -0
- doit_cli/templates/scripts/powershell/update-agent-context.ps1 +406 -0
- doit_cli/templates/spec-template.md +159 -0
- doit_cli/templates/tasks-template.md +313 -0
- doit_cli/templates/vscode-settings.json +14 -0
- doit_toolkit_cli-0.1.9.dist-info/METADATA +324 -0
- doit_toolkit_cli-0.1.9.dist-info/RECORD +134 -0
- doit_toolkit_cli-0.1.9.dist-info/WHEEL +4 -0
- doit_toolkit_cli-0.1.9.dist-info/entry_points.txt +2 -0
- doit_toolkit_cli-0.1.9.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""Agent detector service for detecting existing AI agent configuration."""
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
from ..models.agent import Agent
|
|
7
|
+
from ..models.project import Project
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class AgentDetector:
    """Service for detecting which AI agents are configured in a project."""

    def __init__(self, project: Project):
        """Initialize agent detector.

        Args:
            project: Project to detect agents for
        """
        self.project = project

    def detect_agents(self) -> list[Agent]:
        """Detect which agents are already configured in the project.

        Detection order:
        1. Check for existing .claude/ directory -> Claude
        2. Check for existing .github/copilot-instructions.md -> Copilot
        3. Check for existing .github/prompts/ directory -> Copilot

        Returns:
            List of detected agents
        """
        detected = []

        if self._has_claude_setup():
            detected.append(Agent.CLAUDE)

        if self._has_copilot_setup():
            detected.append(Agent.COPILOT)

        return detected

    def _has_claude_setup(self) -> bool:
        """Check if project has Claude setup.

        Returns:
            True if .claude/ or .claude/commands/ exists
        """
        claude_dir = self.project.path / ".claude"
        claude_commands = claude_dir / "commands"

        # NOTE: the commands check is technically redundant (a child path
        # cannot exist without its parent directory) but is kept to mirror
        # the documented contract.
        return claude_dir.exists() or claude_commands.exists()

    def _has_copilot_setup(self) -> bool:
        """Check if project has Copilot setup.

        Returns:
            True if .github/copilot-instructions.md or .github/prompts/ exists
        """
        copilot_instructions = self.project.path / ".github" / "copilot-instructions.md"
        copilot_prompts = self.project.path / ".github" / "prompts"

        return copilot_instructions.exists() or copilot_prompts.exists()

    def has_claude(self) -> bool:
        """Check if project has Claude agent configured.

        Returns:
            True if Claude is configured
        """
        return self._has_claude_setup()

    def has_copilot(self) -> bool:
        """Check if project has Copilot agent configured.

        Returns:
            True if Copilot is configured
        """
        return self._has_copilot_setup()

    def detect_primary_agent(self) -> Optional[Agent]:
        """Detect the primary (most likely) agent for this project.

        Returns:
            Primary agent or None if none detected
        """
        agents = self.detect_agents()

        if not agents:
            return None

        # Prefer Claude if both are detected
        if Agent.CLAUDE in agents:
            return Agent.CLAUDE

        return agents[0]

    def get_agent_status(self) -> dict:
        """Get detailed status for each agent.

        Returns:
            Dict with agent status information
        """
        return {
            "claude": {
                "detected": self._has_claude_setup(),
                "directory": str(self.project.path / ".claude"),
                "commands_dir": str(self.project.command_directory(Agent.CLAUDE)),
                "has_commands": self._has_doit_commands(Agent.CLAUDE),
            },
            "copilot": {
                "detected": self._has_copilot_setup(),
                "instructions": str(self.project.path / ".github" / "copilot-instructions.md"),
                "prompts_dir": str(self.project.command_directory(Agent.COPILOT)),
                "has_prompts": self._has_doit_commands(Agent.COPILOT),
            },
        }

    @staticmethod
    def _is_doit_command(file: Path, agent: Agent) -> bool:
        """Check whether a path is a doit command file for the given agent.

        Claude commands are named "doit.*.md"; Copilot prompts are named
        "doit.*.prompt.md". Shared by _has_doit_commands and
        count_doit_commands so the two can never drift apart.

        Args:
            file: Path to test
            agent: Agent whose naming convention to apply

        Returns:
            True if the path is a matching doit command file
        """
        if not file.is_file() or not file.name.startswith("doit."):
            return False
        suffix = ".md" if agent == Agent.CLAUDE else ".prompt.md"
        return file.name.endswith(suffix)

    def _has_doit_commands(self, agent: Agent) -> bool:
        """Check if project has any doit commands for an agent.

        Args:
            agent: Agent to check

        Returns:
            True if any doit-prefixed command files exist
        """
        cmd_dir = self.project.command_directory(agent)

        if not cmd_dir.exists():
            return False

        # any() short-circuits on the first matching file
        return any(self._is_doit_command(f, agent) for f in cmd_dir.iterdir())

    def count_doit_commands(self, agent: Agent) -> int:
        """Count doit command files for an agent.

        Args:
            agent: Agent to count commands for

        Returns:
            Number of doit command files
        """
        cmd_dir = self.project.command_directory(agent)

        if not cmd_dir.exists():
            return 0

        return sum(1 for f in cmd_dir.iterdir() if self._is_doit_command(f, agent))
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
"""Analytics service for spec metrics and reporting.
|
|
2
|
+
|
|
3
|
+
This service orchestrates the generation of analytics reports by:
|
|
4
|
+
1. Using SpecScanner to discover specs
|
|
5
|
+
2. Using DateInferrer to enrich specs with dates
|
|
6
|
+
3. Building AnalyticsReport with aggregated metrics
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from datetime import date
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Optional
|
|
12
|
+
|
|
13
|
+
from ..models.analytics_models import (
|
|
14
|
+
AnalyticsReport,
|
|
15
|
+
CycleTimeRecord,
|
|
16
|
+
CycleTimeStats,
|
|
17
|
+
SpecMetadata,
|
|
18
|
+
VelocityDataPoint,
|
|
19
|
+
)
|
|
20
|
+
from ..models.status_models import SpecState
|
|
21
|
+
from .date_inferrer import DateInferrer
|
|
22
|
+
from .spec_scanner import NotADoitProjectError, SpecNotFoundError, SpecScanner
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class AnalyticsService:
    """Service for generating spec analytics and metrics.

    Composes SpecScanner and DateInferrer to produce enriched
    SpecMetadata and aggregated analytics reports.
    """

    def __init__(self, project_root: Optional[Path] = None):
        """Initialize the analytics service.

        Args:
            project_root: Root directory of the project. Defaults to cwd.

        Raises:
            NotADoitProjectError: If not a valid doit project
        """
        self.project_root = project_root or Path.cwd()
        self.scanner = SpecScanner(self.project_root, validate=False)
        self.date_inferrer = DateInferrer(self.project_root)

    def get_all_specs(self) -> list[SpecMetadata]:
        """Get all specs with enriched metadata.

        Returns:
            List of SpecMetadata objects with date information
        """
        spec_statuses = self.scanner.scan(include_validation=False)
        return [self._enrich_with_dates(status) for status in spec_statuses]

    def get_spec_details(self, spec_name: str) -> SpecMetadata:
        """Get detailed metadata for a single spec.

        Args:
            spec_name: Name of the spec directory (e.g., "036-analytics")

        Returns:
            SpecMetadata with enriched date information

        Raises:
            SpecNotFoundError: If spec doesn't exist
        """
        spec_status = self.scanner.scan_single(spec_name)
        return self._enrich_with_dates(spec_status)

    def get_completion_summary(self) -> dict:
        """Get completion metrics summary.

        Returns:
            Dictionary with total_specs, by_status counts, and completion_pct
        """
        specs = self.get_all_specs()

        total = len(specs)
        by_status: dict[str, int] = {}

        for spec in specs:
            status_name = spec.status.display_name
            by_status[status_name] = by_status.get(status_name, 0) + 1

        # Both COMPLETE and APPROVED count toward the completion percentage
        completed = sum(
            1 for s in specs if s.status in (SpecState.COMPLETE, SpecState.APPROVED)
        )
        completion_pct = (completed / total * 100) if total > 0 else 0.0

        return {
            "total_specs": total,
            "by_status": by_status,
            "completion_pct": round(completion_pct, 1),
            "draft_count": by_status.get("Draft", 0),
            "in_progress_count": by_status.get("In Progress", 0),
            "complete_count": by_status.get("Complete", 0),
            "approved_count": by_status.get("Approved", 0),
        }

    def get_cycle_time_stats(
        self,
        days: Optional[int] = None,
        since: Optional[date] = None,
    ) -> tuple[Optional[CycleTimeStats], list[CycleTimeRecord]]:
        """Get cycle time statistics for completed specs.

        Args:
            days: Filter to specs completed in the last N days
                (takes precedence over ``since`` when both are given)
            since: Filter to specs completed since this date

        Returns:
            Tuple of (CycleTimeStats or None, list of CycleTimeRecords)
        """
        from datetime import timedelta

        specs = self.get_all_specs()

        # Compute the cutoff date once, outside the per-spec loop.
        # (Previously a dead assignment and the timedelta import ran on
        # every loop iteration.)
        cutoff: Optional[date] = None
        if days is not None:
            cutoff = date.today() - timedelta(days=days)
        elif since is not None:
            cutoff = since

        # Filter to completed specs with dates
        records: list[CycleTimeRecord] = []
        for spec in specs:
            record = CycleTimeRecord.from_metadata(spec)
            if not record:
                continue
            if cutoff is not None and record.end_date < cutoff:
                continue
            records.append(record)

        # Sort by end date (most recent first)
        records.sort(key=lambda r: r.end_date, reverse=True)

        stats = CycleTimeStats.calculate(records)
        return stats, records

    def get_velocity_data(self, weeks: int = 8) -> list[VelocityDataPoint]:
        """Get weekly velocity data.

        Args:
            weeks: Number of weeks to include (default 8)

        Returns:
            List of VelocityDataPoint sorted by week (most recent first)
        """
        specs = self.get_all_specs()

        # Aggregate completed specs into per-week buckets keyed by week_key
        weekly: dict[str, VelocityDataPoint] = {}
        for spec in specs:
            if not spec.completed_at:
                continue
            point = VelocityDataPoint.from_completion(spec.completed_at, spec.name)
            if point.week_key in weekly:
                weekly[point.week_key] = weekly[point.week_key].merge(point)
            else:
                weekly[point.week_key] = point

        # Sort by week (descending) and limit
        sorted_points = sorted(weekly.values(), key=lambda v: v.week_key, reverse=True)
        return sorted_points[:weeks]

    def generate_report(self) -> AnalyticsReport:
        """Generate a complete analytics report.

        Returns:
            AnalyticsReport with all metrics calculated
        """
        specs = self.get_all_specs()
        return AnalyticsReport.generate(specs, self.project_root)

    def _enrich_with_dates(self, spec_status) -> SpecMetadata:
        """Enrich a SpecStatus with inferred dates.

        Args:
            spec_status: SpecStatus from scanner

        Returns:
            SpecMetadata with date information
        """
        created_at = self.date_inferrer.infer_created_date(spec_status.path)
        completed_at = self.date_inferrer.infer_completed_date(spec_status.path)

        return SpecMetadata.from_spec_status(
            spec_status,
            created_at=created_at,
            completed_at=completed_at,
        )

    def find_spec(self, partial_name: str) -> list[str]:
        """Find specs matching a partial name (case-insensitive substring).

        Args:
            partial_name: Partial spec name to search for

        Returns:
            List of matching spec names
        """
        specs = self.scanner.scan(include_validation=False)
        return [s.name for s in specs if partial_name.lower() in s.name.lower()]

    def list_all_spec_names(self) -> list[str]:
        """List all available spec names.

        Returns:
            List of spec directory names
        """
        specs = self.scanner.scan(include_validation=False)
        return [s.name for s in specs]
|
|
@@ -0,0 +1,290 @@
|
|
|
1
|
+
"""Generator for Architecture diagrams from plan.md files."""
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from dataclasses import dataclass, field
|
|
5
|
+
from typing import Optional
|
|
6
|
+
|
|
7
|
+
from ..models.diagram_models import DiagramType, GeneratedDiagram
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
class ComponentInfo:
    """Information about an architecture component.

    Attributes:
        name: Component name
        layer: Layer it belongs to (CLI, Service, Model, etc.)
        description: Component description
        dependencies: Other components it depends on
    """

    name: str
    layer: str = ""
    description: str = ""
    dependencies: list[str] = field(default_factory=list)


class ArchitectureGenerator:
    """Generates Mermaid architecture diagrams from plan.md content.

    Parses Technical Context and Project Structure sections to create
    a flowchart showing system layers and component relationships.
    """

    # Pattern for project structure code blocks (fenced ``` blocks whose
    # body begins with src/ or tests/)
    STRUCTURE_PATTERN = re.compile(
        r"```(?:text)?\s*\n((?:src/|tests/).*?)```", re.DOTALL
    )

    # Pattern for extracting file paths from structure; tolerates
    # tree-drawing characters and indentation before the path
    FILE_PATTERN = re.compile(r"^[\s│├└─]*([a-zA-Z_][a-zA-Z0-9_/]*\.py)", re.MULTILINE)

    # Layer classification patterns, checked in order; first match wins
    LAYER_PATTERNS = [
        (re.compile(r"cli/|command", re.IGNORECASE), "CLI Layer"),
        (re.compile(r"services?/", re.IGNORECASE), "Service Layer"),
        (re.compile(r"models?/", re.IGNORECASE), "Data Layer"),
        (re.compile(r"tests?/unit", re.IGNORECASE), "Unit Tests"),
        (re.compile(r"tests?/integration", re.IGNORECASE), "Integration Tests"),
        (re.compile(r"formatters?/", re.IGNORECASE), "Formatters"),
        (re.compile(r"prompts?/", re.IGNORECASE), "Prompts"),
        (re.compile(r"rules?/", re.IGNORECASE), "Rules"),
    ]

    # Rendering order for layer subgraphs in the generated flowchart
    LAYER_ORDER = [
        "CLI Layer",
        "Service Layer",
        "Data Layer",
        "Formatters",
        "Rules",
        "Prompts",
        "Unit Tests",
        "Integration Tests",
        "Other",
    ]

    def __init__(self, direction: str = "TB"):
        """Initialize generator.

        Args:
            direction: Flowchart direction (TB, LR, etc.)
        """
        self.direction = direction

    def generate(self, content: str) -> str:
        """Generate Mermaid architecture diagram from plan.md content.

        Args:
            content: Full content of plan.md file

        Returns:
            Mermaid flowchart syntax, or "" if no components were found
        """
        return self._render(self._extract_components(content))

    def generate_diagram(self, content: str) -> GeneratedDiagram:
        """Generate a GeneratedDiagram object from plan content.

        Args:
            content: Plan.md content

        Returns:
            GeneratedDiagram with content and metadata
        """
        # Extract once and reuse for both rendering and the node count
        # (previously the content was parsed twice).
        components = self._extract_components(content)
        mermaid_content = self._render(components)

        return GeneratedDiagram(
            id="architecture",
            diagram_type=DiagramType.ARCHITECTURE,
            mermaid_content=mermaid_content,
            is_valid=bool(mermaid_content),
            node_count=len(components),
        )

    def _render(self, components: list[ComponentInfo]) -> str:
        """Render a Mermaid flowchart for the given components.

        Args:
            components: Components to render

        Returns:
            Mermaid flowchart syntax, or "" if components is empty
        """
        if not components:
            return ""

        # Group components by layer
        layers: dict[str, list[ComponentInfo]] = {}
        for comp in components:
            layers.setdefault(comp.layer or "Other", []).append(comp)

        lines = [f"flowchart {self.direction}"]

        # Emit one subgraph per populated layer, in canonical order
        for layer_name in self.LAYER_ORDER:
            if layer_name not in layers:
                continue

            layer_id = self._to_id(layer_name)
            lines.append(f'    subgraph {layer_id}["{layer_name}"]')
            for comp in layers[layer_name]:
                comp_id = self._to_id(comp.name)
                lines.append(f'        {comp_id}["{comp.name}"]')
            lines.append("    end")
            lines.append("")

        # Add connections based on common patterns
        connections = self._infer_connections(components)
        if connections:
            lines.append("    %% Component connections")
            for source, target in connections:
                lines.append(f"    {self._to_id(source)} --> {self._to_id(target)}")

        return "\n".join(lines)

    def _extract_components(self, content: str) -> list[ComponentInfo]:
        """Extract component information from plan content.

        Args:
            content: Plan content

        Returns:
            List of ComponentInfo objects (deduplicated by name)
        """
        components = []
        seen_names: set[str] = set()

        # Find project structure blocks
        for match in self.STRUCTURE_PATTERN.finditer(content):
            structure_text = match.group(1)

            # Extract file paths
            for file_match in self.FILE_PATTERN.finditer(structure_text):
                file_path = file_match.group(1)

                # Get component name from filename; skip duplicates and
                # package markers
                name = self._path_to_component_name(file_path)
                if name in seen_names or name == "__init__":
                    continue
                seen_names.add(name)

                components.append(
                    ComponentInfo(
                        name=name,
                        layer=self._classify_layer(file_path),
                        description="",
                    )
                )

        return components

    def _path_to_component_name(self, path: str) -> str:
        """Convert file path to component name.

        Args:
            path: File path (e.g., "src/doit_cli/services/diagram_service.py")

        Returns:
            Component name (e.g., "DiagramService")
        """
        # Get filename without extension
        filename = path.split("/")[-1].replace(".py", "")

        # Convert snake_case to PascalCase
        return "".join(part.capitalize() for part in filename.split("_"))

    def _classify_layer(self, path: str) -> str:
        """Classify which layer a file belongs to.

        Args:
            path: File path

        Returns:
            Layer name ("Other" when no pattern matches)
        """
        for pattern, layer_name in self.LAYER_PATTERNS:
            if pattern.search(path):
                return layer_name
        return "Other"

    def _to_id(self, name: str) -> str:
        """Convert name to valid Mermaid node ID.

        Args:
            name: Component or layer name

        Returns:
            Valid node ID (alphanumeric characters only)
        """
        return re.sub(r"[^A-Za-z0-9]", "", name)

    def _infer_connections(
        self, components: list[ComponentInfo]
    ) -> list[tuple[str, str]]:
        """Infer connections between components based on naming patterns.

        Args:
            components: List of components

        Returns:
            List of (source, target) connection tuples
        """
        connections = []
        comp_names = {c.name for c in components}

        # Common connection patterns as (source regex, target template).
        # A template containing \1 reuses the captured prefix; otherwise
        # it is a literal target name.
        patterns = [
            (r"(.+)Command$", r"\1Service"),       # Command -> Service
            (r"(.+)Service$", r"\1Parser"),        # Service -> Parser
            (r"(.+)Generator$", "DiagramModels"),  # Generator -> Models
            (r"(.+)Parser$", "DiagramModels"),     # Parser -> Models
            (r"(.+)Service$", r"\1Generator"),     # Service -> Generator
            (r"(.+)Service$", r"\1Validator"),     # Service -> Validator
        ]

        for comp in components:
            for src_pattern, tgt_pattern in patterns:
                if not re.match(src_pattern, comp.name):
                    continue
                if r"\1" in tgt_pattern:
                    target = re.sub(src_pattern, tgt_pattern, comp.name)
                else:
                    target = tgt_pattern

                # Only connect to components that actually exist
                if target in comp_names and target != comp.name:
                    connections.append((comp.name, target))

        # Deduplicate while preserving discovery order; list(set(...))
        # made the edge order nondeterministic across runs.
        return list(dict.fromkeys(connections))

    def generate_from_sections(
        self,
        tech_context: Optional[str] = None,
        project_structure: Optional[str] = None,
    ) -> str:
        """Generate architecture diagram from specific sections.

        Args:
            tech_context: Technical Context section content
            project_structure: Project Structure section content

        Returns:
            Mermaid diagram syntax
        """
        combined = ""
        if tech_context:
            combined += tech_context + "\n"
        if project_structure:
            combined += project_structure

        return self.generate(combined)
|