doit-toolkit-cli 0.1.9 (doit_toolkit_cli-0.1.9-py3-none-any.whl)

This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions exactly as they appear in the public registry.
Files changed (134)
  1. doit_cli/__init__.py +1356 -0
  2. doit_cli/cli/__init__.py +26 -0
  3. doit_cli/cli/analytics_command.py +616 -0
  4. doit_cli/cli/context_command.py +213 -0
  5. doit_cli/cli/diagram_command.py +304 -0
  6. doit_cli/cli/fixit_command.py +641 -0
  7. doit_cli/cli/hooks_command.py +211 -0
  8. doit_cli/cli/init_command.py +613 -0
  9. doit_cli/cli/memory_command.py +293 -0
  10. doit_cli/cli/status_command.py +117 -0
  11. doit_cli/cli/sync_prompts_command.py +248 -0
  12. doit_cli/cli/validate_command.py +196 -0
  13. doit_cli/cli/verify_command.py +204 -0
  14. doit_cli/cli/workflow_mixin.py +224 -0
  15. doit_cli/cli/xref_command.py +555 -0
  16. doit_cli/formatters/__init__.py +8 -0
  17. doit_cli/formatters/base.py +38 -0
  18. doit_cli/formatters/json_formatter.py +126 -0
  19. doit_cli/formatters/markdown_formatter.py +97 -0
  20. doit_cli/formatters/rich_formatter.py +257 -0
  21. doit_cli/main.py +49 -0
  22. doit_cli/models/__init__.py +139 -0
  23. doit_cli/models/agent.py +74 -0
  24. doit_cli/models/analytics_models.py +384 -0
  25. doit_cli/models/context_config.py +464 -0
  26. doit_cli/models/crossref_models.py +182 -0
  27. doit_cli/models/diagram_models.py +363 -0
  28. doit_cli/models/fixit_models.py +355 -0
  29. doit_cli/models/hook_config.py +125 -0
  30. doit_cli/models/project.py +91 -0
  31. doit_cli/models/results.py +121 -0
  32. doit_cli/models/search_models.py +228 -0
  33. doit_cli/models/status_models.py +195 -0
  34. doit_cli/models/sync_models.py +146 -0
  35. doit_cli/models/template.py +77 -0
  36. doit_cli/models/validation_models.py +175 -0
  37. doit_cli/models/workflow_models.py +319 -0
  38. doit_cli/prompts/__init__.py +5 -0
  39. doit_cli/prompts/fixit_prompts.py +344 -0
  40. doit_cli/prompts/interactive.py +390 -0
  41. doit_cli/rules/__init__.py +5 -0
  42. doit_cli/rules/builtin_rules.py +160 -0
  43. doit_cli/services/__init__.py +79 -0
  44. doit_cli/services/agent_detector.py +168 -0
  45. doit_cli/services/analytics_service.py +218 -0
  46. doit_cli/services/architecture_generator.py +290 -0
  47. doit_cli/services/backup_service.py +204 -0
  48. doit_cli/services/config_loader.py +113 -0
  49. doit_cli/services/context_loader.py +1121 -0
  50. doit_cli/services/coverage_calculator.py +142 -0
  51. doit_cli/services/crossref_service.py +237 -0
  52. doit_cli/services/cycle_time_calculator.py +134 -0
  53. doit_cli/services/date_inferrer.py +349 -0
  54. doit_cli/services/diagram_service.py +337 -0
  55. doit_cli/services/drift_detector.py +109 -0
  56. doit_cli/services/entity_parser.py +301 -0
  57. doit_cli/services/er_diagram_generator.py +197 -0
  58. doit_cli/services/fixit_service.py +699 -0
  59. doit_cli/services/github_service.py +192 -0
  60. doit_cli/services/hook_manager.py +258 -0
  61. doit_cli/services/hook_validator.py +528 -0
  62. doit_cli/services/input_validator.py +322 -0
  63. doit_cli/services/memory_search.py +527 -0
  64. doit_cli/services/mermaid_validator.py +334 -0
  65. doit_cli/services/prompt_transformer.py +91 -0
  66. doit_cli/services/prompt_writer.py +133 -0
  67. doit_cli/services/query_interpreter.py +428 -0
  68. doit_cli/services/report_exporter.py +219 -0
  69. doit_cli/services/report_generator.py +256 -0
  70. doit_cli/services/requirement_parser.py +112 -0
  71. doit_cli/services/roadmap_summarizer.py +209 -0
  72. doit_cli/services/rule_engine.py +443 -0
  73. doit_cli/services/scaffolder.py +215 -0
  74. doit_cli/services/score_calculator.py +172 -0
  75. doit_cli/services/section_parser.py +204 -0
  76. doit_cli/services/spec_scanner.py +327 -0
  77. doit_cli/services/state_manager.py +355 -0
  78. doit_cli/services/status_reporter.py +143 -0
  79. doit_cli/services/task_parser.py +347 -0
  80. doit_cli/services/template_manager.py +710 -0
  81. doit_cli/services/template_reader.py +158 -0
  82. doit_cli/services/user_journey_generator.py +214 -0
  83. doit_cli/services/user_story_parser.py +232 -0
  84. doit_cli/services/validation_service.py +188 -0
  85. doit_cli/services/validator.py +232 -0
  86. doit_cli/services/velocity_tracker.py +173 -0
  87. doit_cli/services/workflow_engine.py +405 -0
  88. doit_cli/templates/agent-file-template.md +28 -0
  89. doit_cli/templates/checklist-template.md +39 -0
  90. doit_cli/templates/commands/doit.checkin.md +363 -0
  91. doit_cli/templates/commands/doit.constitution.md +187 -0
  92. doit_cli/templates/commands/doit.documentit.md +485 -0
  93. doit_cli/templates/commands/doit.fixit.md +181 -0
  94. doit_cli/templates/commands/doit.implementit.md +265 -0
  95. doit_cli/templates/commands/doit.planit.md +262 -0
  96. doit_cli/templates/commands/doit.reviewit.md +355 -0
  97. doit_cli/templates/commands/doit.roadmapit.md +368 -0
  98. doit_cli/templates/commands/doit.scaffoldit.md +458 -0
  99. doit_cli/templates/commands/doit.specit.md +521 -0
  100. doit_cli/templates/commands/doit.taskit.md +304 -0
  101. doit_cli/templates/commands/doit.testit.md +277 -0
  102. doit_cli/templates/config/context.yaml +134 -0
  103. doit_cli/templates/config/hooks.yaml +93 -0
  104. doit_cli/templates/config/validation-rules.yaml +64 -0
  105. doit_cli/templates/github-issue-templates/epic.yml +78 -0
  106. doit_cli/templates/github-issue-templates/feature.yml +116 -0
  107. doit_cli/templates/github-issue-templates/task.yml +129 -0
  108. doit_cli/templates/hooks/.gitkeep +0 -0
  109. doit_cli/templates/hooks/post-commit.sh +25 -0
  110. doit_cli/templates/hooks/post-merge.sh +75 -0
  111. doit_cli/templates/hooks/pre-commit.sh +17 -0
  112. doit_cli/templates/hooks/pre-push.sh +18 -0
  113. doit_cli/templates/memory/completed_roadmap.md +50 -0
  114. doit_cli/templates/memory/constitution.md +125 -0
  115. doit_cli/templates/memory/roadmap.md +61 -0
  116. doit_cli/templates/plan-template.md +146 -0
  117. doit_cli/templates/scripts/bash/check-prerequisites.sh +166 -0
  118. doit_cli/templates/scripts/bash/common.sh +156 -0
  119. doit_cli/templates/scripts/bash/create-new-feature.sh +297 -0
  120. doit_cli/templates/scripts/bash/setup-plan.sh +61 -0
  121. doit_cli/templates/scripts/bash/update-agent-context.sh +675 -0
  122. doit_cli/templates/scripts/powershell/check-prerequisites.ps1 +148 -0
  123. doit_cli/templates/scripts/powershell/common.ps1 +137 -0
  124. doit_cli/templates/scripts/powershell/create-new-feature.ps1 +283 -0
  125. doit_cli/templates/scripts/powershell/setup-plan.ps1 +61 -0
  126. doit_cli/templates/scripts/powershell/update-agent-context.ps1 +406 -0
  127. doit_cli/templates/spec-template.md +159 -0
  128. doit_cli/templates/tasks-template.md +313 -0
  129. doit_cli/templates/vscode-settings.json +14 -0
  130. doit_toolkit_cli-0.1.9.dist-info/METADATA +324 -0
  131. doit_toolkit_cli-0.1.9.dist-info/RECORD +134 -0
  132. doit_toolkit_cli-0.1.9.dist-info/WHEEL +4 -0
  133. doit_toolkit_cli-0.1.9.dist-info/entry_points.txt +2 -0
  134. doit_toolkit_cli-0.1.9.dist-info/licenses/LICENSE +21 -0
doit_cli/services/validation_service.py
@@ -0,0 +1,188 @@
+"""Validation service for spec file validation."""
+
+from datetime import datetime
+from pathlib import Path
+from typing import Optional
+
+from ..models.validation_models import ValidationConfig, ValidationResult
+from .config_loader import load_validation_config
+from .rule_engine import RuleEngine
+from .score_calculator import ScoreCalculator
+
+
+class ValidationService:
+    """Orchestrates spec file validation.
+
+    Coordinates rule loading, spec parsing, and result aggregation.
+    """
+
+    # Default specs directory name
+    SPECS_DIR = "specs"
+
+    def __init__(
+        self,
+        project_root: Optional[Path] = None,
+        config: Optional[ValidationConfig] = None,
+    ) -> None:
+        """Initialize validation service.
+
+        Args:
+            project_root: Root directory for spec discovery. Defaults to cwd.
+            config: Validation configuration. Uses defaults if None.
+                If None, attempts to load from .doit/validation-rules.yaml.
+        """
+        self.project_root = project_root or Path.cwd()
+        self.config = config or load_validation_config(self.project_root)
+        self.rule_engine = RuleEngine(config=self.config)
+        self.score_calculator = ScoreCalculator()
+
+    def validate_file(self, spec_path: Path) -> ValidationResult:
+        """Validate a single spec file.
+
+        Args:
+            spec_path: Path to the spec file to validate.
+
+        Returns:
+            ValidationResult with all issues found.
+
+        Raises:
+            FileNotFoundError: If spec_path doesn't exist.
+            ValueError: If file is not a valid markdown file.
+        """
+        # Ensure path is absolute
+        if not spec_path.is_absolute():
+            spec_path = self.project_root / spec_path
+
+        if not spec_path.exists():
+            raise FileNotFoundError(f"Spec file not found: {spec_path}")
+
+        if not spec_path.suffix.lower() == ".md":
+            raise ValueError(f"Not a markdown file: {spec_path}")
+
+        # Read file content
+        try:
+            content = spec_path.read_text(encoding="utf-8")
+        except (OSError, UnicodeDecodeError) as e:
+            raise ValueError(f"Could not read file: {e}") from e
+
+        # Handle empty files
+        if not content.strip():
+            result = ValidationResult(
+                spec_path=str(spec_path),
+                validated_at=datetime.now(),
+            )
+            from ..models.validation_models import Severity, ValidationIssue
+
+            result.add_issue(
+                ValidationIssue(
+                    rule_id="empty-file",
+                    severity=Severity.ERROR,
+                    line_number=0,
+                    message="Spec file is empty",
+                    suggestion="Add content following the spec template structure",
+                )
+            )
+            result.quality_score = 0
+            return result
+
+        # Evaluate rules
+        issues = self.rule_engine.evaluate(content, spec_path)
+
+        # Calculate score
+        score = self.score_calculator.calculate(issues)
+
+        # Create result
+        result = ValidationResult(
+            spec_path=str(spec_path),
+            issues=issues,
+            quality_score=score,
+            validated_at=datetime.now(),
+        )
+
+        return result
+
+    def validate_directory(self, specs_dir: Path) -> list[ValidationResult]:
+        """Validate all spec files in a directory.
+
+        Args:
+            specs_dir: Directory containing spec files.
+
+        Returns:
+            List of ValidationResult, one per spec file found.
+        """
+        if not specs_dir.is_absolute():
+            specs_dir = self.project_root / specs_dir
+
+        if not specs_dir.exists():
+            return []
+
+        results: list[ValidationResult] = []
+
+        # Find all spec.md files in subdirectories
+        for spec_file in sorted(specs_dir.rglob("spec.md")):
+            try:
+                result = self.validate_file(spec_file)
+                results.append(result)
+            except (FileNotFoundError, ValueError) as e:
+                # Create error result for unreadable files
+                from ..models.validation_models import Severity, ValidationIssue
+
+                result = ValidationResult(
+                    spec_path=str(spec_file),
+                    validated_at=datetime.now(),
+                )
+                result.add_issue(
+                    ValidationIssue(
+                        rule_id="file-error",
+                        severity=Severity.ERROR,
+                        line_number=0,
+                        message=str(e),
+                        suggestion="Check file permissions and encoding",
+                    )
+                )
+                result.quality_score = 0
+                results.append(result)
+
+        return results
+
+    def validate_all(self) -> list[ValidationResult]:
+        """Validate all specs in project's specs/ directory.
+
+        Returns:
+            List of ValidationResult for all specs found.
+        """
+        specs_dir = self.project_root / self.SPECS_DIR
+        return self.validate_directory(specs_dir)
+
+    def get_summary(self, results: list[ValidationResult]) -> dict:
+        """Generate summary statistics for multiple results.
+
+        Args:
+            results: List of validation results.
+
+        Returns:
+            Dict with total_specs, passed, warned, failed, avg_score.
+        """
+        if not results:
+            return {
+                "total_specs": 0,
+                "passed": 0,
+                "warned": 0,
+                "failed": 0,
+                "average_score": 0,
+            }
+
+        from ..models.validation_models import ValidationStatus
+
+        passed = sum(1 for r in results if r.status == ValidationStatus.PASS)
+        warned = sum(1 for r in results if r.status == ValidationStatus.WARN)
+        failed = sum(1 for r in results if r.status == ValidationStatus.FAIL)
+        avg_score = sum(r.quality_score for r in results) // len(results)
+
+        return {
+            "total_specs": len(results),
+            "passed": passed,
+            "warned": warned,
+            "failed": failed,
+            "average_score": avg_score,
+        }
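The service above wires rule evaluation and scoring behind three entry points (validate_file, validate_directory, validate_all). A minimal usage sketch, assuming the wheel is installed so that doit_cli.services.validation_service is importable as laid out in the file list; the attributes read from ValidationResult (spec_path, quality_score, issues) are the ones this diff constructs, not a documented public API:

from pathlib import Path

from doit_cli.services.validation_service import ValidationService

# Validate every specs/**/spec.md under the current project.
service = ValidationService(project_root=Path.cwd())
results = service.validate_all()

# get_summary() aggregates pass/warn/fail counts and the integer average score.
summary = service.get_summary(results)
print(
    f"{summary['total_specs']} specs: {summary['passed']} passed, "
    f"{summary['warned']} warned, {summary['failed']} failed "
    f"(avg score {summary['average_score']})"
)

for result in results:
    # Fields set by validate_file(); issues carries the rule hits.
    print(result.spec_path, result.quality_score, len(result.issues))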
doit_cli/services/validator.py
@@ -0,0 +1,232 @@
+"""Validator service for checking project setup."""
+
+from pathlib import Path
+from typing import Optional
+
+from ..models.agent import Agent
+from ..models.project import Project
+from ..models.results import VerifyResult, VerifyCheck, VerifyStatus
+from ..models.template import DOIT_COMMANDS
+
+
+class Validator:
+    """Service for validating doit project setup."""
+
+    def __init__(self, project: Project):
+        """Initialize validator.
+
+        Args:
+            project: Project to validate
+        """
+        self.project = project
+        self.checks: list[VerifyCheck] = []
+
+    def check_doit_folder(self) -> VerifyCheck:
+        """Check if .doit/ folder exists and has required subdirectories."""
+        if not self.project.doit_folder.exists():
+            return VerifyCheck(
+                name="doit_folder",
+                status=VerifyStatus.FAIL,
+                message=".doit/ folder does not exist",
+                suggestion="Run 'doit init' to create the project structure",
+            )
+
+        # Check subdirectories
+        missing_subdirs = []
+        for subdir in ["memory", "templates", "scripts"]:
+            if not (self.project.doit_folder / subdir).exists():
+                missing_subdirs.append(subdir)
+
+        if missing_subdirs:
+            return VerifyCheck(
+                name="doit_folder",
+                status=VerifyStatus.WARN,
+                message=f".doit/ exists but missing subdirectories: {', '.join(missing_subdirs)}",
+                suggestion="Run 'doit init --update' to restore missing directories",
+            )
+
+        return VerifyCheck(
+            name="doit_folder",
+            status=VerifyStatus.PASS,
+            message=".doit/ folder structure is complete",
+        )
+
+    def check_agent_directory(self, agent: Agent) -> VerifyCheck:
+        """Check if agent command directory exists.
+
+        Args:
+            agent: Agent to check
+        """
+        cmd_dir = self.project.command_directory(agent)
+
+        if not cmd_dir.exists():
+            return VerifyCheck(
+                name=f"{agent.value}_directory",
+                status=VerifyStatus.FAIL,
+                message=f"{agent.display_name} command directory does not exist",
+                suggestion=f"Run 'doit init --agent {agent.value}' to create it",
+            )
+
+        return VerifyCheck(
+            name=f"{agent.value}_directory",
+            status=VerifyStatus.PASS,
+            message=f"{agent.display_name} command directory exists at {agent.command_directory}",
+        )
+
+    def check_command_files(self, agent: Agent) -> VerifyCheck:
+        """Check if all required command files exist for an agent.
+
+        Args:
+            agent: Agent to check
+        """
+        cmd_dir = self.project.command_directory(agent)
+
+        if not cmd_dir.exists():
+            return VerifyCheck(
+                name=f"{agent.value}_commands",
+                status=VerifyStatus.FAIL,
+                message=f"Cannot check commands - directory does not exist",
+            )
+
+        # Get expected file names
+        if agent == Agent.CLAUDE:
+            expected_files = {f"doit.{cmd}.md" for cmd in DOIT_COMMANDS}
+        else:  # COPILOT
+            expected_files = {f"doit.{cmd}.prompt.md" for cmd in DOIT_COMMANDS}
+
+        # Get actual files
+        actual_files = {f.name for f in cmd_dir.iterdir() if f.is_file()}
+
+        # Check for missing
+        missing = expected_files - actual_files
+
+        if missing:
+            return VerifyCheck(
+                name=f"{agent.value}_commands",
+                status=VerifyStatus.WARN,
+                message=f"Missing {len(missing)} command files: {', '.join(sorted(missing)[:3])}{'...' if len(missing) > 3 else ''}",
+                suggestion="Run 'doit init --update' to restore missing templates",
+            )
+
+        return VerifyCheck(
+            name=f"{agent.value}_commands",
+            status=VerifyStatus.PASS,
+            message=f"All {len(DOIT_COMMANDS)} command files present",
+        )
+
+    def check_constitution(self) -> VerifyCheck:
+        """Check if constitution.md exists in memory folder."""
+        constitution_path = self.project.memory_folder / "constitution.md"
+
+        if not constitution_path.exists():
+            return VerifyCheck(
+                name="constitution",
+                status=VerifyStatus.WARN,
+                message="Project constitution not found",
+                suggestion="Run '/doit.constitution' to create project principles",
+            )
+
+        return VerifyCheck(
+            name="constitution",
+            status=VerifyStatus.PASS,
+            message="Project constitution exists",
+        )
+
+    def check_roadmap(self) -> VerifyCheck:
+        """Check if roadmap.md exists in memory folder."""
+        roadmap_path = self.project.memory_folder / "roadmap.md"
+
+        if not roadmap_path.exists():
+            return VerifyCheck(
+                name="roadmap",
+                status=VerifyStatus.WARN,
+                message="Project roadmap not found",
+                suggestion="Run '/doit.roadmapit' to create a feature roadmap",
+            )
+
+        return VerifyCheck(
+            name="roadmap",
+            status=VerifyStatus.PASS,
+            message="Project roadmap exists",
+        )
+
+    def check_copilot_instructions(self) -> VerifyCheck:
+        """Check if copilot-instructions.md exists for Copilot projects."""
+        instructions_path = self.project.path / ".github" / "copilot-instructions.md"
+        prompts_dir = self.project.path / ".github" / "prompts"
+
+        # Only check if Copilot appears to be configured
+        if not prompts_dir.exists():
+            return VerifyCheck(
+                name="copilot_instructions",
+                status=VerifyStatus.PASS,
+                message="Copilot not configured (skipped)",
+            )
+
+        if not instructions_path.exists():
+            return VerifyCheck(
+                name="copilot_instructions",
+                status=VerifyStatus.WARN,
+                message="copilot-instructions.md not found",
+                suggestion="Run 'doit init --agent copilot' to create it",
+            )
+
+        # Check if it has doit section
+        content = instructions_path.read_text(encoding="utf-8")
+        if "DOIT INSTRUCTIONS" not in content:
+            return VerifyCheck(
+                name="copilot_instructions",
+                status=VerifyStatus.WARN,
+                message="copilot-instructions.md missing doit section",
+                suggestion="Run 'doit init --agent copilot --update' to add doit instructions",
+            )
+
+        return VerifyCheck(
+            name="copilot_instructions",
+            status=VerifyStatus.PASS,
+            message="copilot-instructions.md configured correctly",
+        )
+
+    def run_all_checks(self, agents: Optional[list[Agent]] = None) -> VerifyResult:
+        """Run all validation checks.
+
+        Args:
+            agents: List of agents to check (None = auto-detect)
+
+        Returns:
+            VerifyResult with all check results
+        """
+        self.checks = []
+
+        # Core structure check
+        self.checks.append(self.check_doit_folder())
+
+        # Auto-detect agents if not specified
+        if agents is None:
+            agents = []
+            if (self.project.path / ".claude").exists():
+                agents.append(Agent.CLAUDE)
+            if (self.project.path / ".github" / "prompts").exists():
+                agents.append(Agent.COPILOT)
+
+        # Default to Claude if nothing detected
+        if not agents:
+            agents = [Agent.CLAUDE]
+
+        # Agent-specific checks
+        for agent in agents:
+            self.checks.append(self.check_agent_directory(agent))
+            self.checks.append(self.check_command_files(agent))
+
+        # Memory content checks
+        self.checks.append(self.check_constitution())
+        self.checks.append(self.check_roadmap())
+
+        # Copilot-specific check
+        if Agent.COPILOT in agents:
+            self.checks.append(self.check_copilot_instructions())
+
+        return VerifyResult(
+            project=self.project,
+            checks=self.checks,
+        )
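A short usage sketch of the checker above, with the caveats flagged in the comments: Project(path=...) is an assumed constructor (this diff only shows that the validator reads project.path, doit_folder, memory_folder, and command_directory()), and the VerifyCheck/VerifyResult field access follows what run_all_checks() builds rather than a documented API:

from pathlib import Path

from doit_cli.models.project import Project
from doit_cli.services.validator import Validator

# Hypothetical construction; the Project model's real signature is not shown in this diff.
project = Project(path=Path.cwd())

validator = Validator(project)
result = validator.run_all_checks()  # agents=None auto-detects Claude/Copilot setups

for check in result.checks:
    # name/status/message/suggestion mirror the VerifyCheck fields built above;
    # suggestion is omitted on PASS checks, so treat it as optional.
    line = f"[{check.status.name}] {check.name}: {check.message}"
    if getattr(check, "suggestion", None):
        line += f" ({check.suggestion})"
    print(line)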
doit_cli/services/velocity_tracker.py
@@ -0,0 +1,173 @@
+"""Velocity tracker service for spec analytics.
+
+Provides weekly velocity aggregation and trend analysis.
+"""
+
+from datetime import date, timedelta
+from typing import Optional
+
+from ..models.analytics_models import SpecMetadata, VelocityDataPoint
+
+
+class VelocityTracker:
+    """Tracker for spec completion velocity.
+
+    Aggregates spec completions by ISO week to enable
+    trend analysis and velocity metrics.
+    """
+
+    def __init__(self, specs: list[SpecMetadata]):
+        """Initialize tracker with spec metadata.
+
+        Args:
+            specs: List of SpecMetadata objects to analyze
+        """
+        self.specs = specs
+        self._weekly_data: Optional[dict[str, VelocityDataPoint]] = None
+
+    @property
+    def weekly_data(self) -> dict[str, VelocityDataPoint]:
+        """Get weekly aggregated velocity data.
+
+        Returns:
+            Dictionary mapping week keys to VelocityDataPoint objects
+        """
+        if self._weekly_data is None:
+            self._weekly_data = self._aggregate_by_week()
+        return self._weekly_data
+
+    def _aggregate_by_week(self) -> dict[str, VelocityDataPoint]:
+        """Aggregate completions by ISO week.
+
+        Returns:
+            Dictionary of week_key -> VelocityDataPoint
+        """
+        weekly: dict[str, VelocityDataPoint] = {}
+
+        for spec in self.specs:
+            if spec.completed_at:
+                point = VelocityDataPoint.from_completion(
+                    spec.completed_at, spec.name
+                )
+                if point.week_key in weekly:
+                    weekly[point.week_key] = weekly[point.week_key].merge(point)
+                else:
+                    weekly[point.week_key] = point
+
+        return weekly
+
+    def aggregate_by_week(self, weeks: int = 8) -> list[VelocityDataPoint]:
+        """Get velocity data for the specified number of weeks.
+
+        Args:
+            weeks: Number of weeks to include (default 8)
+
+        Returns:
+            List of VelocityDataPoint sorted by week (most recent first)
+        """
+        # Get all weekly data
+        all_weeks = list(self.weekly_data.values())
+
+        # Sort by week key (descending)
+        sorted_weeks = sorted(all_weeks, key=lambda v: v.week_key, reverse=True)
+
+        # Limit to requested number of weeks
+        return sorted_weeks[:weeks]
+
+    def get_velocity_trend(
+        self,
+        weeks: int = 8,
+        fill_missing: bool = True,
+    ) -> list[VelocityDataPoint]:
+        """Get velocity trend with optional gap filling.
+
+        Args:
+            weeks: Number of weeks to analyze
+            fill_missing: If True, include weeks with zero completions
+
+        Returns:
+            List of VelocityDataPoint covering the time range
+        """
+        if not fill_missing:
+            return self.aggregate_by_week(weeks)
+
+        # Generate all weeks in range
+        today = date.today()
+        current_week_start = today - timedelta(days=today.weekday())
+
+        result: list[VelocityDataPoint] = []
+
+        for i in range(weeks):
+            week_date = current_week_start - timedelta(weeks=i)
+            year, week, _ = week_date.isocalendar()
+            week_key = f"{year}-W{week:02d}"
+
+            if week_key in self.weekly_data:
+                result.append(self.weekly_data[week_key])
+            else:
+                # Create empty data point for missing week
+                result.append(
+                    VelocityDataPoint(
+                        week_key=week_key,
+                        week_start=week_date,
+                        specs_completed=0,
+                        spec_names=[],
+                    )
+                )
+
+        return result
+
+    def calculate_average_velocity(self, weeks: int = 8) -> float:
+        """Calculate average specs completed per week.
+
+        Args:
+            weeks: Number of weeks to average over
+
+        Returns:
+            Average completions per week
+        """
+        data = self.aggregate_by_week(weeks)
+        if not data:
+            return 0.0
+
+        total = sum(v.specs_completed for v in data)
+        return total / len(data)
+
+    def get_peak_week(self) -> Optional[VelocityDataPoint]:
+        """Get the week with most completions.
+
+        Returns:
+            VelocityDataPoint for peak week, or None if no data
+        """
+        if not self.weekly_data:
+            return None
+
+        return max(
+            self.weekly_data.values(),
+            key=lambda v: v.specs_completed,
+        )
+
+    def has_sufficient_data(self, min_weeks: int = 2) -> bool:
+        """Check if there's enough data for trend analysis.
+
+        Args:
+            min_weeks: Minimum weeks required
+
+        Returns:
+            True if sufficient data exists
+        """
+        return len(self.weekly_data) >= min_weeks
+
+    def to_csv(self, weeks: int = 8) -> str:
+        """Export velocity data as CSV string.
+
+        Args:
+            weeks: Number of weeks to include
+
+        Returns:
+            CSV formatted string with header
+        """
+        lines = ["week,completed"]
+        for v in self.aggregate_by_week(weeks):
+            lines.append(f"{v.week_key},{v.specs_completed}")
+        return "\n".join(lines)
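A small end-to-end sketch of the tracker, with the caveat that SpecMetadata(name=..., completed_at=...) is an assumed constructor: this diff only shows that the tracker reads .name and .completed_at, and that VelocityDataPoint exposes week_key and specs_completed.

from datetime import date

from doit_cli.models.analytics_models import SpecMetadata
from doit_cli.services.velocity_tracker import VelocityTracker

# Hypothetical spec records: two completions in ISO week 2024-W02, one in 2024-W04.
specs = [
    SpecMetadata(name="001-user-auth", completed_at=date(2024, 1, 8)),
    SpecMetadata(name="002-billing", completed_at=date(2024, 1, 10)),
    SpecMetadata(name="003-reports", completed_at=date(2024, 1, 22)),
]

tracker = VelocityTracker(specs)
print(tracker.calculate_average_velocity(weeks=4))  # average over weeks that have data
print(tracker.to_csv(weeks=4))                      # "week,completed" rows, most recent first

peak = tracker.get_peak_week()
if peak is not None:
    print(f"Peak week {peak.week_key}: {peak.specs_completed} specs")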