doit-toolkit-cli 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. doit_cli/__init__.py +1356 -0
  2. doit_cli/cli/__init__.py +26 -0
  3. doit_cli/cli/analytics_command.py +616 -0
  4. doit_cli/cli/context_command.py +213 -0
  5. doit_cli/cli/diagram_command.py +304 -0
  6. doit_cli/cli/fixit_command.py +641 -0
  7. doit_cli/cli/hooks_command.py +211 -0
  8. doit_cli/cli/init_command.py +613 -0
  9. doit_cli/cli/memory_command.py +293 -0
  10. doit_cli/cli/status_command.py +117 -0
  11. doit_cli/cli/sync_prompts_command.py +248 -0
  12. doit_cli/cli/validate_command.py +196 -0
  13. doit_cli/cli/verify_command.py +204 -0
  14. doit_cli/cli/workflow_mixin.py +224 -0
  15. doit_cli/cli/xref_command.py +555 -0
  16. doit_cli/formatters/__init__.py +8 -0
  17. doit_cli/formatters/base.py +38 -0
  18. doit_cli/formatters/json_formatter.py +126 -0
  19. doit_cli/formatters/markdown_formatter.py +97 -0
  20. doit_cli/formatters/rich_formatter.py +257 -0
  21. doit_cli/main.py +49 -0
  22. doit_cli/models/__init__.py +139 -0
  23. doit_cli/models/agent.py +74 -0
  24. doit_cli/models/analytics_models.py +384 -0
  25. doit_cli/models/context_config.py +464 -0
  26. doit_cli/models/crossref_models.py +182 -0
  27. doit_cli/models/diagram_models.py +363 -0
  28. doit_cli/models/fixit_models.py +355 -0
  29. doit_cli/models/hook_config.py +125 -0
  30. doit_cli/models/project.py +91 -0
  31. doit_cli/models/results.py +121 -0
  32. doit_cli/models/search_models.py +228 -0
  33. doit_cli/models/status_models.py +195 -0
  34. doit_cli/models/sync_models.py +146 -0
  35. doit_cli/models/template.py +77 -0
  36. doit_cli/models/validation_models.py +175 -0
  37. doit_cli/models/workflow_models.py +319 -0
  38. doit_cli/prompts/__init__.py +5 -0
  39. doit_cli/prompts/fixit_prompts.py +344 -0
  40. doit_cli/prompts/interactive.py +390 -0
  41. doit_cli/rules/__init__.py +5 -0
  42. doit_cli/rules/builtin_rules.py +160 -0
  43. doit_cli/services/__init__.py +79 -0
  44. doit_cli/services/agent_detector.py +168 -0
  45. doit_cli/services/analytics_service.py +218 -0
  46. doit_cli/services/architecture_generator.py +290 -0
  47. doit_cli/services/backup_service.py +204 -0
  48. doit_cli/services/config_loader.py +113 -0
  49. doit_cli/services/context_loader.py +1121 -0
  50. doit_cli/services/coverage_calculator.py +142 -0
  51. doit_cli/services/crossref_service.py +237 -0
  52. doit_cli/services/cycle_time_calculator.py +134 -0
  53. doit_cli/services/date_inferrer.py +349 -0
  54. doit_cli/services/diagram_service.py +337 -0
  55. doit_cli/services/drift_detector.py +109 -0
  56. doit_cli/services/entity_parser.py +301 -0
  57. doit_cli/services/er_diagram_generator.py +197 -0
  58. doit_cli/services/fixit_service.py +699 -0
  59. doit_cli/services/github_service.py +192 -0
  60. doit_cli/services/hook_manager.py +258 -0
  61. doit_cli/services/hook_validator.py +528 -0
  62. doit_cli/services/input_validator.py +322 -0
  63. doit_cli/services/memory_search.py +527 -0
  64. doit_cli/services/mermaid_validator.py +334 -0
  65. doit_cli/services/prompt_transformer.py +91 -0
  66. doit_cli/services/prompt_writer.py +133 -0
  67. doit_cli/services/query_interpreter.py +428 -0
  68. doit_cli/services/report_exporter.py +219 -0
  69. doit_cli/services/report_generator.py +256 -0
  70. doit_cli/services/requirement_parser.py +112 -0
  71. doit_cli/services/roadmap_summarizer.py +209 -0
  72. doit_cli/services/rule_engine.py +443 -0
  73. doit_cli/services/scaffolder.py +215 -0
  74. doit_cli/services/score_calculator.py +172 -0
  75. doit_cli/services/section_parser.py +204 -0
  76. doit_cli/services/spec_scanner.py +327 -0
  77. doit_cli/services/state_manager.py +355 -0
  78. doit_cli/services/status_reporter.py +143 -0
  79. doit_cli/services/task_parser.py +347 -0
  80. doit_cli/services/template_manager.py +710 -0
  81. doit_cli/services/template_reader.py +158 -0
  82. doit_cli/services/user_journey_generator.py +214 -0
  83. doit_cli/services/user_story_parser.py +232 -0
  84. doit_cli/services/validation_service.py +188 -0
  85. doit_cli/services/validator.py +232 -0
  86. doit_cli/services/velocity_tracker.py +173 -0
  87. doit_cli/services/workflow_engine.py +405 -0
  88. doit_cli/templates/agent-file-template.md +28 -0
  89. doit_cli/templates/checklist-template.md +39 -0
  90. doit_cli/templates/commands/doit.checkin.md +363 -0
  91. doit_cli/templates/commands/doit.constitution.md +187 -0
  92. doit_cli/templates/commands/doit.documentit.md +485 -0
  93. doit_cli/templates/commands/doit.fixit.md +181 -0
  94. doit_cli/templates/commands/doit.implementit.md +265 -0
  95. doit_cli/templates/commands/doit.planit.md +262 -0
  96. doit_cli/templates/commands/doit.reviewit.md +355 -0
  97. doit_cli/templates/commands/doit.roadmapit.md +368 -0
  98. doit_cli/templates/commands/doit.scaffoldit.md +458 -0
  99. doit_cli/templates/commands/doit.specit.md +521 -0
  100. doit_cli/templates/commands/doit.taskit.md +304 -0
  101. doit_cli/templates/commands/doit.testit.md +277 -0
  102. doit_cli/templates/config/context.yaml +134 -0
  103. doit_cli/templates/config/hooks.yaml +93 -0
  104. doit_cli/templates/config/validation-rules.yaml +64 -0
  105. doit_cli/templates/github-issue-templates/epic.yml +78 -0
  106. doit_cli/templates/github-issue-templates/feature.yml +116 -0
  107. doit_cli/templates/github-issue-templates/task.yml +129 -0
  108. doit_cli/templates/hooks/.gitkeep +0 -0
  109. doit_cli/templates/hooks/post-commit.sh +25 -0
  110. doit_cli/templates/hooks/post-merge.sh +75 -0
  111. doit_cli/templates/hooks/pre-commit.sh +17 -0
  112. doit_cli/templates/hooks/pre-push.sh +18 -0
  113. doit_cli/templates/memory/completed_roadmap.md +50 -0
  114. doit_cli/templates/memory/constitution.md +125 -0
  115. doit_cli/templates/memory/roadmap.md +61 -0
  116. doit_cli/templates/plan-template.md +146 -0
  117. doit_cli/templates/scripts/bash/check-prerequisites.sh +166 -0
  118. doit_cli/templates/scripts/bash/common.sh +156 -0
  119. doit_cli/templates/scripts/bash/create-new-feature.sh +297 -0
  120. doit_cli/templates/scripts/bash/setup-plan.sh +61 -0
  121. doit_cli/templates/scripts/bash/update-agent-context.sh +675 -0
  122. doit_cli/templates/scripts/powershell/check-prerequisites.ps1 +148 -0
  123. doit_cli/templates/scripts/powershell/common.ps1 +137 -0
  124. doit_cli/templates/scripts/powershell/create-new-feature.ps1 +283 -0
  125. doit_cli/templates/scripts/powershell/setup-plan.ps1 +61 -0
  126. doit_cli/templates/scripts/powershell/update-agent-context.ps1 +406 -0
  127. doit_cli/templates/spec-template.md +159 -0
  128. doit_cli/templates/tasks-template.md +313 -0
  129. doit_cli/templates/vscode-settings.json +14 -0
  130. doit_toolkit_cli-0.1.9.dist-info/METADATA +324 -0
  131. doit_toolkit_cli-0.1.9.dist-info/RECORD +134 -0
  132. doit_toolkit_cli-0.1.9.dist-info/WHEEL +4 -0
  133. doit_toolkit_cli-0.1.9.dist-info/entry_points.txt +2 -0
  134. doit_toolkit_cli-0.1.9.dist-info/licenses/LICENSE +21 -0
doit_cli/templates/commands/doit.taskit.md
@@ -0,0 +1,304 @@
+ ---
+ description: Generate an actionable, dependency-ordered tasks.md for the feature based on available design artifacts.
+ handoffs:
+ - label: Analyze For Consistency
+ agent: doit.analyze
+ prompt: Run a project analysis for consistency
+ send: true
+ - label: Implement Project
+ agent: doit.implement
+ prompt: Start the implementation in phases
+ send: true
+ ---
+
+ ## User Input
+
+ ```text
+ $ARGUMENTS
+ ```
+
+ You **MUST** consider the user input before proceeding (if not empty).
+
+ ## Load Project Context
+
+ Before proceeding, load the project context to inform your responses:
+
+ ```bash
+ doit context show
+ ```
+
+ **If the command fails or doit is not installed**: Continue without context, but note that alignment with project principles cannot be verified.
+
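A minimal sketch of that fallback, for illustration only (not part of the packaged template); it assumes nothing beyond `doit context show` exiting non-zero when the CLI or the context is unavailable:

```bash
# Load project context if the doit CLI is available; otherwise continue without it.
if command -v doit >/dev/null 2>&1 && doit context show; then
  echo "Project context loaded."
else
  echo "doit unavailable or context missing; continuing without project context." >&2
fi
```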
+ **Use loaded context to**:
+
+ - Reference constitution principles when making decisions
+ - Consider roadmap priorities
+ - Identify connections to related specifications
+
+ ## Outline
+
+ 1. **Setup**: Run `.doit/scripts/bash/check-prerequisites.sh --json` from repo root and parse FEATURE_DIR and the AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax, e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
+
+ 2. **Load design documents**: Read from FEATURE_DIR:
+ - **Required**: plan.md (tech stack, libraries, structure), spec.md (user stories with priorities)
+ - **Optional**: data-model.md (entities), contracts/ (API endpoints), research.md (decisions), quickstart.md (test scenarios)
+ - Note: Not all projects have all documents. Generate tasks based on what's available.
+
+ 3. **Execute task generation workflow**:
+ - Load plan.md and extract tech stack, libraries, project structure
+ - Load spec.md and extract user stories with their priorities (P1, P2, P3, etc.)
+ - If data-model.md exists: Extract entities and map to user stories
+ - If contracts/ exists: Map endpoints to user stories
+ - If research.md exists: Extract decisions for setup tasks
+ - Generate tasks organized by user story (see Task Generation Rules below)
+ - Generate dependency graph showing user story completion order
+ - Create parallel execution examples per user story
+ - Validate task completeness (each user story has all needed tasks, independently testable)
+
+ 4. **Generate tasks.md**: Use `.doit/templates/tasks-template.md` as structure, fill with:
+ - Correct feature name from plan.md
+ - Phase 1: Setup tasks (project initialization)
+ - Phase 2: Foundational tasks (blocking prerequisites for all user stories)
+ - Phase 3+: One phase per user story (in priority order from spec.md)
+ - Each phase includes: story goal, independent test criteria, tests (if requested), implementation tasks
+ - Final Phase: Polish & cross-cutting concerns
+ - All tasks must follow the strict checklist format (see Task Generation Rules below)
+ - Clear file paths for each task
+ - Dependencies section showing story completion order
+ - Parallel execution examples per story
+ - Implementation strategy section (MVP first, incremental delivery)
+
+ 5. **Generate Mermaid Visualizations** (FR-008, FR-009, FR-010):
+
+ After generating the task list, create visual diagrams to show execution order and timelines:
+
+ a. **Task Dependencies Flowchart**:
+ - Parse all generated tasks and their dependencies
+ - Identify parallel tasks (marked with [P])
+ - Group tasks by phase using subgraphs
+ - Generate flowchart showing task execution order
+ - Use `&` syntax for parallel tasks: `T003 --> T004 & T005`
+ - Replace content in `<!-- BEGIN:AUTO-GENERATED section="task-dependencies" -->` markers
+
+ ```mermaid
+ flowchart TD
+ subgraph "Phase 1: Setup"
+ T001[T001: Project init]
+ end
+
+ subgraph "Phase 2: Foundation"
+ T002[T002: Dependencies]
+ T003[T003: Core setup]
+ end
+
+ subgraph "Phase 3: US1"
+ T004[T004: Model A]
+ T005[T005: Model B]
+ T006[T006: Service]
+ end
+
+ T001 --> T002 --> T003
+ T003 --> T004 & T005
+ T004 & T005 --> T006
+ ```
+
+ b. **Phase Timeline Gantt Chart**:
+ - Extract phases and their task counts
+ - Estimate duration based on task complexity (1 task ≈ 0.5-1 day)
+ - Generate gantt chart showing phase timeline
+ - Use `after` syntax for dependencies
+ - Replace content in `<!-- BEGIN:AUTO-GENERATED section="phase-timeline" -->` markers
+
+ ```mermaid
+ gantt
+ title Implementation Phases
+ dateFormat YYYY-MM-DD
+
+ section Phase 1: Setup
+ Project initialization :a1, 2024-01-01, 1d
+
+ section Phase 2: Foundation
+ Core infrastructure :b1, after a1, 2d
+
+ section Phase 3: US1 (P1)
+ User Story 1 implementation :c1, after b1, 3d
+
+ section Phase 4: US2 (P2)
+ User Story 2 implementation :d1, after c1, 2d
+
+ section Final: Polish
+ Cross-cutting concerns :e1, after d1, 1d
+ ```
+
+ c. **Parallel Task Detection**:
+ - Scan all tasks for [P] markers
+ - Group consecutive parallel tasks for diagram optimization
+ - In flowchart: Connect parallel tasks with `&` syntax
+ - Add legend note: "[P] = Can run in parallel"
+
+ d. **Diagram Validation**:
+ - Verify mermaid syntax is valid
+ - Check task count per subgraph (max 15 per phase)
+ - If exceeding limits, group smaller tasks into summary nodes
+ - Ensure all task IDs in diagram match task list
+
+ 6. **Report**: Output path to generated tasks.md and summary:
+ - Total task count
+ - Task count per user story
+ - Parallel opportunities identified
+ - Independent test criteria for each story
+ - Suggested MVP scope (typically just User Story 1)
+ - Format validation: Confirm ALL tasks follow the checklist format (checkbox, ID, labels, file paths)
+
+ 7. **GitHub Issue Integration**:
+ - Check for `--skip-issues` in $ARGUMENTS - if present, skip issue creation
+ - Detect GitHub remote: `git remote get-url origin`
+ - If GitHub remote found and not skipped:
+ - For each generated task, create a GitHub Task issue using the task.yml template
+ - Find parent Feature issues by searching for feature labels matching current spec
+ - Link Task issues to parent Feature using "Part of Feature #XXX" in body
+ - Add phase label (e.g., "Phase 3 - Core Implementation")
+ - Add effort estimate if extractable from task description
+ - If GitHub unavailable or API fails: Log warning and continue without issues
+ - Report: Number of issues created, any linking errors
+
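For illustration (not part of the packaged template), step 7's issue-creation flow might look like the following GitHub CLI sketch; the label names, title text, and parent-lookup query are assumptions, not values defined by the package:

```bash
# Hypothetical sketch: create one Task issue and link it to a parent Feature issue.
# Assumes `gh` is authenticated; the "feature" label and the title are illustrative only.
if git remote get-url origin >/dev/null 2>&1; then
  PARENT=$(gh issue list --label "feature" --state open --json number --jq '.[0].number')
  if ! gh issue create \
      --title "T012 [US1] Create User model in src/models/user.py" \
      --body "Part of Feature #${PARENT}" \
      --label "Phase 3 - Core Implementation"; then
    echo "GitHub API call failed; continuing without issues." >&2
  fi
else
  echo "No GitHub remote detected; skipping issue creation." >&2
fi
```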
+ Context for task generation: $ARGUMENTS
+
+ The tasks.md should be immediately executable - each task must be specific enough that an LLM can complete it without additional context.
+
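Step 1 of the outline above leaves the JSON parsing implicit; a hypothetical sketch using jq is shown below. The key names `FEATURE_DIR` and `AVAILABLE_DOCS` are assumed from the outline's wording rather than confirmed against the script:

```bash
# Hypothetical sketch: parse the --json output from check-prerequisites.sh with jq.
PREREQS_JSON="$(.doit/scripts/bash/check-prerequisites.sh --json)"
FEATURE_DIR="$(echo "$PREREQS_JSON" | jq -r '.FEATURE_DIR')"
echo "Feature directory: $FEATURE_DIR"
echo "$PREREQS_JSON" | jq -r '.AVAILABLE_DOCS[]' | while read -r doc; do
  echo "Available doc: $doc"
done
```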
+ ## Task Generation Rules
+
+ **CRITICAL**: Tasks MUST be organized by user story to enable independent implementation and testing.
+
+ **Tests are OPTIONAL**: Only generate test tasks if explicitly requested in the feature specification or if the user requests a TDD approach.
+
+ ### Checklist Format (REQUIRED)
+
+ Every task MUST strictly follow this format:
+
+ ```text
+ - [ ] [TaskID] [P?] [Story?] Description with file path
+ ```
+
+ **Format Components**:
+
+ 1. **Checkbox**: ALWAYS start with `- [ ]` (markdown checkbox)
+ 2. **Task ID**: Sequential number (T001, T002, T003...) in execution order
+ 3. **[P] marker**: Include ONLY if task is parallelizable (different files, no dependencies on incomplete tasks)
+ 4. **[Story] label**: REQUIRED for user story phase tasks only
+ - Format: [US1], [US2], [US3], etc. (maps to user stories from spec.md)
+ - Setup phase: NO story label
+ - Foundational phase: NO story label
+ - User Story phases: MUST have story label
+ - Polish phase: NO story label
+ 5. **Description**: Clear action with exact file path
+
+ **Examples**:
+
+ - ✅ CORRECT: `- [ ] T001 Create project structure per implementation plan`
+ - ✅ CORRECT: `- [ ] T005 [P] Implement authentication middleware in src/middleware/auth.py`
+ - ✅ CORRECT: `- [ ] T012 [P] [US1] Create User model in src/models/user.py`
+ - ✅ CORRECT: `- [ ] T014 [US1] Implement UserService in src/services/user_service.py`
+ - ❌ WRONG: `- [ ] Create User model` (missing ID and Story label)
+ - ❌ WRONG: `T001 [US1] Create model` (missing checkbox)
+ - ❌ WRONG: `- [ ] [US1] Create User model` (missing Task ID)
+ - ❌ WRONG: `- [ ] T001 [US1] Create model` (missing file path)
+
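The format rules above lend themselves to a quick mechanical check during step 6's format validation. This grep sketch is illustrative only; it verifies the checkbox, ID, and label structure but not the file-path requirement:

```bash
# Flag any task line that has a checkbox but does not match: T### ID, optional [P], optional [US#], description.
grep -nE '^- \[ \] ' tasks.md \
  | grep -vE '^[0-9]+:- \[ \] T[0-9]{3}( \[P\])?( \[US[0-9]+\])? \S' \
  && echo "Found task lines that break the checklist format." \
  || echo "All task checkboxes match the required format."
```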
+ ### Task Organization
+
+ 1. **From User Stories (spec.md)** - PRIMARY ORGANIZATION:
+ - Each user story (P1, P2, P3...) gets its own phase
+ - Map all related components to their story:
+ - Models needed for that story
+ - Services needed for that story
+ - Endpoints/UI needed for that story
+ - If tests requested: Tests specific to that story
+ - Mark story dependencies (most stories should be independent)
+
+ 2. **From Contracts**:
+ - Map each contract/endpoint → the user story it serves
+ - If tests requested: Each contract → contract test task [P] before implementation in that story's phase
+
+ 3. **From Data Model**:
+ - Map each entity to the user story(ies) that need it
+ - If entity serves multiple stories: Put in earliest story or Setup phase
+ - Relationships → service layer tasks in appropriate story phase
+
+ 4. **From Setup/Infrastructure**:
+ - Shared infrastructure → Setup phase (Phase 1)
+ - Foundational/blocking tasks → Foundational phase (Phase 2)
+ - Story-specific setup → within that story's phase
+
+ ### Phase Structure
+
+ - **Phase 1**: Setup (project initialization)
+ - **Phase 2**: Foundational (blocking prerequisites - MUST complete before user stories)
+ - **Phase 3+**: User Stories in priority order (P1, P2, P3...)
+ - Within each story: Tests (if requested) → Models → Services → Endpoints → Integration
+ - Each phase should be a complete, independently testable increment
+ - **Final Phase**: Polish & Cross-Cutting Concerns
+
+ ---
+
+ ## Next Steps
+
+ After completing this command, display a recommendation section based on the outcome:
+
+ ### On Success (tasks generated)
+
+ Display the following at the end of your output:
+
+ ```markdown
+ ---
+
+ ## Next Steps
+
+ ┌─────────────────────────────────────────────────────────────┐
+ │ Workflow Progress │
+ │ ● specit → ● planit → ● taskit → ○ implementit → ○ checkin │
+ └─────────────────────────────────────────────────────────────┘
+
+ **Recommended**: Run `/doit.implementit` to start executing the implementation tasks.
+ ```
+
+ ### On Error (missing plan.md)
+
+ If the command fails because plan.md is not found:
+
+ ```markdown
+ ---
+
+ ## Next Steps
+
+ **Issue**: No implementation plan found. The taskit command requires plan.md to exist.
+
+ **Recommended**: Run `/doit.planit` to create an implementation plan first.
+ ```
+
+ ### On Error (missing spec.md)
+
+ If the command fails because spec.md is not found:
+
+ ```markdown
+ ---
+
+ ## Next Steps
+
+ **Issue**: No feature specification found.
+
+ **Recommended**: Run `/doit.specit [feature description]` to create a feature specification first.
+ ```
+
+ ### On Error (other issues)
+
+ If the command fails for another reason:
+
+ ```markdown
+ ---
+
+ ## Next Steps
+
+ **Issue**: [Brief description of what went wrong]
+
+ **Recommended**: [Specific recovery action based on the error]
+ ```
doit_cli/templates/commands/doit.testit.md
@@ -0,0 +1,277 @@
+ ---
+ description: Execute automated tests and generate test reports with requirement mapping
+ handoffs:
+ - label: Check In
+ agent: doit.checkin
+ prompt: Finalize feature and create pull request
+ send: true
+ - label: Review Again
+ agent: doit.review
+ prompt: Re-review code after test fixes
+ send: true
+ ---
+
+ ## User Input
+
+ ```text
+ $ARGUMENTS
+ ```
+
+ You **MUST** consider the user input before proceeding (if not empty).
+
+ ## Load Project Context
+
+ Before proceeding, load the project context to inform your responses:
+
+ ```bash
+ doit context show
+ ```
+
+ **If the command fails or doit is not installed**: Continue without context, but note that alignment with project principles cannot be verified.
+
+ **Use loaded context to**:
+
+ - Reference constitution principles when making decisions
+ - Consider roadmap priorities
+ - Identify connections to related specifications
+
+ ## Outline
+
+ 1. **Setup**: Run `.doit/scripts/bash/check-prerequisites.sh --json` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute.
+
+ 2. **Load test context**:
+ - **REQUIRED**: Read spec.md for requirements (FR-XXX identifiers)
+ - **REQUIRED**: Read tasks.md for test file locations
+ - **IF EXISTS**: Read plan.md for test strategy and coverage goals
+ - **IF EXISTS**: Read contracts/ for API test expectations
+
+ 3. **Detect test framework**:
+ - Check for test configuration files:
+ - **Python**: pytest.ini, pyproject.toml [tool.pytest], setup.cfg, conftest.py
+ - **JavaScript/TypeScript**: jest.config.js/ts, vitest.config.js/ts, mocha.opts, .mocharc.*
+ - **Java**: pom.xml (maven-surefire), build.gradle (test task)
+ - **Go**: *_test.go files present
+ - **Ruby**: Rakefile, .rspec, spec/ directory
+ - **C#/.NET**: *.csproj with test SDK, xunit, nunit references
+ - **Rust**: Cargo.toml with [dev-dependencies] test crates
+ - Determine test command:
+ - pytest: `pytest -v --tb=short`
+ - jest: `npm test` or `npx jest`
+ - vitest: `npm test` or `npx vitest run`
+ - go: `go test ./...`
+ - maven: `mvn test`
+ - gradle: `./gradlew test`
+ - dotnet: `dotnet test`
+ - cargo: `cargo test`
+
+ 4. **Execute test suite**:
+ - Run detected test command
+ - Capture stdout/stderr
+ - Parse test results:
+ - Total tests run
+ - Passed tests
+ - Failed tests
+ - Skipped tests
+ - Test duration
+ - Capture any coverage reports if generated
+
+ 5. **Generate test report**:
+
+ ```text
+ ## Automated Test Results
+
+ **Framework**: [detected framework]
+ **Command**: [test command used]
+ **Duration**: [total time]
+
+ ### Summary
+ | Metric | Count |
+ |--------|-------|
+ | Total | X |
+ | Passed | X |
+ | Failed | X |
+ | Skipped | X |
+
+ ### Failed Tests
+ | Test | File | Error |
+ |------|------|-------|
+ | test_name | path/to/test.py | AssertionError: ... |
+
+ ### Coverage (if available)
+ | File | Coverage |
+ |------|----------|
+ | src/module.py | 85% |
+ ```
+
+ 6. **Map tests to requirements**:
+ - Parse test names and docstrings for FR-XXX references
+ - Match tests to requirements from spec.md
+ - Generate requirement coverage matrix:
+
+ ```text
+ ## Requirement Coverage
+
+ | Requirement | Description | Tests | Status |
+ |-------------|-------------|-------|--------|
+ | FR-001 | User login | test_login, test_auth | COVERED |
+ | FR-002 | Password reset | - | NOT COVERED |
+ | FR-003 | Session timeout | test_session_expiry | COVERED |
+ ```
+
+ - Calculate coverage percentage: (covered requirements / total requirements) * 100
+
+ 7. **Generate manual testing checklist**:
+ - Extract acceptance criteria from spec.md that cannot be automated
+ - Create checklist format:
+
+ ```text
+ ## Manual Testing Checklist
+
+ ### UI/UX Tests
+ - [ ] MT-001: Verify login form displays correctly on mobile
+ - [ ] MT-002: Verify error messages are user-friendly
+
+ ### Integration Tests
+ - [ ] MT-003: Verify third-party payment processing
+ - [ ] MT-004: Verify email notifications are received
+
+ ### Edge Cases
+ - [ ] MT-005: Verify behavior with slow network
+ - [ ] MT-006: Verify recovery from server timeout
+ ```
+
+ 8. **Record manual test results**:
+ - If $ARGUMENTS contains `--manual`:
+ - Present each manual test item
+ - Ask for PASS/FAIL/SKIP result
+ - Record notes for failed tests
+ - Track completion progress
+ - Otherwise, output checklist for later completion
+
+ 9. **Generate test-report.md** in FEATURE_DIR:
+
+ ```markdown
+ # Test Report: [Feature Name]
+
+ **Date**: [timestamp]
+ **Branch**: [current branch]
+ **Test Framework**: [detected]
+
+ ## Automated Tests
+
+ ### Execution Summary
+ | Metric | Value |
+ |--------|-------|
+ | Total Tests | X |
+ | Passed | X |
+ | Failed | X |
+ | Skipped | X |
+ | Duration | Xs |
+
+ ### Failed Tests Detail
+ [list of failed tests with errors]
+
+ ### Code Coverage
+ [coverage summary if available]
+
+ ## Requirement Coverage
+
+ | Requirement | Tests | Status |
+ |-------------|-------|--------|
+ | FR-XXX | test_xxx | COVERED |
+ ...
+
+ **Coverage**: X/Y requirements (Z%)
+
+ ## Manual Testing
+
+ ### Checklist Status
+ | Test ID | Description | Status |
+ |---------|-------------|--------|
+ | MT-001 | ... | PENDING |
+ ...
+
+ ## Recommendations
+
+ 1. [Fix failing tests before merge]
+ 2. [Add tests for uncovered requirements]
+ 3. [Complete manual testing checklist]
+
+ ## Next Steps
+
+ - Fix any failing tests
+ - Complete manual testing if not done
+ - Run `/doit.checkin` when all tests pass
+ ```
+
+ 10. **Report**: Output path to test-report.md and summary:
+ - Automated test pass rate
+ - Requirement coverage percentage
+ - Manual test completion status
+ - Overall readiness for merge
+
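To make steps 3 and 4 of the outline concrete, detection and execution could look like the sketch below (illustrative only; it covers just three of the listed ecosystems, and the `test-output.log` name is an assumption):

```bash
# Hypothetical sketch: pick a test command from common config files, then run it and capture output.
if [ -f pytest.ini ] || [ -f conftest.py ] || grep -q '\[tool\.pytest' pyproject.toml 2>/dev/null; then
  TEST_CMD="pytest -v --tb=short"
elif [ -f jest.config.js ] || [ -f jest.config.ts ]; then
  TEST_CMD="npx jest"
elif find . -name '*_test.go' 2>/dev/null | grep -q .; then
  TEST_CMD="go test ./..."
else
  echo "No test framework detected; report this and suggest adding tests." >&2
  exit 1
fi

# Run the suite, preserving stdout/stderr for the report and for debugging.
$TEST_CMD 2>&1 | tee test-output.log
```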
+ ## Key Rules
+
+ - Use absolute paths for all file operations
+ - If no test framework detected, report and suggest adding tests
+ - Continue generating report even if some tests fail
+ - Preserve test output for debugging
+ - Map requirements using FR-XXX pattern matching in test names/docstrings
+
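The last rule's FR-XXX mapping, together with step 6's coverage percentage, can be approximated with plain grep. A rough sketch, assuming requirement IDs follow the FR-### pattern in both spec.md and the test tree:

```bash
# Hypothetical sketch: list requirement IDs referenced anywhere under tests/,
# then compute a rough requirement-coverage percentage against spec.md.
# Assumes spec.md defines at least one FR-### requirement.
grep -rhoE 'FR-[0-9]{3}' tests/ | sort -u > covered.txt
grep -hoE 'FR-[0-9]{3}' spec.md | sort -u > required.txt
covered=$(comm -12 required.txt covered.txt | wc -l)
total=$(wc -l < required.txt)
echo "Requirement coverage: $covered/$total ($((100 * covered / total))%)"
```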
+ ---
+
+ ## Next Steps
+
+ After completing this command, display a recommendation section based on the outcome:
+
+ ### On Success (all tests pass)
+
+ Display the following at the end of your output:
+
+ ```markdown
+ ---
+
+ ## Next Steps
+
+ ┌─────────────────────────────────────────────────────────────────────────┐
+ │ Workflow Progress │
+ │ ● specit → ● planit → ● taskit → ● implementit → ● testit → ○ checkin │
+ └─────────────────────────────────────────────────────────────────────────┘
+
+ **Recommended**: Run `/doit.reviewit` for a code review before finalizing.
+
+ **Alternative**: Run `/doit.checkin` to merge your changes if code review is not needed.
+ ```
+
+ ### On Failure (tests fail)
+
+ If some tests fail:
+
+ ```markdown
+ ---
+
+ ## Next Steps
+
+ ┌─────────────────────────────────────────────────────────────────────────┐
+ │ Workflow Progress │
+ │ ● specit → ● planit → ● taskit → ● implementit → ◐ testit → ○ checkin │
+ └─────────────────────────────────────────────────────────────────────────┘
+
+ **Status**: N tests failed out of M total.
+
+ **Recommended**: Run `/doit.implementit` to fix the failing tests.
+ ```
+
+ ### On Error (no test framework detected)
+
+ If no test framework is detected:
+
+ ```markdown
+ ---
+
+ ## Next Steps
+
+ **Issue**: No test framework detected in this project.
+
+ **Recommended**: Add tests to your project and run `/doit.testit` again, or proceed with `/doit.reviewit` for code review.
+ ```