@zigrivers/scaffold 2.1.2 → 2.28.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +272 -59
- package/knowledge/core/adr-craft.md +53 -0
- package/knowledge/core/ai-memory-management.md +246 -0
- package/knowledge/core/api-design.md +4 -0
- package/knowledge/core/claude-md-patterns.md +254 -0
- package/knowledge/core/coding-conventions.md +246 -0
- package/knowledge/core/database-design.md +4 -0
- package/knowledge/core/design-system-tokens.md +465 -0
- package/knowledge/core/dev-environment.md +223 -0
- package/knowledge/core/domain-modeling.md +4 -0
- package/knowledge/core/eval-craft.md +1008 -0
- package/knowledge/core/multi-model-review-dispatch.md +250 -0
- package/knowledge/core/operations-runbook.md +37 -226
- package/knowledge/core/project-structure-patterns.md +231 -0
- package/knowledge/core/review-step-template.md +247 -0
- package/knowledge/core/{security-review.md → security-best-practices.md} +5 -1
- package/knowledge/core/task-decomposition.md +57 -34
- package/knowledge/core/task-tracking.md +225 -0
- package/knowledge/core/tech-stack-selection.md +214 -0
- package/knowledge/core/testing-strategy.md +63 -70
- package/knowledge/core/user-stories.md +69 -60
- package/knowledge/core/user-story-innovation.md +57 -0
- package/knowledge/core/ux-specification.md +5 -148
- package/knowledge/finalization/apply-fixes-and-freeze.md +165 -14
- package/knowledge/product/prd-craft.md +55 -34
- package/knowledge/review/review-adr.md +32 -0
- package/knowledge/review/{review-api-contracts.md → review-api-design.md} +34 -1
- package/knowledge/review/{review-database-schema.md → review-database-design.md} +27 -1
- package/knowledge/review/review-domain-modeling.md +33 -0
- package/knowledge/review/review-implementation-tasks.md +50 -0
- package/knowledge/review/review-operations.md +55 -0
- package/knowledge/review/review-prd.md +33 -0
- package/knowledge/review/review-security.md +53 -0
- package/knowledge/review/review-system-architecture.md +28 -0
- package/knowledge/review/review-testing-strategy.md +51 -0
- package/knowledge/review/review-user-stories.md +54 -0
- package/knowledge/review/{review-ux-spec.md → review-ux-specification.md} +37 -1
- package/methodology/custom-defaults.yml +32 -3
- package/methodology/deep.yml +32 -3
- package/methodology/mvp.yml +32 -3
- package/package.json +2 -1
- package/pipeline/architecture/review-architecture.md +18 -6
- package/pipeline/architecture/system-architecture.md +14 -2
- package/pipeline/consolidation/claude-md-optimization.md +73 -0
- package/pipeline/consolidation/workflow-audit.md +73 -0
- package/pipeline/decisions/adrs.md +14 -2
- package/pipeline/decisions/review-adrs.md +18 -5
- package/pipeline/environment/ai-memory-setup.md +70 -0
- package/pipeline/environment/automated-pr-review.md +70 -0
- package/pipeline/environment/design-system.md +73 -0
- package/pipeline/environment/dev-env-setup.md +65 -0
- package/pipeline/environment/git-workflow.md +71 -0
- package/pipeline/finalization/apply-fixes-and-freeze.md +1 -1
- package/pipeline/finalization/developer-onboarding-guide.md +1 -1
- package/pipeline/finalization/implementation-playbook.md +3 -3
- package/pipeline/foundation/beads.md +68 -0
- package/pipeline/foundation/coding-standards.md +68 -0
- package/pipeline/foundation/project-structure.md +69 -0
- package/pipeline/foundation/tdd.md +60 -0
- package/pipeline/foundation/tech-stack.md +74 -0
- package/pipeline/integration/add-e2e-testing.md +65 -0
- package/pipeline/modeling/domain-modeling.md +14 -2
- package/pipeline/modeling/review-domain-modeling.md +18 -5
- package/pipeline/parity/platform-parity-review.md +70 -0
- package/pipeline/planning/implementation-plan-review.md +56 -0
- package/pipeline/planning/{implementation-tasks.md → implementation-plan.md} +29 -9
- package/pipeline/pre/create-prd.md +13 -4
- package/pipeline/pre/innovate-prd.md +37 -8
- package/pipeline/pre/innovate-user-stories.md +38 -7
- package/pipeline/pre/review-prd.md +18 -6
- package/pipeline/pre/review-user-stories.md +23 -6
- package/pipeline/pre/user-stories.md +12 -2
- package/pipeline/quality/create-evals.md +102 -0
- package/pipeline/quality/operations.md +38 -13
- package/pipeline/quality/review-operations.md +17 -5
- package/pipeline/quality/review-security.md +17 -5
- package/pipeline/quality/review-testing.md +20 -8
- package/pipeline/quality/security.md +25 -3
- package/pipeline/quality/story-tests.md +73 -0
- package/pipeline/specification/api-contracts.md +17 -2
- package/pipeline/specification/database-schema.md +17 -2
- package/pipeline/specification/review-api.md +18 -6
- package/pipeline/specification/review-database.md +18 -6
- package/pipeline/specification/review-ux.md +19 -7
- package/pipeline/specification/ux-spec.md +29 -10
- package/pipeline/validation/critical-path-walkthrough.md +34 -7
- package/pipeline/validation/cross-phase-consistency.md +34 -7
- package/pipeline/validation/decision-completeness.md +34 -7
- package/pipeline/validation/dependency-graph-validation.md +34 -7
- package/pipeline/validation/implementability-dry-run.md +34 -7
- package/pipeline/validation/scope-creep-check.md +34 -7
- package/pipeline/validation/traceability-matrix.md +34 -7
- package/skills/multi-model-dispatch/SKILL.md +326 -0
- package/skills/scaffold-pipeline/SKILL.md +195 -0
- package/skills/scaffold-runner/SKILL.md +465 -0
- package/pipeline/planning/review-tasks.md +0 -38
- package/pipeline/quality/testing-strategy.md +0 -42
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
---
|
|
2
|
-
name: review-api-contracts
|
|
2
|
+
name: review-api-design
|
|
3
3
|
description: Failure modes and review passes specific to API contract specifications
|
|
4
4
|
topics: [review, api, contracts, rest, graphql]
|
|
5
5
|
---
|
|
@@ -231,3 +231,36 @@ The implementation tasks step needs:
|
|
|
231
231
|
- P0: "The UX wireframe shows a 'user dashboard' with order count, recent orders, and account balance, but the API has no endpoint that provides this aggregated data. The frontend would need to make 3+ separate calls."
|
|
232
232
|
- P1: "Several endpoints are marked as 'async' (returns 202) but there is no documented polling or webhook mechanism for the frontend to get the result."
|
|
233
233
|
- P2: "API response examples do not include null/empty cases. The UX spec needs to know what an empty order list or a user with no profile photo looks like in API terms."
|
|
234
|
+
|
|
235
|
+
### Example Review Finding
|
|
236
|
+
|
|
237
|
+
```markdown
|
|
238
|
+
### Finding: Payment endpoint missing idempotency specification
|
|
239
|
+
|
|
240
|
+
**Pass:** 6 — Idempotency
|
|
241
|
+
**Priority:** P0
|
|
242
|
+
**Location:** API Contract Section 5.3 "POST /payments/charge"
|
|
243
|
+
|
|
244
|
+
**Issue:** The POST /payments/charge endpoint accepts a payment method and amount,
|
|
245
|
+
charges the customer, and returns a payment confirmation. The endpoint documents
|
|
246
|
+
only the 201 (success) and 400 (bad request) responses.
|
|
247
|
+
|
|
248
|
+
No idempotency mechanism is specified. If a client sends a charge request and
|
|
249
|
+
receives a network timeout (no response), it cannot safely retry — the retry
|
|
250
|
+
may charge the customer a second time. This is a financial data integrity issue.
|
|
251
|
+
|
|
252
|
+
**Impact:** Frontend developers will either (a) not retry on timeout, leaving
|
|
253
|
+
the user unsure if payment succeeded, or (b) retry unconditionally, risking
|
|
254
|
+
double charges. Both outcomes damage user trust and create support burden.
|
|
255
|
+
|
|
256
|
+
**Recommendation:** Add an Idempotency-Key header requirement:
|
|
257
|
+
- Client must include `Idempotency-Key: <uuid>` on every POST /payments/charge
|
|
258
|
+
- Server stores the key with the payment result for 24 hours
|
|
259
|
+
- Repeated requests with the same key return the original result without
|
|
260
|
+
re-processing
|
|
261
|
+
- Document the key format (UUIDv4), retention window (24h), and behavior on
|
|
262
|
+
key reuse (return cached result with 200, not 201)
|
|
263
|
+
|
|
264
|
+
**Trace:** API Contract 5.3 → PRD Section 3.2 "Payment Processing" →
|
|
265
|
+
ADR-009 "Financial data integrity requirements"
|
|
266
|
+
```
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
---
|
|
2
|
-
name: review-database-schema
|
|
2
|
+
name: review-database-design
|
|
3
3
|
description: Failure modes and review passes specific to database schema design artifacts
|
|
4
4
|
topics: [review, database, schema, data-modeling]
|
|
5
5
|
---
|
|
@@ -227,3 +227,29 @@ The API contracts step specifically needs:
|
|
|
227
227
|
- P0: "API will need 'get all orders for a customer with their line items and product details.' This requires joining orders -> line_items -> products, but line_items has no index on order_id, and the relationship from line_items to products is missing."
|
|
228
228
|
- P1: "The schema supports 'get user by email' but the API will also need 'search users by name.' No index exists on user name columns."
|
|
229
229
|
- P2: "Some tables use soft delete (deleted_at column) and some use hard delete. The API contract needs to know which approach applies to determine whether 'delete' operations return 204 or 200."
|
|
230
|
+
|
|
231
|
+
### Example Review Finding
|
|
232
|
+
|
|
233
|
+
```markdown
|
|
234
|
+
### Finding: Missing composite index for primary order query pattern
|
|
235
|
+
|
|
236
|
+
**Pass:** 4 — Index Coverage
|
|
237
|
+
**Priority:** P0
|
|
238
|
+
**Location:** orders table, schema.sql lines 45-72
|
|
239
|
+
|
|
240
|
+
**Issue:** Architecture data flow DF-003 ("Customer views order history") describes
|
|
241
|
+
the primary query as "find all orders by customer, sorted by most recent first." This
|
|
242
|
+
query filters on customer_id and sorts on created_at DESC. The orders table has a
|
|
243
|
+
single-column index on customer_id but no composite index on (customer_id, created_at).
|
|
244
|
+
|
|
245
|
+
**Impact:** Without a composite index, PostgreSQL will use the customer_id index to
|
|
246
|
+
filter, then perform a filesort on the matching rows. At projected volume (50K orders
|
|
247
|
+
per customer for enterprise accounts), this filesort will cause multi-second response
|
|
248
|
+
times on the most frequently executed query.
|
|
249
|
+
|
|
250
|
+
**Recommendation:** Add composite index: CREATE INDEX idx_orders_customer_date
|
|
251
|
+
ON orders (customer_id, created_at DESC). The DESC matches the sort direction,
|
|
252
|
+
allowing rows to be read in index order with no separate sort step for this query pattern.
|
|
253
|
+
|
|
254
|
+
**Trace:** Architecture data flow DF-003 → PRD Feature 2.1 "Order History"
|
|
255
|
+
```
|
|
@@ -286,3 +286,36 @@ Internal inconsistencies within a single domain model erode trust in the artifac
|
|
|
286
286
|
- P0: "Invariant 'PaymentAmount must not exceed OrderTotal' references PaymentAmount, but the Payment entity has an attribute called 'amount', not 'paymentAmount'."
|
|
287
287
|
- P1: "Relationship diagram shows Order -> Customer as one-to-many, but the Order entity definition says 'each order belongs to one customer' (many-to-one). Direction is inverted."
|
|
288
288
|
- P2: "The Inventory domain model calls the same concept 'stock level' in the overview and 'quantity on hand' in the entity definition."
|
|
289
|
+
|
|
290
|
+
### Example Review Finding
|
|
291
|
+
|
|
292
|
+
```markdown
|
|
293
|
+
### Finding: Aggregate boundary cannot enforce cross-aggregate invariant
|
|
294
|
+
|
|
295
|
+
**Pass:** 4 — Aggregate Boundary Validation
|
|
296
|
+
**Priority:** P0
|
|
297
|
+
**Location:** Order aggregate and Discount aggregate (domain-models.md, Section 3.2)
|
|
298
|
+
|
|
299
|
+
**Issue:** Domain invariant INV-007 states "discount amount must not exceed order
|
|
300
|
+
subtotal." Enforcing this requires access to both the Order aggregate (to read the
|
|
301
|
+
subtotal, which is the sum of line items) and the Discount aggregate (to read the
|
|
302
|
+
discount amount). These are modeled as separate aggregates with independent
|
|
303
|
+
lifecycles.
|
|
304
|
+
|
|
305
|
+
Because aggregates are consistency boundaries, there is no transactional guarantee
|
|
306
|
+
that the discount and order subtotal are evaluated atomically. A line item could be
|
|
307
|
+
removed from the Order (reducing subtotal) after a discount was validated against
|
|
308
|
+
the previous subtotal, violating the invariant.
|
|
309
|
+
|
|
310
|
+
**Impact:** Without resolution, implementing agents will either (a) ignore the
|
|
311
|
+
invariant, allowing invalid discount states, or (b) create tight coupling between
|
|
312
|
+
Order and Discount aggregates, defeating the purpose of the boundary.
|
|
313
|
+
|
|
314
|
+
**Recommendation:** Move Discount inside the Order aggregate as a value object.
|
|
315
|
+
The discount lifecycle is tied to the order — discounts do not exist independently.
|
|
316
|
+
This allows the Order aggregate root to enforce INV-007 within a single
|
|
317
|
+
consistency boundary.
|
|
318
|
+
|
|
319
|
+
**Trace:** Invariant INV-007 → Order aggregate + Discount aggregate → PRD
|
|
320
|
+
Feature 3.4 "Apply discount codes at checkout"
|
|
321
|
+
```
|
|
@@ -200,3 +200,53 @@ AI agents have limited context windows. If a task does not specify what to read,
|
|
|
200
200
|
- P0: "Task 'Implement order creation endpoint' lists no context documents. The agent needs the API contract (endpoint spec), database schema (orders table), domain model (Order aggregate invariants), and architecture section (Order Service design)."
|
|
201
201
|
- P1: "Task 'Build user dashboard' references the architecture document but not the UX spec. The agent will build the component structure correctly but not the visual design."
|
|
202
202
|
- P2: "Task context references 'docs/system-architecture.md' without specifying which section. The agent will load the entire 2000-line document instead of the relevant 100-line section."
|
|
203
|
+
|
|
204
|
+
---
|
|
205
|
+
|
|
206
|
+
## Common Review Anti-Patterns
|
|
207
|
+
|
|
208
|
+
### 1. Reviewing Tasks in Isolation
|
|
209
|
+
|
|
210
|
+
The reviewer checks each task individually (sizing, acceptance criteria, context) but never builds the full dependency graph or traces the critical path. Individual tasks may look fine, but the overall task structure has cycles, missing coverage, or an incorrect critical path. Passes 2, 5, and 6 require looking at the task set as a whole, not one task at a time.
|
|
211
|
+
|
|
212
|
+
**How to spot it:** The review report has findings only from Passes 3, 4, and 7 (task-level checks) and none from Passes 1, 2, 5, or 6 (structural checks). The reviewer never drew the dependency graph.
|
|
213
|
+
|
|
214
|
+
### 2. Trusting Dependency Declarations Without Verification
|
|
215
|
+
|
|
216
|
+
The reviewer reads the declared dependencies for each task and checks for cycles, but never verifies that the declared dependencies are complete. A task that says "depends on: database schema" may also implicitly depend on "auth middleware" (because the endpoint requires authentication), but this dependency is not declared. The reviewer must read the task description and infer actual prerequisites, not just validate declared ones.
|
|
217
|
+
|
|
218
|
+
**Example finding:**
|
|
219
|
+
|
|
220
|
+
```markdown
|
|
221
|
+
## Finding: ITR-022
|
|
222
|
+
|
|
223
|
+
**Priority:** P0
|
|
224
|
+
**Pass:** Missing Dependencies (Pass 2)
|
|
225
|
+
**Document:** docs/implementation-tasks.md, Task 14
|
|
226
|
+
|
|
227
|
+
**Issue:** Task 14 ("Implement order creation endpoint") declares dependency on Task 3
|
|
228
|
+
("Create database schema") but does not declare dependency on Task 7 ("Implement auth
|
|
229
|
+
middleware"). The task's acceptance criteria include "returns 401 for unauthenticated
|
|
230
|
+
requests," which requires auth middleware to exist. If an agent starts Task 14 before
|
|
231
|
+
Task 7 is complete, they cannot implement or test the auth requirement.
|
|
232
|
+
|
|
233
|
+
**Recommendation:** Add Task 7 as an explicit dependency for Task 14.
|
|
234
|
+
```
|
|
235
|
+
|
|
236
|
+
### 3. Accepting "Implement Feature X" as a Valid Task
|
|
237
|
+
|
|
238
|
+
The reviewer sees a task titled "Implement user management" with acceptance criteria listing 8 endpoints, 3 database tables, 2 background jobs, and role-based access control — and does not flag it as too large. A single task should be completable in one agent session (30-60 minutes). "Implement user management" is a project phase, not a task.
|
|
239
|
+
|
|
240
|
+
**How to spot it:** Count the acceptance criteria and the distinct concerns. More than 5-7 acceptance criteria or more than 2 distinct concerns (e.g., API + database + auth) means the task needs splitting.
|
|
241
|
+
|
|
242
|
+
### 4. Ignoring Test Tasks
|
|
243
|
+
|
|
244
|
+
The reviewer verifies implementation tasks but does not check whether corresponding test tasks exist. The testing strategy says "integration tests for all API endpoints," but there is no task for writing those tests. Tests are not free — they require their own implementation time, and if no task exists for them, they will not be written.
|
|
245
|
+
|
|
246
|
+
**How to spot it:** For each implementation task, search for a corresponding test task. If implementation tasks outnumber test tasks by more than 3:1, testing is systematically under-tasked.
|
|
247
|
+
|
|
248
|
+
### 5. No Verification of Parallelization Claims
|
|
249
|
+
|
|
250
|
+
Tasks are marked as parallelizable, and the reviewer accepts this at face value. But two tasks marked as parallel both modify `src/config/database.ts` or both add routes to the same router file. The reviewer must check for shared file modifications, not just logical independence.
|
|
251
|
+
|
|
252
|
+
**How to spot it:** The review has no findings from Pass 6 (Parallelization Validity). The reviewer checked for logical dependencies but not for file-level conflicts.
|
|
@@ -210,3 +210,58 @@ Without a backup strategy, data loss is permanent. Without a disaster recovery p
|
|
|
210
210
|
- P0: "Backups run daily but the RPO is 15 minutes. Up to 24 hours of data could be lost, far exceeding the business tolerance."
|
|
211
211
|
- P1: "Backup restoration procedure says 'restore from backup' with no specifics. What tool? What command? How long does it take? What is the verification step?"
|
|
212
212
|
- P2: "DR strategy exists but has never been tested. The team does not know if recovery actually works within the stated RTO."
|
|
213
|
+
|
|
214
|
+
---
|
|
215
|
+
|
|
216
|
+
## Common Review Anti-Patterns
|
|
217
|
+
|
|
218
|
+
### 1. Reviewing the Runbook as a Standalone Document
|
|
219
|
+
|
|
220
|
+
The reviewer checks the operations runbook for completeness (are all sections present? are rollback procedures documented?) but never cross-references with the architecture or deployment infrastructure. The runbook may describe a deployment pipeline that does not match the actual CI/CD configuration, or monitoring that covers components that no longer exist. Operations documentation must be validated against the architecture it describes.
|
|
221
|
+
|
|
222
|
+
**How to spot it:** The review has no findings that reference the architecture document or infrastructure configuration. All findings are about what the runbook says, not whether what it says matches reality.
|
|
223
|
+
|
|
224
|
+
### 2. "We Use Kubernetes" as a Complete Deployment Strategy
|
|
225
|
+
|
|
226
|
+
The deployment section names the orchestration platform (Kubernetes, ECS, Heroku) but does not describe the actual deployment process. How are images built? How are they tagged? What triggers a deployment? What is the rollout strategy (rolling update, blue-green, canary)? What happens when a pod fails health checks during rollout? Naming the platform is not a strategy.
|
|
227
|
+
|
|
228
|
+
**Example finding:**
|
|
229
|
+
|
|
230
|
+
```markdown
|
|
231
|
+
## Finding: OPS-011
|
|
232
|
+
|
|
233
|
+
**Priority:** P1
|
|
234
|
+
**Pass:** Deployment Strategy Completeness (Pass 1)
|
|
235
|
+
**Document:** docs/operations-runbook.md, Section 2
|
|
236
|
+
|
|
237
|
+
**Issue:** Deployment section states: "The application is deployed to Kubernetes using
|
|
238
|
+
Helm charts." No further detail is provided. The following questions are unanswered:
|
|
239
|
+
- What triggers a deployment? (manual, on PR merge, on tag?)
|
|
240
|
+
- What is the rollout strategy? (rolling update, blue-green, canary?)
|
|
241
|
+
- What are the health check endpoints and thresholds?
|
|
242
|
+
- When do database migrations run relative to the code deployment?
|
|
243
|
+
- What is the rollback procedure? (Helm rollback? Redeploy previous image?)
|
|
244
|
+
- How long does a typical deployment take?
|
|
245
|
+
|
|
246
|
+
**Recommendation:** Expand the deployment section to cover each stage of the pipeline
|
|
247
|
+
from merged PR to verified production deployment, with specific commands, tools,
|
|
248
|
+
and decision points.
|
|
249
|
+
```
|
|
250
|
+
|
|
251
|
+
### 3. Monitoring Section Lists Tools but Not Metrics
|
|
252
|
+
|
|
253
|
+
The monitoring section says "we use Datadog for monitoring and PagerDuty for alerting" but does not specify what metrics are collected, what dashboards exist, or what alert thresholds are configured. Tools are not a monitoring strategy. The question is not "do we have Datadog?" but "what does Datadog measure, and when does it wake someone up?"
|
|
254
|
+
|
|
255
|
+
**How to spot it:** The monitoring section mentions tool names but contains no metric names (error rate, p95 latency, request throughput), no threshold values, and no alert severity definitions.
|
|
256
|
+
|
|
257
|
+
### 4. Rollback Procedure That Ignores Data
|
|
258
|
+
|
|
259
|
+
The rollback section describes how to revert code (redeploy previous version, Helm rollback) but does not address database schema changes or data migrations. If the deployment included a migration that added a column and backfilled data, "rollback" is not just reverting the code — it requires a reverse migration, and the reverse migration may be destructive (dropping the new column loses the backfilled data).
|
|
260
|
+
|
|
261
|
+
**How to spot it:** The rollback section mentions code rollback but not database rollback. Search for "migration," "schema," or "database" in the rollback procedure — if absent, data rollback is unaddressed.
|
|
262
|
+
|
|
263
|
+
### 5. No Runbook Entries for the Most Likely Failures
|
|
264
|
+
|
|
265
|
+
The runbook has procedures for exotic failure scenarios (complete region outage, database corruption) but not for the failures that actually happen weekly: a single pod crashing, a dependency timing out, disk filling up from log accumulation, a certificate expiring. The most useful runbook entries cover common, mundane failures — not catastrophic ones.
|
|
266
|
+
|
|
267
|
+
**How to spot it:** Count runbook entries. If there are fewer than 5, the most likely failure scenarios are probably missing. Check specifically for: service restart, dependency timeout, disk full, certificate expiration, and failed deployment rollback.
|
|
@@ -233,3 +233,36 @@ The PRD's primary consumer is the user stories phase. If features are too vague
|
|
|
233
233
|
|
|
234
234
|
- P0: "Feature 'user management' cannot be decomposed into stories — what operations? What user types? What permissions model?"
|
|
235
235
|
- P1: "Business rules for discount application are implied but not stated — story acceptance criteria will have to guess at validation logic."
|
|
236
|
+
|
|
237
|
+
### Example Review Finding
|
|
238
|
+
|
|
239
|
+
```markdown
|
|
240
|
+
### Finding: NFRs use qualitative adjectives instead of quantified targets
|
|
241
|
+
|
|
242
|
+
**Pass:** 5 — NFR Quantification
|
|
243
|
+
**Priority:** P1
|
|
244
|
+
**Location:** PRD Section 6 "Non-Functional Requirements"
|
|
245
|
+
|
|
246
|
+
**Issue:** Performance requirements state "the system should be fast and responsive."
|
|
247
|
+
No response time targets, percentile thresholds, or load conditions are specified.
|
|
248
|
+
"Fast" is subjective — it means <100ms to a backend engineer and <3s to a product
|
|
249
|
+
manager evaluating full page loads.
|
|
250
|
+
|
|
251
|
+
Similarly, availability requirement states "high availability" without specifying
|
|
252
|
+
a target uptime percentage, maximum acceptable downtime window, or recovery time
|
|
253
|
+
objective (RTO).
|
|
254
|
+
|
|
255
|
+
**Impact:** The architecture step cannot make infrastructure decisions (single
|
|
256
|
+
instance vs. load-balanced, database read replicas, CDN) without quantified
|
|
257
|
+
performance targets. The testing step cannot write performance tests without
|
|
258
|
+
thresholds to assert against. Implementing agents will make arbitrary performance
|
|
259
|
+
trade-offs with no shared baseline.
|
|
260
|
+
|
|
261
|
+
**Recommendation:** Replace with quantified targets:
|
|
262
|
+
- "API response time: p95 < 200ms, p99 < 500ms under 1000 concurrent users"
|
|
263
|
+
- "Page load time: Largest Contentful Paint < 2.5s on 4G connection"
|
|
264
|
+
- "Availability: 99.9% uptime (8.7 hours maximum downtime per year)"
|
|
265
|
+
- "Recovery: RTO < 15 minutes, RPO < 1 hour"
|
|
266
|
+
|
|
267
|
+
**Trace:** PRD Section 6 → blocks Architecture Phase → blocks Implementation
|
|
268
|
+
```
|
|
@@ -211,3 +211,56 @@ Frontend-only validation is a UX convenience, not a security control. Attackers
|
|
|
211
211
|
- P1: "File upload endpoint validates file extension (.jpg, .png) but does not validate file content. An attacker could upload a malicious script with a .jpg extension."
|
|
212
212
|
- P1: "Webhook receiver accepts payloads from external services with no signature validation. An attacker could forge webhook calls."
|
|
213
213
|
- P2: "Input validation is specified for API endpoints but not for message queue consumers. A malformed message could cause the consumer to crash."
|
|
214
|
+
|
|
215
|
+
---
|
|
216
|
+
|
|
217
|
+
## Common Review Anti-Patterns
|
|
218
|
+
|
|
219
|
+
### 1. Generic OWASP Checklist Without Project Mapping
|
|
220
|
+
|
|
221
|
+
The security document lists all 10 OWASP categories with textbook mitigations ("use parameterized queries," "encrypt data at rest") but never connects them to the actual project. No component names, no endpoint references, no architecture-specific analysis. The same document could describe any web application.
|
|
222
|
+
|
|
223
|
+
**How to spot it:** The OWASP section contains zero references to the project's architecture document, API contracts, or database schema. Mitigations are general advice rather than specific implementation plans tied to named components.
|
|
224
|
+
|
|
225
|
+
### 2. Auth Designed in Isolation from API Contracts
|
|
226
|
+
|
|
227
|
+
The security document defines roles, permissions, and access control policies, but the reviewer does not cross-reference these with the API contract's endpoint-level auth requirements. The security document says "admin-only" for user management, but the API contract has no auth annotation on DELETE /users/{id}. This gap means the security design exists on paper but may not be enforced.
|
|
228
|
+
|
|
229
|
+
**Example finding:**
|
|
230
|
+
|
|
231
|
+
```markdown
|
|
232
|
+
## Finding: SEC-018
|
|
233
|
+
|
|
234
|
+
**Priority:** P0
|
|
235
|
+
**Pass:** Auth/AuthZ Boundary Alignment (Pass 2)
|
|
236
|
+
**Document:** docs/security-review.md, Section 3 / docs/api-contracts.md, Section 5.2
|
|
237
|
+
|
|
238
|
+
**Issue:** Security document defines three roles (admin, editor, viewer) with a permission
|
|
239
|
+
matrix. API contract defines 24 endpoints. Cross-referencing reveals:
|
|
240
|
+
- 6 endpoints have no auth requirement specified in the API contract
|
|
241
|
+
- 3 endpoints specify "authenticated" but the security document requires "admin" role
|
|
242
|
+
- Endpoint PATCH /users/{id}/role has no authorization check — any authenticated user
|
|
243
|
+
could escalate privileges
|
|
244
|
+
|
|
245
|
+
**Recommendation:** Add auth/authz annotations to all 24 endpoints in the API contract.
|
|
246
|
+
Reconcile the 3 mismatched endpoints with the security document's permission matrix.
|
|
247
|
+
Add explicit admin-only restriction to the role-change endpoint.
|
|
248
|
+
```
|
|
249
|
+
|
|
250
|
+
### 3. Secrets Strategy Says "Environment Variables" and Stops
|
|
251
|
+
|
|
252
|
+
The security document addresses secrets management by stating "secrets are stored in environment variables" with no further detail. This leaves critical questions unanswered: how are environment variables populated in production (plain text config file on the server? Kubernetes secrets? A vault?)? How are secrets rotated? What happens when a secret is compromised? "Environment variables" is a mechanism, not a strategy.
|
|
253
|
+
|
|
254
|
+
**How to spot it:** The secrets management section is shorter than half a page. It mentions environment variables but not rotation, not emergency response, not CI/CD secret injection, and not local development secrets handling.
|
|
255
|
+
|
|
256
|
+
### 4. Threat Model Without Trust Boundaries
|
|
257
|
+
|
|
258
|
+
The security document includes a threat model section that lists generic threats (SQL injection, XSS, CSRF) without mapping them to the system's trust boundaries. No data flow analysis, no identification of where untrusted input enters the system, no assessment of service-to-service trust. The threats listed are a vocabulary exercise, not a risk analysis.
|
|
259
|
+
|
|
260
|
+
**How to spot it:** The threat model section does not reference the architecture diagram. No trust boundaries are drawn. Threats are listed as a flat list rather than organized by boundary (client-to-API, service-to-service, service-to-database).
|
|
261
|
+
|
|
262
|
+
### 5. Reviewing Security Document Without Cross-Referencing Other Artifacts
|
|
263
|
+
|
|
264
|
+
The reviewer checks the security document internally (are all sections present? is the OWASP analysis complete?) but never opens the API contracts, architecture document, or operations runbook. Security findings that span multiple documents — auth gaps between the security doc and API contract, secrets handling gaps between the security doc and operations runbook — are invisible to a single-document review.
|
|
265
|
+
|
|
266
|
+
**How to spot it:** The review report cites only the security document. No findings reference the API contracts (Pass 2), the architecture (Pass 5 trust boundaries), or the operations runbook (Pass 3 secrets in deployment).
|
|
@@ -294,3 +294,31 @@ Architecture documents are long. Inconsistencies between early and late sections
|
|
|
294
294
|
- P1: "Section 3 describes the system as having five microservices, but the component diagram shows six. The 'Scheduler' component appears in the diagram but not in the prose."
|
|
295
295
|
- P1: "The architecture uses 'API Gateway' in sections 2-4 and 'Reverse Proxy' in section 6 for what appears to be the same component."
|
|
296
296
|
- P2: "Node.js version is stated as 18 in section 1 and 20 in the deployment section."
|
|
297
|
+
|
|
298
|
+
### Example Review Finding
|
|
299
|
+
|
|
300
|
+
```markdown
|
|
301
|
+
### Finding: Orphaned component with no data flow connections
|
|
302
|
+
|
|
303
|
+
**Pass:** 3 — Data Flow Completeness
|
|
304
|
+
**Priority:** P0
|
|
305
|
+
**Location:** Component diagram (architecture.md, Section 2.1)
|
|
306
|
+
|
|
307
|
+
**Issue:** Component 'AnalyticsEngine' appears in the component diagram as a
|
|
308
|
+
standalone service but is not referenced in any of the 12 documented data flows.
|
|
309
|
+
It has no documented inputs (what data does it consume?), no documented outputs
|
|
310
|
+
(where do analytics results go?), and no documented trigger (what initiates
|
|
311
|
+
analytics processing?).
|
|
312
|
+
|
|
313
|
+
**Impact:** The database schema step cannot design analytics storage without
|
|
314
|
+
knowing what data the AnalyticsEngine processes. The implementation tasks step
|
|
315
|
+
cannot scope analytics work without knowing the component's interfaces. The UX
|
|
316
|
+
spec step cannot design analytics dashboards without knowing what data is available.
|
|
317
|
+
|
|
318
|
+
**Recommendation:** Either (a) add data flows showing how AnalyticsEngine receives
|
|
319
|
+
events from other components, what processing it performs, and where results are
|
|
320
|
+
stored/served, or (b) remove AnalyticsEngine from the diagram if analytics is
|
|
321
|
+
out of scope for v1.
|
|
322
|
+
|
|
323
|
+
**Trace:** Component diagram → missing data flow coverage
|
|
324
|
+
```
|
|
@@ -174,3 +174,54 @@ A quality gate that exists in documentation but not in CI is not a gate. If the
|
|
|
174
174
|
- P0: "Testing strategy requires 80% code coverage but the CI pipeline has no coverage reporting or enforcement. The requirement is unverifiable."
|
|
175
175
|
- P1: "Security scanning is listed as a quality requirement but no specific tool or CI pipeline step implements it."
|
|
176
176
|
- P2: "Quality gates run linting, unit tests, and integration tests, but do not validate database migrations. A broken migration would pass all gates and fail in production."
|
|
177
|
+
|
|
178
|
+
---
|
|
179
|
+
|
|
180
|
+
## Common Review Anti-Patterns
|
|
181
|
+
|
|
182
|
+
### 1. Copy-Pasted Generic Strategy
|
|
183
|
+
|
|
184
|
+
The testing strategy is a boilerplate document that says "we will have unit tests, integration tests, and E2E tests" without connecting to the actual architecture. No mention of specific components, no mapping of test types to architectural layers, no project-specific invariants.
|
|
185
|
+
|
|
186
|
+
**How to spot it:** The strategy could be copy-pasted into any other project and still read correctly. No component names, no domain terms, no architecture-specific decisions.
|
|
187
|
+
|
|
188
|
+
### 2. Testing Strategy Disconnected from Architecture
|
|
189
|
+
|
|
190
|
+
The strategy defines test types and coverage goals but does not reference the system architecture. Tests are organized by test framework (Jest unit tests, Playwright E2E tests) rather than by architectural component. This makes it impossible to verify coverage — you cannot tell which components are tested and which are not.
|
|
191
|
+
|
|
192
|
+
**How to spot it:** Search for component names from the architecture document. If none appear in the testing strategy, the two documents are disconnected.
|
|
193
|
+
|
|
194
|
+
### 3. Mock-Everything Mentality
|
|
195
|
+
|
|
196
|
+
Every external dependency is mocked, including the database. Unit test coverage is high, but no test ever executes a real query, a real HTTP call, or a real message queue interaction. The test suite provides confidence that the mocking layer works, not that the system works.
|
|
197
|
+
|
|
198
|
+
**Example finding:**
|
|
199
|
+
|
|
200
|
+
```markdown
|
|
201
|
+
## Finding: TSR-009
|
|
202
|
+
|
|
203
|
+
**Priority:** P1
|
|
204
|
+
**Pass:** Integration Boundary Coverage (Pass 5)
|
|
205
|
+
**Document:** docs/testing-strategy.md, Section 4.2
|
|
206
|
+
|
|
207
|
+
**Issue:** All database tests use an in-memory mock repository. The repository interface
|
|
208
|
+
is tested, but no test ever executes SQL against a real PostgreSQL instance. The following
|
|
209
|
+
risks are untested: query syntax errors, constraint violations, transaction isolation
|
|
210
|
+
behavior, and migration correctness.
|
|
211
|
+
|
|
212
|
+
**Recommendation:** Add integration tests using testcontainers or a CI-managed PostgreSQL
|
|
213
|
+
instance for at least the OrderRepository and UserRepository (the two repositories with
|
|
214
|
+
complex queries).
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
### 4. No Negative Test Scenarios
|
|
218
|
+
|
|
219
|
+
The strategy defines tests for the happy path but never specifies what happens when things fail. No test scenarios for invalid input, network timeouts, concurrent modification, or resource exhaustion. The system is verified to work when everything goes right — the most uninteresting case.
|
|
220
|
+
|
|
221
|
+
**How to spot it:** Scan test scenario descriptions for words like "invalid," "timeout," "failure," "error," "reject," "concurrent," "duplicate." If these are absent, negative scenarios are missing.
|
|
222
|
+
|
|
223
|
+
### 5. Coverage Percentage as the Only Quality Metric
|
|
224
|
+
|
|
225
|
+
The strategy defines 80% code coverage as the quality gate but specifies no other quality criteria. High coverage with no assertion quality means tests that execute code paths without verifying behavior — "tests" that call functions and ignore the return value. Coverage measures how much code was run, not whether it was tested correctly.
|
|
226
|
+
|
|
227
|
+
**How to spot it:** The quality gates section mentions only code coverage. No mention of mutation testing, assertion density, test execution time budgets, or flakiness tracking.
|
|
@@ -170,3 +170,57 @@ Stories are the primary input to domain discovery in the domain modeling step. I
|
|
|
170
170
|
- P1: "US-007 ('As a teacher, I want to manage my classes') — acceptance criteria say 'classes are managed correctly.' No mention of what entities are involved (Class, Enrollment, Student?), what state transitions occur, or what business rules apply. Domain modeling will have to guess."
|
|
171
171
|
- P2: "Cross-story entity naming is inconsistent: US-003 uses 'User,' US-008 uses 'Account,' US-015 uses 'Member.' These may be different bounded context terms or may be accidental inconsistency — clarify before domain modeling."
|
|
172
172
|
- P2: "Stories in the 'Payments' epic mention 'processing a payment' but no acceptance criteria describe the payment lifecycle states (pending → processing → completed/failed). Domain events cannot be discovered from these stories."
|
|
173
|
+
|
|
174
|
+
---
|
|
175
|
+
|
|
176
|
+
## Common Review Anti-Patterns
|
|
177
|
+
|
|
178
|
+
### 1. Reviewing Against a Generic Checklist Instead of the PRD
|
|
179
|
+
|
|
180
|
+
The reviewer checks whether stories have acceptance criteria and follow INVEST principles, but never opens the PRD to verify coverage. The stories could be missing entire PRD features and this review would not catch it. Reviews must cross-reference the PRD — checking story quality without checking story completeness misses the highest-severity failure mode.
|
|
181
|
+
|
|
182
|
+
**How to spot it:** The review report contains no references to specific PRD sections. Findings are all about story quality (vague criteria, poor sizing) and none about story coverage (missing features, missing flows).
|
|
183
|
+
|
|
184
|
+
### 2. Accepting Vague Acceptance Criteria as "Good Enough"
|
|
185
|
+
|
|
186
|
+
The reviewer sees acceptance criteria like "user can manage their profile" and does not flag it because the intent is clear. But intent is not implementation guidance. Two agents reading "manage their profile" will implement different field sets, different validation rules, and different UX flows. Acceptance criteria must be testable — if you cannot write an automated test directly from the criterion, it is too vague.
|
|
187
|
+
|
|
188
|
+
**Example finding:**
|
|
189
|
+
|
|
190
|
+
```markdown
|
|
191
|
+
## Finding: USR-014
|
|
192
|
+
|
|
193
|
+
**Priority:** P1
|
|
194
|
+
**Pass:** Acceptance Criteria Quality (Pass 2)
|
|
195
|
+
**Document:** docs/user-stories.md, US-008
|
|
196
|
+
|
|
197
|
+
**Issue:** Acceptance criteria for US-008 ("As a user, I want to manage my profile"):
|
|
198
|
+
- "Given I am logged in, when I update my profile, then my changes are saved"
|
|
199
|
+
|
|
200
|
+
This criterion does not specify: which fields are editable, what validation rules apply,
|
|
201
|
+
whether partial updates are supported, what happens on validation failure, or whether
|
|
202
|
+
changes require re-authentication (e.g., email change).
|
|
203
|
+
|
|
204
|
+
**Recommendation:** Replace with specific Given/When/Then scenarios:
|
|
205
|
+
- Given I am logged in, when I change my display name to a valid name (1-100 chars), then my display name is updated
|
|
206
|
+
- Given I am logged in, when I change my email, then a verification email is sent to the new address and the email is not changed until verified
|
|
207
|
+
- Given I am logged in, when I submit a display name longer than 100 characters, then I see a validation error
|
|
208
|
+
```
|
|
209
|
+
|
|
210
|
+
### 3. Ignoring Story Dependencies
|
|
211
|
+
|
|
212
|
+
The reviewer checks each story in isolation but never maps dependencies between stories. Stories that secretly depend on each other are not flagged. This creates false parallelization opportunities downstream — the implementation tasks phase will mark these as parallel, and agents will produce conflicting work.
|
|
213
|
+
|
|
214
|
+
**How to spot it:** The review report has no findings from Pass 3 (Story Independence). Dependencies are only discovered later during implementation tasks or during actual implementation.
|
|
215
|
+
|
|
216
|
+
### 4. Persona Name Drift Without Flagging
|
|
217
|
+
|
|
218
|
+
The PRD defines personas as "Teacher," "Student," and "Admin." Stories reference "Instructor," "Learner," and "Administrator." The reviewer does not flag the terminology mismatch because the mapping is obvious to a human. But downstream, the domain model and implementation tasks may use either set of terms inconsistently, creating confusion.
|
|
219
|
+
|
|
220
|
+
**How to spot it:** Compare persona names in the PRD with persona names in story "As a..." statements. Any mismatch is a finding, even if the intent is obvious.
|
|
221
|
+
|
|
222
|
+
### 5. Reviewing Only Happy-Path Stories
|
|
223
|
+
|
|
224
|
+
The reviewer verifies that the main user flows have stories but does not check for error handling, edge cases, or administrative workflows. Stories exist for "user creates an account" and "user places an order" but not for "user enters invalid payment info," "user tries to order an out-of-stock item," or "admin resolves a disputed transaction." These missing stories become missing tasks and missing implementations.
|
|
225
|
+
|
|
226
|
+
**How to spot it:** Count the ratio of happy-path stories to error/edge-case stories. If the ratio is heavily skewed (e.g., 20 happy-path stories and 2 error stories), error handling is systematically under-specified.
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
---
|
|
2
|
-
name: review-ux-
|
|
2
|
+
name: review-ux-specification
|
|
3
3
|
description: Failure modes and review passes specific to UI/UX specification artifacts
|
|
4
4
|
topics: [review, ux, design, accessibility, responsive]
|
|
5
5
|
---
|
|
@@ -206,3 +206,39 @@ When the UX spec designs components that do not match the architecture's compone
|
|
|
206
206
|
- P1: "The UX spec designs an 'OrderSummaryWidget' that combines order details, customer info, and payment status. The architecture separates these into three independent components (OrderComponent, CustomerComponent, PaymentComponent) with separate data sources."
|
|
207
207
|
- P1: "The UX spec assumes global state for user preferences (accessible from any component), but the architecture specifies component-local state with prop drilling."
|
|
208
208
|
- P2: "The UX spec's 'ProductCard' component bundles product image, price, and add-to-cart button. The architecture models 'ProductDisplay' and 'CartAction' as separate concerns."
|
|
209
|
+
|
|
210
|
+
### Example Review Finding
|
|
211
|
+
|
|
212
|
+
```markdown
|
|
213
|
+
### Finding: Dashboard has no empty state or loading state design
|
|
214
|
+
|
|
215
|
+
**Pass:** 3 — Interaction State Completeness
|
|
216
|
+
**Priority:** P0
|
|
217
|
+
**Location:** UX Spec Section 4.1 "User Dashboard"
|
|
218
|
+
|
|
219
|
+
**Issue:** The dashboard screen shows charts (order volume, revenue trend) and
|
|
220
|
+
summary metrics (total orders, account balance, recent activity). The spec provides
|
|
221
|
+
only the populated state — what the screen looks like with data.
|
|
222
|
+
|
|
223
|
+
Missing states:
|
|
224
|
+
- **Empty state:** A new user with zero orders sees empty chart containers with
|
|
225
|
+
no axes, no labels, and no guidance. The metrics show "$0" and "0 orders" with
|
|
226
|
+
no context.
|
|
227
|
+
- **Loading state:** When dashboard data is being fetched (3 separate API calls
|
|
228
|
+
per the API contract), what does the user see? No skeleton, spinner, or
|
|
229
|
+
progressive loading is specified.
|
|
230
|
+
- **Partial error state:** If the revenue chart API fails but the orders API
|
|
231
|
+
succeeds, does the entire dashboard show an error, or just the revenue widget?
|
|
232
|
+
|
|
233
|
+
**Impact:** Implementing agents will either show blank containers (confusing for
|
|
234
|
+
new users), a full-page spinner (poor perceived performance), or nothing at all
|
|
235
|
+
while loading. The first-time user experience — which is critical for activation
|
|
236
|
+
metrics in the PRD — is completely undesigned.
|
|
237
|
+
|
|
238
|
+
**Recommendation:** Design three additional states:
|
|
239
|
+
1. Empty state with onboarding CTA ("Create your first order to see analytics here")
|
|
240
|
+
2. Skeleton loading state with placeholder shapes matching the populated layout
|
|
241
|
+
3. Per-widget error state with retry button, so partial failures are isolated
|
|
242
|
+
|
|
243
|
+
**Trace:** UX Spec 4.1 → PRD Success Metric "70% user activation within 7 days"
|
|
244
|
+
```
|
|
@@ -5,32 +5,60 @@ default_depth: 3
|
|
|
5
5
|
|
|
6
6
|
# All steps enabled by default at depth 3 — user overrides individual steps
|
|
7
7
|
steps:
|
|
8
|
+
# Phase 1 — Product Definition (pre)
|
|
8
9
|
create-prd: { enabled: true }
|
|
9
10
|
review-prd: { enabled: true }
|
|
10
11
|
innovate-prd: { enabled: false }
|
|
11
12
|
user-stories: { enabled: true }
|
|
12
13
|
review-user-stories: { enabled: true }
|
|
13
14
|
innovate-user-stories: { enabled: false }
|
|
15
|
+
# Phase 2 — Project Foundation (foundation)
|
|
16
|
+
beads: { enabled: true, conditional: "if-needed" }
|
|
17
|
+
tech-stack: { enabled: true }
|
|
18
|
+
coding-standards: { enabled: true }
|
|
19
|
+
tdd: { enabled: true }
|
|
20
|
+
project-structure: { enabled: true }
|
|
21
|
+
# Phase 3 — Development Environment (environment)
|
|
22
|
+
dev-env-setup: { enabled: true }
|
|
23
|
+
design-system: { enabled: true, conditional: "if-needed" }
|
|
24
|
+
git-workflow: { enabled: true }
|
|
25
|
+
automated-pr-review: { enabled: false }
|
|
26
|
+
ai-memory-setup: { enabled: true }
|
|
27
|
+
# Phase 4 — Testing Integration (integration)
|
|
28
|
+
add-e2e-testing: { enabled: true, conditional: "if-needed" }
|
|
29
|
+
# Phase 5 — Domain Modeling (modeling)
|
|
14
30
|
domain-modeling: { enabled: true }
|
|
15
31
|
review-domain-modeling: { enabled: true }
|
|
32
|
+
# Phase 6 — Architecture Decisions (decisions)
|
|
16
33
|
adrs: { enabled: true }
|
|
17
34
|
review-adrs: { enabled: true }
|
|
35
|
+
# Phase 7 — System Architecture (architecture)
|
|
18
36
|
system-architecture: { enabled: true }
|
|
19
37
|
review-architecture: { enabled: true }
|
|
38
|
+
# Phase 8 — Specifications (specification)
|
|
20
39
|
database-schema: { enabled: true, conditional: "if-needed" }
|
|
21
40
|
review-database: { enabled: true, conditional: "if-needed" }
|
|
22
41
|
api-contracts: { enabled: true, conditional: "if-needed" }
|
|
23
42
|
review-api: { enabled: true, conditional: "if-needed" }
|
|
24
43
|
ux-spec: { enabled: true, conditional: "if-needed" }
|
|
25
44
|
review-ux: { enabled: true, conditional: "if-needed" }
|
|
26
|
-
|
|
45
|
+
# Phase 9 — Quality Gates (quality)
|
|
27
46
|
review-testing: { enabled: true }
|
|
47
|
+
story-tests: { enabled: true }
|
|
48
|
+
create-evals: { enabled: true }
|
|
28
49
|
operations: { enabled: true }
|
|
29
50
|
review-operations: { enabled: true }
|
|
30
51
|
security: { enabled: true }
|
|
31
52
|
review-security: { enabled: true }
|
|
32
|
-
|
|
33
|
-
review
|
|
53
|
+
# Phase 10 — Platform Parity (parity)
|
|
54
|
+
platform-parity-review: { enabled: true, conditional: "if-needed" }
|
|
55
|
+
# Phase 11 — Consolidation (consolidation)
|
|
56
|
+
claude-md-optimization: { enabled: true }
|
|
57
|
+
workflow-audit: { enabled: true }
|
|
58
|
+
# Phase 12 — Planning (planning)
|
|
59
|
+
implementation-plan: { enabled: true }
|
|
60
|
+
implementation-plan-review: { enabled: true }
|
|
61
|
+
# Phase 13 — Validation (validation)
|
|
34
62
|
cross-phase-consistency: { enabled: true }
|
|
35
63
|
traceability-matrix: { enabled: true }
|
|
36
64
|
decision-completeness: { enabled: true }
|
|
@@ -38,6 +66,7 @@ steps:
|
|
|
38
66
|
implementability-dry-run: { enabled: true }
|
|
39
67
|
dependency-graph-validation: { enabled: true }
|
|
40
68
|
scope-creep-check: { enabled: true }
|
|
69
|
+
# Phase 14 — Finalization (finalization)
|
|
41
70
|
apply-fixes-and-freeze: { enabled: true }
|
|
42
71
|
developer-onboarding-guide: { enabled: true }
|
|
43
72
|
implementation-playbook: { enabled: true }
|