@zigrivers/scaffold 2.38.1 → 2.44.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -7
- package/dist/cli/commands/build.js +4 -4
- package/dist/cli/commands/build.js.map +1 -1
- package/dist/cli/commands/check.test.js +11 -8
- package/dist/cli/commands/check.test.js.map +1 -1
- package/dist/cli/commands/complete.d.ts.map +1 -1
- package/dist/cli/commands/complete.js +2 -1
- package/dist/cli/commands/complete.js.map +1 -1
- package/dist/cli/commands/complete.test.js +4 -1
- package/dist/cli/commands/complete.test.js.map +1 -1
- package/dist/cli/commands/dashboard.js +4 -4
- package/dist/cli/commands/dashboard.js.map +1 -1
- package/dist/cli/commands/knowledge.js +2 -2
- package/dist/cli/commands/knowledge.js.map +1 -1
- package/dist/cli/commands/knowledge.test.js +5 -12
- package/dist/cli/commands/knowledge.test.js.map +1 -1
- package/dist/cli/commands/list.d.ts +1 -1
- package/dist/cli/commands/list.d.ts.map +1 -1
- package/dist/cli/commands/list.js +84 -3
- package/dist/cli/commands/list.js.map +1 -1
- package/dist/cli/commands/list.test.js +82 -0
- package/dist/cli/commands/list.test.js.map +1 -1
- package/dist/cli/commands/next.test.js +4 -1
- package/dist/cli/commands/next.test.js.map +1 -1
- package/dist/cli/commands/reset.d.ts.map +1 -1
- package/dist/cli/commands/reset.js +5 -2
- package/dist/cli/commands/reset.js.map +1 -1
- package/dist/cli/commands/reset.test.js +4 -1
- package/dist/cli/commands/reset.test.js.map +1 -1
- package/dist/cli/commands/rework.d.ts.map +1 -1
- package/dist/cli/commands/rework.js +3 -2
- package/dist/cli/commands/rework.js.map +1 -1
- package/dist/cli/commands/run.d.ts.map +1 -1
- package/dist/cli/commands/run.js +28 -13
- package/dist/cli/commands/run.js.map +1 -1
- package/dist/cli/commands/run.test.js +1 -1
- package/dist/cli/commands/run.test.js.map +1 -1
- package/dist/cli/commands/skip.d.ts.map +1 -1
- package/dist/cli/commands/skip.js +2 -1
- package/dist/cli/commands/skip.js.map +1 -1
- package/dist/cli/commands/skip.test.js +4 -1
- package/dist/cli/commands/skip.test.js.map +1 -1
- package/dist/cli/commands/status.d.ts.map +1 -1
- package/dist/cli/commands/status.js +88 -4
- package/dist/cli/commands/status.js.map +1 -1
- package/dist/cli/commands/version.d.ts.map +1 -1
- package/dist/cli/commands/version.js +22 -3
- package/dist/cli/commands/version.js.map +1 -1
- package/dist/cli/commands/version.test.js +42 -0
- package/dist/cli/commands/version.test.js.map +1 -1
- package/dist/cli/output/context.test.js +14 -13
- package/dist/cli/output/context.test.js.map +1 -1
- package/dist/cli/output/interactive.js +4 -4
- package/dist/cli/output/json.d.ts +1 -0
- package/dist/cli/output/json.d.ts.map +1 -1
- package/dist/cli/output/json.js +14 -1
- package/dist/cli/output/json.js.map +1 -1
- package/dist/config/loader.d.ts.map +1 -1
- package/dist/config/loader.js +10 -3
- package/dist/config/loader.js.map +1 -1
- package/dist/config/loader.test.js +28 -0
- package/dist/config/loader.test.js.map +1 -1
- package/dist/core/assembly/engine.d.ts.map +1 -1
- package/dist/core/assembly/engine.js +6 -1
- package/dist/core/assembly/engine.js.map +1 -1
- package/dist/e2e/init.test.js +3 -0
- package/dist/e2e/init.test.js.map +1 -1
- package/dist/index.js +2 -1
- package/dist/index.js.map +1 -1
- package/dist/project/adopt.test.js +3 -0
- package/dist/project/adopt.test.js.map +1 -1
- package/dist/project/claude-md.d.ts.map +1 -1
- package/dist/project/claude-md.js +2 -1
- package/dist/project/claude-md.js.map +1 -1
- package/dist/project/detector.js +3 -3
- package/dist/project/detector.js.map +1 -1
- package/dist/project/signals.d.ts +1 -0
- package/dist/project/signals.d.ts.map +1 -1
- package/dist/state/decision-logger.d.ts.map +1 -1
- package/dist/state/decision-logger.js +7 -4
- package/dist/state/decision-logger.js.map +1 -1
- package/dist/state/lock-manager.js +1 -1
- package/dist/state/lock-manager.js.map +1 -1
- package/dist/state/lock-manager.test.js +27 -3
- package/dist/state/lock-manager.test.js.map +1 -1
- package/dist/state/state-manager.d.ts.map +1 -1
- package/dist/state/state-manager.js +6 -0
- package/dist/state/state-manager.js.map +1 -1
- package/dist/state/state-manager.test.js +7 -0
- package/dist/state/state-manager.test.js.map +1 -1
- package/dist/types/assembly.d.ts +2 -0
- package/dist/types/assembly.d.ts.map +1 -1
- package/dist/utils/eligible.d.ts +8 -0
- package/dist/utils/eligible.d.ts.map +1 -0
- package/dist/utils/eligible.js +36 -0
- package/dist/utils/eligible.js.map +1 -0
- package/dist/validation/config-validator.test.js +15 -13
- package/dist/validation/config-validator.test.js.map +1 -1
- package/dist/validation/index.test.js +1 -1
- package/dist/wizard/wizard.d.ts.map +1 -1
- package/dist/wizard/wizard.js +1 -0
- package/dist/wizard/wizard.js.map +1 -1
- package/dist/wizard/wizard.test.js +2 -0
- package/dist/wizard/wizard.test.js.map +1 -1
- package/knowledge/core/automated-review-tooling.md +4 -4
- package/knowledge/core/eval-craft.md +44 -0
- package/knowledge/core/multi-model-review-dispatch.md +8 -0
- package/knowledge/core/system-architecture.md +39 -0
- package/knowledge/core/task-decomposition.md +53 -0
- package/knowledge/core/testing-strategy.md +160 -0
- package/knowledge/finalization/implementation-playbook.md +24 -7
- package/knowledge/product/prd-craft.md +41 -0
- package/knowledge/review/review-adr.md +1 -1
- package/knowledge/review/review-api-design.md +1 -1
- package/knowledge/review/review-database-design.md +1 -1
- package/knowledge/review/review-domain-modeling.md +1 -1
- package/knowledge/review/review-implementation-tasks.md +1 -1
- package/knowledge/review/review-methodology.md +1 -1
- package/knowledge/review/review-operations.md +1 -1
- package/knowledge/review/review-prd.md +1 -1
- package/knowledge/review/review-security.md +1 -1
- package/knowledge/review/review-system-architecture.md +1 -1
- package/knowledge/review/review-testing-strategy.md +1 -1
- package/knowledge/review/review-user-stories.md +1 -1
- package/knowledge/review/review-ux-specification.md +1 -1
- package/knowledge/review/review-vision.md +1 -1
- package/knowledge/tools/post-implementation-review-methodology.md +107 -0
- package/knowledge/validation/critical-path-analysis.md +13 -0
- package/knowledge/validation/implementability-review.md +14 -0
- package/package.json +2 -1
- package/pipeline/architecture/review-architecture.md +8 -5
- package/pipeline/architecture/system-architecture.md +9 -3
- package/pipeline/build/multi-agent-resume.md +21 -7
- package/pipeline/build/multi-agent-start.md +22 -7
- package/pipeline/build/new-enhancement.md +20 -12
- package/pipeline/build/quick-task.md +18 -11
- package/pipeline/build/single-agent-resume.md +20 -6
- package/pipeline/build/single-agent-start.md +24 -8
- package/pipeline/consolidation/claude-md-optimization.md +8 -4
- package/pipeline/consolidation/workflow-audit.md +9 -5
- package/pipeline/decisions/adrs.md +7 -3
- package/pipeline/decisions/review-adrs.md +8 -5
- package/pipeline/environment/ai-memory-setup.md +6 -2
- package/pipeline/environment/automated-pr-review.md +79 -12
- package/pipeline/environment/design-system.md +9 -6
- package/pipeline/environment/dev-env-setup.md +8 -5
- package/pipeline/environment/git-workflow.md +16 -13
- package/pipeline/finalization/apply-fixes-and-freeze.md +10 -5
- package/pipeline/finalization/developer-onboarding-guide.md +10 -3
- package/pipeline/finalization/implementation-playbook.md +13 -4
- package/pipeline/foundation/beads.md +8 -5
- package/pipeline/foundation/coding-standards.md +13 -10
- package/pipeline/foundation/project-structure.md +16 -13
- package/pipeline/foundation/tdd.md +9 -4
- package/pipeline/foundation/tech-stack.md +7 -5
- package/pipeline/integration/add-e2e-testing.md +12 -8
- package/pipeline/modeling/domain-modeling.md +9 -7
- package/pipeline/modeling/review-domain-modeling.md +8 -6
- package/pipeline/parity/platform-parity-review.md +9 -6
- package/pipeline/planning/implementation-plan-review.md +10 -7
- package/pipeline/planning/implementation-plan.md +41 -9
- package/pipeline/pre/create-prd.md +7 -4
- package/pipeline/pre/innovate-prd.md +12 -8
- package/pipeline/pre/innovate-user-stories.md +10 -7
- package/pipeline/pre/review-prd.md +12 -10
- package/pipeline/pre/review-user-stories.md +12 -9
- package/pipeline/pre/user-stories.md +7 -4
- package/pipeline/quality/create-evals.md +6 -3
- package/pipeline/quality/operations.md +7 -3
- package/pipeline/quality/review-operations.md +12 -5
- package/pipeline/quality/review-security.md +11 -6
- package/pipeline/quality/review-testing.md +11 -6
- package/pipeline/quality/security.md +6 -2
- package/pipeline/quality/story-tests.md +14 -9
- package/pipeline/specification/api-contracts.md +9 -3
- package/pipeline/specification/database-schema.md +8 -2
- package/pipeline/specification/review-api.md +10 -4
- package/pipeline/specification/review-database.md +8 -3
- package/pipeline/specification/review-ux.md +9 -3
- package/pipeline/specification/ux-spec.md +9 -4
- package/pipeline/validation/critical-path-walkthrough.md +10 -5
- package/pipeline/validation/cross-phase-consistency.md +9 -4
- package/pipeline/validation/decision-completeness.md +8 -3
- package/pipeline/validation/dependency-graph-validation.md +8 -3
- package/pipeline/validation/implementability-dry-run.md +9 -5
- package/pipeline/validation/scope-creep-check.md +11 -6
- package/pipeline/validation/traceability-matrix.md +10 -5
- package/pipeline/vision/create-vision.md +7 -4
- package/pipeline/vision/innovate-vision.md +11 -8
- package/pipeline/vision/review-vision.md +15 -12
- package/skills/multi-model-dispatch/SKILL.md +6 -5
- package/skills/scaffold-runner/SKILL.md +47 -3
- package/tools/dashboard.md +53 -0
- package/tools/post-implementation-review.md +655 -0
- package/tools/prompt-pipeline.md +160 -0
- package/tools/release.md +440 -0
- package/tools/review-pr.md +229 -0
- package/tools/session-analyzer.md +299 -0
- package/tools/update.md +113 -0
- package/tools/version-bump.md +290 -0
- package/tools/version.md +82 -0
|
@@ -1006,3 +1006,47 @@ Style observations, minor inconsistencies, documentation improvements. Not actio
|
|
|
1006
1006
|
- "3 documentation files have no cross-references from other docs (possibly orphaned)"
|
|
1007
1007
|
- "Coverage eval matched 'user profile' by file name only, not by test content — confidence is low"
|
|
1008
1008
|
- "Makefile has 2 targets not listed in CLAUDE.md Key Commands, but they start with `_` (internal targets)"
|
|
1009
|
+
|
|
1010
|
+
### Per-Category Implementation Guidance
|
|
1011
|
+
|
|
1012
|
+
Concrete checks to implement for each eval category. For each category, these are the highest-value grep/scan targets.
|
|
1013
|
+
|
|
1014
|
+
#### Adherence
|
|
1015
|
+
|
|
1016
|
+
- **Naming conventions**: Grep source files for patterns that violate documented naming (e.g., `camelCase` in a `snake_case` project, uppercase constants that should be enums)
|
|
1017
|
+
- **Error handling patterns**: Scan for bare `catch {}`, swallowed errors (`catch (e) { /* ignore */ }`), and missing error propagation per `docs/coding-standards.md`
|
|
1018
|
+
- **Import rules**: Check for barrel import violations, circular imports, and forbidden cross-layer imports (e.g., UI importing directly from DB layer)
|
|
1019
|
+
- **TODO hygiene**: Grep for `TODO|FIXME|HACK` without a task ID tag like `[BD-xxx]` — untagged TODOs are tracking gaps
|
|
1020
|
+
|
|
1021
|
+
#### Consistency
|
|
1022
|
+
|
|
1023
|
+
- **Cross-doc refs match**: Extract all file path references from markdown docs (`docs/*.md`) and verify each referenced path exists on disk
|
|
1024
|
+
- **Format standardization**: Verify commit messages follow the documented pattern in `docs/coding-standards.md` by regex-matching `git log --oneline`
|
|
1025
|
+
- **Command table sync**: Parse the Key Commands table in `CLAUDE.md`, extract each backtick-quoted command, and verify a matching Makefile target or package.json script exists
|
|
1026
|
+
- **Config value consistency**: Check that port numbers, env var names, and feature flags in config files match what documentation describes
|
|
1027
|
+
|
|
1028
|
+
#### Structure
|
|
1029
|
+
|
|
1030
|
+
- **File placement rules**: For each source file, verify its directory matches the module placement rules in `docs/project-structure.md` (e.g., no feature code in `shared/`, no stray files in root)
|
|
1031
|
+
- **Test co-location**: For each source file with logic, verify a corresponding `.test.*` file exists per the documented convention (co-located or mirror directory)
|
|
1032
|
+
- **Shared code 2+ consumers**: Scan every file in `shared/`, `common/`, or `lib/` directories and count distinct importers — flag any with fewer than 2 consumers
|
|
1033
|
+
- **No orphan files**: Verify every source file is either imported by another file or is a documented entry point (main, index, CLI handler)
|
|
1034
|
+
|
|
1035
|
+
#### Coverage
|
|
1036
|
+
|
|
1037
|
+
- **Feature-to-code mapping**: Extract Must-have features from `docs/plan.md`, derive domain keywords, and grep source tree for 2+ keyword matches per feature
|
|
1038
|
+
- **AC-to-test mapping**: Extract acceptance criteria from `docs/user-stories.md`, extract keywords, and search test files for keyword co-occurrence (high confidence: exact AC ID reference; medium: 2+ domain keywords)
|
|
1039
|
+
- **API endpoint coverage**: Parse documented endpoints from `docs/api-contracts.md`, verify each has a route definition in code and at least one test file asserting its status codes
|
|
1040
|
+
|
|
1041
|
+
#### Cross-doc
|
|
1042
|
+
|
|
1043
|
+
- **Terminology consistency**: Extract key domain terms from the PRD and verify the same terms (not synonyms) appear in architecture, user stories, and coding standards docs
|
|
1044
|
+
- **Tech stack references**: Verify that technology names referenced across docs match the canonical list in `docs/tech-stack.md` (e.g., no doc says "Postgres" when the canonical name is "PostgreSQL")
|
|
1045
|
+
- **Path consistency**: Collect all file path references across all docs and verify they use the same path format (no mix of `src/features/` and `features/src/`)
|
|
1046
|
+
|
|
1047
|
+
#### Security
|
|
1048
|
+
|
|
1049
|
+
- **Auth middleware usage**: Parse the security review for protected routes, then verify each route definition includes auth middleware (`requireAuth`, `@authenticated`, or equivalent)
|
|
1050
|
+
- **Secret patterns**: Grep for hardcoded API keys, tokens, and passwords using known patterns (`AKIA...`, `sk_live_...`, `ghp_...`, and generic `password\s*=\s*['"][^'"]+`)
|
|
1051
|
+
- **Input validation**: For each API endpoint accepting user input, verify a validation step exists (Zod `.parse()`, Joi `.validate()`, express-validator chain, or equivalent)
|
|
1052
|
+
- **No secrets in git**: Run `git log --diff-filter=A --name-only` and check that no `.env`, credentials, or key files were ever committed
|
|
@@ -176,6 +176,14 @@ When models actively disagree (one flags an issue, another says the same thing i
|
|
|
176
176
|
3. **Default to the stricter interpretation.** If genuinely ambiguous, the finding stands at reduced severity (P1 → P2).
|
|
177
177
|
4. **Document the disagreement.** The reconciliation report should note: "Models disagreed on [topic]. Resolution: [decision and rationale]."
|
|
178
178
|
|
|
179
|
+
### Consensus Classification
|
|
180
|
+
|
|
181
|
+
When synthesizing multi-model findings, classify each finding:
|
|
182
|
+
- **Consensus**: All participating models flagged the same issue at similar severity → report at the agreed severity
|
|
183
|
+
- **Majority**: 2+ models agree, 1 dissents → report at the lower of the agreeing severities; note the dissent
|
|
184
|
+
- **Divergent**: Models disagree on severity or one model found an issue others missed → present to user for decision, minimum P2 severity
|
|
185
|
+
- **Unique**: Only one model raised the finding → include with attribution, flag as "single-model finding" for user review
|
|
186
|
+
|
|
179
187
|
### Output Format
|
|
180
188
|
|
|
181
189
|
#### Review Summary (review-summary.md)
|
|
@@ -85,6 +85,45 @@ For most scaffold pipeline projects:
|
|
|
85
85
|
|
|
86
86
|
## Deep Guidance
|
|
87
87
|
|
|
88
|
+
## Reading ADRs to Derive Architectural Constraints
|
|
89
|
+
|
|
90
|
+
The `system-architecture` pipeline step consumes ADR decision records as direct inputs. Each accepted ADR represents a binding constraint on the architecture — not a suggestion, but a committed decision that component choices must honor.
|
|
91
|
+
|
|
92
|
+
### How to Extract Constraints from ADRs
|
|
93
|
+
|
|
94
|
+
For each ADR, read the **Decision** and **Consequences** sections:
|
|
95
|
+
|
|
96
|
+
1. **Identify the affected component scope.** An ADR about the database engine constrains the data access layer and all repository implementations. An ADR about the messaging system constrains all async communication patterns.
|
|
97
|
+
2. **Extract the affirmative constraint.** "We will use PostgreSQL" → every database component must use the `pg` driver (Node.js) or `psycopg2` (Python), not a generic ORM that could target SQLite.
|
|
98
|
+
3. **Extract the prohibitive constraint.** "We will not use a microservice architecture" → no service discovery, no inter-service REST calls, no per-service databases.
|
|
99
|
+
4. **Note the rationale.** If an ADR says "because of GDPR data residency requirements," this rationale extends to related decisions not explicitly stated in the ADR (e.g., no third-party analytics SDKs that exfiltrate data).
|
|
100
|
+
|
|
101
|
+
### Mapping ADR Outcomes to Component Decisions
|
|
102
|
+
|
|
103
|
+
Document constraints inline with component definitions:
|
|
104
|
+
|
|
105
|
+
```
|
|
106
|
+
Component: UserRepository
|
|
107
|
+
Constraint source: ADR-003 (PostgreSQL as the database)
|
|
108
|
+
Implementation requirement: Must use pg driver. ORM must target PostgreSQL dialect.
|
|
109
|
+
Cannot use SQLite, MySQL, or a generic query builder that doesn't validate SQL dialect.
|
|
110
|
+
|
|
111
|
+
Component: NotificationService
|
|
112
|
+
Constraint source: ADR-007 (async via Redis pub/sub, not a message broker)
|
|
113
|
+
Implementation requirement: Must use ioredis for pub/sub.
|
|
114
|
+
Cannot use RabbitMQ, Kafka, or in-process EventEmitter for cross-service notifications.
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
### Cross-Referencing ADR Status
|
|
118
|
+
|
|
119
|
+
ADR status affects how strictly to apply constraints:
|
|
120
|
+
|
|
121
|
+
- **Accepted**: Binding. The architecture must comply. Any component that would violate this decision requires a new ADR to supersede it.
|
|
122
|
+
- **Proposed**: Treat as intended but not yet locked. Note the dependency — if the ADR is rejected, the architecture may need revision.
|
|
123
|
+
- **Deprecated** or **Superseded**: The old constraint no longer applies; the superseding ADR's constraint applies instead. Remove any component requirements derived from the deprecated ADR.
|
|
124
|
+
|
|
125
|
+
When a component's implementation would conflict with an accepted ADR, that conflict must be surfaced explicitly — either by revising the component design or by drafting a new ADR before proceeding.
|
|
126
|
+
|
|
88
127
|
## Component Design
|
|
89
128
|
|
|
90
129
|
### Identifying Components from Domain Models
|
|
@@ -508,3 +508,56 @@ If a task genuinely can't be split further without creating tasks that have no i
|
|
|
508
508
|
**Premature shared utilities.** Creating "shared utility library" tasks before any feature needs them. This produces speculative abstractions that don't fit actual use cases. Fix: shared code emerges from feature work. Only create shared utility tasks after two or more features demonstrate the need.
|
|
509
509
|
|
|
510
510
|
**Ignoring the critical path.** Assigning agents to low-priority tasks while critical-path tasks wait for resources. Fix: always prioritize critical-path tasks. Non-critical tasks are parallelized around the critical path, not instead of it.
|
|
511
|
+
|
|
512
|
+
### Critical Path and Wave Planning
|
|
513
|
+
|
|
514
|
+
#### Identifying the Critical Path
|
|
515
|
+
|
|
516
|
+
The critical path is the longest chain of sequentially dependent tasks from project start to finish. To find it:
|
|
517
|
+
|
|
518
|
+
1. **Build the full DAG** — list every task and its dependencies (logical, file contention, infrastructure)
|
|
519
|
+
2. **Assign effort estimates** — use story points or hours per task
|
|
520
|
+
3. **Trace all paths** — walk from every root node (no dependencies) to every leaf node (no dependents)
|
|
521
|
+
4. **Sum each path** — the path with the highest total effort is the critical path
|
|
522
|
+
5. **Mark float** — non-critical tasks have float equal to (critical path length - their path length); they can slip by that amount without delaying the project
|
|
523
|
+
|
|
524
|
+
Critical path tasks get top priority for agent assignment. Delays on these tasks delay the entire project; delays on non-critical tasks do not (up to their float).
|
|
525
|
+
|
|
526
|
+
#### Wave Planning
|
|
527
|
+
|
|
528
|
+
Waves group independent tasks for parallel execution. Each wave starts only after its dependency wave completes.
|
|
529
|
+
|
|
530
|
+
```
|
|
531
|
+
Wave 0: Project infrastructure (DB setup, CI pipeline, auth scaffold)
|
|
532
|
+
Wave 1: Core data models, base API framework, design tokens
|
|
533
|
+
Wave 2: Feature endpoints, UI components, middleware (per-feature)
|
|
534
|
+
Wave 3: Integration flows, cross-feature wiring, E2E test scaffolds
|
|
535
|
+
Wave 4: Polish, performance, E2E tests, documentation finalization
|
|
536
|
+
```
|
|
537
|
+
|
|
538
|
+
**Rules for wave construction:**
|
|
539
|
+
- A task belongs to the earliest wave where all its dependencies are satisfied
|
|
540
|
+
- Tasks within a wave have zero dependencies on each other
|
|
541
|
+
- The number of useful parallel agents equals the task count of the widest wave
|
|
542
|
+
- If one wave has 8 tasks and the next has 2, consider whether splitting wave-2 tasks could improve parallelism
|
|
543
|
+
|
|
544
|
+
#### Agent Allocation by Wave
|
|
545
|
+
|
|
546
|
+
Assign agents based on task type to maximize context reuse within an agent session:
|
|
547
|
+
|
|
548
|
+
- **Backend agents** — API endpoints, database migrations, service logic. Context: architecture doc, API contracts, coding standards
|
|
549
|
+
- **Frontend agents** — UI components, pages, client-side state. Context: UX spec, design system, component patterns
|
|
550
|
+
- **Infrastructure agents** — CI/CD, deployment, config, monitoring. Context: dev setup, operations runbook
|
|
551
|
+
- **Cross-cutting agents** — Auth, error handling, shared utilities. Context: security review, coding standards
|
|
552
|
+
|
|
553
|
+
An agent working consecutive tasks of the same type retains relevant context and produces more consistent output.
|
|
554
|
+
|
|
555
|
+
#### Parallelization Signals
|
|
556
|
+
|
|
557
|
+
Tasks are safe to run in parallel when they share no file dependencies. Quick checklist:
|
|
558
|
+
|
|
559
|
+
- **Different feature directories** — `src/features/auth/` vs `src/features/billing/` can always parallelize
|
|
560
|
+
- **Different layers of different features** — backend auth + frontend billing have no file overlap
|
|
561
|
+
- **Same feature, different layers** — only if the interface contract is agreed upfront (API shape, component props)
|
|
562
|
+
- **Same file touched** — must be sequenced, no exceptions (merge conflicts are expensive)
|
|
563
|
+
- **Shared utility creation** — block until the utility task merges, then dependents can parallelize
|
|
@@ -397,6 +397,166 @@ Initial data loaded into the test database for integration tests. Rules:
|
|
|
397
397
|
|
|
398
398
|
**No test naming convention.** Test descriptions like "test 1," "works correctly," or "handles the thing." Uninformative when tests fail. Fix: test names should describe the scenario and expected outcome: "returns 404 when user does not exist," "applies 10% discount for premium members."
|
|
399
399
|
|
|
400
|
+
### From Acceptance Criteria to Test Cases
|
|
401
|
+
|
|
402
|
+
Acceptance criteria are the bridge between user stories and automated tests. Every AC should produce one or more test cases with clear traceability.
|
|
403
|
+
|
|
404
|
+
#### Given/When/Then to Arrange/Act/Assert
|
|
405
|
+
|
|
406
|
+
The mapping is direct:
|
|
407
|
+
|
|
408
|
+
- **Given** (precondition) becomes **Arrange** — set up test data, mock dependencies, configure state
|
|
409
|
+
- **When** (action) becomes **Act** — call the function, hit the endpoint, trigger the event
|
|
410
|
+
- **Then** (expected outcome) becomes **Assert** — verify return value, check database state, assert response body
|
|
411
|
+
|
|
412
|
+
```typescript
|
|
413
|
+
// AC: Given a user with 5 failed login attempts,
|
|
414
|
+
// When they attempt a 6th login,
|
|
415
|
+
// Then the account is locked and they see "Account locked"
|
|
416
|
+
it('locks account after 5 failed attempts', async () => {
|
|
417
|
+
// Arrange: create user with 5 failed attempts
|
|
418
|
+
const user = await createUser({ failedAttempts: 5 });
|
|
419
|
+
// Act: attempt login
|
|
420
|
+
const res = await request(app).post('/login').send({ email: user.email, password: 'wrong' });
|
|
421
|
+
// Assert: locked
|
|
422
|
+
expect(res.status).toBe(423);
|
|
423
|
+
expect(res.body.error.message).toContain('Account locked');
|
|
424
|
+
});
|
|
425
|
+
```
|
|
426
|
+
|
|
427
|
+
#### One AC, Multiple Test Cases
|
|
428
|
+
|
|
429
|
+
Each AC produces at minimum one happy-path test. Then derive edge cases:
|
|
430
|
+
|
|
431
|
+
- **Boundary values**: If the AC says "max 50 characters," test 49, 50, and 51
|
|
432
|
+
- **Empty/null inputs**: If the AC assumes input exists, test what happens when it does not
|
|
433
|
+
- **Concurrency**: If the AC describes a state change, test what happens with simultaneous requests
|
|
434
|
+
|
|
435
|
+
#### Negative Case Derivation
|
|
436
|
+
|
|
437
|
+
For every "Given X" in an AC, systematically test "Given NOT X":
|
|
438
|
+
|
|
439
|
+
- AC says "Given user is authenticated" — test unauthenticated access (expect 401)
|
|
440
|
+
- AC says "Given the order exists" — test with nonexistent order ID (expect 404)
|
|
441
|
+
- AC says "Given valid payment details" — test with expired card, insufficient funds, invalid CVV
|
|
442
|
+
|
|
443
|
+
#### Parameterized Tests for Similar ACs
|
|
444
|
+
|
|
445
|
+
When multiple ACs follow the same pattern with different inputs, use data-driven tests:
|
|
446
|
+
|
|
447
|
+
```typescript
|
|
448
|
+
it.each([
|
|
449
|
+
['empty email', { email: '', password: 'valid' }, 'Email is required'],
|
|
450
|
+
['invalid email', { email: 'notanemail', password: 'valid' }, 'Invalid email format'],
|
|
451
|
+
['short password', { email: 'a@b.com', password: '123' }, 'Password too short'],
|
|
452
|
+
])('rejects registration with %s', async (_, input, expectedError) => {
|
|
453
|
+
const res = await request(app).post('/register').send(input);
|
|
454
|
+
expect(res.status).toBe(400);
|
|
455
|
+
expect(res.body.error.message).toContain(expectedError);
|
|
456
|
+
});
|
|
457
|
+
```
|
|
458
|
+
|
|
459
|
+
#### Test Naming for Traceability
|
|
460
|
+
|
|
461
|
+
Test names should mirror the AC wording so that when a test fails, the team can trace it back to the requirement without reading the test body:
|
|
462
|
+
|
|
463
|
+
- AC: "User sees error when email is already taken" — Test: `'returns 409 when email is already taken'`
|
|
464
|
+
- AC: "Profile updates immediately after save" — Test: `'updates profile and reflects changes on next fetch'`
|
|
465
|
+
- Include the story or AC ID in the describe block when practical: `describe('US-002: Edit profile', () => { ... })`
|
|
466
|
+
|
|
467
|
+
### Pending Test Syntax and Skeleton-to-TDD Workflow
|
|
468
|
+
|
|
469
|
+
#### Pending Test Syntax
|
|
470
|
+
|
|
471
|
+
A pending test (also called a test skeleton or todo test) marks a test case that is known to be needed but not yet implemented. It is intentionally reported as pending or skipped — never as passing — serving as a reminder and a contract: CI output lists it, so nobody can accidentally claim the work is done.
|
|
472
|
+
|
|
473
|
+
**TypeScript / Jest:**
|
|
474
|
+
```typescript
|
|
475
|
+
// it.todo() — built-in pending marker; no callback needed
|
|
476
|
+
it.todo('returns 404 when user does not exist');
|
|
477
|
+
it.todo('rejects payment with expired card');
|
|
478
|
+
|
|
479
|
+
// xit() / xdescribe() — skipped test with a body (useful to sketch logic first)
|
|
480
|
+
xit('locks account after 5 failed login attempts', async () => {
|
|
481
|
+
// stub: arrange/act/assert goes here
|
|
482
|
+
});
|
|
483
|
+
```
|
|
484
|
+
|
|
485
|
+
**Python / pytest:**
|
|
486
|
+
```python
|
|
487
|
+
import pytest
|
|
488
|
+
|
|
489
|
+
@pytest.mark.skip(reason="not yet implemented — US-014 payment failure handling")
|
|
490
|
+
def test_rejects_expired_card():
|
|
491
|
+
pass # implementation stub
|
|
492
|
+
|
|
493
|
+
# Or with pytest-todo plugin:
|
|
494
|
+
@pytest.mark.todo
|
|
495
|
+
def test_sends_confirmation_email_on_order():
|
|
496
|
+
pass
|
|
497
|
+
```
|
|
498
|
+
|
|
499
|
+
**Go / testing:**
|
|
500
|
+
```go
|
|
501
|
+
func TestRejectsExpiredCard(t *testing.T) {
|
|
502
|
+
t.Skip("not yet implemented — US-014")
|
|
503
|
+
}
|
|
504
|
+
```
|
|
505
|
+
|
|
506
|
+
**Bats (shell):**
|
|
507
|
+
```bash
|
|
508
|
+
@test "rejects request without auth token" {
|
|
509
|
+
skip "not yet implemented — US-007"
|
|
510
|
+
}
|
|
511
|
+
```
|
|
512
|
+
|
|
513
|
+
The key property of a pending test: it must be **visible in CI output** (not silently ignored) and **clearly labeled** with the story or AC it corresponds to.
|
|
514
|
+
|
|
515
|
+
#### Skeleton-to-TDD Workflow
|
|
516
|
+
|
|
517
|
+
The skeleton-to-TDD workflow takes user stories all the way to passing tests through four explicit stages:
|
|
518
|
+
|
|
519
|
+
1. **Story → Acceptance Criteria.** Extract the Given/When/Then conditions from the user story. Each condition becomes a candidate test.
|
|
520
|
+
2. **Acceptance Criteria → Pending Tests.** Write one `it.todo()` (or language equivalent) per AC. Name each test after the AC's expected outcome. Commit. CI now reports all of these tests as pending — this is the intended state.
|
|
521
|
+
3. **Pending Test → Failing Test.** For one pending test at a time: fill in the Arrange/Act/Assert body. Remove `.todo`. Run tests — the test must fail (because the implementation doesn't exist yet). If it passes immediately, the test is not testing anything real.
|
|
522
|
+
4. **Failing Test → Passing Test.** Write the minimum implementation to make the failing test pass. No speculative code. Run tests — green. Commit.
|
|
523
|
+
|
|
524
|
+
Repeat steps 3-4 for each pending test. When all pending tests for a story are passing, the story is done.
|
|
525
|
+
|
|
526
|
+
**Example progression for US-014: "User sees error when paying with expired card":**
|
|
527
|
+
|
|
528
|
+
```
|
|
529
|
+
Stage 1: AC derived → "given expired card, POST /checkout returns 402 with error.code CARD_EXPIRED"
|
|
530
|
+
Stage 2: it.todo('returns 402 CARD_EXPIRED for expired card') ← CI: pending
|
|
531
|
+
Stage 3: it('returns 402 CARD_EXPIRED for expired card', ...) { ... } ← CI: failing
|
|
532
|
+
Stage 4: PaymentService.charge() → check expiry → throw CardExpiredError ← CI: passing
|
|
533
|
+
```
|
|
534
|
+
|
|
535
|
+
#### story-tests-map.md Format
|
|
536
|
+
|
|
537
|
+
The `story-tests-map.md` file provides a traceability matrix linking user stories to the test files and test names that verify them. It lives in the project root or `docs/` directory.
|
|
538
|
+
|
|
539
|
+
**Minimal format (Markdown table):**
|
|
540
|
+
|
|
541
|
+
```markdown
|
|
542
|
+
# Story-to-Tests Map
|
|
543
|
+
|
|
544
|
+
| Story ID | Story Title | Test File | Test Name(s) | Status |
|
|
545
|
+
|----------|----------------------------|----------------------------------------|-----------------------------------------------------------|----------|
|
|
546
|
+
| US-001 | User registers account | tests/auth/register.test.ts | creates user and returns 201 | passing |
|
|
547
|
+
| US-001 | User registers account | tests/auth/register.test.ts | returns 409 when email already exists | passing |
|
|
548
|
+
| US-002 | User logs in | tests/auth/login.test.ts | returns JWT on valid credentials | passing |
|
|
549
|
+
| US-002 | User logs in | tests/auth/login.test.ts | returns 401 on invalid password | passing |
|
|
550
|
+
| US-014 | Payment with expired card | tests/checkout/payment.test.ts | returns 402 CARD_EXPIRED for expired card | pending |
|
|
551
|
+
| US-014 | Payment with expired card | tests/checkout/payment.test.ts | returns 402 INSUFFICIENT_FUNDS for declined card | pending |
|
|
552
|
+
```
|
|
553
|
+
|
|
554
|
+
**Rules for maintaining the map:**
|
|
555
|
+
- Every user story must have at least one row (even if all tests are pending)
|
|
556
|
+
- The `Status` column reflects the current CI state: `passing`, `failing`, or `pending`
|
|
557
|
+
- When a test is renamed or moved, update the map in the same commit
|
|
558
|
+
- The map is machine-readable — keep it parseable (consistent column counts, no merged cells)
|
|
559
|
+
|
|
400
560
|
## See Also
|
|
401
561
|
|
|
402
562
|
- [api-design](../core/api-design.md) — Contract testing patterns
|
|
@@ -69,14 +69,19 @@ If a task does not have a context brief, the agent should create one from the sp
|
|
|
69
69
|
|
|
70
70
|
When a per-task context block is incomplete, agents should consult this taxonomy to ensure they have sufficient context:
|
|
71
71
|
|
|
72
|
+
**Before starting any task**, check `docs/story-tests-map.md` to find test skeletons for your task's user stories. If test skeletons exist, begin TDD with those pending tests rather than writing new ones.
|
|
73
|
+
|
|
72
74
|
| Task Type | Required Docs | Additional Context |
|
|
73
75
|
|-----------|--------------|-------------------|
|
|
74
|
-
| Backend API | `docs/api-contracts.md`, `docs/database-schema.md`, `docs/domain-models/`, `docs/coding-standards.md`, `docs/tdd-standards.md` | Relevant ADR for API style choices |
|
|
75
|
-
| Frontend UI | `docs/ux-spec.md`, `docs/design-system.md`, `docs/api-contracts.md`, `docs/coding-standards.md`, `docs/tdd-standards.md` | Component patterns from design system |
|
|
76
|
-
| Database migration | `docs/database-schema.md`, `docs/domain-models/`, `docs/operations-runbook.md` | Rollback strategy from ops runbook |
|
|
77
|
-
| Infrastructure/CI | `docs/dev-setup.md`, `docs/git-workflow.md`, `docs/operations-runbook.md` | Deployment pipeline stages |
|
|
78
|
-
| Bug fix | Relevant source code, `docs/tdd-standards.md`, `docs/coding-standards.md` | Related test files, reproduction steps |
|
|
79
|
-
| Security hardening | `docs/security-review.md`, `docs/api-contracts.md`, `docs/coding-standards.md` | OWASP checklist items from security review |
|
|
76
|
+
| Backend API | `docs/api-contracts.md`, `docs/database-schema.md`, `docs/domain-models/`, `docs/coding-standards.md`, `docs/tdd-standards.md`, `docs/story-tests-map.md` | Relevant ADR for API style choices, `tests/acceptance/` skeletons |
|
|
77
|
+
| Frontend UI | `docs/ux-spec.md`, `docs/design-system.md`, `docs/api-contracts.md`, `docs/coding-standards.md`, `docs/tdd-standards.md`, `docs/story-tests-map.md` | Component patterns from design system, `tests/acceptance/` skeletons |
|
|
78
|
+
| Database migration | `docs/database-schema.md`, `docs/domain-models/`, `docs/operations-runbook.md`, `docs/story-tests-map.md` | Rollback strategy from ops runbook, `tests/acceptance/` skeletons |
|
|
79
|
+
| Infrastructure/CI | `docs/dev-setup.md`, `docs/git-workflow.md`, `docs/operations-runbook.md`, `docs/story-tests-map.md` | Deployment pipeline stages |
|
|
80
|
+
| Bug fix | Relevant source code, `docs/tdd-standards.md`, `docs/coding-standards.md`, `docs/story-tests-map.md` | Related test files, reproduction steps, `tests/acceptance/` skeletons |
|
|
81
|
+
| Security hardening | `docs/security-review.md`, `docs/api-contracts.md`, `docs/coding-standards.md`, `docs/story-tests-map.md` | OWASP checklist items from security review, `tests/acceptance/` skeletons |
|
|
82
|
+
| Refactoring | `docs/coding-standards.md`, `docs/system-architecture.md` (if available), `docs/domain-models/` (if available), `docs/story-tests-map.md` | All existing tests pass; no behavior change verified |
|
|
83
|
+
| Performance | `docs/system-architecture.md`, `docs/operations-runbook.md` (if available), `docs/story-tests-map.md` | Benchmark comparison before/after; make check passes |
|
|
84
|
+
| E2E / Integration | `docs/tdd-standards.md`, `tests/acceptance/` (if available), `docs/api-contracts.md` (if available), `docs/story-tests-map.md` | E2E tests pass; make check passes |
|
|
80
85
|
|
|
81
86
|
## Deep Guidance
|
|
82
87
|
|
|
@@ -460,6 +465,16 @@ When `make eval` fails during implementation:
|
|
|
460
465
|
- **Security evals**: Missing input validation or auth check. Fix: add the missing security control per security-review.md.
|
|
461
466
|
4. **If eval seems wrong**: Check if the eval itself is outdated. Flag for upstream review rather than working around it.
|
|
462
467
|
|
|
468
|
+
**Eval Failure → Root Cause Reference**:
|
|
469
|
+
|
|
470
|
+
| Eval Category | Root Cause Doc | What to Check |
|
|
471
|
+
|---------------|---------------|---------------|
|
|
472
|
+
| Adherence | `docs/coding-standards.md` | The specific pattern or convention that was violated |
|
|
473
|
+
| Consistency | Cross-doc references | Naming, paths, and commands match across all documents |
|
|
474
|
+
| Structure | `docs/project-structure.md` | File placement rules, directory conventions |
|
|
475
|
+
| Coverage | `docs/story-tests-map.md` | Missing acceptance-criteria-to-test mapping |
|
|
476
|
+
| Security | `docs/security-review.md` | The specific security control that was violated |
|
|
477
|
+
|
|
463
478
|
**Spec gap discovered during implementation:**
|
|
464
479
|
1. Document the gap with specific details (what's missing, what's needed)
|
|
465
480
|
2. Check if an ADR or architecture decision covers the case
|
|
@@ -476,7 +491,9 @@ When `make eval` fails during implementation:
|
|
|
476
491
|
|
|
477
492
|
When a task's upstream dependency hasn't merged or has failed:
|
|
478
493
|
|
|
479
|
-
1. **Check the dependency task status**
|
|
494
|
+
1. **Check the dependency task status** — Look at git branch status (`git log --oneline origin/main..origin/<branch>`), PR state (`gh pr view <branch>`), or the task tracking system (Beads `bd show <task-id>` / `docs/implementation-plan.md` status column) to determine whether the dependency is in-progress, merged, failed, or blocked.
|
|
480
495
|
2. **If in-progress**: Wait for it to merge. Do not start work that depends on uncommitted changes.
|
|
481
496
|
3. **If failed/blocked**: Flag for human review. The task may need to be reworked, reordered, or its dependency removed.
|
|
482
497
|
4. **If the dependency is in a different agent's worktree**: Coordinate via AGENTS.md or the task tracking system. Never duplicate work.
|
|
498
|
+
5. **Max wait**: If blocked for more than 30 minutes, find an unblocked task from the implementation plan and work on that instead. Do not idle.
|
|
499
|
+
6. **Escalation**: If no unblocked tasks remain, document the blocker in a PR comment (or Beads note) and notify via AGENTS.md or the project's communication channel so the blocker is visible to all agents and the project owner.
|
|
@@ -343,3 +343,44 @@ Before considering a PRD complete:
|
|
|
343
343
|
- [ ] Competitive context is provided
|
|
344
344
|
- [ ] The PRD says WHAT, not HOW
|
|
345
345
|
- [ ] Every stakeholder group has been considered (end users, admins, support, integrators)
|
|
346
|
+
|
|
347
|
+
### Non-Functional Requirements — Specification and Quantification
|
|
348
|
+
|
|
349
|
+
Every NFR must have three components: a **measurable target**, a **measurement method**, and an **acceptable threshold**. Without all three, an NFR is aspirational, not actionable.
|
|
350
|
+
|
|
351
|
+
#### Performance
|
|
352
|
+
|
|
353
|
+
- **Response time**: Specify percentile targets — e.g., "API p95 < 200ms, p99 < 500ms for read operations; p95 < 500ms for writes"
|
|
354
|
+
- **Throughput**: Define sustained request rate — e.g., "System handles 500 requests/second under normal load"
|
|
355
|
+
- **Concurrent users**: State peak capacity — e.g., "10,000 simultaneous authenticated sessions without degradation"
|
|
356
|
+
- **Measurement**: Name the tool and method — "Measured via k6 load test against staging, run nightly in CI"
|
|
357
|
+
|
|
358
|
+
#### Security
|
|
359
|
+
|
|
360
|
+
- **Compliance standards**: Name the specific standards — OWASP Top 10, SOC2 Type II, PCI DSS Level 1, HIPAA
|
|
361
|
+
- **Authentication requirements**: Specify method and strength — "OAuth 2.0 + PKCE, session timeout 30 min, MFA for admin roles"
|
|
362
|
+
- **Data classification**: Label data tiers — "PII (encrypted at rest AES-256, in transit TLS 1.3), public (CDN-cacheable)"
|
|
363
|
+
- **Audit logging**: Define what is logged — "All auth events, all data mutations, all admin actions; retained 90 days"
|
|
364
|
+
|
|
365
|
+
#### Scalability
|
|
366
|
+
|
|
367
|
+
- **Growth targets**: Quantify the horizon — "Support 10x current load within 12 months without architecture changes"
|
|
368
|
+
- **Scaling strategy**: State horizontal vs vertical — "Stateless API servers behind load balancer; horizontal auto-scale at 70% CPU"
|
|
369
|
+
- **Data volume**: Project storage growth — "100GB Year 1, 1TB Year 3; archive records older than 2 years to cold storage"
|
|
370
|
+
|
|
371
|
+
#### Availability
|
|
372
|
+
|
|
373
|
+
- **Uptime SLA**: State the target and what it means — "99.9% monthly (43 min downtime/month allowed)"
|
|
374
|
+
- **RTO/RPO**: Recovery time objective and recovery point objective — "RTO: 15 min, RPO: 5 min (continuous replication)"
|
|
375
|
+
- **Graceful degradation**: Define fallback behavior — "If payment provider is down, queue orders and retry; show user 'processing' status"
|
|
376
|
+
- **Maintenance windows**: Specify schedule — "Zero-downtime deploys via rolling update; no scheduled maintenance windows"
|
|
377
|
+
|
|
378
|
+
#### Accessibility
|
|
379
|
+
|
|
380
|
+
- **WCAG level**: State the target — "WCAG 2.1 AA compliance for all public-facing pages"
|
|
381
|
+
- **Screen reader support**: Name tested readers — "VoiceOver (macOS/iOS), NVDA (Windows); tested quarterly"
|
|
382
|
+
- **Keyboard navigation**: Full keyboard operability for all interactive elements; visible focus indicators
|
|
383
|
+
|
|
384
|
+
#### The Three-Part Rule
|
|
385
|
+
|
|
386
|
+
Every NFR entry in the PRD must answer: *What is the target?* (p95 < 200ms), *How is it measured?* (k6 load test in CI), *What is acceptable?* (p95 between 200-300ms triggers warning; above 300ms blocks deploy). If any of the three is missing, the NFR is incomplete.
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-database-design
|
|
3
3
|
description: Failure modes and review passes specific to database schema design artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [database, schema, data-modeling, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: Database Schema
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-domain-modeling
|
|
3
3
|
description: Failure modes and review passes specific to domain modeling artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [domain-modeling, ddd, bounded-contexts, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: Domain Modeling
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-implementation-tasks
|
|
3
3
|
description: Failure modes and review passes specific to implementation tasks artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [tasks, planning, decomposition, agents, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: Implementation Tasks
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-methodology
|
|
3
3
|
description: Shared process for conducting multi-pass reviews of documentation artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [methodology, quality-assurance, multi-pass, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review Methodology
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-operations
|
|
3
3
|
description: Failure modes and review passes specific to operations and deployment runbook artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [operations, deployment, monitoring, runbooks, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: Operations & Deployment
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-prd
|
|
3
3
|
description: Failure modes and review passes specific to product requirements document artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [prd, requirements, completeness, clarity, nfr, constraints, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: Product Requirements Document
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-security
|
|
3
3
|
description: Failure modes and review passes specific to security review and documentation artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [security, owasp, auth, threat-modeling, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: Security
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-system-architecture
|
|
3
3
|
description: Failure modes and review passes specific to system architecture documents
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [architecture, components, data-flow, modules, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: System Architecture
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-testing-strategy
|
|
3
3
|
description: Failure modes and review passes specific to testing and quality strategy artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [testing, quality, coverage, test-pyramid, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: Testing Strategy
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-user-stories
|
|
3
3
|
description: Failure modes and review passes specific to user story artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [user-stories, coverage, acceptance-criteria, INVEST, testability, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: User Stories
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-ux-specification
|
|
3
3
|
description: Failure modes and review passes specific to UI/UX specification artifacts
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [ux, design, accessibility, responsive-design, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: UX Specification
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: review-vision
|
|
3
3
|
description: Vision-specific review passes, failure modes, and quality criteria for product vision documents
|
|
4
|
-
topics: [
|
|
4
|
+
topics: [vision, product-strategy, validation, review]
|
|
5
5
|
---
|
|
6
6
|
|
|
7
7
|
# Review: Product Vision
|