prizmkit 1.0.148 → 1.0.149

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  {
2
- "frameworkVersion": "1.0.148",
3
- "bundledAt": "2026-03-31T16:37:46.664Z",
4
- "bundledFrom": "b56218a"
2
+ "frameworkVersion": "1.0.149",
3
+ "bundledAt": "2026-03-31T17:50:05.806Z",
4
+ "bundledFrom": "9c1abf7"
5
5
  }
@@ -103,8 +103,9 @@ export async function installCommand(corePath, targetRoot) {
103
103
  const hasAssets = existsSync(path.join(corePath, 'assets'));
104
104
  const hasScripts = existsSync(path.join(corePath, 'scripts'));
105
105
  const hasRules = existsSync(path.join(corePath, 'rules'));
106
+ const hasReferences = existsSync(path.join(corePath, 'references'));
106
107
 
107
- if (hasAssets || hasScripts || hasRules) {
108
+ if (hasAssets || hasScripts || hasRules || hasReferences) {
108
109
  // Use directory structure for commands with resources
109
110
  const targetDir = path.join(targetRoot, COMMANDS_DIR, skillName);
110
111
  mkdirSync(targetDir, { recursive: true });
@@ -118,7 +119,7 @@ export async function installCommand(corePath, targetRoot) {
118
119
  }
119
120
 
120
121
  // Copy assets and scripts
121
- for (const subdir of ['scripts', 'assets', 'rules']) {
122
+ for (const subdir of ['scripts', 'assets', 'rules', 'references']) {
122
123
  const srcSubdir = path.join(corePath, subdir);
123
124
  if (existsSync(srcSubdir)) {
124
125
  cpSync(srcSubdir, path.join(targetDir, subdir), { recursive: true });
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "1.0.148",
2
+ "version": "1.0.149",
3
3
  "skills": {
4
4
  "prizm-kit": {
5
5
  "description": "Full-lifecycle dev toolkit. Covers spec-driven development, Prizm context docs, code quality, debugging, deployment, and knowledge management.",
@@ -0,0 +1,48 @@
1
+ # Architecture Decision Capture
2
+
3
+ During planning, key **framework-level** architectural decisions may emerge. When they do, capture them in the project instruction file so all future AI sessions have this context.
4
+
5
+ ## What Qualifies (ALL must apply)
6
+
7
+ Only capture decisions that are **framework-shaping** — NOT individual feature details. Qualifying categories:
8
+
9
+ | Category | Examples |
10
+ |----------|----------|
11
+ | Tech stack choices | PostgreSQL over MongoDB, React over Vue, Node.js runtime |
12
+ | Communication patterns | REST vs GraphQL, WebSocket vs SSE vs polling |
13
+ | Architectural patterns | Monorepo, microservices, monolith, event-driven |
14
+ | Data model strategies | Relational vs document, event sourcing, CQRS |
15
+ | Security architecture | JWT vs session, OAuth provider, RBAC model |
16
+
17
+ **Do NOT capture**: individual feature implementation details, UI component choices, specific API endpoint designs, or anything scoped to a single feature.
18
+
19
+ **This is conditional** — most planning sessions will NOT produce architecture decisions. Only capture when genuinely impactful decisions are made during the discussion.
20
+
21
+ ## When to Capture
22
+
23
+ After Phase 5 (DAG verification), before Phase 6 (JSON generation). At this point decisions are settled.
24
+
25
+ ## How to Capture
26
+
27
+ 1. **Detect platform** — determine which project instruction file to update:
28
+ - `.claude/` directory exists → append to `CLAUDE.md`
29
+ - `.codebuddy/` directory exists → append to `CODEBUDDY.md`
30
+ - Both exist → append to both
31
+ - Neither exists → skip (no project instruction file)
32
+
33
+ 2. **Check for existing section** — read the target file and look for `### Architecture Decisions` heading:
34
+ - If heading exists → append new entries below it (avoid duplicates with existing entries)
35
+ - If heading does not exist → create it at the end of the file
36
+
37
+ 3. **Format** — one line per decision, no feature IDs:
38
+ ```markdown
39
+ ### Architecture Decisions
40
+ - WebSocket for real-time: sub-second latency required for collaboration features
41
+ - PostgreSQL: relational data model with complex queries, ACID compliance needed
42
+ - Monorepo structure: shared types between frontend and backend
43
+ ```
44
+
45
+ 4. **User confirmation** — before writing, show the collected decisions and ask:
46
+ > "These architecture decisions were identified during planning. Record them to [CLAUDE.md / CODEBUDDY.md]? (Y/n)"
47
+
48
+ If user declines, skip without further prompting.
@@ -0,0 +1,101 @@
1
+ # Brainstorm Guide — Structured Ideation Before Implementation
2
+
3
+ > Separate WHAT from HOW. Explore the problem space before committing to a solution.
4
+
5
+ This guide provides the structured brainstorming framework used in Phase 1 of the workflow.
6
+ The AI facilitates this process as a **design collaborator**, not a builder.
7
+
8
+ ## Four Phases
9
+
10
+ ### Phase A: Assess Clarity
11
+
12
+ Evaluate the user's initial goal statement:
13
+
14
+ - **Clear** — Specific and actionable (e.g., "add JWT auth to the API")
15
+ - **Vague** — Direction exists but needs narrowing (e.g., "improve security")
16
+ - **Exploring** — No firm goal yet, just a direction (e.g., "something with auth")
17
+
18
+ If **vague** or **exploring**, ask follow-up questions to sharpen the goal.
19
+ Do NOT proceed until there is a concrete, testable problem statement (one sentence).
20
+
21
+ ### Phase B: Understand the Idea
22
+
23
+ Answer these questions (use codebase exploration as needed):
24
+
25
+ 1. **What problem does this solve?** — State the pain point in concrete terms.
26
+ 2. **Who benefits?** — End users, developers, operators?
27
+ 3. **What exists today?** — Current state, prior art in the codebase, adjacent systems.
28
+ 4. **What constraints matter?** — Performance, compatibility, security, timeline.
29
+
30
+ Non-functional requirements to explicitly clarify or propose defaults for:
31
+ - Performance expectations
32
+ - Scale (users, data, traffic)
33
+ - Security or privacy constraints
34
+ - Reliability / availability needs
35
+ - Maintenance and ownership expectations
36
+
37
+ If the user is unsure on any point, propose reasonable defaults and clearly mark them as **assumptions**.
38
+
39
+ Summarize findings before moving on. If anything is unclear, ask.
40
+
41
+ ### Phase C: Explore Approaches
42
+
43
+ Generate **2-3 distinct approaches**. For each:
44
+
45
+ - **Name** — Short label (e.g., "JWT middleware", "OAuth proxy")
46
+ - **How it works** — 2-3 sentences
47
+ - **Pros** — What it gets right
48
+ - **Cons** — What it gets wrong or defers
49
+ - **Effort** — Rough scope (small / medium / large)
50
+
51
+ #### Adversarial Critique (Red Team)
52
+
53
+ Before asking the user to choose, stress-test each approach using the red team checklist
54
+ (`references/red-team-checklist.md`):
55
+
56
+ 1. What breaks first?
57
+ 2. What's the hidden cost?
58
+ 3. What assumption is wrong?
59
+ 4. Who disagrees?
60
+
61
+ Mark any approach that fails 2+ red team questions as **HIGH RISK**.
62
+ If all approaches fail, generate a hybrid addressing the weaknesses.
63
+
64
+ Present the comparison and let the user pick an approach or request a hybrid.
65
+
66
+ ### Phase D: Capture Design
67
+
68
+ Produce a structured summary:
69
+
70
+ ```markdown
71
+ ## Problem Statement
72
+ [One sentence, testable]
73
+
74
+ ## Approaches Considered
75
+ [2-3 approaches with pros/cons/effort]
76
+
77
+ ## Selected Approach
78
+ [User's choice + rationale]
79
+
80
+ ## Assumptions
81
+ [All assumptions explicitly listed]
82
+
83
+ ## Open Questions
84
+ [Unresolved items, if any]
85
+
86
+ ## Key Decisions
87
+ [What was decided and why — alternatives and rationale]
88
+ ```
89
+
90
+ This summary becomes the input for the next phase (specification or planning).
91
+
92
+ ---
93
+
94
+ ## Rules
95
+
96
+ - Ask as many questions as needed — no rushing
97
+ - One topic at a time for complex clarifications
98
+ - Prefer multiple-choice questions when possible
99
+ - Assumptions must be explicit, never silent
100
+ - YAGNI ruthlessly — avoid premature complexity
101
+ - Do NOT implement, code, or modify behavior during brainstorming
@@ -0,0 +1,34 @@
1
+ # Browser Interaction Planning
2
+
3
+ For web apps with UI, features that involve user-facing pages or interactive flows can optionally include a `browser_interaction` field. This enables the dev-pipeline to verify UI behavior automatically using `playwright-cli` after implementation.
4
+
5
+ ## How to Capture
6
+
7
+ During Phase 4 (refine descriptions and acceptance criteria), for qualifying features ask:
8
+
9
+ > "This feature has UI behavior. Want to add browser verification so the pipeline can auto-check it after implementation? (Y/n)"
10
+
11
+ If yes, generate the `browser_interaction` object:
12
+
13
+ ```json
14
+ {
15
+ "browser_interaction": {
16
+ "url": "http://localhost:3000/login",
17
+ "setup_command": "npm run dev",
18
+ "verify_steps": [
19
+ "snapshot",
20
+ "click <ref> — click login button",
21
+ "fill <ref> 'test@example.com' — enter email",
22
+ "screenshot"
23
+ ],
24
+ "screenshot": true
25
+ }
26
+ }
27
+ ```
28
+
29
+ ## Field Rules
30
+
31
+ - `url` is required — the page URL to verify
32
+ - `setup_command` is optional — command to start dev server (omit if already running)
33
+ - `verify_steps` are descriptive placeholders — the actual `ref` IDs are resolved at runtime by `playwright-cli snapshot`. Use natural language descriptions (e.g., "click login button") that the pipeline agent will map to real refs.
34
+ - `screenshot` defaults to `true` — capture final state for human review
@@ -0,0 +1,109 @@
1
+ # Error Recovery & Resume Support
2
+
3
+ Structured error handling for validation failures, interrupted sessions, and checkpoint-based resumption.
4
+
5
+ ## Validation Failures
6
+
7
+ When `python3 scripts/validate-and-generate.py validate --input <file> --mode <mode>` returns errors:
8
+
9
+ ### Parse validation output
10
+ Script returns JSON with `"valid": false`, `"errors": [...]`, `"warnings": [...]`
11
+
12
+ ### Decision Tree
13
+
14
+ **if `error_count == 0` (warnings only):**
15
+ - Proceed with user approval
16
+ - Show warnings and ask: "Continue? (Y/n)"
17
+
18
+ **elif `error_count > 0` (critical errors):**
19
+
20
+ Group errors by type and apply targeted fixes:
21
+
22
+ | Error Type | Symptom | Fix Offered | Auto-Fix? |
23
+ |-----------|---------|------------|-----------|
24
+ | **Schema mismatch** | `$schema` invalid, missing `app_name`, wrong `features` type | "Set `$schema` to `dev-pipeline-feature-list-v1`, `app_name` to string" | Yes |
25
+ | **Feature ID issues** | Invalid format (not `F-NNN`), duplicate IDs, undefined refs | "Suggest corrected IDs, show duplicates" | Yes |
26
+ | **Dependency errors** | Circular dependency, undefined target features | "Show cycle chain (e.g., `F-003 → F-005 → F-003`), suggest break point" | No |
27
+ | **Missing fields** | Feature missing required keys (title, description, AC) | "List each feature + missing keys, guide patch" | Partial |
28
+ | **Insufficient AC** | Feature has <2 acceptance criteria | "Show feature, suggest AC examples" | No |
29
+ | **Invalid values** | complexity not in [low/medium/high], status not pending | "Show field, valid values" | Yes |
30
+
31
+ ### Execution
32
+
33
+ ```
34
+ For auto-fixable errors:
35
+ 1. Show summary: "Found N schema/ID/format issues"
36
+ 2. Offer: auto-fix? (Y/n)
37
+ 3. Apply fix → regenerate file
38
+ 4. Re-run validation
39
+ 5. If new errors → loop (max 2 more attempts)
40
+
41
+ For manual fixes (dependencies, AC content):
42
+ 1. Show concise prompt: "Edit line X-Y in feature-list.json"
43
+ 2. Wait for user action
44
+ 3. Retry validation (max 2 more attempts)
45
+
46
+ if all_retries_exceeded:
47
+ → Escalate: "After 3 attempts, validation still fails.
48
+ (a) Review file manually, OR
49
+ (b) Restart planning from Phase 1"
50
+ ```
51
+
52
+ ## Resume Support
53
+
54
+ App-planner sessions can be resumed from the last completed checkpoint without losing context.
55
+
56
+ ### Detection Logic
57
+
58
+ Check for artifact files in working directory:
59
+
60
+ | Artifacts Found | Last Completed Checkpoint | Next Phase | Resume Action |
61
+ |-----------------|--------------------------|-----------|----------------|
62
+ | None | (new session) | Phase 1: Vision | Start fresh planning |
63
+ | `feature-list.json` exists | CP-AP-6 (file generated) | Phase 7: Final validation | Offer to validate or extend |
64
+ | `feature-list.json` + validation passed | CP-AP-7 (validation pass) | Handoff: dev-pipeline | Offer: execute pipeline now |
65
+ | Partial state (incomplete file) | CP-AP-2 or CP-AP-4 | Next phase after last checkpoint | Resume interactive planning |
66
+ | User restarts mid-session | User says "restart" | Phase 1: Vision | Return to Phase 1 Vision, or load previous checkpoint if requested |
67
+ | Max validation retries exceeded | 3 failed validation loops | Escalation | Offer: (a) manual review, (b) restart from Phase 1 |
68
+
69
+ ### Resume Command (Project Structure)
70
+
71
+ For projects using `.prizmkit/` structure:
72
+
73
+ ```bash
74
+ # Explicit resume (if file is not in current directory):
75
+ app-planner --resume-from <path-to-existing-feature-list.json>
76
+ ```
77
+
78
+ AI detects existing file → suggests:
79
+ ```
80
+ "Existing plan found with N features, M newly added.
81
+ Resume incremental planning? (Y/n)"
82
+ ```
83
+
84
+ ### Incremental Mode Abort
85
+
86
+ If in Incremental mode but existing `feature-list.json` not found:
87
+ - Ask: "Start new plan or provide existing file?"
88
+ - If new plan chosen → switch to Route A (New App Planning)
89
+ - If existing file uploaded → continue Route B
90
+
91
+ ### Artifact Path Convention
92
+
93
+ **CRITICAL PATH RULE**: `feature-list.json` MUST be written to the project root directory
94
+ (same level as `package.json` / `.git`).
95
+
96
+ Before writing, verify: `ls package.json .git 2>/dev/null` — if these exist in the current
97
+ directory, you are at the project root. NEVER write to `dev-pipeline/` or any subdirectory.
98
+
99
+ After writing, verify: `ls -la feature-list.json` from project root.
100
+
101
+ ```
102
+ <project-root>/
103
+ ├── feature-list.json # Primary output (always here, at project root)
104
+ └── .prizmkit/planning/ # Optional organization for backups
105
+ ├── feature-list.validated.json # Checkpoint backup after CP-AP-7
106
+ └── <ISO-timestamp>.backup.json # Optional incremental backups
107
+ ```
108
+
109
+ The pipeline reads `feature-list.json` from the project root by default. If the user specifies a custom path, the launcher accepts it as an argument.
@@ -0,0 +1,71 @@
1
+ # Frontend Design Guide — High-Quality UI Implementation
2
+
3
+ > Create distinctive, production-grade frontend interfaces that avoid generic "AI slop" aesthetics.
4
+
5
+ **App-planner context**: In the planning phase, use this guide to establish **design direction decisions** (aesthetic tone, typography approach, color strategy, layout philosophy). Do NOT produce CSS, code, or implementation artifacts — capture the design direction as decisions in the project instruction file (`CLAUDE.md` / `CODEBUDDY.md`). Downstream implementation skills will consume these decisions when building features.
6
+
7
+ Load this guide **only when the feature involves frontend/UI work**. Skip for backend-only or infrastructure features.
8
+
9
+ ## Context Gathering (Required Before Design)
10
+
11
+ Before any UI design work, gather:
12
+ - **Target audience**: Who uses this product and in what context?
13
+ - **Use cases**: What jobs are they trying to get done?
14
+ - **Brand personality/tone**: How should the interface feel?
15
+
16
+ You cannot infer this from codebase alone — ask the user.
17
+
18
+ ## Design Direction
19
+
20
+ Commit to a **bold** aesthetic direction:
21
+ - **Purpose**: What problem does this interface solve? Who uses it?
22
+ - **Tone**: Pick an intentional aesthetic — minimalist, editorial, brutalist, organic, luxury, playful, retro-futuristic, industrial, art deco, etc.
23
+ - **Differentiation**: What makes this unforgettable?
24
+
25
+ ## Typography
26
+
27
+ - Choose distinctive fonts. Pair a display font with a refined body font.
28
+ - Use a modular type scale with fluid sizing (`clamp()`)
29
+ - Vary font weights and sizes for clear visual hierarchy
30
+ - Avoid overused fonts: Inter, Roboto, Arial, Open Sans, system defaults
31
+ - Avoid monospace as lazy shorthand for "technical" vibes
32
+
33
+ ## Color & Theme
34
+
35
+ - Use modern CSS color functions (oklch, color-mix, light-dark)
36
+ - Tint neutrals toward brand hue for subconscious cohesion
37
+ - Avoid: pure black (#000) or pure white (#fff) — always tint
38
+ - Avoid: the AI palette (cyan-on-dark, purple-to-blue gradients, neon accents on dark)
39
+ - Avoid: gradient text for "impact", default dark mode with glowing accents
40
+
41
+ ## Layout & Space
42
+
43
+ - Create visual rhythm through varied spacing — not uniform padding
44
+ - Use fluid spacing with `clamp()` that breathes on larger screens
45
+ - Embrace asymmetry and unexpected compositions
46
+ - Avoid: wrapping everything in cards, nesting cards, identical card grids
47
+ - Avoid: centering everything, uniform spacing
48
+
49
+ ## Motion
50
+
51
+ - Focus on high-impact moments: one well-orchestrated page load > scattered micro-interactions
52
+ - Use exponential easing for natural deceleration
53
+ - Use `transform` and `opacity` only — avoid animating layout properties
54
+ - Avoid: bounce/elastic easing, excessive animations
55
+
56
+ ## Interaction
57
+
58
+ - Use progressive disclosure — start simple, reveal complexity through interaction
59
+ - Design empty states that teach the interface
60
+ - Use optimistic UI — update immediately, sync later
61
+ - Avoid: making every button primary, redundant headers
62
+
63
+ ## Responsive
64
+
65
+ - Use container queries (`@container`) for component-level responsiveness
66
+ - Adapt the interface for different contexts, don't just shrink it
67
+ - Never hide critical functionality on mobile
68
+
69
+ ## The AI Slop Test
70
+
71
+ If you showed this interface to someone and said "AI made this", would they believe you immediately? If yes, redesign. A distinctive interface should make someone ask "how was this made?" not "which AI made this?"
@@ -0,0 +1,112 @@
1
+ # Incremental Feature Planning Reference
2
+
3
+ Use this reference when the user adds features to an existing app/plan.
4
+
5
+ ## Pre-Checks
6
+
7
+ 1. Read existing `feature-list.json`.
8
+ 2. Determine current max ID and continue from next `F-NNN`.
9
+ 3. **Detect existing writing style** (see §Style Detection below).
10
+ 4. Preserve compatibility with existing dependency structure.
11
+ 5. **Project conventions** — handled by the main SKILL.md flow (rule #6) before incremental planning begins. No additional action needed here; conventions are loaded automatically.
12
+
13
+ If `feature-list.json` is missing, ask whether to initialize a new plan.
14
+
15
+ ## Style Detection (Automatic)
16
+
17
+ Before drafting new features, analyze existing plan to preserve consistency:
18
+
19
+ 1. **Language Detection**
20
+ - Scan `title` and `description` fields
21
+ - If >70% English titles → default to English
22
+ - If >70% Chinese titles → suggest Chinese (or allow bilingual)
23
+
24
+ 2. **Description Density**
25
+ - Calculate avg word count per description
26
+ - If avg <30 words → draft concise descriptions
27
+ - If avg 30-80 words → draft standard detail
28
+ - If avg >80 words → draft detailed descriptions
29
+
30
+ 3. **Acceptance Criteria Patterns**
31
+ - Count avg AC per feature
32
+ - Identify dominant format (Given/When/Then Gherkin, BDD, or loose)
33
+ - Draft new AC in same format
34
+
35
+ 4. **Complexity Distribution**
36
+ - Count low/medium/high distribution in existing features
37
+ - Alert if new features deviate significantly (>20 percentile points)
38
+ - Suggest rebalancing if needed
39
+
40
+ ### Style Consistency Prompt
41
+
42
+ If new features deviate significantly from detected style:
43
+
44
+ ```
45
+ "Your new features use avg X words/description, but existing features use Y.
46
+ Current ratio: low:M%, medium:N%, high:O%.
47
+ Adjust new features to match? (Y/n)"
48
+ ```
49
+
50
+ Accept user choice, then adjust draft accordingly before JSON generation.
51
+
52
+ ## Incremental Planning Flow
53
+
54
+ ### Step 1: Clarify Increment Scope
55
+ Capture:
56
+ - business objective of the new increment
57
+ - affected existing modules/features
58
+ - timeline or priority constraints
59
+
60
+ ### Step 2: Impact Mapping
61
+ For each candidate feature, identify:
62
+ - upstream dependencies
63
+ - downstream impacts
64
+ - risk hotspots (auth, data migration, API compatibility)
65
+
66
+ ### Step 3: Append Features
67
+ Append new items only (do not rewrite old validated features unless user asks).
68
+
69
+ For each new feature:
70
+ - assign next ID
71
+ - set `status: "pending"`
72
+ - link dependencies to existing IDs where needed
73
+ - keep title in English
74
+ - **write rich descriptions** (see `planning-guide.md` §4):
75
+ - minimum 15 words (validation error below this)
76
+ - recommended: 30+ words (low), 50+ words (medium), 80+ words (high complexity)
77
+ - include: what to build, key behaviors, integration points, data model, error/edge cases
78
+
79
+ ### Step 4: Rebalance Priority
80
+ Allow priority updates for both old and new features if user requests reprioritization.
81
+ Keep dependency correctness as first constraint.
82
+
83
+ ### Step 5: Validate
84
+ Run:
85
+ ```bash
86
+ python3 ${SKILL_DIR}/scripts/validate-and-generate.py validate --input feature-list.json --mode incremental
87
+ ```
88
+
89
+ Fix and re-run until pass.
90
+
91
+ ## Merge/Rewrite Rules
92
+
93
+ - Default: append only
94
+ - Rewrite existing features only when user explicitly asks
95
+ - Never break valid IDs/references
96
+ - Never set new features to `in_progress` or `completed`
97
+
98
+ ## Practical Prompts
99
+
100
+ Use concise prompts during interaction:
101
+ - "What is the goal of this increment? Which user problem is the priority?"
102
+ - "Which existing Feature IDs do these new features depend on?"
103
+ - "Do you want to reprioritize at the same time, or just append to the current sequence?"
104
+
105
+ ## Final Delivery Checklist
106
+
107
+ - [ ] Existing file read before edits
108
+ - [ ] New IDs continue sequence
109
+ - [ ] Existing style preserved
110
+ - [ ] Dependency graph still DAG
111
+ - [ ] Validation passes
112
+ - [ ] Next step recommendation follows priority: launcher → daemon → run.sh
@@ -0,0 +1,85 @@
1
+ # New App Planning Reference
2
+
3
+ Use this reference when the user is planning a product from scratch.
4
+
5
+ ## Phase Guide
6
+
7
+ ### Phase 1: Vision
8
+ Capture:
9
+ - problem statement
10
+ - target users
11
+ - core value proposition
12
+ - non-goals (what to exclude from MVP)
13
+
14
+ ### Phase 2: Stack Defaults
15
+ If user has no preference, propose defaults aligned with project conventions:
16
+ - Frontend: Next.js + TypeScript
17
+ - Backend: Express/Nest (choose one and stay consistent)
18
+ - DB: PostgreSQL
19
+ - ORM: Prisma
20
+ - Test: unit + e2e baseline
21
+
22
+ If `.prizmkit/config.json` exists, prioritize its settings.
23
+
24
+ ### Phase 3: MVP Features
25
+ Rules:
26
+ - Include foundational setup feature first
27
+ - Aim 5-12 features for MVP
28
+ - Keep each feature implementable in one pipeline unit unless clearly too large
29
+
30
+ For each feature define:
31
+ - `id`
32
+ - `title`
33
+ - `description`
34
+ - `priority` — string: `"high"`, `"medium"`, or `"low"` (never numeric)
35
+ - `estimated_complexity`
36
+ - `dependencies`
37
+ - `acceptance_criteria`
38
+ - `status: "pending"`
39
+ - `browser_interaction` (optional — for UI features, see §Browser Interaction Planning in SKILL.md)
40
+
41
+ ### Phase 4: Dependency & Priority
42
+ Check:
43
+ - no cycles
44
+ - all dependency targets exist
45
+ - order is executable
46
+ - priorities align with delivery value and risk
47
+
48
+ ### Phase 5: Granularity
49
+ Split into `sub_features` when:
50
+ - scope crosses too many modules
51
+ - acceptance criteria are excessive
52
+ - complexity is high and uncertainty is high
53
+
54
+ ### Phase 6: Generate + Validate
55
+ 1. Write `feature-list.json`.
56
+ 2. Run:
57
+ ```bash
58
+ python3 ${SKILL_DIR}/scripts/validate-and-generate.py validate --input feature-list.json --mode new
59
+ ```
60
+ 3. Fix all errors, then re-run.
61
+
62
+ ## Quality Rules
63
+
64
+ - Keep titles concise and English
65
+ - Make descriptions implementation-oriented (clear boundaries, interfaces, behavior)
66
+ - **Description depth by complexity:**
67
+ - **Low complexity**: ≥30 words — what to build, key behavior, which files/modules are affected
68
+ - **Medium complexity**: ≥50 words — add integration points, data model overview, error handling approach
69
+ - **High complexity**: ≥80 words — add architecture decisions, performance considerations, security implications, migration strategy if applicable
70
+ - **Description must cover** (adapt per feature):
71
+ 1. **What**: concrete deliverable (API endpoints, UI components, data models)
72
+ 2. **How it integrates**: which existing modules/services it connects to
73
+ 3. **Key behaviors**: business rules, validation rules, state transitions
74
+ 4. **Data model**: entities, relationships, key fields (when applicable)
75
+ 5. **Error/edge cases**: what happens on failure, empty states, limits
76
+ - Write testable acceptance criteria (at least 3; prefer 5+ for medium/high)
77
+ - Keep dependency graph simple and explicit
78
+
79
+ ## Final Delivery Checklist
80
+
81
+ - [ ] User confirmed MVP scope
82
+ - [ ] IDs are sequential
83
+ - [ ] `status` initialized to `pending`
84
+ - [ ] Validation passes
85
+ - [ ] Next step recommendation follows priority: launcher → daemon → run.sh
@@ -0,0 +1,93 @@
1
+ # Project Conventions — First-Run Setup Questions
2
+
3
+ > Capture project-wide norms once, reuse across all planning sessions.
4
+
5
+ These questions establish foundational conventions that affect every feature. They should be asked **once** during the first `app-planner` session and persisted to `.prizmkit/project-conventions.json` so subsequent sessions skip them.
6
+
7
+ ## Persistence
8
+
9
+ - **File**: `.prizmkit/project-conventions.json`
10
+ - **Read on startup**: If the file exists and a convention has a non-null value, skip that question.
11
+ - **Write after asking**: Save answers immediately after the user responds.
12
+ - **Shared**: Other skills (`prizmkit-init`, `dev-pipeline`) may also read this file.
13
+
14
+ ## Convention Questions
15
+
16
+ Ask only unanswered conventions. Group related questions together in a single prompt when possible.
17
+
18
+ ### 1. UI Display Language
19
+
20
+ **Key**: `ui_language`
21
+
22
+ > "What is the primary language for the application's user interface? (e.g., English, 中文, 日本語, etc.)"
23
+
24
+ **Follow-up if applicable**: If the user specifies a non-English language, confirm whether all UI text (buttons, labels, error messages, tooltips) should be in that language.
25
+
26
+ ### 2. Multi-Language Support (i18n)
27
+
28
+ **Key**: `i18n_enabled`, `i18n_languages`
29
+
30
+ > "Does the application need multi-language support (i18n)?"
31
+
32
+ - If **yes** → ask: "Which languages should be supported? List all target languages."
33
+ - If **no** → set `i18n_enabled: false`, skip `i18n_languages`.
34
+
35
+ **Impact on planning**: If i18n is enabled, add an infrastructure feature for i18n setup (framework, translation file structure, language switcher) early in the dependency graph.
36
+
37
+ ### 3. Date, Time & Currency Formats
38
+
39
+ **Key**: `date_format`, `timezone_strategy`, `currency`
40
+
41
+ > "What are your preferences for date/time and currency display?"
42
+
43
+ Sub-questions (ask as a group):
44
+ - **Date format**: "Preferred date display format?" (e.g., `YYYY-MM-DD`, `MM/DD/YYYY`, `DD/MM/YYYY`, locale-auto)
45
+ - **Timezone strategy**: "How should the app handle timezones?" (e.g., UTC storage + local display, user-selected timezone, server timezone only)
46
+ - **Currency**: "If the app handles money, which currency format?" (e.g., USD `$1,234.56`, CNY `¥1,234.56`, EUR `€1.234,56`, or N/A if no monetary values)
47
+
48
+ If the user says "not applicable" for currency, set `currency: null`.
49
+
50
+ ### 4. Code & Git Language Conventions
51
+
52
+ **Key**: `code_comment_language`, `git_commit_language`
53
+
54
+ > "What language should be used for code comments and git commit messages?"
55
+
56
+ Options to present:
57
+ - **Code comments**: English / Chinese / Match UI language / Mixed
58
+ - **Git commit messages**: English / Chinese / Match code comments
59
+
60
+ **Default suggestion**: English for both (widely accessible, compatible with open-source contribution).
61
+
62
+ ## JSON Schema
63
+
64
+ ```json
65
+ {
66
+ "ui_language": "English",
67
+ "i18n_enabled": false,
68
+ "i18n_languages": [],
69
+ "date_format": "YYYY-MM-DD",
70
+ "timezone_strategy": "utc_storage_local_display",
71
+ "currency": null,
72
+ "code_comment_language": "English",
73
+ "git_commit_language": "English"
74
+ }
75
+ ```
76
+
77
+ ## How Conventions Are Used
78
+
79
+ Conventions are **AI context** — they inform your behavior during planning but are NOT written into `feature-list.json` `global_context` fields. Specifically:
80
+
81
+ - `ui_language` → Write feature descriptions and acceptance criteria in a way that acknowledges the target UI language (e.g., mention CJK text handling if Chinese, RTL layout if Arabic)
82
+ - `i18n_enabled` → If true, consider proposing an i18n infrastructure feature early in the dependency graph; ensure feature descriptions mention translation-ready patterns
83
+ - `date_format`, `timezone_strategy` → When features involve date/time display or storage, reference the chosen convention in feature descriptions
84
+ - `currency` → When features involve monetary values, reference the currency convention in descriptions
85
+ - `code_comment_language` → Inform the language used in code-related examples within feature descriptions
86
+ - `git_commit_language` → Inform pipeline and workflow language expectations
87
+
88
+ ## Rules
89
+
90
+ - **Ask at most once per convention** — if answered, never re-ask unless user invokes a reset.
91
+ - **No blocking** — if the user skips a question ("I'll decide later"), set value to `null` and move on. The question will be re-asked next session.
92
+ - **Respect existing config** — if `.prizmkit/config.json` already has equivalent fields (e.g., `tech_stack.language`), do not duplicate. Only ask questions not covered by existing config.
93
+ - **Minimal interruption** — batch all unanswered questions into a single interaction round, not one-by-one.
@@ -0,0 +1,40 @@
1
+ # Red Team Checklist for Ideation
2
+
3
+ Use these questions to stress-test approaches during brainstorm Phase C.
4
+
5
+ ## Structural Questions
6
+
7
+ 1. **What breaks first?** — Identify the weakest link under stress (load, concurrency, edge cases, adversarial input). If you can't name a specific failure mode, the approach is under-specified.
8
+
9
+ 2. **What's the hidden cost?** — Every approach has costs beyond implementation time: maintenance burden, cognitive load for new contributors, infrastructure requirements, monitoring needs, migration complexity.
10
+
11
+ 3. **What assumption is wrong?** — List the unstated assumptions. Which one, if false, invalidates the approach? Common false assumptions: "the API won't change", "data fits in memory", "users will read the docs", "this library is maintained".
12
+
13
+ 4. **Who disagrees?** — Steel-man the opposing view. A performance engineer and a UX designer will critique the same approach differently. What does the most skeptical qualified person say?
14
+
15
+ ## Scoring
16
+
17
+ | Red Team Failures | Classification |
18
+ |-------------------|---------------|
19
+ | 0 | Strong approach |
20
+ | 1 | Viable with mitigation |
21
+ | 2+ | HIGH RISK — needs rethinking or mitigation plan |
22
+
23
+ ## When All Approaches Are HIGH RISK
24
+
25
+ If every approach fails 2+ questions, the problem statement may be wrong. Consider:
26
+ - Is the goal too broad? Split it.
27
+ - Is there a constraint you haven't stated? Surface it.
28
+ - Generate a hybrid approach that addresses the specific red team failures.
29
+
30
+ ## Common False Assumptions
31
+
32
+ | Assumption | Why It Fails |
33
+ |-----------|-------------|
34
+ | "The API won't change" | External APIs change without notice; pin versions and add contract tests |
35
+ | "Data fits in memory" | Works in dev, breaks in prod when dataset grows 10x |
36
+ | "Users will read the docs" | They won't; make the happy path obvious and errors informative |
37
+ | "This library is maintained" | Check commit history; many popular libraries are effectively abandoned |
38
+ | "We can refactor later" | Technical debt compounds; later never comes without explicit scheduling |
39
+ | "Performance doesn't matter yet" | Architecture decisions that ignore performance are expensive to fix |
40
+ | "The team will adopt it" | New tools need champions, training, and visible wins to gain traction |
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "prizmkit",
3
- "version": "1.0.148",
3
+ "version": "1.0.149",
4
4
  "description": "Create a new PrizmKit-powered project with clean initialization — no framework dev files, just what you need.",
5
5
  "type": "module",
6
6
  "bin": {
package/src/scaffold.js CHANGED
@@ -156,8 +156,8 @@ export async function installSkills(platform, skills, projectRoot, dryRun) {
156
156
  if (!frontmatter.name) frontmatter.name = skillName;
157
157
  await fs.writeFile(path.join(targetDir, 'SKILL.md'), buildMarkdown(frontmatter, body));
158
158
 
159
- // 复制 assets/ 和 scripts/
160
- for (const subdir of ['assets', 'scripts']) {
159
+ // 复制 assets/、scripts/ 和 references/
160
+ for (const subdir of ['assets', 'scripts', 'references']) {
161
161
  const srcSubdir = path.join(corePath, subdir);
162
162
  if (await fs.pathExists(srcSubdir)) {
163
163
  await fs.copy(srcSubdir, path.join(targetDir, subdir));
@@ -172,6 +172,7 @@ export async function installSkills(platform, skills, projectRoot, dryRun) {
172
172
 
173
173
  const hasAssets = await fs.pathExists(path.join(corePath, 'assets'));
174
174
  const hasScripts = await fs.pathExists(path.join(corePath, 'scripts'));
175
+ const hasReferences = await fs.pathExists(path.join(corePath, 'references'));
175
176
 
176
177
  // Always write the command file at the flat level (.claude/commands/skillName.md)
177
178
  // so Claude Code shows it as /skillName (not /skillName:skillName).
@@ -184,12 +185,12 @@ export async function installSkills(platform, skills, projectRoot, dryRun) {
184
185
  await fs.ensureDir(commandsDir);
185
186
  await fs.writeFile(path.join(commandsDir, `${skillName}.md`), converted);
186
187
 
187
- if (hasAssets || hasScripts) {
188
+ if (hasAssets || hasScripts || hasReferences) {
188
189
  // Place assets/scripts outside .claude/commands/ to prevent Claude Code
189
190
  // from registering them as slash commands (e.g. /skillName:assets:file).
190
191
  const assetTargetDir = path.join(projectRoot, '.claude', 'command-assets', skillName);
191
192
  await fs.ensureDir(assetTargetDir);
192
- for (const subdir of ['assets', 'scripts']) {
193
+ for (const subdir of ['assets', 'scripts', 'references']) {
193
194
  const srcSubdir = path.join(corePath, subdir);
194
195
  if (await fs.pathExists(srcSubdir)) {
195
196
  await fs.copy(srcSubdir, path.join(assetTargetDir, subdir));