prizmkit 1.0.148 → 1.0.150

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  {
2
- "frameworkVersion": "1.0.148",
3
- "bundledAt": "2026-03-31T16:37:46.664Z",
4
- "bundledFrom": "b56218a"
2
+ "frameworkVersion": "1.0.150",
3
+ "bundledAt": "2026-03-31T17:59:38.986Z",
4
+ "bundledFrom": "51bf160"
5
5
  }
@@ -11,7 +11,7 @@
11
11
 
12
12
  import { parseFrontmatter, buildMarkdown } from '../shared/frontmatter.js';
13
13
  import { COMMANDS_DIR } from './paths.js';
14
- import { existsSync, mkdirSync, cpSync } from 'node:fs';
14
+ import { existsSync, mkdirSync, cpSync, readdirSync } from 'node:fs';
15
15
  import { readFile, writeFile } from 'node:fs/promises';
16
16
  import path from 'path';
17
17
 
@@ -100,11 +100,13 @@ export function convertSkillToCommand(skillContent, skillName) {
100
100
  */
101
101
  export async function installCommand(corePath, targetRoot) {
102
102
  const skillName = path.basename(corePath);
103
- const hasAssets = existsSync(path.join(corePath, 'assets'));
104
- const hasScripts = existsSync(path.join(corePath, 'scripts'));
105
- const hasRules = existsSync(path.join(corePath, 'rules'));
106
103
 
107
- if (hasAssets || hasScripts || hasRules) {
104
+ // Discover all subdirectories in the skill
105
+ const subdirs = existsSync(corePath)
106
+ ? readdirSync(corePath, { withFileTypes: true }).filter(e => e.isDirectory()).map(e => e.name)
107
+ : [];
108
+
109
+ if (subdirs.length > 0) {
108
110
  // Use directory structure for commands with resources
109
111
  const targetDir = path.join(targetRoot, COMMANDS_DIR, skillName);
110
112
  mkdirSync(targetDir, { recursive: true });
@@ -117,12 +119,9 @@ export async function installCommand(corePath, targetRoot) {
117
119
  await writeFile(path.join(targetDir, `${skillName}.md`), converted);
118
120
  }
119
121
 
120
- // Copy assets and scripts
121
- for (const subdir of ['scripts', 'assets', 'rules']) {
122
- const srcSubdir = path.join(corePath, subdir);
123
- if (existsSync(srcSubdir)) {
124
- cpSync(srcSubdir, path.join(targetDir, subdir), { recursive: true });
125
- }
122
+ // Copy all subdirectories
123
+ for (const subdir of subdirs) {
124
+ cpSync(path.join(corePath, subdir), path.join(targetDir, subdir), { recursive: true });
126
125
  }
127
126
  } else {
128
127
  // Single file command
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "1.0.148",
2
+ "version": "1.0.150",
3
3
  "skills": {
4
4
  "prizm-kit": {
5
5
  "description": "Full-lifecycle dev toolkit. Covers spec-driven development, Prizm context docs, code quality, debugging, deployment, and knowledge management.",
@@ -58,23 +58,21 @@ Do NOT use this skill when the user only wants to run the pipeline (`dev-pipelin
58
58
 
59
59
  ## Resource Loading Rules (Mandatory)
60
60
 
61
- `SKILL_DIR` definition:
62
- - `SKILL_DIR` is the absolute path of this skill directory.
63
61
  1. **Choose scenario reference before planning**:
64
- - New app → read `references/new-app-planning.md`
65
- - Existing app incremental features → read `references/incremental-feature-planning.md`
62
+ - New app → read `${SKILL_DIR}/references/new-app-planning.md`
63
+ - Existing app incremental features → read `${SKILL_DIR}/references/incremental-feature-planning.md`
66
64
 
67
65
  2. **Use shared quality examples as needed**:
68
- - read `assets/planning-guide.md` for decomposition and acceptance criteria patterns
66
+ - read `${SKILL_DIR}/assets/planning-guide.md` for decomposition and acceptance criteria patterns
69
67
 
70
68
  3. **Load on-demand references when triggered**:
71
- - Validation errors or interrupted session → read `references/error-recovery.md`
72
- - Architecture decisions emerged → read `references/architecture-decisions.md`
73
- - Browser interaction fields needed → read `references/browser-interaction.md`
69
+ - Validation errors or interrupted session → read `${SKILL_DIR}/references/error-recovery.md`
70
+ - Architecture decisions emerged → read `${SKILL_DIR}/references/architecture-decisions.md`
71
+ - Browser interaction fields needed → read `${SKILL_DIR}/references/browser-interaction.md`
74
72
 
75
73
  4. **Brainstorm deep-dive** — If the user chooses to continue discussing or exploring ideas before finalizing features (e.g., during Intent Confirmation the user selects "just explore ideas", or at any point says "let's discuss more" / "I want to think through this"):
76
- → read `references/brainstorm-guide.md` and follow its four-phase structured ideation process (Assess Clarity → Understand → Explore Approaches → Capture Design)
77
- → During Phase C (Explore Approaches), also read `references/red-team-checklist.md` for adversarial critique of each approach
74
+ → read `${SKILL_DIR}/references/brainstorm-guide.md` and follow its four-phase structured ideation process (Assess Clarity → Understand → Explore Approaches → Capture Design)
75
+ → During Phase C (Explore Approaches), also read `${SKILL_DIR}/references/red-team-checklist.md` for adversarial critique of each approach
78
76
  → When brainstorm Phase D produces a "Capture Design" summary, use it as the Vision Summary (fulfilling CP-AP-2) and proceed to Phase 2 (constraints and tech assumptions)
79
77
 
80
78
  5. **Frontend / UI design check** — Evaluate during Phase 2 (constraints and tech assumptions), after tech stack is confirmed. If the project has a frontend framework:
@@ -82,17 +80,17 @@ Do NOT use this skill when the user only wants to run the pipeline (`dev-pipelin
82
80
  → Also search the active CLI's project instruction file (`CLAUDE.md` / `CODEBUDDY.md`) for keywords: "UI", "UX", "design system", "style guide", "theme", "typography", "color palette"
83
81
  → If unified design guidance is found → use it as constraint for feature descriptions
84
82
  → If NO unified design guidance found → ask user: "项目中未找到统一的 UI/UX 设计规范。是否需要在特性规划前先建立一套 UI/UX 设计方向?(No unified UI/UX design system found. Would you like to establish one before feature planning?)"
85
- → If yes → read `references/frontend-design-guide.md` and conduct a design direction session. Capture the result in the project instruction file (with user consent).
83
+ → If yes → read `${SKILL_DIR}/references/frontend-design-guide.md` and conduct a design direction session. Capture the result in the project instruction file (with user consent).
86
84
  → If no → proceed without; individual features may still define their own UI approach.
87
85
 
88
86
  6. **Project conventions check** — After Intent Confirmation (CP-AP-0), before brainstorm or Phase 1 vision work. This runs regardless of session goal (produce or explore):
89
87
  → Read `.prizmkit/project-conventions.json` if it exists
90
88
  → If the file exists but cannot be parsed as valid JSON → warn user ("conventions file is corrupted, will re-ask all questions"), treat all conventions as unanswered
91
- → Cross-reference with `references/project-conventions.md` for the full question list
89
+ → Cross-reference with `${SKILL_DIR}/references/project-conventions.md` for the full question list
92
90
  → For any convention with a `null` or missing value → batch all unanswered questions into a single prompt to the user
93
91
  → Save answers to `.prizmkit/project-conventions.json` (create `.prizmkit/` directory if needed — this is an allowed writable output)
94
92
  → If all conventions are already answered → skip silently, do not re-ask
95
- → Use convention answers as context when writing feature descriptions and proposing features (see `references/project-conventions.md` §How Conventions Are Used)
93
+ → Use convention answers as context when writing feature descriptions and proposing features (see `${SKILL_DIR}/references/project-conventions.md` §How Conventions Are Used)
96
94
 
97
95
  7. **Always validate output via script**:
98
96
  - run:
@@ -134,7 +132,7 @@ Classify user intent first:
134
132
  Use when user starts from idea/blank slate or asks for initial end-to-end plan.
135
133
 
136
134
  Actions:
137
- 1. Load `references/new-app-planning.md`
135
+ 1. Load `${SKILL_DIR}/references/new-app-planning.md`
138
136
  2. Run interactive planning phases
139
137
  3. Generate initial `feature-list.json`
140
138
 
@@ -142,7 +140,7 @@ Actions:
142
140
  Use when user already has app/code/plan and asks to add or adjust features.
143
141
 
144
142
  Actions:
145
- 1. Load `references/incremental-feature-planning.md`
143
+ 1. Load `${SKILL_DIR}/references/incremental-feature-planning.md`
146
144
  2. Read existing `feature-list.json` first (if missing, ask whether to start new plan)
147
145
  3. Append features with next sequential `F-NNN` IDs
148
146
  4. Preserve style/language/detail consistency with existing plan
@@ -159,7 +157,7 @@ After scenario routing, immediately confirm the user's deliverable intent:
159
157
  - **"Produce feature-list.json"** → Continue to Core Workflow. Set session goal = `produce`.
160
158
  - **"Just explore ideas"** → Enter **Exploration Mode**:
161
159
  - Run project conventions check first (CP-AP-1) — same as produce mode
162
- - Load `references/brainstorm-guide.md` and follow its structured ideation process (Phases A-D)
160
+ - Load `${SKILL_DIR}/references/brainstorm-guide.md` and follow its structured ideation process (Phases A-D)
163
161
  - Brainstorm Phase D output serves as the Vision Summary (CP-AP-2)
164
162
  - Continue with Phase 2 (constraints — including frontend design check CP-AP-3 if applicable), then Phases 3-5 normally
165
163
  - At Phase 5 completion, re-ask: "Ideas are taking shape. Ready to generate `feature-list.json` now?"
@@ -206,11 +204,11 @@ Note: Checkpoint numbers (CP-AP-N) are sequential identifiers for the gate, NOT
206
204
  | **CP-AP-6** | `feature-list.json` Generated | Schema validates, all required keys present | 6-7 |
207
205
  | **CP-AP-7** | Final Validation Pass | Python script returns `"valid": true` with zero errors | 8 |
208
206
 
209
- **Resume Detection**: If existing artifacts are found, read `references/error-recovery.md` §Resume Support for checkpoint-based resumption.
207
+ **Resume Detection**: If existing artifacts are found, read `${SKILL_DIR}/references/error-recovery.md` §Resume Support for checkpoint-based resumption.
210
208
 
211
209
  ## Architecture Decision Capture
212
210
 
213
- After Phase 5, if framework-shaping architecture decisions emerged during planning (tech stack, communication patterns, data model strategies — not individual feature details), read `references/architecture-decisions.md` and follow the capture flow. Most sessions will NOT produce architecture decisions — only capture when genuinely impactful.
211
+ After Phase 5, if framework-shaping architecture decisions emerged during planning (tech stack, communication patterns, data model strategies — not individual feature details), read `${SKILL_DIR}/references/architecture-decisions.md` and follow the capture flow. Most sessions will NOT produce architecture decisions — only capture when genuinely impactful.
214
212
 
215
213
  ## Fast Path — Incremental Shortcuts
216
214
 
@@ -270,7 +268,7 @@ A feature is **exempt** when ANY of these are true:
270
268
 
271
269
  ### Default Behavior (Phase 4.2)
272
270
 
273
- 1. **Auto-generate** `browser_interaction` for ALL qualifying features. Read `references/browser-interaction.md` for the object format and field rules.
271
+ 1. **Auto-generate** `browser_interaction` for ALL qualifying features. Read `${SKILL_DIR}/references/browser-interaction.md` for the object format and field rules.
274
272
  2. **Present a summary** to the user showing which features received `browser_interaction`:
275
273
  > "以下 N 个前端特性已自动添加 browser_interaction 用于 Playwright 自动验证:F-002, F-004, F-007。如需移除某个特性的浏览器验证,请告知。"
276
274
  3. **User can opt-OUT** specific features — remove the field for declined features.
@@ -371,7 +369,7 @@ When launcher is available, do not prioritize raw scripts.
371
369
 
372
370
  ## Error Recovery & Resume
373
371
 
374
- If validation fails or a session is interrupted → read `references/error-recovery.md` for the full error type table, decision tree, retry logic, and checkpoint-based resume support.
372
+ If validation fails or a session is interrupted → read `${SKILL_DIR}/references/error-recovery.md` for the full error type table, decision tree, retry logic, and checkpoint-based resume support.
375
373
 
376
374
  Key behaviors:
377
375
  - Warnings only → proceed with user approval
@@ -0,0 +1,48 @@
1
+ # Architecture Decision Capture
2
+
3
+ During planning, key **framework-level** architectural decisions may emerge. When they do, capture them in the project instruction file so all future AI sessions have this context.
4
+
5
+ ## What Qualifies (ALL must apply)
6
+
7
+ Only capture decisions that are **framework-shaping** — NOT individual feature details. Qualifying categories:
8
+
9
+ | Category | Examples |
10
+ |----------|----------|
11
+ | Tech stack choices | PostgreSQL over MongoDB, React over Vue, Node.js runtime |
12
+ | Communication patterns | REST vs GraphQL, WebSocket vs SSE vs polling |
13
+ | Architectural patterns | Monorepo, microservices, monolith, event-driven |
14
+ | Data model strategies | Relational vs document, event sourcing, CQRS |
15
+ | Security architecture | JWT vs session, OAuth provider, RBAC model |
16
+
17
+ **Do NOT capture**: individual feature implementation details, UI component choices, specific API endpoint designs, or anything scoped to a single feature.
18
+
19
+ **This is conditional** — most planning sessions will NOT produce architecture decisions. Only capture when genuinely impactful decisions are made during the discussion.
20
+
21
+ ## When to Capture
22
+
23
+ After Phase 5 (DAG verification), before Phase 6 (JSON generation). At this point decisions are settled.
24
+
25
+ ## How to Capture
26
+
27
+ 1. **Detect platform** — determine which project instruction file to update:
28
+ - `.claude/` directory exists → append to `CLAUDE.md`
29
+ - `.codebuddy/` directory exists → append to `CODEBUDDY.md`
30
+ - Both exist → append to both
31
+ - Neither exists → skip (no project instruction file)
32
+
33
+ 2. **Check for existing section** — read the target file and look for `### Architecture Decisions` heading:
34
+ - If heading exists → append new entries below it (avoid duplicates with existing entries)
35
+ - If heading does not exist → create it at the end of the file
36
+
37
+ 3. **Format** — one line per decision, no feature IDs:
38
+ ```markdown
39
+ ### Architecture Decisions
40
+ - WebSocket for real-time: sub-second latency required for collaboration features
41
+ - PostgreSQL: relational data model with complex queries, ACID compliance needed
42
+ - Monorepo structure: shared types between frontend and backend
43
+ ```
44
+
45
+ 4. **User confirmation** — before writing, show the collected decisions and ask:
46
+ > "These architecture decisions were identified during planning. Record them to [CLAUDE.md / CODEBUDDY.md]? (Y/n)"
47
+
48
+ If user declines, skip without further prompting.
@@ -0,0 +1,101 @@
1
+ # Brainstorm Guide — Structured Ideation Before Implementation
2
+
3
+ > Separate WHAT from HOW. Explore the problem space before committing to a solution.
4
+
5
+ This guide provides the structured brainstorming framework used in Phase 1 of the workflow.
6
+ The AI facilitates this process as a **design collaborator**, not a builder.
7
+
8
+ ## Four Phases
9
+
10
+ ### Phase A: Assess Clarity
11
+
12
+ Evaluate the user's initial goal statement:
13
+
14
+ - **Clear** — Specific and actionable (e.g., "add JWT auth to the API")
15
+ - **Vague** — Direction exists but needs narrowing (e.g., "improve security")
16
+ - **Exploring** — No firm goal yet, just a direction (e.g., "something with auth")
17
+
18
+ If **vague** or **exploring**, ask follow-up questions to sharpen the goal.
19
+ Do NOT proceed until there is a concrete, testable problem statement (one sentence).
20
+
21
+ ### Phase B: Understand the Idea
22
+
23
+ Answer these questions (use codebase exploration as needed):
24
+
25
+ 1. **What problem does this solve?** — State the pain point in concrete terms.
26
+ 2. **Who benefits?** — End users, developers, operators?
27
+ 3. **What exists today?** — Current state, prior art in the codebase, adjacent systems.
28
+ 4. **What constraints matter?** — Performance, compatibility, security, timeline.
29
+
30
+ Non-functional requirements to explicitly clarify or propose defaults for:
31
+ - Performance expectations
32
+ - Scale (users, data, traffic)
33
+ - Security or privacy constraints
34
+ - Reliability / availability needs
35
+ - Maintenance and ownership expectations
36
+
37
+ If the user is unsure on any point, propose reasonable defaults and clearly mark them as **assumptions**.
38
+
39
+ Summarize findings before moving on. If anything is unclear, ask.
40
+
41
+ ### Phase C: Explore Approaches
42
+
43
+ Generate **2-3 distinct approaches**. For each:
44
+
45
+ - **Name** — Short label (e.g., "JWT middleware", "OAuth proxy")
46
+ - **How it works** — 2-3 sentences
47
+ - **Pros** — What it gets right
48
+ - **Cons** — What it gets wrong or defers
49
+ - **Effort** — Rough scope (small / medium / large)
50
+
51
+ #### Adversarial Critique (Red Team)
52
+
53
+ Before asking the user to choose, stress-test each approach using the red team checklist
54
+ (`${SKILL_DIR}/references/red-team-checklist.md`):
55
+
56
+ 1. What breaks first?
57
+ 2. What's the hidden cost?
58
+ 3. What assumption is wrong?
59
+ 4. Who disagrees?
60
+
61
+ Mark any approach that fails 2+ red team questions as **HIGH RISK**.
62
+ If all approaches fail, generate a hybrid addressing the weaknesses.
63
+
64
+ Present the comparison and let the user pick an approach or request a hybrid.
65
+
66
+ ### Phase D: Capture Design
67
+
68
+ Produce a structured summary:
69
+
70
+ ```markdown
71
+ ## Problem Statement
72
+ [One sentence, testable]
73
+
74
+ ## Approaches Considered
75
+ [2-3 approaches with pros/cons/effort]
76
+
77
+ ## Selected Approach
78
+ [User's choice + rationale]
79
+
80
+ ## Assumptions
81
+ [All assumptions explicitly listed]
82
+
83
+ ## Open Questions
84
+ [Unresolved items, if any]
85
+
86
+ ## Key Decisions
87
+ [What was decided and why — alternatives and rationale]
88
+ ```
89
+
90
+ This summary becomes the input for the next phase (specification or planning).
91
+
92
+ ---
93
+
94
+ ## Rules
95
+
96
+ - Ask as many questions as needed — no rushing
97
+ - One topic at a time for complex clarifications
98
+ - Prefer multiple-choice questions when possible
99
+ - Assumptions must be explicit, never silent
100
+ - YAGNI ruthlessly — avoid premature complexity
101
+ - Do NOT implement, code, or modify behavior during brainstorming
@@ -0,0 +1,34 @@
1
+ # Browser Interaction Planning
2
+
3
+ For web apps with UI, features that involve user-facing pages or interactive flows can optionally include a `browser_interaction` field. This enables the dev-pipeline to verify UI behavior automatically using `playwright-cli` after implementation.
4
+
5
+ ## How to Capture
6
+
7
+ During Phase 4 (refine descriptions and acceptance criteria), for qualifying features ask:
8
+
9
+ > "This feature has UI behavior. Want to add browser verification so the pipeline can auto-check it after implementation? (Y/n)"
10
+
11
+ If yes, generate the `browser_interaction` object:
12
+
13
+ ```json
14
+ {
15
+ "browser_interaction": {
16
+ "url": "http://localhost:3000/login",
17
+ "setup_command": "npm run dev",
18
+ "verify_steps": [
19
+ "snapshot",
20
+ "click <ref> — click login button",
21
+ "fill <ref> 'test@example.com' — enter email",
22
+ "screenshot"
23
+ ],
24
+ "screenshot": true
25
+ }
26
+ }
27
+ ```
28
+
29
+ ## Field Rules
30
+
31
+ - `url` is required — the page URL to verify
32
+ - `setup_command` is optional — command to start dev server (omit if already running)
33
+ - `verify_steps` are descriptive placeholders — the actual `ref` IDs are resolved at runtime by `playwright-cli snapshot`. Use natural language descriptions (e.g., "click login button") that the pipeline agent will map to real refs.
34
+ - `screenshot` defaults to `true` — capture final state for human review
@@ -0,0 +1,109 @@
1
+ # Error Recovery & Resume Support
2
+
3
+ Structured error handling for validation failures, interrupted sessions, and checkpoint-based resumption.
4
+
5
+ ## Validation Failures
6
+
7
+ When `python3 scripts/validate-and-generate.py validate --input <file> --mode <mode>` returns errors:
8
+
9
+ ### Parse validation output
10
+ Script returns JSON with `"valid": false`, `"errors": [...]`, `"warnings": [...]`
11
+
12
+ ### Decision Tree
13
+
14
+ **if `error_count == 0` (warnings only):**
15
+ - Proceed with user approval
16
+ - Show warnings and ask: "Continue? (Y/n)"
17
+
18
+ **elif `error_count > 0` (critical errors):**
19
+
20
+ Group errors by type and apply targeted fixes:
21
+
22
+ | Error Type | Symptom | Fix Offered | Auto-Fix? |
23
+ |-----------|---------|------------|-----------|
24
+ | **Schema mismatch** | `$schema` invalid, missing `app_name`, wrong `features` type | "Set `$schema` to `dev-pipeline-feature-list-v1`, `app_name` to string" | Yes |
25
+ | **Feature ID issues** | Invalid format (not `F-NNN`), duplicate IDs, undefined refs | "Suggest corrected IDs, show duplicates" | Yes |
26
+ | **Dependency errors** | Circular dependency, undefined target features | "Show cycle chain (e.g., `F-003 → F-005 → F-003`), suggest break point" | No |
27
+ | **Missing fields** | Feature missing required keys (title, description, AC) | "List each feature + missing keys, guide patch" | Partial |
28
+ | **Insufficient AC** | Feature has <2 acceptance criteria | "Show feature, suggest AC examples" | No |
29
+ | **Invalid values** | complexity not in [low/medium/high], status not pending | "Show field, valid values" | Yes |
30
+
31
+ ### Execution
32
+
33
+ ```
34
+ For auto-fixable errors:
35
+ 1. Show summary: "Found N schema/ID/format issues"
36
+ 2. Offer: auto-fix? (Y/n)
37
+ 3. Apply fix → regenerate file
38
+ 4. Re-run validation
39
+ 5. If new errors → loop (max 2 more attempts)
40
+
41
+ For manual fixes (dependencies, AC content):
42
+ 1. Show concise prompt: "Edit line X-Y in feature-list.json"
43
+ 2. Wait for user action
44
+ 3. Retry validation (max 2 more attempts)
45
+
46
+ if all_retries_exceeded:
47
+ → Escalate: "After 3 attempts, validation still fails.
48
+ (a) Review file manually, OR
49
+ (b) Restart planning from Phase 1"
50
+ ```
51
+
52
+ ## Resume Support
53
+
54
+ App-planner sessions can be resumed from the last completed checkpoint without losing context.
55
+
56
+ ### Detection Logic
57
+
58
+ Check for artifact files in working directory:
59
+
60
+ | Artifacts Found | Last Completed Checkpoint | Next Phase | Resume Action |
61
+ |-----------------|--------------------------|-----------|----------------|
62
+ | None | (new session) | Phase 1: Vision | Start fresh planning |
63
+ | `feature-list.json` exists | CP-AP-6 (file generated) | Phase 7: Final validation | Offer to validate or extend |
64
+ | `feature-list.json` + validation passed | CP-AP-7 (validation pass) | Handoff: dev-pipeline | Offer: execute pipeline now |
65
+ | Partial state (incomplete file) | CP-AP-2 or CP-AP-4 | Next phase after last checkpoint | Resume interactive planning |
66
+ | User restarts mid-session | User says "restart" | Phase 1: Vision | Return to Phase 1 Vision, or load previous checkpoint if requested |
67
+ | Max validation retries exceeded | 3 failed validation loops | Escalation | Offer: (a) manual review, (b) restart from Phase 1 |
68
+
69
+ ### Resume Command (Project Structure)
70
+
71
+ For projects using `.prizmkit/` structure:
72
+
73
+ ```bash
74
+ # Explicit resume (if file is not in current directory):
75
+ app-planner --resume-from <path-to-existing-feature-list.json>
76
+ ```
77
+
78
+ AI detects existing file → suggests:
79
+ ```
80
+ "Existing plan found with N features, M newly added.
81
+ Resume incremental planning? (Y/n)"
82
+ ```
83
+
84
+ ### Incremental Mode Abort
85
+
86
+ If in Incremental mode but existing `feature-list.json` not found:
87
+ - Ask: "Start new plan or provide existing file?"
88
+ - If new plan chosen → switch to Route A (New App Planning)
89
+ - If existing file uploaded → continue Route B
90
+
91
+ ### Artifact Path Convention
92
+
93
+ **CRITICAL PATH RULE**: `feature-list.json` MUST be written to the project root directory
94
+ (same level as `package.json` / `.git`).
95
+
96
+ Before writing, verify: `ls package.json .git 2>/dev/null` — if these exist in the current
97
+ directory, you are at the project root. NEVER write to `dev-pipeline/` or any subdirectory.
98
+
99
+ After writing, verify: `ls -la feature-list.json` from project root.
100
+
101
+ ```
102
+ <project-root>/
103
+ ├── feature-list.json # Primary output (always here, at project root)
104
+ └── .prizmkit/planning/ # Optional organization for backups
105
+ ├── feature-list.validated.json # Checkpoint backup after CP-AP-7
106
+ └── <ISO-timestamp>.backup.json # Optional incremental backups
107
+ ```
108
+
109
+ The pipeline reads `feature-list.json` from the project root by default. If the user specifies a custom path, the launcher accepts it as an argument.
@@ -0,0 +1,71 @@
1
+ # Frontend Design Guide — High-Quality UI Implementation
2
+
3
+ > Create distinctive, production-grade frontend interfaces that avoid generic "AI slop" aesthetics.
4
+
5
+ **App-planner context**: In the planning phase, use this guide to establish **design direction decisions** (aesthetic tone, typography approach, color strategy, layout philosophy). Do NOT produce CSS, code, or implementation artifacts — capture the design direction as decisions in the project instruction file (`CLAUDE.md` / `CODEBUDDY.md`). Downstream implementation skills will consume these decisions when building features.
6
+
7
+ Load this guide **only when the feature involves frontend/UI work**. Skip for backend-only or infrastructure features.
8
+
9
+ ## Context Gathering (Required Before Design)
10
+
11
+ Before any UI design work, gather:
12
+ - **Target audience**: Who uses this product and in what context?
13
+ - **Use cases**: What jobs are they trying to get done?
14
+ - **Brand personality/tone**: How should the interface feel?
15
+
16
+ You cannot infer this from codebase alone — ask the user.
17
+
18
+ ## Design Direction
19
+
20
+ Commit to a **bold** aesthetic direction:
21
+ - **Purpose**: What problem does this interface solve? Who uses it?
22
+ - **Tone**: Pick an intentional aesthetic — minimalist, editorial, brutalist, organic, luxury, playful, retro-futuristic, industrial, art deco, etc.
23
+ - **Differentiation**: What makes this unforgettable?
24
+
25
+ ## Typography
26
+
27
+ - Choose distinctive fonts. Pair a display font with a refined body font.
28
+ - Use a modular type scale with fluid sizing (`clamp()`)
29
+ - Vary font weights and sizes for clear visual hierarchy
30
+ - Avoid overused fonts: Inter, Roboto, Arial, Open Sans, system defaults
31
+ - Avoid monospace as lazy shorthand for "technical" vibes
32
+
33
+ ## Color & Theme
34
+
35
+ - Use modern CSS color functions (oklch, color-mix, light-dark)
36
+ - Tint neutrals toward brand hue for subconscious cohesion
37
+ - Avoid: pure black (#000) or pure white (#fff) — always tint
38
+ - Avoid: the AI palette (cyan-on-dark, purple-to-blue gradients, neon accents on dark)
39
+ - Avoid: gradient text for "impact", default dark mode with glowing accents
40
+
41
+ ## Layout & Space
42
+
43
+ - Create visual rhythm through varied spacing — not uniform padding
44
+ - Use fluid spacing with `clamp()` that breathes on larger screens
45
+ - Embrace asymmetry and unexpected compositions
46
+ - Avoid: wrapping everything in cards, nesting cards, identical card grids
47
+ - Avoid: centering everything, uniform spacing
48
+
49
+ ## Motion
50
+
51
+ - Focus on high-impact moments: one well-orchestrated page load > scattered micro-interactions
52
+ - Use exponential easing for natural deceleration
53
+ - Use `transform` and `opacity` only — avoid animating layout properties
54
+ - Avoid: bounce/elastic easing, excessive animations
55
+
56
+ ## Interaction
57
+
58
+ - Use progressive disclosure — start simple, reveal complexity through interaction
59
+ - Design empty states that teach the interface
60
+ - Use optimistic UI — update immediately, sync later
61
+ - Avoid: making every button primary, redundant headers
62
+
63
+ ## Responsive
64
+
65
+ - Use container queries (`@container`) for component-level responsiveness
66
+ - Adapt the interface for different contexts, don't just shrink it
67
+ - Never hide critical functionality on mobile
68
+
69
+ ## The AI Slop Test
70
+
71
+ If you showed this interface to someone and said "AI made this", would they believe you immediately? If yes, redesign. A distinctive interface should make someone ask "how was this made?" not "which AI made this?"
@@ -0,0 +1,112 @@
1
+ # Incremental Feature Planning Reference
2
+
3
+ Use this reference when the user adds features to an existing app/plan.
4
+
5
+ ## Pre-Checks
6
+
7
+ 1. Read existing `feature-list.json`.
8
+ 2. Determine current max ID and continue from next `F-NNN`.
9
+ 3. **Detect existing writing style** (see §Style Detection below).
10
+ 4. Preserve compatibility with existing dependency structure.
11
+ 5. **Project conventions** — handled by the main SKILL.md flow (rule #6) before incremental planning begins. No additional action needed here; conventions are loaded automatically.
12
+
13
+ If `feature-list.json` is missing, ask whether to initialize a new plan.
14
+
15
+ ## Style Detection (Automatic)
16
+
17
+ Before drafting new features, analyze existing plan to preserve consistency:
18
+
19
+ 1. **Language Detection**
20
+ - Scan `title` and `description` fields
21
+ - If >70% English titles → default to English
22
+ - If >70% Chinese titles → suggest Chinese (or allow bilingual)
23
+
24
+ 2. **Description Density**
25
+ - Calculate avg word count per description
26
+ - If avg <30 words → draft concise descriptions
27
+ - If avg 30-80 words → draft standard detail
28
+ - If avg >80 words → draft detailed descriptions
29
+
30
+ 3. **Acceptance Criteria Patterns**
31
+ - Count avg AC per feature
32
+ - Identify dominant format (Given/When/Then Gherkin, BDD, or loose)
33
+ - Draft new AC in same format
34
+
35
+ 4. **Complexity Distribution**
36
+ - Count low/medium/high distribution in existing features
37
+ - Alert if new features deviate significantly (>20 percentile points)
38
+ - Suggest rebalancing if needed
39
+
40
+ ### Style Consistency Prompt
41
+
42
+ If new features deviate significantly from detected style:
43
+
44
+ ```
45
+ "Your new features use avg X words/description, but existing features use Y.
46
+ Current ratio: low:M%, medium:N%, high:O%.
47
+ Adjust new features to match? (Y/n)"
48
+ ```
49
+
50
+ Accept user choice, then adjust draft accordingly before JSON generation.
51
+
52
+ ## Incremental Planning Flow
53
+
54
+ ### Step 1: Clarify Increment Scope
55
+ Capture:
56
+ - business objective of the new increment
57
+ - affected existing modules/features
58
+ - timeline or priority constraints
59
+
60
+ ### Step 2: Impact Mapping
61
+ For each candidate feature, identify:
62
+ - upstream dependencies
63
+ - downstream impacts
64
+ - risk hotspots (auth, data migration, API compatibility)
65
+
66
+ ### Step 3: Append Features
67
+ Append new items only (do not rewrite old validated features unless user asks).
68
+
69
+ For each new feature:
70
+ - assign next ID
71
+ - set `status: "pending"`
72
+ - link dependencies to existing IDs where needed
73
+ - keep title in English
74
+ - **write rich descriptions** (see `${SKILL_DIR}/assets/planning-guide.md` §4):
75
+ - minimum 15 words (validation error below this)
76
+ - recommended: 30+ words (low), 50+ words (medium), 80+ words (high complexity)
77
+ - include: what to build, key behaviors, integration points, data model, error/edge cases
78
+
79
+ ### Step 4: Rebalance Priority
80
+ Allow priority updates for both old and new features if user requests reprioritization.
81
+ Keep dependency correctness as first constraint.
82
+
83
+ ### Step 5: Validate
84
+ Run:
85
+ ```bash
86
+ python3 ${SKILL_DIR}/scripts/validate-and-generate.py validate --input feature-list.json --mode incremental
87
+ ```
88
+
89
+ Fix and re-run until pass.
90
+
91
+ ## Merge/Rewrite Rules
92
+
93
+ - Default: append only
94
+ - Rewrite existing features only when user explicitly asks
95
+ - Never break valid IDs/references
96
+ - Never set new features to `in_progress` or `completed`
97
+
98
+ ## Practical Prompts
99
+
100
+ Use concise prompts during interaction:
101
+ - "What is the goal of this increment? Which user problem is the priority?"
102
+ - "Which existing Feature IDs do these new features depend on?"
103
+ - "Do you want to reprioritize at the same time, or just append to the current sequence?"
104
+
105
+ ## Final Delivery Checklist
106
+
107
+ - [ ] Existing file read before edits
108
+ - [ ] New IDs continue sequence
109
+ - [ ] Existing style preserved
110
+ - [ ] Dependency graph still DAG
111
+ - [ ] Validation passes
112
+ - [ ] Next step recommendation follows priority: launcher → daemon → run.sh
@@ -0,0 +1,85 @@
1
+ # New App Planning Reference
2
+
3
+ Use this reference when the user is planning a product from scratch.
4
+
5
+ ## Phase Guide
6
+
7
+ ### Phase 1: Vision
8
+ Capture:
9
+ - problem statement
10
+ - target users
11
+ - core value proposition
12
+ - non-goals (what to exclude from MVP)
13
+
14
+ ### Phase 2: Stack Defaults
15
+ If user has no preference, propose defaults aligned with project conventions:
16
+ - Frontend: Next.js + TypeScript
17
+ - Backend: Express/Nest (choose one and stay consistent)
18
+ - DB: PostgreSQL
19
+ - ORM: Prisma
20
+ - Test: unit + e2e baseline
21
+
22
+ If `.prizmkit/config.json` exists, prioritize its settings.
23
+
24
+ ### Phase 3: MVP Features
25
+ Rules:
26
+ - Include foundational setup feature first
27
+ - Aim for 5-12 features in the MVP
28
+ - Keep each feature implementable in one pipeline unit unless clearly too large
29
+
30
+ For each feature define:
31
+ - `id`
32
+ - `title`
33
+ - `description`
34
+ - `priority` — string: `"high"`, `"medium"`, or `"low"` (never numeric)
35
+ - `estimated_complexity`
36
+ - `dependencies`
37
+ - `acceptance_criteria`
38
+ - `status: "pending"`
39
+ - `browser_interaction` (optional — for UI features, see §Browser Interaction Planning in SKILL.md)
40
+
41
+ ### Phase 4: Dependency & Priority
42
+ Check:
43
+ - no cycles
44
+ - all dependency targets exist
45
+ - order is executable
46
+ - priorities align with delivery value and risk
47
+
48
+ ### Phase 5: Granularity
49
+ Split into `sub_features` when:
50
+ - scope crosses too many modules
51
+ - acceptance criteria are excessive
52
+ - complexity is high and uncertainty is high
53
+
54
+ ### Phase 6: Generate + Validate
55
+ 1. Write `feature-list.json`.
56
+ 2. Run:
57
+ ```bash
58
+ python3 ${SKILL_DIR}/scripts/validate-and-generate.py validate --input feature-list.json --mode new
59
+ ```
60
+ 3. Fix all errors, then re-run.
61
+
62
+ ## Quality Rules
63
+
64
+ - Keep titles concise and English
65
+ - Make descriptions implementation-oriented (clear boundaries, interfaces, behavior)
66
+ - **Description depth by complexity:**
67
+ - **Low complexity**: ≥30 words — what to build, key behavior, which files/modules are affected
68
+ - **Medium complexity**: ≥50 words — add integration points, data model overview, error handling approach
69
+ - **High complexity**: ≥80 words — add architecture decisions, performance considerations, security implications, migration strategy if applicable
70
+ - **Description must cover** (adapt per feature):
71
+ 1. **What**: concrete deliverable (API endpoints, UI components, data models)
72
+ 2. **How it integrates**: which existing modules/services it connects to
73
+ 3. **Key behaviors**: business rules, validation rules, state transitions
74
+ 4. **Data model**: entities, relationships, key fields (when applicable)
75
+ 5. **Error/edge cases**: what happens on failure, empty states, limits
76
+ - Write testable acceptance criteria (at least 3; prefer 5+ for medium/high)
77
+ - Keep dependency graph simple and explicit
78
+
79
+ ## Final Delivery Checklist
80
+
81
+ - [ ] User confirmed MVP scope
82
+ - [ ] IDs are sequential
83
+ - [ ] `status` initialized to `pending`
84
+ - [ ] Validation passes
85
+ - [ ] Next step recommendation follows priority: launcher → daemon → run.sh
@@ -0,0 +1,93 @@
1
+ # Project Conventions — First-Run Setup Questions
2
+
3
+ > Capture project-wide norms once, reuse across all planning sessions.
4
+
5
+ These questions establish foundational conventions that affect every feature. They should be asked **once** during the first `app-planner` session and persisted to `.prizmkit/project-conventions.json` so subsequent sessions skip them.
6
+
7
+ ## Persistence
8
+
9
+ - **File**: `.prizmkit/project-conventions.json`
10
+ - **Read on startup**: If the file exists and a convention has a non-null value, skip that question.
11
+ - **Write after asking**: Save answers immediately after the user responds.
12
+ - **Shared**: Other skills (`prizmkit-init`, `dev-pipeline`) may also read this file.
13
+
14
+ ## Convention Questions
15
+
16
+ Ask only unanswered conventions. Group related questions together in a single prompt when possible.
17
+
18
+ ### 1. UI Display Language
19
+
20
+ **Key**: `ui_language`
21
+
22
+ > "What is the primary language for the application's user interface? (e.g., English, 中文, 日本語, etc.)"
23
+
24
+ **Follow-up if applicable**: If the user specifies a non-English language, confirm whether all UI text (buttons, labels, error messages, tooltips) should be in that language.
25
+
26
+ ### 2. Multi-Language Support (i18n)
27
+
28
+ **Key**: `i18n_enabled`, `i18n_languages`
29
+
30
+ > "Does the application need multi-language support (i18n)?"
31
+
32
+ - If **yes** → ask: "Which languages should be supported? List all target languages."
33
+ - If **no** → set `i18n_enabled: false`, skip `i18n_languages`.
34
+
35
+ **Impact on planning**: If i18n is enabled, add an infrastructure feature for i18n setup (framework, translation file structure, language switcher) early in the dependency graph.
36
+
37
+ ### 3. Date, Time & Currency Formats
38
+
39
+ **Key**: `date_format`, `timezone_strategy`, `currency`
40
+
41
+ > "What are your preferences for date/time and currency display?"
42
+
43
+ Sub-questions (ask as a group):
44
+ - **Date format**: "Preferred date display format?" (e.g., `YYYY-MM-DD`, `MM/DD/YYYY`, `DD/MM/YYYY`, locale-auto)
45
+ - **Timezone strategy**: "How should the app handle timezones?" (e.g., UTC storage + local display, user-selected timezone, server timezone only)
46
+ - **Currency**: "If the app handles money, which currency format?" (e.g., USD `$1,234.56`, CNY `¥1,234.56`, EUR `€1.234,56`, or N/A if no monetary values)
47
+
48
+ If the user says "not applicable" for currency, set `currency: null`.
49
+
50
+ ### 4. Code & Git Language Conventions
51
+
52
+ **Key**: `code_comment_language`, `git_commit_language`
53
+
54
+ > "What language should be used for code comments and git commit messages?"
55
+
56
+ Options to present:
57
+ - **Code comments**: English / Chinese / Match UI language / Mixed
58
+ - **Git commit messages**: English / Chinese / Match code comments
59
+
60
+ **Default suggestion**: English for both (widely accessible, compatible with open-source contribution).
61
+
62
+ ## JSON Schema
63
+
64
+ ```json
65
+ {
66
+ "ui_language": "English",
67
+ "i18n_enabled": false,
68
+ "i18n_languages": [],
69
+ "date_format": "YYYY-MM-DD",
70
+ "timezone_strategy": "utc_storage_local_display",
71
+ "currency": null,
72
+ "code_comment_language": "English",
73
+ "git_commit_language": "English"
74
+ }
75
+ ```
76
+
77
+ ## How Conventions Are Used
78
+
79
+ Conventions are **AI context** — they inform your behavior during planning but are NOT written into `feature-list.json` `global_context` fields. Specifically:
80
+
81
+ - `ui_language` → Write feature descriptions and acceptance criteria in a way that acknowledges the target UI language (e.g., mention CJK text handling if Chinese, RTL layout if Arabic)
82
+ - `i18n_enabled` → If true, consider proposing an i18n infrastructure feature early in the dependency graph; ensure feature descriptions mention translation-ready patterns
83
+ - `date_format`, `timezone_strategy` → When features involve date/time display or storage, reference the chosen convention in feature descriptions
84
+ - `currency` → When features involve monetary values, reference the currency convention in descriptions
85
+ - `code_comment_language` → Inform the language used in code-related examples within feature descriptions
86
+ - `git_commit_language` → Inform pipeline and workflow language expectations
87
+
88
+ ## Rules
89
+
90
+ - **Ask at most once per convention** — if answered, never re-ask unless user invokes a reset.
91
+ - **No blocking** — if the user skips a question ("I'll decide later"), set value to `null` and move on. The question will be re-asked next session.
92
+ - **Respect existing config** — if `.prizmkit/config.json` already has equivalent fields (e.g., `tech_stack.language`), do not duplicate. Only ask questions not covered by existing config.
93
+ - **Minimal interruption** — batch all unanswered questions into a single interaction round, not one-by-one.
@@ -0,0 +1,40 @@
1
+ # Red Team Checklist for Ideation
2
+
3
+ Use these questions to stress-test approaches during brainstorm Phase C.
4
+
5
+ ## Structural Questions
6
+
7
+ 1. **What breaks first?** — Identify the weakest link under stress (load, concurrency, edge cases, adversarial input). If you can't name a specific failure mode, the approach is under-specified.
8
+
9
+ 2. **What's the hidden cost?** — Every approach has costs beyond implementation time: maintenance burden, cognitive load for new contributors, infrastructure requirements, monitoring needs, migration complexity.
10
+
11
+ 3. **What assumption is wrong?** — List the unstated assumptions. Which one, if false, invalidates the approach? Common false assumptions: "the API won't change", "data fits in memory", "users will read the docs", "this library is maintained".
12
+
13
+ 4. **Who disagrees?** — Steel-man the opposing view. A performance engineer and a UX designer will critique the same approach differently. What does the most skeptical qualified person say?
14
+
15
+ ## Scoring
16
+
17
+ | Red Team Failures | Classification |
18
+ |-------------------|---------------|
19
+ | 0 | Strong approach |
20
+ | 1 | Viable with mitigation |
21
+ | 2+ | HIGH RISK — needs rethinking or mitigation plan |
22
+
23
+ ## When All Approaches Are HIGH RISK
24
+
25
+ If every approach fails 2+ questions, the problem statement may be wrong. Consider:
26
+ - Is the goal too broad? Split it.
27
+ - Is there a constraint you haven't stated? Surface it.
28
+ - Generate a hybrid approach that addresses the specific red team failures.
29
+
30
+ ## Common False Assumptions
31
+
32
+ | Assumption | Why It Fails |
33
+ |-----------|-------------|
34
+ | "The API won't change" | External APIs change without notice; pin versions and add contract tests |
35
+ | "Data fits in memory" | Works in dev, breaks in prod when dataset grows 10x |
36
+ | "Users will read the docs" | They won't; make the happy path obvious and errors informative |
37
+ | "This library is maintained" | Check commit history; many popular libraries are effectively abandoned |
38
+ | "We can refactor later" | Technical debt compounds; later never comes without explicit scheduling |
39
+ | "Performance doesn't matter yet" | Architecture decisions that ignore performance are expensive to fix |
40
+ | "The team will adopt it" | New tools need champions, training, and visible wins to gain traction |
@@ -0,0 +1,30 @@
1
+ {
2
+ "skill_name": "recovery-workflow",
3
+ "notes": "These test cases require pipeline state setup. Each has a setup_script that creates the necessary mock state before the skill is invoked. Due to the interactive nature and state-dependency of this skill, automated evaluation is limited — qualitative review is primary.",
4
+ "evals": [
5
+ {
6
+ "id": 1,
7
+ "name": "scenario-a-mid-implementation",
8
+ "prompt": "F-003 的 session 超时了,工作区里已经写了大部分代码了,帮我恢复一下",
9
+ "expected_output": "Should detect existing spec.md, plan.md with partial task completion, code changes in worktree. Should recommend 'Smart Resume' (continue_implementation). Should present clear recovery report showing what was found and what remains.",
10
+ "setup_description": "Create feature-list.json with F-003, .prizmkit/specs/003-*/spec.md and plan.md (3/5 tasks checked), some source files modified, pipeline state showing 'failed'.",
11
+ "files": []
12
+ },
13
+ {
14
+ "id": 2,
15
+ "name": "scenario-b-plan-only",
16
+ "prompt": "我看到 F-001 有 spec 和 plan 但是还没开始写代码,帮我从 plan 开始执行",
17
+ "expected_output": "Should detect spec.md and plan.md exist but no code changes. Should recommend 'Start Implementation'. Should read existing plan and begin implementing from task 1.",
18
+ "setup_description": "Create feature-list.json with F-001, .prizmkit/specs/001-*/spec.md and plan.md (0/4 tasks checked), no code changes, pipeline state showing 'failed' at early phase.",
19
+ "files": []
20
+ },
21
+ {
22
+ "id": 3,
23
+ "name": "scenario-a-nearly-complete",
24
+ "prompt": "recover F-005. Token limit exceeded but I think the code is mostly done, tests should be passing",
25
+ "expected_output": "Should detect all plan tasks completed, code changes present, tests passing. Should recommend 'Review & Commit Only'. Should proceed to code review and commit without re-implementing.",
26
+ "setup_description": "Create feature-list.json with F-005, full spec.md and plan.md (all tasks checked), source files complete and tests passing, pipeline state showing 'failed' with token limit error.",
27
+ "files": []
28
+ }
29
+ ]
30
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "prizmkit",
3
- "version": "1.0.148",
3
+ "version": "1.0.150",
4
4
  "description": "Create a new PrizmKit-powered project with clean initialization — no framework dev files, just what you need.",
5
5
  "type": "module",
6
6
  "bin": {
package/src/scaffold.js CHANGED
@@ -156,11 +156,11 @@ export async function installSkills(platform, skills, projectRoot, dryRun) {
156
156
  if (!frontmatter.name) frontmatter.name = skillName;
157
157
  await fs.writeFile(path.join(targetDir, 'SKILL.md'), buildMarkdown(frontmatter, body));
158
158
 
159
- // 复制 assets/ scripts/
160
- for (const subdir of ['assets', 'scripts']) {
161
- const srcSubdir = path.join(corePath, subdir);
162
- if (await fs.pathExists(srcSubdir)) {
163
- await fs.copy(srcSubdir, path.join(targetDir, subdir));
159
+ // 复制所有子目录(assets/, scripts/, references/ 等)
160
+ const cbEntries = await fs.readdir(corePath, { withFileTypes: true });
161
+ for (const entry of cbEntries) {
162
+ if (entry.isDirectory()) {
163
+ await fs.copy(path.join(corePath, entry.name), path.join(targetDir, entry.name));
164
164
  }
165
165
  }
166
166
 
@@ -170,8 +170,9 @@ export async function installSkills(platform, skills, projectRoot, dryRun) {
170
170
  const content = await fs.readFile(skillMdPath, 'utf8');
171
171
  const converted = convertSkillToCommand(content, skillName);
172
172
 
173
- const hasAssets = await fs.pathExists(path.join(corePath, 'assets'));
174
- const hasScripts = await fs.pathExists(path.join(corePath, 'scripts'));
173
+ // Discover all subdirectories in the skill
174
+ const clEntries = await fs.readdir(corePath, { withFileTypes: true });
175
+ const skillSubdirs = clEntries.filter(e => e.isDirectory()).map(e => e.name);
175
176
 
176
177
  // Always write the command file at the flat level (.claude/commands/skillName.md)
177
178
  // so Claude Code shows it as /skillName (not /skillName:skillName).
@@ -184,16 +185,13 @@ export async function installSkills(platform, skills, projectRoot, dryRun) {
184
185
  await fs.ensureDir(commandsDir);
185
186
  await fs.writeFile(path.join(commandsDir, `${skillName}.md`), converted);
186
187
 
187
- if (hasAssets || hasScripts) {
188
- // Place assets/scripts outside .claude/commands/ to prevent Claude Code
188
+ if (skillSubdirs.length > 0) {
189
+ // Place subdirectories outside .claude/commands/ to prevent Claude Code
189
190
  // from registering them as slash commands (e.g. /skillName:assets:file).
190
191
  const assetTargetDir = path.join(projectRoot, '.claude', 'command-assets', skillName);
191
192
  await fs.ensureDir(assetTargetDir);
192
- for (const subdir of ['assets', 'scripts']) {
193
- const srcSubdir = path.join(corePath, subdir);
194
- if (await fs.pathExists(srcSubdir)) {
195
- await fs.copy(srcSubdir, path.join(assetTargetDir, subdir));
196
- }
193
+ for (const subdir of skillSubdirs) {
194
+ await fs.copy(path.join(corePath, subdir), path.join(assetTargetDir, subdir));
197
195
  }
198
196
  }
199
197
  console.log(chalk.green(` ✓ .claude/commands/${skillName}.md`));