@jgamaraalv/ts-dev-kit 2.3.0 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,222 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Core Web Vitals Visual Report Generator
4
+
5
+ Usage:
6
+ python3 visualize.py --lcp 2.1 --inp 180 --cls 0.05
7
+ python3 visualize.py --lighthouse report.json
8
+ python3 visualize.py --lcp 1.8 --inp 95 --cls 0.02 --url https://example.com --output report.html
9
+ """
10
+
11
+ import argparse
12
+ import json
13
+ import sys
14
+ import webbrowser
15
+ from datetime import datetime
16
+ from pathlib import Path
17
+
18
# Per-metric display/threshold configuration, keyed by CLI flag name.
# "good" and "ni" ("needs improvement") are the rating cut-offs in the
# metric's unit; "scale" is the value mapped to the right edge (100%) of the
# card's bar; "fmt" formats a raw value for display.
METRICS = {
    "lcp": {
        "name": "LCP",
        "full": "Largest Contentful Paint",
        "desc": "How fast the main content loads",
        "unit": "s",
        "good": 2.5,
        "ni": 4.0,
        "scale": 6.0,
        "fmt": lambda v: f"{v:.2f} s",
    },
    "inp": {
        "name": "INP",
        "full": "Interaction to Next Paint",
        "desc": "How fast the page responds to interactions",
        "unit": "ms",
        "good": 200,
        "ni": 500,
        "scale": 700,
        "fmt": lambda v: f"{int(v)} ms",
    },
    "cls": {
        "name": "CLS",
        "full": "Cumulative Layout Shift",
        "desc": "How stable the layout is while loading",
        "unit": "",
        "good": 0.1,
        "ni": 0.25,
        "scale": 0.35,
        "fmt": lambda v: f"{v:.3f}",
    },
}

# Hex color per rating bucket; card_html also appends a "30" alpha suffix
# to these for the translucent zone backgrounds.
COLORS = {
    "good": "#0CCE6B",
    "needs-improvement": "#FFA400",
    "poor": "#FF4E42",
}

# Human-readable badge label per rating bucket.
LABELS = {
    "good": "Good",
    "needs-improvement": "Needs Improvement",
    "poor": "Poor",
}
62
+
63
+
64
def get_rating(key, value):
    """Classify *value* for metric *key* as "good", "needs-improvement", or "poor"."""
    thresholds = METRICS[key]
    if value > thresholds["ni"]:
        return "poor"
    return "good" if value <= thresholds["good"] else "needs-improvement"
71
+
72
+
73
def pct(key, value):
    """Position of *value* along the metric's bar, as a percentage capped at 100."""
    fraction = value / METRICS[key]["scale"]
    return min(100, round(fraction * 100))
75
+
76
+
77
def zone_pct(key, threshold):
    """Width of the bar zone ending at *threshold*, as a percentage of the scale."""
    scale = METRICS[key]["scale"]
    return round(threshold / scale * 100)
79
+
80
+
81
def card_html(key, value):
    """Render one metric card (badge, value, threshold bar) as an HTML fragment.

    key: metric id in METRICS ("lcp", "inp", or "cls").
    value: raw metric value in that metric's unit.
    """
    m = METRICS[key]
    rating = get_rating(key, value)
    color = COLORS[rating]
    marker_left = pct(key, value)  # marker position along the bar, 0-100
    good_w = zone_pct(key, m["good"])  # width of the "good" zone
    ni_w = zone_pct(key, m["ni"]) - good_w  # "needs improvement" zone width
    poor_w = 100 - good_w - ni_w  # "poor" zone fills the remainder

    # The "30" appended after a hex color below is a CSS alpha suffix,
    # giving the zones a translucent tint of the rating color.
    return f"""
<div class="card" style="border-top:4px solid {color}">
<div class="card-top">
<span class="metric-name">{m["name"]}</span>
<span class="badge" style="background:{color}">{LABELS[rating]}</span>
</div>
<div class="metric-full">{m["full"]}</div>
<div class="metric-desc">{m["desc"]}</div>
<div class="value" style="color:{color}">{m["fmt"](value)}</div>
<div class="bar-wrap">
<div class="bar-track">
<div class="zone" style="width:{good_w}%;background:{COLORS['good']}30"></div>
<div class="zone" style="width:{ni_w}%;background:{COLORS['needs-improvement']}30"></div>
<div class="zone" style="width:{poor_w}%;background:{COLORS['poor']}30"></div>
</div>
<div class="marker" style="left:{marker_left}%;background:{color}"></div>
</div>
<div class="thresholds">
<span style="color:{COLORS['good']}">&#9679; Good ≤ {m["fmt"](m["good"])}</span>
<span style="color:{COLORS['needs-improvement']}">&#9679; NI ≤ {m["fmt"](m["ni"])}</span>
<span style="color:{COLORS['poor']}">&#9679; Poor > {m["fmt"](m["ni"])}</span>
</div>
</div>"""
113
+
114
+
115
def generate_report(url, lcp, inp, cls_, generated_at):
    """Assemble the full standalone HTML report for the given metric values.

    url: tested URL for display (may be ""); lcp/inp/cls_ may each be None,
    in which case that metric's card is omitted. generated_at is a
    preformatted timestamp string. Returns the complete HTML document.
    """
    values = {"lcp": lcp, "inp": inp, "cls": cls_}
    # Only metrics that were actually supplied get cards and count toward
    # the overall verdict.
    present = {k: v for k, v in values.items() if v is not None}
    ratings = {k: get_rating(k, v) for k, v in present.items()}

    # Overall roll-up: all good -> PASS, any poor -> FAIL, else in between.
    all_good = all(r == "good" for r in ratings.values())
    any_poor = any(r == "poor" for r in ratings.values())
    overall_label = "PASS" if all_good else ("FAIL" if any_poor else "NEEDS IMPROVEMENT")
    overall_color = COLORS["good"] if all_good else (COLORS["poor"] if any_poor else COLORS["needs-improvement"])

    cards = "\n".join(card_html(k, v) for k, v in present.items())
    url_line = f'<p class="url">{url}</p>' if url else ""

    # Doubled braces ({{ }}) below are f-string escapes for literal CSS braces.
    return f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<title>Core Web Vitals Report</title>
<style>
*{{box-sizing:border-box;margin:0;padding:0}}
body{{font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',sans-serif;background:#f4f4f5;color:#18181b}}
header{{background:#fff;border-bottom:1px solid #e4e4e7;padding:24px 32px}}
h1{{font-size:20px;font-weight:700;color:#09090b}}
.url{{font-size:13px;color:#71717a;margin-top:4px;word-break:break-all}}
.meta{{font-size:12px;color:#a1a1aa;margin-top:3px}}
.overall{{display:inline-flex;align-items:center;gap:6px;margin-top:12px;padding:5px 14px;border-radius:999px;font-size:13px;font-weight:700;color:#fff}}
.grid{{display:grid;grid-template-columns:repeat(auto-fit,minmax(270px,1fr));gap:16px;padding:28px 32px;max-width:1050px;margin:0 auto}}
.card{{background:#fff;border-radius:10px;padding:22px;box-shadow:0 1px 3px rgba(0,0,0,.07)}}
.card-top{{display:flex;justify-content:space-between;align-items:center;margin-bottom:3px}}
.metric-name{{font-size:22px;font-weight:800;letter-spacing:-.5px}}
.badge{{font-size:10px;font-weight:700;letter-spacing:.4px;color:#fff;padding:3px 9px;border-radius:999px}}
.metric-full{{font-size:12px;color:#52525b;margin-bottom:2px}}
.metric-desc{{font-size:11px;color:#a1a1aa;margin-bottom:16px}}
.value{{font-size:42px;font-weight:800;letter-spacing:-1.5px;line-height:1;margin-bottom:18px}}
.bar-wrap{{position:relative;height:12px;margin-bottom:12px}}
.bar-track{{display:flex;width:100%;height:8px;border-radius:4px;overflow:hidden;margin-top:2px}}
.zone{{height:100%}}
.marker{{position:absolute;top:50%;transform:translate(-50%,-50%);width:14px;height:14px;border-radius:50%;border:3px solid #fff;box-shadow:0 1px 4px rgba(0,0,0,.25)}}
.thresholds{{display:flex;gap:10px;flex-wrap:wrap;font-size:11px}}
footer{{text-align:center;padding:20px;font-size:12px;color:#d4d4d8}}
</style>
</head>
<body>
<header>
<h1>Core Web Vitals Report</h1>
{url_line}
<p class="meta">Generated: {generated_at}</p>
<div class="overall" style="background:{overall_color}">{overall_label}</div>
</header>
<div class="grid">
{cards}
</div>
<footer>Generated by ts-dev-kit &middot; core-web-vitals skill</footer>
</body>
</html>"""
171
+
172
+
173
def parse_lighthouse(path):
    """Extract Core Web Vitals from a Lighthouse JSON report file.

    Returns a tuple (url, lcp_seconds, inp_ms, cls). Any metric absent from
    the report's audits is returned as None; url falls back to "" when
    neither finalUrl nor requestedUrl is present.
    """
    # Fix: read as UTF-8 explicitly. Lighthouse emits UTF-8 JSON, and the
    # writer side of this script already uses encoding="utf-8"; relying on
    # the platform default encoding can mis-decode on e.g. Windows.
    with open(path, encoding="utf-8") as f:
        data = json.load(f)
    url = data.get("finalUrl") or data.get("requestedUrl") or ""
    audits = data.get("audits", {})
    # `or {}` guards against an audit key that exists but is null.
    lcp_ms = (audits.get("largest-contentful-paint") or {}).get("numericValue")
    inp_ms = (audits.get("interaction-to-next-paint") or {}).get("numericValue")
    cls = (audits.get("cumulative-layout-shift") or {}).get("numericValue")
    # Lighthouse reports LCP in milliseconds; this script works in seconds.
    lcp = lcp_ms / 1000 if lcp_ms is not None else None
    return url, lcp, inp_ms, cls
183
+
184
+
185
def main():
    """CLI entry point: parse flags, build the report, write it, open it."""
    parser = argparse.ArgumentParser(description="Generate a Core Web Vitals HTML report")
    parser.add_argument("--lcp", type=float, help="LCP in seconds (e.g. 2.1)")
    parser.add_argument("--inp", type=float, help="INP in milliseconds (e.g. 180)")
    parser.add_argument("--cls", type=float, help="CLS score (e.g. 0.05)")
    parser.add_argument("--lighthouse", help="Path to a Lighthouse JSON output file")
    parser.add_argument("--url", help="URL that was tested (display only)")
    parser.add_argument("--output", default="cwv-report.html", help="Output HTML file path")
    parser.add_argument("--no-open", action="store_true", help="Do not open in browser")
    args = parser.parse_args()

    url = args.url or ""
    lcp, inp, cls_ = args.lcp, args.inp, args.cls

    # Lighthouse values only fill in what the user did not pass explicitly;
    # command-line flags always win.
    if args.lighthouse:
        lh_url, lh_lcp, lh_inp, lh_cls = parse_lighthouse(args.lighthouse)
        if not url:
            url = lh_url
        if lcp is None:
            lcp = lh_lcp
        if inp is None:
            inp = lh_inp
        if cls_ is None:
            cls_ = lh_cls

    # With no metrics at all there is nothing to report — bail out.
    if lcp is None and inp is None and cls_ is None:
        print("Error: no metrics provided.", file=sys.stderr)
        print("Usage: visualize.py --lcp 2.1 --inp 180 --cls 0.05", file=sys.stderr)
        print(" visualize.py --lighthouse report.json", file=sys.stderr)
        sys.exit(1)

    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    report_path = Path(args.output)
    report_path.write_text(generate_report(url, lcp, inp, cls_, stamp), encoding="utf-8")
    print(f"Report saved to {report_path.resolve()}")

    if not args.no_open:
        webbrowser.open(report_path.resolve().as_uri())
219
+
220
+
221
# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
@@ -2,8 +2,17 @@
2
2
  name: debug
3
3
  description: "End-to-end debugging workflow that triages, reproduces, and fixes bugs across the full stack using multi-agent orchestration. Use when: (1) encountering runtime errors in the API or web app, (2) investigating failed requests or broken user flows, (3) debugging production issues via Sentry/PostHog, (4) tracing data flow across backend and frontend, or (5) the user reports a bug that spans multiple layers."
4
4
  argument-hint: "[error-description or sentry-issue-url]"
5
+ allowed-tools: Bash(git *)
5
6
  ---
6
7
 
8
+ <live_context>
9
+ **Recent git changes (regression candidates):**
10
+ !`git log --oneline -10 2>/dev/null || echo "(not a git repo)"`
11
+
12
+ **Working tree status:**
13
+ !`git status --short 2>/dev/null || echo "(not a git repo)"`
14
+ </live_context>
15
+
7
16
  <trigger_examples>
8
17
  - "Debug why the form submission fails with a 500 error"
9
18
  - "The dashboard page shows a blank screen after login"
@@ -220,33 +229,7 @@ Quick reference for the most frequent bugs in this stack. Use these to accelerat
220
229
  </common_patterns>
221
230
 
222
231
  <output>
223
- When complete, produce a debug report:
224
-
225
- ```
226
- ## Bug resolved
227
-
228
- **Root cause**: one sentence describing why the bug occurred.
229
- **Fix**: one sentence describing what was changed to fix it.
230
-
231
- ### Investigation path
232
- Brief trace of how the root cause was found (which layer, what evidence).
233
-
234
- ### Files changed
235
- List every file created/modified.
236
-
237
- ### Verification
238
- - Reproduction: pass (the original error no longer occurs)
239
- - tsc: pass/fail (per package)
240
- - lint: pass/fail (per package)
241
- - test: pass/fail (per package)
242
- - Browser: pass/fail (if applicable)
243
-
244
- ### Skills loaded
245
- List every Skill() call made.
246
-
247
- ### MCPs used
248
- List every MCP used, or "none".
249
- ```
232
+ When complete, produce a debug report using the template in [template.md](template.md).
250
233
 
251
234
  Do not add explanations, caveats, or follow-up suggestions unless the user asks.
252
235
  </output>
@@ -0,0 +1,23 @@
1
+ ## Bug resolved
2
+
3
+ **Root cause**: one sentence describing why the bug occurred.
4
+ **Fix**: one sentence describing what was changed to fix it.
5
+
6
+ ### Investigation path
7
+ Brief trace of how the root cause was found (which layer, what evidence).
8
+
9
+ ### Files changed
10
+ List every file created/modified.
11
+
12
+ ### Verification
13
+ - Reproduction: pass (the original error no longer occurs)
14
+ - tsc: pass/fail (per package)
15
+ - lint: pass/fail (per package)
16
+ - test: pass/fail (per package)
17
+ - Browser: pass/fail (if applicable)
18
+
19
+ ### Skills loaded
20
+ List every Skill() call made.
21
+
22
+ ### MCPs used
23
+ List every MCP used, or "none".
@@ -1,27 +1,59 @@
1
1
  ---
2
- name: task
3
- description: "Use this skill when the user wants to implement a new task in the project. Covers feature development, refactoring, bug fixes, and any code change that requires context analysis, role assignment, and structured execution. For changes under ~30 lines in a single file, implement directly without this workflow."
4
- argument-hint: "[task-description or task-md-file-path]"
2
+ name: execute-task
3
+ description: "Executes a task, either from a TASK_N.md file or a free-form description. Use when: (1) implementing a task from a TASK_N.md file, (2) the user provides a task file path or says 'execute this task', 'implement TASK_02', or 'run this task', (3) the user describes a task directly without a document. Accepts a file path or a free-form description — if the task document is missing required sections (scope, success criteria, verification plan), automatically calls /generate-tasks to produce it before executing."
4
+ argument-hint: "[task-md-file-path | task-description]"
5
5
  ---
6
6
 
7
7
  <trigger_examples>
8
+ - "Execute TASK_02"
9
+ - "Implement docs/features/search/TASK_01.md"
10
+ - "Run this task"
11
+ - "Execute the task"
8
12
  - "Implement the search feature"
9
13
  - "Build the user creation flow end-to-end"
10
- - "Refactor the auth module to use shared schemas"
11
14
  - "Add the notifications endpoint to the API"
12
- - "Fix the pagination bug"
13
- - "Execute this task"
14
15
  </trigger_examples>
15
16
 
17
+ <task>
18
+ $ARGUMENTS
19
+ </task>
20
+
16
21
  <workflow>
17
22
  Follow each phase in order. Each one feeds the next.
18
23
 
24
+ <phase_0_intake>
25
+ Resolve the input and ensure the task document is complete before executing.
26
+
27
+ **Step 1 — Determine input type:**
28
+ - If `$ARGUMENTS` looks like a file path (ends in `.md`, or starts with `/`, `./`, `docs/`, or contains a directory separator): read the file.
29
+ - Otherwise: treat it as a free-form task description — skip to Step 3.
30
+
31
+ **Step 2 — Validate the task document:**
32
+ A task document is ready for execution when it contains ALL of:
33
+ - `## Scope — Files` with at least one file entry
34
+ - `## Success Criteria` with at least one testable criterion
35
+ - `## Verification Plan` with baseline and post-change checks
36
+
37
+ If all required sections are present → proceed to phase 1.
38
+
39
+ **Step 3 — Generate the task document:**
40
+ If the document is missing any required section, OR the input was a free-form description:
41
+
42
+ 1. Inform the user:
43
+ > **Task document incomplete.** Generating a structured task document via `/generate-tasks` before proceeding.
44
+ 2. Call `Skill(skill: "generate-tasks")` passing the original input as context.
45
+ 3. After generation, note the path of the saved `TASK_N.md` file.
46
+ 4. Read the generated file and confirm all required sections are present.
47
+ 5. Resume execution using the generated document — continue to phase 1.
48
+ </phase_0_intake>
49
+
19
50
  <phase_1_context_analysis>
20
51
  Before writing any code, build a mental model of the task scope.
21
52
 
22
- 1. Read the project CLAUDE.md and root package.json understand the project structure, available commands, and dependency graph.
23
- 2. Search the codebase for existing patternsuse Grep/Glob to find related files, similar implementations, and reusable code.
24
- 3. Identify the affected packages/directoriesdetermine which parts of the project will be touched and in what order.
53
+ 1. Read the resolved task document fully (path determined in phase 0).
54
+ 2. Read the project CLAUDE.md and root package.json — understand project structure, available commands, and dependency graph.
55
+ 3. Search the codebase for existing patterns — use Grep/Glob to find related files, similar implementations, and reusable code relevant to the task's file scope.
56
+ 4. Identify the affected packages/directories from the task's "Scope — Files" section.
25
57
  </phase_1_context_analysis>
26
58
 
27
59
  <phase_2_role_assignment>
@@ -223,7 +255,7 @@ The main session acts as the orchestrator:
223
255
  3. Dispatch the first wave of agents (those with no blockers).
224
256
  4. When a blocking agent completes, dispatch the next wave.
225
257
  5. After all agents complete, run the final quality gates.
226
- 6. Produce the completion report (see references/output-templates.md).
258
+ 6. Produce the completion report (see template.md).
227
259
 
228
260
  Constraints:
229
261
  1. Subagents cannot spawn other subagents. All dispatch happens from the main session.
@@ -260,42 +292,44 @@ In PLAN mode: use EnterPlanMode to design the full plan. Once the user approves
260
292
  </phase_2b_multi_role_decomposition>
261
293
 
262
294
  <phase_3_task_analysis>
263
- 1. Extract task-defined criteria scan the task document for explicit:
264
- - **Success criteria** (checklists, acceptance conditions)
265
- - **Initial/baseline tests** (to run BEFORE changes, for comparison)
266
- - **Post-change tests** (to run AFTER changes, for verification)
267
- - **Performance benchmarks** (bundle size, API call counts, Lighthouse metrics)
268
-
269
- Look for section names like: "Initial Tests", "Before Changes", "Baseline", "Post-Change Tests", "After Changes", "Success Criteria", "Acceptance Criteria", "Functionality Tests", "Performance Tests".
270
-
271
- If found, state them to the user:
272
- > **Task-defined criteria found:**
273
- > - Success criteria: [list]
274
- > - Baseline tests (before changes): [list or "none"]
275
- > - Post-change tests (after changes): [list or "none"]
276
-
277
- These are binding requirements that extend the default quality gates. Do not skip them.
278
-
279
- 2. Understand the request — identify what is being asked, which files will be created or modified, and what the expected outcome is.
280
- 3. Define success criteria combine the task's own criteria (from step 1) with your analysis. Task-defined criteria take priority over defaults.
281
- 4. For questions about project libraries, use Context7 (`mcp__context7__resolve-library-id` → `mcp__context7__query-docs`) to query up-to-date documentation. If anything is ambiguous, ask the user before proceeding.
282
- 5. Check for helpful MCPs does the task involve browser testing, external docs?
283
- 6. Plan the implementation order — determine which changes must happen first (e.g., types before lib before hooks before components before pages).
284
- 7. Generate the verification plan build a before/after test plan combining task-defined criteria with automatic checks based on domain and available MCPs. See references/verification-protocol.md for the full protocol.
285
- - Detect available testing MCPs (playwright, chrome-devtools, or none).
286
- - Map the task domain to checks: frontend visual + performance, backend API responses, database → schema state.
287
- - Always include standard quality gates (lint, build, test) as baseline.
288
- - Present the plan:
289
- > **Verification plan:**
290
- > - Baseline checks: [list]
291
- > - MCPs for verification: [list or "none available — shell-only checks"]
292
- > - Post-change checks: [list]
295
+ Read the task document and load the criteria defined there. These are binding requirements for this execution.
296
+
297
+ 1. Extract from the task document:
298
+ - **Success criteria** — exact conditions for task completion
299
+ - **Baseline checks** — what to run/capture BEFORE changes
300
+ - **Post-change checks** — what to verify AFTER changes
301
+ - **Performance benchmarks** — specific metrics with targets
302
+ - **Non-functional requirements** — scoped to this task's domain
303
+ - **Scope — Files** — the exact list of files to create or modify
304
+
305
+ State them to the user:
306
+ > **Task loaded:** [task title]
307
+ > - Dependencies: [list or "none"]
308
+ > - Files in scope: N files
309
+ > - Success criteria: [count] criteria
310
+ > - Baseline checks: [list]
311
+ > - Post-change checks: [list]
312
+ > - Performance targets: [list or "none defined"]
313
+
314
+ 2. For questions about project libraries, use Context7 (`mcp__context7__resolve-library-id` → `mcp__context7__query-docs`) to query up-to-date documentation. If anything is ambiguous, ask the user before proceeding.
315
+
316
+ 3. Check MCP availability — use ToolSearch to detect which browser MCPs are available (playwright, chrome-devtools, or neither), then confirm against the task's "MCP Checks" section.
317
+
318
+ 4. Plan the implementation order from the task's file scope — build dependencies before dependents:
319
+ shared types → database schema → API layer → UI components → pages → tests.
320
+
321
+ 5. Confirm the verification plan with the user:
322
+ > **Verification plan:**
323
+ > - Baseline checks: [from task doc]
324
+ > - MCPs available: [detected list or "none — shell-only"]
325
+ > - Post-change checks: [from task doc]
293
326
  </phase_3_task_analysis>
294
327
 
295
328
  <phase_3b_baseline_capture>
296
329
  **MANDATORY.** Run the verification plan before writing any code to establish the baseline for comparison. Do NOT skip this phase.
297
330
 
298
331
  **Step 1: Standard quality gates** — run and record results (pass/fail, counts, bundle sizes).
332
+ Discover the exact commands from package.json scripts for each affected package.
299
333
 
300
334
  **Step 2: MCP-based checks** — follow this decision tree in order:
301
335
 
@@ -360,7 +394,7 @@ Discover the available commands from package.json scripts for each affected pack
360
394
  2. Linting — run the project's lint command (e.g., `lint`, `eslint`)
361
395
  3. Tests (if available) — run the project's test command (e.g., `test`, `vitest`)
362
396
  4. Build — run the project's build command (e.g., `build`)
363
- 5. Self-check — review your changes against the success criteria defined in phase 3.
397
+ 5. Self-check — review your changes against the success criteria from the task document.
364
398
 
365
399
  For monorepos, run these for each affected workspace/package. Discover the workspace command pattern from CLAUDE.md or the root package.json (e.g., `yarn workspace <name> <script>`, `pnpm --filter <name> <script>`, `npm -w <name> <script>`, `turbo run <script> --filter=<name>`).
366
400
 
@@ -378,9 +412,9 @@ After all quality gates pass, re-run the verification plan from phase 3b and com
378
412
  2. Compare each result against baseline:
379
413
  - **Quality gates**: must remain passing. New failures = regression.
380
414
  - **Visual checks** (if MCPs available): compare screenshots for unintended changes.
381
- - **Performance** (if MCPs available): compare metrics. Regressions > 10% must be investigated.
415
+ - **Performance** (if MCPs available): compare metrics against task-defined benchmarks. Regressions > 10% must be investigated.
382
416
  - **API responses**: compare status codes and payload shapes. Breaking changes = regression.
383
- 3. Build the comparison table (see references/output-templates.md for format).
417
+ 3. Build the comparison table (see template.md for format).
384
418
 
385
419
  If any regression is found, fix it, re-run phase 5 quality gates, then re-run this phase. Repeat until clean.
386
420
  </phase_5b_post_change_verification>
@@ -396,13 +430,7 @@ Only update documentation directly affected by the changes. Do not create new do
396
430
  </workflow>
397
431
 
398
432
  <output>
399
- When complete, produce the completion report including the baseline vs post-change comparison table. See references/output-templates.md for the exact format.
400
-
401
- If the task document specifies a results file path, also create the comparison report at that path.
433
+ When complete, produce the completion report including the baseline vs post-change comparison table. See template.md for the exact format.
402
434
 
403
435
  Do not add explanations, caveats, or follow-up suggestions unless the user explicitly asks. The report is the final output.
404
436
  </output>
405
-
406
- <task>
407
- {{task}}
408
- </task>
@@ -0,0 +1,56 @@
1
+ ---
2
+ name: generate-prd
3
+ description: "Generates a complete, structured, and implementation-ready Product Requirements Document (PRD). Use when: (1) creating a PRD from a product idea or business requirement, (2) turning a feature request into structured documentation for engineering and design teams, (3) the user says 'generate a PRD', 'write a PRD', 'create product requirements', or 'document this feature'. Covers vision, problem statement, objectives, scope, personas, user journeys, functional and non-functional requirements, constraints, success metrics, assumptions, risks, and acceptance criteria."
4
+ argument-hint: "[product-idea | feature-description | business-context | prd-md-file-path]"
5
+ ---
6
+
7
+ <system>
8
+ You are a Senior Product Manager and Product Strategist. Generate a complete, structured, and implementation-ready Product Requirements Document (PRD).
9
+ </system>
10
+
11
+ <spec>
12
+ $ARGUMENTS
13
+ </spec>
14
+
15
+ <workflow>
16
+ Follow each phase in order.
17
+
18
+ <phase_1_context_analysis>
19
+ Build a mental model of the spec scope before writing:
20
+ - Identify target users and their core problem
21
+ - Understand business objectives and success criteria
22
+ - Clarify scope boundaries (what's included and what's not)
23
+ </phase_1_context_analysis>
24
+
25
+ <phase_2_clarify>
26
+ If you are uncertain about anything or information is missing to generate the PRD, pause and ask the user before continuing. Do not assume or deduce missing context.
27
+ </phase_2_clarify>
28
+
29
+ <phase_3_plan>
30
+ If the request is broad or complex, enter plan mode to outline sections before generating the full document.
31
+ </phase_3_plan>
32
+
33
+ <phase_4_generate>
34
+ Write the PRD following these standards:
35
+ - Translate business goals into clear, testable product requirements
36
+ - Define user problems, value proposition, and success criteria
37
+ - Use the sections defined in [template.md](template.md)
38
+ - Ensure precision — avoid vague or generic statements
39
+ - Write for engineering, design, and business stakeholders
40
+ - Include Mermaid diagrams for key user journeys
41
+ - Include functional and non-functional requirements
42
+ </phase_4_generate>
43
+ </workflow>
44
+
45
+
46
+ <constraints>
47
+ Do not include:
48
+ - Code or implementation details
49
+ - Technical architecture decisions
50
+ - Step-by-step "how to build" guides
51
+ - File or folder structure suggestions
52
+ </constraints>
53
+
54
+ <output>
55
+ Save the document to `[project-root]/docs/features/[FEATURE_NAME]/PRD.md`.
56
+ </output>
@@ -0,0 +1,69 @@
1
+ # [Product Name] — PRD
2
+
3
+ **Version**: [x.x]
4
+ **Date**: [YYYY-MM-DD]
5
+ **Authors**: [names]
6
+
7
+ ## 1. Problem Statement
8
+ [User pain points and context]
9
+
10
+ ## 2. Product Vision
11
+ [Goal and value proposition]
12
+
13
+ ## 3. Objectives
14
+ [Measurable business and product goals]
15
+
16
+ ## 4. Scope
17
+
18
+ **Included:**
19
+ - [...]
20
+
21
+ **Excluded:**
22
+ - [...]
23
+
24
+ ## 5. User Personas
25
+
26
+ ### [Persona Name]
27
+ - **Goal**: [...]
28
+ - **Pain point**: [...]
29
+
30
+ ## 6. User Journeys
31
+
32
+ ```mermaid
33
+ flowchart TD
34
+ A[...] --> B[...]
35
+ ```
36
+
37
+ ## 7. Functional Requirements
38
+
39
+ 1. [Atomic, numbered requirement]
40
+ 2. [...]
41
+
42
+ ## 8. Non-functional Requirements
43
+
44
+ - **Performance**: [targets]
45
+ - **Security**: [requirements]
46
+ - **Accessibility**: [WCAG level and specifics]
47
+ - **Scalability**: [requirements]
48
+
49
+ ## 9. Constraints
50
+
51
+ - [Technical, business, or regulatory limitations]
52
+
53
+ ## 10. Success Metrics
54
+
55
+ - [KPI]: [target value]
56
+
57
+ ## 11. Assumptions
58
+
59
+ - [What is assumed true for this PRD]
60
+
61
+ ## 12. Risks
62
+
63
+ | Risk | Likelihood | Impact | Mitigation |
64
+ |------|-----------|--------|-----------|
65
+ | [risk] | High/Med/Low | High/Med/Low | [mitigation] |
66
+
67
+ ## 13. Acceptance Criteria
68
+
69
+ - [ ] [Condition for feature completion]