panopticon-cli 0.4.32 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (142) hide show
  1. package/README.md +96 -210
  2. package/dist/{agents-BDFHF4T3.js → agents-E43Y3HNU.js} +10 -7
  3. package/dist/chunk-7SN4L4PH.js +150 -0
  4. package/dist/chunk-7SN4L4PH.js.map +1 -0
  5. package/dist/{chunk-2NIAOCIC.js → chunk-AAFQANKW.js} +358 -97
  6. package/dist/chunk-AAFQANKW.js.map +1 -0
  7. package/dist/chunk-AQXETQHW.js +113 -0
  8. package/dist/chunk-AQXETQHW.js.map +1 -0
  9. package/dist/chunk-B3PF6JPQ.js +212 -0
  10. package/dist/chunk-B3PF6JPQ.js.map +1 -0
  11. package/dist/chunk-CFCUOV3Q.js +669 -0
  12. package/dist/chunk-CFCUOV3Q.js.map +1 -0
  13. package/dist/chunk-CWELWPWQ.js +32 -0
  14. package/dist/chunk-CWELWPWQ.js.map +1 -0
  15. package/dist/chunk-DI7ABPNQ.js +352 -0
  16. package/dist/chunk-DI7ABPNQ.js.map +1 -0
  17. package/dist/{chunk-VU4FLXV5.js → chunk-FQ66DECN.js} +31 -4
  18. package/dist/chunk-FQ66DECN.js.map +1 -0
  19. package/dist/{chunk-VIWUCJ4V.js → chunk-FTCPTHIJ.js} +57 -432
  20. package/dist/chunk-FTCPTHIJ.js.map +1 -0
  21. package/dist/{review-status-GWQYY77L.js → chunk-GFP3PIPB.js} +14 -7
  22. package/dist/chunk-GFP3PIPB.js.map +1 -0
  23. package/dist/chunk-GR6ZZMCX.js +816 -0
  24. package/dist/chunk-GR6ZZMCX.js.map +1 -0
  25. package/dist/chunk-HJSM6E6U.js +1038 -0
  26. package/dist/chunk-HJSM6E6U.js.map +1 -0
  27. package/dist/{chunk-XP2DXWYP.js → chunk-HZT2AOPN.js} +164 -39
  28. package/dist/chunk-HZT2AOPN.js.map +1 -0
  29. package/dist/chunk-JQBV3Q2W.js +29 -0
  30. package/dist/chunk-JQBV3Q2W.js.map +1 -0
  31. package/dist/{chunk-BWGFN44T.js → chunk-JT4O4YVM.js} +28 -16
  32. package/dist/chunk-JT4O4YVM.js.map +1 -0
  33. package/dist/chunk-NTO3EDB3.js +600 -0
  34. package/dist/chunk-NTO3EDB3.js.map +1 -0
  35. package/dist/{chunk-JY7R7V4G.js → chunk-OMNXYPXC.js} +2 -2
  36. package/dist/chunk-OMNXYPXC.js.map +1 -0
  37. package/dist/chunk-PELXV435.js +215 -0
  38. package/dist/chunk-PELXV435.js.map +1 -0
  39. package/dist/chunk-PPRFKTVC.js +154 -0
  40. package/dist/chunk-PPRFKTVC.js.map +1 -0
  41. package/dist/chunk-WQG2TYCB.js +677 -0
  42. package/dist/chunk-WQG2TYCB.js.map +1 -0
  43. package/dist/{chunk-HCTJFIJJ.js → chunk-YLPSQAM2.js} +2 -2
  44. package/dist/{chunk-HCTJFIJJ.js.map → chunk-YLPSQAM2.js.map} +1 -1
  45. package/dist/{chunk-6HXKTOD7.js → chunk-ZTFNYOC7.js} +53 -38
  46. package/dist/chunk-ZTFNYOC7.js.map +1 -0
  47. package/dist/cli/index.js +5103 -3165
  48. package/dist/cli/index.js.map +1 -1
  49. package/dist/{config-BOAMSKTF.js → config-4CJNUE3O.js} +7 -3
  50. package/dist/dashboard/prompts/merge-agent.md +217 -0
  51. package/dist/dashboard/prompts/review-agent.md +409 -0
  52. package/dist/dashboard/prompts/sync-main.md +84 -0
  53. package/dist/dashboard/prompts/test-agent.md +283 -0
  54. package/dist/dashboard/prompts/work-agent.md +249 -0
  55. package/dist/dashboard/public/assets/index-BxpjweAL.css +32 -0
  56. package/dist/dashboard/public/assets/index-DQHkwvvJ.js +743 -0
  57. package/dist/dashboard/public/index.html +2 -2
  58. package/dist/dashboard/server.js +17619 -4044
  59. package/dist/{dns-L3L2BB27.js → dns-7BDJSD3E.js} +4 -2
  60. package/dist/{feedback-writer-AAKF5BTK.js → feedback-writer-LVZ5TFYZ.js} +8 -4
  61. package/dist/feedback-writer-LVZ5TFYZ.js.map +1 -0
  62. package/dist/hume-WMAUBBV2.js +13 -0
  63. package/dist/index.d.ts +162 -40
  64. package/dist/index.js +67 -23
  65. package/dist/index.js.map +1 -1
  66. package/dist/{projects-VXRUCMLM.js → projects-JEIVIYC6.js} +3 -3
  67. package/dist/rally-RKFSWC7E.js +10 -0
  68. package/dist/{remote-agents-Z3R2A5BN.js → remote-agents-TFSMW7GN.js} +2 -2
  69. package/dist/{remote-workspace-2G6V2KNP.js → remote-workspace-AHVHQEES.js} +8 -8
  70. package/dist/review-status-EPFG4XM7.js +19 -0
  71. package/dist/shadow-state-5MDP6YXH.js +30 -0
  72. package/dist/shadow-state-5MDP6YXH.js.map +1 -0
  73. package/dist/{specialist-context-N32QBNNQ.js → specialist-context-ZC6A4M3I.js} +8 -7
  74. package/dist/{specialist-context-N32QBNNQ.js.map → specialist-context-ZC6A4M3I.js.map} +1 -1
  75. package/dist/{specialist-logs-GF3YV4KL.js → specialist-logs-KLGJCEUL.js} +7 -6
  76. package/dist/specialist-logs-KLGJCEUL.js.map +1 -0
  77. package/dist/{specialists-JBIW6MP4.js → specialists-O4HWDJL5.js} +7 -6
  78. package/dist/specialists-O4HWDJL5.js.map +1 -0
  79. package/dist/tldr-daemon-T3THOUGT.js +21 -0
  80. package/dist/tldr-daemon-T3THOUGT.js.map +1 -0
  81. package/dist/traefik-QN7R5I6V.js +19 -0
  82. package/dist/traefik-QN7R5I6V.js.map +1 -0
  83. package/dist/tunnel-W2GZBLEV.js +13 -0
  84. package/dist/tunnel-W2GZBLEV.js.map +1 -0
  85. package/dist/workspace-manager-IE4JL2JP.js +22 -0
  86. package/dist/workspace-manager-IE4JL2JP.js.map +1 -0
  87. package/package.json +2 -2
  88. package/scripts/heartbeat-hook +37 -10
  89. package/scripts/patches/llm-tldr-tsx-support.py +109 -0
  90. package/scripts/pre-tool-hook +26 -15
  91. package/scripts/record-cost-event.js +177 -43
  92. package/scripts/record-cost-event.ts +87 -3
  93. package/scripts/statusline.sh +169 -0
  94. package/scripts/stop-hook +21 -11
  95. package/scripts/tldr-post-edit +72 -0
  96. package/scripts/tldr-read-enforcer +275 -0
  97. package/scripts/work-agent-stop-hook +137 -0
  98. package/skills/check-merged/SKILL.md +143 -0
  99. package/skills/crash-investigation/SKILL.md +301 -0
  100. package/skills/github-cli/SKILL.md +185 -0
  101. package/skills/myn-standards/SKILL.md +351 -0
  102. package/skills/pan-reopen/SKILL.md +65 -0
  103. package/skills/pan-sync-main/SKILL.md +87 -0
  104. package/skills/pan-tldr/SKILL.md +149 -0
  105. package/skills/react-best-practices/SKILL.md +125 -0
  106. package/skills/spec-readiness/REPORT-TEMPLATE.md +158 -0
  107. package/skills/spec-readiness/SCORING-REFERENCE.md +369 -0
  108. package/skills/spec-readiness/SKILL.md +400 -0
  109. package/skills/spec-readiness-setup/SKILL.md +361 -0
  110. package/skills/workspace-status/SKILL.md +56 -0
  111. package/skills/write-spec/SKILL.md +138 -0
  112. package/templates/traefik/dynamic/panopticon.yml.template +0 -5
  113. package/templates/traefik/traefik.yml +0 -8
  114. package/dist/chunk-2NIAOCIC.js.map +0 -1
  115. package/dist/chunk-3XAB4IXF.js +0 -51
  116. package/dist/chunk-3XAB4IXF.js.map +0 -1
  117. package/dist/chunk-6HXKTOD7.js.map +0 -1
  118. package/dist/chunk-BBCUK6N2.js +0 -241
  119. package/dist/chunk-BBCUK6N2.js.map +0 -1
  120. package/dist/chunk-BWGFN44T.js.map +0 -1
  121. package/dist/chunk-ELK6Q7QI.js +0 -545
  122. package/dist/chunk-ELK6Q7QI.js.map +0 -1
  123. package/dist/chunk-JY7R7V4G.js.map +0 -1
  124. package/dist/chunk-LYSBSZYV.js +0 -1523
  125. package/dist/chunk-LYSBSZYV.js.map +0 -1
  126. package/dist/chunk-VIWUCJ4V.js.map +0 -1
  127. package/dist/chunk-VU4FLXV5.js.map +0 -1
  128. package/dist/chunk-XP2DXWYP.js.map +0 -1
  129. package/dist/dashboard/public/assets/index-C7X6LP5Z.css +0 -32
  130. package/dist/dashboard/public/assets/index-ClYqpcAJ.js +0 -645
  131. package/dist/feedback-writer-AAKF5BTK.js.map +0 -1
  132. package/dist/review-status-GWQYY77L.js.map +0 -1
  133. package/dist/traefik-CUJM6K5Z.js +0 -12
  134. /package/dist/{agents-BDFHF4T3.js.map → agents-E43Y3HNU.js.map} +0 -0
  135. /package/dist/{config-BOAMSKTF.js.map → config-4CJNUE3O.js.map} +0 -0
  136. /package/dist/{dns-L3L2BB27.js.map → dns-7BDJSD3E.js.map} +0 -0
  137. /package/dist/{projects-VXRUCMLM.js.map → hume-WMAUBBV2.js.map} +0 -0
  138. /package/dist/{remote-agents-Z3R2A5BN.js.map → projects-JEIVIYC6.js.map} +0 -0
  139. /package/dist/{specialist-logs-GF3YV4KL.js.map → rally-RKFSWC7E.js.map} +0 -0
  140. /package/dist/{specialists-JBIW6MP4.js.map → remote-agents-TFSMW7GN.js.map} +0 -0
  141. /package/dist/{remote-workspace-2G6V2KNP.js.map → remote-workspace-AHVHQEES.js.map} +0 -0
  142. /package/dist/{traefik-CUJM6K5Z.js.map → review-status-EPFG4XM7.js.map} +0 -0
@@ -0,0 +1,125 @@
1
+ ---
2
+ name: vercel-react-best-practices
3
+ description: React and Next.js performance optimization guidelines from Vercel Engineering. This skill should be used when writing, reviewing, or refactoring React/Next.js code to ensure optimal performance patterns. Triggers on tasks involving React components, Next.js pages, data fetching, bundle optimization, or performance improvements.
4
+ license: MIT
5
+ metadata:
6
+ author: vercel
7
+ version: "1.0.0"
8
+ ---
9
+
10
+ # Vercel React Best Practices
11
+
12
+ Comprehensive performance optimization guide for React and Next.js applications, maintained by Vercel. Contains 45 rules across 8 categories, prioritized by impact to guide automated refactoring and code generation.
13
+
14
+ ## When to Apply
15
+
16
+ Reference these guidelines when:
17
+ - Writing new React components or Next.js pages
18
+ - Implementing data fetching (client or server-side)
19
+ - Reviewing code for performance issues
20
+ - Refactoring existing React/Next.js code
21
+ - Optimizing bundle size or load times
22
+
23
+ ## Rule Categories by Priority
24
+
25
+ | Priority | Category | Impact | Prefix |
26
+ |----------|----------|--------|--------|
27
+ | 1 | Eliminating Waterfalls | CRITICAL | `async-` |
28
+ | 2 | Bundle Size Optimization | CRITICAL | `bundle-` |
29
+ | 3 | Server-Side Performance | HIGH | `server-` |
30
+ | 4 | Client-Side Data Fetching | MEDIUM-HIGH | `client-` |
31
+ | 5 | Re-render Optimization | MEDIUM | `rerender-` |
32
+ | 6 | Rendering Performance | MEDIUM | `rendering-` |
33
+ | 7 | JavaScript Performance | LOW-MEDIUM | `js-` |
34
+ | 8 | Advanced Patterns | LOW | `advanced-` |
35
+
36
+ ## Quick Reference
37
+
38
+ ### 1. Eliminating Waterfalls (CRITICAL)
39
+
40
+ - `async-defer-await` - Move await into branches where actually used
41
+ - `async-parallel` - Use Promise.all() for independent operations
42
+ - `async-dependencies` - Use better-all for partial dependencies
43
+ - `async-api-routes` - Start promises early, await late in API routes
44
+ - `async-suspense-boundaries` - Use Suspense to stream content
45
+
46
+ ### 2. Bundle Size Optimization (CRITICAL)
47
+
48
+ - `bundle-barrel-imports` - Import directly, avoid barrel files
49
+ - `bundle-dynamic-imports` - Use next/dynamic for heavy components
50
+ - `bundle-defer-third-party` - Load analytics/logging after hydration
51
+ - `bundle-conditional` - Load modules only when feature is activated
52
+ - `bundle-preload` - Preload on hover/focus for perceived speed
53
+
54
+ ### 3. Server-Side Performance (HIGH)
55
+
56
+ - `server-cache-react` - Use React.cache() for per-request deduplication
57
+ - `server-cache-lru` - Use LRU cache for cross-request caching
58
+ - `server-serialization` - Minimize data passed to client components
59
+ - `server-parallel-fetching` - Restructure components to parallelize fetches
60
+ - `server-after-nonblocking` - Use after() for non-blocking operations
61
+
62
+ ### 4. Client-Side Data Fetching (MEDIUM-HIGH)
63
+
64
+ - `client-swr-dedup` - Use SWR for automatic request deduplication
65
+ - `client-event-listeners` - Deduplicate global event listeners
66
+
67
+ ### 5. Re-render Optimization (MEDIUM)
68
+
69
+ - `rerender-defer-reads` - Don't subscribe to state only used in callbacks
70
+ - `rerender-memo` - Extract expensive work into memoized components
71
+ - `rerender-dependencies` - Use primitive dependencies in effects
72
+ - `rerender-derived-state` - Subscribe to derived booleans, not raw values
73
+ - `rerender-functional-setstate` - Use functional setState for stable callbacks
74
+ - `rerender-lazy-state-init` - Pass function to useState for expensive values
75
+ - `rerender-transitions` - Use startTransition for non-urgent updates
76
+
77
+ ### 6. Rendering Performance (MEDIUM)
78
+
79
+ - `rendering-animate-svg-wrapper` - Animate div wrapper, not SVG element
80
+ - `rendering-content-visibility` - Use content-visibility for long lists
81
+ - `rendering-hoist-jsx` - Extract static JSX outside components
82
+ - `rendering-svg-precision` - Reduce SVG coordinate precision
83
+ - `rendering-hydration-no-flicker` - Use inline script for client-only data
84
+ - `rendering-activity` - Use Activity component for show/hide
85
+ - `rendering-conditional-render` - Use ternary, not && for conditionals
86
+
87
+ ### 7. JavaScript Performance (LOW-MEDIUM)
88
+
89
+ - `js-batch-dom-css` - Group CSS changes via classes or cssText
90
+ - `js-index-maps` - Build Map for repeated lookups
91
+ - `js-cache-property-access` - Cache object properties in loops
92
+ - `js-cache-function-results` - Cache function results in module-level Map
93
+ - `js-cache-storage` - Cache localStorage/sessionStorage reads
94
+ - `js-combine-iterations` - Combine multiple filter/map into one loop
95
+ - `js-length-check-first` - Check array length before expensive comparison
96
+ - `js-early-exit` - Return early from functions
97
+ - `js-hoist-regexp` - Hoist RegExp creation outside loops
98
+ - `js-min-max-loop` - Use loop for min/max instead of sort
99
+ - `js-set-map-lookups` - Use Set/Map for O(1) lookups
100
+ - `js-tosorted-immutable` - Use toSorted() for immutability
101
+
102
+ ### 8. Advanced Patterns (LOW)
103
+
104
+ - `advanced-event-handler-refs` - Store event handlers in refs
105
+ - `advanced-use-latest` - useLatest for stable callback refs
106
+
107
+ ## How to Use
108
+
109
+ Read individual rule files for detailed explanations and code examples:
110
+
111
+ ```
112
+ rules/async-parallel.md
113
+ rules/bundle-barrel-imports.md
114
+ rules/_sections.md
115
+ ```
116
+
117
+ Each rule file contains:
118
+ - Brief explanation of why it matters
119
+ - Incorrect code example with explanation
120
+ - Correct code example with explanation
121
+ - Additional context and references
122
+
123
+ ## Full Compiled Document
124
+
125
+ For the complete guide with all rules expanded: `AGENTS.md`
@@ -0,0 +1,158 @@
1
+ # Spec Readiness — Report Template
2
+
3
+ This file defines the report structure for both HTML and EML formats. Only the report-generation subagent reads this.
4
+
5
+ ## Branding
6
+
7
+ Configurable via wrapper `config.yaml`. Defaults:
8
+ - Primary Color: #1e293b (dark slate — headers, text)
9
+ - Stripe Color: matches primary (top accent bar)
10
+ - Score colors: Red (#E53935) < 8, Yellow (#FF9800) 8-14, Green (#43A047) 15+
11
+ - Status colors: Red = 0-39, Yellow = 40-69, Green = 70-100
12
+
13
+ If a wrapper `config.yaml` exists, use `branding.primary_color`, `branding.stripe_color`, `branding.company_name`, and `branding.footer_text` from it.
14
+
15
+ ## Report Sections
16
+
17
+ ### 1. Brand Stripe (4px)
18
+ Top-of-page accent bar using `branding.stripe_color` or default.
19
+
20
+ ### 2. Header Block (primary_color background)
21
+ - Eyebrow text: "REQUIREMENTS READINESS ASSESSMENT" (uppercase, small, lighter accent)
22
+ - Title: "{Issue Title}" (white, large)
23
+ - Metadata line: "{Identifier} · {Project} · {Milestone/Release} · Assessed {date}" (light gray)
24
+ - Score badge: Large score number with status label and color (circular or pill badge)
25
+
26
+ ### 3. Issue Info Row (Light Gray background, #F5F5F5)
27
+ Grid/flex row showing:
28
+ - Owner/Assignee
29
+ - Project
30
+ - Milestone/Release dates
31
+ - Child issue count (completed/total)
32
+ - Estimate if set
33
+
34
+ ### 4. Score Dashboard — 5 metric cards in a row/grid
35
+ Each card contains:
36
+ - Dimension name (uppercase label, small text)
37
+ - Score as "X / 20" (large number)
38
+ - Visual bar (colored by score: red < 8, yellow 8-14, green 15+)
39
+ - One-line summary from dimension findings
40
+
41
+ ### 5. Overall Score Bar
42
+ - Full-width progress bar showing 0-100
43
+ - Fill color matches status (red/yellow/green)
44
+ - Status label badge aligned right
45
+ - Score number displayed inside or above bar
46
+
47
+ ### 6. Top Blockers (Amber/warning callout box)
48
+ - Background: light amber (#FFF8E1)
49
+ - Border-left: 4px solid #FF9800
50
+ - Header: "Top Blockers" with warning icon
51
+ - 3-5 actionable bullets
52
+ - Each with estimated point-improvement in parentheses
53
+
54
+ ### 7. Dimension Details — One section per dimension
55
+ Each section contains:
56
+ - Section heading with dimension name and score badge (colored pill)
57
+ - Findings table with columns: Finding | Impact | Source | Recommendation
58
+ - Row colors: Green background tint for bonus findings, Red tint for deductions, Gray for neutral
59
+ - Impact column shows "+N" or "-N" with color
60
+
61
+ ### 8. External Document Analysis (only if PRD/BRD found)
62
+ - Document source and access status
63
+ - Coverage summary
64
+ - Gaps identified
65
+ - Note if document was attached before or after development started
66
+
67
+ ### 9. Child Issue Assessment Table
68
+ - Columns: ID (linked if possible), Title, Status, Has AC?, AC Quality, Notes
69
+ - AC Quality badges: Good (green pill), Weak (yellow pill), None (red pill)
70
+ - Summary row at bottom: "X of Y issues have testable acceptance criteria"
71
+ - Sortable appearance (styled headers)
72
+
73
+ ### 10. Footer
74
+ - Light gray background
75
+ - Text: `branding.footer_text` or default: "Spec Readiness Assessment · Generated from {tracker} data · {Project}"
76
+ - Sub-text: "Methodology: 5-dimension scoring model"
77
+ - Assessment date
78
+
79
+ ## HTML Format Guidelines
80
+
81
+ Use when generating `.html` output (default):
82
+
83
+ - Self-contained single HTML file (inline CSS, no external dependencies)
84
+ - Use `<style>` block in `<head>` for main styles
85
+ - Print-friendly: `@media print` rules for clean printing
86
+ - Table cells: `padding: 8px 12px; border-bottom: 1px solid #E0E0E0`
87
+ - Font stack: `-apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif`
88
+ - Max width: 1000px, centered with `margin: 0 auto`
89
+ - Score badge CSS: `display: inline-block; padding: 4px 12px; border-radius: 12px; font-weight: 600`
90
+ - `open` command opens in browser
91
+
92
+ ## EML Format Guidelines
93
+
94
+ Use when generating `.eml` output (`--eml` flag):
95
+
96
+ ### MIME Headers
97
+
98
+ The file must start with RFC 2822 headers, then a blank line, then the HTML body:
99
+
100
+ ```
101
+ MIME-Version: 1.0
102
+ Content-Type: text/html; charset="UTF-8"
103
+ Subject: Spec Readiness: {Identifier} — {Title} ({Score}/100 {StatusLabel})
104
+ From: noreply@example.com
105
+ To: noreply@example.com
106
+ Date: {RFC 2822 date, e.g. Thu, 27 Feb 2026 12:00:00 -0600}
107
+
108
+ <!DOCTYPE html>
109
+ <html>...
110
+ ```
111
+
112
+ The `From` and `To` addresses are placeholders — the wrapper `config.yaml` can override them via `branding.eml_from` and `branding.eml_to`. Without a wrapper, use `noreply@example.com` as defaults. The user opens the `.eml` in their mail client and edits recipients before sending.
113
+
114
+ ### Email-Safe HTML Constraints
115
+
116
+ - **All inline CSS** — no `<style>` blocks (many email clients strip them)
117
+ - **Table-based layout** using `<table role="presentation">` — no flexbox/grid
118
+ - **Max width: 560px** — email-friendly width
119
+ - **No external resources** — no linked stylesheets, fonts, or images
120
+ - Font stack: `Arial, 'Helvetica Neue', Helvetica, sans-serif`
121
+ - Use `&nbsp;` in empty cells, `&middot;` for separators
122
+ - `border-collapse: separate` on data tables for border-radius support
123
+
124
+ ### Status Badge Inline Styles
125
+
126
+ ```
127
+ Completed: background-color:#E6F7F5; color:#00897B; padding:2px 8px; border-radius:10px; font-size:11px;
128
+ In-Progress: background-color:#DBEAFE; color:#1e40af; padding:2px 8px; border-radius:10px; font-size:11px;
129
+ Backlog: background-color:#F5F5F5; color:#374151; border:1px solid #CFD1D1; padding:2px 8px; border-radius:10px; font-size:11px;
130
+ Blocked: background-color:#FEE2E2; color:#DC2626; padding:2px 8px; border-radius:10px; font-size:11px;
131
+ ```
132
+
133
+ ### Score Badge Inline Styles
134
+
135
+ ```
136
+ Red (0-39): background-color:#FDECEA; color:#DC2626; padding:4px 12px; border-radius:12px; font-weight:600;
137
+ Yellow (40-69): background-color:#FFF8E1; color:#C2410C; padding:4px 12px; border-radius:12px; font-weight:600;
138
+ Green (70-100): background-color:#E6F7F5; color:#00897B; padding:4px 12px; border-radius:12px; font-weight:600;
139
+ ```
140
+
141
+ ### EML Branding Colors (defaults, overridable via wrapper)
142
+
143
+ - Primary Dark: `branding.primary_color` or `#1e293b` (headers, primary text)
144
+ - Body Text: `#374151` (labels, secondary text)
145
+ - Accent: `branding.stripe_color` or `#2563eb` (brand stripe, accents)
146
+ - Alert Red: `#DC2626`, Amber: `#D97706`, Green: `#00897B`
147
+
148
+ ### Opening
149
+
150
+ `open` command opens `.eml` in the default mail client (Thunderbird, Mail.app, Outlook).
151
+
152
+ ## JSON Sidecar
153
+
154
+ Always generated regardless of report format. Schema is defined in SCORING-REFERENCE.md.
155
+ Files save to the same output directory with naming convention:
156
+ - HTML: `spec-readiness-{identifier}.html`
157
+ - EML: `spec-readiness-{identifier}.eml`
158
+ - JSON: `spec-readiness-{identifier}.json`
@@ -0,0 +1,369 @@
1
+ # Spec Readiness — Scoring Reference
2
+
3
+ This file contains detailed scoring criteria, deduction examples, and calibration data.
4
+ Subagents read this file to score individual dimensions.
5
+
6
+ ## Scoring Baseline
7
+
8
+ For each dimension, start at **15** (baseline "decent" score), apply deductions for negative findings and bonuses for positive findings, then clamp to 0-20.
9
+
10
+ Record each finding with:
11
+ - **Finding text** — what was observed
12
+ - **Impact** — points added or deducted
13
+ - **Source** — traceability (e.g., "Issue description", "Comment by @dev on Jan 15", "Child issue #42 created 4 weeks after initial batch", "Edit #17 on Oct 15")
14
+ - **Recommendation** — what to do about it (if deduction)
15
+
16
+ ---
17
+
18
+ ## Dimension 1: Requirements Clarity (0-20)
19
+
20
+ *"Is the spec complete and unambiguous?"*
21
+
22
+ ### Scoring Guide
23
+
24
+ | Points | Criteria |
25
+ |--------|----------|
26
+ | 16-20 | Description reads as a locked spec. No open questions. ON/OFF/edge behaviors documented. Validation rules explicit. No conflicting text. |
27
+ | 11-15 | Most requirements documented. Minor open questions remain. Description is mostly stable. |
28
+ | 6-10 | Multiple open questions, TBD markers, or "needs confirmation" items. Description reads like meeting notes. High edit churn. |
29
+ | 0-5 | Description is a placeholder or contains conflicting information. Major behaviors undefined. |
30
+
31
+ ### What to Check
32
+
33
+ 1. **Open question markers** in description and notes:
34
+ - Literal "?" in requirement statements (not just punctuation)
35
+ - "TBD", "TODO", "needs confirmation", "open question", "to be determined"
36
+ - "needs discussion", "to be decided", "pending", "awaiting"
37
+ - Strikethrough text (HTML `<s>`, `<strike>`, `<del>`, `text-decoration:line-through`, markdown `~~`)
38
+ - Highlighting (yellow/green background — inline Q&A, not a finished spec)
39
+ - Square brackets: `[?]`, `[TBD]`, `[OPEN]`
40
+
41
+ 2. **Description edit history** (if available from tracker):
42
+ - < 10 edits: Normal (neutral)
43
+ - 10-30 edits: Moderate churn (minor deduction)
44
+ - 30+ edits: High churn — spec is a living document, not locked (major deduction)
45
+ - **Late edits** (after first child issue moves to In-Progress): Each late description change = deduction. Requirements changing during development is the #1 risk signal.
46
+
47
+ 3. **External documents**:
48
+ - Scan description for links to PRD, BRD, spec documents (Google Docs, Confluence, SharePoint, Notion, attached files)
49
+ - Check issue attachments for documents
50
+ - If found: bonus points. If attached AFTER development started (check edit history for when attachment was added vs. when first child moved to In-Progress): reduced bonus.
51
+ - If a URL is accessible, use WebFetch to analyze coverage and cross-reference with description for gaps. Note: WebFetch may fail on authenticated URLs.
52
+
53
+ 4. **Discussion threads / comments** — scan for requirement questions asked after work began:
54
+ - "Where can I find...", "How should we handle...", "What happens when..."
55
+ - Questions from developers (not product) indicate requirements gaps
56
+ - Each requirement question during active development = deduction
57
+
58
+ 5. **Child issue descriptions** — spot-check for acceptance criteria:
59
+ - Issues with only a title and no description: deduction
60
+ - Issues with description but no testable acceptance criteria: minor deduction
61
+ - "Given/When/Then" or explicit acceptance criteria: bonus
62
+
63
+ ### Deduction Examples
64
+ - Each unresolved open question: -2 pts
65
+ - Description has 30+ edits: -3 pts
66
+ - Description changed after dev started: -2 pts per significant change
67
+ - No PRD/BRD attached (if customer-directed): -2 pts
68
+ - PRD attached after dev started: -1 pt
69
+ - More than 50% of child issues lack acceptance criteria: -3 pts
70
+
71
+ ---
72
+
73
+ ## Dimension 2: Technical Discovery (0-20)
74
+
75
+ *"Have the technical unknowns been investigated?"*
76
+
77
+ ### Scoring Guide
78
+
79
+ | Points | Criteria |
80
+ |--------|----------|
81
+ | 16-20 | Spike/investigation completed and accepted BEFORE implementation started. Repos, DB schema, APIs all identified. Team familiarity acknowledged with buffer. |
82
+ | 11-15 | Investigation exists but ran alongside implementation. Some technical details identified. |
83
+ | 6-10 | No investigation, or investigation is incomplete. Minimal technical discovery. |
84
+ | 0-5 | No technical discovery at all. Estimate is a business number. Team unfamiliarity not acknowledged. |
85
+
86
+ ### What to Check
87
+
88
+ 1. **Spike / investigation issues** among children:
89
+ - Look for issues with "SPIKE", "spike", "Spike", "investigation", "analysis", "discovery", "POC", "prototype" in the name (plus any custom patterns from wrapper config `conventions.spike_patterns`)
90
+ - Check if spike is Accepted/Completed BEFORE the earliest implementation issue moves to In-Progress
91
+ - Spike completed after implementation started = not gating, deduction
92
+ - No spike at all = major deduction
93
+ - Multiple spikes (insufficient first attempt, had to redo) = deduction for spike quality
94
+
95
+ 2. **Technical detail in description**:
96
+ - Mentions of specific repositories, file paths, class names: bonus
97
+ - Mentions of specific database tables or schema: bonus
98
+ - Mentions of specific API endpoints or services: bonus
99
+ - Generic/vague references ("update the system"): no bonus
100
+
101
+ 3. **Team familiarity signals** in description/notes:
102
+ - "Team is learning", "new codebase", "old system", "legacy": risk acknowledged (neutral if buffer applied, deduction if no buffer)
103
+ - No mention when the work touches unfamiliar code: deduction
104
+
105
+ 4. **Estimation**:
106
+ - Estimate field populated: neutral (estimate exists)
107
+ - No estimate: deduction
108
+ - Estimate set before investigation completed: deduction (estimate preceded discovery)
109
+
110
+ 5. **Infrastructure issues** among children:
111
+ - Look for issues mentioning "database", "schema", "migration", "table", "infrastructure", "permissions", "settings"
112
+ - Sequenced early (low issue number, early sprint)? Bonus.
113
+ - Added late (created weeks after initial batch)? Deduction — infrastructure was an afterthought.
114
+
115
+ ### Deduction Examples
116
+ - No spike/investigation issue: -8 pts
117
+ - Spike not accepted before implementation started: -4 pts
118
+ - Investigation had to be redone (insufficient first attempt): -3 pts
119
+ - No specific repos/tables/endpoints in description: -3 pts
120
+ - No estimate: -2 pts
121
+ - Infrastructure issues added late: -3 pts
122
+ - Team familiarity risk not acknowledged: -2 pts
123
+
124
+ ---
125
+
126
+ ## Dimension 3: Scope & Decomposition (0-20)
127
+
128
+ *"Is the issue right-sized with clear boundaries?"*
129
+
130
+ ### Scoring Guide
131
+
132
+ | Points | Criteria |
133
+ |--------|----------|
134
+ | 16-20 | Explicit "in scope" / "out of scope" statements. Issue is a single coherent deliverable. Child count reasonable. No overflow markers. |
135
+ | 11-15 | Scope mostly clear. "Out of scope" not explicitly stated. Slightly large but manageable. |
136
+ | 6-10 | Overloaded (multiple work streams). Notes suggest splitting but it wasn't done. |
137
+ | 0-5 | No scope definition. Issue is a catch-all. Already has overflow markers. |
138
+
139
+ ### What to Check
140
+
141
+ 1. **Issue name overflow markers** (configurable via wrapper `conventions.overflow_markers`):
142
+ - Default patterns: `[Unfinished]`, `[Continued]`, `[Carry-over]`, `[Part N]`
143
+ - These mean the issue was NOT right-sized from the start. Major deduction.
144
+
145
+ 2. **Child issue count vs. timeline**:
146
+ - Determine the planned duration (milestone/cycle dates, or sprint count)
147
+ - Calculate child issues per sprint
148
+ - More than 8 issues per sprint: Red flag (overloaded)
149
+ - More than 20 total children for a single cycle: Deduction
150
+ - More than 30 total children: Major deduction
151
+
152
+ 3. **Decomposition signals** in description/notes:
153
+ - "Break this up", "split into", "multiple issues/features": If this advice exists but wasn't followed = major deduction
154
+ - "Phase 1", "Phase 2": Acknowledged phasing is positive
155
+ - "Out of scope", "not included", "explicitly excluded": Bonus for scope clarity
156
+
157
+ 4. **Child issue carryover**:
158
+ - Count children with overflow markers in their names
159
+ - Each carried-over issue = evidence of underestimation
160
+ - Carryover rate > 30%: Major deduction
161
+
162
+ 5. **Scope creep signals** (from edit history or child creation dates):
163
+ - New requirements added after initial description (look for "New Requirement", "added requirement", "additional scope" in edit descriptions)
164
+ - Child issues created significantly later than the initial batch (check CreationDate spread)
165
+ - Late-created children suggest scope was discovered, not planned
166
+
167
+ ### Deduction Examples
168
+ - Overflow markers in issue name: -6 pts
169
+ - More than 20 children for a single cycle: -3 pts
170
+ - Notes say "break into N issues" but it wasn't done: -5 pts
171
+ - No "out of scope" statement: -2 pts
172
+ - More than 30% child carryover rate: -4 pts
173
+ - Scope added after initial description: -2 pts per addition
174
+
175
+ ---
176
+
177
+ ## Dimension 4: Dependencies & Prerequisites (0-20)
178
+
179
+ *"Is the critical path mapped?"*
180
+
181
+ ### Scoring Guide
182
+
183
+ | Points | Criteria |
184
+ |--------|----------|
185
+ | 16-20 | Issue dependencies tracked in the tracker. External dependencies identified. Prerequisites all accounted for before dev starts. |
186
+ | 11-15 | Key dependencies documented informally (in descriptions). Most prerequisites identified. |
187
+ | 6-10 | Dependencies exist but aren't tracked. Some prerequisites discovered after work started. |
188
+ | 0-5 | No dependency mapping. Issues treated as independent backlog. Prerequisites discovered during sprints. |
189
+
190
+ ### What to Check
191
+
192
+ 1. **Formal dependency links** (blocking/blocked-by relations in tracker):
193
+ - Check each child issue for dependency links (predecessors/successors, blocking/blocked-by relations)
194
+ - Any formal links: Bonus
195
+ - Zero links across all children: Major deduction
196
+
197
+ 2. **Implicit dependency signals** in descriptions:
198
+ - "Depends on", "requires", "blocked by", "after X is done", "prerequisite"
199
+ - These indicate dependencies exist but aren't formally tracked
200
+
201
+ 3. **Questions about prerequisites** in comments/discussions:
202
+ - "Where can I find the [X] ID?" — External ID prerequisite not identified
203
+ - "Does [object] have an external identifier?" — Data prerequisite missed
204
+ - "Which repo is this in?" — Code location not identified
205
+ - Each such question during active development = deduction
206
+
207
+ 4. **Foundation issue sequencing**:
208
+ - Are DB/settings/permissions issues early in the sequence?
209
+ - Or were they added after other issues started? (Check creation dates)
210
+ - Foundation work added late = infrastructure prerequisites missed
211
+
212
+ 5. **External integration points**:
213
+ - Does the description mention other systems, APIs, teams, or repos?
214
+ - Are those reflected in child issues?
215
+ - Missing integration issues = deduction
216
+
217
+ ### Deduction Examples
218
+ - Zero formal dependency links across all children: -6 pts
219
+ - Prerequisite discovered during sprint (evidence in comments): -3 pts each
220
+ - Foundation issues added after initial batch: -3 pts
221
+ - External integration mentioned but no corresponding issue: -3 pts
222
+ - No external dependencies documented (when issue clearly has them): -4 pts
223
+
224
+ ---
225
+
226
+ ## Dimension 5: Edge Cases & Test Strategy (0-20)
227
+
228
+ *"Are failure modes documented?"*
229
+
230
+ ### Scoring Guide
231
+
232
+ | Points | Criteria |
233
+ |--------|----------|
234
+ | 16-20 | Error paths documented (missing config, null values, partial setup). Test scenarios enumerated. QA strategy defined. Acceptance criteria are testable. |
235
+ | 11-15 | Happy path well-defined. Some edge cases mentioned. Test issue exists. |
236
+ | 6-10 | Only happy path documented. No "what if" scenarios. Testing is an afterthought. |
237
+ | 0-5 | No edge case discussion. No test strategy. Assumptions stated without validation. |
238
+
239
+ ### What to Check
240
+
241
+ 1. **Edge case language** in description/notes:
242
+ - "What if", "when missing", "if not configured", "null", "empty", "partial"
243
+ - "Error handling", "fallback", "default behavior"
244
+ - These indicate failure modes were considered
245
+
246
+ 2. **Assumptions without test coverage**:
247
+ - Look for "Assumption:", "We assume", "Expected:", "Must have"
248
+ - For each assumption, check if a corresponding test case or edge case issue exists
249
+ - Unvalidated assumptions = deduction
250
+
251
+ 3. **Test issues** among children:
252
+ - Look for issues with "QA", "test", "automation", "validate", "verify" in name
253
+ - Created early (with implementation) = good. Created months later = afterthought.
254
+ - Test issue with no work done late in the cycle = deduction
255
+
256
+ 4. **Bugs already filed**:
257
+ - Search for bugs linked to this issue's children
258
+ - Bugs filed during development = edge cases discovered in execution (not design)
259
+ - Bugs of the form "what happens when X is missing/null/not configured" = major deduction
260
+
261
+ 5. **Acceptance criteria quality** on child issues:
262
+ - Check if descriptions contain testable criteria
263
+ - Criteria should be measurable: "When X, then Y" not "it should work correctly"
264
+ - Rate: "good" (testable criteria), "weak" (vague criteria), "none" (no criteria)
265
+ - >50% "none": Major deduction
266
+ - >50% "good": Bonus
267
+
268
+ ### Deduction Examples
269
+ - No edge case language in description: -4 pts
270
+ - Assumptions stated without validation plan: -2 pts each
271
+ - No test issues: -4 pts
272
+ - Test issue added as afterthought (late creation, no work done): -2 pts
273
+ - Bugs filed for "missing config" scenarios during dev: -3 pts
274
+ - >50% of children lack acceptance criteria: -4 pts
275
+
276
+ ---
277
+
278
+ ## JSON Sidecar Schema
279
+
280
+ The JSON sidecar must follow this structure:
281
+
282
+ ```json
283
+ {
284
+ "identifier": "MIN-704",
285
+ "title": "Add Code Mode API discovery tools to MCP server",
286
+ "project": "Mind Your Now",
287
+ "milestone": "Q1 CY26",
288
+ "owner": "Edward Becker",
289
+ "score": 74,
290
+ "maxScore": 100,
291
+ "status": "Mostly Ready",
292
+ "statusColor": "green",
293
+ "assessmentDate": "2026-02-27",
294
+ "tracker": "linear",
295
+ "childIssueCount": 5,
296
+ "dimensions": {
297
+ "requirementsClarity": {
298
+ "score": 17,
299
+ "maxScore": 20,
300
+ "summary": "One-line summary of findings",
301
+ "findings": [
302
+ {
303
+ "finding": "Description of what was observed",
304
+ "impact": -3,
305
+ "source": "Traceability reference",
306
+ "recommendation": "What to do about it"
307
+ }
308
+ ]
309
+ },
310
+ "technicalDiscovery": { "score": 0, "maxScore": 20, "summary": "", "findings": [] },
311
+ "scopeDecomposition": { "score": 0, "maxScore": 20, "summary": "", "findings": [] },
312
+ "dependencies": { "score": 0, "maxScore": 20, "summary": "", "findings": [] },
313
+ "edgeCasesTestStrategy": { "score": 0, "maxScore": 20, "summary": "", "findings": [] }
314
+ },
315
+ "topBlockers": [
316
+ "Actionable blocker 1 with point-improvement estimate",
317
+ "Actionable blocker 2"
318
+ ],
319
+ "childAssessments": [
320
+ {
321
+ "identifier": "MIN-705",
322
+ "title": "Child Issue Title",
323
+ "status": "Backlog",
324
+ "hasAcceptanceCriteria": true,
325
+ "criteriaQuality": "good",
326
+ "notes": ""
327
+ }
328
+ ],
329
+ "externalDocuments": {
330
+ "found": false,
331
+ "type": null,
332
+ "attachedDate": null,
333
+ "devStartDate": null,
334
+ "attachedBeforeDevStarted": null,
335
+ "accessible": null,
336
+ "notes": ""
337
+ },
338
+ "wrapper": null,
339
+ "metadata": {
340
+ "skillVersion": "2.0.0",
341
+ "generatedBy": "spec-readiness",
342
+ "methodology": "5-dimension scoring model"
343
+ }
344
+ }
345
+ ```
346
+
347
+ ---
348
+
349
+ ## Scoring Calibration Reference
350
+
351
+ The scoring model baseline was derived from post-mortem analysis of two significantly overrun features. Both traced their overruns to incomplete requirements at development start.
352
+
353
+ ### Case 1 — Expected Score: ~42 (Partial / Risky)
354
+ - Quoted 20 days, actual 41.5+ days (+107% overrun)
355
+ - Requirements Clarity: ~7/20 (58+ description edits, open questions, BRD attached 4 months late, requirements changed during dev)
356
+ - Technical Discovery: ~8/20 (3 spikes but none gating, key repo not identified until week 5)
357
+ - Scope & Decomposition: ~12/20 (13 children reasonable, but "New Requirement" added mid-dev)
358
+ - Dependencies: ~5/20 (zero dependency links, investigation spike sat idle 3 months)
359
+ - Edge Cases: ~10/20 (QA issue exists but not started, validation rules debated during dev)
360
+
361
+ ### Case 2 — Expected Score: ~35 (Not Ready)
362
+ - Planned 1 quarter, took 3 quarters (3x the original plan)
363
+ - Requirements Clarity: ~8/20 (external ID prerequisite discovered after start)
364
+ - Technical Discovery: ~6/20 (team learning curve acknowledged but no buffer, no gating investigation)
365
+ - Scope & Decomposition: ~4/20 (notes said "split into 4 features" but it wasn't done, overflow to next quarter)
366
+ - Dependencies: ~7/20 (some implicit sequencing but no formal links)
367
+ - Edge Cases: ~10/20 (4 bugs for "missing config" scenarios, assumption that every record has a required field)
368
+
369
+ If your organization has its own post-mortems, use them to validate and adjust the deduction weights. The scoring guide point ranges and deduction examples are tunable through experience.