nightytidy 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +314 -0
  3. package/bin/nightytidy.js +3 -0
  4. package/package.json +55 -0
  5. package/src/checks.js +367 -0
  6. package/src/claude.js +655 -0
  7. package/src/cli.js +1012 -0
  8. package/src/consolidation.js +81 -0
  9. package/src/dashboard-html.js +496 -0
  10. package/src/dashboard-standalone.js +167 -0
  11. package/src/dashboard-tui.js +208 -0
  12. package/src/dashboard.js +427 -0
  13. package/src/env.js +100 -0
  14. package/src/executor.js +550 -0
  15. package/src/git.js +348 -0
  16. package/src/lock.js +186 -0
  17. package/src/logger.js +111 -0
  18. package/src/notifications.js +33 -0
  19. package/src/orchestrator.js +919 -0
  20. package/src/prompts/loader.js +55 -0
  21. package/src/prompts/manifest.json +138 -0
  22. package/src/prompts/specials/changelog.md +28 -0
  23. package/src/prompts/specials/consolidation.md +61 -0
  24. package/src/prompts/specials/doc-update.md +1 -0
  25. package/src/prompts/specials/report.md +95 -0
  26. package/src/prompts/steps/01-documentation.md +173 -0
  27. package/src/prompts/steps/02-test-coverage.md +181 -0
  28. package/src/prompts/steps/03-test-hardening.md +181 -0
  29. package/src/prompts/steps/04-test-architecture.md +130 -0
  30. package/src/prompts/steps/05-test-consolidation.md +165 -0
  31. package/src/prompts/steps/06-test-quality.md +211 -0
  32. package/src/prompts/steps/07-api-design.md +165 -0
  33. package/src/prompts/steps/08-security-sweep.md +207 -0
  34. package/src/prompts/steps/09-dependency-health.md +217 -0
  35. package/src/prompts/steps/10-codebase-cleanup.md +189 -0
  36. package/src/prompts/steps/11-crosscutting-concerns.md +196 -0
  37. package/src/prompts/steps/12-file-decomposition.md +263 -0
  38. package/src/prompts/steps/13-code-elegance.md +329 -0
  39. package/src/prompts/steps/14-architectural-complexity.md +297 -0
  40. package/src/prompts/steps/15-type-safety.md +192 -0
  41. package/src/prompts/steps/16-logging-error-message.md +173 -0
  42. package/src/prompts/steps/17-data-integrity.md +139 -0
  43. package/src/prompts/steps/18-performance.md +183 -0
  44. package/src/prompts/steps/19-cost-resource-optimization.md +136 -0
  45. package/src/prompts/steps/20-error-recovery.md +145 -0
  46. package/src/prompts/steps/21-race-condition-audit.md +178 -0
  47. package/src/prompts/steps/22-bug-hunt.md +229 -0
  48. package/src/prompts/steps/23-frontend-quality.md +210 -0
  49. package/src/prompts/steps/24-uiux-audit.md +284 -0
  50. package/src/prompts/steps/25-state-management.md +170 -0
  51. package/src/prompts/steps/26-perceived-performance.md +190 -0
  52. package/src/prompts/steps/27-devops.md +165 -0
  53. package/src/prompts/steps/28-scheduled-job-chron-jobs.md +141 -0
  54. package/src/prompts/steps/29-observability.md +152 -0
  55. package/src/prompts/steps/30-backup-check.md +155 -0
  56. package/src/prompts/steps/31-product-polish-ux-friction.md +122 -0
  57. package/src/prompts/steps/32-feature-discovery-opportunity.md +128 -0
  58. package/src/prompts/steps/33-strategic-opportunities.md +217 -0
  59. package/src/report.js +540 -0
  60. package/src/setup.js +133 -0
  61. package/src/sync.js +536 -0
@@ -0,0 +1,55 @@
1
+ /**
2
+ * Loads improvement prompts from individual markdown files.
3
+ *
4
+ * Reads manifest.json for step ordering and display names,
5
+ * then loads each prompt's content from steps/*.md files.
6
+ * Exports the same interface as the old steps.js: STEPS array
7
+ * of { number, name, prompt } plus DOC_UPDATE_PROMPT, CHANGELOG_PROMPT, and CONSOLIDATION_PROMPT.
8
+ *
9
+ * Uses `export let` for ESM live bindings — reloadSteps() can
10
+ * reassign these after a sync, and all importers see updated values.
11
+ */
12
+
13
+ import { readFileSync } from 'fs';
14
+ import { fileURLToPath } from 'url';
15
+ import path from 'path';
16
+
17
const __dirname = path.dirname(fileURLToPath(import.meta.url));

/**
 * Read a UTF-8 text file located relative to this module's directory.
 * @param {...string} segments - Path segments joined beneath __dirname.
 * @returns {string} The file's contents.
 */
function loadFile(...segments) {
  const filePath = path.join(__dirname, ...segments);
  return readFileSync(filePath, 'utf8');
}
22
+
23
/**
 * Build the steps list from manifest.json plus the steps/*.md prompt files.
 *
 * Step numbers are derived from manifest order (1-based), not from the
 * numeric prefix in each file's id.
 *
 * @returns {{number: number, name: string, prompt: string}[]}
 */
function loadAllSteps() {
  const manifest = JSON.parse(loadFile('manifest.json'));
  const steps = [];
  manifest.steps.forEach(({ id, name }, i) => {
    steps.push({
      number: i + 1,
      name,
      prompt: loadFile('steps', `${id}.md`),
    });
  });
  return steps;
}
31
+
32
// These use `export let` (not const) intentionally: reloadSteps() reassigns
// them after a prompt sync, and ESM live bindings let every importer observe
// the refreshed values through their existing imports.

// Ordered improvement steps as { number, name, prompt }, one per manifest entry.
export let STEPS = loadAllSteps();

// Prompt asking the agent to update documentation and commit after a step.
export let DOC_UPDATE_PROMPT = loadFile('specials', 'doc-update.md');

// Prompt producing the plain-English, non-technical end-of-run summary.
export let CHANGELOG_PROMPT = loadFile('specials', 'changelog.md');

// Prompt consolidating step outputs into a prioritized action plan.
export let CONSOLIDATION_PROMPT = loadFile('specials', 'consolidation.md');

// Prompt generating the final run report file.
export let REPORT_PROMPT = loadFile('specials', 'report.md');
41
+
42
/**
 * Re-read manifest.json and every prompt file from disk, replacing the
 * exported bindings in place. Because the exports are ESM live bindings,
 * every importer immediately sees the refreshed values through its
 * existing binding references.
 *
 * Call after syncPrompts() has written updated prompt files to disk.
 */
export function reloadSteps() {
  const special = (file) => loadFile('specials', file);
  STEPS = loadAllSteps();
  DOC_UPDATE_PROMPT = special('doc-update.md');
  CHANGELOG_PROMPT = special('changelog.md');
  CONSOLIDATION_PROMPT = special('consolidation.md');
  REPORT_PROMPT = special('report.md');
}
@@ -0,0 +1,138 @@
1
+ {
2
+ "version": 1,
3
+ "sourceUrl": "https://docs.google.com/document/d/e/2PACX-1vRtQJyud1t-ESLJqKXTdBGTzFnkxFvZRKJ8_MrOjSGn4fmBluXWVTvJZFIxgSefVag8MoAW8bd0-A6K/pub",
4
+ "steps": [
5
+ {
6
+ "id": "01-documentation",
7
+ "name": "Documentation"
8
+ },
9
+ {
10
+ "id": "02-test-coverage",
11
+ "name": "Test Coverage"
12
+ },
13
+ {
14
+ "id": "03-test-hardening",
15
+ "name": "Test Hardening"
16
+ },
17
+ {
18
+ "id": "04-test-architecture",
19
+ "name": "Test Architecture"
20
+ },
21
+ {
22
+ "id": "05-test-consolidation",
23
+ "name": "Test Consolidation"
24
+ },
25
+ {
26
+ "id": "06-test-quality",
27
+ "name": "Test Quality"
28
+ },
29
+ {
30
+ "id": "07-api-design",
31
+ "name": "API Design"
32
+ },
33
+ {
34
+ "id": "08-security-sweep",
35
+ "name": "Security Sweep"
36
+ },
37
+ {
38
+ "id": "09-dependency-health",
39
+ "name": "Dependency Health"
40
+ },
41
+ {
42
+ "id": "10-codebase-cleanup",
43
+ "name": "Codebase Cleanup"
44
+ },
45
+ {
46
+ "id": "11-crosscutting-concerns",
47
+ "name": "Cross-Cutting Concerns"
48
+ },
49
+ {
50
+ "id": "12-file-decomposition",
51
+ "name": "File Decomposition"
52
+ },
53
+ {
54
+ "id": "13-code-elegance",
55
+ "name": "Code Elegance"
56
+ },
57
+ {
58
+ "id": "14-architectural-complexity",
59
+ "name": "Architectural Complexity"
60
+ },
61
+ {
62
+ "id": "15-type-safety",
63
+ "name": "Type Safety"
64
+ },
65
+ {
66
+ "id": "16-logging-error-message",
67
+ "name": "Logging & Error Messages"
68
+ },
69
+ {
70
+ "id": "17-data-integrity",
71
+ "name": "Data Integrity"
72
+ },
73
+ {
74
+ "id": "18-performance",
75
+ "name": "Performance"
76
+ },
77
+ {
78
+ "id": "19-cost-resource-optimization",
79
+ "name": "Cost & Resource Optimization"
80
+ },
81
+ {
82
+ "id": "20-error-recovery",
83
+ "name": "Error Recovery"
84
+ },
85
+ {
86
+ "id": "21-race-condition-audit",
87
+ "name": "Race Condition Audit"
88
+ },
89
+ {
90
+ "id": "22-bug-hunt",
91
+ "name": "Bug Hunt"
92
+ },
93
+ {
94
+ "id": "23-frontend-quality",
95
+ "name": "Frontend Quality"
96
+ },
97
+ {
98
+ "id": "24-uiux-audit",
99
+ "name": "UI/UX Audit"
100
+ },
101
+ {
102
+ "id": "25-state-management",
103
+ "name": "State Management"
104
+ },
105
+ {
106
+ "id": "26-perceived-performance",
107
+ "name": "Perceived Performance"
108
+ },
109
+ {
110
+ "id": "27-devops",
111
+ "name": "DevOps"
112
+ },
113
+ {
114
+ "id": "28-scheduled-job-chron-jobs",
115
+ "name": "Scheduled Job & Cron Jobs"
116
+ },
117
+ {
118
+ "id": "29-observability",
119
+ "name": "Observability"
120
+ },
121
+ {
122
+ "id": "30-backup-check",
123
+ "name": "Backup Check"
124
+ },
125
+ {
126
+ "id": "31-product-polish-ux-friction",
127
+ "name": "Product Polish & UX Friction"
128
+ },
129
+ {
130
+ "id": "32-feature-discovery-opportunity",
131
+ "name": "Feature Discovery & Opportunity"
132
+ },
133
+ {
134
+ "id": "33-strategic-opportunities",
135
+ "name": "Strategic Opportunities"
136
+ }
137
+ ]
138
+ }
@@ -0,0 +1,28 @@
1
+ You just finished an overnight codebase improvement run. Your job now is to write a plain-English summary of everything that changed — written for someone who is NOT a developer.
2
+
3
+ Review the full git log and diffs for this run (all commits on this branch). Then write a summary that:
4
+
5
+ 1. Uses first person ("I") as if you personally worked on the codebase overnight
6
+ 2. Uses zero jargon — explain everything in terms a non-technical person would understand
7
+ 3. References SPECIFIC numbers from the actual changes (e.g., "I added 47 tests" not "I improved test coverage"; "I removed 1,200 lines of code that weren't being used" not "I cleaned up dead code")
8
+ 4. Groups related changes into short, friendly paragraphs — don't use bullet points or headers
9
+ 5. Leads with the most impressive or valuable changes first
10
+ 6. Keeps the tone warm and slightly proud of the work done — like a helpful colleague leaving a note about what they accomplished overnight
11
+ 7. Ends with a brief honest note about anything that didn't go as planned (steps that failed or were skipped), framed constructively
12
+ 8. Is no longer than 400 words — concise and scannable
13
+
14
+ DO NOT use any of these words: refactor, lint, dependency, CI/CD, middleware, endpoint, schema, migration, module, pipeline, coverage metrics, regression, assertion, deprecation.
15
+
16
+ Instead of technical terms, describe what the change DOES for the person: "I made sure your login page can't be tricked into running malicious code" instead of "I fixed an XSS vulnerability in the auth middleware."
17
+
18
+ The summary should make a non-technical person feel genuinely excited about the improvements and confident that their codebase is in better shape — without needing to understand a single technical concept.
19
+
20
+ Output ONLY the summary text. No headers, no markdown formatting, no preamble.
21
+
22
+ DO NOT start your response with any of these patterns:
23
+ - "I understand" / "I'm ready" / "I'll help" / "Sure" / "Certainly"
24
+ - "Here is" / "Here's" / "Based on" / "Let me"
25
+ - "Of course" / "Absolutely" / "Great"
26
+ - Any acknowledgment of these instructions
27
+
28
+ Begin your response with the very first word of your actual summary. Your response will be embedded directly into a document — any conversational preamble will be visible to the reader and look broken.
@@ -0,0 +1,61 @@
1
+ You just completed a multi-step automated codebase improvement run. Below are the outputs from each step — what was analyzed, changed, and recommended.
2
+
3
+ Your task is to produce a **consolidated, prioritized action plan** of recommendations that still need to be done.
4
+
5
+ ## Instructions
6
+
7
+ 1. Review each step's output to extract actionable recommendations, suggestions, and identified issues.
8
+ 2. **Check the current codebase** — read the relevant files to determine which recommendations have ALREADY been implemented by previous steps in this run.
9
+ 3. **Deduplicate** — if multiple steps flagged the same issue, consolidate into one recommendation.
10
+ 4. **Tier** the remaining (not-yet-implemented) items by importance.
11
+ 5. Output the action plan in the exact format below.
12
+
13
+ ## Output Format
14
+
15
+ ```markdown
16
+ # NightyTidy Action Plan
17
+
18
+ > Generated from a {N}-step improvement run. Items below have been verified as **not yet implemented** in the current codebase.
19
+
20
+ ## Critical
21
+
22
+ <!-- Security vulnerabilities, data loss risks, breaking bugs, blocking issues -->
23
+
24
+ ### [Short, specific title]
25
+ - **What**: [Concrete action — reference specific files, functions, or patterns]
26
+ - **Value**: [Why this matters — plain language, one sentence]
27
+ - **Impact**: [Which files/modules/areas are affected]
28
+ - **Risk**: [Low / Medium / High — risk of implementing this change, and why]
29
+
30
+ ## High
31
+
32
+ <!-- Reliability, performance, error handling, significant code quality gaps -->
33
+
34
+ (same item format)
35
+
36
+ ## Medium
37
+
38
+ <!-- Maintainability, test coverage gaps, refactoring opportunities, minor UX issues -->
39
+
40
+ (same item format)
41
+
42
+ ## Low
43
+
44
+ <!-- Polish, style improvements, nice-to-haves, minor optimizations -->
45
+
46
+ (same item format)
47
+
48
+ ## Summary
49
+
50
+ [One sentence on overall codebase health. One sentence on the single highest-value next action.]
51
+ ```
52
+
53
+ ## Rules
54
+
55
+ - Do NOT include anything already implemented in the codebase — verify by reading files.
56
+ - Do NOT include vague advice like "add more tests" — be specific about WHAT to test and WHERE.
57
+ - Each recommendation MUST reference specific files, functions, or code patterns.
58
+ - Deduplicate ruthlessly — one item per distinct issue, even if multiple steps found it.
59
+ - Maximum **5 items per tier** (20 items total). Prioritize ruthlessly.
60
+ - If a tier has zero items, include the heading with a note: *No items at this priority level.*
61
+ - Output ONLY the markdown document. No preamble, no commentary, no code fences wrapping the whole document.
@@ -0,0 +1 @@
1
+ Please update any and all documentation (if necessary) so future AIs know about these changes (only if it will be value-add information to them) and do a git commit/merge.
@@ -0,0 +1,95 @@
1
+ You are generating the final run report for a NightyTidy codebase improvement run. You will be given:
2
+
3
+ 1. Pre-built markdown sections (summary table, step results, failed steps, undo instructions) — include these VERBATIM
4
+ 2. Step outputs from the improvement run — use these to generate the action plan section
5
+ 3. The report filename to write to
6
+
7
+ Your job is to produce a single markdown file that combines a human-friendly narration with the pre-built sections and an action plan.
8
+
9
+ ## Part 1: Narration
10
+
11
+ Review the full git log and diffs for this run (all commits on this branch). Write a plain-English summary that:
12
+
13
+ 1. Uses first person ("I") as if you personally worked on the codebase overnight
14
+ 2. Uses zero jargon — explain everything in terms a non-technical person would understand
15
+ 3. References SPECIFIC numbers from the actual changes (e.g., "I added 47 tests" not "I improved test coverage"; "I removed 1,200 lines of code that weren't being used" not "I cleaned up dead code")
16
+ 4. Groups related changes into short, friendly paragraphs — don't use bullet points or headers
17
+ 5. Leads with the most impressive or valuable changes first
18
+ 6. Keeps the tone warm and slightly proud of the work done — like a helpful colleague leaving a note about what they accomplished overnight
19
+ 7. Ends with a brief honest note about anything that didn't go as planned (steps that failed or were skipped), framed constructively
20
+ 8. Is no longer than 400 words — concise and scannable
21
+
22
+ DO NOT use any of these words: refactor, lint, dependency, CI/CD, middleware, endpoint, schema, migration, module, pipeline, coverage metrics, regression, assertion, deprecation.
23
+
24
+ Instead of technical terms, describe what the change DOES for the person: "I made sure your login page can't be tricked into running malicious code" instead of "I fixed an XSS vulnerability in the auth middleware."
25
+
26
+ ## Part 2: Action Plan
27
+
28
+ Review the step outputs provided below to extract actionable recommendations that still need to be done.
29
+
30
+ 1. Review each step's output to extract actionable recommendations, suggestions, and identified issues.
31
+ 2. **Check the current codebase** — read the relevant files to determine which recommendations have ALREADY been implemented by previous steps in this run.
32
+ 3. **Deduplicate** — if multiple steps flagged the same issue, consolidate into one recommendation.
33
+ 4. **Tier** the remaining (not-yet-implemented) items by importance.
34
+
35
+ Structure the action plan as:
36
+
37
+ ```
38
+ ## NightyTidy Action Plan
39
+
40
+ > Generated from a {N}-step improvement run. Items below have been verified as **not yet implemented** in the current codebase.
41
+
42
+ ### Critical
43
+ <!-- Security vulnerabilities, data loss risks, breaking bugs, blocking issues -->
44
+ (items or "No items at this priority level.")
45
+
46
+ ### High
47
+ <!-- Reliability, performance, error handling, significant code quality gaps -->
48
+ (items)
49
+
50
+ ### Medium
51
+ <!-- Maintainability, test coverage gaps, refactoring opportunities, minor UX issues -->
52
+ (items)
53
+
54
+ ### Low
55
+ <!-- Polish, style improvements, nice-to-haves, minor optimizations -->
56
+ (items)
57
+
58
+ ### Summary
59
+ [One sentence on overall codebase health. One sentence on the single highest-value next action.]
60
+ ```
61
+
62
+ Each item uses this format:
63
+ - **[Short, specific title]**: [Concrete action — reference specific files, functions, or patterns]. Value: [Why this matters]. Impact: [Which areas affected]. Risk: [Low/Medium/High].
64
+
65
+ Rules:
66
+ - Do NOT include anything already implemented — verify by reading files
67
+ - Be specific — reference files, functions, patterns. No vague advice like "add more tests"
68
+ - Maximum 5 items per tier (20 total)
69
+ - Deduplicate ruthlessly
70
+
71
+ ## Part 3: Write the Report File
72
+
73
+ Write the complete report to the file specified below. Use this exact structure:
74
+
75
+ ```
76
+ # NightyTidy Report — {date}
77
+
78
+ {your narration from Part 1}
79
+
80
+ ---
81
+
82
+ {VERBATIM summary section}
83
+ {VERBATIM step results table}
84
+ {VERBATIM failed steps section, if present}
85
+
86
+ {your action plan from Part 2}
87
+
88
+ {VERBATIM undo section}
89
+ ```
90
+
91
+ Rules:
92
+ 1. The pre-built sections below are wrapped in VERBATIM markers. Copy them EXACTLY as-is into the output file. Do not reformat, reword, or restructure them.
93
+ 2. Write the complete report to the exact filename specified.
94
+ 3. Commit the file with message: "NightyTidy: Add run report"
95
+ 4. Do NOT start the narration with any preamble ("I understand", "Sure", "Here is", etc.). Begin with the first word of your actual summary.
@@ -0,0 +1,173 @@
1
+ You are running an overnight documentation generation pass. Deeply understand this codebase and produce a three-tier documentation system optimized for AI coding agents, plus human-facing reference docs. Work on branch `documentation-[date]`.
2
+
3
+ ## The Three-Tier System
4
+
5
+ AI agents pay a token cost for every line loaded into context — whether relevant or not. A 1,000-line guide burns ~31K tokens (~15% of 200K window) on every conversation. The fix: tiered loading.
6
+
7
+ - **Tier 1 (Always Loaded):** Rules/conventions preventing mistakes on ANY task. Compact — target 5-7% of context.
8
+ - **Tier 2 (On-Demand):** Per-topic implementation details. Loaded only when relevant. ~1-2% per task.
9
+ - **Tier 3 (Deep Reference):** Human-facing docs, ADRs, API reference. Never auto-loaded. Zero token cost.
10
+
11
+ | Tier | Lines | Tokens | % of 200K |
12
+ |------|-------|--------|-----------|
13
+ | Always (Tier 1) | 300-400 | 10-13K | 5-7% |
14
+ | Per-task (Tier 2, 1-2 files) | 60-120 | 2-4K | 1-2% |
15
+ | **Typical total** | **360-520** | **12-17K** | **6-9%** |
16
+
17
+ Primary deliverable: Tier 1 + Tier 2. Tier 3 is secondary.
18
+
19
+ ---
20
+
21
+ ## Phases
22
+
23
+ ### Phase 0: Check Existing Standards
24
+
25
+ Look for CLAUDE.md, .cursorrules, CONTRIBUTING.md, or similar. **If conflicts with three-tier system → STOP and ask user** with: what you found, what conflicts, 2-3 options with tradeoffs. No conflicts → proceed.
26
+
27
+ ### Phase 1: Codebase Discovery
28
+
29
+ Read and map everything. No files produced — only understanding.
30
+
31
+ **Map:** App identity, tech stack, audience. Directory responsibilities. Request/data flow (entry → routing → middleware → handlers → data → response). External deps. Module dependency graph. Architectural patterns.
32
+
33
+ **Conventions:** Naming (files, vars, functions, components, DB). Imports, error handling, testing, state management. Lint/format configs. Build/test/deploy commands. Types as self-documentation.
34
+
35
+ **Pitfalls:** Non-obvious side effects, library workarounds, magic values, complex regex, unexplained constants, non-obvious business logic.
36
+
37
+ **Cluster** learnings into topic areas → these become Tier 2 files.
38
+
39
+ ### Phase 2: CLAUDE.md (Tier 1)
40
+
41
+ Create `CLAUDE.md` at project root. **Target: 250-350 lines. Hard constraint.**
42
+
43
+ **Inclusion test:** *"If I removed this, would the AI write incorrect code on an unrelated task?"* No → Tier 2.
44
+
45
+ **Required sections:**
46
+ - **Project Identity** — One paragraph: what, who, why
47
+ - **Workflow Rules** — Non-negotiable process (deploy, test, etc.)
48
+ - **Tech Stack** — Table: technology | version | purpose
49
+ - **Project Structure** — Condensed tree, ~30 lines max, top-level + key second-level
50
+ - **Architectural Rules** — Do/don't imperatives, not explanations
51
+ - **Data Model Overview** — Collection/table names + relationships, not field-level
52
+ - **Auth Model** (if applicable) — Roles + high-level flow
53
+ - **Environment Variables** — What's needed to run
54
+ - **Build/Deploy Commands** — Copy-paste ready
55
+ - **Coding Conventions** — Only those consistently followed in code
56
+ - **Design System Rules** (if applicable) — Only if affecting every UI task; otherwise Tier 2
57
+ - **Documentation Hierarchy** — Table telling AI where knowledge lives:
58
+ ```markdown
59
+ ## Documentation Hierarchy
60
+
61
+ | Layer | Loaded | What goes here |
62
+ |-------|--------|---------------|
63
+ | **CLAUDE.md** | Every conversation | Rules preventing mistakes on ANY task |
64
+ | **MEMORY.md** | Every conversation | Cross-cutting patterns/pitfalls |
65
+ | **Sub-memory** (.claude/memory/) | On demand | Feature-specific deep dives |
66
+ | **Inline comments** | When code is read | Non-obvious "why" explanations |
67
+
68
+ Rule: Prevents mistakes on unrelated tasks → CLAUDE.md. Spans features → MEMORY.md. One feature only → sub-memory. Single line → inline comment.
69
+ ```
70
+
71
+ **Does NOT belong in CLAUDE.md:** Feature implementation details, API response shapes, field-level schemas, testing patterns, debugging notes, security findings, historical context. All → Tier 2/3.
72
+
73
+ **Format:** Terse, imperative. Tables and bullets, not paragraphs.
74
+
75
+ ### Phase 3: Tier 2 Memory Files
76
+
77
+ Create files at `.claude/memory/`.
78
+
79
+ **Rules:** One topic per file, 40-80 lines. Terse reference format. Don't repeat CLAUDE.md. Name by topic (`testing.md`) not area (`backend-stuff.md`). Assume reader has CLAUDE.md loaded.
80
+
81
+ **Each file covers:** Patterns/conventions, config details, correct-pattern snippets, common mistakes, external API quirks.
82
+
83
+ **Good** — tells you what to do:
84
+ ```markdown
85
+ ## Firestore Mock Routing
86
+ Callables using `loadPromptForPhase()` + `recordUsage()` need collection routing:
87
+ - `"prompts"` → return `{ doc: vi.fn(() => ({ get: async () => ({ exists: false }) })) }`
88
+ - `"_rateLimits"` → return safe no-op mock
89
+ ```
90
+
91
+ **Bad** — teaches background knowledge (that's Tier 3):
92
+ ```markdown
93
+ ## About Firestore Mock Routing
94
+ When writing tests for callable functions, you need to be aware that some callables
95
+ access multiple Firestore collections...
96
+ ```
97
+
98
+ **Suggested files** (create only what's relevant):
99
+
100
+ | File | Covers |
101
+ |------|--------|
102
+ | testing.md | Framework config, mocks, pitfalls |
103
+ | data-model.md | Field schemas, indexes, storage paths, migrations |
104
+ | api-providers.md | External endpoints, auth, rate limits, quirks |
105
+ | pitfalls-frontend.md | Framework gotchas, state traps, build issues |
106
+ | pitfalls-backend.md | Server gotchas, auth helpers, error patterns |
107
+ | feature-inventory.md | Features, shared components, reusable systems |
108
+ | security.md | Auth details, vulnerabilities, audit findings |
109
+ | deployment.md | Deploy process, env configs, infrastructure |
110
+
111
+ Split/merge by project shape. **Target 8-15 files.** <5 = too broad. >20 = too granular.
112
+
113
+ ### Phase 4: MEMORY.md (Tier 1 — Index)
114
+
115
+ Create `.claude/memory/MEMORY.md`. **Target: 30-60 lines.** Index and state tracker only.
116
+ ```markdown
117
+ # Project Memory — Index
118
+ [One-line description]. See CLAUDE.md for rules.
119
+
120
+ ## Current State
121
+ - [Key metrics: test count, endpoints, deploy URL, etc.]
122
+ - [Recent major changes from git]
123
+
124
+ ## Topic Files
125
+ | File | When to load |
126
+ |------|-------------|
127
+ | testing.md | Writing or fixing tests |
128
+ | data-model.md | Database schema or queries |
129
+ ```
130
+
131
+ ### Phase 5: Version Control
132
+
133
+ `.gitignore`:
134
+
135
+ ## Chat Output Requirement
136
+
137
+ In addition to writing the full report file, you MUST print a summary directly in the conversation when you finish. Do not make the user open the report to get the highlights. The chat summary should include:
138
+
139
+ ### 1. Status Line
140
+ One sentence: what you did, how long it took, and whether all tests still pass.
141
+
142
+ ### 2. Key Findings
143
+ The most important things discovered — bugs, risks, wins, or surprises. Each bullet should be specific and actionable, not vague. Lead with severity or impact.
144
+
145
+ **Good:** "CRITICAL: No backup configuration found for the primary Postgres database — total data loss risk."
146
+ **Bad:** "Found some issues with backups."
147
+
148
+ ### 3. Changes Made (if applicable)
149
+ Bullet list of what was actually modified, added, or removed. Skip this section for read-only analysis runs.
150
+
151
+ ### 4. Recommendations
152
+
153
+ If there are legitimately beneficial recommendations worth pursuing right now, present them in a table. Do **not** force recommendations — if the audit surfaced no actionable improvements, simply state that no recommendations are warranted at this time and move on.
154
+
155
+ When recommendations exist, use this table format:
156
+
157
+ | # | Recommendation | Impact | Risk if Ignored | Worth Doing? | Details |
158
+ |---|---|---|---|---|---|
159
+ | *Sequential number* | *Short description (≤10 words)* | *What improves if addressed* | *Low / Medium / High / Critical* | *Yes / Probably / Only if time allows* | *1–3 sentences explaining the reasoning, context, or implementation guidance* |
160
+
161
+ Order rows by risk descending (Critical → High → Medium → Low). Be honest in the "Worth Doing?" column — not everything flagged is worth the engineering time. If a recommendation is marginal, say so.
162
+
163
+ ### 5. Report Location
164
+ State the full path to the detailed report file for deeper review.
165
+
166
+ Create `audit-reports/` in project root if needed. Save as `audit-reports/01_DOCUMENTATION_COVERAGE_REPORT_[run-number]_[date]_[time in user's local time].md`, incrementing run number based on existing reports.
167
+
168
+ ---
169
+
170
+ **Formatting rules for chat output:**
171
+ - Use markdown headers, bold for severity labels, and bullet points for scannability.
172
+ - Do not duplicate the full report contents — just the highlights and recommendations.
173
+ - If you made zero findings in a phase, say so in one line rather than omitting it silently.