smash-os-install 0.2.2 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/install.mjs +848 -671
  2. package/package.json +2 -2
package/install.mjs CHANGED
@@ -1,671 +1,848 @@
1
- #!/usr/bin/env node
2
- /**
3
- * smash-os-install — Install the SmashOS Claude Code harness into any repo.
4
- *
5
- * Usage (inside your repo root):
6
- * npx smash-os-install — connected mode (requires SmashOS web app)
7
- * npx smash-os-install --local — local-only mode (no web app, no API keys)
8
- *
9
- * What it does (connected):
10
- * 1. Prompts for your SmashOS URL, Repo ID, and API key
11
- * 2. Validates credentials against the SmashOS API
12
- * 3. Fetches the generated harness files for your repo
13
- * 4. Writes CLAUDE.md, .claude/hooks/, .claude/skills/, .env.smash-os
14
- * 5. Merges hooks into existing .claude/settings.json (if present)
15
- * 6. Adds .env.smash-os to .gitignore
16
- *
17
- * What it does (--local):
18
- * 1. Prompts for project name and tech stack only
19
- * 2. Writes CLAUDE.md + /ai/ skeleton + 2 skills locally
20
- * 3. No web app connection required
21
- */
22
-
23
- import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
24
- import { join, dirname, basename } from 'path';
25
- import { homedir } from 'os';
26
- import { execSync } from 'child_process';
27
- import prompts from 'prompts';
28
- import chalk from 'chalk';
29
-
30
- const cwd = process.cwd();
31
- const isLocal = process.argv.includes('--local');
32
-
33
- // ─── Helpers ──────────────────────────────────────────────────────────────────
34
-
35
- function writeFile(relPath, content) {
36
- const abs = join(cwd, relPath);
37
- mkdirSync(dirname(abs), { recursive: true });
38
- writeFileSync(abs, content, 'utf8');
39
- }
40
-
41
- function mergeSettingsJson(newSettingsContent) {
42
- const settingsPath = join(cwd, '.claude', 'settings.json');
43
-
44
- if (!existsSync(settingsPath)) {
45
- writeFile('.claude/settings.json', newSettingsContent);
46
- return false; // not merged, freshly created
47
- }
48
-
49
- let existing;
50
- try {
51
- existing = JSON.parse(readFileSync(settingsPath, 'utf8'));
52
- } catch {
53
- // Unreadable overwrite
54
- writeFile('.claude/settings.json', newSettingsContent);
55
- return false;
56
- }
57
-
58
- const incoming = JSON.parse(newSettingsContent);
59
- if (!existing.hooks) existing.hooks = {};
60
-
61
- for (const [event, hookList] of Object.entries(incoming.hooks ?? {})) {
62
- if (!existing.hooks[event]) existing.hooks[event] = [];
63
- for (const hookGroup of hookList) {
64
- for (const hook of hookGroup.hooks ?? []) {
65
- const alreadyPresent = existing.hooks[event].some((g) =>
66
- g.hooks?.some((h) => h.command === hook.command)
67
- );
68
- if (!alreadyPresent) {
69
- existing.hooks[event].push({ hooks: [hook] });
70
- }
71
- }
72
- }
73
- }
74
-
75
- writeFile('.claude/settings.json', JSON.stringify(existing, null, 2));
76
- return true; // merged into existing
77
- }
78
-
79
- function ensureGitignore(entry) {
80
- const gitignorePath = join(cwd, '.gitignore');
81
- if (existsSync(gitignorePath)) {
82
- const content = readFileSync(gitignorePath, 'utf8');
83
- if (content.includes(entry)) return;
84
- writeFileSync(gitignorePath, content.trimEnd() + '\n' + entry + '\n', 'utf8');
85
- } else {
86
- writeFileSync(gitignorePath, entry + '\n', 'utf8');
87
- }
88
- }
89
-
90
- // ─── Main ─────────────────────────────────────────────────────────────────────
91
-
92
- console.log('');
93
- console.log(chalk.bold(' SmashOS Harness Installer'));
94
- console.log(chalk.dim(isLocal ? ' Local-only mode — no web app required' : ' Installs the Claude Code harness into your repo'));
95
- console.log('');
96
-
97
- // ─── Local-only mode ──────────────────────────────────────────────────────────
98
-
99
- if (isLocal) {
100
- const onCancel = () => { console.log(chalk.yellow('\n Cancelled.')); process.exit(0); };
101
-
102
- const baseAnswers = await prompts([
103
- {
104
- type: 'text',
105
- name: 'projectName',
106
- message: 'Project name',
107
- initial: basename(cwd),
108
- validate: (v) => (v.trim().length > 0 ? true : 'Required'),
109
- },
110
- ], { onCancel });
111
-
112
- const { projectName } = baseAnswers;
113
-
114
- console.log('');
115
- console.log(chalk.bold(' Tech Stack'));
116
- console.log(chalk.dim(' Select your tools — choose "Other" to enter a custom value'));
117
- console.log('');
118
-
119
- const O = '__other__';
120
- const o = (prev) => prev === O ? 'text' : null;
121
- const req = (v) => v.trim().length > 0 ? true : 'Required';
122
-
123
- const stackAnswers = await prompts([
124
- // ── Frontend Framework ─────────────────────────────────────────────────
125
- {
126
- type: 'select',
127
- name: 'frontend',
128
- message: 'Frontend framework',
129
- choices: [
130
- { title: 'Next.js', value: 'Next.js' },
131
- { title: 'React Router v7', value: 'React Router v7' },
132
- { title: 'Remix', value: 'Remix' },
133
- { title: 'Vite + React (SPA)', value: 'Vite + React' },
134
- { title: 'SvelteKit', value: 'SvelteKit' },
135
- { title: 'Nuxt (Vue 3)', value: 'Nuxt' },
136
- { title: 'Astro', value: 'Astro' },
137
- { title: 'Angular', value: 'Angular' },
138
- { title: 'Solid Start', value: 'Solid Start' },
139
- { title: 'Other…', value: O },
140
- ],
141
- },
142
- { type: o, name: 'frontendCustom', message: 'Specify your frontend framework', validate: req },
143
-
144
- // ── Backend / API ──────────────────────────────────────────────────────
145
- {
146
- type: 'select',
147
- name: 'backend',
148
- message: 'Backend / API layer',
149
- choices: [
150
- { title: 'Fullstack (handled by framework)', value: 'Fullstack (framework handles API)' },
151
- { title: 'Node.js + Express', value: 'Node.js + Express' },
152
- { title: 'Node.js + Hono', value: 'Node.js + Hono' },
153
- { title: 'Node.js + Fastify', value: 'Node.js + Fastify' },
154
- { title: 'Node.js + NestJS', value: 'Node.js + NestJS' },
155
- { title: 'Python + FastAPI', value: 'Python + FastAPI' },
156
- { title: 'Python + Django / Flask', value: 'Python + Django/Flask' },
157
- { title: 'Go (Gin / Chi)', value: 'Go' },
158
- { title: 'Bun + Elysia', value: 'Bun + Elysia' },
159
- { title: 'Other…', value: O },
160
- ],
161
- },
162
- { type: o, name: 'backendCustom', message: 'Specify your backend/API layer', validate: req },
163
-
164
- // ── Database ───────────────────────────────────────────────────────────
165
- {
166
- type: 'select',
167
- name: 'database',
168
- message: 'Database',
169
- choices: [
170
- { title: 'Supabase (Postgres)', value: 'Supabase (Postgres)' },
171
- { title: 'Neon (Postgres)', value: 'Neon (Postgres)' },
172
- { title: 'MongoDB Atlas', value: 'MongoDB Atlas' },
173
- { title: 'PlanetScale / Vitess (MySQL)', value: 'PlanetScale (MySQL)' },
174
- { title: 'Firebase Firestore', value: 'Firebase Firestore' },
175
- { title: 'Turso (SQLite / libSQL)', value: 'Turso (SQLite)' },
176
- { title: 'Prisma + PostgreSQL', value: 'Prisma + PostgreSQL' },
177
- { title: 'Drizzle + D1 (SQLite)', value: 'Drizzle + Cloudflare D1' },
178
- { title: 'Upstash Redis', value: 'Upstash Redis' },
179
- { title: 'Other…', value: O },
180
- ],
181
- },
182
- { type: o, name: 'databaseCustom', message: 'Specify your database', validate: req },
183
-
184
- // ── Authentication ─────────────────────────────────────────────────────
185
- {
186
- type: 'select',
187
- name: 'auth',
188
- message: 'Authentication',
189
- choices: [
190
- { title: 'None / Custom', value: 'None' },
191
- { title: 'Clerk', value: 'Clerk' },
192
- { title: 'Supabase Auth', value: 'Supabase Auth' },
193
- { title: 'Auth.js (NextAuth.js)', value: 'Auth.js (NextAuth)' },
194
- { title: 'Better Auth', value: 'Better Auth' },
195
- { title: 'Firebase Auth', value: 'Firebase Auth' },
196
- { title: 'Auth0', value: 'Auth0' },
197
- { title: 'Lucia Auth', value: 'Lucia Auth' },
198
- { title: 'Kinde', value: 'Kinde' },
199
- { title: 'Other…', value: O },
200
- ],
201
- },
202
- { type: o, name: 'authCustom', message: 'Specify your auth provider', validate: req },
203
-
204
- // ── Deployment ─────────────────────────────────────────────────────────
205
- {
206
- type: 'select',
207
- name: 'deployment',
208
- message: 'Deployment target',
209
- choices: [
210
- { title: 'Vercel', value: 'Vercel' },
211
- { title: 'Cloudflare Workers / Pages', value: 'Cloudflare Workers/Pages' },
212
- { title: 'Netlify', value: 'Netlify' },
213
- { title: 'Railway', value: 'Railway' },
214
- { title: 'Fly.io', value: 'Fly.io' },
215
- { title: 'Render', value: 'Render' },
216
- { title: 'AWS (Lambda / ECS)', value: 'AWS' },
217
- { title: 'Google Cloud Run', value: 'Google Cloud Run' },
218
- { title: 'Self-hosted / VPS', value: 'Self-hosted / VPS' },
219
- { title: 'Other…', value: O },
220
- ],
221
- },
222
- { type: o, name: 'deploymentCustom', message: 'Specify your deployment target', validate: req },
223
-
224
- // ── Styling / UI ───────────────────────────────────────────────────────
225
- {
226
- type: 'select',
227
- name: 'styling',
228
- message: 'Styling / UI library',
229
- choices: [
230
- { title: 'Tailwind CSS', value: 'Tailwind CSS' },
231
- { title: 'shadcn/ui + Tailwind', value: 'shadcn/ui + Tailwind CSS' },
232
- { title: 'CSS Modules', value: 'CSS Modules' },
233
- { title: 'Material UI (MUI)', value: 'Material UI (MUI)' },
234
- { title: 'Chakra UI', value: 'Chakra UI' },
235
- { title: 'Styled Components / Emotion', value: 'Styled Components' },
236
- { title: 'Mantine', value: 'Mantine' },
237
- { title: 'Radix UI + Tailwind', value: 'Radix UI + Tailwind CSS' },
238
- { title: 'UnoCSS', value: 'UnoCSS' },
239
- { title: 'Other…', value: O },
240
- ],
241
- },
242
- { type: o, name: 'stylingCustom', message: 'Specify your styling/UI library', validate: req },
243
- ], { onCancel });
244
-
245
- function pick(val, custom) { return val === O ? (custom || 'Other') : val; }
246
-
247
- const techStack = [
248
- `- Frontend: ${pick(stackAnswers.frontend, stackAnswers.frontendCustom)}`,
249
- `- Backend: ${pick(stackAnswers.backend, stackAnswers.backendCustom)}`,
250
- `- Database: ${pick(stackAnswers.database, stackAnswers.databaseCustom)}`,
251
- `- Auth: ${pick(stackAnswers.auth, stackAnswers.authCustom)}`,
252
- `- Deployment: ${pick(stackAnswers.deployment, stackAnswers.deploymentCustom)}`,
253
- `- Styling: ${pick(stackAnswers.styling, stackAnswers.stylingCustom)}`,
254
- ].join('\n');
255
-
256
- console.log('');
257
- const today = new Date().toISOString().split('T')[0];
258
-
259
- const claudeMd = `# ${projectName} — SmashOS Harness Active
260
-
261
- You are operating inside the SmashOS AI Workflow Harness.
262
- You are not a single assistant. You are a coordinated AI engineering organisation.
263
-
264
- ## On Session Start
265
- 1. Read \`ai/context/product.md\` (if it exists)
266
- 2. Read \`ai/context/architecture.md\` (if it exists)
267
- 3. Read \`ai/context/coding-standards.md\` (if it exists)
268
- 4. Read \`ai/memory/decisions.md\` — last 20 entries (if it exists)
269
- 5. Adopt the role: Staff Engineer
270
-
271
- ## Cognitive Mode Rules
272
- THINKING → Staff Engineer, Product Manager (no file writes)
273
- EXECUTION → Senior Developer, DevOps (no specs or decisions)
274
- VALIDATION → Security Engineer, QA Engineer (no production code)
275
-
276
- ## Golden Rules
277
- - Never skip architecture review
278
- - Never write code without an approved spec
279
- - Never deploy without QA + Security validation
280
- - Output structured results only
281
- - Save decisions to memory after every significant choice
282
-
283
- ## Slash Commands
284
- - /smash-os:role [name] — switch cognitive mode + load role definition
285
-
286
- ## Tech Stack
287
- ${techStack}
288
- `;
289
-
290
- const aiFiles = {
291
- 'ai/orchestrator.md': `# ${projectName} — SmashOS Orchestrator
292
- **Generated:** ${today}
293
- **Version:** 1.0
294
-
295
- ---
296
-
297
- ## Overview
298
-
299
- You are operating inside the SmashOS AI Workflow Harness for **${projectName}**.
300
- You are not a single assistant. You are a coordinated AI engineering organisation.
301
-
302
- This orchestrator.md is your master boot document. Read it fully at the start of every session.
303
-
304
- ---
305
-
306
- ## On Session Start
307
-
308
- 1. Read \`/ai/context/product.md\` — understand what you are building and for whom
309
- 2. Read \`/ai/context/architecture.md\` — understand the technical system
310
- 3. Read \`/ai/context/coding-standards.md\` — know the rules before touching any file
311
- 4. Read \`/ai/memory/decisions.md\` (last 20 entries) — know what has been decided and why
312
- 5. Adopt the role: Staff Engineer
313
-
314
- ---
315
-
316
- ## Cognitive Mode Rules
317
-
318
- | Mode | Roles | Permitted | Forbidden |
319
- |---|---|---|---|
320
- | THINKING | Staff Engineer, Product Manager | Specs, decisions, architecture docs | File writes, commits |
321
- | EXECUTION | Senior Developer, DevOps Engineer | Code, commits, branches, PRs | Specs, decisions |
322
- | VALIDATION | Security Engineer, QA Engineer | Scores, test results, audit reports | Production code |
323
-
324
- The orchestrator hard-blocks mode mixing. Never mix thinking and execution in the same phase.
325
-
326
- ---
327
-
328
- ## Golden Rules
329
-
330
- 1. Never skip architecture review
331
- 2. Never write code without an approved spec
332
- 3. Never deploy without QA + Security validation
333
- 4. Output structured results only — every phase output is a typed JSON object
334
- 5. Save decisions to memory after every significant architectural choice
335
- 6. Revenue risk outranks technical purity
336
- 7. Block automation when stability drops below 40
337
-
338
- ---
339
-
340
- ## Active Role Definitions
341
-
342
- | Role | Mode | Primary Skill |
343
- |---|---|---|
344
- | Staff Engineer | THINKING | architecture-review |
345
- | Product Manager | THINKING | |
346
- | Senior Developer | EXECUTION | code-generation |
347
- | Security Engineer | VALIDATION | security-audit |
348
- | QA Engineer | VALIDATION | qa-validation |
349
- | DevOps Engineer | EXECUTION | — |
350
-
351
- Full role definitions: \`/ai/roles/{role-name}.md\`
352
-
353
- ---
354
-
355
- ## Selective Context Loading
356
-
357
- Load only what your role requires. Never load the full context bundle.
358
-
359
- | Role | Load |
360
- |---|---|
361
- | Product Manager | product.md + database.md + trigger payload |
362
- | Staff Engineer | architecture.md + coding-standards.md + decisions.md + previous phase output |
363
- | Senior Developer | architecture.md + coding-standards.md + approved spec |
364
- | Security Engineer | database.md + auth section of architecture.md + file diff |
365
- | QA Engineer | coding-standards.md + acceptance criteria + file diff |
366
- | DevOps Engineer | architecture.md + PR metadata + score summary |
367
-
368
- ---
369
-
370
- ## Product Summary
371
-
372
- *Context not yet generated. Fill in \`/ai/context/product.md\` with your project details.*
373
-
374
- ---
375
-
376
- ## Architecture Summary
377
-
378
- *Context not yet generated. Fill in \`/ai/context/architecture.md\` with your architecture.*
379
-
380
- ---
381
-
382
- ## Coding Standards Summary
383
-
384
- *Context not yet generated. Fill in \`/ai/context/coding-standards.md\` with your coding standards.*
385
-
386
- ---
387
-
388
- ## Database Summary
389
-
390
- *Context not yet generated. Fill in \`/ai/context/database.md\` with your database details.*
391
-
392
- ---
393
-
394
- ## Memory
395
-
396
- - Architecture decisions: \`/ai/memory/decisions.md\`
397
- - Lessons learned: \`/ai/memory/lessons.md\`
398
-
399
- After every completed session, append decisions and lessons to the relevant memory file.
400
-
401
- ---
402
-
403
- ## Workflow References
404
-
405
- - Feature: \`/ai/workflows/feature.md\`
406
- - Bug Fix: \`/ai/workflows/bug-fix.md\`
407
- - Weekly Improvement: \`/ai/workflows/weekly-improvement.md\`
408
-
409
- ---
410
-
411
- *Generated by smash-os-install (local mode) — fill in the /ai/context/ files to activate full context.*
412
- `,
413
- 'ai/context/product.md': `# Product Context ${projectName}\n\n## What is this project?\n<!-- Describe what this project does -->\n\n## Who uses it?\n<!-- Describe the users -->\n\n## Core goals\n<!-- List the main goals -->\n\n## Tech Stack\n${techStack}\n`,
414
- 'ai/context/architecture.md': `# Architecture — ${projectName}\n\n## Overview\n<!-- Describe the high-level architecture -->\n\n## Key decisions\n<!-- List major architectural decisions and why they were made -->\n\n## Constraints\n<!-- List things that must not change -->\n`,
415
- 'ai/context/coding-standards.md': `# Coding Standards — ${projectName}\n\n## Language & formatting\n<!-- ESLint config, prettier, etc. -->\n\n## Naming conventions\n<!-- Variables, files, functions -->\n\n## Patterns to follow\n<!-- e.g. always use server actions, never bypass RLS -->\n\n## Patterns to avoid\n<!-- e.g. no raw SQL, no any types -->\n`,
416
- 'ai/memory/decisions.md': `# Decisions Log\n\n<!-- SmashOS writes here after each session. Format: -->\n<!-- ## YYYY-MM-DD — Decision title -->\n<!-- **Rationale:** why this was chosen -->\n<!-- **Outcome:** what changed -->\n`,
417
- 'ai/memory/lessons.md': `# Lessons Learned\n\n<!-- SmashOS writes here after bugs and incidents. Format: -->\n<!-- ## YYYY-MM-DD — Lesson title -->\n<!-- **What happened:** -->\n<!-- **What to do differently:** -->\n`,
418
- };
419
-
420
- const localSkills = {
421
- 'smash-os-role': `---\nname: smash-os-role\ndescription: Switch cognitive mode and load a SmashOS role definition. Use when the user says "act as X", "switch to X role", "be the X", or names a SmashOS role (Staff Engineer, Product Manager, Senior Developer, Security Engineer, QA Engineer, DevOps Engineer).\n---\n\n# /smash-os:role\n\nSwitch to the named role and adopt its cognitive mode.\n\n## Roles\n\n| Role | Mode | Allowed |\n|---|---|---|\n| Staff Engineer | THINKING | Architecture, decisions, reviews |\n| Product Manager | THINKING | Specs, user stories, acceptance criteria |\n| Senior Developer | EXECUTION | Writing code, editing files |\n| Security Engineer | VALIDATION | Security review only, no code changes |\n| QA Engineer | VALIDATION | Testing and verification only |\n| DevOps Engineer | EXECUTION | Infrastructure, deployment |\n\n## Steps\n1. Read the role name from the argument (default: Staff Engineer)\n2. Announce: "Switching to [Role] — [Mode] mode"\n3. Load \`ai/roles/<role-slug>.md\` if it exists\n4. Apply the cognitive mode restrictions for the rest of the session\n`,
422
- 'smash-os-memory': `---\nname: smash-os-memory\ndescription: Show recent SmashOS decisions and lessons from the ai/memory/ folder. Use when the user says "show memory", "what decisions were made", "show lessons", or "smash-os memory".\n---\n\n# /smash-os:memory\n\nRead and display the local SmashOS memory files.\n\n## Steps\n1. Read \`ai/memory/decisions.md\` (last 20 entries)\n2. Read \`ai/memory/lessons.md\` (last 10 entries)\n3. Display them clearly — decisions first, then lessons\n4. If either file is empty or missing, say so\n`,
423
- };
424
-
425
- let written = 0;
426
-
427
- writeFile('CLAUDE.md', claudeMd);
428
- console.log(' ' + chalk.green('✓') + ' ' + chalk.white('CLAUDE.md'));
429
- written++;
430
-
431
- for (const [relPath, content] of Object.entries(aiFiles)) {
432
- if (!existsSync(join(cwd, relPath))) {
433
- writeFile(relPath, content);
434
- console.log(' ' + chalk.green('✓') + ' ' + chalk.white(relPath));
435
- written++;
436
- } else {
437
- console.log(' ' + chalk.dim('↷') + ' ' + chalk.dim(`${relPath} (already exists — skipped)`));
438
- }
439
- }
440
-
441
- console.log('');
442
- for (const [skillName, skillContent] of Object.entries(localSkills)) {
443
- const skillDir = join(homedir(), '.claude', 'skills', skillName);
444
- mkdirSync(skillDir, { recursive: true });
445
- writeFileSync(join(skillDir, 'SKILL.md'), skillContent, 'utf8');
446
- console.log(' ' + chalk.green('✓') + ' ' + chalk.white(`/smash-os:${skillName.replace('smash-os-', '')}`) + chalk.dim(' → ~/.claude/skills/'));
447
- written++;
448
- }
449
-
450
- console.log('');
451
- console.log(chalk.bold.green(' SmashOS harness installed!') + chalk.dim(` (${written} files — local mode)`));
452
- console.log('');
453
- console.log(chalk.dim(' Open Claude Code in this directory and start working:'));
454
- console.log(' ' + chalk.white(' claude .'));
455
- console.log('');
456
- console.log(chalk.dim(' Fill in the ai/context/ files with your project details,'));
457
- console.log(chalk.dim(' then Claude Code will load them automatically every session.'));
458
- console.log('');
459
-
460
- // ─── MCP Installation ───────────────────────────────────────────────────────
461
-
462
- const MCP_SERVERS = [
463
- {
464
- name: 'jcodemunch',
465
- title: 'jcodemunch — code intelligence (search, navigate, refactor)',
466
- cmd: 'claude mcp add jcodemunch --transport stdio -- uvx jcodemunch-mcp',
467
- },
468
- {
469
- name: 'jdocmunch',
470
- title: 'jdocmunch — documentation intelligence (index + search docs)',
471
- cmd: 'claude mcp add jdocmunch --transport stdio -- uvx jdocmunch-mcp',
472
- },
473
- {
474
- name: 'context7',
475
- title: 'context7 — live library docs for any npm/pip package',
476
- cmd: null, // built dynamically — requires API key prompt
477
- promptKey: true,
478
- },
479
- {
480
- name: 'chrome-devtools',
481
- title: 'chrome-devtools — browser automation and debugging',
482
- cmd: 'claude mcp add chrome-devtools --transport stdio -- npx -y chrome-devtools-mcp@latest',
483
- },
484
- ];
485
-
486
- // Check which MCPs are already installed
487
- let alreadyInstalled = new Set();
488
- try {
489
- const out = execSync('claude mcp list', { encoding: 'utf8', stdio: ['pipe','pipe','pipe'] });
490
- for (const s of MCP_SERVERS) {
491
- if (out.includes(s.name)) alreadyInstalled.add(s.name);
492
- }
493
- } catch { /* claude CLI not available — skip pre-check */ }
494
-
495
- const available = MCP_SERVERS.filter(s => !alreadyInstalled.has(s.name));
496
-
497
- if (alreadyInstalled.size > 0) {
498
- console.log(chalk.dim(` Already installed: ${[...alreadyInstalled].join(', ')}`));
499
- }
500
-
501
- if (available.length > 0) {
502
- console.log(chalk.bold(' MCP Servers'));
503
- console.log(chalk.dim(' These extend Claude Code with code intelligence and browser tools.'));
504
- console.log('');
505
-
506
- const { mcpChoices } = await prompts({
507
- type: 'multiselect',
508
- name: 'mcpChoices',
509
- message: 'Select MCP servers to install (space to toggle, enter to confirm)',
510
- choices: available.map(s => ({ title: s.title, value: s.name, selected: true })),
511
- hint: '- Space to select. Return to submit',
512
- }, { onCancel });
513
-
514
- if (mcpChoices && mcpChoices.length > 0) {
515
- console.log('');
516
-
517
- // Resolve context7 API key if selected
518
- let context7Key = '';
519
- if (mcpChoices.includes('context7')) {
520
- const { key } = await prompts({
521
- type: 'text',
522
- name: 'key',
523
- message: 'context7 API key (get one free at context7.com — leave blank to skip key)',
524
- }, { onCancel });
525
- context7Key = key || '';
526
- }
527
-
528
- for (const name of mcpChoices) {
529
- const server = MCP_SERVERS.find(s => s.name === name);
530
- let cmd = server.cmd;
531
- if (name === 'context7') {
532
- cmd = context7Key
533
- ? `claude mcp add context7 --transport http https://mcp.context7.com/mcp -H "CONTEXT7_API_KEY: ${context7Key}"`
534
- : 'claude mcp add context7 --transport http https://mcp.context7.com/mcp';
535
- }
536
- try {
537
- execSync(cmd, { stdio: 'pipe' });
538
- console.log(' ' + chalk.green('✓') + ' ' + chalk.white(name) + chalk.dim(' installed'));
539
- } catch (err) {
540
- console.log(' ' + chalk.red('✗') + ' ' + chalk.white(name) + chalk.dim(` — ${err.message.split('\n')[0]}`));
541
- console.log(' ' + chalk.dim(`Run manually: ${cmd}`));
542
- }
543
- }
544
- console.log('');
545
- }
546
- }
547
-
548
- process.exit(0);
549
- }
550
-
551
- // ─── Connected mode ────────────────────────────────────────────────────────────
552
-
553
- const answers = await prompts(
554
- [
555
- {
556
- type: 'text',
557
- name: 'apiUrl',
558
- message: 'SmashOS URL (where your SmashOS is deployed)',
559
- initial: 'http://localhost:5173',
560
- validate: (v) => (v.startsWith('http') ? true : 'Must be a valid URL'),
561
- },
562
- {
563
- type: 'text',
564
- name: 'repoId',
565
- message: 'Repo ID (from the SmashOS dashboard URL: /dashboard/repos/<ID>)',
566
- validate: (v) => (v.trim().length > 0 ? true : 'Required'),
567
- },
568
- {
569
- type: 'password',
570
- name: 'apiKey',
571
- message: 'API Key (from SmashOS Settings → API Keys)',
572
- validate: (v) => (v.trim().length > 0 ? true : 'Required'),
573
- },
574
- ],
575
- {
576
- onCancel: () => {
577
- console.log(chalk.yellow('\n Cancelled.'));
578
- process.exit(0);
579
- },
580
- }
581
- );
582
-
583
- const { apiUrl, repoId, apiKey } = answers;
584
- const cleanUrl = apiUrl.replace(/\/$/, '');
585
-
586
- // ─── Validate & fetch ─────────────────────────────────────────────────────────
587
-
588
- process.stdout.write('\n ' + chalk.dim('Connecting to SmashOS…'));
589
-
590
- let data;
591
- try {
592
- const resp = await fetch(`${cleanUrl}/api/repos/${repoId}/layer2-files`, {
593
- headers: { Authorization: `Bearer ${apiKey}` },
594
- });
595
-
596
- if (resp.status === 401) {
597
- console.log(chalk.red(' ✗'));
598
- console.error(chalk.red('\n Invalid API key or Repo ID. Check SmashOS → Settings → API Keys.\n'));
599
- process.exit(1);
600
- }
601
- if (resp.status === 404) {
602
- console.log(chalk.red(' ✗'));
603
- console.error(chalk.red('\n Repo not found. Check the Repo ID in your SmashOS dashboard URL.\n'));
604
- process.exit(1);
605
- }
606
- if (!resp.ok) {
607
- console.log(chalk.red(' ✗'));
608
- console.error(chalk.red(`\n SmashOS returned ${resp.status}. Is the URL correct?\n`));
609
- process.exit(1);
610
- }
611
-
612
- data = await resp.json();
613
- } catch (err) {
614
- console.log(chalk.red(' ✗'));
615
- console.error(chalk.red(`\n Could not reach SmashOS at ${cleanUrl}\n ${err.message}\n`));
616
- process.exit(1);
617
- }
618
-
619
- console.log(chalk.green(' ✓'));
620
- console.log(' ' + chalk.dim(`Connected to SmashOS — repo: ${chalk.white(data.repo.name)}`));
621
- console.log('');
622
-
623
- // ─── Write files ──────────────────────────────────────────────────────────────
624
-
625
- const { files } = data;
626
- let written = 0;
627
- let merged = false;
628
-
629
- for (const [relPath, content] of Object.entries(files)) {
630
- if (relPath === '.claude/settings.json') {
631
- merged = mergeSettingsJson(content);
632
- console.log(
633
- ' ' + chalk.green('✓') + ' ' +
634
- chalk.white('.claude/settings.json') +
635
- chalk.dim(merged ? ' (merged with existing)' : ' (created)')
636
- );
637
- } else {
638
- writeFile(relPath, content);
639
- console.log(' ' + chalk.green('✓') + ' ' + chalk.white(relPath));
640
- }
641
- written++;
642
- }
643
-
644
- // ─── Write .env.smash-os (pre-filled) ────────────────────────────────────────
645
-
646
- const envContent = [
647
- '# SmashOS harness credentials — do not commit this file',
648
- `SMASH_OS_API_URL=${cleanUrl}`,
649
- `SMASH_OS_REPO_ID=${repoId}`,
650
- `SMASH_OS_API_KEY=${apiKey}`,
651
- ].join('\n') + '\n';
652
-
653
- writeFile('.env.smash-os', envContent);
654
- console.log(' ' + chalk.green('✓') + ' ' + chalk.white('.env.smash-os') + chalk.dim(' (pre-filled)'));
655
-
656
- // ─── Gitignore ────────────────────────────────────────────────────────────────
657
-
658
- ensureGitignore('.env.smash-os');
659
- console.log(' ' + chalk.green('✓') + ' ' + chalk.dim('.env.smash-os added to .gitignore'));
660
-
661
- // ─── Done ─────────────────────────────────────────────────────────────────────
662
-
663
- console.log('');
664
- console.log(chalk.bold.green(' SmashOS harness installed!') + chalk.dim(` (${written} files)`));
665
- console.log('');
666
- console.log(chalk.dim(' Next steps:'));
667
- console.log(chalk.dim(' 1. Open a Claude Code session in this directory'));
668
- console.log(chalk.dim(' 2. The harness activates automatically on session start'));
669
- console.log(chalk.dim(` 3. Use ${chalk.white('/smash-os:run')} to trigger a pipeline`));
670
- console.log(chalk.dim(` 4. View results at: ${chalk.white(cleanUrl + '/dashboard/repos/' + repoId)}`));
671
- console.log('');
1
+ #!/usr/bin/env node
2
+ /**
3
+ * smash-os-install — Install the SmashOS Claude Code harness into any repo.
4
+ *
5
+ * Usage (inside your repo root):
6
+ * npx smash-os-install
7
+ *
8
+ * What it does:
9
+ * 1. Prompts for project name and tech stack
10
+ * 2. Writes CLAUDE.md + /ai/ skeleton + .smash-os-mode=local
11
+ * 3. Installs SmashOS skills globally (~/.claude/skills/)
12
+ * 4. No web app, no API keys, no cloud dependencies required
13
+ */
14
+
15
+ import { execSync } from 'child_process';
16
+ import { existsSync, readFileSync, writeFileSync, mkdirSync, readdirSync, copyFileSync } from 'fs';
17
+ import { join, dirname, basename } from 'path';
18
+ import { homedir } from 'os';
19
+ import prompts from 'prompts';
20
+ import chalk from 'chalk';
21
+
22
+ const cwd = process.cwd();
23
+ const isMarketing = process.argv.includes('--marketing');
24
+ const vaultConventions = join(process.env.USERPROFILE || process.env.HOME || homedir(), 'Desktop', 'SmashBurgerBar', 'SmashVault', 'Architecture', 'conventions.md');
25
+ const globalConventions = join(homedir(), '.claude', 'conventions.md');
26
+ const conventionsFile = existsSync(vaultConventions) ? vaultConventions : globalConventions;
27
+
28
+ // ─── Helpers ──────────────────────────────────────────────────────────────────
29
+
30
+ function writeFile(relPath, content) {
31
+ const abs = join(cwd, relPath);
32
+ mkdirSync(dirname(abs), { recursive: true });
33
+ writeFileSync(abs, content, 'utf8');
34
+ }
35
+
36
+ function mergeSettingsJson(newSettingsContent) {
37
+ const settingsPath = join(cwd, '.claude', 'settings.json');
38
+
39
+ if (!existsSync(settingsPath)) {
40
+ writeFile('.claude/settings.json', newSettingsContent);
41
+ return false;
42
+ }
43
+
44
+ let existing;
45
+ try {
46
+ existing = JSON.parse(readFileSync(settingsPath, 'utf8'));
47
+ } catch {
48
+ writeFile('.claude/settings.json', newSettingsContent);
49
+ return false;
50
+ }
51
+
52
+ const incoming = JSON.parse(newSettingsContent);
53
+ if (!existing.hooks) existing.hooks = {};
54
+
55
+ for (const [event, hookList] of Object.entries(incoming.hooks ?? {})) {
56
+ if (!existing.hooks[event]) existing.hooks[event] = [];
57
+ for (const hookGroup of hookList) {
58
+ for (const hook of hookGroup.hooks ?? []) {
59
+ const alreadyPresent = existing.hooks[event].some((g) =>
60
+ g.hooks?.some((h) => h.command === hook.command)
61
+ );
62
+ if (!alreadyPresent) {
63
+ existing.hooks[event].push({ hooks: [hook] });
64
+ }
65
+ }
66
+ }
67
+ }
68
+
69
+ writeFile('.claude/settings.json', JSON.stringify(existing, null, 2));
70
+ return true;
71
+ }
72
+
73
+ // ─── MCP Detection & Installation ─────────────────────────────────────────────
74
+
75
+ function getInstalledMcpOutput() {
76
+ try {
77
+ return execSync('claude mcp list', { encoding: 'utf8', stdio: 'pipe' }).toLowerCase();
78
+ } catch { return ''; }
79
+ }
80
+
81
+ const MCP_CATALOG = [
82
+ {
83
+ name: 'context7',
84
+ description: 'Live docs lookup (GSD research phases, library references)',
85
+ enhancementLevels: ['light', 'medium', 'high'],
86
+ autoInstall: 'claude mcp add --transport http context7 https://mcp.context7.com/mcp',
87
+ },
88
+ {
89
+ name: 'jcodemunch',
90
+ description: 'Code intelligence — symbol search, blast radius, dependency graphs',
91
+ enhancementLevels: ['light', 'medium', 'high'],
92
+ autoInstall: null,
93
+ docs: 'https://jcodemunch.com',
94
+ },
95
+ {
96
+ name: 'jdocmunch',
97
+ description: 'Documentation intelligence companion to jcodemunch',
98
+ enhancementLevels: ['medium', 'high'],
99
+ autoInstall: null,
100
+ docs: 'https://jcodemunch.com',
101
+ },
102
+ {
103
+ name: 'chrome-devtools',
104
+ description: 'Browser automation — visual testing and verification',
105
+ enhancementLevels: ['medium', 'high'],
106
+ autoInstall: null,
107
+ docs: 'https://github.com/modelcontextprotocol/servers',
108
+ },
109
+ {
110
+ name: 'claude-peers',
111
+ description: 'Parallel pipeline execution + cross-session awareness (Security+QA run simultaneously)',
112
+ enhancementLevels: ['medium', 'high'],
113
+ autoInstall: 'claude mcp add claude-peers npx @louislva/claude-peers-mcp',
114
+ docs: 'https://github.com/louislva/claude-peers-mcp',
115
+ },
116
+ {
117
+ name: 'openspace',
118
+ description: 'Self-improving skill library — captures execution patterns, reduces token usage over time',
119
+ enhancementLevels: ['high'],
120
+ autoInstall: 'claude mcp add openspace npx openspace-mcp',
121
+ docs: 'https://github.com/HKUDS/OpenSpace',
122
+ },
123
+ ];
124
+
125
+ async function checkAndInstallMcps(enhancement) {
126
+ if (enhancement === 'off') return;
127
+
128
+ const installed = getInstalledMcpOutput();
129
+ const relevant = MCP_CATALOG.filter(m => m.enhancementLevels.includes(enhancement));
130
+ const present = relevant.filter(m => installed.includes(m.name.toLowerCase()));
131
+ const missing = relevant.filter(m => !installed.includes(m.name.toLowerCase()));
132
+
133
+ console.log('');
134
+ console.log(chalk.bold(' Recommended MCPs') + chalk.dim(` for enhancement: ${enhancement}`));
135
+ console.log('');
136
+
137
+ for (const m of present) {
138
+ console.log(' ' + chalk.green('✓') + ' ' + chalk.white(m.name) + chalk.dim(` — ${m.description}`));
139
+ }
140
+
141
+ if (missing.length === 0) {
142
+ console.log(chalk.dim(' All recommended MCPs already installed.'));
143
+ return;
144
+ }
145
+
146
+ const autoInstallable = missing.filter(m => m.autoInstall);
147
+ const manualOnly = missing.filter(m => !m.autoInstall);
148
+
149
+ if (autoInstallable.length > 0) {
150
+ const { toInstall } = await prompts({
151
+ type: 'multiselect',
152
+ name: 'toInstall',
153
+ message: 'Install these MCPs now?',
154
+ choices: autoInstallable.map(m => ({
155
+ title: `${m.name} — ${m.description}`,
156
+ value: m.name,
157
+ selected: true,
158
+ })),
159
+ }, { onCancel: () => ({ toInstall: [] }) });
160
+
161
+ for (const name of (toInstall || [])) {
162
+ const mcp = autoInstallable.find(m => m.name === name);
163
+ try {
164
+ execSync(mcp.autoInstall, { encoding: 'utf8', stdio: 'pipe' });
165
+ console.log(' ' + chalk.green('✓') + ' ' + chalk.white(name) + chalk.dim(' installed'));
166
+ } catch (e) {
167
+ console.error(' ' + chalk.red('✗') + ' ' + name + ' — ' + (e.stderr?.toString().trim() || e.message));
168
+ }
169
+ }
170
+ }
171
+
172
+ if (manualOnly.length > 0) {
173
+ console.log('');
174
+ console.log(chalk.dim(' Manual setup required:'));
175
+ for (const m of manualOnly) {
176
+ console.log(' ' + chalk.yellow('→') + ' ' + chalk.white(m.name) + chalk.dim(` — ${m.docs}`));
177
+ }
178
+ }
179
+ }
180
+
181
+ // ─── Automation (Windows Task Scheduler) ──────────────────────────────────────
182
+
183
+ const SMASH_BASE = String.raw`C:\SmashOS`;
184
+ const REGISTRY_FILE = join(SMASH_BASE, 'projects.json');
185
+ const SCRIPTS_DIR = join(SMASH_BASE, 'scripts');
186
+ const SKILL_SCRIPTS = join(SMASH_BASE, '_skills', 'scripts');
187
+ const SKILL_LOGS = join(SMASH_BASE, '_skills', 'logs');
188
+
189
+ function autoRun(cmd) {
190
+ return execSync(cmd, { encoding: 'utf8', stdio: 'pipe' });
191
+ }
192
+
193
+ function findClaude() {
194
+ try {
195
+ const found = autoRun('where claude').trim().split('\n')[0].trim();
196
+ if (found && existsSync(found)) return found;
197
+ } catch { /* not in PATH */ }
198
+ const home = process.env.USERPROFILE || process.env.HOME || 'C:\\Users\\Administrator';
199
+ const candidates = [
200
+ join(home, '.local', 'bin', 'claude.exe'),
201
+ join(home, 'AppData', 'Local', 'Programs', 'claude', 'claude.exe'),
202
+ join(home, 'AppData', 'Roaming', 'npm', 'claude.cmd'),
203
+ 'C:\\Program Files\\claude\\claude.exe',
204
+ ];
205
+ for (const c of candidates) if (existsSync(c)) return c;
206
+ return null;
207
+ }
208
+
209
+ function loadRegistry() {
210
+ if (!existsSync(REGISTRY_FILE)) return [];
211
+ try { return JSON.parse(readFileSync(REGISTRY_FILE, 'utf8')); } catch { return []; }
212
+ }
213
+
214
+ function saveRegistry(projects) {
215
+ mkdirSync(SMASH_BASE, { recursive: true });
216
+ writeFileSync(REGISTRY_FILE, JSON.stringify(projects, null, 2), 'utf8');
217
+ }
218
+
219
+ function schtask(name, batPath, schedule) {
220
+ const user = process.env.USERNAME || process.env.USER || 'Administrator';
221
+ try {
222
+ autoRun(`schtasks /create /tn "SmashOS\\${name}" /tr "${batPath}" ${schedule} /f /ru "${user}"`);
223
+ console.log(' ' + chalk.green('✓') + ' SmashOS\\' + name);
224
+ } catch (e) {
225
+ console.error(' ' + chalk.red('✗') + ' SmashOS\\' + name + ' — ' + (e.stderr?.toString().trim() || e.message));
226
+ }
227
+ }
228
+
229
+ function buildBats(projects, claudeExe) {
230
+ const C = `"${claudeExe}" --dangerously-skip-permissions --print`;
231
+ const home = process.env.USERPROFILE || process.env.HOME || 'C:\\Users\\Administrator';
232
+ const hdr = `@echo off\nset CLAUDE="${claudeExe}" --dangerously-skip-permissions --print\n`;
233
+
234
+ function block(p, prompt, slug) {
235
+ const log = join(SMASH_BASE, p.name, 'logs', `${slug}.log`);
236
+ return `\n:: === ${p.name} ===\ncd /d "${p.path}"\necho [%date% %time%] Running ${slug}... >> "${log}"\n${C} "${prompt}" >> "${log}" 2>&1\necho [%date% %time%] Done. >> "${log}"`;
237
+ }
238
+ function blockCmd(p, cmd, slug) {
239
+ const log = join(SMASH_BASE, p.name, 'logs', `${slug}.log`);
240
+ return `\n:: === ${p.name} ===\ncd /d "${p.path}"\necho [%date% %time%] Running ${slug}... >> "${log}"\n${C} "${cmd}" >> "${log}" 2>&1\necho [%date% %time%] Done. >> "${log}"`;
241
+ }
242
+ function bat(slug, getBlock) { return hdr + '\n' + projects.map(p => getBlock(p)).join('\n') + '\n'; }
243
+
244
+ return [
245
+ { taskName: 'Projects\\LockCleanup', batFile: join(SCRIPTS_DIR, 'lock-cleanup.bat'), schedule: '/sc hourly /st 00:00', content: bat('lock-cleanup', p => block(p, 'Review .claude/scheduled_tasks.lock and any stale pipeline lock files. Delete any locks older than 2 hours and report what was cleaned.', 'lock-cleanup')) },
246
+ { taskName: 'Projects\\MemoryConsolidation', batFile: join(SCRIPTS_DIR, 'memory-consolidation.bat'), schedule: '/sc daily /st 01:00', content: bat('memory-consolidation', p => block(p, 'Read all files in ai/memory/. Consolidate duplicates and summarise entries older than 30 days into a single summary entry. Save the updated files.', 'memory-consolidation')) },
247
+ { taskName: 'Projects\\NightlyAudit', batFile: join(SCRIPTS_DIR, 'nightly-audit.bat'), schedule: '/sc daily /st 02:00', content: bat('nightly-audit', p => blockCmd(p, '/smash-os:audit', 'nightly-audit')) },
248
+ { taskName: 'Projects\\DocsRegeneration', batFile: join(SCRIPTS_DIR, 'docs-regeneration.bat'), schedule: '/sc daily /st 03:00', content: bat('docs-regeneration', p => block(p, 'Read ai/context/ files. Check if they are still accurate against the codebase. Flag outdated sections. Do NOT overwrite orchestrator.md.', 'docs-regeneration')) },
249
+ { taskName: 'Projects\\CTOLoop', batFile: join(SCRIPTS_DIR, 'cto-loop.bat'), schedule: '/sc weekly /d MON /st 04:00', content: bat('cto-loop', p => block(p, 'Act as CTO. Read ai/memory/decisions.md and ai/memory/audits.md. Score codebase health out of 100 across security, architecture, tests, risks. Output structured report with top 3 risks and recommended next pipeline.', 'cto-loop')) },
250
+ { taskName: 'Projects\\WeeklyImprovement', batFile: join(SCRIPTS_DIR, 'weekly-improvement.bat'), schedule: '/sc weekly /d MON /st 05:00', content: bat('weekly-improvement', p => blockCmd(p, '/smash-os:run weekly-improvement', 'weekly-improvement')) },
251
+ { taskName: 'Projects\\RoleEvolution', batFile: join(SCRIPTS_DIR, 'role-evolution.bat'), schedule: '/sc weekly /d MON /st 06:30', content: bat('role-evolution', p => blockCmd(p, '/smash-os:evolve-roles', 'role-evolution')) },
252
+ { taskName: '_skills\\DreamMemory', batFile: join(SKILL_SCRIPTS, 'dream-memory.bat'), schedule: '/sc daily /st 04:00', content: `@echo off
253
+ cd /d "${home}"
254
+ set CONVENTIONS=%USERPROFILE%\\Desktop\\SmashBurgerBar\\SmashVault\\Architecture\\conventions.md
255
+ if not exist "%CONVENTIONS%" set CONVENTIONS=%USERPROFILE%\\.claude\\conventions.md
256
+ echo [%date% %time%] Running dream-memory --all... >> "${join(SKILL_LOGS, 'dream-memory.log')}"
257
+ "${claudeExe}" --dangerously-skip-permissions --print "/dream-memory --all" >> "${join(SKILL_LOGS, 'dream-memory.log')}" 2>&1
258
+ echo [%date% %time%] Running dream-memory --vault on %CONVENTIONS%... >> "${join(SKILL_LOGS, 'dream-memory.log')}"
259
+ "${claudeExe}" --dangerously-skip-permissions --print "/dream-memory --vault %CONVENTIONS%" >> "${join(SKILL_LOGS, 'dream-memory.log')}" 2>&1
260
+ echo [%date% %time%] Done. >> "${join(SKILL_LOGS, 'dream-memory.log')}"
261
+ ` },
262
+ { taskName: '_skills\\SkillEvolution', batFile: join(SKILL_SCRIPTS, 'skill-evolution.bat'), schedule: '/sc weekly /d MON /st 06:00', content: `@echo off\ncd /d "${home}"\necho [%date% %time%] Running skill evolution... >> "${join(SKILL_LOGS, 'skill-evolution.log')}"\n"${claudeExe}" --dangerously-skip-permissions --print "/skill-evolution" >> "${join(SKILL_LOGS, 'skill-evolution.log')}" 2>&1\necho [%date% %time%] Done. >> "${join(SKILL_LOGS, 'skill-evolution.log')}"\n` },
263
+ { taskName: '_skills\\SkillResearch', batFile: join(SKILL_SCRIPTS, 'skill-research.bat'), schedule: '/sc monthly /d 1 /st 07:00', content: `@echo off\ncd /d "${home}"\necho [%date% %time%] Running skill research... >> "${join(SKILL_LOGS, 'skill-research.log')}"\nfor %%S in (close-session open-session smash-os-run smash-os-audit smash-os-onboarding) do (\n "${claudeExe}" --dangerously-skip-permissions --print "/skill-researcher %%S" >> "${join(SKILL_LOGS, 'skill-research.log')}" 2>&1\n timeout /t 30 /nobreak >nul\n)\necho [%date% %time%] Done. >> "${join(SKILL_LOGS, 'skill-research.log')}"\n` },
264
+ ];
265
+ }
266
+
267
+ function runAutomation(projectPath) {
268
+ console.log('');
269
+ console.log(chalk.bold(' Setting up Windows Task Scheduler...'));
270
+ console.log('');
271
+
272
+ const claudeExe = findClaude();
273
+ if (!claudeExe) {
274
+ console.error(' ' + chalk.red('✗') + ' claude.exe not found in PATH — skipping automation.');
275
+ console.error(chalk.dim(' Re-run: node install-automation.mjs after adding claude to PATH'));
276
+ return;
277
+ }
278
+ console.log(' ' + chalk.green('✓') + ' claude: ' + chalk.dim(claudeExe));
279
+
280
+ const projectName = (() => {
281
+ try { return JSON.parse(readFileSync(join(projectPath, 'package.json'), 'utf8')).name || basename(projectPath); }
282
+ catch { return basename(projectPath); }
283
+ })();
284
+ const current = { name: projectName, path: projectPath };
285
+
286
+ let projects = loadRegistry();
287
+ if (!projects.find(p => p.path === current.path)) {
288
+ projects.push({ name: current.name, path: current.path, addedAt: new Date().toISOString().slice(0, 10) });
289
+ saveRegistry(projects);
290
+ }
291
+ console.log(' ' + chalk.green('✓') + ` registry: ${projects.length} project(s)`);
292
+
293
+ // Ensure ~/.claude/conventions.md exists as fallback
294
+ const globalConv = join(homedir(), '.claude', 'conventions.md');
295
+ if (!existsSync(globalConv)) {
296
+ mkdirSync(join(homedir(), '.claude'), { recursive: true });
297
+ writeFileSync(globalConv, '# Conventions\n\n<!-- Consolidated by /dream-memory --vault -->\n', 'utf8');
298
+ console.log(' ' + chalk.green('✓') + ' ~/.claude/conventions.md (created)');
299
+ } else {
300
+ console.log(' ' + chalk.dim('↷') + ' ~/.claude/conventions.md (exists)');
301
+ }
302
+
303
+ for (const p of projects) mkdirSync(join(SMASH_BASE, p.name, 'logs'), { recursive: true });
304
+ mkdirSync(SCRIPTS_DIR, { recursive: true });
305
+ mkdirSync(SKILL_SCRIPTS, { recursive: true });
306
+ mkdirSync(SKILL_LOGS, { recursive: true });
307
+
308
+ const bats = buildBats(projects, claudeExe);
309
+ for (const b of bats) writeFileSync(b.batFile, b.content, 'utf8');
310
+ console.log(' ' + chalk.green('✓') + ` ${bats.length} bat files written`);
311
+
312
+ console.log('');
313
+ for (const b of bats) schtask(b.taskName, b.batFile, b.schedule);
314
+
315
+ console.log('');
316
+ console.log(chalk.dim(' Schedule: hourly lock-cleanup · 1am memory · 2am audit · 3am docs · Mon CTO+improvement · Mon 6am skills · Mon 6:30am role-evolution · 1st skill-research'));
317
+ console.log(chalk.dim(` Logs: C:\\SmashOS\\${projectName}\\logs\\`));
318
+ }
319
+
320
+ // ─── Main ─────────────────────────────────────────────────────────────────────
321
+
322
+ console.log('');
323
+ console.log(chalk.bold(` SmashOS ${isMarketing ? 'Marketing Harness' : 'Harness'} Installer`));
324
+ console.log(chalk.dim(' Local mode — no web app, no API keys required'));
325
+ console.log('');
326
+
327
+ // ─── Marketing install (--marketing flag) ─────────────────────────────────────
328
+
329
+ if (isMarketing) {
330
+ const mAnswers = await prompts([
331
+ {
332
+ type: 'text',
333
+ name: 'brandName',
334
+ message: 'Brand / project name',
335
+ initial: basename(cwd),
336
+ validate: (v) => (v.trim().length > 0 ? true : 'Required'),
337
+ },
338
+ {
339
+ type: 'select',
340
+ name: 'enhancement',
341
+ message: 'Enhancement mode',
342
+ choices: [
343
+ { title: 'off — roles only, manual framework use', value: 'off' },
344
+ { title: 'light — roles hint relevant workflows', value: 'light' },
345
+ { title: 'medium — structured workflows enforced (recommended)', value: 'medium' },
346
+ { title: 'high — full GSD phase engine + learning extraction', value: 'high' },
347
+ ],
348
+ initial: 2,
349
+ },
350
+ ], { onCancel: () => { console.log(chalk.yellow('\n Cancelled.')); process.exit(0); } });
351
+
352
+ const { brandName, enhancement: mEnhancement } = mAnswers;
353
+
354
+ const smashOsRoot = join(dirname(new URL(import.meta.url).pathname.replace(/^\/([A-Z]:)/, '$1')), '..');
355
+ const marketingSrc = join(smashOsRoot, 'other', 'cross-industry-domains', 'marketing');
356
+
357
+ function copyDir(src, destPrefix, skipIfExists = false) {
358
+ for (const entry of readdirSync(src, { withFileTypes: true })) {
359
+ const srcPath = join(src, entry.name);
360
+ const destRel = join(destPrefix, entry.name);
361
+ if (entry.isDirectory()) {
362
+ copyDir(srcPath, destRel, skipIfExists);
363
+ } else {
364
+ const destAbs = join(cwd, destRel);
365
+ if (skipIfExists && existsSync(destAbs)) {
366
+ console.log(' ' + chalk.dim('↷') + ' ' + chalk.dim(`${destRel} (exists — skipped)`));
367
+ } else {
368
+ mkdirSync(dirname(destAbs), { recursive: true });
369
+ copyFileSync(srcPath, destAbs);
370
+ console.log(' ' + chalk.green('✓') + ' ' + chalk.white(destRel));
371
+ }
372
+ }
373
+ }
374
+ }
375
+
376
+ console.log('');
377
+ writeFile('.smash-os-mode', 'marketing\n');
378
+ console.log(' ' + chalk.green('✓') + ' ' + chalk.white('.smash-os-mode') + chalk.dim(' (marketing)'));
379
+ writeFile('.smash-os-enhancement', (mEnhancement || 'medium') + '\n');
380
+ console.log(' ' + chalk.green('✓') + ' ' + chalk.white('.smash-os-enhancement') + chalk.dim(` (${mEnhancement || 'medium'})`));
381
+
382
+ const mClaudeMd = `# ${brandName} — SmashOS Marketing Harness Active\n\nYou are operating inside the SmashOS Marketing Workflow Harness.\nYou are not a single assistant. You are a coordinated AI marketing organisation.\nThis is a **fully local** installation — no web app, no API keys, no cloud dependencies.\n\n## On Session Start\n1. Read \`marketing/context/product.md\` (what are we marketing?)\n2. Read \`marketing/context/brand-guidelines.md\` (tone, colours, fonts, voice)\n3. Read \`marketing/context/target-audience.md\` (ICPs, segments, pain points)\n4. Read \`marketing/memory/decisions.md\` — last 20 entries (if it exists)\n5. Adopt the role: Brand Strategist\n\n## Cognitive Mode Rules\nTHINKING → Brand Strategist, Marketing Analyst (no content creation)\nEXECUTION → Content Writer, Social Media Manager (no strategy, no decisions)\nVALIDATION → SEO Specialist, Paid Media Analyst (no content creation)\n\n## Golden Rules\n- Never create content without an approved brief\n- Never publish without SEO + brand validation\n- Never run paid campaigns without ROI projections and goal alignment\n- Output structured results only\n- Save campaign decisions to memory after every significant choice\n\n## Slash Commands\n- /marketing:run [type] — trigger pipeline (campaign | content | seo-audit | paid | custom)\n- /marketing:role [name] — switch cognitive mode + load role definition\n- /marketing:memory — show recent campaign decisions and lessons\n- /marketing:status — print active campaigns, recent content, pipeline health\n- /marketing:brief [topic] — create a campaign brief for a specific topic\n\n## Enhancement Mode\nCurrent level: ${mEnhancement || 'medium'}\nRead \`.smash-os-enhancement\` at session start.\n\n## Mode\nLocal mode — all pipelines run inline via Claude Code.\n`;
383
+ writeFile('CLAUDE.md', mClaudeMd);
384
+ console.log(' ' + chalk.green('✓') + ' ' + chalk.white('CLAUDE.md') + chalk.dim(' (marketing)'));
385
+
386
+ console.log('');
387
+ console.log(chalk.dim(' Roles + workflows:'));
388
+ copyDir(join(marketingSrc, 'roles'), 'marketing/roles', false);
389
+ copyDir(join(marketingSrc, 'workflows'), 'marketing/workflows', false);
390
+
391
+ console.log('');
392
+ console.log(chalk.dim(' Context templates (skipped if already customised):'));
393
+ copyDir(join(marketingSrc, 'context'), 'marketing/context', true);
394
+
395
+ if (!existsSync(join(cwd, 'marketing/memory/decisions.md'))) {
396
+ writeFile('marketing/memory/decisions.md', '# Campaign Decisions Log\n\n<!-- SmashOS writes here after each campaign decision. -->\n<!-- ## YYYY-MM-DD — Decision title -->\n<!-- **Rationale:** why this was chosen -->\n<!-- **Outcome:** what changed -->\n');
397
+ console.log('');
398
+ console.log(' ' + chalk.green('✓') + ' ' + chalk.white('marketing/memory/decisions.md'));
399
+ }
400
+
401
+ const marketingSkills = {
402
+ 'marketing-run': `---\nname: marketing-run\ndescription: Trigger a SmashOS marketing pipeline. Usage: /marketing:run [type] where type is campaign | content | seo-audit | paid | custom.\nallowed-tools: Bash, Read, Write, Edit, Glob, Grep\n---\n\n# /marketing:run\n\nTrigger a marketing pipeline for this project (local mode).\n\nLoad context from \`marketing/context/\` and \`marketing/memory/\` before starting.\n\n## Pipeline Types\n- \`campaign\` — full 6-phase campaign launch pipeline\n- \`content\` — weekly 30-day content calendar\n- \`seo-audit\` — monthly SEO health check\n- \`paid\` — paid media pre-launch review\n- \`custom\` — user-defined pipeline\n\n## Phase Output Format\n\n\`\`\`\n▸ SMASH MARKETING · phase {N} · {role}\n─────────────────────────────────────\n{output}\n─────────────────────────────────────\n\`\`\`\n\n## After All Phases\n\nSave key campaign decisions to \`marketing/memory/decisions.md\`.\n`,
403
+ 'marketing-role': `---\nname: marketing-role\ndescription: Switch to a SmashOS marketing role. Usage: /marketing:role [name].\n---\n\n# /marketing:role\n\nSwitch to the named marketing role and adopt its cognitive mode.\n\n## Roles\n\n| Role | Mode |\n|---|---|\n| Brand Strategist | THINKING |\n| Marketing Analyst | THINKING |\n| Content Writer | EXECUTION |\n| Social Media Manager | EXECUTION |\n| SEO Specialist | VALIDATION |\n| Paid Media Analyst | VALIDATION |\n\n## Steps\n1. Read the role name from the argument\n2. Announce: "Switching to [Role] — [Mode] mode"\n3. Load \`marketing/roles/<role-slug>.md\` if it exists\n4. Apply cognitive mode restrictions\n`,
404
+ 'marketing-brief': `---\nname: marketing-brief\ndescription: Generate a campaign brief for a topic. Usage: /marketing:brief [topic].\nallowed-tools: Read\n---\n\n# /marketing:brief\n\nGenerate a campaign brief as Brand Strategist.\n\n## Steps\n1. Read \`marketing/context/product.md\`\n2. Read \`marketing/context/brand-guidelines.md\`\n3. Read \`marketing/context/target-audience.md\`\n4. Adopt Brand Strategist [THINKING] mode\n5. Produce a full approved_brief JSON for the topic\n`,
405
+ };
406
+
407
+ console.log('');
408
+ const mSkillsHome = join(homedir(), '.claude', 'skills');
409
+ for (const [skillName, skillContent] of Object.entries(marketingSkills)) {
410
+ const skillDir = join(mSkillsHome, skillName);
411
+ mkdirSync(skillDir, { recursive: true });
412
+ writeFileSync(join(skillDir, 'SKILL.md'), skillContent, 'utf8');
413
+ console.log(' ' + chalk.green('✓') + ' ' + chalk.white(`/marketing:${skillName.replace('marketing-', '')}`) + chalk.dim(' → ~/.claude/skills/'));
414
+ }
415
+
416
+ console.log('');
417
+ console.log(chalk.bold.green(' SmashOS Marketing Harness installed!'));
418
+ console.log('');
419
+ console.log(chalk.dim(' Open Claude Code in this directory:'));
420
+ console.log(' ' + chalk.white(' claude .'));
421
+ console.log('');
422
+ console.log(chalk.dim(' Fill in marketing/context/ with your brand details.'));
423
+ console.log(chalk.dim(' Then run /marketing:run campaign to start a campaign.'));
424
+ console.log('');
425
+ process.exit(0);
426
+ }
427
+
428
+ const onCancel = () => { console.log(chalk.yellow('\n Cancelled.')); process.exit(0); };
429
+
430
+ // ── Q1: New or existing project? ──────────────────────────────────────────────
431
+ const { projectType } = await prompts({
432
+ type: 'select',
433
+ name: 'projectType',
434
+ message: 'Is this a new project or an existing one?',
435
+ choices: [
436
+ { title: 'New project — I will describe the tech stack', value: 'new' },
437
+ { title: 'Existing project — auto-discover tech stack from codebase', value: 'existing' },
438
+ ],
439
+ initial: 0,
440
+ }, { onCancel });
441
+
442
+ // ── Auto-discover tech stack for existing projects ────────────────────────────
443
+ function discoverTechStack() {
444
+ const hints = [];
445
+ try {
446
+ const pkg = JSON.parse(readFileSync(join(cwd, 'package.json'), 'utf8'));
447
+ const deps = { ...pkg.dependencies, ...pkg.devDependencies };
448
+ if (deps['next']) hints.push('Next.js');
449
+ else if (deps['react']) hints.push('React');
450
+ if (deps['vue']) hints.push('Vue');
451
+ if (deps['svelte']) hints.push('Svelte');
452
+ if (deps['vite']) hints.push('Vite');
453
+ if (deps['typescript'] || deps['@types/node']) hints.push('TypeScript');
454
+ if (deps['@supabase/supabase-js']) hints.push('Supabase');
455
+ if (deps['prisma']) hints.push('Prisma');
456
+ if (deps['express']) hints.push('Express');
457
+ if (deps['tailwindcss']) hints.push('Tailwind CSS');
458
+ if (deps['react-native'] || deps['expo']) hints.push('React Native / Expo');
459
+ } catch { /* no package.json */ }
460
+ if (existsSync(join(cwd, 'requirements.txt')) || existsSync(join(cwd, 'pyproject.toml'))) hints.push('Python');
461
+ if (existsSync(join(cwd, 'go.mod'))) hints.push('Go');
462
+ if (existsSync(join(cwd, 'Cargo.toml'))) hints.push('Rust');
463
+ if (existsSync(join(cwd, 'pubspec.yaml'))) hints.push('Flutter/Dart');
464
+ return hints.length > 0 ? hints.join(' + ') : 'TypeScript';
465
+ }
466
+
467
+ // ── Q2: Project name ──────────────────────────────────────────────────────────
468
+ const { projectName } = await prompts({
469
+ type: 'text',
470
+ name: 'projectName',
471
+ message: 'Project name',
472
+ initial: basename(cwd),
473
+ validate: (v) => (v.trim().length > 0 ? true : 'Required'),
474
+ }, { onCancel });
475
+
476
+ // ── Q3: Tech stack (skip for existing auto-discovered) ──────────────────────
477
+ let techStack;
478
+ if (projectType === 'existing') {
479
+ techStack = discoverTechStack();
480
+ console.log(' ' + chalk.green('✓') + ' Tech stack detected: ' + chalk.white(techStack));
481
+ } else {
482
+ const r = await prompts({
483
+ type: 'text',
484
+ name: 'techStack',
485
+ message: 'Tech stack (e.g. React + Node.js + Postgres)',
486
+ initial: 'TypeScript',
487
+ }, { onCancel });
488
+ techStack = r.techStack;
489
+ }
490
+
491
+ // ── Q4: Enhancement mode ──────────────────────────────────────────────────────
492
+ const { enhancement } = await prompts({
493
+ type: 'select',
494
+ name: 'enhancement',
495
+ message: 'Enhancement mode (superpowers + GSD framework integration)',
496
+ choices: [
497
+ { title: 'off — roles only, manual framework use', value: 'off' },
498
+ { title: 'light — roles hint relevant workflows', value: 'light' },
499
+ { title: 'medium — structured workflows enforced (recommended)', value: 'medium' },
500
+ { title: 'high — full GSD phase engine + learning extraction', value: 'high' },
501
+ ],
502
+ initial: 2,
503
+ }, { onCancel });
504
+
505
+ // ── Q5: Frontend? ─────────────────────────────────────────────────────────────
506
+ const frontendKeywords = ['react', 'next', 'vue', 'svelte', 'vite', 'tailwind', 'expo'];
507
+ const autoFrontend = frontendKeywords.some(k => techStack.toLowerCase().includes(k));
508
+ const { isFrontend } = await prompts({
509
+ type: 'confirm',
510
+ name: 'isFrontend',
511
+ message: 'Is this a frontend project? (adds UI/UX Designer, Frontend Developer, Frontend QA roles)',
512
+ initial: autoFrontend,
513
+ }, { onCancel });
514
+
515
+ // ── Q6: Windows automation ────────────────────────────────────────────────────
516
+ const { setupAutomation } = process.platform === 'win32'
517
+ ? await prompts({
518
+ type: 'confirm',
519
+ name: 'setupAutomation',
520
+ message: 'Set up Windows Task Scheduler automation? (nightly AI, weekly improvements)',
521
+ initial: true,
522
+ }, { onCancel })
523
+ : { setupAutomation: false };
524
+
525
+ // ─── File content ─────────────────────────────────────────────────────────────
526
+
527
+ const claudeMd = `# ${projectName} — SmashOS Harness Active
528
+
529
+ You are operating inside the SmashOS AI Workflow Harness.
530
+ You are not a single assistant. You are a coordinated AI engineering organisation.
531
+ This is a **fully local** installation — no web app, no API keys, no cloud dependencies.
532
+
533
+ ## On Session Start
534
+ 1. Read \`ai/context/product.md\` (if it exists)
535
+ 2. Read \`ai/context/architecture.md\` (if it exists)
536
+ 3. Read \`ai/context/coding-standards.md\` (if it exists)
537
+ 4. Read \`ai/memory/decisions.md\` last 20 entries (if it exists)
538
+ 5. Adopt the role: Staff Engineer
539
+
540
+ ## Cognitive Mode Rules
541
+ THINKING → Staff Engineer, Product Manager${isFrontend ? ', UI/UX Designer' : ''} (no file writes)
542
+ EXECUTION → Senior Developer, DevOps${isFrontend ? ', Frontend Developer' : ''} (no specs or decisions)
543
+ VALIDATION → Security Engineer, QA Engineer${isFrontend ? ', Frontend QA' : ''} (no production code)${isFrontend ? `
544
+
545
+ ## Frontend Team (active — frontend files detected → trigger these roles)
546
+ - UI/UX Designer [THINKING] — design review, brand compliance, UX audit
547
+ - Frontend Developer [EXECUTION] — component code, a11y, performance, design system
548
+ - Frontend QA [VALIDATION] — Playwright E2E tests, coverage sign-off
549
+ Trigger: any change to .tsx/.jsx/.css/.scss or components/app/pages/ directories
550
+ Manual: /smash-os:run frontend-audit` : ''}
551
+
552
+ ## Golden Rules
553
+ - Never skip architecture review
554
+ - Never write code without an approved spec
555
+ - Never deploy without QA + Security validation
556
+ - Output structured results only
557
+ - Save decisions to memory after every significant choice
558
+
559
+ ## Slash Commands
560
+ - /smash-os:run [type] — trigger pipeline (feature | bug-fix | security-audit | weekly-improvement | custom)
561
+ - /smash-os:role [name] — switch cognitive mode + load role definition
562
+ - /smash-os:memory — show recent decisions and lessons
563
+ - /smash-os:status — print health, pipelines, signals, recent decisions
564
+ - /smash-os:enhancement [lvl] — view or change enhancement mode (off | light | medium | high)
565
+
566
+ ## Enhancement Mode
567
+ Current level: ${enhancement || 'medium'}
568
+
569
+ Read \`.smash-os-enhancement\` at session start and apply the corresponding framework integration.
570
+
571
+ ## Tech Stack
572
+ ${techStack}
573
+ `;
574
+
575
+ const aiFiles = {
576
+ 'ai/context/product.md': `# Product Context — ${projectName}\n\n## What is this project?\n<!-- Describe what this project does -->\n\n## Who uses it?\n<!-- Describe the users -->\n\n## Core goals\n<!-- List the main goals -->\n\n## Tech Stack\n${techStack}\n`,
577
+ 'ai/context/architecture.md': `# Architecture — ${projectName}\n\n## Overview\n<!-- Describe the high-level architecture -->\n\n## Key decisions\n<!-- List major architectural decisions and why they were made -->\n\n## Constraints\n<!-- List things that must not change -->\n`,
578
+ 'ai/context/coding-standards.md': `# Coding Standards — ${projectName}\n\n## Language & formatting\n<!-- ESLint config, prettier, etc. -->\n\n## Naming conventions\n<!-- Variables, files, functions -->\n\n## Patterns to follow\n<!-- e.g. always use server actions, never bypass RLS -->\n\n## Patterns to avoid\n<!-- e.g. no raw SQL, no any types -->\n`,
579
+ 'ai/memory/decisions.md': `# Decisions Log\n\n<!-- SmashOS writes here after each session. Format: -->\n<!-- ## YYYY-MM-DD — Decision title -->\n<!-- **Rationale:** why this was chosen -->\n<!-- **Outcome:** what changed -->\n`,
580
+ 'ai/memory/lessons.md': `# Lessons Learned\n\n<!-- SmashOS writes here after bugs and incidents. Format: -->\n<!-- ## YYYY-MM-DD — Lesson title -->\n<!-- **What happened:** -->\n<!-- **What to do differently:** -->\n`,
581
+ };
582
+
583
+ const settingsJson = JSON.stringify({
584
+ hooks: {
585
+ SessionStart: [
586
+ {
587
+ hooks: [
588
+ {
589
+ type: 'command',
590
+ command: 'node "$CLAUDE_PROJECT_DIR/.claude/hooks/smash-os-boot.mjs"',
591
+ },
592
+ ],
593
+ },
594
+ ],
595
+ Stop: [
596
+ {
597
+ hooks: [
598
+ {
599
+ type: 'command',
600
+ command: 'node "$CLAUDE_PROJECT_DIR/.claude/hooks/smash-os-sync.mjs"',
601
+ },
602
+ ],
603
+ },
604
+ ],
605
+ },
606
+ }, null, 2);
607
+
608
// Source of .claude/hooks/smash-os-boot.mjs, written verbatim to disk by the
// installer below. The template is a self-contained Node script: it reads
// ai/memory/decisions.md plus .smash-os-enhancement and prints a JSON context
// payload on stdout at SessionStart. Inner backticks/interpolations are
// escaped (\` and \${...}) so they survive into the generated file — edit
// with care.
// NOTE(review): the script emits { type: 'context', content } — confirm this
// matches the SessionStart hook output schema of the targeted Claude Code
// version before changing it.
const bootHook = `#!/usr/bin/env node
/**
 * smash-os-boot.mjs — SessionStart hook (local mode)
 * Reads ai/memory/decisions.md and injects recent decisions as session context.
 * No network calls. No credentials required.
 */

import { readFileSync, existsSync } from 'fs';
import { join } from 'path';

const cwd = process.cwd();

function readFile(relPath) {
  const abs = join(cwd, relPath);
  return existsSync(abs) ? readFileSync(abs, 'utf8') : null;
}

function extractLastN(content, n) {
  if (!content) return [];
  const sections = content.split(/\\n(?=## )/).filter(s => s.trim());
  return sections.slice(-n);
}

async function main() {
  const enhancement = readFile('.smash-os-enhancement');
  const enhancementLevel = enhancement ? enhancement.trim() : 'off';

  const lines = [
    '--- SmashOS Context (local mode) ---',
    '',
    'Mode: local',
    \`Enhancement: \${enhancementLevel}\`,
    '',
  ];

  const decisions = readFile('ai/memory/decisions.md');
  if (decisions) {
    const recent = extractLastN(decisions, 5);
    if (recent.length) {
      lines.push('Recent Decisions (last 5):');
      for (const section of recent) {
        const firstLine = section.split('\\n')[0].replace(/^## /, '').trim();
        lines.push(\` • \${firstLine}\`);
      }
      lines.push('');
    }
  }

  lines.push('Run /smash-os:status for full health check.');
  lines.push('Run /smash-os:run [type] to start a pipeline.');
  lines.push('--- End SmashOS Context ---');

  const output = { type: 'context', content: lines.join('\\n') };
  process.stdout.write(JSON.stringify(output) + '\\n');
}

main().catch((err) => {
  process.stderr.write(\`[SmashOS boot] Error: \${err.message}\\n\`);
  process.exit(0);
});
`;
669
+
670
// Source of .claude/hooks/smash-os-sync.mjs, written verbatim to disk by the
// installer below. The generated script runs on the Stop hook: it parses the
// hook's JSON stdin (best-effort), appends a timestamped entry to
// ai/memory/sessions.md, and mirrors a "pending consolidation" stub into a
// conventions file. Inner backticks/interpolations are escaped (\` and
// \${...}) so they survive into the generated file.
// NOTE(review): vaultConventions hardcodes Desktop/SmashBurgerBar/SmashVault —
// a machine-specific path; the script falls back to ~/.claude/conventions.md
// when it is absent, but confirm the hardcoded path is intended in a published
// package.
const syncHook = `#!/usr/bin/env node
/**
 * smash-os-sync.mjs — Stop hook (local mode)
 * Appends session summary to ai/memory/sessions.md locally.
 * No network calls. No credentials required.
 */

import { readFileSync, existsSync, writeFileSync, mkdirSync } from 'fs';
import { join, dirname } from 'path';
import { homedir } from 'os';

const cwd = process.cwd();
const vaultConventions = join(process.env.USERPROFILE || process.env.HOME || homedir(), 'Desktop', 'SmashBurgerBar', 'SmashVault', 'Architecture', 'conventions.md');
const globalConventions = join(homedir(), '.claude', 'conventions.md');
const conventionsFile = existsSync(vaultConventions) ? vaultConventions : globalConventions;

function appendFile(relPath, content) {
  const abs = join(cwd, relPath);
  mkdirSync(dirname(abs), { recursive: true });
  const existing = existsSync(abs) ? readFileSync(abs, 'utf8') : '';
  writeFileSync(abs, existing + content, 'utf8');
}

async function main() {
  let hookInput = {};
  try {
    const chunks = [];
    for await (const chunk of process.stdin) chunks.push(chunk);
    const raw = Buffer.concat(chunks).toString('utf8');
    if (raw.trim()) hookInput = JSON.parse(raw);
  } catch { /* stdin may be empty or non-JSON */ }

  const summary = hookInput.session_summary || hookInput.summary || '';
  const decisions = hookInput.decisions_made || [];
  const files = hookInput.files_changed || [];

  if (!summary && !decisions.length && !files.length) process.exit(0);

  const timestamp = new Date().toISOString().slice(0, 16).replace('T', ' ');
  const entry = [
    \`\\n## \${timestamp}\`,
    summary ? \`\\n\${summary}\` : '',
    decisions.length ? \`\\n**Decisions:** \${decisions.join(', ')}\` : '',
    files.length ? \`\\n**Files changed:** \${files.join(', ')}\` : '',
    '',
  ].join('\\n');

  appendFile('ai/memory/sessions.md', entry);
  process.stderr.write('[SmashOS] Session saved locally to ai/memory/sessions.md\\n');

  if (summary || decisions.length) {
    const convEntry = [
      \`\\n## \${timestamp} — pending consolidation\`,
      summary ? \`\\nContext: \${summary.slice(0, 300)}\` : '',
      decisions.length ? \`\\nDecisions: \${decisions.join('; ')}\` : '',
      '\\n<!-- awaiting /dream-memory --vault -->',
      '',
    ].join('\\n');
    if (!existsSync(conventionsFile)) {
      mkdirSync(dirname(conventionsFile), { recursive: true });
      writeFileSync(conventionsFile, '# Conventions\\n\\n<!-- Consolidated by /dream-memory --vault -->\\n', 'utf8');
    }
    const existing = readFileSync(conventionsFile, 'utf8');
    writeFileSync(conventionsFile, existing + convEntry, 'utf8');
    process.stderr.write(\`[SmashOS] Conventions updated at \${conventionsFile}\\n\`);
  }
  process.exit(0);
}

main().catch((err) => {
  process.stderr.write(\`[SmashOS sync] Error: \${err.message}\\n\`);
  process.exit(0);
});
`;
744
+
745
// Skill definitions installed globally under ~/.claude/skills/<name>/SKILL.md
// by the loop below. Each value is the complete markdown body of one skill
// (frontmatter + instructions); keys become the directory names. The markdown
// is runtime data written verbatim to disk — do not reflow or "fix" its text
// here without intending to change the installed skills.
const localSkills = {
  // /smash-os:role — role switching + enhancement-level behaviors.
  'smash-os-role': `---\nname: smash-os-role\ndescription: Switch cognitive mode and load a SmashOS role definition. Use when switching roles or at session start.\n---\n\n# /smash-os:role\n\nSwitch to the named role and adopt its cognitive mode.\n\n## Roles\n\n| Role | Mode | Allowed |\n|---|---|---|\n| Staff Engineer | THINKING | Architecture, decisions, reviews |\n| Product Manager | THINKING | Specs, user stories, acceptance criteria |\n| Senior Developer | EXECUTION | Writing code, editing files |\n| Security Engineer | VALIDATION | Security review only, no code changes |\n| QA Engineer | VALIDATION | Testing and verification only |\n| DevOps Engineer | EXECUTION | Infrastructure, deployment |\n| UI/UX Designer | THINKING | Design review, UX audit, brand compliance (frontend projects only) |\n| Frontend Developer | EXECUTION | Component code, a11y, performance, design system (frontend projects only) |\n| Frontend QA | VALIDATION | Playwright E2E tests, coverage sign-off (frontend projects only) |\n\n## Steps\n1. Read the role name from the argument (default: Staff Engineer)\n2. Read \`.smash-os-enhancement\` (default: off if missing)\n3. Announce: "Switching to [Role] — [Mode] mode · Enhancement: [level]"\n4. Load \`ai/roles/<role-slug>.md\` if it exists\n5. Apply cognitive mode restrictions AND the enhancement behaviors below\n\n## Enhancement Mode Behaviors\n\n**off** — Role operates standalone. Use your own judgment on process.\n\n**light** — Surface framework hints before significant tasks:\n- Architecture or complex decisions → suggest \`/superpowers:brainstorming\` first\n- New code → remind about TDD (\`/superpowers:test-driven-development\`)\n- Bug investigation → suggest \`/superpowers:systematic-debugging\`\n- Do not force these — surface as a suggestion, then proceed\n\n**medium** — Enforce structured workflows:\n- Feature work: brainstorm → write-plans → TDD → verification-before-completion (mandatory gates)\n- Bug work: systematic-debugging with hypothesis log\n- Before claiming complete: always invoke \`/superpowers:verification-before-completion\`\n- Complex decisions: brainstorm alternatives before committing\n- After significant work: extract key learnings to \`ai/memory/lessons.md\`\n\n**high** — Full GSD phase management + active learning extraction:\n- Complex work (3+ implementation steps): use GSD (discuss → plan → execute → verify)\n- Use \`/gsd:plan-phase\` before any multi-step implementation\n- Simple/reversible tasks (< 30 min, single obvious approach): use medium workflows instead\n- After each phase: run \`/smash-os:extract-learnings\` to capture conventions and decisions\n- Completion: \`/gsd:verify-work\` before signing off\n- Learnings feed into \`ai/memory/\` and inform skill evolution\n`,
  // /smash-os:memory — display recent decisions/lessons.
  'smash-os-memory': `---\nname: smash-os-memory\ndescription: Show recent SmashOS decisions and lessons from the ai/memory/ folder.\n---\n\n# /smash-os:memory\n\nRead and display the local SmashOS memory files.\n\n## Steps\n1. Read \`ai/memory/decisions.md\` (last 20 entries)\n2. Read \`ai/memory/lessons.md\` (last 10 entries)\n3. Display them clearly — decisions first, then lessons\n4. If either file is empty or missing, say so\n`,
  // /smash-os:onboarding — interactive repo registration interview.
  'smash-os-onboarding': `---\nname: smash-os:onboarding\ndescription: Run SmashOS onboarding — scans the codebase, pre-fills answers from what it finds, then walks through registration questions section by section. Each question can be skipped. Writes all ai/context/ files when done.\nallowed-tools: Bash, Read, Glob, Grep, Write\n---\n\n# /smash-os:onboarding\n\nOnboard this repo into SmashOS. Always local — no API keys required.\n\n## Phase 1 — Codebase Scan\n\nAnnounce start, then silently read:\n- \`package.json\`, \`README.md\`, \`CLAUDE.md\`, \`.smash-os-mode\`\n- Top-level dirs + \`src/\`, \`app/\`, \`lib/\` contents\n- Key config files: vite, next, react-router, tsconfig, tailwind, drizzle\n- \`supabase/migrations/\`, \`prisma/schema.prisma\`, \`.env.example\`\n- Any existing \`ai/context/\` files\n- Up to 5 representative source files\n\nBuild internal knowledge from what you find — used to pre-fill Phase 2.\n\n## Phase 2 — Registration Interview\n\nTell the user:\n\`\`\`\n▸ SMASH OS · registration interview\n─────────────────────────────────────\n I've scanned your codebase. Now I'll ask questions to fill in\n your AI context files. Each question shows what I already found.\n Type SKIP to skip any question. Type DONE to finish a section early.\n─────────────────────────────────────\n\`\`\`\n\nFor each question: show your pre-filled guess first, then ask. If user types SKIP — accept the pre-fill and move on. User answers always override pre-fills. One section at a time.\n\n**Section 1 — Product & Business → ai/context/product.md**\nQ1 What does this product do? | Q2 Who are the users? | Q3 Main modules/features? | Q4 Revenue/business model? | Q5 3–5 core daily workflows? | Q6 Non-negotiable rules? | Q7 What does "broken" look like?\n\n**Section 2 — Architecture & Tech Stack → ai/context/architecture.md**\nQ1 Frontend framework + version? | Q2 Backend/API layer? | Q3 Database(s)? | Q4 Auth method? | Q5 Deployment target? | Q6 External services/APIs? | Q7 Folder/module structure? | Q8 Architectural rules/boundaries?\n\n**Section 3 — Coding Standards → ai/context/coding-standards.md**\nQ1 Language + strictness? | Q2 Naming conventions? | Q3 How are mutations handled? | Q4 Styling approach? | Q5 Testing setup? | Q6 Consistent patterns/abstractions? | Q7 What should AI-generated code always avoid?\n\n**Section 4 — Database → ai/context/database.md** (skip if no DB detected)\nQ1 Main tables + what each stores? | Q2 Key relationships? | Q3 RLS or data isolation? | Q4 How is DB accessed in code? | Q5 Business-critical queries to document? | Q6 Tables needing special care?\n\n**Section 5 — Current State → ai/memory/decisions.md**\nQ1 Current state of codebase? | Q2 3 biggest pain points? | Q3 Actively being worked on? | Q4 Settled decisions not to revisit? | Q5 What has been tried and failed?\n\n## Phase 3 — Write Context Files\n\nMerge pre-fills + user answers. Write real prose — no placeholder comments.\nIf a file exists with real content, append rather than overwrite.\n\nFiles: ai/context/product.md | ai/context/architecture.md | ai/context/coding-standards.md | ai/context/database.md | ai/memory/decisions.md (append) | ai/context/orchestrator.md (update summaries)\n\n## Phase 4 — Confirm\n\n\`\`\`\n▸ SMASH OS · onboarded\n─────────────────────────────────────\n FILES WRITTEN\n · ai/context/product.md\n · ai/context/architecture.md\n · ai/context/coding-standards.md\n · ai/context/database.md\n · ai/memory/decisions.md (initial context appended)\n · ai/context/orchestrator.md (summaries updated)\n\n WHAT I NOW KNOW\n {2–4 sentence natural language summary}\n\n Run /smash-os:run to start your first pipeline.\n─────────────────────────────────────\n\`\`\`\n\n## Rules\n- Never ask for credentials\n- Always show pre-fill before asking\n- Never repeat a skipped question\n- Write real content — no placeholder comments\n- Re-runs append, never overwrite history\n`,
  // /smash-os:run — the 7-phase inline pipeline engine.
  'smash-os-run': `---\nname: smash-os-run\ndescription: Trigger a SmashOS pipeline manually. Usage: /smash-os:run [type] where type is feature | bug-fix | weekly-improvement | security-audit | custom.\nallowed-tools: Bash, Read, Write, Edit, Glob, Grep\n---\n\n# /smash-os:run\n\nTrigger a SmashOS pipeline for this repo (local mode).\n\nAll pipelines execute inline — Claude Code IS the pipeline engine.\nLoad context from \`ai/context/\` and \`ai/memory/\` before starting.\n\n## Pipeline Types\n- \`feature\` — full 7-phase feature development\n- \`bug-fix\` — diagnose and fix a bug\n- \`security-audit\` — security review of recent changes\n- \`weekly-improvement\` — code quality and refactor pass\n- \`frontend-audit\` — UI/UX design review + component quality + Playwright tests (frontend projects only)\n- \`playwright\` — Frontend QA phase only: generate/review Playwright E2E tests (frontend projects only)\n- \`custom\` — user-defined pipeline (ask for description first)\n\n## Phase Sequence\n\nRun all 7 phases in order. For each phase adopt the role, do the work, output the result.\n\n**Phase 0 — Debate (Staff Engineer)**\n- Analyse from multiple architectural angles\n- Surface trade-offs and open questions\n\n**Phase 1 — Product Manager**\n- Define acceptance criteria and user stories\n- Output: spec with measurable outcomes\n\n**Phase 2 — Staff Engineer / Architecture**\n- Design implementation approach\n- Identify files to create/modify\n\n**Phase 3 — Senior Developer**\n- Write the actual code changes\n- Follow \`ai/context/coding-standards.md\`\n\n**Phase 4 — Security Engineer**\n- Review for auth, input validation, injection risks\n- Output: security sign-off or blockers\n\n**Phase 5 — QA Engineer**\n- Write test cases, identify edge cases\n- Output: test plan + pass/fail verdict\n\n**Phase 6 — DevOps**\n- Deployment impact, migrations, config changes\n- Output: deployment checklist\n\n## Phase Output Format\n\n\`\`\`\n▸ SMASH OS · phase {N} · {role}\n─────────────────────────────────────\n{output}\n─────────────────────────────────────\n\`\`\`\n\n## After All Phases\n\nSave key decisions to \`ai/memory/decisions.md\`.\n\nFinal output:\n\`\`\`\n▸ SMASH OS · pipeline complete [local]\n─────────────────────────────────────\n type {type}\n phases 7 / 7\n status completed\n─────────────────────────────────────\n\`\`\`\n\nIf enhancement is medium or high, also run \`/smash-os:extract-learnings\` after completion.\n`,
  // /smash-os:enhancement — read/write the .smash-os-enhancement level.
  'smash-os-enhancement': `---\nname: smash-os-enhancement\ndescription: View or change the SmashOS enhancement level (off/light/medium/high). Controls how deeply superpowers and GSD frameworks integrate into role workflows.\nallowed-tools: Read, Write\n---\n\n# /smash-os:enhancement [level]\n\nView or set the SmashOS enhancement mode.\n\n## Steps\n1. Read the argument (if any). Valid values: \`off\`, \`light\`, \`medium\`, \`high\`\n2. If no argument: read \`.smash-os-enhancement\` (default: off) and display current level + description\n3. If argument provided: validate, write to \`.smash-os-enhancement\`, confirm change\n\n## Level Descriptions\n\n| Level | What it does |\n|---|---|\n| off | Roles operate standalone — no framework integration |\n| light | Roles surface hints for relevant superpowers skills |\n| medium | Structured workflows enforced: brainstorm → plan → TDD → verify + learning extraction |\n| high | Full GSD phase management + active learning extraction → feeds skill evolution |\n\n## Output Format\n\n\`\`\`\n▸ SMASH OS · enhancement mode\n─────────────────────────────────────\n level {level}\n {description}\n─────────────────────────────────────\n\`\`\`\n`,
  // /smash-os:extract-learnings — persist session learnings to ai/memory/.
  'smash-os-extract-learnings': `---\nname: smash-os-extract-learnings\ndescription: Extract conventions, decisions, and lessons from the current session and save to ai/memory/. At high enhancement mode this runs automatically after each significant phase. Feeds into skill evolution.\nallowed-tools: Read, Write, Glob\n---\n\n# /smash-os:extract-learnings\n\nExtract and persist learnings from the current session.\n\n## Steps\n\n1. Review recent work in this session — what was built, what decisions were made, what friction was encountered\n2. Identify items in each category:\n - **Conventions discovered**: patterns or standards that emerged or were reinforced\n - **Decisions made**: architectural or product choices with rationale\n - **Lessons learned**: what went wrong or could be improved\n - **Role improvements**: behaviors that should be added/removed from a role definition\n3. Write to \`ai/memory/lessons.md\` (append) — lessons + conventions\n4. Write to \`ai/memory/decisions.md\` (append) — decisions with rationale\n5. If role improvements were identified:\n - Read the relevant \`ai/roles/<role-slug>.md\` file (create it if missing)\n - Propose the improvement inline\n - Ask: "Apply this improvement to the [Role] definition? (y/n)"\n - If yes: write the updated role file\n6. Output a brief summary of what was extracted\n\n## Output Format\n\n\`\`\`\n▸ SMASH OS · learnings extracted\n─────────────────────────────────────\n conventions {N}\n decisions {N}\n lessons {N}\n role updates {N applied | none}\n─────────────────────────────────────\n\`\`\`\n\n## Notes\n- Never overwrite existing entries — always append\n- Role improvements should be additive, not destructive\n- These files are read by skill-evolution — write clearly for future context\n- If nothing significant happened this session: output "No learnings to extract"\n`,
  // /smash-os:role-improve — apply accumulated role learnings back to role files.
  'smash-os-role-improve': `---\nname: smash-os-role-improve\ndescription: Role improvement pass — reads accumulated learnings from ai/memory/role-learnings/, applies insights back to each role definition. Gains new rules, prunes stale ones, consolidates duplicates, enhances the Enhancement Layer. Run manually or via the RoleImprovement scheduled task.\nallowed-tools: Read, Write, Edit\n---\n\n# /smash-os:role-improve\n\nRead all role learning logs. For each role with new entries, analyse and apply improvements back to the role definition.\n\n## Step 1 — Discover roles with learnings\n\nRead all files in \`ai/memory/role-learnings/\`. Skip any with no entries beyond the header (nothing after the \`---\` separator).\n\n## Step 2 — Analyse each learning log\n\nFor each role that has entries, extract:\n- **Gains** — new constraints or patterns that consistently improved output\n- **Prunes** — Hard Rules flagged as redundant, too slow, or consistently skipped\n- **Consolidations** — duplicate or near-duplicate rules that can be merged\n- **Enhancement Layer improvements** — which skills actually added value at which levels\n\n## Step 3 — Apply improvements to each role file\n\n1. Add new Hard Rules from Gains — each dated \`[YYYY-MM-DD]\`\n2. Remove pruned rules if flagged 3+ times; convert to comment if 1–2 times\n3. Consolidate duplicate rules\n4. Update Enhancement Layer skill mappings\n5. Append to \`## Improvement History\` (create if missing): \`- YYYY-MM-DD: {N gains, M prunes, K consolidations}\`\n\nDo not change Purpose, Responsibilities, Output Format, or Selective Context Load unless a learning entry explicitly proposes it.\n\n## Step 4 — Archive processed learnings\n\nAfter updating a role file, append an archive marker and move processed entries below it. New entries append above the archive.\n\n## Step 5 — Report\n\n\`\`\`\n▸ SMASH OS · role improvement pass\n─────────────────────────────────────\n {date}\n ROLES UPDATED {list with counts}\n ROLES SKIPPED {list — no new learnings}\n─────────────────────────────────────\n\`\`\`\n\n## Rules\n- Never remove a Hard Rule without 3+ learning entries flagging it\n- All new Hard Rules carry a derivation date\n- Idempotent — no new entries = no changes\n`,
  // /smash-os:evolve-roles — weekly evolution of role definitions from lessons.
  'smash-os-evolve-roles': `---\nname: smash-os-evolve-roles\ndescription: Analyze accumulated learnings from ai/memory/ and propose targeted improvements to SmashOS role files. Bridges extract-learnings to skill evolution. Runs automatically weekly via Task Scheduler.\nallowed-tools: Read, Write, Glob\n---\n\n# /smash-os:evolve-roles\n\nAnalyze learnings accumulated across sessions and evolve role definitions based on real project experience.\n\n## Steps\n\n1. Read \`ai/memory/lessons.md\` — find entries tagged with role improvement suggestions (look for "Role improvements:", "role:", "[role improvement]" markers)\n2. Read \`ai/memory/decisions.md\` — find conventions that should be encoded into specific roles\n3. Skip any learning already marked with \`[applied YYYY-MM-DD]\`\n4. List all files in \`ai/roles/\` — project-specific role overrides\n - If \`ai/roles/\` is empty or missing: offer to scaffold defaults (staff-engineer, senior-developer, security-engineer)\n5. For each pending improvement, identify the target role and present:\n - Current relevant section (or "no file yet")\n - Proposed addition/change (be specific — not "be more careful", but "always check for stale migrations before running tests")\n - Ask: "Apply to [role-name].md? (y/n)"\n6. If approved: write/update \`ai/roles/<role-slug>.md\`, mark the source lesson as \`[applied {date}]\` in lessons.md\n7. After all project-level changes, ask: "Any improvements universal enough for the global skill? (y/n)"\n - If yes: list candidates, confirm each, then append to \`~/.claude/skills/smash-os-role/SKILL.md\` under a "## Project-Learned Behaviors" section\n8. Output summary\n\n## Output Format\n\n\`\`\`\n▸ SMASH OS · role evolution\n─────────────────────────────────────\n learnings reviewed {N}\n improvements found {N}\n applied to roles {N}\n global skill updated {yes | no}\n─────────────────────────────────────\n\`\`\`\n\n## Rules\n- Never remove existing role capabilities — only add or refine\n- Improvements must be specific and actionable, not vague\n- Mark applied learnings so they are not re-proposed on the next run\n- Global skill updates: only universal, project-agnostic behaviors (not "use Postgres migrations" — yes to "always confirm schema changes with Security Engineer before applying")\n- If no pending improvements: output "Roles are up to date — no changes needed"\n`,
  // /smash-os:performance — 6-dimension performance audit + auto-fix pipeline.
  'smash-os-performance': `---\nname: smash-os-performance\ndescription: Full performance audit and fix pipeline for any SmashBurgerBar web app. Scores performance across 6 dimensions, auto-fixes safe issues, generates prioritised action plans.\n---\n\n# /smash-os:performance\n\nRun a full performance audit for the current project. Scores every dimension, auto-fixes safe wins, and generates a prioritised fix plan.\n\n---\n\n## Phase 1 — Detect Project Type\n\nRead \`package.json\` and identify:\n- Framework: React Router v7 / Next.js / plain React\n- Renderer: SSR / CSR / SSG\n- Key deps: \`@tanstack/react-virtual\`, \`@vercel/analytics\`, \`@vercel/speed-insights\`\n- Build tool: Vite / Webpack / Turbopack\n\n---\n\n## Phase 2 — Score Performance Dimensions (0–100 each)\n\n| # | Dimension | Weight | How to measure |\n|---|---|---|---|\n| 1 | **Data Loading Strategy** | 25% | Single Fetch enabled? Loaders return naked objects? \`json()\`/\`defer()\` still used? Streaming used for slow data? |\n| 2 | **Rendering & Hydration** | 20% | Server-first? Unnecessary client components? Hydration mismatches? Third-party SDKs causing flicker? |\n| 3 | **Bundle Size** | 20% | Check \`build/\` output sizes. Large deps in client bundle? Code splitting in place? |\n| 4 | **List & Grid Performance** | 15% | Lists over 100 items without virtualisation? \`@tanstack/react-virtual\` installed? |\n| 5 | **UX Responsiveness** | 10% | \`viewTransition\` on navigation links? \`<Suspense>\` wrapping slow components? \`streamTimeout\` set? |\n| 6 | **Asset & Cache Optimisation** | 10% | Images use \`next/image\` or equivalent? Fonts optimised? \`Cache-Control\` headers set? CDN cache on static assets? |\n\n**Performance Score (PS) = sum of weighted contributions**\n\nGrades: 90–100 A · 75–89 B · 60–74 C · 40–59 D · 0–39 F\n\n---\n\n## Phase 3 — Detect and Fix Common Issues\n\n### Data loading\n- [ ] Replace \`json(return {...})\` with \`return {...}\` (naked object) in all loaders\n- [ ] Replace \`defer()\` with non-awaited promises returned directly from loaders\n- [ ] Add \`shouldRevalidate\` to routes that don't need to re-fetch on every navigation\n- [ ] Identify loaders that await slow external calls — split into critical (awaited) + secondary (streamed)\n\n### Streaming\n- [ ] Check \`entry.server.tsx\` — add \`export const streamTimeout = 5000\` if absent\n- [ ] Wrap components that receive streamed promises in \`<Suspense fallback={...}>\`\n\n### Rendering\n- [ ] Audit \`'use client'\` usage — push boundaries as far down the tree as possible\n- [ ] Check for third-party SDK initialisation in components — move to server loaders\n\n### Lists\n- [ ] Find all \`.map()\` renders of lists > 100 items\n- [ ] Install \`@tanstack/react-virtual\` if not present\n- [ ] Wrap identified lists with \`useVirtualizer\`\n\n### UX transitions\n- [ ] Add \`viewTransition\` to primary navigation \`<Link>\` components\n- [ ] Add \`streamTimeout\` export to \`entry.server.tsx\`\n\n### Assets\n- [ ] Check image tags — ensure \`loading="lazy"\` on below-fold images\n- [ ] Verify \`Cache-Control: public, max-age=31536000, immutable\` on static assets in \`vercel.json\` or equivalent\n\n---\n\n## Phase 4 — Auto-Fix Protocol\n\n| Severity | Action |\n|---|---|\n| Safe mechanical change (remove \`json()\`, add \`streamTimeout\`) | Fix inline, note in report |\n| Requires logic change (split loader, add virtualisation) | Generate \`PERF_PR_N.md\` with exact code change |\n| Requires package install | Print install command, do not run automatically |\n\n---\n\n## Phase 5 — Report Output\n\n\`\`\`\n╔══════════════════════════════════════════════════╗\n║ PERFORMANCE SCORE · [PROJECT NAME] ║\n╠══════════════════════════════════════════════════╣\n║ 1. Data Loading Strategy ██████░░░ 65% ×0.25 ║\n║ 2. Rendering & Hydration ████████░ 80% ×0.20 ║\n║ 3. Bundle Size ███████░░ 70% ×0.20 ║\n║ 4. List Performance ██░░░░░░░ 20% ×0.15 ║\n║ 5. UX Responsiveness █████░░░░ 50% ×0.10 ║\n║ 6. Asset & Cache ████████░ 80% ×0.10 ║\n╠══════════════════════════════════════════════════╣\n║ PERFORMANCE SCORE 63.75 → C ║\n╚══════════════════════════════════════════════════╝\n\nAuto-fixed: N items\nPRs queued: N → PERF_PR_1.md, PERF_PR_2.md\n\nNext actions:\n [HIGH] Virtualise order history list — 3,000 rows rendered on mount\n [HIGH] Split menu loader — Supabase call blocks 1.8s before first byte\n [MED] Add streamTimeout to entry.server.tsx\n [MED] Remove json() from 4 loaders\n [LOW] Add viewTransition to nav links\n\`\`\`\n\nSave report to \`ai/memory/audits/PERF_REPORT_<YYYY-MM-DD>.md\`.\n\n---\n\n## Roles active during this pipeline\n\n| Role | Mode | Responsibility |\n|---|---|---|\n| Staff Engineer | THINKING | Identifies architectural causes of slowness |\n| Senior Developer | EXECUTION | Implements fixes |\n| QA Engineer | VALIDATION | Verifies fixes don't break functionality |\n\n---\n\n## Key research reference\n\n\`docs/research/react-router-v7-performance.md\` — React Router v7 + React 19 performance patterns (Single Fetch, Streaming, View Transitions, Virtualisation).\n\n---\n\n## Rules\n\n- Never virtualise a list without first confirming item count from real data or code\n- Never remove \`await\` from a loader without checking if the data is used in the initial render\n- Always run \`typecheck\` after auto-fixes\n- Store all findings in \`ai/memory/audits/PERF_REPORT_<date>.md\`\n`,
};
756
+
757
// ─── Write files ──────────────────────────────────────────────────────────────
// Writes the local-mode marker files, CLAUDE.md, and the ai/ skeleton into the
// target repo. `written` counts every file actually created for the final
// summary line. Existing ai/ files are never overwritten — only missing ones
// are created.

let written = 0;

// Marker read by hooks/skills to detect local (offline) mode.
writeFile('.smash-os-mode', 'local\n');
console.log(' ' + chalk.green('✓') + ' ' + chalk.white('.smash-os-mode') + chalk.dim(' (local)'));
written++;

// Enhancement level chosen during the prompts; falls back to 'medium' when
// unset (|| also maps an empty string to 'medium').
writeFile('.smash-os-enhancement', (enhancement || 'medium') + '\n');
console.log(' ' + chalk.green('✓') + ' ' + chalk.white('.smash-os-enhancement') + chalk.dim(` (${enhancement || 'medium'})`));
written++;

// claudeMd is assembled earlier in this file from the user's answers.
writeFile('CLAUDE.md', claudeMd);
console.log(' ' + chalk.green('✓') + ' ' + chalk.white('CLAUDE.md'));
written++;

// ai/ skeleton: create each file only if absent so re-runs preserve any
// content the user (or SmashOS sessions) have already written.
for (const [relPath, content] of Object.entries(aiFiles)) {
  if (!existsSync(join(cwd, relPath))) {
    writeFile(relPath, content);
    console.log(' ' + chalk.green('✓') + ' ' + chalk.white(relPath));
    written++;
  } else {
    console.log(' ' + chalk.dim('↷') + ' ' + chalk.dim(`${relPath} (already exists — skipped)`));
  }
}
782
+
783
// ─── Frontend roles (opt-in) ──────────────────────────────────────────────────
// Copies the packaged frontend role/workflow markdown files into the target
// repo when the user opted into the frontend team. Existing destination files
// are never overwritten.

if (isFrontend) {
  // Resolve the installed package root (one level above this script).
  // Fixes over the raw URL-pathname approach:
  //   1. decodeURIComponent — file URLs percent-encode characters such as
  //      spaces (e.g. "C:/Users/John%20Smith/..."), which would make every
  //      existsSync(srcPath) lookup below fail silently.
  //   2. Case-insensitive drive-letter match — Windows drive letters can
  //      appear lowercase in the URL pathname; the old /^\/([A-Z]:)/ missed
  //      them and left a leading "/" that breaks path.join.
  const scriptPath = decodeURIComponent(new URL(import.meta.url).pathname).replace(/^\/([A-Za-z]:)/, '$1');
  const smashOsRoot = join(dirname(scriptPath), '..');
  // src = path inside the published package; dest = path inside the user repo.
  const frontendRoles = [
    { src: 'ai/roles/ui-ux-designer.md', dest: 'ai/roles/ui-ux-designer.md' },
    { src: 'ai/roles/frontend-developer.md', dest: 'ai/roles/frontend-developer.md' },
    { src: 'ai/roles/frontend-qa.md', dest: 'ai/roles/frontend-qa.md' },
    { src: 'ai/workflows/frontend-audit.md', dest: 'ai/workflows/frontend-audit.md' },
  ];
  console.log('');
  console.log(chalk.dim(' Frontend team:'));
  for (const { src, dest } of frontendRoles) {
    const srcPath = join(smashOsRoot, src);
    if (!existsSync(join(cwd, dest))) {
      if (existsSync(srcPath)) {
        writeFile(dest, readFileSync(srcPath, 'utf8'));
        console.log(' ' + chalk.green('✓') + ' ' + chalk.white(dest));
        written++;
      } else {
        // Package layout changed or partial install — warn and continue
        // rather than aborting the whole installation.
        console.log(' ' + chalk.yellow('!') + ' ' + chalk.dim(`${src} — source not found, skipped`));
      }
    } else {
      console.log(' ' + chalk.dim('↷') + ' ' + chalk.dim(`${dest} (already exists — skipped)`));
    }
  }
}
810
+
811
// Install the two generated hook scripts into the repo's .claude/hooks/.
// Unlike the ai/ skeleton above, these are always (re)written so upgrades
// ship the latest hook code.
writeFile('.claude/hooks/smash-os-boot.mjs', bootHook);
console.log(' ' + chalk.green('✓') + ' ' + chalk.white('.claude/hooks/smash-os-boot.mjs'));
written++;

writeFile('.claude/hooks/smash-os-sync.mjs', syncHook);
console.log(' ' + chalk.green('✓') + ' ' + chalk.white('.claude/hooks/smash-os-sync.mjs'));
written++;

// Merge the hook wiring into any existing .claude/settings.json; the helper
// returns truthy when it merged into an existing file, falsy when it created
// a new one.
const merged = mergeSettingsJson(settingsJson);
console.log(
  ' ' + chalk.green('✓') + ' ' +
  chalk.white('.claude/settings.json') +
  chalk.dim(merged ? ' (merged with existing)' : ' (created)')
);
written++;

// Install the skills globally under ~/.claude/skills/ so /smash-os:* commands
// are available in every project. Note: always overwritten (no exists check),
// so local edits to a SKILL.md are replaced on reinstall.
console.log('');
for (const [skillName, skillContent] of Object.entries(localSkills)) {
  const skillDir = join(homedir(), '.claude', 'skills', skillName);
  mkdirSync(skillDir, { recursive: true });
  writeFileSync(join(skillDir, 'SKILL.md'), skillContent, 'utf8');
  console.log(' ' + chalk.green('✓') + ' ' + chalk.white(`/smash-os:${skillName.replace('smash-os-', '')}`) + chalk.dim(' → ~/.claude/skills/'));
  written++;
}

// Final summary and next-step instructions for the user.
console.log('');
console.log(chalk.bold.green(' SmashOS harness installed!') + chalk.dim(` (${written} files — local mode)`));
console.log('');
console.log(chalk.dim(' Open Claude Code in this directory:'));
console.log(' ' + chalk.white(' claude .'));
console.log('');
console.log(chalk.dim(' Fill in ai/context/ with your project details.'));
console.log(chalk.dim(' Then run /smash-os:run to start a pipeline.'));
console.log('');

// Optional extras chosen during the prompts: scheduled-task automation and
// MCP server installation (keyed off the enhancement level, default medium).
if (setupAutomation) runAutomation(cwd);

await checkAndInstallMcps(enhancement || 'medium');