@sandrinio/vbounce 1.8.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. package/README.md +439 -159
  2. package/bin/vbounce.mjs +255 -25
  3. package/brains/AGENTS.md +62 -24
  4. package/brains/CHANGELOG.md +24 -2
  5. package/brains/CLAUDE.md +124 -30
  6. package/brains/GEMINI.md +64 -27
  7. package/brains/SETUP.md +15 -15
  8. package/brains/claude-agents/architect.md +1 -1
  9. package/brains/claude-agents/developer.md +7 -5
  10. package/brains/claude-agents/devops.md +1 -1
  11. package/brains/claude-agents/qa.md +1 -1
  12. package/brains/claude-agents/scribe.md +1 -1
  13. package/brains/copilot/copilot-instructions.md +8 -3
  14. package/brains/cursor-rules/vbounce-docs.mdc +2 -2
  15. package/brains/cursor-rules/vbounce-process.mdc +6 -3
  16. package/brains/cursor-rules/vbounce-rules.mdc +2 -2
  17. package/brains/windsurf/.windsurfrules +7 -2
  18. package/docs/HOTFIX_EDGE_CASES.md +1 -1
  19. package/package.json +5 -5
  20. package/scripts/close_sprint.mjs +32 -1
  21. package/scripts/doctor.mjs +3 -3
  22. package/scripts/hotfix_manager.sh +2 -2
  23. package/scripts/init_gate_config.sh +1 -1
  24. package/scripts/post_sprint_improve.mjs +486 -0
  25. package/scripts/pre_gate_common.sh +1 -1
  26. package/scripts/pre_gate_runner.sh +1 -1
  27. package/scripts/suggest_improvements.mjs +207 -44
  28. package/scripts/validate_report.mjs +1 -1
  29. package/scripts/verify_framework.mjs +1 -1
  30. package/skills/agent-team/SKILL.md +48 -25
  31. package/skills/agent-team/references/discovery.md +97 -0
  32. package/skills/doc-manager/SKILL.md +146 -22
  33. package/skills/improve/SKILL.md +149 -58
  34. package/skills/lesson/SKILL.md +14 -0
  35. package/templates/epic.md +19 -16
  36. package/templates/spike.md +143 -0
  37. package/templates/sprint.md +32 -12
  38. package/templates/sprint_report.md +6 -4
  39. package/templates/story.md +23 -8
package/bin/vbounce.mjs CHANGED
@@ -2,6 +2,7 @@
2
2
 
3
3
  import fs from 'fs';
4
4
  import path from 'path';
5
+ import crypto from 'crypto';
5
6
  import { fileURLToPath } from 'url';
6
7
  import readline from 'readline';
7
8
 
@@ -65,10 +66,10 @@ const askQuestion = (query) => new Promise(resolve => rl.question(query, resolve
65
66
 
66
67
  function displayHelp() {
67
68
  console.log(`
68
- V-Bounce OS CLI
69
+ V-Bounce Engine CLI
69
70
 
70
71
  Usage:
71
- vbounce install <platform> Install V-Bounce OS into a project
72
+ vbounce install <platform> Install V-Bounce Engine into a project
72
73
  vbounce state show Show current sprint state
73
74
  vbounce state update <storyId> <state|--qa-bounce>
74
75
  vbounce sprint init <sprintId> <deliveryId> [--stories STORY-001,...]
@@ -85,6 +86,7 @@ Usage:
85
86
  vbounce docs check <sprintId> Detect stale vdocs and generate Scribe task
86
87
  vbounce trends Cross-sprint trend analysis
87
88
  vbounce suggest <sprintId> Generate improvement suggestions
89
+ vbounce improve <sprintId> Run full self-improvement pipeline
88
90
  vbounce doctor Validate all configs and state files
89
91
 
90
92
  Install Platforms:
@@ -194,6 +196,26 @@ if (command === 'suggest') {
194
196
  runScript('suggest_improvements.mjs', args.slice(1));
195
197
  }
196
198
 
199
+ // -- improve --
200
+ if (command === 'improve') {
201
+ rl.close();
202
+ // Full pipeline: analyze → trends → suggest
203
+ const sprintArg = args[1];
204
+ if (!sprintArg) {
205
+ console.error('Usage: vbounce improve S-XX');
206
+ process.exit(1);
207
+ }
208
+ // Run trends first
209
+ const trendsPath = path.join(pkgRoot, 'scripts', 'sprint_trends.mjs');
210
+ if (fs.existsSync(trendsPath)) {
211
+ console.log('Step 1/2: Running cross-sprint trend analysis...');
212
+ spawnSync(process.execPath, [trendsPath], { stdio: 'inherit', cwd: process.cwd() });
213
+ }
214
+ // Run suggest (which internally runs post_sprint_improve.mjs)
215
+ console.log('\nStep 2/2: Running improvement analyzer + suggestions...');
216
+ runScript('suggest_improvements.mjs', [sprintArg]);
217
+ }
218
+
197
219
  // -- docs --
198
220
  if (command === 'docs') {
199
221
  rl.close();
@@ -227,6 +249,7 @@ if (command === 'install') {
227
249
  }
228
250
 
229
251
  const CWD = process.cwd();
252
+ const pkgVersion = JSON.parse(fs.readFileSync(path.join(pkgRoot, 'package.json'), 'utf8')).version;
230
253
 
231
254
  // Map vbounce platform names to vdoc platform names
232
255
  const vdocPlatformMap = {
@@ -287,34 +310,208 @@ if (command === 'install') {
287
310
  displayHelp();
288
311
  }
289
312
 
290
- console.log(`\n🚀 Preparing to install V-Bounce OS for \x1b[36m${targetPlatform}\x1b[0m...\n`);
313
+ // ---------------------------------------------------------------------------
314
+ // Upgrade-safe install helpers
315
+ // ---------------------------------------------------------------------------
291
316
 
292
- const toCopy = [];
293
- const toOverwrite = [];
317
+ const META_PATH = path.join(CWD, '.bounce', 'install-meta.json');
318
+ const BACKUPS_DIR = path.join(CWD, '.bounce', 'backups');
294
319
 
295
- mapping.forEach(rule => {
296
- const sourcePath = path.join(pkgRoot, rule.src);
297
- const destPath = path.join(CWD, rule.dest);
320
+ /** Compute MD5 hash of a single file's contents. */
321
+ function computeFileHash(filePath) {
322
+ const content = fs.readFileSync(filePath);
323
+ return crypto.createHash('md5').update(content).digest('hex');
324
+ }
298
325
 
299
- if (!fs.existsSync(sourcePath)) {
300
- return; // Source does not exist internally, skip
326
+ /** Compute a combined hash for a directory by hashing all files sorted by relative path. */
327
+ function computeDirHash(dirPath) {
328
+ const hash = crypto.createHash('md5');
329
+ const entries = [];
330
+
331
+ function walk(dir, rel) {
332
+ for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
333
+ const fullPath = path.join(dir, entry.name);
334
+ const relPath = path.join(rel, entry.name);
335
+ if (entry.isDirectory()) {
336
+ walk(fullPath, relPath);
337
+ } else {
338
+ entries.push({ relPath, fullPath });
339
+ }
340
+ }
301
341
  }
302
342
 
303
- if (fs.existsSync(destPath)) {
304
- toOverwrite.push(rule.dest);
305
- } else {
306
- toCopy.push(rule.dest);
343
+ walk(dirPath, '');
344
+ entries.sort((a, b) => a.relPath.localeCompare(b.relPath));
345
+ for (const e of entries) {
346
+ hash.update(e.relPath);
347
+ hash.update(fs.readFileSync(e.fullPath));
307
348
  }
308
- });
349
+ return hash.digest('hex');
350
+ }
351
+
352
+ /** Compute hash for a path (file or directory). */
353
+ function computeHash(p) {
354
+ const stats = fs.statSync(p);
355
+ return stats.isDirectory() ? computeDirHash(p) : computeFileHash(p);
356
+ }
357
+
358
+ /** Count files in a path (1 for a file, recursive count for a directory). */
359
+ function countFiles(p) {
360
+ const stats = fs.statSync(p);
361
+ if (!stats.isDirectory()) return 1;
362
+ let count = 0;
363
+ function walk(dir) {
364
+ for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
365
+ if (entry.isDirectory()) walk(path.join(dir, entry.name));
366
+ else count++;
367
+ }
368
+ }
369
+ walk(p);
370
+ return count;
371
+ }
372
+
373
+ /** Read install-meta.json, returns null if missing. */
374
+ function readInstallMeta() {
375
+ if (!fs.existsSync(META_PATH)) return null;
376
+ try {
377
+ return JSON.parse(fs.readFileSync(META_PATH, 'utf8'));
378
+ } catch {
379
+ return null;
380
+ }
381
+ }
382
+
383
+ /** Write install-meta.json. */
384
+ function writeInstallMeta(version, platform, files, hashes) {
385
+ const meta = {
386
+ version,
387
+ platform,
388
+ installed_at: new Date().toISOString(),
389
+ files,
390
+ hashes
391
+ };
392
+ fs.mkdirSync(path.dirname(META_PATH), { recursive: true });
393
+ fs.writeFileSync(META_PATH, JSON.stringify(meta, null, 2) + '\n');
394
+ }
395
+
396
+ /** Backup files to .bounce/backups/<version>/. Removes previous backup first. */
397
+ function backupFiles(version, paths) {
398
+ // Remove previous backup (keep only one)
399
+ if (fs.existsSync(BACKUPS_DIR)) {
400
+ for (const entry of fs.readdirSync(BACKUPS_DIR, { withFileTypes: true })) {
401
+ if (entry.isDirectory()) {
402
+ fs.rmSync(path.join(BACKUPS_DIR, entry.name), { recursive: true, force: true });
403
+ }
404
+ }
405
+ }
406
+
407
+ const backupDir = path.join(BACKUPS_DIR, version);
408
+ fs.mkdirSync(backupDir, { recursive: true });
409
+
410
+ for (const relPath of paths) {
411
+ const src = path.join(CWD, relPath);
412
+ const dest = path.join(backupDir, relPath);
413
+
414
+ if (!fs.existsSync(src)) continue;
415
+
416
+ const stats = fs.statSync(src);
417
+ if (stats.isDirectory()) {
418
+ fs.mkdirSync(dest, { recursive: true });
419
+ fs.cpSync(src, dest, { recursive: true });
420
+ } else {
421
+ fs.mkdirSync(path.dirname(dest), { recursive: true });
422
+ fs.copyFileSync(src, dest);
423
+ }
424
+ }
425
+
426
+ return backupDir;
427
+ }
428
+
429
+ /**
430
+ * Classify files into unchanged, modified, and newFiles.
431
+ * - unchanged: dest exists and matches what was installed (safe to overwrite)
432
+ * - modified: dest exists but differs from what was installed (user changed it)
433
+ * - newFiles: dest does not exist
434
+ */
435
+ function classifyFiles(mappingRules, meta) {
436
+ const unchanged = [];
437
+ const modified = [];
438
+ const newFiles = [];
439
+
440
+ for (const rule of mappingRules) {
441
+ const sourcePath = path.join(pkgRoot, rule.src);
442
+ const destPath = path.join(CWD, rule.dest);
443
+
444
+ if (!fs.existsSync(sourcePath)) continue;
445
+
446
+ if (!fs.existsSync(destPath)) {
447
+ newFiles.push(rule);
448
+ continue;
449
+ }
450
+
451
+ // Dest exists — classify as unchanged or modified
452
+ if (!meta || !meta.hashes || !meta.hashes[rule.dest]) {
453
+ // No metadata (first upgrade) — treat as modified to be safe
454
+ modified.push(rule);
455
+ continue;
456
+ }
457
+
458
+ const currentHash = computeHash(destPath);
459
+ const installedHash = meta.hashes[rule.dest];
460
+
461
+ if (currentHash === installedHash) {
462
+ unchanged.push(rule);
463
+ } else {
464
+ modified.push(rule);
465
+ }
466
+ }
467
+
468
+ return { unchanged, modified, newFiles };
469
+ }
470
+
471
+ // ---------------------------------------------------------------------------
472
+ // Begin install flow
473
+ // ---------------------------------------------------------------------------
309
474
 
310
- if (toCopy.length > 0) {
311
- console.log('The following will be \x1b[32mCREATED\x1b[0m:');
312
- toCopy.forEach(f => console.log(` + ${f}`));
475
+ const meta = readInstallMeta();
476
+ const isUpgrade = meta !== null;
477
+
478
+ if (isUpgrade) {
479
+ console.log(`\n🚀 V-Bounce Engine \x1b[36m${pkgVersion}\x1b[0m (upgrading from \x1b[33m${meta.version}\x1b[0m)\n`);
480
+ } else {
481
+ console.log(`\n🚀 Preparing to install V-Bounce Engine \x1b[36m${pkgVersion}\x1b[0m for \x1b[36m${targetPlatform}\x1b[0m...\n`);
313
482
  }
314
483
 
315
- if (toOverwrite.length > 0) {
316
- console.log('\nThe following will be \x1b[33mOVERWRITTEN\x1b[0m:');
317
- toOverwrite.forEach(f => console.log(` ! ${f}`));
484
+ const { unchanged, modified, newFiles } = classifyFiles(mapping, meta);
485
+
486
+ if (unchanged.length > 0) {
487
+ console.log('Will update (unchanged by you):');
488
+ for (const rule of unchanged) {
489
+ const destPath = path.join(CWD, rule.dest);
490
+ const n = countFiles(destPath);
491
+ const label = n > 1 ? `(${n} files)` : '';
492
+ console.log(` \x1b[32m✓\x1b[0m ${rule.dest} ${label}`);
493
+ }
494
+ }
495
+
496
+ if (modified.length > 0) {
497
+ const backupLabel = isUpgrade ? `.bounce/backups/${meta.version}/` : '.bounce/backups/pre-install/';
498
+ console.log(`\nModified by you (backed up to ${backupLabel}):`);
499
+ for (const rule of modified) {
500
+ console.log(` \x1b[33m⚠\x1b[0m ${rule.dest}`);
501
+ }
502
+ }
503
+
504
+ if (newFiles.length > 0) {
505
+ console.log('\nNew in this version:');
506
+ for (const rule of newFiles) {
507
+ console.log(` \x1b[32m+\x1b[0m ${rule.dest}`);
508
+ }
509
+ }
510
+
511
+ if (unchanged.length === 0 && modified.length === 0 && newFiles.length === 0) {
512
+ rl.close();
513
+ console.log('Nothing to install — all source files are missing from the package.');
514
+ process.exit(0);
318
515
  }
319
516
 
320
517
  console.log('');
@@ -328,13 +525,23 @@ if (command === 'install') {
328
525
  process.exit(0);
329
526
  }
330
527
 
528
+ // Backup modified files before overwriting
529
+ if (modified.length > 0) {
530
+ const backupVersion = isUpgrade ? meta.version : 'pre-install';
531
+ const backupDir = backupFiles(backupVersion, modified.map(r => r.dest));
532
+ console.log(`\n📂 Backed up modified files to ${path.relative(CWD, backupDir)}/`);
533
+ }
534
+
331
535
  console.log('\n📦 Installing files...');
332
536
 
333
- mapping.forEach(rule => {
537
+ const installedFiles = [];
538
+ const hashes = {};
539
+
540
+ for (const rule of [...unchanged, ...modified, ...newFiles]) {
334
541
  const sourcePath = path.join(pkgRoot, rule.src);
335
542
  const destPath = path.join(CWD, rule.dest);
336
543
 
337
- if (!fs.existsSync(sourcePath)) return;
544
+ if (!fs.existsSync(sourcePath)) continue;
338
545
 
339
546
  const stats = fs.statSync(sourcePath);
340
547
  if (stats.isDirectory()) {
@@ -347,8 +554,16 @@ if (command === 'install') {
347
554
  }
348
555
  fs.copyFileSync(sourcePath, destPath);
349
556
  }
557
+
558
+ // Record hash of what we just installed (from source)
559
+ hashes[rule.dest] = computeHash(sourcePath);
560
+ installedFiles.push(rule.dest);
350
561
  console.log(` \x1b[32m✓\x1b[0m ${rule.dest}`);
351
- });
562
+ }
563
+
564
+ // Write install metadata
565
+ writeInstallMeta(pkgVersion, targetPlatform, installedFiles, hashes);
566
+ console.log(` \x1b[32m✓\x1b[0m .bounce/install-meta.json`);
352
567
 
353
568
  console.log('\n⚙️ Installing dependencies...');
354
569
  try {
@@ -380,7 +595,22 @@ if (command === 'install') {
380
595
  }
381
596
  }
382
597
 
383
- console.log('\n✅ V-Bounce OS successfully installed! Welcome to the team.\n');
598
+ // Auto-run doctor to verify installation
599
+ console.log('\n🩺 Running doctor to verify installation...');
600
+ const doctorPath = path.join(CWD, 'scripts', 'doctor.mjs');
601
+ if (fs.existsSync(doctorPath)) {
602
+ const result = spawnSync(process.execPath, [doctorPath], {
603
+ stdio: 'inherit',
604
+ cwd: CWD
605
+ });
606
+ if (result.status !== 0) {
607
+ console.error('\n \x1b[33m⚠\x1b[0m Doctor reported issues. Review the output above.');
608
+ }
609
+ } else {
610
+ console.log(' \x1b[33m⚠\x1b[0m Doctor script not found — skipping verification.');
611
+ }
612
+
613
+ console.log('\n✅ V-Bounce Engine successfully installed! Welcome to the team.\n');
384
614
  });
385
615
 
386
616
  } else {
package/brains/AGENTS.md CHANGED
@@ -1,10 +1,14 @@
1
- # V-Bounce OS — Agent Brain (Codex CLI)
1
+ # V-Bounce Engine — Agent Brain (Codex CLI)
2
2
 
3
- > This file configures OpenAI Codex CLI to operate within the V-Bounce OS framework.
3
+ > This file configures OpenAI Codex CLI to operate within the V-Bounce Engine framework.
4
4
 
5
5
  ## Identity
6
6
 
7
- You are an AI coding agent operating within **V-Bounce OS** — a structured system for planning, implementing, and validating software using AI agents. You work as part of a team: Team Lead, Developer, QA, Architect, DevOps, and Scribe agents collaborate through structured reports.
7
+ You are an AI operating within **V-Bounce Engine** — a structured system for planning, implementing, and validating software.
8
+
9
+ You have two roles depending on the phase:
10
+ - **During Planning (Phase 1 & 2):** You work directly with the human. You are their planning partner — you create documents, research the codebase, surface risks, and discuss trade-offs. No subagents are involved.
11
+ - **During Execution (Phase 3):** You are the Team Lead orchestrating specialist agents (Developer, QA, Architect, DevOps, Scribe) through structured reports.
8
12
 
9
13
  You MUST follow the V-Bounce process. Deviating from it — skipping validation, ignoring LESSONS.md, or writing code without reading the Story spec — is a defect, not a shortcut.
10
14
 
@@ -24,34 +28,51 @@ Skills are in the `skills/` directory. Each skill has a `SKILL.md` with instruct
24
28
 
25
29
  ## The V-Bounce Process
26
30
 
27
- ### Phase 1: Verification (Planning)
28
- Documents are created in strict hierarchy — no level can be skipped:
31
+ The process has four phases. You determine which phase to operate in based on what the human is asking for.
32
+
33
+ ### Phase 1: Planning (AI + Human — No Subagents)
34
+
35
+ **When to enter:** The human talks about what to build, asks to create or modify planning documents, discusses features, priorities, or asks about work status. This is a direct conversation — no subagents.
36
+
37
+ Read `skills/doc-manager/SKILL.md` and follow its workflows.
38
+
39
+ **Document hierarchy** — no level can be skipped:
29
40
  Charter (why) → Roadmap (strategic what/when) → Epic (detailed what) → Story (how) → Delivery Plan (execution) → Risk Registry (risks)
30
41
 
31
- ### Pre-Bounce Checks
32
- Before starting any sprint, the Team Lead MUST:
33
- - **Triage the Request**: Is this an L1 Trivial change (1-2 files, cosmetic/minor)?
34
- - If YES Use the **Hotfix Path** (create a Hotfix document, bypass Epic/Story).
35
- - If NOUse the **Standard Path** (create/find Epic, Story).
36
- - **Determine Execution Mode**: Full Bounce vs Fast Track.
37
- - **Dependency Check**: Stories with `Depends On:` must execute sequentially.
38
- - Read RISK_REGISTRY.md — flag high-severity risks that affect planned stories.
39
- Read `sprint-{XX}.md` §2 Sprint Open Questions — do not bounce stories with unresolved blocking questions.
40
- - If `vdocs/_manifest.json` exists, read it.
41
- - **Strategic Freeze**: Charter/Roadmap frozen during sprints. Use **Impact Analysis Protocol** if emergency changes occur. Evaluate active stories against new strategy. Pause until human approval.
42
-
43
- ### Phase 2: The Bounce (Implementation)
42
+ **Your responsibilities during planning:**
43
+ 1. **Creating documents:** Read upstream documents, research the codebase, draft the document. Follow doc-manager's CREATE and DECOMPOSE workflows.
44
+ 2. **Surfacing problems:** Assess ambiguity, open questions, edge cases, and risks. Present these clearly to the human — this is collaborative.
45
+ 3. **Answering status questions:** Read `product_plans/` to understand current state (backlog/, sprints/, archive/, strategy/).
46
+ 4. **Triaging requests:** L1 Trivial → Hotfix Path. Everything else → Standard Path (Epic → Story → Sprint).
47
+
48
+ ### Phase 2: Sprint Planning (AI + Human Collaborative Gate)
49
+
50
+ **When to enter:** The human wants to start executing work — "let's start a sprint", "what should we work on next?"
51
+
52
+ **Hard rule: No bounce can start without a finalized, human-confirmed Sprint Plan.**
53
+
54
+ 1. Read backlog, archive, Risk Registry, vdocs manifest
55
+ 2. Propose sprint scope based on priority, dependencies, complexity
56
+ 3. Surface blockers: open questions, 🔴 ambiguity, missing prerequisites, risks, edge cases
57
+ 4. Discuss and refine with human
58
+ 5. Create Sprint Plan from `templates/sprint.md` — fill §0 Readiness Gate, §1 Active Scope, §2 Execution Strategy, §3 Open Questions
59
+ 6. **Gate:** Human confirms the Sprint Plan. Only then set status to "Active"
60
+
61
+ **Strategic Freeze:** Charter/Roadmap frozen during sprints. Use **Impact Analysis Protocol** if emergency changes occur. Pause until human approval.
62
+
63
+ ### Phase 3: The Bounce (Execution)
44
64
  **Standard Path (L2-L4 Stories):**
45
65
  0. **Orient via state**: Read `.bounce/state.json` (`vbounce state show`) for instant context. Run `vbounce prep sprint S-{XX}` to generate a fresh context pack.
46
66
  1. Team Lead sends Story context pack to Developer.
47
67
  2. Developer reads LESSONS.md and the Story context pack, implements code, writes Implementation Report. CLI Orchestrator must run `./scripts/validate_report.mjs` on the report to enforce YAML strictness.
48
68
  3. **Pre-QA Gate Scan:** Team Lead runs `./scripts/pre_gate_runner.sh qa` to catch mechanical failures before spawning QA. If trivial issues → return to Dev.
49
69
  4. QA runs Quick Scan + PR Review (skipping pre-scanned checks), validates against Story §2 The Truth. If fail → Bug Report to Dev. CLI Orchestrator must run `./scripts/validate_report.mjs` on the QA report.
50
- 5. Dev fixes and resubmits. 3+ failures → Escalated.
70
+ 5. Dev fixes and resubmits. 3+ failures → Escalated (see Escalation Recovery below).
51
71
  6. **Pre-Architect Gate Scan:** Team Lead runs `./scripts/pre_gate_runner.sh arch` to catch structural issues before spawning Architect. If mechanical failures → return to Dev.
52
72
  7. Architect runs Deep Audit + Trend Check (skipping pre-scanned checks), validates Safe Zone compliance and ADR adherence.
53
73
  8. DevOps merges story branch into sprint branch, validates post-merge (tests + lint + build), handles release tagging.
54
- 9. Team Lead consolidates reports into Sprint Report.
74
+ 9. **Record lessons immediately**: After DevOps merge, check Dev and QA reports for `lessons_flagged`. Record to LESSONS.md now — do not wait for sprint close.
75
+ 10. Team Lead consolidates reports into Sprint Report.
55
76
 
56
77
  **Hotfix Path (L1 Trivial Tasks):**
57
78
  1. Team Lead evaluates request and creates `HOTFIX-{Date}-{Name}.md`.
@@ -61,10 +82,27 @@ Before starting any sprint, the Team Lead MUST:
61
82
  5. Hotfix is merged directly into the active branch.
62
83
  6. DevOps (or Team Lead) runs `./scripts/hotfix_manager.sh sync` to update active worktrees.
63
84
 
64
- ### Phase 3: Review
65
- Sprint Report → Human review → Delivery Plan updated → Lessons recorded → Next sprint.
85
+ **Escalation Recovery (3+ bounce failures):**
86
+ 1. Mark story as "Escalated" in Sprint Plan
87
+ 2. Present to human: what failed, root causes from bounce reports, pattern analysis
88
+ 3. Propose options: re-scope the story, split into smaller stories, create a spike, or remove from sprint
89
+ 4. Human decides. Execute the decision.
90
+
91
+ ### Phase 4: Review
92
+ Sprint Report → Human review → Delivery Plan updated (at boundary only) → Lessons recorded → Next sprint.
66
93
  If sprint delivered new features or Dev reports flagged stale product docs → spawn Scribe agent to generate/update vdocs/ via vdoc.
67
94
 
95
+ **Self-Improvement Pipeline** (auto-runs on `vbounce sprint close`):
96
+ 1. `sprint_trends.mjs` → cross-sprint trend analysis → `.bounce/trends.md`
97
+ 2. `post_sprint_improve.mjs` → parses §5 retro tables + LESSONS.md automation candidates + recurring patterns + effectiveness checks → `.bounce/improvement-manifest.json`
98
+ 3. `suggest_improvements.mjs` → generates human-readable suggestions with impact levels → `.bounce/improvement-suggestions.md`
99
+ 4. Human reviews suggestions → approve/reject/defer each item
100
+ 5. Run `/improve` to apply approved changes with brain-file sync
101
+
102
+ **Impact Levels:** P0 Critical (blocks agents), P1 High (causes rework), P2 Medium (friction), P3 Low (polish). See `/improve` skill for details.
103
+
104
+ On-demand: `vbounce improve S-{XX}` runs the full pipeline.
105
+
68
106
  ## Story States
69
107
 
70
108
  Draft → Refinement → Ready to Bounce → Bouncing → QA Passed → Architect Passed → Sprint Review → Done
@@ -101,12 +139,12 @@ Bouncing → Escalated (3+ failures)
101
139
  10. One source of truth. Reference upstream documents, don't duplicate.
102
140
  11. Change Logs are mandatory on every document modification.
103
141
  12. Agent Reports MUST use YAML Frontmatter. Every `.bounce/report/` generated must start with a strict `---` YAML block containing the core status and metrics before the Markdown body.
104
- 13. Framework Integrity. Any modification to a `brains/` or `skills/` file MUST be recorded in `brains/CHANGELOG.md`.
142
+ 13. Framework Integrity. Any modification to a `brains/`, `skills/`, `templates/`, or `scripts/` file MUST be recorded in `brains/CHANGELOG.md` and reflected in `MANIFEST.md`.
105
143
 
106
144
  ## Framework Structure
107
145
 
108
146
  ```
109
- V-Bounce OS/
147
+ V-Bounce Engine/
110
148
  ├── brains/ — Agent brain files (this file)
111
149
  ├── templates/ — Document templates (immutable)
112
150
  ├── skills/ — Agent skills (SKILL.md files)
@@ -1,8 +1,30 @@
1
- # V-Bounce OS Brains & Skills Changelog
1
+ # V-Bounce Engine Brains & Skills Changelog
2
2
 
3
3
  This log tracks modifications to the core agentic framework (e.g., `brains/`, `skills/`).
4
4
  Per **Rule 13: Framework Integrity**, anytime an entry is made here, all tool-specific brain files must be reviewed for consistency.
5
5
 
6
+ ## [2026-03-13] — Discovery Phase: Structured Spike System
7
+
8
+ ### Spike Template (New)
9
+ - **Added**: `templates/spike.md` — spike document template with YAML frontmatter (spike_id, parent_epic_ref, status, ambiguity_before, time_box), 8 sections (Question, Constraints, Approach, Findings, Decision, Residual Risk, Affected Documents checklist, Change Log). Hierarchy Level 3.5 — child of Epic, sibling of Story. Output location: `product_plans/backlog/EPIC-{NNN}_{name}/SPIKE-{EpicID}-{NNN}-{topic}.md`.
10
+
11
+ ### Discovery Reference (New)
12
+ - **Added**: `skills/agent-team/references/discovery.md` — spike execution protocol. Covers: when discovery triggers, spike lifecycle (Open → Investigating → Findings Ready → Validated → Closed), 4-step execution protocol (Create → Investigate → Validate → Close & Propagate), timing rules, integration with bounce sequence.
13
+
14
+ ### Doc-Manager Skill (Modified)
15
+ - **Modified**: `skills/doc-manager/SKILL.md` — added Spike row to Template Locations table; added spike file to folder structure diagram; added spike information flows (Epic §8 → Spike §1, Spike §4 → Epic §4, Spike §5 → Roadmap §3, Spike §6 → Risk Registry); added Spike pre-read requirements; added spike cascade rules; added spike transition gates (Probing/Spiking → Refinement, Spike → Validated, Spike → Closed); updated Developer and Architect agent integration rows with spike ownership; added Ambiguity Assessment Rubric section with 🔴/🟡/🟢 signal definitions and spike creation trigger.
16
+
17
+ ### Agent-Team Skill (Modified)
18
+ - **Modified**: `skills/agent-team/SKILL.md` — added Step 0.5: Discovery Check between Sprint Setup and Story Initialization; added critical rule "Resolve discovery before bouncing" requiring L4/🔴 stories to complete spikes before entering bounce sequence.
19
+
20
+ ### Claude Brain (Modified)
21
+ - **Modified**: `brains/CLAUDE.md` — added Discovery Check to Pre-Bounce Checks; expanded L4 complexity label with spike creation and validation requirements; updated Story States diagram to show spike sub-flow (Dev investigates → Arch validates → docs updated).
22
+
23
+ ### Sync Notes
24
+ - Other brain files (`GEMINI.md`, `AGENTS.md`, `cursor-rules/`) not yet updated — should be synced in a follow-up change.
25
+
26
+ ---
27
+
6
28
  ## [2026-03-12] — LanceDB Removal
7
29
 
8
30
  - **Removed**: `scripts/vbounce_ask.mjs` — LanceDB semantic query tool. Replaced by direct `LESSONS.md` reads.
@@ -21,7 +43,7 @@ Per **Rule 13: Framework Integrity**, anytime an entry is made here, all tool-sp
21
43
  - **Modified**: `.gitignore` — Removed `.bounce/.lancedb/` entry.
22
44
  - **Rationale**: Modern LLMs have 200K+ token context windows. The prep scripts (`vbounce prep sprint/qa/arch`) + LESSONS.md graduation provide targeted, deterministic context without embedding models, sync steps, or heavy dependencies. Removes ~50MB of node_modules and eliminates the most common setup failure point.
23
45
 
24
- ## [2026-03-12] — V-Bounce OS Optimization Plan (12-Change Batch)
46
+ ## [2026-03-12] — V-Bounce Engine Optimization Plan (12-Change Batch)
25
47
 
26
48
  ### State Management (Change #1)
27
49
  - **Added**: `.bounce/state.json` — machine-readable sprint state snapshot for crash recovery. Tracks sprint_id, delivery_id, current_phase, last_action, and per-story state (V-Bounce state, bounce counts, worktree path, updated_at).