@sandrinio/vbounce 2.0.0 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +19 -7
- package/bin/vbounce.mjs +23 -0
- package/brains/AGENTS.md +44 -151
- package/brains/CHANGELOG.md +113 -0
- package/brains/CLAUDE.md +50 -219
- package/brains/GEMINI.md +63 -181
- package/package.json +1 -1
- package/scripts/close_sprint.mjs +9 -0
- package/scripts/complete_story.mjs +8 -0
- package/scripts/init_sprint.mjs +8 -0
- package/scripts/product_graph.mjs +387 -0
- package/scripts/product_impact.mjs +167 -0
- package/skills/agent-team/SKILL.md +15 -3
- package/skills/agent-team/references/mid-sprint-triage.md +40 -26
- package/skills/doc-manager/SKILL.md +30 -1
- package/skills/improve/SKILL.md +4 -4
- package/skills/product-graph/SKILL.md +102 -0
- package/templates/bug.md +90 -0
- package/templates/change_request.md +105 -0
- package/templates/sprint.md +19 -5
|
@@ -0,0 +1,387 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* product_graph.mjs
|
|
5
|
+
* Scans product_plans/ for planning documents, extracts YAML frontmatter,
|
|
6
|
+
* and outputs a lightweight JSON graph to .bounce/product-graph.json.
|
|
7
|
+
*
|
|
8
|
+
* The graph gives AI instant awareness of all product documents and their
|
|
9
|
+
* relationships without reading every file.
|
|
10
|
+
*
|
|
11
|
+
* Usage:
|
|
12
|
+
* node scripts/product_graph.mjs
|
|
13
|
+
* node scripts/product_graph.mjs --json # output to stdout instead of file
|
|
14
|
+
*/
|
|
15
|
+
|
|
16
|
+
import fs from 'fs';
|
|
17
|
+
import path from 'path';
|
|
18
|
+
import { fileURLToPath } from 'url';
|
|
19
|
+
|
|
20
|
+
// js-yaml is loaded dynamically so a missing optional dependency produces an
// actionable install hint instead of a raw module-resolution error.
let yaml;
try {
  yaml = await import('js-yaml');
} catch {
  console.error('ERROR: js-yaml not installed. Run: npm install js-yaml');
  process.exit(1);
}

// Resolve the repository root relative to this script (scripts/ is one level down).
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const ROOT = path.resolve(__dirname, '..');

// Only these product_plans/ subdirectories are scanned; archive/ is deliberately excluded.
const SCAN_DIRS = ['strategy', 'backlog', 'sprints', 'hotfixes'];
const PRODUCT_PLANS = path.join(ROOT, 'product_plans');
const OUTPUT_PATH = path.join(ROOT, '.bounce', 'product-graph.json');

// CLI flags: --json prints the graph to stdout instead of writing OUTPUT_PATH.
const args = process.argv.slice(2);
const toStdout = args.includes('--json');
|
37
|
+
|
|
38
|
+
// ── Document type detection ──────────────────────────────────────

// Filename conventions for each planning-document type.
// Checked in order; the first matching pattern wins.
const DOC_PATTERNS = [
  { pattern: /^EPIC-(\d+)/i, type: 'epic' },
  { pattern: /^STORY-(\d+)-(\d+)/i, type: 'story' },
  { pattern: /^SPIKE-(\d+)-(\d+)/i, type: 'spike' },
  { pattern: /^HOTFIX-/i, type: 'hotfix' },
  { pattern: /sprint-(\d+)\.md$/i, type: 'sprint-plan' },
  { pattern: /sprint-report/i, type: 'sprint-report' },
  { pattern: /charter/i, type: 'charter' },
  { pattern: /roadmap/i, type: 'roadmap' },
  { pattern: /delivery[_-]plan/i, type: 'delivery-plan' },
  { pattern: /risk[_-]registry/i, type: 'risk-registry' },
];

/**
 * Classify a planning document by its filename.
 * @param {string} filename - Base name of the markdown file.
 * @returns {string} One of the DOC_PATTERNS types, or 'unknown' when no
 *   convention matches.
 */
function detectType(filename) {
  const hit = DOC_PATTERNS.find(({ pattern }) => pattern.test(filename));
  return hit ? hit.type : 'unknown';
}
|
|
64
|
+
|
|
65
|
+
/**
 * Derive a stable, canonical document ID.
 *
 * Resolution order: explicit frontmatter ID fields, then filename
 * conventions, then docType-specific fallbacks, and finally the uppercased
 * basename.
 *
 * @param {string} filename - Base name of the markdown file.
 * @param {object} frontmatter - Parsed YAML frontmatter (may be {}).
 * @param {string} docType - Type from detectType().
 * @returns {string} Canonical document ID.
 */
function deriveId(filename, frontmatter, docType) {
  // Explicit ID fields from frontmatter always win (fixed precedence).
  const explicitId =
    frontmatter.epic_id ||
    frontmatter.story_id ||
    frontmatter.spike_id ||
    frontmatter.sprint_id ||
    frontmatter.hotfix_id ||
    frontmatter.delivery_id;
  if (explicitId) return explicitId;

  // Otherwise derive from the filename without its .md extension.
  const base = path.basename(filename, '.md');

  for (const idPattern of [/^(EPIC-\d+)/i, /^(STORY-\d+-\d+)/i, /^(SPIKE-\d+-\d+)/i]) {
    const m = base.match(idPattern);
    if (m) return m[1].toUpperCase();
  }

  const sprintNum = base.match(/^sprint-(\d+)$/i);
  if (sprintNum) return `S-${sprintNum[1].padStart(2, '0')}`;

  const hotfix = base.match(/^(HOTFIX-[^.]+)/i);
  if (hotfix) return hotfix[1].toUpperCase();

  // Fallbacks keyed off the detected document type.
  switch (docType) {
    case 'charter':
      return 'CHARTER';
    case 'roadmap':
      return 'ROADMAP';
    case 'risk-registry':
      return 'RISK-REGISTRY';
    case 'delivery-plan': {
      const dp = base.match(/^(D-\d+)/i);
      return dp ? dp[1].toUpperCase() : `DP-${base}`;
    }
    default:
      return base.toUpperCase();
  }
}
|
|
111
|
+
|
|
112
|
+
// ── YAML extraction ──────────────────────────────────────────────

/**
 * Read a markdown file and pull out its YAML frontmatter and first H1 title.
 *
 * @param {string} filePath - Absolute path to the markdown file.
 * @returns {{ frontmatter: object|null, title: string|null }}
 *   frontmatter is null when the file is unreadable, has no frontmatter
 *   fence, or its YAML is malformed (a warning is printed in that case).
 */
function extractFrontmatter(filePath) {
  let raw;
  try {
    raw = fs.readFileSync(filePath, 'utf8');
  } catch {
    // Unreadable file: nothing to extract.
    return { frontmatter: null, title: null };
  }

  // The first markdown heading doubles as the document title.
  const heading = raw.match(/^#\s+(.+)$/m);
  const title = heading ? heading[1].trim() : null;

  // Frontmatter must be a leading `--- ... ---` fence.
  const fence = raw.match(/^---\s*\n([\s\S]*?)\n---/);
  if (fence === null) return { frontmatter: null, title };

  try {
    // js-yaml exposes `load` either directly or under `default`,
    // depending on how the dynamic import resolved.
    const parsed = yaml.default?.load
      ? yaml.default.load(fence[1])
      : yaml.load(fence[1]);
    return { frontmatter: parsed || {}, title };
  } catch (err) {
    console.error(` WARN: Malformed YAML in ${path.relative(ROOT, filePath)}: ${err.message}`);
    return { frontmatter: null, title };
  }
}
|
|
145
|
+
|
|
146
|
+
// ── Edge extraction ──────────────────────────────────────────────

/**
 * Extract relationship edges for one document from its frontmatter fields
 * and (best-effort) its markdown content.
 *
 * Direction convention: `from` is the upstream document and `to` the
 * downstream one, except 'feeds'/'unlocks' edges which point away from docId.
 *
 * @param {string} docId - Canonical ID of the document being processed.
 * @param {object} fm - Parsed YAML frontmatter (may be {}).
 * @param {string} docType - Type from detectType() ('story', 'epic', ...).
 * @param {string} filePath - Path used to re-read content for dependency-table
 *   and "unlocks" extraction; read failures are silently ignored.
 * @returns {Array<{from: string, to: string, type: string}>}
 */
function extractEdges(docId, fm, docType, filePath) {
  const edges = [];

  // parent_epic_ref → parent edge
  if (fm.parent_epic_ref) {
    edges.push({ from: fm.parent_epic_ref, to: docId, type: 'parent' });
  }

  // Derive parent epic from story/spike ID pattern (STORY-003-01 → EPIC-003)
  if (!fm.parent_epic_ref && (docType === 'story' || docType === 'spike')) {
    const epicNum = docId.match(/(?:STORY|SPIKE)-(\d+)/i);
    if (epicNum) {
      const parentId = `EPIC-${epicNum[1].padStart(3, '0')}`;
      edges.push({ from: parentId, to: docId, type: 'parent' });
    }
  }

  // charter_ref → context-source edge
  if (fm.charter_ref) {
    edges.push({ from: fm.charter_ref, to: docId, type: 'context-source' });
  }

  // roadmap_ref → context-source edge
  if (fm.roadmap_ref) {
    edges.push({ from: fm.roadmap_ref, to: docId, type: 'context-source' });
  }

  // context_source → context-source edge (text field, try to match doc IDs)
  if (fm.context_source && typeof fm.context_source === 'string') {
    const refs = fm.context_source.match(/(?:EPIC|STORY|SPIKE|CHARTER|ROADMAP|S|D)-[\w-]+/gi) || [];
    for (const ref of refs) {
      edges.push({ from: ref.toUpperCase(), to: docId, type: 'context-source' });
    }
  }

  // release → feeds edge (e.g., release: "D-02").
  // String() guards against YAML parsing the value as a number/date, which
  // previously made .match() throw a TypeError.
  if (fm.release) {
    const releaseId = String(fm.release).match(/(D-\d+)/i);
    if (releaseId) {
      edges.push({ from: docId, to: releaseId[1].toUpperCase(), type: 'feeds' });
    }
  }

  // risk_registry_ref → context-source edge
  if (fm.risk_registry_ref) {
    edges.push({ from: 'RISK-REGISTRY', to: docId, type: 'context-source' });
  }

  // delivery field in sprint plans (same non-string guard as `release`)
  if (fm.delivery) {
    const dpId = String(fm.delivery).match(/(D-\d+)/i);
    if (dpId) {
      edges.push({ from: docId, to: dpId[1].toUpperCase(), type: 'feeds' });
    }
  }

  // depends_on / dependencies (array or string)
  const deps = fm.depends_on || fm.dependencies || [];
  const depList = Array.isArray(deps) ? deps : [deps];
  for (const dep of depList) {
    if (typeof dep === 'string') {
      const depIds = dep.match(/(?:EPIC|STORY|SPIKE|S|D)-[\w-]+/gi) || [];
      for (const depId of depIds) {
        edges.push({ from: depId.toUpperCase(), to: docId, type: 'depends-on' });
      }
    }
  }

  // Extract dependency table from document content (§4.2 pattern)
  try {
    const content = fs.readFileSync(filePath, 'utf8');
    const depTableMatch = content.match(/(?:depend|block|prerequisite)[\s\S]*?\|[\s\S]*?\|/gi);
    if (depTableMatch) {
      for (const tableBlock of depTableMatch) {
        const docRefs = tableBlock.match(/(?:EPIC|STORY|SPIKE)-\d+(?:-\d+)?/gi) || [];
        for (const ref of docRefs) {
          const refId = ref.toUpperCase();
          // Skip self-references; a doc cannot depend on itself.
          if (refId !== docId) {
            edges.push({ from: refId, to: docId, type: 'depends-on' });
          }
        }
      }
    }

    // Extract "unlocks" references
    const unlocksMatch = content.match(/unlocks?[:\s]+([^\n]+)/gi) || [];
    for (const line of unlocksMatch) {
      const refs = line.match(/(?:EPIC|STORY|SPIKE)-\d+(?:-\d+)?/gi) || [];
      for (const ref of refs) {
        edges.push({ from: docId, to: ref.toUpperCase(), type: 'unlocks' });
      }
    }
  } catch {
    // Content extraction is best-effort: unreadable files add no edges.
  }

  return edges;
}
|
|
254
|
+
|
|
255
|
+
// ── File scanning ────────────────────────────────────────────────

/**
 * Recursively collect every .md file beneath `dir`.
 * A missing directory yields an empty list rather than throwing.
 *
 * @param {string} dir - Directory to walk.
 * @returns {string[]} Full paths of markdown files, in directory order.
 */
function findMarkdownFiles(dir) {
  if (!fs.existsSync(dir)) return [];

  return fs.readdirSync(dir, { withFileTypes: true }).flatMap((entry) => {
    const entryPath = path.join(dir, entry.name);
    if (entry.isDirectory()) return findMarkdownFiles(entryPath);
    return entry.name.endsWith('.md') ? [entryPath] : [];
  });
}
|
|
277
|
+
|
|
278
|
+
// ── Main ─────────────────────────────────────────────────────────

/**
 * Build the product graph by scanning product_plans/ for known document types.
 *
 * Scans SCAN_DIRS recursively, then the product_plans/ root (for charter-style
 * files); root files never overwrite nodes found in the subdirectory scan.
 *
 * @returns {{generated_at: string, node_count: number, edge_count: number,
 *            nodes: object, edges: Array}} Graph with deduplicated edges.
 */
function buildGraph() {
  const nodes = {};
  const edges = [];

  if (!fs.existsSync(PRODUCT_PLANS)) {
    // Graceful: empty graph for missing product_plans/ (same shape as the
    // normal return so consumers never need to special-case it).
    return { generated_at: new Date().toISOString(), node_count: 0, edge_count: 0, nodes: {}, edges: [] };
  }

  // Register one document: add its node and collect its edges.
  // `overwrite === false` preserves an existing node (used for root-level
  // files so they cannot clobber nodes from the subdirectory scan).
  const addDocument = (filePath, filename, docType, overwrite) => {
    const { frontmatter, title } = extractFrontmatter(filePath);
    const fm = frontmatter || {};
    const docId = deriveId(filename, fm, docType);

    if (!overwrite && nodes[docId]) return;

    nodes[docId] = {
      type: docType,
      status: fm.status || null,
      ambiguity: fm.ambiguity || null,
      path: path.relative(ROOT, filePath),
      title: title || docId,
    };

    // Edges are only meaningful when frontmatter parsed successfully.
    if (frontmatter) {
      edges.push(...extractEdges(docId, fm, docType, filePath));
    }
  };

  // Scan active directories only (not archive/)
  for (const subdir of SCAN_DIRS) {
    for (const filePath of findMarkdownFiles(path.join(PRODUCT_PLANS, subdir))) {
      const filename = path.basename(filePath);
      const docType = detectType(filename);
      if (docType === 'unknown' || docType === 'sprint-report') continue;
      addDocument(filePath, filename, docType, true);
    }
  }

  // Also scan root of product_plans/ for charter files
  const rootFiles = fs.readdirSync(PRODUCT_PLANS, { withFileTypes: true })
    .filter(e => !e.isDirectory() && e.name.endsWith('.md'));

  for (const entry of rootFiles) {
    const docType = detectType(entry.name);
    if (docType === 'unknown') continue;
    addDocument(path.join(PRODUCT_PLANS, entry.name), entry.name, docType, false);
  }

  // Deduplicate edges
  const edgeSet = new Set();
  const uniqueEdges = edges.filter(e => {
    const key = `${e.from}→${e.to}:${e.type}`;
    if (edgeSet.has(key)) return false;
    edgeSet.add(key);
    return true;
  });

  return {
    generated_at: new Date().toISOString(),
    node_count: Object.keys(nodes).length,
    edge_count: uniqueEdges.length,
    nodes,
    edges: uniqueEdges,
  };
}
|
|
372
|
+
|
|
373
|
+
// ── Execute ──────────────────────────────────────────────────────

// One-shot CLI: build the graph, then either print it (--json) or persist it.
const graph = buildGraph();

if (toStdout) {
  console.log(JSON.stringify(graph, null, 2));
} else {
  // Ensure .bounce/ exists
  const bounceDir = path.join(ROOT, '.bounce');
  fs.mkdirSync(bounceDir, { recursive: true });

  // Trailing newline keeps the JSON file friendly to diff/posix tooling.
  fs.writeFileSync(OUTPUT_PATH, JSON.stringify(graph, null, 2) + '\n');
  console.log(`✓ Product graph generated: .bounce/product-graph.json`);
  console.log(` Nodes: ${graph.node_count} | Edges: ${graph.edge_count}`);
}
|
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
#!/usr/bin/env node

/**
 * product_impact.mjs
 * Query "what's affected by changing document X?" using BFS traversal
 * of the product graph.
 *
 * Usage:
 *   node scripts/product_impact.mjs EPIC-002
 *   node scripts/product_impact.mjs EPIC-002 --json
 */

import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';

// Resolve the repository root relative to this script (scripts/ is one level down).
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const ROOT = path.resolve(__dirname, '..');
const GRAPH_PATH = path.join(ROOT, '.bounce', 'product-graph.json');

// CLI: the first non-flag argument is the document ID; --json switches output mode.
const args = process.argv.slice(2);
const docId = args.find(a => !a.startsWith('--'));
const jsonOutput = args.includes('--json');

if (!docId) {
  console.error('Usage: product_impact.mjs <DOC-ID> [--json]');
  console.error(' Example: product_impact.mjs EPIC-002');
  console.error(' Run `vbounce graph` first to generate the product graph.');
  process.exit(1);
}

// ── Load graph ───────────────────────────────────────────────────

// The graph file is produced by product_graph.mjs; fail fast when missing.
if (!fs.existsSync(GRAPH_PATH)) {
  console.error('ERROR: .bounce/product-graph.json not found.');
  console.error('Run `vbounce graph` first to generate the product graph.');
  process.exit(1);
}

const graph = JSON.parse(fs.readFileSync(GRAPH_PATH, 'utf8'));
// Node IDs are stored uppercase in the graph, so normalize the query.
const targetId = docId.toUpperCase();

if (!graph.nodes[targetId]) {
  console.error(`ERROR: Document "${targetId}" not found in the product graph.`);
  console.error(`Available documents: ${Object.keys(graph.nodes).join(', ')}`);
  process.exit(1);
}
|
|
48
|
+
|
|
49
|
+
// ── Build adjacency lists ────────────────────────────────────────

// Downstream: edges where targetId is the source (from)
// "What does this document feed into?"
const downstream = new Map(); // from → [{to, type}]
const upstream = new Map(); // to → [{from, type}]

// One pass over all edges populates both directions of lookup.
for (const edge of graph.edges) {
  if (!downstream.has(edge.from)) downstream.set(edge.from, []);
  downstream.get(edge.from).push({ to: edge.to, type: edge.type });

  if (!upstream.has(edge.to)) upstream.set(edge.to, []);
  upstream.get(edge.to).push({ from: edge.from, type: edge.type });
}
|
|
63
|
+
|
|
64
|
+
// ── BFS: direct + transitive dependents ──────────────────────────

/**
 * Breadth-first traversal from `startId`, following edges in one direction.
 * Depth-1 hits are "direct", anything deeper is "transitive"; a visited set
 * protects against cycles in the graph.
 *
 * @param {string} startId
 * @param {Map} adjList - adjacency list keyed by node ID
 * @param {'downstream'|'upstream'} direction - which end of each edge to follow
 * @returns {{ direct: Array, transitive: Array }}
 */
function bfs(startId, adjList, direction) {
  const seen = new Set([startId]);
  const direct = [];
  const transitive = [];
  const queue = [{ id: startId, depth: 0 }];

  // Index-based queue walk avoids O(n) Array#shift per dequeue.
  for (let head = 0; head < queue.length; head += 1) {
    const { id, depth } = queue[head];

    for (const edge of adjList.get(id) || []) {
      const nextId = direction === 'downstream' ? edge.to : edge.from;

      if (seen.has(nextId)) continue; // Cycle protection
      seen.add(nextId);

      const record = {
        id: nextId,
        type: edge.type,
        via: id,
        node: graph.nodes[nextId] || null,
      };

      (depth === 0 ? direct : transitive).push(record);
      queue.push({ id: nextId, depth: depth + 1 });
    }
  }

  return { direct, transitive };
}
|
|
108
|
+
|
|
109
|
+
// ── Run analysis ─────────────────────────────────────────────────

// Walk both directions from the target: who depends on it, and what feeds it.
const dependents = bfs(targetId, downstream, 'downstream');
const feeders = bfs(targetId, upstream, 'upstream');

const result = {
  document: targetId,
  document_info: graph.nodes[targetId],
  direct_dependents: dependents.direct,
  transitive_dependents: dependents.transitive,
  upstream_feeders: feeders.direct.concat(feeders.transitive),
  graph_generated_at: graph.generated_at,
};

// ── Output ───────────────────────────────────────────────────────

if (jsonOutput) {
  // Machine-readable mode for piping into other tools.
  console.log(JSON.stringify(result, null, 2));
} else {
  // Human-readable report.
  const node = graph.nodes[targetId];
  console.log(`\n📊 Impact Analysis: ${targetId}`);
  console.log(` ${node.title}`);
  console.log(` Status: ${node.status || 'N/A'} | Type: ${node.type}`);
  console.log(` Path: ${node.path}`);

  if (dependents.direct.length > 0) {
    console.log(`\n🔽 Direct Dependents (${dependents.direct.length}):`);
    for (const dep of dependents.direct) {
      const label = dep.node ? dep.node.title : dep.id;
      const status = dep.node?.status ? ` [${dep.node.status}]` : '';
      console.log(` ${dep.type}: ${label}${status}`);
    }
  } else {
    console.log('\n🔽 Direct Dependents: none');
  }

  if (dependents.transitive.length > 0) {
    console.log(`\n🔽 Transitive Dependents (${dependents.transitive.length}):`);
    for (const dep of dependents.transitive) {
      const label = dep.node ? dep.node.title : dep.id;
      const status = dep.node?.status ? ` [${dep.node.status}]` : '';
      // Include the intermediate node so the dependency chain is traceable.
      console.log(` ${dep.type}: ${label}${status} (via ${dep.via})`);
    }
  }

  if (feeders.direct.length > 0 || feeders.transitive.length > 0) {
    const allFeeders = [...feeders.direct, ...feeders.transitive];
    console.log(`\n🔼 Upstream Feeders (${allFeeders.length}):`);
    for (const f of allFeeders) {
      const label = f.node ? f.node.title : f.id;
      console.log(` ${f.type}: ${label}`);
    }
  } else {
    console.log('\n🔼 Upstream Feeders: none');
  }

  console.log(`\nGraph generated: ${graph.generated_at}`);
  console.log('');
}
|
|
@@ -263,6 +263,8 @@ mkdir -p .worktrees/STORY-{ID}-{StoryName}/.bounce/{tasks,reports}
|
|
|
263
263
|
./scripts/pre_gate_runner.sh qa .worktrees/STORY-{ID}-{StoryName}/ sprint/S-{XX}
|
|
264
264
|
- If scan FAILS on trivial issues (debug statements, missing JSDoc, TODOs):
|
|
265
265
|
Return to Developer for quick fix. Do NOT spawn QA for mechanical failures.
|
|
266
|
+
If pre-gate scan fails 3+ times → Escalate: present failures to human with options:
|
|
267
|
+
a) Human fixes manually, b) Descope the story, c) Re-assign to a different approach.
|
|
266
268
|
- If scan PASSES: Include scan output path in the QA task file.
|
|
267
269
|
1. Spawn qa subagent in .worktrees/STORY-{ID}-{StoryName}/ with:
|
|
268
270
|
- Developer Implementation Report
|
|
@@ -287,6 +289,7 @@ mkdir -p .worktrees/STORY-{ID}-{StoryName}/.bounce/{tasks,reports}
|
|
|
287
289
|
./scripts/pre_gate_runner.sh arch .worktrees/STORY-{ID}-{StoryName}/ sprint/S-{XX}
|
|
288
290
|
- If scan reveals new dependencies or structural violations:
|
|
289
291
|
Return to Developer for resolution. Do NOT spawn Architect for mechanical failures.
|
|
292
|
+
If pre-gate scan fails 3+ times → Escalate to human (same options as pre-QA escalation).
|
|
290
293
|
- If scan PASSES: Include scan output path in the Architect task file.
|
|
291
294
|
1. Spawn architect subagent in .worktrees/STORY-{ID}-{StoryName}/ with:
|
|
292
295
|
- All reports for this story
|
|
@@ -319,7 +322,12 @@ mkdir -p .worktrees/STORY-{ID}-{StoryName}/.bounce/{tasks,reports}
|
|
|
319
322
|
4. If merge conflicts:
|
|
320
323
|
- Simple (imports, whitespace): DevOps resolves directly
|
|
321
324
|
- Complex (logic): DevOps writes Conflict Report, Lead creates fix story
|
|
322
|
-
5. If post-merge tests fail:
|
|
325
|
+
5. If post-merge tests fail:
|
|
326
|
+
- DevOps reverts the merge and writes a Post-Merge Failure Report (what failed, which tests, suspected cause)
|
|
327
|
+
- Lead returns story to Developer with the failure report as input
|
|
328
|
+
- Developer fixes in the original worktree (which is preserved until merge succeeds)
|
|
329
|
+
- Story re-enters the bounce at Step 2 (Dev pass). QA/Arch bounce counts are NOT reset — this is a merge issue, not a gate failure.
|
|
330
|
+
- If post-merge fails 3+ times → Escalate to human
|
|
323
331
|
```
|
|
324
332
|
Update sprint-{XX}.md: V-Bounce State → "Done"
|
|
325
333
|
|
|
@@ -342,7 +350,11 @@ After ALL stories are merged into `sprint/S-01`:
|
|
|
342
350
|
2. First, Architect runs `./scripts/hotfix_manager.sh audit` to check for hotfix drift. If it fails, perform deep audit on flagged files.
|
|
343
351
|
3. Run Sprint Integration Audit — Deep Audit on combined changes
|
|
344
352
|
4. Check for: duplicate routes, competing state, overlapping migrations
|
|
345
|
-
5. If issues found
|
|
353
|
+
5. If issues found:
|
|
354
|
+
- Present findings to human with severity assessment
|
|
355
|
+
- AI suggests which epic the fix story should belong to
|
|
356
|
+
- Fix stories are added to the BACKLOG (not the current sprint) — they enter the next sprint through normal planning
|
|
357
|
+
- Exception: if the issue blocks the sprint release (e.g., broken build), fix inline on the sprint branch without creating a story
|
|
346
358
|
```
|
|
347
359
|
|
|
348
360
|
### Step 7: Sprint Consolidation
|
|
@@ -457,7 +469,7 @@ The Team Lead MUST update the active `sprint-{XX}.md` at every state transition.
|
|
|
457
469
|
| Architect passes | §1: V-Bounce State → "Architect Passed" | **Nothing** |
|
|
458
470
|
| DevOps merges story | §1: V-Bounce State → "Done". §4: Add Execution Log row (via `vbounce story complete`) | **Nothing** |
|
|
459
471
|
| Escalated | §1: Move story to Escalated section | **Nothing** |
|
|
460
|
-
| Sprint CLOSES | Status → "Completed" in frontmatter | §2: sprint → Completed. §4: add summary. §3: remove delivered stories |
|
|
472
|
+
| Sprint CLOSES | Status → "Completed" in frontmatter | §2: sprint → Completed. §4: add summary. §3: remove delivered stories. **This is the ONLY time Delivery Plan updates.** |
|
|
461
473
|
|
|
462
474
|
> **Key rule**: The Delivery Plan is updated ONLY at sprint close, never during active bouncing.
|
|
463
475
|
> See `skills/agent-team/references/delivery-sync.md` for full sync rules.
|