deepflow 0.1.107 → 0.1.108

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,127 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * deepflow worktree-deps
4
+ * Symlinks node_modules from the main repo into a worktree so that
5
+ * TypeScript / LSP / builds resolve dependencies without a full install.
6
+ *
7
+ * Usage: node bin/worktree-deps.js --source /path/to/repo --worktree /path/to/worktree
8
+ *
9
+ * Walks the source repo looking for node_modules directories (max depth 2)
10
+ * and creates corresponding symlinks in the worktree.
11
+ *
12
+ * Exit codes: 0=OK, 1=ERROR
13
+ */
14
+
15
+ 'use strict';
16
+
17
+ const fs = require('fs');
18
+ const path = require('path');
19
+
20
// ---------------------------------------------------------------------------
// Args
// ---------------------------------------------------------------------------

/**
 * Parse --source and --worktree from the command line.
 * Prints usage to stderr and exits with code 1 when either flag is missing.
 * @returns {{ source: string, worktree: string }}
 */
function parseArgs() {
  const argv = process.argv.slice(2);
  const opts = {};
  let i = 0;
  while (i < argv.length) {
    const flag = argv[i];
    const value = argv[i + 1];
    if (flag === '--source' && value) {
      opts.source = value;
      i += 2;
    } else if (flag === '--worktree' && value) {
      opts.worktree = value;
      i += 2;
    } else {
      // Unknown token, or a flag with no value — skip it.
      i += 1;
    }
  }
  if (!opts.source || !opts.worktree) {
    console.error('Usage: node bin/worktree-deps.js --source <repo> --worktree <worktree>');
    process.exit(1);
  }
  return opts;
}
37
+
38
// ---------------------------------------------------------------------------
// Find node_modules directories (depth 0 and 1 level of nesting)
// ---------------------------------------------------------------------------

/**
 * Locate node_modules directories in the source repo.
 *
 * Checks the repo root first, then one level inside common monorepo container
 * directories (packages/, apps/, ...). Paths are returned relative to `root`
 * so callers can mirror them under a worktree.
 *
 * @param {string} root - Absolute path of the source repository.
 * @returns {string[]} Relative paths, e.g. "node_modules", "packages/foo/node_modules".
 */
function findNodeModules(root) {
  const results = [];

  // Root node_modules
  if (fs.existsSync(path.join(root, 'node_modules'))) {
    results.push('node_modules');
  }

  // Scan common monorepo directory patterns for nested node_modules
  const monorepoPatterns = ['packages', 'apps', 'libs', 'services', 'modules', 'plugins'];

  for (const dir of monorepoPatterns) {
    const dirPath = path.join(root, dir);

    let entries;
    try {
      // readdirSync throws ENOENT/ENOTDIR/EACCES for missing, non-directory,
      // or unreadable paths — one try/catch covers all of those cases.
      entries = fs.readdirSync(dirPath);
    } catch (_) {
      continue;
    }

    for (const entry of entries) {
      const entryPath = path.join(dirPath, entry);

      // Guarded stat: the original called fs.statSync unprotected here, so a
      // dangling symlink (or an entry deleted mid-scan) crashed the whole run.
      let stat;
      try {
        stat = fs.statSync(entryPath); // follows symlinks, matching original behavior
      } catch (_) {
        continue;
      }
      if (!stat.isDirectory()) continue;

      const nm = path.join(entryPath, 'node_modules');
      if (fs.existsSync(nm)) {
        results.push(path.join(dir, entry, 'node_modules'));
      }
    }
  }

  return results;
}
78
+
79
// ---------------------------------------------------------------------------
// Create symlinks
// ---------------------------------------------------------------------------

/**
 * Symlink every node_modules found in `source` into the same relative
 * location under `worktree`, then print a one-line JSON summary to stdout.
 *
 * Per-path failures are collected into the summary instead of thrown, so a
 * single bad link cannot abort the rest — callers treat errors as non-fatal.
 *
 * @param {string} source - Absolute path of the main repository.
 * @param {string} worktree - Absolute path of the worktree.
 */
function symlinkDeps(source, worktree) {
  const nodeModulesPaths = findNodeModules(source);

  if (nodeModulesPaths.length === 0) {
    console.log('{"linked":0,"message":"no node_modules found in source"}');
    return;
  }

  let linked = 0;
  const errors = [];

  for (const relPath of nodeModulesPaths) {
    const srcAbs = path.join(source, relPath);
    const dstAbs = path.join(worktree, relPath);

    try {
      // Skip if already exists (symlink or directory). lstat, not existsSync:
      // existsSync follows symlinks and returns false for a dangling one, in
      // which case symlinkSync would then fail with EEXIST.
      if (fs.lstatSync(dstAbs, { throwIfNoEntry: false })) {
        continue;
      }

      // Ensure parent directory exists in worktree. recursive:true is a no-op
      // when it already exists. Kept inside the try block so a failure here is
      // recorded per-path rather than crashing the whole run (the original
      // called mkdirSync outside the try and could abort with exit code 1).
      fs.mkdirSync(path.dirname(dstAbs), { recursive: true });

      fs.symlinkSync(srcAbs, dstAbs, 'dir');
      linked++;
    } catch (err) {
      errors.push({ path: relPath, error: err.message });
    }
  }

  const result = { linked, total: nodeModulesPaths.length };
  if (errors.length > 0) result.errors = errors;
  console.log(JSON.stringify(result));
}
121
+
122
// ---------------------------------------------------------------------------
// Main
// ---------------------------------------------------------------------------

const { source, worktree } = parseArgs();
symlinkDeps(source, worktree);
@@ -123,12 +123,23 @@ function computeLayer(content) {
123
123
  * @param {string} content - The raw markdown content of the spec file.
124
124
  * @param {object} opts
125
125
  * @param {'interactive'|'auto'} opts.mode
126
+ * @param {string|null} opts.filename - Optional filename (basename) used for stem validation.
126
127
  * @returns {{ hard: string[], advisory: string[] }}
127
128
  */
128
- function validateSpec(content, { mode = 'interactive', specsDir = null } = {}) {
129
+ function validateSpec(content, { mode = 'interactive', specsDir = null, filename = null } = {}) {
129
130
  const hard = [];
130
131
  const advisory = [];
131
132
 
133
+ // ── Spec filename stem validation ────────────────────────────────────
134
+ if (filename !== null) {
135
+ let stem = path.basename(filename, '.md');
136
+ stem = stem.replace(/^(doing-|done-)/, '');
137
+ const SAFE_STEM = /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/;
138
+ if (!SAFE_STEM.test(stem)) {
139
+ hard.push(`Spec filename stem contains unsafe characters: "${stem}"`);
140
+ }
141
+ }
142
+
132
143
  // ── Frontmatter: parse and validate derives-from ─────────────────────
133
144
  const { frontmatter } = parseFrontmatter(content);
134
145
  if (frontmatter['derives-from'] !== undefined) {
@@ -339,7 +350,7 @@ if (require.main === module) {
339
350
  const content = fs.readFileSync(filePath, 'utf8');
340
351
  const mode = process.argv.includes('--auto') ? 'auto' : 'interactive';
341
352
  const specsDir = path.resolve(path.dirname(filePath));
342
- const result = validateSpec(content, { mode, specsDir });
353
+ const result = validateSpec(content, { mode, specsDir, filename: path.basename(filePath) });
343
354
 
344
355
  if (result.hard.length > 0) {
345
356
  console.error('HARD invariant failures:');
@@ -410,3 +410,136 @@ describe('derives-from validation', () => {
410
410
  assert.deepEqual(resultWith.hard, resultWithout.hard);
411
411
  });
412
412
  });
413
+
414
// ---------------------------------------------------------------------------
// validateSpec — spec filename stem validation
// ---------------------------------------------------------------------------

describe('validateSpec stem validation', () => {
  test('valid plain name passes', () => {
    const result = validateSpec(fullSpec(), { filename: 'my-spec.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 0);
  });

  test('valid name with numbers passes', () => {
    const result = validateSpec(fullSpec(), { filename: 'spec-v2-fix.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 0);
  });

  test('single character name passes', () => {
    const result = validateSpec(fullSpec(), { filename: 'a.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 0);
  });

  test('doing- prefix is stripped before validation', () => {
    const result = validateSpec(fullSpec(), { filename: 'doing-my-spec.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 0);
  });

  test('done- prefix is stripped before validation', () => {
    const result = validateSpec(fullSpec(), { filename: 'done-my-spec.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 0);
  });

  test('filename with dollar sign is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: 'spec-$bad.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('filename with backtick is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: 'spec-`bad.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('filename with pipe character is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: 'spec|bad.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('filename with semicolon is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: 'spec;bad.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('filename with ampersand is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: 'spec&bad.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('filename with space is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: 'spec bad.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('filename with path traversal (..) is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: '..evil.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('filename with leading hyphen is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: '-leading.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('filename with trailing hyphen is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: 'trailing-.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('empty stem (only prefix) is rejected as hard failure', () => {
    // A filename of just "doing-.md" strips to empty string
    const result = validateSpec(fullSpec(), { filename: 'doing-.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('empty filename stem (.md only) is rejected as hard failure', () => {
    const result = validateSpec(fullSpec(), { filename: '.md' });
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 1);
  });

  test('stem validation failure is in hard array, not advisory', () => {
    const result = validateSpec(fullSpec(), { filename: 'spec$bad.md' });
    const hardErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    const advisoryErrors = result.advisory.filter((m) => m.includes('unsafe characters'));
    assert.equal(hardErrors.length, 1);
    assert.equal(advisoryErrors.length, 0);
  });

  test('no filename passed (null) skips stem validation', () => {
    // No filename option — stem check should not run
    const result = validateSpec(fullSpec());
    const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
    assert.equal(stemErrors.length, 0);
  });

  test('all existing repo spec names pass validation', () => {
    const existingNames = [
      'done-dashboard-model-cost-fixes.md',
      'done-orchestrator-v2.md',
      'done-plan-cleanup.md',
      'done-plan-fanout.md',
      'done-quality-gates.md',
    ];
    for (const filename of existingNames) {
      const result = validateSpec(fullSpec(), { filename });
      // Fixed: the message used shell-style `$(unknown)` instead of JS
      // interpolation, so failures printed a literal, useless placeholder.
      const stemErrors = result.hard.filter((m) => m.includes('unsafe characters'));
      assert.equal(stemErrors.length, 0, `Expected ${filename} to pass but got stem errors`);
    }
  });
});
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "deepflow",
3
- "version": "0.1.107",
3
+ "version": "0.1.108",
4
4
  "description": "Doing reveals what thinking can't predict — spec-driven iterative development for Claude Code",
5
5
  "keywords": [
6
6
  "claude",
@@ -44,6 +44,14 @@ Shell: `` !`cat .deepflow/checkpoint.json 2>/dev/null || echo 'NOT_FOUND'` `` /
44
44
 
45
45
  Require clean HEAD. Derive SPEC_NAME from `specs/doing-*.md`. Create `.deepflow/worktrees/{spec}` on branch `df/{spec}`. Reuse if exists; `--fresh` deletes first. If `worktree.sparse_paths` non-empty: `git worktree add --no-checkout`, `sparse-checkout set {paths}`, checkout.
46
46
 
47
+ ### 1.5.1. SYMLINK DEPENDENCIES
48
+
49
+ After worktree creation, symlink `node_modules` from the main repo so TypeScript/LSP/build can resolve dependencies without a full install:
50
+ ```bash
51
+ node "${HOME}/.claude/bin/worktree-deps.js" --source "$(git rev-parse --show-toplevel)" --worktree "${WORKTREE_PATH}"
52
+ ```
53
+ The script finds `node_modules` at root and inside monorepo directories (`packages/`, `apps/`, etc.) and creates symlinks in the worktree. Outputs JSON: `{"linked": N, "total": M}`. Errors are non-fatal — log and continue.
54
+
47
55
  ### 1.6. RATCHET SNAPSHOT
48
56
 
49
57
  Snapshot pre-existing test files — only these count for ratchet (agent-created excluded):
@@ -159,7 +167,7 @@ The script handles all health checks internally and outputs structured JSON:
159
167
  **Broken-tests policy:** Updating pre-existing tests requires a separate dedicated task in PLAN.md with explicit justification — never inline during execution.
160
168
 
161
169
  **Orchestrator response by exit code:**
162
- - **Exit 0 (PASS):** Commit stands. TaskUpdate(status: "completed"), update PLAN.md [x] + commit hash.
170
+ - **Exit 0 (PASS):** Commit stands. TaskUpdate(status: "completed"), update PLAN.md [x] + commit hash. **Extract decisions** (see §5.5.1).
163
171
  - **Exit 1 (FAIL):** Script already reverted. Set `TaskUpdate(status: "pending")`. Recompute remaining waves:
164
172
  ```
165
173
  WAVE_JSON=!`node "${HOME}/.claude/bin/wave-runner.js" --json --plan PLAN.md --recalc --failed T{N} 2>/dev/null || echo 'WAVE_ERROR'`
@@ -168,6 +176,18 @@ The script handles all health checks internally and outputs structured JSON:
168
176
  Report: `"✗ T{n}: reverted"`.
169
177
  - **Exit 2 (SALVAGEABLE):** Spawn `Agent(model="sonnet")` to fix lint/typecheck issues. Re-run `node "${HOME}/.claude/bin/ratchet.js"`. If still non-zero → revert both commits, set status pending.
170
178
 
179
+ #### 5.5.1. DECISION EXTRACTION (on ratchet pass)
180
+
181
+ Parse the agent's response for `DECISIONS:` line. If present:
182
+ 1. Split by ` | ` to get individual decisions
183
+ 2. Each decision has format `[TAG] description — rationale` where TAG ∈ {APPROACH, PROVISIONAL, ASSUMPTION, FUTURE, UPDATE}
184
+ 3. Append to `.deepflow/decisions.md` under `### {date} — {spec_name}` header (create header if first decision for this spec today, reuse if exists)
185
+ 4. Format: `- [TAG] description — rationale`
186
+
187
+ If no `DECISIONS:` line in agent output → skip silently (mechanical tasks don't produce decisions).
188
+
189
+ **This runs on every ratchet pass, not just at verify time.** Decisions are captured incrementally as tasks complete, so they're never lost even if verify fails or merge is manual.
190
+
171
191
  **Edit scope validation:** `git diff HEAD~1 --name-only` vs allowed globs. Violation → revert, report.
172
192
  **Impact completeness:** diff vs Impact callers/duplicates. Gap → advisory warning (no revert).
173
193
 
@@ -324,12 +344,42 @@ Prior tasks: {dep_id}: {summary}
324
344
  Steps: 1. chub search/get for APIs 2. LSP findReferences, add unlisted callers 3. LSP documentSymbol on Impact files → Read with offset/limit on relevant ranges only (never read full files) 4. Implement 5. Commit
325
345
  --- END ---
326
346
  Duplicates: [active]→consolidate [dead]→DELETE. ONLY job: code+commit. No merge/rename/checkout.
347
+ DECISIONS: If you made non-obvious choices, append to the LAST LINE BEFORE TASK_STATUS:
348
+ DECISIONS: [TAG] {decision} — {rationale} | [TAG] {decision2} — {rationale2}
349
+ Tags:
350
+ [APPROACH] — chose X over Y (architectural/design choice)
351
+ [PROVISIONAL] — works for now but won't scale / needs revisit
352
+ [ASSUMPTION] — assumed X is true; if wrong, Y breaks
353
+ [FUTURE] — deferred X because Y; revisit when Z
354
+ [UPDATE] — changed prior decision from X to Y because Z
355
+ Skip for trivial/mechanical changes.
327
356
  Last line of your response MUST be: TASK_STATUS:pass (if successful) or TASK_STATUS:fail (if failed) or TASK_STATUS:revert (if reverted)
328
357
  ```
329
358
 
359
+ **Integration Task** (`Agent(model="opus")`):
360
+ ```
361
+ --- START ---
362
+ {task_id} [INTEGRATION]: Verify contracts between {spec_a} ↔ {spec_b}
363
+ Integration ACs: {list from PLAN.md}
364
+ --- MIDDLE ---
365
+ Specs involved: {spec file paths}
366
+ Interface Map: {from integration task detail}
367
+ Contract Risks: {from integration task detail}
368
+ --- END ---
369
+ RULES:
370
+ - Fix the CONSUMER to match the PRODUCER's declared interface. Never weaken the producer.
371
+ - Each fix must reference the specific contract being repaired.
372
+ - If a migration conflict exists, make ALL migrations idempotent (CREATE TABLE IF NOT EXISTS, ADD COLUMN IF NOT EXISTS, etc.)
373
+ - Do NOT create new variables or intermediate adapters to paper over mismatches. Fix the actual call site.
374
+ - Do NOT modify acceptance criteria or spec definitions.
375
+ - Commit as fix({spec}): {contract description}. One commit per contract fix.
376
+ DECISIONS: Report each contract fix as: [TAG] {what was mismatched} — {which side changed and why}. Use [APPROACH] for definitive fixes, [PROVISIONAL] if the fix is a workaround, [UPDATE] if changing a prior decision.
377
+ Last line: TASK_STATUS:pass or TASK_STATUS:fail
378
+ ```
379
+
330
380
  **Bootstrap:** `BOOTSTRAP: Write tests for edit_scope files. Do NOT change implementation. Commit as test({spec}): bootstrap. Last line: TASK_STATUS:pass or TASK_STATUS:fail`
331
381
 
332
- **Spike:** `{task_id} [SPIKE]: {hypothesis}. Files+Spec. {reverted warnings}. Minimal spike. Commit as spike({spec}): {desc}. Last line: TASK_STATUS:pass or TASK_STATUS:fail`
382
+ **Spike:** `{task_id} [SPIKE]: {hypothesis}. Files+Spec. {reverted warnings}. Minimal spike. Commit as spike({spec}): {desc}. If you discovered constraints, rejected approaches, or made assumptions, report: DECISIONS: [TAG] {finding} — {why it matters} (use PROVISIONAL for "works but needs revisit", ASSUMPTION for "assumed X; if wrong Y breaks", APPROACH for definitive choices). Last line: TASK_STATUS:pass or TASK_STATUS:fail`
333
383
 
334
384
  **Optimize Task** (`Agent(model="opus")`):
335
385
  ```
@@ -399,6 +449,7 @@ Reverted task: `TaskUpdate(status: "pending")`, dependents stay blocked. Repeate
399
449
 
400
450
  | Rule | Detail |
401
451
  |------|--------|
452
+ | Integration tasks run last | [INTEGRATION] tasks execute after all blocked-by tasks complete. Fix tasks from integration failures are prescriptive (name the contract, producer, consumer, and which side to change). Never weaken the producer's declared interface — prefer fixing the consumer. |
402
453
  | Zero tests → bootstrap first | Sole task when snapshot empty |
403
454
  | 1 task = 1 agent = 1 commit | `atomic-commits` skill |
404
455
  | 1 file = 1 writer | Sequential on conflict |