deepflow 0.1.110 → 0.1.112

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,6 @@
1
1
  #!/usr/bin/env node
2
2
  // @hook-event: PostToolUse
3
+ // @hook-owner: deepflow
3
4
  /**
4
5
  * deepflow invariant checker
5
6
  * Checks implementation diffs against spec invariants.
@@ -16,7 +17,7 @@
16
17
 
17
18
  const fs = require('fs');
18
19
  const path = require('path');
19
- const { execFileSync } = require('child_process');
20
+ const { execFileSync, spawn } = require('child_process');
20
21
  const { extractSection } = require('./df-spec-lint');
21
22
  const { readStdinIfMain } = require('./lib/hook-stdin');
22
23
 
@@ -134,6 +135,157 @@ function checkLspAvailability(projectRoot, diffFilePaths) {
134
135
  return { available: true, binary: detected.binary, installCmd: null, message: null };
135
136
  }
136
137
 
138
+ /**
139
+ * REQ-9: Query a language server using JSON-RPC 2.0 over stdio (LSP wire protocol).
140
+ *
141
+ * Fast-fails in <1s when the binary is missing (existence check before spawn).
142
+ * Applies a 10s overall timeout for the full LSP query.
143
+ *
144
+ * Protocol sequence:
145
+ * 1. Send "initialize" request
146
+ * 2. Send the caller-supplied method request
147
+ * 3. Parse Content-Length–framed responses from stdout until we get a
148
+ * response matching the method request id
149
+ *
150
+ * @param {string} binary - LSP binary name or absolute path (e.g. "typescript-language-server")
151
+ * @param {string} projectRoot - Absolute path to the project root (passed as rootUri/rootPath)
152
+ * @param {string} fileUri - file:// URI of the document being queried
153
+ * @param {string} method - LSP method name (e.g. "textDocument/definition")
154
+ * @param {object} params - Method parameters object
155
+ * @returns {Promise<{ ok: true, result: * } | { ok: false, reason: 'lsp_unavailable' }>}
156
+ */
157
+ async function queryLsp(binary, projectRoot, fileUri, method, params) {
158
+ // ── Fast-fail: binary existence check (<1s) ────────────────────────────────
159
+ // Resolve absolute path directly, or locate via which-style PATH scan.
160
+ const binaryExists = (() => {
161
+ if (path.isAbsolute(binary)) {
162
+ return fs.existsSync(binary);
163
+ }
164
+ // Check each directory on PATH for the binary
165
+ const pathDirs = (process.env.PATH || '').split(path.delimiter);
166
+ for (const dir of pathDirs) {
167
+ if (!dir) continue;
168
+ const candidate = path.join(dir, binary);
169
+ try {
170
+ fs.accessSync(candidate, fs.constants.X_OK);
171
+ return true;
172
+ } catch (_) {
173
+ // Not found in this dir
174
+ }
175
+ }
176
+ return false;
177
+ })();
178
+
179
+ if (!binaryExists) {
180
+ return { ok: false, reason: 'lsp_unavailable' };
181
+ }
182
+
183
+ return new Promise((resolve) => {
184
+ const LSP_TIMEOUT_MS = 10_000;
185
+ let settled = false;
186
+
187
+ function fail() {
188
+ if (settled) return;
189
+ settled = true;
190
+ try { proc.kill(); } catch (_) { /* ignore */ }
191
+ resolve({ ok: false, reason: 'lsp_unavailable' });
192
+ }
193
+
194
+ // ── Spawn the LSP binary ───────────────────────────────────────────────
195
+ let proc;
196
+ try {
197
+ proc = spawn(binary, ['--stdio'], {
198
+ cwd: projectRoot,
199
+ stdio: ['pipe', 'pipe', 'ignore'],
200
+ });
201
+ } catch (_) {
202
+ return resolve({ ok: false, reason: 'lsp_unavailable' });
203
+ }
204
+
205
+ proc.on('error', fail);
206
+ proc.on('close', () => { if (!settled) fail(); });
207
+
208
+ // ── Overall 10s timeout ────────────────────────────────────────────────
209
+ const timer = setTimeout(fail, LSP_TIMEOUT_MS);
210
+
211
+ // ── JSON-RPC helpers ───────────────────────────────────────────────────
212
+ let msgId = 1;
213
+
214
+ function sendMessage(msg) {
215
+ const json = JSON.stringify(msg);
216
+ const header = `Content-Length: ${Buffer.byteLength(json, 'utf8')}\r\n\r\n`;
217
+ proc.stdin.write(header + json, 'utf8');
218
+ }
219
+
220
+ const INIT_ID = msgId++;
221
+ const METHOD_ID = msgId++;
222
+
223
+ // ── stdout parser: Content-Length framing ─────────────────────────────
224
+ let buffer = '';
225
+ let expectedLength = -1;
226
+
227
+ proc.stdout.on('data', (chunk) => {
228
+ if (settled) return;
229
+ buffer += chunk.toString('utf8');
230
+
231
+ while (true) { // eslint-disable-line no-constant-condition
232
+ if (expectedLength === -1) {
233
+ // Look for the header/body separator
234
+ const sepIdx = buffer.indexOf('\r\n\r\n');
235
+ if (sepIdx === -1) break;
236
+
237
+ const header = buffer.slice(0, sepIdx);
238
+ const lenMatch = header.match(/Content-Length:\s*(\d+)/i);
239
+ if (!lenMatch) { fail(); return; }
240
+
241
+ expectedLength = parseInt(lenMatch[1], 10);
242
+ buffer = buffer.slice(sepIdx + 4); // skip past "\r\n\r\n"
243
+ }
244
+
245
+ if (buffer.length < expectedLength) break;
246
+
247
+ const body = buffer.slice(0, expectedLength);
248
+ buffer = buffer.slice(expectedLength);
249
+ expectedLength = -1;
250
+
251
+ let msg;
252
+ try { msg = JSON.parse(body); } catch (_) { fail(); return; }
253
+
254
+ if (msg.id === INIT_ID) {
255
+ // Initialize response received — now send the actual method request
256
+ sendMessage({ jsonrpc: '2.0', id: METHOD_ID, method, params });
257
+ } else if (msg.id === METHOD_ID) {
258
+ // Got our response
259
+ if (!settled) {
260
+ settled = true;
261
+ clearTimeout(timer);
262
+ try { proc.kill(); } catch (_) { /* ignore */ }
263
+ if (msg.error) {
264
+ resolve({ ok: false, reason: 'lsp_unavailable' });
265
+ } else {
266
+ resolve({ ok: true, result: msg.result });
267
+ }
268
+ }
269
+ }
270
+ // Ignore notifications (id-less messages) and other responses
271
+ }
272
+ });
273
+
274
+ // ── Send initialize request ────────────────────────────────────────────
275
+ sendMessage({
276
+ jsonrpc: '2.0',
277
+ id: INIT_ID,
278
+ method: 'initialize',
279
+ params: {
280
+ processId: process.pid,
281
+ rootUri: `file://${projectRoot}`,
282
+ rootPath: projectRoot,
283
+ capabilities: {},
284
+ },
285
+ });
286
+ });
287
+ }
288
+
137
289
  // ── Valid violation tags (REQ-7) ──────────────────────────────────────────────
138
290
  const TAGS = {
139
291
  MOCK: 'MOCK', // Production code contains mock/stub placeholders
@@ -1249,6 +1401,7 @@ module.exports = {
1249
1401
  checkLspAvailability,
1250
1402
  detectLanguageServer,
1251
1403
  isBinaryAvailable,
1404
+ queryLsp,
1252
1405
  formatOutput,
1253
1406
  formatViolation,
1254
1407
  parseDiff,
@@ -1,5 +1,6 @@
1
1
  #!/usr/bin/env node
2
2
  // @hook-event: SessionStart, SessionEnd
3
+ // @hook-owner: dashboard
3
4
  /**
4
5
  * deepflow quota logger
5
6
  * Logs Anthropic API quota/usage data to ~/.claude/quota-history.jsonl
@@ -1,5 +1,6 @@
1
1
  #!/usr/bin/env node
2
2
  // @hook-event: PostToolUse
3
+ // @hook-owner: deepflow
3
4
  /**
4
5
  * deepflow snapshot guard
5
6
  * PostToolUse hook: blocks Write/Edit to files listed in .deepflow/auto-snapshot.txt.
@@ -1,4 +1,6 @@
1
1
  #!/usr/bin/env node
2
+ // @hook-event: PostToolUse
3
+ // @hook-owner: deepflow
2
4
  /**
3
5
  * deepflow spec linter
4
6
  * Validates spec files against hard invariants and advisory checks.
@@ -222,6 +224,10 @@ function validateSpec(content, { mode = 'interactive', specsDir = null, filename
222
224
  hard.push(
223
225
  `Acceptance Criteria line missing "- [ ]" checkbox: "${line.trim()}"`
224
226
  );
227
+ } else if (!/\*\*AC-\d+\*\*/.test(line)) {
228
+ hard.push(
229
+ `Acceptance Criteria checkbox missing **AC-N** identifier: "${line.trim()}"`
230
+ );
225
231
  }
226
232
  }
227
233
  } else if (!hasInlineAC) {
@@ -43,7 +43,7 @@ function fullSpec() {
43
43
  'Not doing X',
44
44
  '',
45
45
  '## Acceptance Criteria',
46
- '- [ ] REQ-1 works',
46
+ '- [ ] **AC-1** REQ-1 works',
47
47
  '',
48
48
  '## Technical Notes',
49
49
  'Use module Y',
@@ -150,6 +150,62 @@ describe('validateSpec with frontmatter', () => {
150
150
  });
151
151
  });
152
152
 
153
+ // ---------------------------------------------------------------------------
154
+ // validateSpec — Acceptance Criteria AC-N format enforcement
155
+ // ---------------------------------------------------------------------------
156
+
157
+ describe('validateSpec AC-N format enforcement', () => {
158
+ test('bare "- [ ]" line without **AC-N** identifier causes hard failure', () => {
159
+ const specWithBareAC = [
160
+ '## Objective',
161
+ 'Build the thing',
162
+ '',
163
+ '## Requirements',
164
+ '- REQ-1: Do something',
165
+ '',
166
+ '## Constraints',
167
+ 'Must be fast',
168
+ '',
169
+ '## Out of Scope',
170
+ 'Not doing X',
171
+ '',
172
+ '## Acceptance Criteria',
173
+ '- [ ] REQ-1 works',
174
+ '',
175
+ '## Technical Notes',
176
+ 'Use module Y',
177
+ ].join('\n');
178
+ const result = validateSpec(specWithBareAC);
179
+ const acErrors = result.hard.filter((m) => m.includes('AC-N'));
180
+ assert.ok(acErrors.length > 0, 'should hard-fail when AC checkbox lacks **AC-N** identifier');
181
+ });
182
+
183
+ test('AC line with **AC-N** identifier passes without hard error', () => {
184
+ const result = validateSpec(fullSpec());
185
+ const acErrors = result.hard.filter((m) => m.includes('AC-N'));
186
+ assert.equal(acErrors.length, 0, 'should not hard-fail when AC checkbox has **AC-N** identifier');
187
+ });
188
+
189
+ test('bare "- [ ]" line hard error message references the offending line', () => {
190
+ const specWithBareAC = [
191
+ '## Objective',
192
+ 'Build the thing',
193
+ '',
194
+ '## Requirements',
195
+ '- REQ-1: Do something',
196
+ '',
197
+ '## Acceptance Criteria',
198
+ '- [ ] bare item without identifier',
199
+ ].join('\n');
200
+ const result = validateSpec(specWithBareAC);
201
+ const acErrors = result.hard.filter((m) => m.includes('AC-N'));
202
+ assert.ok(
203
+ acErrors.some((m) => m.includes('bare item without identifier')),
204
+ 'hard error message should include the offending line text'
205
+ );
206
+ });
207
+ });
208
+
153
209
  // ---------------------------------------------------------------------------
154
210
  // extractSection — frontmatter handling
155
211
  // ---------------------------------------------------------------------------
@@ -1,5 +1,6 @@
1
1
  #!/usr/bin/env node
2
2
  // @hook-event: statusLine
3
+ // @hook-owner: deepflow
3
4
  /**
4
5
  * deepflow statusline for Claude Code
5
6
  * Displays: update | model | project | context usage
@@ -1,5 +1,6 @@
1
1
  #!/usr/bin/env node
2
2
  // @hook-event: SubagentStop
3
+ // @hook-owner: dashboard
3
4
  'use strict';
4
5
  const fs = require('fs');
5
6
  const path = require('path');
@@ -1,4 +1,6 @@
1
1
  #!/usr/bin/env node
2
+ // @hook-event: PostToolUse
3
+ // @hook-owner: dashboard
2
4
  /**
3
5
  * Spike hook: capture raw PostToolUse stdin payload
4
6
  * Writes the raw JSON to /tmp/df-posttooluse-payload.json for inspection.
@@ -1,5 +1,6 @@
1
1
  #!/usr/bin/env node
2
2
  // @hook-event: PostToolUse
3
+ // @hook-owner: dashboard
3
4
  /**
4
5
  * deepflow tool usage logger
5
6
  * Logs every PostToolUse event to ~/.claude/tool-usage.jsonl for token instrumentation.
@@ -1,5 +1,6 @@
1
1
  #!/usr/bin/env node
2
2
  // @hook-event: PostToolUse
3
+ // @hook-owner: deepflow
3
4
  /**
4
5
  * deepflow worktree guard
5
6
  * PostToolUse hook: blocks Write/Edit to main-branch files when a df/* worktree exists.
@@ -19,6 +20,8 @@
19
20
  'use strict';
20
21
 
21
22
  const { execFileSync } = require('child_process');
23
+ const fs = require('fs');
24
+ const path = require('path');
22
25
  const { readStdinIfMain } = require('./lib/hook-stdin');
23
26
 
24
27
  // Paths that are always allowed regardless of worktree state
@@ -57,6 +60,111 @@ function dfWorktreeExists(cwd) {
57
60
  }
58
61
  }
59
62
 
63
+ /**
64
+ * List all worktrees as { path, branch } objects parsed from `git worktree list --porcelain`.
65
+ * Returns [] on git failure.
66
+ */
67
+ function listWorktrees(cwd) {
68
+ let out;
69
+ try {
70
+ out = execFileSync('git', ['worktree', 'list', '--porcelain'], {
71
+ encoding: 'utf8',
72
+ cwd,
73
+ stdio: ['ignore', 'pipe', 'ignore'],
74
+ });
75
+ } catch (_) {
76
+ return [];
77
+ }
78
+
79
+ const worktrees = [];
80
+ let current = {};
81
+ for (const line of out.split('\n')) {
82
+ if (line.startsWith('worktree ')) {
83
+ if (current.path) worktrees.push(current);
84
+ current = { path: line.slice('worktree '.length).trim() };
85
+ } else if (line.startsWith('branch ')) {
86
+ // e.g. "branch refs/heads/df/foo"
87
+ const ref = line.slice('branch '.length).trim();
88
+ current.branch = ref.replace(/^refs\/heads\//, '');
89
+ } else if (line === '') {
90
+ if (current.path) {
91
+ worktrees.push(current);
92
+ current = {};
93
+ }
94
+ }
95
+ }
96
+ if (current.path) worktrees.push(current);
97
+ return worktrees;
98
+ }
99
+
100
+ /**
101
+ * Parse PLAN.md content and return the in-progress task's Files: list.
102
+ * "In-progress" = first unchecked `[ ]` task, or the task whose id matches branchName.
103
+ * Returns an array of file path strings (possibly empty).
104
+ */
105
+ function extractInProgressFiles(planContent, branchName) {
106
+ if (!planContent) return [];
107
+ const lines = planContent.split('\n');
108
+
109
+ // Collect task blocks: start indices at lines matching `- [ ]` or `- [x]`
110
+ const taskStarts = [];
111
+ for (let i = 0; i < lines.length; i++) {
112
+ if (/^\s*-\s*\[[ xX]\]\s*\*?\*?T\d+/.test(lines[i]) || /^\s*-\s*\[[ xX]\]/.test(lines[i])) {
113
+ taskStarts.push(i);
114
+ }
115
+ }
116
+
117
+ function getBlock(startIdx) {
118
+ const end = taskStarts.find(i => i > startIdx) ?? lines.length;
119
+ return lines.slice(startIdx, end);
120
+ }
121
+
122
+ function getFilesFromBlock(block) {
123
+ for (const line of block) {
124
+ const m = line.match(/^\s*-?\s*Files:\s*(.+)$/i);
125
+ if (m) {
126
+ return m[1]
127
+ .split(',')
128
+ .map(s => s.trim().replace(/^[`"']|[`"']$/g, ''))
129
+ .filter(s => s.length > 0 && !/^\{.*\}$/.test(s) && !/^\[.*\]$/.test(s));
130
+ }
131
+ }
132
+ return [];
133
+ }
134
+
135
+ // Prefer: first unchecked task
136
+ for (const idx of taskStarts) {
137
+ if (/^\s*-\s*\[ \]/.test(lines[idx])) {
138
+ return getFilesFromBlock(getBlock(idx));
139
+ }
140
+ }
141
+
142
+ // Fallback: task matching branch name (e.g., df/feature-x → match "feature-x" in header)
143
+ if (branchName) {
144
+ const slug = branchName.replace(/^df\//, '');
145
+ for (const idx of taskStarts) {
146
+ if (lines[idx].toLowerCase().includes(slug.toLowerCase())) {
147
+ return getFilesFromBlock(getBlock(idx));
148
+ }
149
+ }
150
+ }
151
+
152
+ return [];
153
+ }
154
+
155
+ /**
156
+ * Normalize a file path for intersection comparison.
157
+ * Returns path relative to repo root when possible; otherwise basename.
158
+ */
159
+ function normalizePath(p, repoRoot) {
160
+ if (!p) return '';
161
+ const abs = path.isAbsolute(p) ? p : path.resolve(repoRoot || '', p);
162
+ if (repoRoot && abs.startsWith(repoRoot + path.sep)) {
163
+ return abs.slice(repoRoot.length + 1);
164
+ }
165
+ return p;
166
+ }
167
+
60
168
  readStdinIfMain(module, (data) => {
61
169
  const toolName = data.tool_name || '';
62
170
 
@@ -75,6 +183,55 @@ readStdinIfMain(module, (data) => {
75
183
 
76
184
  const branch = currentBranch(cwd);
77
185
 
186
+ // REQ-2: Cross-worktree file intersection check.
187
+ // For every df/* worktree OTHER than the current one, read its PLAN.md,
188
+ // find the in-progress task, and BLOCK if filePath intersects its Files: list.
189
+ try {
190
+ const worktrees = listWorktrees(cwd);
191
+ const currentWorktreePath = worktrees.find(w => w.branch === branch)?.path || cwd;
192
+
193
+ for (const wt of worktrees) {
194
+ if (!wt.branch || !wt.branch.startsWith('df/')) continue;
195
+ if (wt.path === currentWorktreePath) continue;
196
+
197
+ const planPath = path.join(wt.path, 'PLAN.md');
198
+ let planContent;
199
+ try {
200
+ planContent = fs.readFileSync(planPath, 'utf8');
201
+ } catch (_) {
202
+ continue; // no PLAN.md — skip gracefully
203
+ }
204
+
205
+ const claimed = extractInProgressFiles(planContent, wt.branch);
206
+ if (claimed.length === 0) continue;
207
+
208
+ // Repo root of the OTHER worktree: normalize writing file against current cwd
209
+ // but compare against claimed paths (which are repo-relative in PLAN.md).
210
+ const writeRel = normalizePath(filePath, currentWorktreePath);
211
+ const writeBase = path.basename(filePath);
212
+
213
+ for (const claimedFile of claimed) {
214
+ const claimedRel = claimedFile.replace(/^\.\//, '');
215
+ const claimedBase = path.basename(claimedRel);
216
+ if (
217
+ writeRel === claimedRel ||
218
+ filePath === claimedRel ||
219
+ filePath.endsWith('/' + claimedRel) ||
220
+ writeBase === claimedBase && (writeRel.endsWith(claimedRel) || claimedRel.endsWith(writeRel))
221
+ ) {
222
+ console.error(
223
+ `[df-worktree-guard] Blocked ${toolName} to "${filePath}" — ` +
224
+ `file is claimed by in-progress task in worktree ${wt.branch} (${wt.path}). ` +
225
+ `Coordinate or wait for that task to complete.`
226
+ );
227
+ process.exit(1);
228
+ }
229
+ }
230
+ }
231
+ } catch (_) {
232
+ // Never break tool execution on unexpected errors
233
+ }
234
+
78
235
  // Only guard when on main/master
79
236
  if (branch !== 'main' && branch !== 'master') {
80
237
  return;
@@ -0,0 +1,114 @@
1
+ 'use strict';
2
+ /**
3
+ * Shared installer utilities for deepflow hook management.
4
+ * Used by bin/install.js and any owner-specific installer logic.
5
+ */
6
+
7
+ const fs = require('fs');
8
+ const path = require('path');
9
+
10
+ // Valid hook events (settings.hooks keys + special "statusLine")
11
+ const VALID_HOOK_EVENTS = new Set([
12
+ 'SessionStart', 'SessionEnd', 'PreToolUse', 'PostToolUse', 'SubagentStop', 'statusLine'
13
+ ]);
14
+
15
+ /**
16
+ * Atomically write data to targetPath using a write-to-temp + rename pattern.
17
+ * If the write fails, the original file is left untouched and the temp file is
18
+ * cleaned up. Temp file is created in the same directory as the target so the
19
+ * rename is within the same filesystem (atomic on POSIX).
20
+ */
21
+ function atomicWriteFileSync(targetPath, data) {
22
+ const tmpPath = targetPath + '.tmp';
23
+ try {
24
+ fs.writeFileSync(tmpPath, data);
25
+ fs.renameSync(tmpPath, targetPath);
26
+ } catch (err) {
27
+ try { fs.unlinkSync(tmpPath); } catch (_) {}
28
+ throw err;
29
+ }
30
+ }
31
+
32
+ /**
33
+ * Scan hook source files for @hook-event tags. Returns:
34
+ * { eventMap: Map<event, [filename, ...]>, untagged: [filename, ...] }
35
+ *
36
+ * @param {string} hooksSourceDir - Directory to scan for hook files
37
+ * @param {string} [filterOwner] - When set, only include files whose @hook-owner tag
38
+ * matches this value (case-sensitive). Files with no
39
+ * @hook-owner tag are always excluded when filterOwner
40
+ * is provided.
41
+ */
42
+ function scanHookEvents(hooksSourceDir, filterOwner) {
43
+ const eventMap = new Map(); // event → [filenames]
44
+ const untagged = [];
45
+
46
+ if (!fs.existsSync(hooksSourceDir)) return { eventMap, untagged };
47
+
48
+ for (const file of fs.readdirSync(hooksSourceDir)) {
49
+ if (!file.endsWith('.js') || file.endsWith('.test.js')) continue;
50
+
51
+ const content = fs.readFileSync(path.join(hooksSourceDir, file), 'utf8');
52
+ const firstLines = content.split('\n').slice(0, 10).join('\n');
53
+
54
+ // Apply owner filter if requested
55
+ if (filterOwner !== undefined) {
56
+ const ownerMatch = firstLines.match(/\/\/\s*@hook-owner:\s*(.+)/);
57
+ if (!ownerMatch || ownerMatch[1].trim() !== filterOwner) continue;
58
+ }
59
+
60
+ const match = firstLines.match(/\/\/\s*@hook-event:\s*(.+)/);
61
+
62
+ if (!match) {
63
+ untagged.push(file);
64
+ continue;
65
+ }
66
+
67
+ const events = match[1].split(',').map(e => e.trim()).filter(Boolean);
68
+ let hasValidEvent = false;
69
+
70
+ for (const event of events) {
71
+ if (!VALID_HOOK_EVENTS.has(event)) {
72
+ // Surface warning via stderr so callers can decide how to display it
73
+ process.stderr.write(`[installer-utils] Warning: unknown event "${event}" in ${file} — skipped\n`);
74
+ continue;
75
+ }
76
+ hasValidEvent = true;
77
+ if (!eventMap.has(event)) eventMap.set(event, []);
78
+ eventMap.get(event).push(file);
79
+ }
80
+
81
+ if (!hasValidEvent) {
82
+ untagged.push(file);
83
+ }
84
+ }
85
+
86
+ return { eventMap, untagged };
87
+ }
88
+
89
+ /**
90
+ * Remove all deepflow hook entries (commands containing /hooks/df-) from settings.
91
+ * Preserves non-deepflow hooks.
92
+ */
93
+ function removeDeepflowHooks(settings) {
94
+ const isDeepflow = (hook) => {
95
+ const cmd = hook.hooks?.[0]?.command || '';
96
+ return cmd.includes('/hooks/df-');
97
+ };
98
+
99
+ // Clean settings.hooks.*
100
+ if (settings.hooks) {
101
+ for (const event of Object.keys(settings.hooks)) {
102
+ settings.hooks[event] = settings.hooks[event].filter(h => !isDeepflow(h));
103
+ if (settings.hooks[event].length === 0) delete settings.hooks[event];
104
+ }
105
+ if (Object.keys(settings.hooks).length === 0) delete settings.hooks;
106
+ }
107
+
108
+ // Clean settings.statusLine if it's a deepflow hook
109
+ if (settings.statusLine?.command && settings.statusLine.command.includes('/hooks/df-')) {
110
+ delete settings.statusLine;
111
+ }
112
+ }
113
+
114
+ module.exports = { atomicWriteFileSync, scanHookEvents, removeDeepflowHooks, VALID_HOOK_EVENTS };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "deepflow",
3
- "version": "0.1.110",
3
+ "version": "0.1.112",
4
4
  "description": "Doing reveals what thinking can't predict — spec-driven iterative development for Claude Code",
5
5
  "keywords": [
6
6
  "claude",
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  name: df:debate
3
3
  description: Generate multi-perspective analysis of a problem before formalizing into a spec
4
- allowed-tools: [Read, Grep, Glob, Agent]
4
+ allowed-tools: [Agent, AskUserQuestion]
5
5
  ---
6
6
 
7
7
  # /df:debate — Multi-Perspective Analysis
@@ -12,7 +12,7 @@ Coordinate reasoner agents to debate a problem from multiple perspectives, then
12
12
 
13
13
  **NEVER:** use TaskOutput, `run_in_background`, Explore agents, EnterPlanMode, ExitPlanMode
14
14
 
15
- **ONLY:** Gather codebase context (Glob/Grep/Read), spawn reasoner agents (non-background), write debate file, respond conversationally
15
+ **ONLY:** Spawn context-fork agent for codebase gathering, spawn reasoner agents (non-background), write debate file, respond conversationally
16
16
 
17
17
  ## Agents
18
18
 
@@ -30,7 +30,23 @@ Coordinate reasoner agents to debate a problem from multiple perspectives, then
30
30
  Summarize conversation context in ~200 words: core problem, requirements, constraints, user priorities. Passed to each perspective agent.
31
31
 
32
32
  ### 2. GATHER CODEBASE CONTEXT
33
- Prefer LSP documentSymbol to understand file structure, then Read with offset/limit on relevant ranges only (never read full files). Glob/Grep to locate files (up to 5-6, focus on core logic). Produce ~300 word codebase summary: what exists, key interfaces, current limitations, dependencies. Passed to every agent.
33
+ Spawn a context-fork agent (subagent_type="default", model="sonnet") with the following prompt:
34
+
35
+ ```
36
+ ## Task: Codebase Context Gathering
37
+
38
+ Problem being analyzed: {summary}
39
+
40
+ Instructions:
41
+ - Use LSP documentSymbol to understand file structure where available
42
+ - Use Read with offset/limit on relevant ranges only (never read full files)
43
+ - Use Glob/Grep to locate relevant files (up to 5-6, focus on core logic)
44
+ - Produce a ~300 word summary covering: what exists, key interfaces, current limitations, dependencies
45
+
46
+ Return ONLY the codebase summary text (~300 words). No preamble, no explanation.
47
+ ```
48
+
49
+ Store the agent's response as {codebase_summary}. Passed to every perspective agent.
34
50
 
35
51
  ### 3. SPAWN PERSPECTIVES
36
52
 
@@ -82,6 +98,6 @@ Present key tensions and open decisions, then: `Next: Run /df:spec {name} to for
82
98
  ## Rules
83
99
 
84
100
  - ALL 4 perspective agents MUST be spawned in ONE message (parallel, non-background)
85
- Orchestrator gathers codebase context (step 2), passes to agents via prompt — agents never read files
101
+ - Orchestrator delegates codebase gathering (step 2) to a context-fork agent — orchestrator never reads files directly
86
102
  - File name MUST be `.debate-{name}.md` (dot prefix = auxiliary file, lives in `specs/`)
87
103
  - Word limits: each perspective <400 words, synthesis <500 words
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  name: df:discover
3
3
  description: Explore a problem space deeply through structured questioning to surface requirements and constraints
4
- allowed-tools: [AskUserQuestion, Read, Agent]
4
+ allowed-tools: [AskUserQuestion, Agent]
5
5
  ---
6
6
 
7
7
  # /df:discover — Deep Problem Exploration