deepflow 0.1.87 → 0.1.88

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/install.js CHANGED
@@ -238,6 +238,7 @@ async function configureHooks(claudeDir) {
238
238
  const updateCheckCmd = `node "${path.join(claudeDir, 'hooks', 'df-check-update.js')}"`;
239
239
  const consolidationCheckCmd = `node "${path.join(claudeDir, 'hooks', 'df-consolidation-check.js')}"`;
240
240
  const quotaLoggerCmd = `node "${path.join(claudeDir, 'hooks', 'df-quota-logger.js')}"`;
241
+ const toolUsageCmd = `node "${path.join(claudeDir, 'hooks', 'df-tool-usage.js')}"`;
241
242
 
242
243
  let settings = {};
243
244
 
@@ -338,6 +339,26 @@ async function configureHooks(claudeDir) {
338
339
  });
339
340
  log('Quota logger configured');
340
341
 
342
+ // Configure PostToolUse hook for tool usage instrumentation
343
+ if (!settings.hooks.PostToolUse) {
344
+ settings.hooks.PostToolUse = [];
345
+ }
346
+
347
+ // Remove any existing deepflow tool usage hooks from PostToolUse
348
+ settings.hooks.PostToolUse = settings.hooks.PostToolUse.filter(hook => {
349
+ const cmd = hook.hooks?.[0]?.command || '';
350
+ return !cmd.includes('df-tool-usage');
351
+ });
352
+
353
+ // Add tool usage hook
354
+ settings.hooks.PostToolUse.push({
355
+ hooks: [{
356
+ type: 'command',
357
+ command: toolUsageCmd
358
+ }]
359
+ });
360
+ log('PostToolUse hook configured');
361
+
341
362
  fs.writeFileSync(settingsPath, JSON.stringify(settings, null, 2));
342
363
  }
343
364
 
@@ -518,7 +539,7 @@ async function uninstall() {
518
539
  ];
519
540
 
520
541
  if (level === 'global') {
521
- toRemove.push('hooks/df-statusline.js', 'hooks/df-check-update.js', 'hooks/df-consolidation-check.js', 'hooks/df-invariant-check.js', 'hooks/df-quota-logger.js');
542
+ toRemove.push('hooks/df-statusline.js', 'hooks/df-check-update.js', 'hooks/df-consolidation-check.js', 'hooks/df-invariant-check.js', 'hooks/df-quota-logger.js', 'hooks/df-tool-usage.js');
522
543
  }
523
544
 
524
545
  for (const item of toRemove) {
@@ -562,11 +583,20 @@ async function uninstall() {
562
583
  delete settings.hooks.SessionEnd;
563
584
  }
564
585
  }
586
+ if (settings.hooks?.PostToolUse) {
587
+ settings.hooks.PostToolUse = settings.hooks.PostToolUse.filter(hook => {
588
+ const cmd = hook.hooks?.[0]?.command || '';
589
+ return !cmd.includes('df-tool-usage');
590
+ });
591
+ if (settings.hooks.PostToolUse.length === 0) {
592
+ delete settings.hooks.PostToolUse;
593
+ }
594
+ }
565
595
  if (settings.hooks && Object.keys(settings.hooks).length === 0) {
566
596
  delete settings.hooks;
567
597
  }
568
598
  fs.writeFileSync(settingsPath, JSON.stringify(settings, null, 2));
569
- console.log(` ${c.green}✓${c.reset} Removed SessionStart/SessionEnd hooks`);
599
+ console.log(` ${c.green}✓${c.reset} Removed SessionStart/SessionEnd/PostToolUse hooks`);
570
600
  } catch (e) {
571
601
  // Fail silently
572
602
  }
@@ -22,6 +22,62 @@ const REQUIRED_SECTIONS = [
22
22
  ['Technical Notes', 'architecture notes', 'architecture', 'tech notes', 'implementation notes'],
23
23
  ];
24
24
 
25
// ── Spec layers (onion model) ───────────────────────────────────────────
// Each layer lists the sections it adds; a spec sits at the highest layer
// for which every section of that layer AND all earlier layers is present.
//
// L0: Problem defined     → spikes only
// L1: Requirements known  → targeted spikes
// L2: Verifiable          → implementation tasks
// L3: Fully constrained   → full impact analysis + optimize tasks
const LAYER_DEFINITIONS = [
  { layer: 0, sections: ['Objective'] },
  { layer: 1, sections: ['Requirements'] },
  { layer: 2, sections: ['Acceptance Criteria'] },
  { layer: 3, sections: ['Constraints', 'Out of Scope', 'Technical Notes'] },
];

/**
 * Compute the spec layer from its content.
 *
 * @param {string} content - Full markdown text of the spec.
 * @returns {number} Highest layer (0–3) whose cumulative required sections
 *   are all present; -1 when not even L0 (no Objective) is satisfied.
 */
function computeLayer(content) {
  // Collect all "## Heading" titles, stripping any "1. " style numbering
  // and normalizing to lowercase for alias comparison.
  const headerPattern = /^##\s+(.+)/i;
  const headers = content
    .split('\n')
    .map((line) => line.match(headerPattern))
    .filter(Boolean)
    .map((m) => m[1].trim().replace(/^\d+\.\s*/, '').toLowerCase());

  // Inline "*AC:" lines satisfy the Acceptance Criteria requirement.
  const inlineAC = /\*AC[:.]/.test(content);

  // Resolve a canonical section name to all accepted spellings (aliases)
  // via REQUIRED_SECTIONS; fall back to the name itself when unlisted.
  const namesFor = (section) => {
    const entry = REQUIRED_SECTIONS.find(
      ([canonical]) => canonical.toLowerCase() === section.toLowerCase()
    );
    return entry
      ? entry.map((n) => n.toLowerCase())
      : [section.toLowerCase()];
  };

  const sectionPresent = (section) =>
    (section === 'Acceptance Criteria' && inlineAC) ||
    headers.some((h) => namesFor(section).includes(h));

  let reached = -1;
  for (const { layer, sections } of LAYER_DEFINITIONS) {
    if (!sections.every(sectionPresent)) {
      break; // layers are cumulative — can't skip
    }
    reached = layer;
  }
  return reached;
}
80
+
25
81
  /**
26
82
  * Validate a spec's content against hard invariants and advisory checks.
27
83
  *
@@ -34,7 +90,11 @@ function validateSpec(content, { mode = 'interactive', specsDir = null } = {}) {
34
90
  const hard = [];
35
91
  const advisory = [];
36
92
 
37
- // ── (a) Required sections ────────────────────────────────────────────
93
+ const layer = computeLayer(content);
94
+
95
+ // ── (a) Required sections (layer-aware) ──────────────────────────────
96
+ // Hard-fail only for sections required by the CURRENT layer.
97
+ // Missing sections beyond the current layer are advisory (hints to deepen).
38
98
  const headersFound = [];
39
99
  for (const line of content.split('\n')) {
40
100
  const m = line.match(/^##\s+(.+)/i);
@@ -45,13 +105,25 @@ function validateSpec(content, { mode = 'interactive', specsDir = null } = {}) {
45
105
  }
46
106
  }
47
107
 
108
+ // Collect all sections required up to the current layer
109
+ const layerRequiredSections = new Set();
110
+ for (const { layer: l, sections } of LAYER_DEFINITIONS) {
111
+ if (l <= layer) {
112
+ for (const s of sections) layerRequiredSections.add(s.toLowerCase());
113
+ }
114
+ }
115
+
48
116
  for (const [canonical, ...aliases] of REQUIRED_SECTIONS) {
49
117
  const allNames = [canonical, ...aliases].map((n) => n.toLowerCase());
50
118
  const found = headersFound.some((h) => allNames.includes(h.toLowerCase()));
51
119
  if (!found) {
52
120
  // Inline *AC: lines satisfy the Acceptance Criteria requirement
53
121
  if (canonical === 'Acceptance Criteria' && /\*AC[:.]/.test(content)) continue;
54
- hard.push(`Missing required section: "## ${canonical}"`);
122
+ if (layerRequiredSections.has(canonical.toLowerCase())) {
123
+ hard.push(`Missing required section: "## ${canonical}"`);
124
+ } else {
125
+ advisory.push(`Missing section for deeper layer: "## ${canonical}"`);
126
+ }
55
127
  }
56
128
  }
57
129
 
@@ -161,7 +233,7 @@ function validateSpec(content, { mode = 'interactive', specsDir = null } = {}) {
161
233
  hard.push(...advisory.splice(0, advisory.length));
162
234
  }
163
235
 
164
- return { hard, advisory };
236
+ return { layer, hard, advisory };
165
237
  }
166
238
 
167
239
  /**
@@ -230,7 +302,9 @@ if (require.main === module) {
230
302
  console.log('All checks passed.');
231
303
  }
232
304
 
305
+ console.log(`Spec layer: L${result.layer} (${['problem defined', 'requirements known', 'verifiable', 'fully constrained'][result.layer] || 'incomplete'})`);
306
+
233
307
  process.exit(result.hard.length > 0 ? 1 : 0);
234
308
  }
235
309
 
236
- module.exports = { validateSpec, extractSection };
310
+ module.exports = { validateSpec, extractSection, computeLayer };
@@ -75,11 +75,14 @@ function buildContextMeter(contextWindow, data) {
75
75
  percentage = Math.min(100, Math.round(percentage));
76
76
 
77
77
  // Write context usage to file for deepflow commands
78
- writeContextUsage(percentage);
78
+ writeContextUsage(percentage, data);
79
79
 
80
80
  // Write token history for instrumentation
81
81
  writeTokenHistory(contextWindow, data);
82
82
 
83
+ // Write cache history for cross-session persistence
84
+ writeCacheHistory(contextWindow, data);
85
+
83
86
  // Build 10-segment bar
84
87
  const segments = 10;
85
88
  const filled = Math.round((percentage / 100) * segments);
@@ -112,9 +115,10 @@ function checkForUpdate() {
112
115
  return null;
113
116
  }
114
117
 
115
- function writeContextUsage(percentage) {
118
+ function writeContextUsage(percentage, data) {
116
119
  try {
117
- const deepflowDir = path.join(process.cwd(), '.deepflow');
120
+ const baseDir = data?.workspace?.current_dir || process.cwd();
121
+ const deepflowDir = path.join(baseDir, '.deepflow');
118
122
  if (!fs.existsSync(deepflowDir)) {
119
123
  fs.mkdirSync(deepflowDir, { recursive: true });
120
124
  }
@@ -130,7 +134,8 @@ function writeContextUsage(percentage) {
130
134
 
131
135
  function writeTokenHistory(contextWindow, data) {
132
136
  try {
133
- const deepflowDir = path.join(process.cwd(), '.deepflow');
137
+ const baseDir = data?.workspace?.current_dir || process.cwd();
138
+ const deepflowDir = path.join(baseDir, '.deepflow');
134
139
  if (!fs.existsSync(deepflowDir)) {
135
140
  fs.mkdirSync(deepflowDir, { recursive: true });
136
141
  }
@@ -142,6 +147,9 @@ function writeTokenHistory(contextWindow, data) {
142
147
  const contextWindowSize = contextWindow.context_window_size || 0;
143
148
  const usedPercentage = contextWindow.used_percentage || 0;
144
149
 
150
+ const agentRole = process.env.DEEPFLOW_AGENT_ROLE || 'orchestrator';
151
+ const taskId = process.env.DEEPFLOW_TASK_ID || null;
152
+
145
153
  const record = {
146
154
  timestamp,
147
155
  input_tokens: usage.input_tokens || 0,
@@ -150,7 +158,9 @@ function writeTokenHistory(contextWindow, data) {
150
158
  context_window_size: contextWindowSize,
151
159
  used_percentage: usedPercentage,
152
160
  model,
153
- session_id: sessionId
161
+ session_id: sessionId,
162
+ agent_role: agentRole,
163
+ task_id: taskId
154
164
  };
155
165
 
156
166
  const tokenHistoryPath = path.join(deepflowDir, 'token-history.jsonl');
@@ -159,3 +169,65 @@ function writeTokenHistory(contextWindow, data) {
159
169
  // Fail silently
160
170
  }
161
171
  }
172
+
173
/**
 * Append one cache-efficiency record per session to ~/.claude/cache-history.jsonl.
 *
 * The record captures the cache hit ratio (cache_read / all input-side tokens)
 * together with agent role / task id from the environment. Deduplication is
 * last-record-only: if the final line of the file already carries this
 * session_id, nothing is written. All failures are swallowed (statusline
 * rendering must never break).
 *
 * @param {object} contextWindow - Statusline payload; `current_usage` holds token counts.
 * @param {object} data - Hook payload; `session_id` and `model` are read.
 */
function writeCacheHistory(contextWindow, data) {
  try {
    const usage = contextWindow.current_usage || {};
    const sessionId = data.session_id || 'unknown';

    const inputTokens = usage.input_tokens || 0;
    const cacheCreated = usage.cache_creation_input_tokens || 0;
    const cacheRead = usage.cache_read_input_tokens || 0;
    const totalTokens = inputTokens + cacheCreated + cacheRead;

    // Hit ratio = tokens served from cache over all input-side tokens.
    const hitRatio = totalTokens > 0 ? cacheRead / totalTokens : 0;

    const claudeDir = path.join(os.homedir(), '.claude');
    const historyPath = path.join(claudeDir, 'cache-history.jsonl');

    // Dedup: only write if this session differs from the last written record.
    if (fs.existsSync(historyPath)) {
      const lines = fs.readFileSync(historyPath, 'utf8').trimEnd().split('\n');
      if (lines.length > 0) {
        let previous = null;
        try {
          previous = JSON.parse(lines[lines.length - 1]);
        } catch (e) {
          // Ignore parse errors on last line
        }
        if (previous && previous.session_id === sessionId) {
          return;
        }
      }
    }

    const record = {
      timestamp: new Date().toISOString(),
      session_id: sessionId,
      // Round to 4 decimal places to keep the JSONL compact.
      cache_hit_ratio: Math.round(hitRatio * 10000) / 10000,
      total_tokens: totalTokens,
      agent_breakdown: {
        agent_role: process.env.DEEPFLOW_AGENT_ROLE || 'orchestrator',
        task_id: process.env.DEEPFLOW_TASK_ID || null,
        model: data.model?.id || data.model?.display_name || 'unknown',
      },
    };

    // Ensure ~/.claude directory exists before appending.
    if (!fs.existsSync(claudeDir)) {
      fs.mkdirSync(claudeDir, { recursive: true });
    }
    fs.appendFileSync(historyPath, JSON.stringify(record) + '\n');
  } catch (e) {
    // Fail silently
  }
}
@@ -0,0 +1,41 @@
1
#!/usr/bin/env node
/**
 * Spike hook: capture raw PostToolUse stdin payload.
 * Writes the raw JSON to <tmpdir>/df-posttooluse-payload.json for inspection
 * and appends a one-line summary to <tmpdir>/df-posttooluse-summary.jsonl.
 * Safe to install temporarily — exits cleanly (code 0) always.
 *
 * Usage in ~/.claude/settings.json:
 *   "PostToolUse": [{ "hooks": [{ "type": "command", "command": "node /path/to/df-tool-usage-spike.js" }] }]
 */

'use strict';

const fs = require('fs');
const os = require('os');
const path = require('path');

// Use the platform temp directory instead of a hardcoded "/tmp": /tmp does
// not exist on Windows, where every write would have failed silently.
const PAYLOAD_PATH = path.join(os.tmpdir(), 'df-posttooluse-payload.json');
const SUMMARY_PATH = path.join(os.tmpdir(), 'df-posttooluse-summary.jsonl');

let raw = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', (chunk) => { raw += chunk; });
process.stdin.on('end', () => {
  try {
    // Write the raw payload for inspection (even if it is not valid JSON).
    fs.writeFileSync(PAYLOAD_PATH, raw);

    // Also append a minimal summary line for quick review.
    const data = JSON.parse(raw);
    const summary = {
      hook_event_name: data.hook_event_name,
      tool_name: data.tool_name,
      tool_use_id: data.tool_use_id,
      session_id: data.session_id,
      cwd: data.cwd,
      permission_mode: data.permission_mode,
      tool_input_keys: data.tool_input ? Object.keys(data.tool_input) : [],
      tool_response_keys: data.tool_response ? Object.keys(data.tool_response) : [],
      transcript_path: data.transcript_path,
    };
    fs.appendFileSync(SUMMARY_PATH, JSON.stringify(summary) + '\n');
  } catch (_e) {
    // Fail silently — never break tool execution
  }
  process.exit(0);
});
@@ -0,0 +1,86 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * deepflow tool usage logger
4
+ * Logs every PostToolUse event to ~/.claude/tool-usage.jsonl for token instrumentation.
5
+ * Exits silently (code 0) on all errors — never breaks tool execution.
6
+ *
7
+ * Output record fields (REQ-2):
8
+ * timestamp, session_id, tool_name, command, output_size_est_tokens,
9
+ * project, phase, task_id
10
+ */
11
+
12
+ 'use strict';
13
+
14
+ const fs = require('fs');
15
+ const path = require('path');
16
+ const os = require('os');
17
+
18
+ const TOOL_USAGE_LOG = path.join(os.homedir(), '.claude', 'tool-usage.jsonl');
19
+
20
/**
 * Infer the deepflow phase from the working directory.
 *
 * Paths inside `.deepflow/worktrees/<name>/` map by worktree name:
 * "verify" → "verify"; every other worktree (including "execute" and
 * task-specific names like "T3-feature") → "execute".
 * Anything outside a worktree → "manual".
 *
 * @param {string|null|undefined} cwd - Working directory of the tool event.
 * @returns {string} "manual", "execute", or "verify".
 */
function inferPhase(cwd) {
  const m = cwd ? cwd.match(/\.deepflow[/\\]worktrees[/\\]([^/\\]+)/) : null;
  if (!m) return 'manual';
  return m[1].toLowerCase() === 'verify' ? 'verify' : 'execute';
}
36
+
37
/**
 * Extract a task id from the worktree directory name in cwd.
 *
 * Worktree names use a T{n} prefix, e.g. "T3-feature" → "T3", "t12" → "T12"
 * (always uppercased). Returns null when cwd is not inside
 * `.deepflow/worktrees/` or the worktree name has no task prefix.
 *
 * @param {string|null|undefined} cwd - Working directory of the tool event.
 * @returns {string|null} Uppercased task id, or null.
 */
function extractTaskId(cwd) {
  if (!cwd) return null;
  const worktree = /\.deepflow[/\\]worktrees[/\\]([^/\\]+)/.exec(cwd);
  if (!worktree) return null;
  const task = /^(T\d+)/i.exec(worktree[1]);
  return task ? task[1].toUpperCase() : null;
}
50
+
51
// Read all stdin, then log one record for the PostToolUse event.
// Every failure is swallowed and the process always exits 0, so a broken
// payload can never block tool execution.
let raw = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => { raw += chunk; });
process.stdin.on('end', () => {
  try {
    const data = JSON.parse(raw);

    const toolName = data.tool_name || null;
    const toolResponse = data.tool_response;
    const cwd = data.cwd || '';

    // BUG FIX: JSON.stringify(undefined) returns undefined (not a string),
    // so `.length` threw for events with no tool_response and the record was
    // silently dropped. Normalize to null so such events are still logged.
    const responseJson = JSON.stringify(toolResponse === undefined ? null : toolResponse);

    const record = {
      timestamp: new Date().toISOString(),
      session_id: data.session_id || null,
      tool_name: toolName,
      // Only Bash carries a meaningful command string.
      command: (toolName === 'Bash' && data.tool_input && data.tool_input.command != null)
        ? data.tool_input.command
        : null,
      // Rough estimate: ~4 characters per token.
      output_size_est_tokens: Math.ceil(responseJson.length / 4),
      project: cwd ? path.basename(cwd) : null,
      phase: inferPhase(cwd),
      task_id: extractTaskId(cwd),
    };

    const logDir = path.dirname(TOOL_USAGE_LOG);
    if (!fs.existsSync(logDir)) {
      fs.mkdirSync(logDir, { recursive: true });
    }

    fs.appendFileSync(TOOL_USAGE_LOG, JSON.stringify(record) + '\n');
  } catch (_e) {
    // Fail silently — never break tool execution
  }
  process.exit(0);
});
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "deepflow",
3
- "version": "0.1.87",
3
+ "version": "0.1.88",
4
4
  "description": "Doing reveals what thinking can't predict — spec-driven iterative development for Claude Code",
5
5
  "keywords": [
6
6
  "claude",