deepflow 0.1.102 → 0.1.103
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/install.js +55 -9
- package/bin/install.test.js +214 -0
- package/hooks/df-command-usage.js +287 -0
- package/hooks/df-command-usage.test.js +1019 -0
- package/hooks/df-subagent-registry.js +33 -14
- package/hooks/df-tool-usage.js +8 -0
- package/hooks/df-tool-usage.test.js +200 -0
- package/package.json +1 -1
|
@@ -0,0 +1,1019 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Tests for hooks/df-command-usage.js
|
|
3
|
+
*
|
|
4
|
+
* Tests the command usage tracker hook that tracks df:* command invocations
|
|
5
|
+
* with token deltas and tool call counts across PreToolUse, PostToolUse,
|
|
6
|
+
* and SessionEnd events.
|
|
7
|
+
*
|
|
8
|
+
* Uses Node.js built-in node:test to avoid adding dependencies.
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
'use strict';
|
|
12
|
+
|
|
13
|
+
const { test, describe, beforeEach, afterEach } = require('node:test');
|
|
14
|
+
const assert = require('node:assert/strict');
|
|
15
|
+
const fs = require('node:fs');
|
|
16
|
+
const path = require('node:path');
|
|
17
|
+
const os = require('node:os');
|
|
18
|
+
const { execFileSync } = require('node:child_process');
|
|
19
|
+
|
|
20
|
+
// ---------------------------------------------------------------------------
|
|
21
|
+
// Helpers
|
|
22
|
+
// ---------------------------------------------------------------------------
|
|
23
|
+
|
|
24
|
+
// Absolute path to the hook under test; every test spawns it as a child process.
const HOOK_PATH = path.resolve(__dirname, 'df-command-usage.js');
|
|
25
|
+
|
|
26
|
+
/**
 * Create a unique scratch directory for a single test run.
 * @returns {string} absolute path of the freshly created directory
 */
function makeTmpDir() {
  const prefix = path.join(os.tmpdir(), 'df-command-usage-test-');
  return fs.mkdtempSync(prefix);
}
|
|
29
|
+
|
|
30
|
+
/**
 * Recursively delete a directory tree.
 *
 * `force: true` makes fs.rmSync silently ignore missing paths, so the former
 * `fs.existsSync` pre-check was redundant and a TOCTOU race; it has been
 * removed. Behavior for callers is unchanged: existing trees are deleted,
 * missing paths are a no-op.
 * @param {string} dir - directory (or file) path to remove
 */
function rmrf(dir) {
  fs.rmSync(dir, { recursive: true, force: true });
}
|
|
35
|
+
|
|
36
|
+
/**
 * Run the command usage hook as a child process with JSON piped to stdin.
 * `input` may be a pre-serialized string or a plain object.
 * Sets CLAUDE_HOOK_EVENT so the hook dispatches to the matching handler.
 * Never throws: a non-zero exit is converted into the returned result.
 * @returns {{ stdout: string, stderr: string, code: number }}
 */
function runHook(input, { event, cwd, env: extraEnv } = {}) {
  const payload = typeof input === 'string' ? input : JSON.stringify(input);
  const childEnv = { ...process.env, ...extraEnv };
  if (event) {
    childEnv.CLAUDE_HOOK_EVENT = event;
  }
  const options = {
    input: payload,
    cwd: cwd || os.tmpdir(),
    encoding: 'utf8',
    timeout: 5000,
    env: childEnv,
  };
  try {
    const out = execFileSync(process.execPath, [HOOK_PATH], options);
    return { stdout: out, stderr: '', code: 0 };
  } catch (err) {
    const exitCode = err.status ?? 1;
    return { stdout: err.stdout || '', stderr: err.stderr || '', code: exitCode };
  }
}
|
|
66
|
+
|
|
67
|
+
/**
 * Read the active-command.json marker file from `<tmpDir>/.deepflow/`.
 * @returns {object|null} parsed marker, or null when the file is absent
 */
function readMarker(tmpDir) {
  const file = path.join(tmpDir, '.deepflow', 'active-command.json');
  if (!fs.existsSync(file)) {
    return null;
  }
  const raw = fs.readFileSync(file, 'utf8');
  return JSON.parse(raw);
}
|
|
75
|
+
|
|
76
|
+
/**
 * Read command-usage.jsonl from `<tmpDir>/.deepflow/` and return parsed records.
 * A missing or empty file yields an empty array.
 * @returns {object[]} one object per JSONL line
 */
function readUsage(tmpDir) {
  const file = path.join(tmpDir, '.deepflow', 'command-usage.jsonl');
  if (!fs.existsSync(file)) {
    return [];
  }
  const body = fs.readFileSync(file, 'utf8').trim();
  if (!body) {
    return [];
  }
  const records = [];
  for (const line of body.split('\n')) {
    records.push(JSON.parse(line));
  }
  return records;
}
|
|
86
|
+
|
|
87
|
+
/**
 * Serialize `records` as JSONL (one JSON object per line, trailing newline)
 * into `<tmpDir>/.deepflow/token-history.jsonl`, creating the directory
 * when needed.
 */
function writeTokenHistory(tmpDir, records) {
  const dir = path.join(tmpDir, '.deepflow');
  fs.mkdirSync(dir, { recursive: true });
  const lines = records.map(record => JSON.stringify(record));
  fs.writeFileSync(path.join(dir, 'token-history.jsonl'), lines.join('\n') + '\n');
}
|
|
96
|
+
|
|
97
|
+
/**
 * Write `entries` as JSONL to `filePath` (trailing newline included),
 * creating any missing parent directories first.
 */
function writeTranscript(filePath, entries) {
  fs.mkdirSync(path.dirname(filePath), { recursive: true });
  const lines = entries.map(entry => JSON.stringify(entry));
  fs.writeFileSync(filePath, lines.join('\n') + '\n');
}
|
|
105
|
+
|
|
106
|
+
/**
 * Write an active-command.json marker (pretty-printed, 2-space indent)
 * directly into `<tmpDir>/.deepflow/`, creating the directory when needed.
 */
function writeMarker(tmpDir, marker) {
  const dir = path.join(tmpDir, '.deepflow');
  fs.mkdirSync(dir, { recursive: true });
  const body = JSON.stringify(marker, null, 2);
  fs.writeFileSync(path.join(dir, 'active-command.json'), body);
}
|
|
114
|
+
|
|
115
|
+
// ---------------------------------------------------------------------------
|
|
116
|
+
// 1. PreToolUse — opening a new command marker
|
|
117
|
+
// ---------------------------------------------------------------------------
|
|
118
|
+
|
|
119
|
+
// PreToolUse handler: a `Skill` call whose skill name starts with "df:" opens
// an active-command.json marker under <cwd>/.deepflow/. Non-Skill tools and
// non-df: skills must not create a marker.
describe('df-command-usage — PreToolUse opens marker', () => {
  let tmpDir;

  beforeEach(() => {
    // Fresh project dir with an empty .deepflow/ per test.
    tmpDir = makeTmpDir();
    fs.mkdirSync(path.join(tmpDir, '.deepflow'), { recursive: true });
  });

  afterEach(() => {
    rmrf(tmpDir);
  });

  test('creates marker for df:plan Skill call', () => {
    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      session_id: 'sess-123',
      cwd: tmpDir,
    };
    const result = runHook(payload, { event: 'PreToolUse' });
    assert.equal(result.code, 0);

    const marker = readMarker(tmpDir);
    assert.ok(marker, 'marker should exist');
    assert.equal(marker.command, 'df:plan');
    assert.equal(marker.session_id, 'sess-123');
    assert.equal(marker.tool_calls_count, 0);
    assert.ok(marker.started_at, 'started_at should be set');
  });

  test('creates marker for df:execute Skill call', () => {
    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:execute' },
      session_id: 'sess-456',
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PreToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker.command, 'df:execute');
    assert.equal(marker.session_id, 'sess-456');
  });

  test('ignores non-Skill tool calls', () => {
    const payload = {
      tool_name: 'Read',
      tool_input: { file_path: '/some/file.js' },
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PreToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker, null, 'no marker should be created for non-Skill tools');
  });

  test('ignores Skill calls without df: prefix', () => {
    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'browse-fetch' },
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PreToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker, null, 'no marker for non-df: skills');
  });

  test('marker started_at is ISO-8601 format', () => {
    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:verify' },
      session_id: 'sess-ts',
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PreToolUse' });

    const marker = readMarker(tmpDir);
    // Matches Date.prototype.toISOString output (millisecond precision, Z suffix).
    const isoRegex = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/;
    assert.match(marker.started_at, isoRegex, 'started_at should be ISO-8601');
  });

  test('marker has token_snapshot with expected fields', () => {
    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      session_id: 'sess-tok',
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PreToolUse' });

    const marker = readMarker(tmpDir);
    assert.ok(marker.token_snapshot, 'token_snapshot should exist');
    assert.equal(typeof marker.token_snapshot.input_tokens, 'number');
    assert.equal(typeof marker.token_snapshot.cache_read_input_tokens, 'number');
    assert.equal(typeof marker.token_snapshot.cache_creation_input_tokens, 'number');
  });

  test('reads token snapshot from token-history.jsonl', () => {
    // The snapshot should reflect the LAST record in the history file.
    writeTokenHistory(tmpDir, [
      { input_tokens: 1000, cache_read_input_tokens: 500, cache_creation_input_tokens: 200 },
      { input_tokens: 2000, cache_read_input_tokens: 800, cache_creation_input_tokens: 300 },
    ]);

    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:discover' },
      session_id: 'sess-tok2',
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PreToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker.token_snapshot.input_tokens, 2000);
    assert.equal(marker.token_snapshot.cache_read_input_tokens, 800);
    assert.equal(marker.token_snapshot.cache_creation_input_tokens, 300);
  });

  test('defaults token snapshot to zeros when no token-history exists', () => {
    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      session_id: 'sess-notok',
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PreToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker.token_snapshot.input_tokens, 0);
    assert.equal(marker.token_snapshot.cache_read_input_tokens, 0);
    assert.equal(marker.token_snapshot.cache_creation_input_tokens, 0);
  });

  test('uses transcript_path from payload', () => {
    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      session_id: 'sess-tp',
      cwd: tmpDir,
      transcript_path: '/tmp/some-transcript.jsonl',
    };
    runHook(payload, { event: 'PreToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker.transcript_path, '/tmp/some-transcript.jsonl');
  });

  test('builds transcript_path from session_storage_path', () => {
    // When transcript_path is absent, the hook derives it from
    // session_storage_path + 'transcript.jsonl'.
    const storagePath = path.join(tmpDir, 'session-storage');
    fs.mkdirSync(storagePath, { recursive: true });

    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      session_id: 'sess-ssp',
      cwd: tmpDir,
      session_storage_path: storagePath,
    };
    runHook(payload, { event: 'PreToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker.transcript_path, path.join(storagePath, 'transcript.jsonl'));
  });

  test('uses CLAUDE_SESSION_ID when session_id not in payload', () => {
    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PreToolUse', env: { CLAUDE_SESSION_ID: 'env-sess-42' } });

    const marker = readMarker(tmpDir);
    assert.equal(marker.session_id, 'env-sess-42');
  });

  test('defaults session_id to "unknown" when not available', () => {
    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      cwd: tmpDir,
    };
    // Clear CLAUDE_SESSION_ID from env
    runHook(payload, { event: 'PreToolUse', env: { CLAUDE_SESSION_ID: '' } });

    const marker = readMarker(tmpDir);
    // Either 'unknown' or empty string depending on fallback
    assert.ok(marker.session_id !== undefined, 'session_id should exist');
  });
});
|
|
309
|
+
|
|
310
|
+
// ---------------------------------------------------------------------------
|
|
311
|
+
// 2. PreToolUse — close-on-next (switching commands)
|
|
312
|
+
// ---------------------------------------------------------------------------
|
|
313
|
+
|
|
314
|
+
// PreToolUse "close-on-next": starting a second df:* command while a marker is
// already open must finalize the first command into command-usage.jsonl and
// replace the marker with the new command.
describe('df-command-usage — PreToolUse close-on-next', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = makeTmpDir();
    fs.mkdirSync(path.join(tmpDir, '.deepflow'), { recursive: true });
  });

  afterEach(() => {
    rmrf(tmpDir);
  });

  test('closes previous command and opens new one when marker exists', () => {
    // First command
    const payload1 = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      session_id: 'sess-1',
      cwd: tmpDir,
    };
    runHook(payload1, { event: 'PreToolUse' });

    // Second command — should close first and open second
    const payload2 = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:execute' },
      session_id: 'sess-1',
      cwd: tmpDir,
    };
    runHook(payload2, { event: 'PreToolUse' });

    // Verify first command was recorded
    const records = readUsage(tmpDir);
    assert.equal(records.length, 1, 'first command should be closed and recorded');
    assert.equal(records[0].command, 'df:plan');

    // Verify new marker is for the second command
    const marker = readMarker(tmpDir);
    assert.equal(marker.command, 'df:execute');
  });

  test('closed command record has ended_at', () => {
    const payload1 = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      session_id: 'sess-1',
      cwd: tmpDir,
    };
    runHook(payload1, { event: 'PreToolUse' });

    const payload2 = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:verify' },
      session_id: 'sess-1',
      cwd: tmpDir,
    };
    runHook(payload2, { event: 'PreToolUse' });

    const records = readUsage(tmpDir);
    assert.equal(records.length, 1);
    // ended_at must be a full ISO-8601 timestamp (toISOString shape).
    const isoRegex = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/;
    assert.match(records[0].ended_at, isoRegex);
  });

  test('computes token deltas when closing a command', () => {
    // Write initial token history
    writeTokenHistory(tmpDir, [
      { input_tokens: 1000, cache_read_input_tokens: 500, cache_creation_input_tokens: 100 },
    ]);

    const payload1 = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:plan' },
      session_id: 'sess-1',
      cwd: tmpDir,
    };
    runHook(payload1, { event: 'PreToolUse' });

    // Update token history to simulate token usage
    writeTokenHistory(tmpDir, [
      { input_tokens: 1000, cache_read_input_tokens: 500, cache_creation_input_tokens: 100 },
      { input_tokens: 3000, cache_read_input_tokens: 1200, cache_creation_input_tokens: 400 },
    ]);

    const payload2 = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:execute' },
      session_id: 'sess-1',
      cwd: tmpDir,
    };
    runHook(payload2, { event: 'PreToolUse' });

    // Deltas = last-history-record minus the snapshot captured at open time.
    const records = readUsage(tmpDir);
    assert.equal(records.length, 1);
    assert.equal(records[0].input_tokens_delta, 2000);
    assert.equal(records[0].cache_read_delta, 700);
    assert.equal(records[0].cache_creation_delta, 300);
  });
});
|
|
413
|
+
|
|
414
|
+
// ---------------------------------------------------------------------------
|
|
415
|
+
// 3. PostToolUse — incrementing tool_calls_count
|
|
416
|
+
// ---------------------------------------------------------------------------
|
|
417
|
+
|
|
418
|
+
// PostToolUse handler: each completed tool call bumps tool_calls_count on the
// active marker — except df:* Skill calls (those open their own markers via
// PreToolUse and must not be double-counted).
describe('df-command-usage — PostToolUse increments tool count', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = makeTmpDir();
    fs.mkdirSync(path.join(tmpDir, '.deepflow'), { recursive: true });
  });

  afterEach(() => {
    rmrf(tmpDir);
  });

  test('increments tool_calls_count on active marker', () => {
    writeMarker(tmpDir, {
      command: 'df:plan',
      session_id: 'sess-1',
      started_at: new Date().toISOString(),
      token_snapshot: { input_tokens: 0, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
      tool_calls_count: 0,
    });

    const payload = {
      tool_name: 'Read',
      tool_input: { file_path: '/some/file.js' },
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PostToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker.tool_calls_count, 1);
  });

  test('increments multiple times', () => {
    writeMarker(tmpDir, {
      command: 'df:plan',
      session_id: 'sess-1',
      started_at: new Date().toISOString(),
      token_snapshot: { input_tokens: 0, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
      tool_calls_count: 0,
    });

    // Three separate hook invocations — the count must persist across runs.
    for (let i = 0; i < 3; i++) {
      runHook({ tool_name: 'Bash', tool_input: { command: 'ls' }, cwd: tmpDir }, { event: 'PostToolUse' });
    }

    const marker = readMarker(tmpDir);
    assert.equal(marker.tool_calls_count, 3);
  });

  test('does not count df:* Skill calls (avoids double-counting)', () => {
    writeMarker(tmpDir, {
      command: 'df:plan',
      session_id: 'sess-1',
      started_at: new Date().toISOString(),
      token_snapshot: { input_tokens: 0, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
      tool_calls_count: 0,
    });

    const dfSkillPayload = {
      tool_name: 'Skill',
      tool_input: { skill: 'df:execute' },
      cwd: tmpDir,
    };
    runHook(dfSkillPayload, { event: 'PostToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker.tool_calls_count, 0, 'df:* Skill calls should not increment count');
  });

  test('counts non-df: Skill calls', () => {
    writeMarker(tmpDir, {
      command: 'df:plan',
      session_id: 'sess-1',
      started_at: new Date().toISOString(),
      token_snapshot: { input_tokens: 0, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
      tool_calls_count: 0,
    });

    const payload = {
      tool_name: 'Skill',
      tool_input: { skill: 'browse-fetch' },
      cwd: tmpDir,
    };
    runHook(payload, { event: 'PostToolUse' });

    const marker = readMarker(tmpDir);
    assert.equal(marker.tool_calls_count, 1, 'non-df: Skill calls should increment count');
  });

  test('does nothing when no marker exists', () => {
    const payload = {
      tool_name: 'Read',
      tool_input: { file_path: '/some/file.js' },
      cwd: tmpDir,
    };
    const result = runHook(payload, { event: 'PostToolUse' });
    assert.equal(result.code, 0);

    const marker = readMarker(tmpDir);
    assert.equal(marker, null, 'no marker should be created by PostToolUse');
  });
});
|
|
520
|
+
|
|
521
|
+
// ---------------------------------------------------------------------------
|
|
522
|
+
// 4. SessionEnd — closing the last command
|
|
523
|
+
// ---------------------------------------------------------------------------
|
|
524
|
+
|
|
525
|
+
// SessionEnd handler: finalize whatever marker is open — write a usage record
// with token deltas, then delete the marker. Must be a safe no-op with no
// marker, and must still clean up a corrupt marker.
describe('df-command-usage — SessionEnd closes active command', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = makeTmpDir();
    fs.mkdirSync(path.join(tmpDir, '.deepflow'), { recursive: true });
  });

  afterEach(() => {
    rmrf(tmpDir);
  });

  test('closes active marker and writes usage record', () => {
    writeMarker(tmpDir, {
      command: 'df:execute',
      session_id: 'sess-end',
      started_at: '2025-01-01T00:00:00.000Z',
      token_snapshot: { input_tokens: 500, cache_read_input_tokens: 200, cache_creation_input_tokens: 50 },
      tool_calls_count: 7,
    });

    // Write updated token history
    writeTokenHistory(tmpDir, [
      { input_tokens: 1500, cache_read_input_tokens: 600, cache_creation_input_tokens: 150 },
    ]);

    runHook({ cwd: tmpDir }, { event: 'SessionEnd' });

    // Marker should be deleted
    const marker = readMarker(tmpDir);
    assert.equal(marker, null, 'marker should be deleted after SessionEnd');

    // Usage record should be written
    const records = readUsage(tmpDir);
    assert.equal(records.length, 1);
    assert.equal(records[0].command, 'df:execute');
    assert.equal(records[0].session_id, 'sess-end');
    assert.equal(records[0].started_at, '2025-01-01T00:00:00.000Z');
    assert.equal(records[0].tool_calls_count, 7);
    // Deltas: history (1500/600/150) minus snapshot (500/200/50).
    assert.equal(records[0].input_tokens_delta, 1000);
    assert.equal(records[0].cache_read_delta, 400);
    assert.equal(records[0].cache_creation_delta, 100);
  });

  test('does nothing when no marker exists', () => {
    const result = runHook({ cwd: tmpDir }, { event: 'SessionEnd' });
    assert.equal(result.code, 0);

    const records = readUsage(tmpDir);
    assert.equal(records.length, 0, 'no usage record should be written without a marker');
  });

  test('deletes marker even if it contains invalid JSON', () => {
    const markerPath = path.join(tmpDir, '.deepflow', 'active-command.json');
    fs.writeFileSync(markerPath, 'not valid json{{{');

    const result = runHook({ cwd: tmpDir }, { event: 'SessionEnd' });
    assert.equal(result.code, 0);
    assert.ok(!fs.existsSync(markerPath), 'invalid marker should be deleted');
  });
});
|
|
586
|
+
|
|
587
|
+
// ---------------------------------------------------------------------------
|
|
588
|
+
// 5. Full lifecycle — open, increment, close
|
|
589
|
+
// ---------------------------------------------------------------------------
|
|
590
|
+
|
|
591
|
+
// End-to-end flows across all three events: open (PreToolUse), increment
// (PostToolUse), close (SessionEnd or a subsequent PreToolUse).
describe('df-command-usage — full lifecycle', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = makeTmpDir();
    fs.mkdirSync(path.join(tmpDir, '.deepflow'), { recursive: true });
  });

  afterEach(() => {
    rmrf(tmpDir);
  });

  test('open → increment → session end produces correct record', () => {
    // Open command
    runHook(
      { tool_name: 'Skill', tool_input: { skill: 'df:plan' }, session_id: 'sess-life', cwd: tmpDir },
      { event: 'PreToolUse' }
    );

    // Several tool calls
    runHook({ tool_name: 'Read', tool_input: {}, cwd: tmpDir }, { event: 'PostToolUse' });
    runHook({ tool_name: 'Bash', tool_input: {}, cwd: tmpDir }, { event: 'PostToolUse' });
    runHook({ tool_name: 'Grep', tool_input: {}, cwd: tmpDir }, { event: 'PostToolUse' });

    // End session
    runHook({ cwd: tmpDir }, { event: 'SessionEnd' });

    const records = readUsage(tmpDir);
    assert.equal(records.length, 1);
    assert.equal(records[0].command, 'df:plan');
    assert.equal(records[0].session_id, 'sess-life');
    assert.equal(records[0].tool_calls_count, 3);
    assert.ok(records[0].started_at);
    assert.ok(records[0].ended_at);
  });

  test('open → open (different command) → session end produces two records', () => {
    // Open first command
    runHook(
      { tool_name: 'Skill', tool_input: { skill: 'df:plan' }, session_id: 'sess-multi', cwd: tmpDir },
      { event: 'PreToolUse' }
    );

    // Some tool calls
    runHook({ tool_name: 'Read', tool_input: {}, cwd: tmpDir }, { event: 'PostToolUse' });

    // Open second command (closes first)
    runHook(
      { tool_name: 'Skill', tool_input: { skill: 'df:execute' }, session_id: 'sess-multi', cwd: tmpDir },
      { event: 'PreToolUse' }
    );

    // More tool calls
    runHook({ tool_name: 'Bash', tool_input: {}, cwd: tmpDir }, { event: 'PostToolUse' });
    runHook({ tool_name: 'Edit', tool_input: {}, cwd: tmpDir }, { event: 'PostToolUse' });

    // End session
    runHook({ cwd: tmpDir }, { event: 'SessionEnd' });

    // Each command's count covers only the tool calls made while it was active.
    const records = readUsage(tmpDir);
    assert.equal(records.length, 2);
    assert.equal(records[0].command, 'df:plan');
    assert.equal(records[0].tool_calls_count, 1);
    assert.equal(records[1].command, 'df:execute');
    assert.equal(records[1].tool_calls_count, 2);
  });
});
|
|
658
|
+
|
|
659
|
+
// ---------------------------------------------------------------------------
|
|
660
|
+
// 6. Token delta edge cases
|
|
661
|
+
// ---------------------------------------------------------------------------
|
|
662
|
+
|
|
663
|
+
// Token delta edge cases: deltas must never go negative (history may be reset
// or rotated mid-command) and must default to zero on empty history.
describe('df-command-usage — token delta edge cases', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = makeTmpDir();
    fs.mkdirSync(path.join(tmpDir, '.deepflow'), { recursive: true });
  });

  afterEach(() => {
    rmrf(tmpDir);
  });

  test('token deltas are clamped to zero (never negative)', () => {
    // Start with high token count
    writeTokenHistory(tmpDir, [
      { input_tokens: 5000, cache_read_input_tokens: 3000, cache_creation_input_tokens: 1000 },
    ]);

    runHook(
      { tool_name: 'Skill', tool_input: { skill: 'df:plan' }, session_id: 's1', cwd: tmpDir },
      { event: 'PreToolUse' }
    );

    // Replace with lower token count (simulating reset or file rotation)
    writeTokenHistory(tmpDir, [
      { input_tokens: 100, cache_read_input_tokens: 50, cache_creation_input_tokens: 10 },
    ]);

    runHook({ cwd: tmpDir }, { event: 'SessionEnd' });

    const records = readUsage(tmpDir);
    assert.equal(records.length, 1);
    assert.equal(records[0].input_tokens_delta, 0, 'negative delta should be clamped to 0');
    assert.equal(records[0].cache_read_delta, 0);
    assert.equal(records[0].cache_creation_delta, 0);
  });

  test('token deltas default to zero when token-history is empty', () => {
    // Write empty token history
    fs.writeFileSync(path.join(tmpDir, '.deepflow', 'token-history.jsonl'), '');

    runHook(
      { tool_name: 'Skill', tool_input: { skill: 'df:plan' }, session_id: 's1', cwd: tmpDir },
      { event: 'PreToolUse' }
    );

    runHook({ cwd: tmpDir }, { event: 'SessionEnd' });

    const records = readUsage(tmpDir);
    assert.equal(records.length, 1);
    assert.equal(records[0].input_tokens_delta, 0);
    assert.equal(records[0].cache_read_delta, 0);
    assert.equal(records[0].cache_creation_delta, 0);
  });
});
|
|
718
|
+
|
|
719
|
+
// ---------------------------------------------------------------------------
|
|
720
|
+
// 7. Transcript output token parsing
|
|
721
|
+
// ---------------------------------------------------------------------------
|
|
722
|
+
|
|
723
|
+
describe('df-command-usage — transcript output token parsing', () => {
|
|
724
|
+
let tmpDir;
|
|
725
|
+
|
|
726
|
+
beforeEach(() => {
|
|
727
|
+
tmpDir = makeTmpDir();
|
|
728
|
+
fs.mkdirSync(path.join(tmpDir, '.deepflow'), { recursive: true });
|
|
729
|
+
});
|
|
730
|
+
|
|
731
|
+
afterEach(() => {
|
|
732
|
+
rmrf(tmpDir);
|
|
733
|
+
});
|
|
734
|
+
|
|
735
|
+
test('accumulates output_tokens from transcript entries after offset', () => {
|
|
736
|
+
const transcriptPath = path.join(tmpDir, 'transcript.jsonl');
|
|
737
|
+
|
|
738
|
+
// Write transcript with some entries before and after offset
|
|
739
|
+
const preEntries = [
|
|
740
|
+
{ message: { usage: { output_tokens: 100 } } },
|
|
741
|
+
{ message: { usage: { output_tokens: 200 } } },
|
|
742
|
+
];
|
|
743
|
+
const postEntries = [
|
|
744
|
+
{ message: { usage: { output_tokens: 300 } } },
|
|
745
|
+
{ message: { usage: { output_tokens: 400 } } },
|
|
746
|
+
];
|
|
747
|
+
|
|
748
|
+
// Write pre-entries first to get offset
|
|
749
|
+
writeTranscript(transcriptPath, preEntries);
|
|
750
|
+
const offset = fs.statSync(transcriptPath).size;
|
|
751
|
+
|
|
752
|
+
// Append post-entries
|
|
753
|
+
const postContent = postEntries.map(e => JSON.stringify(e)).join('\n') + '\n';
|
|
754
|
+
fs.appendFileSync(transcriptPath, postContent);
|
|
755
|
+
|
|
756
|
+
// Set up marker with transcript info
|
|
757
|
+
writeMarker(tmpDir, {
|
|
758
|
+
command: 'df:plan',
|
|
759
|
+
session_id: 'sess-txn',
|
|
760
|
+
started_at: '2025-01-01T00:00:00.000Z',
|
|
761
|
+
token_snapshot: { input_tokens: 0, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
|
|
762
|
+
tool_calls_count: 0,
|
|
763
|
+
transcript_path: transcriptPath,
|
|
764
|
+
transcript_offset: offset,
|
|
765
|
+
});
|
|
766
|
+
|
|
767
|
+
runHook({ cwd: tmpDir }, { event: 'SessionEnd' });
|
|
768
|
+
|
|
769
|
+
const records = readUsage(tmpDir);
|
|
770
|
+
assert.equal(records.length, 1);
|
|
771
|
+
assert.equal(records[0].output_tokens, 700, 'should sum output_tokens from entries after offset');
|
|
772
|
+
});
|
|
773
|
+
|
|
774
|
+
test('handles transcript with usage at top level', () => {
|
|
775
|
+
const transcriptPath = path.join(tmpDir, 'transcript.jsonl');
|
|
776
|
+
writeTranscript(transcriptPath, [
|
|
777
|
+
{ usage: { output_tokens: 150 } },
|
|
778
|
+
{ usage: { output_tokens: 250 } },
|
|
779
|
+
]);
|
|
780
|
+
|
|
781
|
+
writeMarker(tmpDir, {
|
|
782
|
+
command: 'df:plan',
|
|
783
|
+
session_id: 'sess-top',
|
|
784
|
+
started_at: '2025-01-01T00:00:00.000Z',
|
|
785
|
+
token_snapshot: { input_tokens: 0, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
|
|
786
|
+
tool_calls_count: 0,
|
|
787
|
+
transcript_path: transcriptPath,
|
|
788
|
+
transcript_offset: 0,
|
|
789
|
+
});
|
|
790
|
+
|
|
791
|
+
runHook({ cwd: tmpDir }, { event: 'SessionEnd' });
|
|
792
|
+
|
|
793
|
+
const records = readUsage(tmpDir);
|
|
794
|
+
assert.equal(records.length, 1);
|
|
795
|
+
assert.equal(records[0].output_tokens, 400);
|
|
796
|
+
});
|
|
797
|
+
|
|
798
|
+
test('returns 0 when transcript does not exist', () => {
|
|
799
|
+
writeMarker(tmpDir, {
|
|
800
|
+
command: 'df:plan',
|
|
801
|
+
session_id: 'sess-nofile',
|
|
802
|
+
started_at: '2025-01-01T00:00:00.000Z',
|
|
803
|
+
token_snapshot: { input_tokens: 0, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
|
|
804
|
+
tool_calls_count: 0,
|
|
805
|
+
transcript_path: '/nonexistent/transcript.jsonl',
|
|
806
|
+
transcript_offset: 0,
|
|
807
|
+
});
|
|
808
|
+
|
|
809
|
+
runHook({ cwd: tmpDir }, { event: 'SessionEnd' });
|
|
810
|
+
|
|
811
|
+
const records = readUsage(tmpDir);
|
|
812
|
+
assert.equal(records.length, 1);
|
|
813
|
+
assert.equal(records[0].output_tokens, 0);
|
|
814
|
+
});
|
|
815
|
+
|
|
816
|
+
test('returns 0 when transcript_path is empty', () => {
|
|
817
|
+
writeMarker(tmpDir, {
|
|
818
|
+
command: 'df:plan',
|
|
819
|
+
session_id: 'sess-empty-tp',
|
|
820
|
+
started_at: '2025-01-01T00:00:00.000Z',
|
|
821
|
+
token_snapshot: { input_tokens: 0, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
|
|
822
|
+
tool_calls_count: 0,
|
|
823
|
+
transcript_path: '',
|
|
824
|
+
transcript_offset: 0,
|
|
825
|
+
});
|
|
826
|
+
|
|
827
|
+
runHook({ cwd: tmpDir }, { event: 'SessionEnd' });
|
|
828
|
+
|
|
829
|
+
const records = readUsage(tmpDir);
|
|
830
|
+
assert.equal(records.length, 1);
|
|
831
|
+
assert.equal(records[0].output_tokens, 0);
|
|
832
|
+
});
|
|
833
|
+
|
|
834
|
+
test('skips malformed lines in transcript', () => {
|
|
835
|
+
const transcriptPath = path.join(tmpDir, 'transcript.jsonl');
|
|
836
|
+
const content = [
|
|
837
|
+
JSON.stringify({ message: { usage: { output_tokens: 100 } } }),
|
|
838
|
+
'not valid json',
|
|
839
|
+
JSON.stringify({ message: { usage: { output_tokens: 200 } } }),
|
|
840
|
+
].join('\n') + '\n';
|
|
841
|
+
fs.writeFileSync(transcriptPath, content);
|
|
842
|
+
|
|
843
|
+
writeMarker(tmpDir, {
|
|
844
|
+
command: 'df:plan',
|
|
845
|
+
session_id: 'sess-malformed',
|
|
846
|
+
started_at: '2025-01-01T00:00:00.000Z',
|
|
847
|
+
token_snapshot: { input_tokens: 0, cache_read_input_tokens: 0, cache_creation_input_tokens: 0 },
|
|
848
|
+
tool_calls_count: 0,
|
|
849
|
+
transcript_path: transcriptPath,
|
|
850
|
+
transcript_offset: 0,
|
|
851
|
+
});
|
|
852
|
+
|
|
853
|
+
runHook({ cwd: tmpDir }, { event: 'SessionEnd' });
|
|
854
|
+
|
|
855
|
+
const records = readUsage(tmpDir);
|
|
856
|
+
assert.equal(records.length, 1);
|
|
857
|
+
assert.equal(records[0].output_tokens, 300, 'should skip malformed lines and sum the rest');
|
|
858
|
+
});
|
|
859
|
+
});
|
|
860
|
+
|
|
861
|
+
// ---------------------------------------------------------------------------
|
|
862
|
+
// 8. findProjectDir — project directory resolution
|
|
863
|
+
// ---------------------------------------------------------------------------
|
|
864
|
+
|
|
865
|
+
describe('df-command-usage — findProjectDir resolution', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = makeTmpDir();
    fs.mkdirSync(path.join(tmpDir, '.deepflow'), { recursive: true });
  });

  afterEach(() => {
    rmrf(tmpDir);
  });

  test('uses cwd from payload', () => {
    runHook(
      {
        tool_name: 'Skill',
        tool_input: { skill: 'df:plan' },
        session_id: 'sess-cwd',
        cwd: tmpDir,
      },
      { event: 'PreToolUse' }
    );

    assert.ok(readMarker(tmpDir), 'marker should exist in cwd-specified directory');
  });

  test('uses workspace.current_dir from payload', () => {
    // No `cwd` key — resolution should fall through to workspace.current_dir.
    runHook(
      {
        tool_name: 'Skill',
        tool_input: { skill: 'df:plan' },
        session_id: 'sess-ws',
        workspace: { current_dir: tmpDir },
      },
      { event: 'PreToolUse' }
    );

    assert.ok(readMarker(tmpDir), 'marker should exist in workspace.current_dir directory');
  });

  test('falls back to CLAUDE_PROJECT_DIR env var', () => {
    // Neither cwd nor workspace supplied — only the env var points at tmpDir.
    runHook(
      {
        tool_name: 'Skill',
        tool_input: { skill: 'df:plan' },
        session_id: 'sess-env',
      },
      { event: 'PreToolUse', env: { CLAUDE_PROJECT_DIR: tmpDir } }
    );

    assert.ok(readMarker(tmpDir), 'marker should exist when using CLAUDE_PROJECT_DIR');
  });
});
|
|
915
|
+
|
|
916
|
+
// ---------------------------------------------------------------------------
|
|
917
|
+
// 9. Fail-open (REQ-8) — never break Claude Code
|
|
918
|
+
// ---------------------------------------------------------------------------
|
|
919
|
+
|
|
920
|
+
describe('df-command-usage — fail-open (REQ-8)', () => {
  // Every scenario below must exit 0 so the hook never blocks Claude Code.
  const expectExitZero = (result) => {
    assert.equal(result.code, 0);
  };

  test('exits 0 on invalid JSON stdin', () => {
    expectExitZero(runHook('not valid json{{{', { event: 'PreToolUse' }));
  });

  test('exits 0 on empty stdin', () => {
    expectExitZero(runHook('', { event: 'PreToolUse' }));
  });

  test('exits 0 on unknown event type', () => {
    expectExitZero(runHook({ cwd: '/tmp' }, { event: 'UnknownEvent' }));
  });

  test('exits 0 when no CLAUDE_HOOK_EVENT is set', () => {
    expectExitZero(runHook({ cwd: '/tmp' }, {}));
  });

  test('exits 0 when .deepflow directory cannot be created (no cwd)', () => {
    const result = runHook(
      { tool_name: 'Skill', tool_input: { skill: 'df:plan' }, cwd: '/nonexistent/path/12345' },
      { event: 'PreToolUse' }
    );
    expectExitZero(result);
  });
});
|
|
949
|
+
|
|
950
|
+
// ---------------------------------------------------------------------------
|
|
951
|
+
// 10. JSONL output format
|
|
952
|
+
// ---------------------------------------------------------------------------
|
|
953
|
+
|
|
954
|
+
describe('df-command-usage — JSONL output format', () => {
  let tmpDir;

  beforeEach(() => {
    tmpDir = makeTmpDir();
    fs.mkdirSync(path.join(tmpDir, '.deepflow'), { recursive: true });
  });

  afterEach(() => {
    rmrf(tmpDir);
  });

  // Fires a PreToolUse Skill invocation for `skill` rooted at tmpDir.
  function startCommand(skill, sessionId) {
    runHook(
      { tool_name: 'Skill', tool_input: { skill }, session_id: sessionId, cwd: tmpDir },
      { event: 'PreToolUse' }
    );
  }

  test('usage record has all expected fields', () => {
    startCommand('df:plan', 'sess-fields');
    runHook({ cwd: tmpDir }, { event: 'SessionEnd' });

    const records = readUsage(tmpDir);
    assert.equal(records.length, 1);

    const record = records[0];
    const expectedKeys = [
      'command', 'session_id', 'started_at', 'ended_at',
      'tool_calls_count', 'input_tokens_delta', 'output_tokens',
      'cache_read_delta', 'cache_creation_delta',
    ];
    for (const key of expectedKeys) {
      assert.ok(key in record, `record should have field "${key}"`);
    }
  });

  test('multiple records are each on their own line (valid JSONL)', () => {
    // Three command cycles
    for (const cmd of ['df:plan', 'df:execute', 'df:verify']) {
      startCommand(cmd, 'sess-jsonl');
    }
    runHook({ cwd: tmpDir }, { event: 'SessionEnd' });

    const usagePath = path.join(tmpDir, '.deepflow', 'command-usage.jsonl');
    const lines = fs.readFileSync(usagePath, 'utf8').trim().split('\n');
    assert.equal(lines.length, 3);

    // Each line must be independently parseable
    for (const [i, line] of lines.entries()) {
      const parsed = JSON.parse(line);
      assert.ok(parsed.command, `line ${i} should have command field`);
    }
  });

  test('usage file ends with newline', () => {
    startCommand('df:plan', 's');
    runHook({ cwd: tmpDir }, { event: 'SessionEnd' });

    const usagePath = path.join(tmpDir, '.deepflow', 'command-usage.jsonl');
    const raw = fs.readFileSync(usagePath, 'utf8');
    assert.ok(raw.endsWith('\n'), 'JSONL file should end with newline');
  });
});
|