@mauribadnights/clooks 0.1.0 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,7 +1,7 @@
1
1
  "use strict";
2
2
  // clooks — public API exports
3
3
  Object.defineProperty(exports, "__esModule", { value: true });
4
- exports.LOG_FILE = exports.METRICS_FILE = exports.PID_FILE = exports.MANIFEST_PATH = exports.CONFIG_DIR = exports.DEFAULT_PORT = exports.executeHandlers = exports.runDoctor = exports.getSettingsPath = exports.restore = exports.migrate = exports.MetricsCollector = exports.createDefaultManifest = exports.validateManifest = exports.loadManifest = exports.isDaemonRunning = exports.stopDaemon = exports.startDaemon = exports.createServer = void 0;
4
+ exports.LLM_PRICING = exports.DEFAULT_LLM_MAX_TOKENS = exports.DEFAULT_LLM_TIMEOUT = exports.COSTS_FILE = exports.LOG_FILE = exports.METRICS_FILE = exports.PID_FILE = exports.MANIFEST_PATH = exports.CONFIG_DIR = exports.DEFAULT_PORT = exports.renderPromptTemplate = exports.prefetchContext = exports.resetClient = exports.calculateCost = exports.executeLLMHandlersBatched = exports.executeLLMHandler = exports.evaluateFilter = exports.executeHandlers = exports.runDoctor = exports.getSettingsPath = exports.restore = exports.migrate = exports.MetricsCollector = exports.createDefaultManifest = exports.validateManifest = exports.loadManifest = exports.isDaemonRunning = exports.stopDaemon = exports.startDaemon = exports.createServer = void 0;
5
5
  var server_js_1 = require("./server.js");
6
6
  Object.defineProperty(exports, "createServer", { enumerable: true, get: function () { return server_js_1.createServer; } });
7
7
  Object.defineProperty(exports, "startDaemon", { enumerable: true, get: function () { return server_js_1.startDaemon; } });
@@ -21,6 +21,16 @@ var doctor_js_1 = require("./doctor.js");
21
21
  Object.defineProperty(exports, "runDoctor", { enumerable: true, get: function () { return doctor_js_1.runDoctor; } });
22
22
  var handlers_js_1 = require("./handlers.js");
23
23
  Object.defineProperty(exports, "executeHandlers", { enumerable: true, get: function () { return handlers_js_1.executeHandlers; } });
24
+ var filter_js_1 = require("./filter.js");
25
+ Object.defineProperty(exports, "evaluateFilter", { enumerable: true, get: function () { return filter_js_1.evaluateFilter; } });
26
+ var llm_js_1 = require("./llm.js");
27
+ Object.defineProperty(exports, "executeLLMHandler", { enumerable: true, get: function () { return llm_js_1.executeLLMHandler; } });
28
+ Object.defineProperty(exports, "executeLLMHandlersBatched", { enumerable: true, get: function () { return llm_js_1.executeLLMHandlersBatched; } });
29
+ Object.defineProperty(exports, "calculateCost", { enumerable: true, get: function () { return llm_js_1.calculateCost; } });
30
+ Object.defineProperty(exports, "resetClient", { enumerable: true, get: function () { return llm_js_1.resetClient; } });
31
+ var prefetch_js_1 = require("./prefetch.js");
32
+ Object.defineProperty(exports, "prefetchContext", { enumerable: true, get: function () { return prefetch_js_1.prefetchContext; } });
33
+ Object.defineProperty(exports, "renderPromptTemplate", { enumerable: true, get: function () { return prefetch_js_1.renderPromptTemplate; } });
24
34
  var constants_js_1 = require("./constants.js");
25
35
  Object.defineProperty(exports, "DEFAULT_PORT", { enumerable: true, get: function () { return constants_js_1.DEFAULT_PORT; } });
26
36
  Object.defineProperty(exports, "CONFIG_DIR", { enumerable: true, get: function () { return constants_js_1.CONFIG_DIR; } });
@@ -28,3 +38,7 @@ Object.defineProperty(exports, "MANIFEST_PATH", { enumerable: true, get: functio
28
38
  Object.defineProperty(exports, "PID_FILE", { enumerable: true, get: function () { return constants_js_1.PID_FILE; } });
29
39
  Object.defineProperty(exports, "METRICS_FILE", { enumerable: true, get: function () { return constants_js_1.METRICS_FILE; } });
30
40
  Object.defineProperty(exports, "LOG_FILE", { enumerable: true, get: function () { return constants_js_1.LOG_FILE; } });
41
+ Object.defineProperty(exports, "COSTS_FILE", { enumerable: true, get: function () { return constants_js_1.COSTS_FILE; } });
42
+ Object.defineProperty(exports, "DEFAULT_LLM_TIMEOUT", { enumerable: true, get: function () { return constants_js_1.DEFAULT_LLM_TIMEOUT; } });
43
+ Object.defineProperty(exports, "DEFAULT_LLM_MAX_TOKENS", { enumerable: true, get: function () { return constants_js_1.DEFAULT_LLM_MAX_TOKENS; } });
44
+ Object.defineProperty(exports, "LLM_PRICING", { enumerable: true, get: function () { return constants_js_1.LLM_PRICING; } });
package/dist/llm.d.ts ADDED
@@ -0,0 +1,19 @@
1
+ import type { LLMHandlerConfig, HandlerResult, HookInput, PrefetchContext, TokenUsage } from './types.js';
2
+ /** Reset client (for testing) */
3
+ export declare function resetClient(): void;
4
+ /**
5
+ * Calculate cost in USD from token usage and model.
6
+ */
7
+ export declare function calculateCost(model: string, usage: TokenUsage): number;
8
+ /**
9
+ * Execute a single LLM handler: render prompt, call Messages API, return result.
10
+ */
11
+ export declare function executeLLMHandler(handler: LLMHandlerConfig, input: HookInput, context: PrefetchContext): Promise<HandlerResult>;
12
+ /**
13
+ * Execute multiple LLM handlers, batching those with the same batchGroup.
14
+ *
15
+ * Strategy: handlers with the same batchGroup get their prompts combined into
16
+ * a single API call with a structured multi-task prompt. Handlers without a
17
+ * batchGroup are executed individually.
18
+ */
19
+ export declare function executeLLMHandlersBatched(handlers: LLMHandlerConfig[], input: HookInput, context: PrefetchContext): Promise<HandlerResult[]>;
package/dist/llm.js ADDED
@@ -0,0 +1,225 @@
1
+ "use strict";
2
+ // clooks LLM handler execution — Anthropic Messages API with batching
3
+ Object.defineProperty(exports, "__esModule", { value: true });
4
+ exports.resetClient = resetClient;
5
+ exports.calculateCost = calculateCost;
6
+ exports.executeLLMHandler = executeLLMHandler;
7
+ exports.executeLLMHandlersBatched = executeLLMHandlersBatched;
8
+ const prefetch_js_1 = require("./prefetch.js");
9
+ const constants_js_1 = require("./constants.js");
10
/** Lazy-loaded Anthropic SDK client */
let anthropicClient = null;
/**
 * Get (or lazily create) the shared Anthropic client.
 *
 * Both preconditions are checked here so failures surface at first use
 * rather than at module load:
 *  - ANTHROPIC_API_KEY must be set in the environment, and
 *  - the @anthropic-ai/sdk package must be importable.
 */
async function getClient() {
    if (!anthropicClient) {
        if (!process.env.ANTHROPIC_API_KEY) {
            throw new Error('ANTHROPIC_API_KEY environment variable is not set. ' +
                'LLM handlers require a valid API key.');
        }
        try {
            // Dynamic import with variable to avoid TypeScript resolving the module at compile time
            const sdkModule = '@anthropic-ai/sdk';
            const { default: Anthropic } = await import(/* webpackIgnore: true */ sdkModule);
            anthropicClient = new Anthropic();
        }
        catch (err) {
            // Preserve the underlying failure: "not installed" is the common
            // cause, but the import can also fail for other reasons (broken
            // install, incompatible Node version); swallowing the original
            // message made those cases undiagnosable.
            const cause = err instanceof Error ? err.message : String(err);
            throw new Error('Anthropic SDK not installed. Run: npm install @anthropic-ai/sdk\n' +
                'Then set ANTHROPIC_API_KEY environment variable.\n' +
                `(original error: ${cause})`);
        }
    }
    return anthropicClient;
}
/** Reset client (for testing) */
function resetClient() {
    anthropicClient = null;
}
35
/**
 * Calculate cost in USD from token usage and model.
 *
 * Rates come from the LLM_PRICING table (USD per million tokens); a model
 * missing from the table is priced at zero rather than throwing.
 */
function calculateCost(model, usage) {
    const rates = constants_js_1.LLM_PRICING[model];
    if (!rates) {
        return 0;
    }
    const costForInput = (usage.input_tokens / 1_000_000) * rates.input;
    const costForOutput = (usage.output_tokens / 1_000_000) * rates.output;
    return costForInput + costForOutput;
}
46
/**
 * Execute a single LLM handler: render prompt, call Messages API, return result.
 *
 * Always resolves with a HandlerResult — API errors and timeouts are captured
 * in the result (`ok: false`) rather than thrown, so one failing handler
 * cannot abort its siblings.
 */
async function executeLLMHandler(handler, input, context) {
    const start = performance.now();
    const timeout = handler.timeout ?? constants_js_1.DEFAULT_LLM_TIMEOUT;
    const maxTokens = handler.maxTokens ?? constants_js_1.DEFAULT_LLM_MAX_TOKENS;
    let timer;
    try {
        const client = await getClient();
        const prompt = (0, prefetch_js_1.renderPromptTemplate)(handler.prompt, input, context);
        const apiCall = client.messages.create({
            model: handler.model,
            max_tokens: maxTokens,
            messages: [{ role: 'user', content: prompt }],
        });
        const timeoutPromise = new Promise((_resolve, reject) => {
            timer = setTimeout(() => reject(new Error(`LLM handler timed out after ${timeout}ms`)), timeout);
        });
        const response = await Promise.race([apiCall, timeoutPromise]);
        const text = response.content?.[0]?.text ?? '';
        const usage = {
            input_tokens: response.usage?.input_tokens ?? 0,
            output_tokens: response.usage?.output_tokens ?? 0,
        };
        const cost_usd = calculateCost(handler.model, usage);
        return {
            id: handler.id,
            ok: true,
            output: { additionalContext: text },
            duration_ms: performance.now() - start,
            usage,
            cost_usd,
        };
    }
    catch (err) {
        return {
            id: handler.id,
            ok: false,
            error: err instanceof Error ? err.message : String(err),
            duration_ms: performance.now() - start,
        };
    }
    finally {
        // Clear the pending timeout so a fast API response doesn't leave a
        // live timer keeping the event loop alive for the full timeout window.
        if (timer !== undefined) {
            clearTimeout(timer);
        }
    }
}
87
/**
 * Execute a batched group of LLM handlers: combine prompts into a single
 * multi-task API call, parse JSON response back into individual results.
 *
 * The group shares one request (model/maxTokens/timeout reconciled below);
 * the model is asked to answer with a JSON object keyed by handler id.
 * Token usage is split evenly across members for cost attribution.
 */
async function executeBatchGroup(handlers, input, context) {
    const start = performance.now();
    // Use model from first handler; warn if others differ
    const model = handlers[0].model;
    for (let i = 1; i < handlers.length; i++) {
        if (handlers[i].model !== model) {
            console.warn(`[clooks] Batch group "${handlers[0].batchGroup}": handler "${handlers[i].id}" ` +
                `uses model "${handlers[i].model}" but batch uses "${model}". Using "${model}".`);
        }
    }
    // Use highest maxTokens and timeout among group members
    const maxTokens = Math.max(...handlers.map(h => h.maxTokens ?? constants_js_1.DEFAULT_LLM_MAX_TOKENS));
    const timeout = Math.max(...handlers.map(h => h.timeout ?? constants_js_1.DEFAULT_LLM_TIMEOUT));
    // Build combined prompt: one named TASK section per handler plus strict
    // output-format instructions so the response can be split back apart.
    const taskSections = handlers.map(h => {
        const rendered = (0, prefetch_js_1.renderPromptTemplate)(h.prompt, input, context);
        return `TASK "${h.id}":\n${rendered}`;
    });
    const combinedPrompt = 'You must complete multiple analysis tasks. Respond with a JSON object where each key is the task ID and the value is your analysis for that task.\n\n' +
        taskSections.join('\n\n') +
        '\n\nRespond ONLY with valid JSON in this format:\n' +
        '{' + handlers.map(h => `"${h.id}": <your analysis>`).join(', ') + '}';
    let timer;
    try {
        const client = await getClient();
        const apiCall = client.messages.create({
            model,
            max_tokens: maxTokens,
            messages: [{ role: 'user', content: combinedPrompt }],
        });
        const timeoutPromise = new Promise((_resolve, reject) => {
            timer = setTimeout(() => reject(new Error(`Batched LLM call timed out after ${timeout}ms`)), timeout);
        });
        const response = await Promise.race([apiCall, timeoutPromise]);
        const text = response.content?.[0]?.text ?? '';
        const totalUsage = {
            input_tokens: response.usage?.input_tokens ?? 0,
            output_tokens: response.usage?.output_tokens ?? 0,
        };
        const duration = performance.now() - start;
        // Compute the per-handler share once (was previously recomputed per use).
        const perHandlerUsage = splitUsage(totalUsage, handlers.length);
        const perHandlerCost = calculateCost(model, perHandlerUsage);
        // Try to parse as JSON and split results
        let parsed = {};
        try {
            // Strip markdown code fences if present
            const cleaned = text.replace(/^```(?:json)?\s*\n?/m, '').replace(/\n?```\s*$/m, '').trim();
            parsed = JSON.parse(cleaned);
        }
        catch {
            // If JSON parsing fails, give all handlers the raw text
            return handlers.map(h => ({
                id: h.id,
                ok: true,
                output: { additionalContext: text },
                duration_ms: duration,
                usage: perHandlerUsage,
                cost_usd: perHandlerCost,
            }));
        }
        // Distribute results to each handler
        return handlers.map(h => {
            const handlerResult = parsed[h.id];
            const resultText = typeof handlerResult === 'string'
                ? handlerResult
                : JSON.stringify(handlerResult ?? '');
            return {
                id: h.id,
                ok: true,
                output: { additionalContext: resultText },
                duration_ms: duration,
                usage: perHandlerUsage,
                cost_usd: perHandlerCost,
            };
        });
    }
    catch (err) {
        const duration = performance.now() - start;
        const errorMsg = err instanceof Error ? err.message : String(err);
        return handlers.map(h => ({
            id: h.id,
            ok: false,
            error: errorMsg,
            duration_ms: duration,
        }));
    }
    finally {
        // Don't leave the timeout timer running after the race settles.
        if (timer !== undefined) {
            clearTimeout(timer);
        }
    }
}
175
/**
 * Split total token usage evenly across N handlers (for cost attribution in batches).
 * Ceiling division is used so rounding never under-attributes; a non-positive
 * count yields zero usage.
 */
function splitUsage(total, count) {
    if (count <= 0) {
        return { input_tokens: 0, output_tokens: 0 };
    }
    const share = (tokens) => Math.ceil(tokens / count);
    return {
        input_tokens: share(total.input_tokens),
        output_tokens: share(total.output_tokens),
    };
}
186
/**
 * Execute multiple LLM handlers, batching those with the same batchGroup.
 *
 * Strategy: handlers with the same batchGroup get their prompts combined into
 * a single API call with a structured multi-task prompt. Handlers without a
 * batchGroup are executed individually.
 */
async function executeLLMHandlersBatched(handlers, input, context) {
    // Partition into insertion-ordered batch groups and stand-alone handlers.
    const groups = new Map();
    const solo = [];
    for (const h of handlers) {
        if (!h.batchGroup) {
            solo.push(h);
            continue;
        }
        const members = groups.get(h.batchGroup);
        if (members) {
            members.push(h);
        }
        else {
            groups.set(h.batchGroup, [h]);
        }
    }
    // Kick everything off in parallel; each promise yields an array of results.
    const pending = solo.map(h => executeLLMHandler(h, input, context).then(r => [r]));
    for (const members of groups.values()) {
        // A one-member "group" gains nothing from batching — run it directly.
        pending.push(members.length === 1
            ? executeLLMHandler(members[0], input, context).then(r => [r])
            : executeBatchGroup(members, input, context));
    }
    const resolved = await Promise.all(pending);
    return resolved.flat();
}
package/dist/manifest.js CHANGED
@@ -48,15 +48,40 @@ function validateManifest(manifest) {
48
48
  throw new Error(`Duplicate handler id: "${handler.id}"`);
49
49
  }
50
50
  seenIds.add(handler.id);
51
- if (!handler.type || !['script', 'inline'].includes(handler.type)) {
52
- throw new Error(`Handler "${handler.id}" must have type "script" or "inline"`);
51
+ if (!handler.type || !['script', 'inline', 'llm'].includes(handler.type)) {
52
+ throw new Error(`Handler "${handler.id}" must have type "script", "inline", or "llm"`);
53
53
  }
54
- if (handler.type === 'script' && !handler.command) {
54
+ if (handler.type === 'script' && !('command' in handler && handler.command)) {
55
55
  throw new Error(`Script handler "${handler.id}" must have a "command" field`);
56
56
  }
57
- if (handler.type === 'inline' && !handler.module) {
57
+ if (handler.type === 'inline' && !('module' in handler && handler.module)) {
58
58
  throw new Error(`Inline handler "${handler.id}" must have a "module" field`);
59
59
  }
60
+ if (handler.type === 'llm') {
61
+ const llm = handler;
62
+ if (!llm.model) {
63
+ throw new Error(`LLM handler "${handler.id}" must have a "model" field`);
64
+ }
65
+ if (!llm.prompt) {
66
+ throw new Error(`LLM handler "${handler.id}" must have a "prompt" field`);
67
+ }
68
+ const validModels = ['claude-haiku-4-5', 'claude-sonnet-4-6', 'claude-opus-4-6'];
69
+ if (!validModels.includes(llm.model)) {
70
+ throw new Error(`LLM handler "${handler.id}" model must be one of: ${validModels.join(', ')}`);
71
+ }
72
+ }
73
+ }
74
+ }
75
+ // Validate prefetch if present
76
+ if (manifest.prefetch !== undefined) {
77
+ if (!Array.isArray(manifest.prefetch)) {
78
+ throw new Error('prefetch must be an array');
79
+ }
80
+ const validKeys = ['transcript', 'git_status', 'git_diff'];
81
+ for (const key of manifest.prefetch) {
82
+ if (!validKeys.includes(key)) {
83
+ throw new Error(`Invalid prefetch key: "${key}". Valid keys: ${validKeys.join(', ')}`);
84
+ }
60
85
  }
61
86
  }
62
87
  // Validate settings if present
package/dist/metrics.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import type { MetricEntry } from './types.js';
1
+ import type { MetricEntry, CostEntry } from './types.js';
2
2
  interface AggregatedStats {
3
3
  event: string;
4
4
  fires: number;
@@ -21,6 +21,26 @@ export declare class MetricsCollector {
21
21
  formatStatsTable(): string;
22
22
  /** Estimate how many process spawns were saved. */
23
23
  estimateSpawnsSaved(): number;
24
+ /** Track a cost entry — appends to costs.jsonl. */
25
+ trackCost(entry: CostEntry): void;
26
+ /** Get cost statistics from persisted cost entries. */
27
+ getCostStats(): {
28
+ totalCost: number;
29
+ totalTokens: number;
30
+ byModel: Record<string, {
31
+ cost: number;
32
+ tokens: number;
33
+ }>;
34
+ byHandler: Record<string, {
35
+ cost: number;
36
+ tokens: number;
37
+ calls: number;
38
+ }>;
39
+ };
40
+ /** Format cost data as a CLI-friendly table. */
41
+ formatCostTable(): string;
42
+ /** Load cost entries from disk. */
43
+ private loadCosts;
24
44
  /** Load all entries from disk + memory (deduped by combining disk file). */
25
45
  private loadAll;
26
46
  }
package/dist/metrics.js CHANGED
@@ -100,6 +100,102 @@ class MetricsCollector {
100
100
  const all = this.loadAll();
101
101
  return all.length;
102
102
  }
103
+ // --- Cost tracking ---
104
+ /** Track a cost entry — appends to costs.jsonl. */
105
+ trackCost(entry) {
106
+ try {
107
+ const dir = (0, path_1.dirname)(constants_js_1.COSTS_FILE);
108
+ if (!(0, fs_1.existsSync)(dir)) {
109
+ (0, fs_1.mkdirSync)(dir, { recursive: true });
110
+ }
111
+ (0, fs_1.appendFileSync)(constants_js_1.COSTS_FILE, JSON.stringify(entry) + '\n', 'utf-8');
112
+ }
113
+ catch {
114
+ // Non-critical — cost tracking should not crash the daemon
115
+ }
116
+ }
117
+ /** Get cost statistics from persisted cost entries. */
118
+ getCostStats() {
119
+ const entries = this.loadCosts();
120
+ let totalCost = 0;
121
+ let totalTokens = 0;
122
+ const byModel = {};
123
+ const byHandler = {};
124
+ for (const entry of entries) {
125
+ totalCost += entry.cost_usd;
126
+ const tokens = entry.usage.input_tokens + entry.usage.output_tokens;
127
+ totalTokens += tokens;
128
+ // By model
129
+ if (!byModel[entry.model]) {
130
+ byModel[entry.model] = { cost: 0, tokens: 0 };
131
+ }
132
+ byModel[entry.model].cost += entry.cost_usd;
133
+ byModel[entry.model].tokens += tokens;
134
+ // By handler
135
+ if (!byHandler[entry.handler]) {
136
+ byHandler[entry.handler] = { cost: 0, tokens: 0, calls: 0 };
137
+ }
138
+ byHandler[entry.handler].cost += entry.cost_usd;
139
+ byHandler[entry.handler].tokens += tokens;
140
+ byHandler[entry.handler].calls++;
141
+ }
142
+ return { totalCost, totalTokens, byModel, byHandler };
143
+ }
144
/** Format cost data as a CLI-friendly table. */
formatCostTable() {
    // Short-circuit with a friendly message when nothing has been recorded.
    const entries = this.loadCosts();
    if (entries.length === 0) {
        return 'No LLM cost data recorded yet.';
    }
    const stats = this.getCostStats();
    const lines = [];
    lines.push('LLM Cost Summary');
    lines.push(` Total: $${stats.totalCost.toFixed(4)} (${formatTokenCount(stats.totalTokens)} tokens)`);
    lines.push('');
    // By Model
    lines.push(' By Model:');
    for (const [model, data] of Object.entries(stats.byModel)) {
        lines.push(` ${model.padEnd(22)} $${data.cost.toFixed(4)} (${formatTokenCount(data.tokens)} tokens)`);
    }
    lines.push('');
    // By Handler
    lines.push(' By Handler:');
    for (const [handler, data] of Object.entries(stats.byHandler)) {
        // Guard against divide-by-zero; calls should always be >= 1 per entry.
        const avgTokens = data.calls > 0 ? Math.round(data.tokens / data.calls) : 0;
        lines.push(` ${handler.padEnd(22)} $${data.cost.toFixed(4)} (${data.calls} calls, avg ${avgTokens} tokens)`);
    }
    // Batching savings estimate
    const batchedCount = entries.filter(e => e.batched).length;
    // NOTE(review): unbatchedCount is computed but never used below — confirm
    // whether it was meant to feed the savings estimate.
    const unbatchedCount = entries.length - batchedCount;
    if (batchedCount > 0) {
        // Estimate: batched calls saved roughly (batchedCount - unique_batch_calls) API calls
        // Simple heuristic: batched entries share cost, individual would each cost input overhead
        const batchedCost = entries.filter(e => e.batched).reduce((s, e) => s + e.cost_usd, 0);
        // Rough estimate: without batching, each would have its own input tokens overhead
        // NOTE(review): the 2x multiplier is an unvalidated heuristic, not a measurement.
        const estimatedIndividualCost = batchedCost * 2; // conservative 2x estimate
        const saved = estimatedIndividualCost - batchedCost;
        if (saved > 0) {
            const pct = Math.round((saved / (stats.totalCost + saved)) * 100);
            lines.push('');
            lines.push(` Batching saved: ~$${saved.toFixed(4)} (~${pct}% of what individual calls would cost)`);
        }
    }
    return lines.join('\n');
}
185
+ /** Load cost entries from disk. */
186
+ loadCosts() {
187
+ if (!(0, fs_1.existsSync)(constants_js_1.COSTS_FILE)) {
188
+ return [];
189
+ }
190
+ try {
191
+ const raw = (0, fs_1.readFileSync)(constants_js_1.COSTS_FILE, 'utf-8');
192
+ const lines = raw.trim().split('\n').filter(Boolean);
193
+ return lines.map((line) => JSON.parse(line));
194
+ }
195
+ catch {
196
+ return [];
197
+ }
198
+ }
103
199
  /** Load all entries from disk + memory (deduped by combining disk file). */
104
200
  loadAll() {
105
201
  if (!(0, fs_1.existsSync)(constants_js_1.METRICS_FILE)) {
@@ -116,6 +212,13 @@ class MetricsCollector {
116
212
  }
117
213
  }
118
214
  exports.MetricsCollector = MetricsCollector;
215
/**
 * Human-readable token count: 2500000 -> "2.5M", 1500 -> "1.5k", 999 -> "999".
 */
function formatTokenCount(tokens) {
    const MILLION = 1_000_000;
    const THOUSAND = 1_000;
    if (tokens >= MILLION) {
        return `${(tokens / MILLION).toFixed(1)}M`;
    }
    if (tokens >= THOUSAND) {
        return `${(tokens / THOUSAND).toFixed(1)}k`;
    }
    return String(tokens);
}
119
222
  function padRow(cols) {
120
223
  const widths = [20, 8, 8, 10, 10, 10];
121
224
  return cols.map((col, i) => col.padEnd(widths[i])).join(' ');
@@ -0,0 +1,11 @@
1
+ import type { PrefetchKey, PrefetchContext, HookInput } from './types.js';
2
+ /**
3
+ * Pre-fetch requested context data. Each key is fetched once and cached.
4
+ * Errors are caught per-key (a failed git_status doesn't block transcript).
5
+ */
6
+ export declare function prefetchContext(keys: PrefetchKey[], input: HookInput): Promise<PrefetchContext>;
7
+ /**
8
+ * Render a prompt template by replacing $VARIABLES with actual values.
9
+ * Supported: $TRANSCRIPT, $GIT_STATUS, $GIT_DIFF, $ARGUMENTS, $TOOL_NAME, $PROMPT, $CWD
10
+ */
11
+ export declare function renderPromptTemplate(template: string, input: HookInput, context: PrefetchContext): string;
@@ -0,0 +1,71 @@
1
+ "use strict";
2
+ // clooks prefetch — shared context pre-fetching for handlers
3
+ Object.defineProperty(exports, "__esModule", { value: true });
4
+ exports.prefetchContext = prefetchContext;
5
+ exports.renderPromptTemplate = renderPromptTemplate;
6
+ const fs_1 = require("fs");
7
+ const child_process_1 = require("child_process");
8
+ const MAX_TRANSCRIPT_BYTES = 50 * 1024; // 50KB
9
+ const MAX_GIT_DIFF_BYTES = 20 * 1024; // 20KB
10
+ /**
11
+ * Pre-fetch requested context data. Each key is fetched once and cached.
12
+ * Errors are caught per-key (a failed git_status doesn't block transcript).
13
+ */
14
+ async function prefetchContext(keys, input) {
15
+ const ctx = {};
16
+ for (const key of keys) {
17
+ try {
18
+ switch (key) {
19
+ case 'transcript': {
20
+ if (input.transcript_path && (0, fs_1.existsSync)(input.transcript_path)) {
21
+ const raw = (0, fs_1.readFileSync)(input.transcript_path, 'utf-8');
22
+ // Truncate to last 50KB to avoid memory issues
23
+ ctx.transcript = raw.length > MAX_TRANSCRIPT_BYTES
24
+ ? raw.slice(-MAX_TRANSCRIPT_BYTES)
25
+ : raw;
26
+ }
27
+ break;
28
+ }
29
+ case 'git_status': {
30
+ const status = (0, child_process_1.execSync)('git status --porcelain', {
31
+ cwd: input.cwd,
32
+ encoding: 'utf-8',
33
+ timeout: 5000,
34
+ });
35
+ ctx.git_status = status;
36
+ break;
37
+ }
38
+ case 'git_diff': {
39
+ const diff = (0, child_process_1.execSync)('git diff --no-ext-diff --stat', {
40
+ cwd: input.cwd,
41
+ encoding: 'utf-8',
42
+ timeout: 5000,
43
+ });
44
+ // Truncate to 20KB
45
+ ctx.git_diff = diff.length > MAX_GIT_DIFF_BYTES
46
+ ? diff.slice(0, MAX_GIT_DIFF_BYTES)
47
+ : diff;
48
+ break;
49
+ }
50
+ }
51
+ }
52
+ catch {
53
+ // Errors are silently caught per-key — a failed git_status doesn't block transcript
54
+ }
55
+ }
56
+ return ctx;
57
+ }
58
/**
 * Render a prompt template by replacing $VARIABLES with actual values.
 * Supported: $TRANSCRIPT, $GIT_STATUS, $GIT_DIFF, $ARGUMENTS, $TOOL_NAME, $PROMPT, $CWD
 *
 * Substitution is done in a single pass with a replacer FUNCTION, which fixes
 * two bugs in the previous chained `.replace(regex, string)` approach:
 *  - special replacement patterns ($&, $', $$, ...) occurring inside a value
 *    (transcripts, git output, tool input are untrusted text) are no longer
 *    interpreted by String.prototype.replace, and
 *  - a value that itself contains a later $VARIABLE token is no longer
 *    re-expanded by a subsequent replacement.
 */
function renderPromptTemplate(template, input, context) {
    const values = new Map([
        ['$TRANSCRIPT', context.transcript ?? ''],
        ['$GIT_STATUS', context.git_status ?? ''],
        ['$GIT_DIFF', context.git_diff ?? ''],
        ['$ARGUMENTS', input.tool_input ? JSON.stringify(input.tool_input) : ''],
        ['$TOOL_NAME', input.tool_name ?? ''],
        ['$PROMPT', input.prompt ?? ''],
        ['$CWD', input.cwd ?? ''],
    ]);
    // No variable name is a prefix of another, so a plain alternation is safe.
    return template.replace(/\$(?:TRANSCRIPT|GIT_STATUS|GIT_DIFF|ARGUMENTS|TOOL_NAME|PROMPT|CWD)/g, (token) => values.get(token) ?? token);
}
package/dist/server.js CHANGED
@@ -10,6 +10,7 @@ const http_1 = require("http");
10
10
  const fs_1 = require("fs");
11
11
  const child_process_1 = require("child_process");
12
12
  const handlers_js_1 = require("./handlers.js");
13
+ const prefetch_js_1 = require("./prefetch.js");
13
14
  const constants_js_1 = require("./constants.js");
14
15
  function log(msg) {
15
16
  const line = `[${new Date().toISOString()}] ${msg}\n`;
@@ -119,8 +120,13 @@ function createServer(manifest, metrics) {
119
120
  }
120
121
  log(`Hook: ${eventName} (${handlers.length} handler${handlers.length > 1 ? 's' : ''})`);
121
122
  try {
122
- const results = await (0, handlers_js_1.executeHandlers)(event, input, handlers);
123
- // Record metrics
123
+ // Pre-fetch shared context if configured
124
+ let context;
125
+ if (manifest.prefetch && manifest.prefetch.length > 0) {
126
+ context = await (0, prefetch_js_1.prefetchContext)(manifest.prefetch, input);
127
+ }
128
+ const results = await (0, handlers_js_1.executeHandlers)(event, input, handlers, context);
129
+ // Record metrics and costs
124
130
  for (const result of results) {
125
131
  metrics.record({
126
132
  ts: new Date().toISOString(),
@@ -129,7 +135,27 @@ function createServer(manifest, metrics) {
129
135
  duration_ms: result.duration_ms,
130
136
  ok: result.ok,
131
137
  error: result.error,
138
+ filtered: result.filtered,
139
+ usage: result.usage,
140
+ cost_usd: result.cost_usd,
132
141
  });
142
+ // Track cost for LLM handlers
143
+ if (result.usage && result.cost_usd !== undefined && result.cost_usd > 0) {
144
+ // Find the handler config to get model info
145
+ const handlerConfig = handlers.find(h => h.id === result.id);
146
+ if (handlerConfig && handlerConfig.type === 'llm') {
147
+ const llmConfig = handlerConfig;
148
+ metrics.trackCost({
149
+ ts: new Date().toISOString(),
150
+ event,
151
+ handler: result.id,
152
+ model: llmConfig.model,
153
+ usage: result.usage,
154
+ cost_usd: result.cost_usd,
155
+ batched: !!llmConfig.batchGroup,
156
+ });
157
+ }
158
+ }
133
159
  }
134
160
  const merged = mergeResults(results);
135
161
  log(` -> ${results.filter((r) => r.ok).length}/${results.length} ok, response keys: ${Object.keys(merged).join(', ') || '(empty)'}`);