claude-session-insights 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +153 -0
- package/bin/cli.js +49 -0
- package/package.json +30 -0
- package/public/index.html +1560 -0
- package/src/ai-analyze.js +277 -0
- package/src/export.js +79 -0
- package/src/parser.js +273 -0
- package/src/scorer.js +464 -0
- package/src/server.js +229 -0
- package/src/summarizer.js +253 -0
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
// AI-powered analysis using Claude Code CLI
|
|
2
|
+
// Manually triggered — streams output from `claude` via SSE.
|
|
3
|
+
|
|
4
|
+
import { spawn, execFile } from "node:child_process";
|
|
5
|
+
import { promisify } from "node:util";
|
|
6
|
+
|
|
7
|
+
// Environment for spawned `claude` processes: every CLAUDE* variable is
// removed so a child CLI doesn't believe it is running nested inside
// Claude Code.
const cleanEnv = {};
for (const [name, value] of Object.entries(process.env)) {
  if (!name.startsWith("CLAUDE")) cleanEnv[name] = value;
}

// Promise-returning variant of execFile, used for one-shot CLI queries.
const execFileAsync = promisify(execFile);
|
|
13
|
+
|
|
14
|
+
// Model options surfaced in the UI picker.
const MODELS = [
  { id: "claude-sonnet-4-6", label: "Sonnet 4.6", family: "sonnet" },
  { id: "claude-opus-4-6", label: "Opus 4.6", family: "opus" },
  { id: "claude-haiku-4-5", label: "Haiku 4.5", family: "haiku" },
];

// Best-guess default; refined by detectDefaultModel() or from stream results.
let detectedDefaultModel = "claude-sonnet-4-6";

/**
 * Human-friendly label for a model ID.
 * @param {string|null|undefined} modelId - raw ID, e.g. "claude-sonnet-4-6"
 * @returns {string|null} known label, a cleaned-up raw ID, or null for falsy input
 */
function prettyModelName(modelId) {
  if (!modelId) return null;
  const known = MODELS.find((entry) => entry.id === modelId);
  if (known) return known.label;
  // Unknown ID: drop the "claude-" prefix, turn dashes into spaces, and join
  // the first pair of version digits with a dot ("opus 4 1" -> "opus 4.1").
  return modelId
    .replace("claude-", "")
    .replace(/-/g, " ")
    .replace(/(\d+) (\d+)/, "$1.$2");
}
|
|
29
|
+
|
|
30
|
+
/**
 * Best-effort detection of the user's configured default model via
 * `claude config get model`. Called once at startup; failures are ignored
 * because the model can also be learned from the first stream result.
 * @returns {Promise<void>} always resolves (never rejects)
 */
export async function detectDefaultModel() {
  try {
    const result = await execFileAsync("claude", ["config", "get", "model"], {
      timeout: 5000,
      env: cleanEnv,
    });
    const configured = result.stdout.trim();
    // The CLI may print literal "undefined"/"null" when nothing is configured.
    const usable =
      configured !== "" && configured !== "undefined" && configured !== "null";
    if (usable) detectedDefaultModel = configured;
  } catch {
    // CLI missing or config unreadable — keep the current default.
  }
}
|
|
48
|
+
|
|
49
|
+
/**
 * Condense scored usage data into a JSON snapshot suitable for embedding
 * in the AI analysis prompt.
 * @param {object} scoredData - scorer output: { sessions, overallScore, badges, dailyScores, overallSummary }
 * @returns {string} pretty-printed JSON string
 */
function buildDataSnapshot(scoredData) {
  const { sessions, overallScore, badges, dailyScores, overallSummary } = scoredData;

  const sumBy = (pick) => sessions.reduce((acc, session) => acc + pick(session), 0);
  const totalCost = sumBy((s) => s.totals.estimatedCost);
  const totalTokens = sumBy((s) => s.totals.totalTokens);
  const avgCacheHit =
    sessions.length > 0 ? sumBy((s) => s.totals.cacheHitRate) / sessions.length : 0;

  // Ten most expensive sessions, condensed for the prompt.
  const topSessions = sessions
    .slice()
    .sort((a, b) => b.totals.estimatedCost - a.totals.estimatedCost)
    .slice(0, 10)
    .map((session) => {
      const { totals } = session;
      return {
        project: session.project,
        model: session.model,
        score: session.score,
        messages: totals.userMessages,
        tokens: totals.totalTokens,
        cost: totals.estimatedCost.toFixed(2),
        cacheHitRate: (totals.cacheHitRate * 100).toFixed(0) + '%',
        toolCalls: totals.toolCalls,
        toolRatio: totals.userMessages > 0
          ? (totals.toolCalls / totals.userMessages).toFixed(1) + 'x'
          : 'n/a',
        suggestedModel: session.suggestedModel || null,
        summary: session.summary,
      };
    });

  // Session counts per model family; sonnet is the catch-all bucket.
  const modelCounts = {};
  for (const session of sessions) {
    const id = session.model || 'unknown';
    let family = 'sonnet';
    if (id.includes('opus')) family = 'opus';
    else if (id.includes('haiku')) family = 'haiku';
    modelCounts[family] = (modelCounts[family] || 0) + 1;
  }

  return JSON.stringify({
    overview: {
      totalSessions: sessions.length,
      overallScore,
      totalCost: '$' + totalCost.toFixed(2),
      totalTokens,
      avgCacheHitRate: (avgCacheHit * 100).toFixed(0) + '%',
      badges: badges.map((b) => ({ name: b.name, negative: b.negative })),
      modelDistribution: modelCounts,
    },
    dailyScores: dailyScores.slice(-14),
    topSessionsByCost: topSessions,
    staticInsights: {
      patterns: overallSummary?.patterns || [],
      recommendations: overallSummary?.recommendations || [],
    },
  }, null, 2);
}
|
|
101
|
+
|
|
102
|
+
/**
 * Wrap a data snapshot in the analysis prompt sent to the Claude CLI.
 * The prompt asks for four fixed markdown sections and caps the response
 * length; the snapshot is interpolated verbatim.
 * @param {string} dataSnapshot - JSON string produced by buildDataSnapshot()
 * @returns {string} the full prompt text
 */
function buildPrompt(dataSnapshot) {
  return `You are analyzing a developer's Claude Code usage data. Your job is to find non-obvious patterns and give specific, actionable advice that goes beyond the static rule-based analysis already shown to the user.

Here is their usage data:
${dataSnapshot}

Analyze this data and provide insights in the following format. Be specific — reference actual numbers, sessions, and projects. Be concise — no filler.

## Key Patterns
2-3 non-obvious patterns you notice (things the static rules might miss — e.g. time-of-day patterns, project-specific habits, cost trajectory trends, session clustering).

## Biggest Opportunities
2-3 specific workflow changes that would have the highest impact on cost or efficiency. Quantify the potential savings where possible (e.g. "switching to Sonnet for your X project sessions could save ~$Y/week").

## What's Working Well
1-2 things the user is doing right that they should keep doing.

## Standout Session
Pick the single most interesting session (most expensive, most efficient, or most unusual) and explain what makes it notable and what can be learned from it.

Keep the entire response under 400 words. Use markdown formatting.`;
}
|
|
124
|
+
|
|
125
|
+
export { buildPrompt, buildDataSnapshot };
|
|
126
|
+
|
|
127
|
+
// Most recently completed analysis, served to clients that ask again.
let cachedResult = null;
// claude child processes currently streaming output.
const activeChildren = new Set();

/**
 * Terminate every in-flight claude process; invoked on server shutdown.
 */
export function killActiveProcesses() {
  for (const child of activeChildren) {
    const stillRunning = child.exitCode === null;
    if (stillRunning) child.kill();
  }
  activeChildren.clear();
}
|
|
139
|
+
|
|
140
|
+
/**
 * Stream AI analysis to an SSE response by piping a prompt through the
 * `claude` CLI (`--output-format stream-json`).
 *
 * SSE events sent:
 *   { event: "model", data: modelId }                 — model requested (or "default")
 *   { event: "chunk", data: text }                    — incremental analysis text
 *   { event: "done",  data: { generatedAt, model } }  — success; result cached
 *   { event: "error", data: { message } }             — spawn or CLI failure
 *
 * Fixes over the previous version:
 *  - a `finished` guard: Node emits "close" even after "error", which used to
 *    write a second error event to an already-ended response and end it twice;
 *  - an "error" handler on child.stdin: an EPIPE from a failed spawn was an
 *    unhandled stream error that crashed the whole server;
 *  - the trailing partial line left in `lineBuf` is flushed on "close"
 *    instead of being silently dropped.
 *
 * @param {object} scoredData - scored session data (see buildDataSnapshot)
 * @param {import("node:http").ServerResponse} res - SSE response, headers already sent
 * @param {string} [modelId] - explicit model to use; falsy means the CLI default
 */
export function streamAIAnalysis(scoredData, res, modelId) {
  const prompt = buildPrompt(buildDataSnapshot(scoredData));

  const args = ["-p", "-", "--output-format", "stream-json", "--verbose"];
  if (modelId) {
    args.push("--model", modelId);
  }

  // Announce which model was requested before any output arrives.
  const resolvedModel = modelId || "default";
  console.log(`[ai] Starting analysis (model: ${resolvedModel})`);
  res.write(`event: model\ndata: ${JSON.stringify(resolvedModel)}\n\n`);

  const child = spawn("claude", args, {
    stdio: ["pipe", "pipe", "pipe"],
    env: cleanEnv,
  });
  activeChildren.add(child);

  // Ensure the response is finalized exactly once ("error" and "close" can
  // both fire for the same child).
  let finished = false;
  const finish = (writeFinal) => {
    if (finished) return;
    finished = true;
    activeChildren.delete(child);
    writeFinal();
    res.end();
  };

  // If the CLI dies before consuming stdin, the write surfaces as an EPIPE
  // stream error; without a handler that error would crash the process.
  child.stdin.on("error", () => {});
  // Pipe the prompt via stdin to avoid CLI argument-length limits.
  child.stdin.write(prompt);
  child.stdin.end();

  let fullContent = "";
  let errOutput = "";
  let detectedModel = modelId || detectedDefaultModel || null;
  let lineBuf = "";

  // Handle one complete line of stream-json output.
  const processLine = (line) => {
    if (!line.trim()) return;
    try {
      const obj = JSON.parse(line);
      // Skip system init and rate_limit events.
      if (obj.type === "system" || obj.type === "rate_limit_event") return;
      // Final result — the model actually used appears as a modelUsage key.
      if (obj.type === "result") {
        const models = Object.keys(obj.modelUsage || {});
        if (models.length > 0) {
          detectedModel = models[0];
          if (!modelId) detectedDefaultModel = models[0];
        }
        return;
      }
      // Assistant message — text lives in message.content[].
      if (obj.type === "assistant" && obj.message?.content) {
        for (const block of obj.message.content) {
          if (block.type === "text" && block.text) {
            fullContent += block.text;
            res.write(`event: chunk\ndata: ${JSON.stringify(block.text)}\n\n`);
          }
        }
        if (obj.message.model && !detectedModel) {
          detectedModel = obj.message.model;
          if (!modelId) detectedDefaultModel = obj.message.model;
        }
      }
    } catch {
      // Not JSON — forward as a raw text chunk.
      fullContent += line;
      res.write(`event: chunk\ndata: ${JSON.stringify(line)}\n\n`);
    }
  };

  child.stdout.on("data", (buf) => {
    lineBuf += buf.toString();
    const lines = lineBuf.split("\n");
    // Keep the last (possibly incomplete) line buffered for the next chunk.
    lineBuf = lines.pop();
    for (const line of lines) processLine(line);
  });

  child.stderr.on("data", (buf) => {
    const text = buf.toString();
    errOutput += text;
    console.error(`[ai] stderr: ${text.trim()}`);
  });

  child.on("error", (err) => {
    const message = err.code === "ENOENT"
      ? "Claude CLI not found. Make sure `claude` is installed and in your PATH."
      : `Claude CLI error: ${err.message}`;
    console.error(`[ai] Process error: ${message}`);
    finish(() => {
      res.write(`event: error\ndata: ${JSON.stringify({ message })}\n\n`);
    });
  });

  child.on("close", (code) => {
    // Flush any final line that never got a trailing newline.
    if (lineBuf) {
      processLine(lineBuf);
      lineBuf = "";
    }
    if (code !== 0 && !fullContent) {
      const message = errOutput.trim() || `Claude CLI exited with code ${code}`;
      console.error(`[ai] Failed (exit ${code}): ${message}`);
      finish(() => {
        res.write(`event: error\ndata: ${JSON.stringify({ message })}\n\n`);
      });
    } else {
      const finalModel = detectedModel || resolvedModel;
      console.log(`[ai] Done (model: ${finalModel}, ${fullContent.length} chars)`);
      cachedResult = {
        content: fullContent.trim(),
        generatedAt: new Date().toISOString(),
        model: finalModel,
      };
      finish(() => {
        res.write(`event: done\ndata: ${JSON.stringify({ generatedAt: cachedResult.generatedAt, model: finalModel })}\n\n`);
      });
    }
  });

  // Kill the CLI if the client disconnects mid-stream.
  res.on("close", () => {
    if (child.exitCode === null) {
      console.log("[ai] Client disconnected, killing claude process");
      child.kill();
    }
  });
}
|
|
262
|
+
|
|
263
|
+
/**
 * @returns {{content: string, generatedAt: string, model: string}|null}
 *   the most recently completed analysis, or null if none has run yet
 */
export const getCachedAnalysis = () => cachedResult;
|
|
266
|
+
|
|
267
|
+
/** Drop the cached analysis so the next run regenerates it. */
export const clearCachedAnalysis = () => {
  cachedResult = null;
};
|
|
270
|
+
|
|
271
|
+
/**
 * Model options for the UI picker, plus the currently detected default.
 * @returns {{models: Array, defaultModel: string, defaultModelLabel: string|null}}
 */
export function getAvailableModels() {
  const defaultModel = detectedDefaultModel;
  return {
    models: MODELS,
    defaultModel,
    defaultModelLabel: prettyModelName(defaultModel),
  };
}
|
package/src/export.js
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
import { createHash } from "node:crypto";
|
|
2
|
+
import { userInfo } from "node:os";
|
|
3
|
+
import { writeFile } from "node:fs/promises";
|
|
4
|
+
import { parseAllSessions } from "./parser.js";
|
|
5
|
+
import { scoreAllSessions } from "./scorer.js";
|
|
6
|
+
|
|
7
|
+
/**
 * Stable anonymized developer ID: the first 16 hex characters of the
 * SHA-256 digest of the local OS username.
 * @returns {string} 16-character lowercase hex string
 */
function hashUsername() {
  const digest = createHash("sha256")
    .update(userInfo().username)
    .digest("hex");
  return digest.slice(0, 16);
}
|
|
11
|
+
|
|
12
|
+
/**
 * Build an anonymized team-share export of the local usage data and write
 * it to disk as pretty-printed JSON.
 * @param {string} [outputPath="team-export.json"] - destination file path
 * @returns {Promise<object>} the export payload that was written
 */
export async function generateExport(outputPath = "team-export.json") {
  const sessions = await parseAllSessions();
  const data = scoreAllSessions(sessions);
  const scored = data.sessions;

  // Session start timestamps, ascending (ISO strings sort lexicographically).
  const dates = scored.map((s) => s.startTime).filter(Boolean).sort();

  // Token counts per model family; sonnet is the catch-all bucket.
  const modelTokens = {};
  let totalTokens = 0;
  for (const session of scored) {
    const id = session.model || "unknown";
    let family = "sonnet";
    if (id.includes("opus")) family = "opus";
    else if (id.includes("haiku")) family = "haiku";
    modelTokens[family] = (modelTokens[family] || 0) + session.totals.totalTokens;
    totalTokens += session.totals.totalTokens;
  }
  // Convert raw counts to a 0-1 share, rounded to two decimals.
  const modelMix = {};
  for (const [family, tokens] of Object.entries(modelTokens)) {
    modelMix[family] = totalTokens > 0 ? Math.round((tokens / totalTokens) * 100) / 100 : 0;
  }

  const count = scored.length;
  const totalCost = scored.reduce((acc, s) => acc + s.totals.estimatedCost, 0);
  const avgCacheHitRate = count > 0
    ? scored.reduce((acc, s) => acc + s.totals.cacheHitRate, 0) / count
    : 0;
  // Guard against division by zero for sessions with no user messages.
  const avgToolRatio = count > 0
    ? scored.reduce((acc, s) => acc + s.totals.toolCalls / (s.totals.userMessages || 1), 0) / count
    : 0;

  // De-duplicated tip categories.
  const tipTypes = [...new Set(data.tips.map((t) => t.type))];

  const exportData = {
    exportVersion: "1.0",
    devId: hashUsername(),
    exportDate: new Date().toISOString().slice(0, 10),
    period: {
      from: dates[0]?.slice(0, 10) || null,
      to: dates[dates.length - 1]?.slice(0, 10) || null,
    },
    summary: {
      efficiencyScore: data.overallScore,
      totalSessions: count,
      totalTokens,
      estimatedCost: Math.round(totalCost * 100) / 100,
      cacheHitRate: Math.round(avgCacheHitRate * 100) / 100,
      toolCallRatio: Math.round(avgToolRatio * 10) / 10,
      modelMix,
    },
    dailyScores: data.dailyScores,
    topInsights: tipTypes,
    badges: data.badges.map((b) => b.id),
  };

  await writeFile(outputPath, JSON.stringify(exportData, null, 2) + "\n");
  return exportData;
}
|
package/src/parser.js
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
import { readdir, stat } from "node:fs/promises";
|
|
2
|
+
import { createReadStream } from "node:fs";
|
|
3
|
+
import { createInterface } from "node:readline";
|
|
4
|
+
import { join, basename } from "node:path";
|
|
5
|
+
import { homedir } from "node:os";
|
|
6
|
+
|
|
7
|
+
// API-equivalent pricing, USD per million tokens.
const PRICING = {
  "claude-opus-4-6": { input: 5, output: 25 },
  "claude-opus-4-5": { input: 5, output: 25 },
  "claude-opus-4-20250514": { input: 15, output: 75 },
  "claude-sonnet-4-6": { input: 3, output: 15 },
  "claude-sonnet-4-5": { input: 3, output: 15 },
  "claude-sonnet-4-5-20250514": { input: 3, output: 15 },
  "claude-haiku-4-5": { input: 0.8, output: 4 },
  "claude-haiku-4-5-20251001": { input: 0.8, output: 4 },
};

// Unknown models are billed at the sonnet tier.
const DEFAULT_PRICING = { input: 3, output: 15 };
// Cache writes cost 25% extra; cache reads cost 10% of the input rate.
const CACHE_WRITE_MULTIPLIER = 1.25;
const CACHE_READ_MULTIPLIER = 0.1;

/**
 * Per-million-token price for a model, falling back to the sonnet tier.
 * @param {string|null|undefined} model
 * @returns {{input: number, output: number}}
 */
function getModelPricing(model) {
  return (model && PRICING[model]) || DEFAULT_PRICING;
}

/**
 * Estimated USD cost of a single assistant turn.
 * @param {{input: number, output: number, cacheCreation: number, cacheRead: number}} tokens
 * @param {string|null} model - model ID used for the turn
 * @returns {number} estimated cost in dollars
 */
function computeTurnCost(tokens, model) {
  const { input, output } = getModelPricing(model);
  const PER = 1_000_000;
  const inputCost = (tokens.input * input) / PER;
  const cacheWriteCost = (tokens.cacheCreation * input * CACHE_WRITE_MULTIPLIER) / PER;
  const cacheReadCost = (tokens.cacheRead * input * CACHE_READ_MULTIPLIER) / PER;
  const outputCost = (tokens.output * output) / PER;
  return inputCost + cacheWriteCost + cacheReadCost + outputCost;
}
|
|
38
|
+
|
|
39
|
+
// "-Users-archie-dev-carepatron-App" -> { name: "App", path: "/Users/archie/dev/carepatron/App" }
|
|
40
|
+
function decodeProjectDir(dirName) {
|
|
41
|
+
// Leading dash + split by dash, reconstruct as path
|
|
42
|
+
const fullPath = dirName.startsWith("-")
|
|
43
|
+
? "/" + dirName.slice(1).replace(/-/g, "/")
|
|
44
|
+
: dirName.replace(/-/g, "/");
|
|
45
|
+
const name = fullPath.split("/").pop() || dirName;
|
|
46
|
+
return { name, path: fullPath };
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
/**
 * Stream-parse a single session .jsonl transcript.
 *
 * Returns:
 *  - turns: user turns (prompt preview/length) and assistant turns
 *    (token usage, tool calls, per-turn cost, text preview);
 *  - clearPoints: turn indices where a /clear command or a compaction
 *    boundary reset the context;
 *  - modelCounts: output tokens attributed to each model seen.
 *
 * Malformed JSON lines are skipped silently.
 * @param {string} filePath - path to the .jsonl session file
 * @returns {Promise<{turns: Array, clearPoints: number[], modelCounts: Object}>}
 */
async function parseSessionFile(filePath) {
  const turns = [];
  const clearPoints = [];
  const modelCounts = {};
  let turnIndex = 0;

  const reader = createInterface({
    input: createReadStream(filePath),
    crlfDelay: Infinity,
  });

  for await (const rawLine of reader) {
    let entry;
    try {
      entry = JSON.parse(rawLine);
    } catch {
      continue;
    }

    if (entry.type === "user" && entry.message?.role === "user") {
      const content = entry.message.content;

      // A /clear command marks a context reset at the current turn index.
      if (typeof content === "string" && content.includes("<command-name>/clear</command-name>")) {
        clearPoints.push(turnIndex);
      }

      // Pull out raw text, then strip XML wrappers (command tags, system
      // reminders) and collapse whitespace.
      const rawText = typeof content === "string"
        ? content
        : Array.isArray(content)
          ? content.map((c) => c.text || "").join(" ")
          : "";
      const cleanText = rawText
        .replace(/<[^>]+>/g, " ")
        .replace(/\s+/g, " ")
        .trim();

      // Skip system-generated meta messages and empty/XML-only ones.
      if (entry.isMeta) continue;
      if (cleanText.length === 0) continue;

      turns.push({
        role: "user",
        timestamp: entry.timestamp,
        tokens: { input: 0, output: 0, cacheCreation: 0, cacheRead: 0 },
        toolCalls: [],
        promptLength: cleanText.length,
        promptPreview: cleanText.slice(0, 300),
      });
      turnIndex++;
    } else if (entry.type === "assistant" && entry.message) {
      const { message } = entry;
      const usage = message.usage;
      if (!usage) continue;

      // Attribute output tokens to the producing model.
      const model = message.model || null;
      if (model) {
        modelCounts[model] = (modelCounts[model] || 0) + (usage.output_tokens || 0);
      }

      const blocks = message.content || [];
      const toolCalls = blocks.filter((b) => b.type === "tool_use").map((b) => b.name);
      const textPreview = blocks
        .filter((b) => b.type === "text")
        .map((b) => b.text || "")
        .join(" ")
        .replace(/\s+/g, " ")
        .trim()
        .slice(0, 300);

      const tokens = {
        input: usage.input_tokens || 0,
        output: usage.output_tokens || 0,
        cacheCreation: usage.cache_creation_input_tokens || 0,
        cacheRead: usage.cache_read_input_tokens || 0,
      };

      turns.push({
        role: "assistant",
        timestamp: entry.timestamp,
        tokens,
        toolCalls,
        model,
        cost: computeTurnCost(tokens, model),
        textPreview,
      });
      turnIndex++;
    } else if (entry.type === "system" && entry.subtype === "compact_boundary") {
      // Automatic compaction also resets the conversation context.
      clearPoints.push(turnIndex);
    }
  }

  return { turns, clearPoints, modelCounts };
}
|
|
151
|
+
|
|
152
|
+
/**
 * Aggregate per-turn data into session-level totals.
 * User turns only count toward userMessages; token/cost/tool numbers come
 * from assistant turns.
 * @param {Array} turns - parsed turns from parseSessionFile
 * @returns {object} token counts, total cost, message/tool counts, cacheHitRate
 */
function computeSessionTotals(turns) {
  const acc = {
    inputTokens: 0,
    outputTokens: 0,
    cacheCreationTokens: 0,
    cacheReadTokens: 0,
    toolCalls: 0,
    userMessages: 0,
    assistantMessages: 0,
    estimatedCost: 0,
  };

  for (const turn of turns) {
    if (turn.role === "user") {
      acc.userMessages += 1;
      continue;
    }
    acc.assistantMessages += 1;
    acc.inputTokens += turn.tokens.input;
    acc.outputTokens += turn.tokens.output;
    acc.cacheCreationTokens += turn.tokens.cacheCreation;
    acc.cacheReadTokens += turn.tokens.cacheRead;
    acc.toolCalls += turn.toolCalls.length;
    acc.estimatedCost += turn.cost || 0;
  }

  // Cache hit rate = cache-read tokens over all input-side tokens.
  const totalInput = acc.inputTokens + acc.cacheCreationTokens + acc.cacheReadTokens;

  return {
    inputTokens: acc.inputTokens,
    outputTokens: acc.outputTokens,
    cacheCreationTokens: acc.cacheCreationTokens,
    cacheReadTokens: acc.cacheReadTokens,
    totalTokens: totalInput + acc.outputTokens,
    estimatedCost: acc.estimatedCost,
    toolCalls: acc.toolCalls,
    userMessages: acc.userMessages,
    assistantMessages: acc.assistantMessages,
    cacheHitRate: totalInput > 0 ? acc.cacheReadTokens / totalInput : 0,
  };
}
|
|
192
|
+
|
|
193
|
+
/**
 * Pick the model with the largest count (output tokens, per parseSessionFile).
 * Ties keep the earlier entry; an empty map or all-zero counts yield null.
 * @param {Object<string, number>} modelCounts
 * @returns {string|null}
 */
function primaryModel(modelCounts) {
  const winner = Object.entries(modelCounts).reduce(
    (best, [model, count]) => (count > best.count ? { model, count } : best),
    { model: null, count: 0 }
  );
  return winner.model;
}
|
|
204
|
+
|
|
205
|
+
/**
 * Discover and parse every Claude Code session under <claudeDir>/projects.
 * Missing directories, unreadable files, and sessions with no assistant
 * output are skipped silently.
 * @param {string} [claudeDir] - base Claude dir; defaults to ~/.claude
 * @returns {Promise<Array>} sessions sorted newest-first by start time
 */
export async function parseAllSessions(claudeDir) {
  const baseDir = claudeDir || join(homedir(), ".claude");
  const projectsDir = join(baseDir, "projects");

  let projectDirs;
  try {
    projectDirs = await readdir(projectsDir);
  } catch {
    // No projects directory at all — nothing to report.
    return [];
  }

  const sessions = [];

  for (const dirName of projectDirs) {
    const projPath = join(projectsDir, dirName);
    const info = await stat(projPath).catch(() => null);
    if (!info?.isDirectory()) continue;

    const { name: projectName, path: projectPath } = decodeProjectDir(dirName);

    let files;
    try {
      files = await readdir(projPath);
    } catch {
      continue;
    }

    for (const file of files) {
      if (!file.endsWith(".jsonl")) continue;

      try {
        const filePath = join(projPath, file);
        const { turns, clearPoints, modelCounts } = await parseSessionFile(filePath);

        // Sessions with no assistant output carry no usage data — skip.
        if (!turns.some((t) => t.role === "assistant")) continue;

        const timestamps = turns.map((t) => t.timestamp).filter(Boolean);
        const firstUserTurn = turns.find((t) => t.role === "user");

        sessions.push({
          id: basename(file, ".jsonl"),
          project: projectName,
          projectPath,
          model: primaryModel(modelCounts),
          startTime: timestamps[0] || null,
          endTime: timestamps[timestamps.length - 1] || null,
          title: firstUserTurn?.promptPreview?.slice(0, 120) || "Untitled session",
          turns,
          totals: computeSessionTotals(turns),
          clearPoints,
        });
      } catch {
        // Unreadable session file — skip it.
        continue;
      }
    }
  }

  // Newest first; sessions missing a start time keep their relative order.
  sessions.sort((a, b) => {
    if (!a.startTime || !b.startTime) return 0;
    return new Date(b.startTime) - new Date(a.startTime);
  });

  return sessions;
}
|