cchubber 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,247 @@
1
+ import { readdirSync, readFileSync, existsSync, statSync } from 'fs';
2
+ import { join, basename } from 'path';
3
+ import { homedir } from 'os';
4
+
5
/**
 * Collect token-usage entries from every JSONL conversation transcript
 * Claude Code has written to disk.
 * This is the primary data source — same approach as ccusage (12K stars, proven pattern).
 * Claude Code stores full conversation transcripts with token usage per message.
 */
export function readAllJSONL(claudeDir) {
  const collected = [];
  const primaryDir = join(claudeDir, 'projects');
  // Newer Claude Code builds on Linux follow the XDG base-dir layout.
  const xdgDir = join(homedir(), '.config', 'claude', 'projects');

  if (existsSync(primaryDir)) {
    readProjectsDir(primaryDir, collected);
  }

  // Only scan the XDG location when it is a distinct directory.
  if (xdgDir !== primaryDir && existsSync(xdgDir)) {
    readProjectsDir(xdgDir, collected);
  }

  return collected.sort((a, b) => a.timestamp.localeCompare(b.timestamp));
}
28
+
29
/**
 * Scan one Claude Code `projects` directory and append a usage entry for
 * every assistant message that carries token usage.
 *
 * Robustness fix: the original wrapped the entire scan in one try/catch,
 * so a single broken symlink (statSync throw) or one unreadable project
 * directory aborted the whole scan, silently dropping all remaining
 * projects. Failures are now isolated per entry / per project / per file.
 *
 * @param {string} dir - A `projects` directory containing per-project subdirs.
 * @param {Array<object>} entries - Accumulator; mutated in place.
 */
function readProjectsDir(dir, entries) {
  let projectHashes;
  try {
    projectHashes = readdirSync(dir).filter((name) => {
      try {
        return statSync(join(dir, name)).isDirectory();
      } catch {
        return false; // Broken symlink / permission error: skip this entry only.
      }
    });
  } catch {
    return; // Directory itself is unreadable.
  }

  for (const hash of projectHashes) {
    const projectDir = join(dir, hash);
    let jsonlFiles;
    try {
      jsonlFiles = readdirSync(projectDir).filter(f => f.endsWith('.jsonl'));
    } catch {
      continue; // One unreadable project must not stop the others.
    }

    for (const file of jsonlFiles) {
      const sessionId = basename(file, '.jsonl');
      const filePath = join(projectDir, file);

      let raw;
      try {
        raw = readFileSync(filePath, 'utf-8');
      } catch {
        continue; // Skip unreadable files.
      }

      for (const line of raw.split('\n')) {
        if (!line.trim()) continue;
        try {
          const record = JSON.parse(line);

          // Only assistant messages have token usage.
          if (record.type !== 'assistant') continue;

          const usage = record.message?.usage;
          if (!usage) continue;

          entries.push({
            sessionId,
            projectHash: hash,
            timestamp: record.timestamp || '',
            model: record.message?.model || 'unknown',
            inputTokens: usage.input_tokens || 0,
            outputTokens: usage.output_tokens || 0,
            cacheCreationTokens: usage.cache_creation_input_tokens || 0,
            cacheReadTokens: usage.cache_read_input_tokens || 0,
            costUSD: record.costUSD || 0, // Pre-calculated by Claude Code
          });
        } catch {
          // Skip malformed lines
        }
      }
    }
  }
}
82
+
83
/**
 * Aggregate JSONL entries into daily summaries.
 *
 * @param {Array<object>} entries - Usage entries produced by readAllJSONL().
 * @returns {Array<object>} One record per calendar day (sorted ascending) with
 *   token totals, cost, distinct-session count, and a per-model breakdown.
 */
export function aggregateDaily(entries) {
  const byDate = {};

  for (const entry of entries) {
    const date = extractDailyDate(entry.timestamp);
    if (!date) continue;

    if (!byDate[date]) {
      byDate[date] = {
        date,
        totalCost: 0,
        inputTokens: 0,
        outputTokens: 0,
        cacheCreationTokens: 0,
        cacheReadTokens: 0,
        messageCount: 0,
        models: {},
        sessions: new Set(),
      };
    }

    const day = byDate[date];
    day.totalCost += entry.costUSD;
    day.inputTokens += entry.inputTokens;
    day.outputTokens += entry.outputTokens;
    day.cacheCreationTokens += entry.cacheCreationTokens;
    day.cacheReadTokens += entry.cacheReadTokens;
    day.messageCount++;
    day.sessions.add(entry.sessionId);

    // Per-model breakdown
    const model = entry.model;
    if (!day.models[model]) {
      day.models[model] = {
        inputTokens: 0,
        outputTokens: 0,
        cacheCreationTokens: 0,
        cacheReadTokens: 0,
        cost: 0,
        messageCount: 0,
      };
    }
    const m = day.models[model];
    m.inputTokens += entry.inputTokens;
    m.outputTokens += entry.outputTokens;
    m.cacheCreationTokens += entry.cacheCreationTokens;
    m.cacheReadTokens += entry.cacheReadTokens;
    m.cost += entry.costUSD;
    m.messageCount++;
  }

  // Convert sets to counts and sort
  return Object.values(byDate)
    .map(d => ({
      ...d,
      sessionCount: d.sessions.size,
      sessions: undefined,
      cacheOutputRatio: d.outputTokens > 0 ? Math.round(d.cacheReadTokens / d.outputTokens) : 0,
    }))
    .sort((a, b) => a.date.localeCompare(b.date));
}

/**
 * Resolve a timestamp to a YYYY-MM-DD bucket key.
 * Accepts ISO-8601 strings and epoch-millisecond strings.
 *
 * Bug fix: the previous `length >= 10` heuristic treated 13-digit
 * epoch-millisecond strings as ISO dates and sliced them into garbage
 * (e.g. "1705320000"), making the epoch branch unreachable for ms values.
 * Only strings that actually start with a date are sliced now.
 *
 * @param {string} timestamp - ISO-8601 string or epoch milliseconds.
 * @returns {string|undefined} Date key, or undefined when unparsable.
 */
function extractDailyDate(timestamp) {
  if (!timestamp) return undefined;
  if (/^\d{4}-\d{2}-\d{2}/.test(timestamp)) {
    return timestamp.slice(0, 10);
  }
  const ms = Number.parseInt(timestamp, 10);
  if (Number.isNaN(ms)) return undefined;
  return new Date(ms).toISOString().slice(0, 10);
}
157
+
158
/**
 * Aggregate entries into per-model totals, keyed by cleaned model name.
 */
export function aggregateByModel(entries) {
  const totals = {};

  for (const entry of entries) {
    const key = cleanModelName(entry.model);
    if (!totals[key]) {
      totals[key] = {
        inputTokens: 0,
        outputTokens: 0,
        cacheCreationTokens: 0,
        cacheReadTokens: 0,
        cost: 0,
        messageCount: 0,
      };
    }

    const bucket = totals[key];
    bucket.inputTokens += entry.inputTokens;
    bucket.outputTokens += entry.outputTokens;
    bucket.cacheCreationTokens += entry.cacheCreationTokens;
    bucket.cacheReadTokens += entry.cacheReadTokens;
    bucket.cost += entry.costUSD;
    bucket.messageCount += 1;
  }

  return totals;
}
180
+
181
/**
 * Aggregate entries into per-project totals.
 * Uses the project directory hash — resolves to real path where possible.
 *
 * @param {Array<object>} entries - Usage entries produced by readAllJSONL().
 * @param {string} claudeDir - Root Claude data directory (contains `projects/`).
 * @returns {Array<object>} Per-project totals, sorted by message count desc.
 */
export function aggregateByProject(entries, claudeDir) {
  const byProject = {};

  for (const entry of entries) {
    const hash = entry.projectHash || 'unknown';
    if (!byProject[hash]) {
      byProject[hash] = {
        hash,
        path: null,
        inputTokens: 0,
        outputTokens: 0,
        cacheCreationTokens: 0,
        cacheReadTokens: 0,
        cost: 0,
        messageCount: 0,
        sessionCount: 0,
        sessions: new Set(),
        firstSeen: entry.timestamp,
        lastSeen: entry.timestamp,
      };
    }
    const p = byProject[hash];
    p.inputTokens += entry.inputTokens;
    p.outputTokens += entry.outputTokens;
    p.cacheCreationTokens += entry.cacheCreationTokens;
    p.cacheReadTokens += entry.cacheReadTokens;
    // Consistency fix: the daily and per-model aggregates both accumulate
    // costUSD, but the per-project totals silently dropped it.
    p.cost += entry.costUSD;
    p.messageCount++;
    p.sessions.add(entry.sessionId);
    if (entry.timestamp > p.lastSeen) p.lastSeen = entry.timestamp;
    if (entry.timestamp < p.firstSeen) p.firstSeen = entry.timestamp;
  }

  // Resolve project paths from .claude/projects/<hash>/.project_path if available
  const projectsDir = join(claudeDir, 'projects');
  for (const proj of Object.values(byProject)) {
    proj.sessionCount = proj.sessions.size;
    delete proj.sessions;

    // Try to read the project path file
    try {
      const pathFile = join(projectsDir, proj.hash, '.project_path');
      if (existsSync(pathFile)) {
        const raw = readFileSync(pathFile, 'utf-8').trim();
        proj.path = raw;
        // Use last directory segment as display name
        proj.name = raw.split(/[/\\]/).filter(Boolean).pop() || proj.hash.slice(0, 8);
      } else {
        proj.name = proj.hash.slice(0, 8);
      }
    } catch {
      proj.name = proj.hash.slice(0, 8);
    }
  }

  return Object.values(byProject)
    .sort((a, b) => b.messageCount - a.messageCount);
}
241
+
242
/**
 * Strip the vendor prefix and trailing release-date stamp from a model id,
 * e.g. "claude-sonnet-4-20250514" -> "sonnet-4".
 *
 * Dead-code fix: the original's final `.replace(/-20\d{6}$/, '')` could never
 * match — any `-20\d{6}` suffix is an 8-digit suffix and was already removed
 * by `/-\d{8}$/`.
 *
 * @param {string|undefined} name - Raw model id from the transcript.
 * @returns {string} Cleaned display name ("unknown" when absent).
 */
function cleanModelName(name) {
  return (name || 'unknown')
    .replace('claude-', '')
    .replace(/-\d{8}$/, '');
}
@@ -0,0 +1,82 @@
1
+ import { readFileSync, existsSync } from 'fs';
2
+ import { join } from 'path';
3
+ import https from 'https';
4
+
5
/**
 * Fetch current OAuth usage for the logged-in Claude account.
 * Falls back to the on-disk cached snapshot when the API call fails;
 * returns null when no token is available.
 */
export async function readOAuthUsage(claudeDir) {
  const token = getOAuthToken(claudeDir);
  if (!token) return null;

  try {
    return await fetchUsage(token);
  } catch {
    // Network / auth failure: serve the last cached snapshot instead.
    return readCachedUsage(claudeDir);
  }
}
17
+
18
/**
 * Resolve the OAuth access token: the CLAUDE_CODE_OAUTH_TOKEN environment
 * variable wins; otherwise fall back to the credentials file on disk.
 * Returns null when neither source yields a token.
 */
function getOAuthToken(claudeDir) {
  const envToken = process.env.CLAUDE_CODE_OAUTH_TOKEN;
  if (envToken) return envToken;

  const credPath = join(claudeDir, '.credentials.json');
  if (!existsSync(credPath)) return null;

  try {
    const creds = JSON.parse(readFileSync(credPath, 'utf-8'));
    return creds?.claudeAiOauth?.accessToken || null;
  } catch {
    return null; // Unreadable or malformed credentials file.
  }
}
36
+
37
/**
 * GET https://api.anthropic.com/api/oauth/usage with the given bearer token.
 * Resolves with the parsed JSON body; rejects on non-200 status, invalid
 * JSON, network error, or a 5-second timeout.
 */
function fetchUsage(token) {
  return new Promise((resolve, reject) => {
    const req = https.request(
      {
        hostname: 'api.anthropic.com',
        path: '/api/oauth/usage',
        method: 'GET',
        headers: {
          'Authorization': `Bearer ${token}`,
          'anthropic-beta': 'oauth-2025-04-20',
          'Content-Type': 'application/json',
        },
      },
      (res) => {
        let body = '';
        res.on('data', (chunk) => { body += chunk; });
        res.on('end', () => {
          if (res.statusCode !== 200) {
            reject(new Error(`HTTP ${res.statusCode}`));
            return;
          }
          try {
            resolve(JSON.parse(body));
          } catch {
            reject(new Error('Invalid JSON'));
          }
        });
      },
    );

    req.on('error', reject);
    req.setTimeout(5000, () => {
      req.destroy();
      reject(new Error('Timeout'));
    });
    req.end();
  });
}
71
+
72
/**
 * Read the statusline usage cache Claude Code keeps under <claudeDir>/tmp.
 * Returns null when the cache file is missing or unparsable.
 */
function readCachedUsage(claudeDir) {
  const cachePath = join(claudeDir, 'tmp', 'statusline-usage.json');
  if (!existsSync(cachePath)) return null;

  try {
    return JSON.parse(readFileSync(cachePath, 'utf-8'));
  } catch {
    return null; // Corrupt or unreadable cache.
  }
}
@@ -0,0 +1,42 @@
1
+ import { readdirSync, readFileSync, existsSync } from 'fs';
2
+ import { join } from 'path';
3
+
4
/**
 * Read per-session metadata summaries from
 * <claudeDir>/usage-data/session-meta/*.json.
 *
 * @param {string} claudeDir - Root Claude data directory.
 * @returns {Array<object>} Session summaries sorted by start time; empty
 *   array when the directory is missing or unreadable.
 */
export function readSessionMeta(claudeDir) {
  const metaDir = join(claudeDir, 'usage-data', 'session-meta');
  if (!existsSync(metaDir)) return [];

  const sessions = [];

  try {
    const files = readdirSync(metaDir).filter(f => f.endsWith('.json'));

    for (const file of files) {
      try {
        const raw = readFileSync(join(metaDir, file), 'utf-8');
        const data = JSON.parse(raw);
        sessions.push({
          // Bug fix: strip the '.json' *suffix* only. The previous
          // `file.replace('.json', '')` removed the first occurrence,
          // mangling filenames containing ".json" mid-name.
          sessionId: data.session_id || file.slice(0, -'.json'.length),
          projectPath: data.project_path || 'unknown',
          startTime: data.start_time || null,
          durationMinutes: data.duration_minutes || 0,
          userMessageCount: data.user_message_count || 0,
          assistantMessageCount: data.assistant_message_count || 0,
          toolCounts: data.tool_counts || {},
          inputTokens: data.input_tokens || 0,
          outputTokens: data.output_tokens || 0,
          usesTaskAgent: data.uses_task_agent || false,
          usesMcp: data.uses_mcp || false,
          linesAdded: data.lines_added || 0,
          linesRemoved: data.lines_removed || 0,
          filesModified: data.files_modified || 0,
        });
      } catch {
        // Skip corrupt files
      }
    }
  } catch {
    return [];
  }

  return sessions.sort((a, b) => (a.startTime || '').localeCompare(b.startTime || ''));
}
@@ -0,0 +1,54 @@
1
+ import { readFileSync, existsSync } from 'fs';
2
+ import { join } from 'path';
3
+
4
/**
 * Parse Claude Code's stats-cache.json into a normalized shape: per-day
 * model token counts joined with that day's activity counters, plus the
 * aggregate per-model usage. Returns null when the cache is absent or
 * unparsable.
 */
export function readStatsCache(claudeDir) {
  const filePath = join(claudeDir, 'stats-cache.json');
  if (!existsSync(filePath)) return null;

  try {
    const data = JSON.parse(readFileSync(filePath, 'utf-8'));

    // Index activity rows by date so each token row can be joined in O(1).
    const activityByDate = {};
    for (const row of data.dailyActivity || []) {
      activityByDate[row.date] = row;
    }

    const dailyData = (data.dailyModelTokens || []).map((row) => {
      const activity = activityByDate[row.date] || {};
      const models = Object.entries(row.tokensByModel || {}).map(
        ([modelName, tokens]) => ({ modelName, tokens }),
      );
      return {
        date: row.date,
        models,
        messageCount: activity.messageCount || 0,
        sessionCount: activity.sessionCount || 0,
        toolCallCount: activity.toolCallCount || 0,
      };
    });

    return {
      version: data.version,
      lastComputedDate: data.lastComputedDate,
      totalSessions: data.totalSessions || 0,
      totalMessages: data.totalMessages || 0,
      dailyData,
      // Aggregate model usage with full token breakdowns, as stored.
      modelUsage: data.modelUsage || {},
    };
  } catch {
    return null; // Missing file permissions or malformed JSON.
  }
}