agentlytics 0.1.15 → 0.1.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -31,6 +31,7 @@ You switch between Cursor, Windsurf, Claude Code, VS Code Copilot, and more —
31
31
  - ✗ Can't compare which editor is more effective
32
32
  - ✗ Can't search across all your AI conversations
33
33
  - ✗ No way to share session context with your team
34
+ - ✗ No unified view of your plans, credits, and rate limits
34
35
 
35
36
  ## The Solution
36
37
 
@@ -77,6 +78,7 @@ npx agentlytics --collect
77
78
  - **Projects** — Per-project analytics: sessions, messages, tokens, models, editor breakdown, and drill-down detail views
78
79
  - **Deep Analysis** — Tool frequency heatmaps, model distribution, token breakdown, and filterable drill-down analytics
79
80
  - **Compare** — Side-by-side editor comparison with efficiency ratios, token usage, and session patterns
81
+ - **Subscriptions** — Live view of your editor plans, usage quotas, remaining credits, and rate limits across Cursor, Windsurf, Claude Code, Copilot, Codex, and more
80
82
  - **Relay** — Share AI session context across your team via MCP
81
83
 
82
84
  ## Supported Editors
@@ -0,0 +1,507 @@
1
+ const { execSync } = require('child_process');
2
+ const os = require('os');
3
+
4
// Static fallback for legacy placeholders no longer returned by the LS.
// Maps opaque placeholder UIDs (as stored on old trajectories) to a
// human-friendly model label; seeds the dynamic map built in getModelMap().
const LEGACY_MODEL_MAP = {
  'MODEL_PLACEHOLDER_M1': 'Claude 3.5 Sonnet',
  'MODEL_PLACEHOLDER_M2': 'Claude 3.5 Sonnet',
  'MODEL_PLACEHOLDER_M3': 'Claude 3.5 Sonnet',
  'MODEL_PLACEHOLDER_M4': 'Claude 3.5 Haiku',
  'MODEL_PLACEHOLDER_M5': 'Claude 3.5 Haiku',
  'MODEL_PLACEHOLDER_M6': 'Claude 3.5 Haiku',
  'MODEL_PLACEHOLDER_M7': 'Claude 3.5 Sonnet',
  'MODEL_PLACEHOLDER_M8': 'Claude 3.5 Sonnet',
  'MODEL_PLACEHOLDER_M9': 'Claude 3.5 Sonnet',
  'MODEL_PLACEHOLDER_M10': 'Claude 3.5 Sonnet',
  'MODEL_CLAUDE_4_5_SONNET': 'Claude 4.5 Sonnet',
};

// Dynamic model map populated from GetUserStatus RPC (placeholder → friendly label).
// Lazily filled by getModelMap() and cleared by resetCache().
let _modelMap = null;
21
+
22
// Return the placeholder→label model map, building it on first use.
// Starts from LEGACY_MODEL_MAP and overlays live data from the language
// server's GetUserStatus RPC; the RPC is best-effort and failures are ignored.
function getModelMap() {
  if (_modelMap) return _modelMap;

  const map = Object.assign({}, LEGACY_MODEL_MAP);
  _modelMap = map;

  try {
    const status = callRpc('GetUserStatus', {});
    const configs = status?.userStatus?.cascadeModelConfigData?.clientModelConfigs || [];
    configs.forEach((cfg) => {
      const placeholder = cfg.modelOrAlias?.model;
      if (placeholder && cfg.label) map[placeholder] = cfg.label;
    });
  } catch {}

  return map;
}
36
+
37
// Convert a friendly label into a pricing-compatible model ID.
//   "Gemini 3.1 Pro (High)"       → "gemini-3.1-pro"
//   "Claude Sonnet 4.6 (Thinking)" → "claude-sonnet-4.6"
function labelToModelId(label) {
  // Drop any parenthesised qualifier such as "(High)" or "(Thinking)".
  const withoutQualifier = label.replace(/\s*\([^)]*\)\s*/g, '');
  // Lowercase and dash-join the remaining words.
  const slug = withoutQualifier.trim().toLowerCase();
  return slug.replace(/\s+/g, '-');
}
47
+
48
// Normalize a raw model UID to a pricing-compatible ID.
// Known placeholders are mapped via their friendly label; unknown IDs pass
// through unchanged, and a missing ID yields null.
function normalizeModel(modelId) {
  if (!modelId) return null;
  const friendlyLabel = getModelMap()[modelId];
  return friendlyLabel ? labelToModelId(friendlyLabel) : modelId;
}
55
+
56
// ============================================================
// Cross-platform process utilities
// ============================================================

// Platform switch used by the process/port helpers below.
const IS_WINDOWS = process.platform === 'win32';
61
+
62
// Snapshot the running processes as { commandLine, pid } pairs (both strings).
// Best-effort: any failure (missing tool, permissions, timeout) yields [].
function getProcessList() {
  try {
    if (IS_WINDOWS) {
      // NOTE(review): `wmic` is deprecated and absent on recent Windows 11
      // builds — a PowerShell Get-CimInstance fallback may be needed. TODO confirm.
      const output = execSync('wmic process get CommandLine,ProcessId /format:csv', {
        encoding: 'utf-8',
        maxBuffer: 10 * 1024 * 1024,
      });
      const lines = output.split('\n').slice(1); // drop the CSV header row
      return lines.map(line => {
        const parts = line.split(',');
        if (parts.length < 2) return null;
        // /format:csv rows are "Node,CommandLine,ProcessId"; joining everything
        // before the last comma preserves command lines that contain commas.
        // The leading host name ends up inside `commandLine`, but consumers
        // only substring-match against it, so that is harmless.
        const commandLine = parts.slice(0, -1).join(',').trim().replace(/^"|"$/g, '');
        const pid = parts[parts.length - 1].trim();
        return { commandLine, pid };
      }).filter(Boolean);
    } else {
      const output = execSync('ps aux', { encoding: 'utf-8', maxBuffer: 10 * 1024 * 1024 });
      return output.split('\n').slice(1).map(line => {
        const parts = line.trim().split(/\s+/);
        if (parts.length < 11) return null; // `ps aux` emits 11 columns; shorter lines are noise
        const pid = parts[1];
        const commandLine = parts.slice(10).join(' '); // COMMAND spans column 11 to end of line
        return { commandLine, pid };
      }).filter(Boolean);
    }
  } catch { return []; }
}
89
+
90
// Return the local ports (numbers) on which `pid` is listening on 127.0.0.1.
// Best-effort: returns [] on any failure or when nothing is listening.
function getListeningPorts(pid) {
  const pidStr = String(pid);
  try {
    if (IS_WINDOWS) {
      const output = execSync(`netstat -ano | findstr ${pidStr}`, {
        encoding: 'utf-8',
        maxBuffer: 10 * 1024 * 1024,
      });
      const ports = [];
      for (const line of output.split('\n')) {
        // The last column of `netstat -ano` is the owning PID. Compare the
        // whole token: the previous endsWith() check also matched PIDs that
        // merely end with the same digits (pid 123 matched pid 4123).
        const cols = line.trim().split(/\s+/);
        if (cols[cols.length - 1] !== pidStr) continue;
        const match = line.match(/127\.0\.0\.1:(\d+).*LISTENING/);
        if (match) {
          ports.push(parseInt(match[1], 10));
        }
      }
      return ports;
    } else {
      // -a ANDs the filters: TCP sockets AND owned by this pid.
      const output = execSync(`lsof -i TCP -P -n -a -p ${pidStr} 2>/dev/null`, {
        encoding: 'utf-8',
        maxBuffer: 10 * 1024 * 1024,
      });
      const ports = [];
      for (const line of output.split('\n')) {
        const match = line.match(/TCP\s+127\.0\.0\.1:(\d+)\s+\(LISTEN\)/);
        if (match) {
          ports.push(parseInt(match[1], 10));
        }
      }
      return ports;
    }
  } catch { return []; }
}
122
+
123
// ============================================================
// Find running Antigravity language server (port + CSRF token)
// ============================================================

// Memoized result of findLanguageServer():
//   null  = not probed yet
//   false = probed this run and not found
//   { port, csrf, pid } = found server
let _lsCache = null;
128
+
129
// Locate the running Antigravity language-server process and return
// { port, csrf, pid }, or null when none is found. The result (including a
// miss) is memoized until resetCache().
function findLanguageServer() {
  // Cached hit returns the object; a cached miss (`false`) now consistently
  // returns null — previously the raw `false` leaked to callers.
  if (_lsCache) return _lsCache;
  if (_lsCache === false) return null;

  const serverProcessName = IS_WINDOWS
    ? 'language_server_windows'
    : process.platform === 'darwin'
      ? 'language_server_macos'
      : 'language_server_linux';

  for (const proc of getProcessList()) {
    const { commandLine, pid } = proc;
    if (!commandLine.includes(serverProcessName)) continue;

    // Require an app-data dir containing "antigravity" so we don't pick up
    // another product's instance of the same server binary.
    const appDirMatch = commandLine.match(/--app_data_dir\s+(\S+)/);
    if (!appDirMatch || !appDirMatch[1].includes('antigravity')) continue;

    // The CSRF token is mandatory for every RPC; skip servers without one.
    const csrfMatch = commandLine.match(/--csrf_token\s+(\S+)/);
    if (!csrfMatch) continue;

    const serverPortMatch = commandLine.match(/--server_port\s+(\d+)/);
    const ports = getListeningPorts(pid);
    if (ports.length === 0) continue;

    // Prefer the advertised --server_port when it is actually bound;
    // otherwise fall back to the lowest listening port.
    let port;
    if (serverPortMatch) {
      port = parseInt(serverPortMatch[1], 10);
      if (!ports.includes(port)) port = Math.min(...ports);
    } else {
      port = Math.min(...ports);
    }

    _lsCache = { port, csrf: csrfMatch[1], pid };
    return _lsCache;
  }

  _lsCache = false; // remember the miss so we don't re-scan every call
  return null;
}
167
+
168
+ // ============================================================
169
+ // Connect protocol HTTP client (always HTTPS, always main CSRF)
170
+ // ============================================================
171
+
172
// POST a Connect-protocol RPC to the local language server and return the
// parsed JSON response, or null on any failure (no server, curl error,
// timeout, unparseable body).
function callRpc(method, body) {
  const ls = findLanguageServer();
  if (!ls) return null;

  const payload = JSON.stringify(body || {});
  const url = `https://127.0.0.1:${ls.port}/exa.language_server_pb.LanguageServerService/${method}`;

  try {
    const { execFileSync } = require('child_process');
    // execFileSync with an argv array spawns curl without a shell, so the
    // CSRF token and JSON payload can never be interpreted as shell
    // metacharacters (the previous string-built command was injectable).
    // -k is required: the server uses a self-signed localhost certificate.
    const result = execFileSync('curl', [
      '-s', '-k', '-X', 'POST', url,
      '-H', 'Content-Type: application/json',
      '-H', `x-codeium-csrf-token: ${ls.csrf}`,
      '-d', payload,
      '--max-time', '10',
    ], { encoding: 'utf-8', maxBuffer: 50 * 1024 * 1024 });
    return JSON.parse(result);
  } catch { return null; }
}
191
+
192
// ============================================================
// Adapter interface
// ============================================================

// Adapter identifier; used as the `source` tag on chats and usage payloads.
const name = 'antigravity';
197
+
198
// List all cascade trajectories known to the language server as chat
// summaries. Returns [] when the server is unreachable or has none.
function getChats() {
  const resp = callRpc('GetAllCascadeTrajectories', {});
  const summaries = resp?.trajectorySummaries;
  if (!summaries) return [];

  return Object.entries(summaries).map(([cascadeId, summary]) => {
    const firstWorkspace = (summary.workspaces || [])[0];
    // Workspace URIs arrive as file:// URIs; strip the scheme for a plain path.
    const folder = firstWorkspace?.workspaceFolderAbsoluteUri?.replace('file://', '') || null;
    const rawModel = summary.lastGeneratorModelUid;
    return {
      source: 'antigravity',
      composerId: cascadeId,
      name: summary.summary || null,
      createdAt: summary.createdTime ? new Date(summary.createdTime).getTime() : null,
      lastUpdatedAt: summary.lastModifiedTime ? new Date(summary.lastModifiedTime).getTime() : null,
      mode: 'cascade',
      folder,
      encrypted: false,
      bubbleCount: summary.stepCount || 0,
      _stepCount: summary.stepCount,
      _model: rawModel ? normalizeModel(rawModel) : rawModel,
      _rawModel: rawModel,
    };
  });
}
225
+
226
// Fetch the raw step list for a chat.
// Primary path: GetCascadeTrajectorySteps (returns more steps); falls back
// to the older GetCascadeTrajectory RPC, then to [].
function getSteps(chat) {
  const primary = callRpc('GetCascadeTrajectorySteps', { cascadeId: chat.composerId });
  if (primary?.steps?.length > 0) return primary.steps;

  const fallback = callRpc('GetCascadeTrajectory', { cascadeId: chat.composerId });
  return fallback?.trajectory?.steps || [];
}
237
+
238
/**
 * Get the tail messages beyond the step limit using generatorMetadata.
 * The last generatorMetadata entry with messagePrompts has the conversation context.
 * We find the overlap with step-based messages by matching the last user message content.
 *
 * @param {object} chat - Chat summary (only `composerId` is read).
 * @param {Array<{role: string, content: string}>} stepMessages - Messages already
 *   parsed from steps; used to locate the overlap point.
 * @returns {Array<{role: string, content: string}>} Messages after the overlap,
 *   or [] when the RPC fails or no overlap can be established.
 */
function getTailMessages(chat, stepMessages) {
  const resp = callRpc('GetCascadeTrajectory', { cascadeId: chat.composerId });
  if (!resp || !resp.trajectory) return [];

  const gm = resp.trajectory.generatorMetadata || [];
  // Find the last entry that has messagePrompts — the most recent generation
  // carries the fullest conversation context.
  let lastWithMsgs = null;
  for (let i = gm.length - 1; i >= 0; i--) {
    if (gm[i].chatModel && gm[i].chatModel.messagePrompts && gm[i].chatModel.messagePrompts.length > 0) {
      lastWithMsgs = gm[i];
      break;
    }
  }
  if (!lastWithMsgs) return [];

  const mp = lastWithMsgs.chatModel.messagePrompts;

  // Find the last substantial user message from step-based parsing.
  // Length > 20 skips trivial acknowledgements that could match many prompts.
  let lastUserContent = '';
  for (let i = stepMessages.length - 1; i >= 0; i--) {
    if (stepMessages[i].role === 'user' && stepMessages[i].content.length > 20) {
      lastUserContent = stepMessages[i].content;
      break;
    }
  }
  if (!lastUserContent) return [];

  // Find this message in the messagePrompts (search from end for efficiency).
  // Only the first 50 chars are matched: prompts may embed the message with
  // extra surrounding context, so an exact-equality check would miss it.
  const needle = lastUserContent.substring(0, 50);
  let matchIdx = -1;
  for (let i = mp.length - 1; i >= 0; i--) {
    if (mp[i].source === 'CHAT_MESSAGE_SOURCE_USER' && mp[i].prompt && mp[i].prompt.includes(needle)) {
      matchIdx = i;
      break;
    }
  }
  // No overlap found, or the match is the final prompt (nothing after it).
  if (matchIdx < 0 || matchIdx >= mp.length - 1) return [];

  // Convert everything after the match point to messages.
  // SYSTEM maps to 'assistant': in this proto the model's own turns are
  // tagged SYSTEM — NOTE(review): confirm against the LS message schema.
  const tail = [];
  for (let i = matchIdx + 1; i < mp.length; i++) {
    const m = mp[i];
    const src = m.source || '';
    const prompt = m.prompt || '';
    if (!prompt || !prompt.trim()) continue;

    let role;
    if (src === 'CHAT_MESSAGE_SOURCE_USER') role = 'user';
    else if (src === 'CHAT_MESSAGE_SOURCE_SYSTEM') role = 'assistant';
    else if (src === 'CHAT_MESSAGE_SOURCE_TOOL') role = 'tool';
    else continue; // unknown sources are dropped

    tail.push({ role, content: prompt });
  }
  return tail;
}
299
+
300
// Convert one raw trajectory step into a { role, content } message, or null
// for steps that carry no displayable content. Tool-ish steps become
// role 'tool' with a bracket-tagged summary; outputs are capped at 500 chars.
function parseStep(step) {
  const type = step.type || '';
  const meta = step.metadata || {};

  // Direct user input.
  if (type === 'CORTEX_STEP_TYPE_USER_INPUT' && step.userInput) {
    return {
      role: 'user',
      // Prefer the plain response; fall back to joining rich-input items.
      content: step.userInput.userResponse || step.userInput.items?.map(i => i.text).join('') || '',
    };
  }

  // Agent asked the user a question; surface the answer (or the question
  // itself if unanswered) as a user turn.
  if (type === 'CORTEX_STEP_TYPE_ASK_USER_QUESTION' && step.askUserQuestion) {
    const q = step.askUserQuestion;
    return {
      role: 'user',
      content: q.userResponse || q.question || '',
    };
  }

  // Assistant turn: thinking + response text + tool-call markers.
  if (type === 'CORTEX_STEP_TYPE_PLANNER_RESPONSE' && step.plannerResponse) {
    const pr = step.plannerResponse;
    const parts = [];
    if (pr.thinking) parts.push(`[thinking] ${pr.thinking}`);
    // modifiedResponse (post-edit) wins over the raw response fields.
    const text = pr.modifiedResponse || pr.response || pr.textContent || '';
    if (text.trim()) parts.push(text.trim());
    const _toolCalls = [];
    if (pr.toolCalls && pr.toolCalls.length > 0) {
      for (const tc of pr.toolCalls) {
        let args = {};
        // argumentsJson may be absent or malformed; default to {}.
        try { args = tc.argumentsJson ? JSON.parse(tc.argumentsJson) : {}; } catch { args = {}; }
        const argKeys = typeof args === 'object' ? Object.keys(args).join(', ') : '';
        parts.push(`[tool-call: ${tc.name}(${argKeys})]`);
        _toolCalls.push({ name: tc.name, args });
      }
    }
    if (parts.length > 0) {
      const model = meta.generatorModel || meta.generatorModelUid;
      return {
        role: 'assistant',
        content: parts.join('\n'),
        _model: model ? normalizeModel(model) : model,
        _toolCalls,
      };
    }
    return null; // planner step with no visible content
  }

  // Tool-like step types — each becomes a compact role:'tool' message.
  if (type === 'CORTEX_STEP_TYPE_TOOL_EXECUTION' && step.toolExecution) {
    const te = step.toolExecution;
    const toolName = te.toolName || te.name || 'tool';
    const result = te.output || te.result || '';
    // Non-string results are JSON-stringified before truncation.
    const preview = typeof result === 'string' ? result.substring(0, 500) : JSON.stringify(result).substring(0, 500);
    return { role: 'tool', content: `[${toolName}] ${preview}` };
  }

  if (type === 'CORTEX_STEP_TYPE_RUN_COMMAND' && step.runCommand) {
    const rc = step.runCommand;
    const cmd = rc.command || rc.commandLine || '';
    const out = (rc.output || rc.stdout || '').substring(0, 500);
    return { role: 'tool', content: `[run_command] ${cmd}${out ? '\n' + out : ''}` };
  }

  if (type === 'CORTEX_STEP_TYPE_COMMAND_STATUS' && step.commandStatus) {
    const cs = step.commandStatus;
    const out = (cs.output || cs.stdout || '').substring(0, 500);
    // Status updates with no output are noise — drop them.
    return out ? { role: 'tool', content: `[command_status] ${out}` } : null;
  }

  if (type === 'CORTEX_STEP_TYPE_VIEW_FILE' && step.viewFile) {
    const vf = step.viewFile;
    const filePath = vf.filePath || vf.path || '';
    return { role: 'tool', content: `[view_file] ${filePath}` };
  }

  if (type === 'CORTEX_STEP_TYPE_CODE_ACTION' && step.codeAction) {
    const ca = step.codeAction;
    const filePath = ca.filePath || ca.path || '';
    return { role: 'tool', content: `[code_action] ${filePath}` };
  }

  if (type === 'CORTEX_STEP_TYPE_GREP_SEARCH' && step.grepSearch) {
    const gs = step.grepSearch;
    const query = gs.query || gs.pattern || '';
    return { role: 'tool', content: `[grep_search] ${query}` };
  }

  if (type === 'CORTEX_STEP_TYPE_LIST_DIRECTORY' && step.listDirectory) {
    const ld = step.listDirectory;
    const dir = ld.directoryPath || ld.path || '';
    return { role: 'tool', content: `[list_directory] ${dir}` };
  }

  if (type === 'CORTEX_STEP_TYPE_MCP_TOOL' && step.mcpTool) {
    const mt = step.mcpTool;
    const name = mt.toolName || mt.name || 'mcp_tool';
    return { role: 'tool', content: `[${name}]` };
  }

  // Skip non-content bookkeeping steps.
  if (type === 'CORTEX_STEP_TYPE_CHECKPOINT' || type === 'CORTEX_STEP_TYPE_RETRIEVE_MEMORY' ||
      type === 'CORTEX_STEP_TYPE_MEMORY' || type === 'CORTEX_STEP_TYPE_TODO_LIST' ||
      type === 'CORTEX_STEP_TYPE_EXIT_PLAN_MODE' || type === 'CORTEX_STEP_TYPE_PROXY_WEB_SERVER') {
    return null;
  }

  // Unknown step types are silently dropped.
  return null;
}
408
+
409
// Build the full message list for a chat: parse every step, then append any
// tail messages recovered from generatorMetadata (steps can be truncated
// server-side).
function getMessages(chat) {
  const messages = [];
  for (const rawStep of getSteps(chat)) {
    const parsed = parseStep(rawStep);
    if (parsed) messages.push(parsed);
  }

  const tail = getTailMessages(chat, messages);
  if (tail.length > 0) messages.push(...tail);

  return messages;
}
425
+
426
+ // ============================================================
427
+ // Usage / quota data from language server RPC
428
+ // ============================================================
429
+
430
// Build a normalized usage/quota snapshot from the language server's
// GetUserStatus RPC. Returns null when the server is unreachable or the
// response has no userStatus.
function getUsage() {
  const resp = callRpc('GetUserStatus', {});
  if (!resp || !resp.userStatus) return null;

  const us = resp.userStatus;
  const ps = us.planStatus || {};
  const pi = ps.planInfo || {};
  const modelConfigs = (us.cascadeModelConfigData || {}).clientModelConfigs || [];

  // Per-model quota; remainingFraction is kept as-is (null when absent so
  // callers can distinguish "no quota info" from "exhausted").
  const models = modelConfigs.map((m) => {
    const qi = m.quotaInfo || {};
    return {
      label: m.label || null,
      model: m.modelOrAlias?.model || null,
      remainingFraction: qi.remainingFraction != null ? qi.remainingFraction : null,
      resetTime: qi.resetTime || null,
      supportsImages: m.supportsImages || false,
    };
  });

  // Antigravity returns credits already in display units (no ÷100 needed)
  const promptAlloc = ps.availablePromptCredits || 0;
  const promptUsed = ps.usedPromptCredits || 0;
  const flexAlloc = ps.availableFlexCredits || 0;
  const flexUsed = ps.usedFlexCredits || 0;
  const flowAlloc = ps.availableFlowCredits || 0;

  // Clamp at 0: used can exceed allocated (e.g. plan downgrades mid-cycle).
  const remainingPrompt = Math.max(0, promptAlloc - promptUsed);
  const remainingFlex = Math.max(0, flexAlloc - flexUsed);
  const totalRemaining = remainingPrompt + remainingFlex;

  // Credit multipliers per model (model id → multiplier).
  const creditMultipliers = (pi.creditMultiplierOverrides || []).reduce((acc, entry) => {
    const model = entry.modelOrAlias?.model;
    if (model && entry.creditMultiplier != null) acc[model] = entry.creditMultiplier;
    return acc;
  }, {});

  return {
    source: 'antigravity',
    plan: {
      name: pi.planName || null,
      tier: pi.teamsTier || null,
      // NOTE(review): monthly totals from planInfo are divided by 100 while
      // the planStatus counters above are used as-is — presumably planInfo is
      // in centi-credits. TODO confirm against the LS proto.
      monthlyPromptCredits: (pi.monthlyPromptCredits || 0) / 100,
      monthlyFlowCredits: (pi.monthlyFlowCredits || 0) / 100,
      canBuyMoreCredits: pi.canBuyMoreCredits || false,
    },
    usage: {
      promptCredits: { allocated: promptAlloc, used: promptUsed, remaining: remainingPrompt },
      flexCredits: { allocated: flexAlloc, used: flexUsed, remaining: remainingFlex },
      flowCredits: { allocated: flowAlloc },
      totalRemainingCredits: totalRemaining,
    },
    billingCycle: {
      start: ps.planStart || null,
      end: ps.planEnd || null,
    },
    features: {
      webSearch: pi.cascadeWebSearchEnabled || false,
      browser: pi.browserEnabled || false,
      knowledgeBase: pi.knowledgeBaseEnabled || false,
      autoRunCommands: pi.cascadeCanAutoRunCommands || false,
      commitMessages: pi.canGenerateCommitMessages || false,
    },
    models,
    creditMultipliers,
    user: {
      name: us.name || null,
      email: us.email || null,
    },
  };
}
502
+
503
// Drop the memoized language-server location and model map so the next call re-probes.
function resetCache() { _lsCache = null; _modelMap = null; }

// Display labels keyed by source id.
const labels = { 'antigravity': 'Antigravity' };

module.exports = { name, labels, getChats, getMessages, resetCache, getUsage };
package/editors/claude.js CHANGED
@@ -200,6 +200,105 @@ function extractAssistantContent(content) {
200
200
  return { text: parts.join('\n') || '', toolCalls };
201
201
  }
202
202
 
203
+ // ============================================================
204
+ // Usage / quota data from Anthropic OAuth API
205
+ // ============================================================
206
+
207
// Read the Claude Code OAuth credentials from the OS secret store.
// Returns the parsed `claudeAiOauth` object ({ accessToken, subscriptionType, ... })
// or null when the entry is missing, unreadable, or has no access token.
function getClaudeCredentials() {
  // macOS: Keychain; Linux: secret-tool; Windows: not yet supported
  try {
    const { execSync } = require('child_process');
    let raw;
    if (process.platform === 'darwin') {
      raw = execSync('security find-generic-password -s "Claude Code-credentials" -w', { encoding: 'utf-8', timeout: 5000 }).trim();
    } else if (process.platform === 'linux') {
      // NOTE(review): assumes Claude Code stores via libsecret under service
      // "Claude Code-credentials" — some Linux installs use a plain
      // ~/.claude/.credentials.json file instead. TODO confirm.
      raw = execSync('secret-tool lookup service "Claude Code-credentials"', { encoding: 'utf-8', timeout: 5000 }).trim();
    } else {
      return null; // Windows credential store not implemented
    }
    const creds = JSON.parse(raw);
    const oauth = creds.claudeAiOauth;
    if (!oauth || !oauth.accessToken) return null;
    return oauth;
  } catch { return null; }
}
+ }
225
+
226
+ function claudeApiFetch(token) {
227
+ return new Promise((resolve) => {
228
+ const https = require('https');
229
+ const req = https.get('https://api.anthropic.com/api/oauth/usage', {
230
+ headers: {
231
+ 'Accept': 'application/json',
232
+ 'Content-Type': 'application/json',
233
+ 'User-Agent': 'agentlytics/1.0',
234
+ 'Authorization': `Bearer ${token}`,
235
+ 'anthropic-beta': 'oauth-2025-04-20',
236
+ },
237
+ timeout: 10000,
238
+ }, (res) => {
239
+ let data = '';
240
+ res.on('data', (chunk) => { data += chunk; });
241
+ res.on('end', () => {
242
+ try { resolve(JSON.parse(data)); } catch { resolve(null); }
243
+ });
244
+ });
245
+ req.on('error', () => resolve(null));
246
+ req.on('timeout', () => { req.destroy(); resolve(null); });
247
+ });
248
+ }
249
+
250
+ async function getUsage() {
251
+ const creds = getClaudeCredentials();
252
+ if (!creds) return null;
253
+
254
+ const usage = await claudeApiFetch(creds.accessToken);
255
+ if (!usage) return null;
256
+
257
+ const result = {
258
+ source: 'claude-code',
259
+ plan: {
260
+ name: creds.subscriptionType || null,
261
+ },
262
+ usage: {},
263
+ extraUsage: null,
264
+ };
265
+
266
+ if (usage.five_hour) {
267
+ result.usage.fiveHour = {
268
+ utilization: usage.five_hour.utilization,
269
+ resetsAt: usage.five_hour.resets_at || null,
270
+ };
271
+ }
272
+ if (usage.seven_day) {
273
+ result.usage.sevenDay = {
274
+ utilization: usage.seven_day.utilization,
275
+ resetsAt: usage.seven_day.resets_at || null,
276
+ };
277
+ }
278
+ if (usage.seven_day_sonnet) {
279
+ result.usage.sevenDaySonnet = {
280
+ utilization: usage.seven_day_sonnet.utilization,
281
+ resetsAt: usage.seven_day_sonnet.resets_at || null,
282
+ };
283
+ }
284
+ if (usage.seven_day_opus) {
285
+ result.usage.sevenDayOpus = {
286
+ utilization: usage.seven_day_opus.utilization,
287
+ resetsAt: usage.seven_day_opus.resets_at || null,
288
+ };
289
+ }
290
+ if (usage.extra_usage) {
291
+ result.extraUsage = {
292
+ isEnabled: usage.extra_usage.is_enabled || false,
293
+ monthlyLimit: usage.extra_usage.monthly_limit || null,
294
+ usedCredits: usage.extra_usage.used_credits || null,
295
+ utilization: usage.extra_usage.utilization || null,
296
+ };
297
+ }
298
+
299
+ return result;
300
+ }
301
+
203
302
  const labels = { 'claude-code': 'Claude Code' };
204
303
 
205
- module.exports = { name, labels, getChats, getMessages };
304
+ module.exports = { name, labels, getChats, getMessages, getUsage };
package/editors/codex.js CHANGED
@@ -435,6 +435,66 @@ function safeParseJson(value) {
435
435
  }
436
436
  }
437
437
 
438
+ // ============================================================
439
+ // Usage / quota data from Codex auth.json JWT
440
+ // ============================================================
441
+
442
// Load Codex CLI credentials from $CODEX_HOME/auth.json (falling back to
// DEFAULT_CODEX_HOME when the env var is unset/blank).
// Returns the parsed JSON (expected shape: { tokens: { id_token, access_token },
// auth_mode, ... }) or null when missing/unparseable.
function getCodexAuth() {
  const authPath = path.join(
    process.env.CODEX_HOME && process.env.CODEX_HOME.trim()
      ? path.resolve(process.env.CODEX_HOME.trim())
      : DEFAULT_CODEX_HOME,
    'auth.json'
  );
  try {
    return JSON.parse(fs.readFileSync(authPath, 'utf-8'));
  } catch { return null; }
}
453
+
454
// Decode the payload (second segment) of a JWT without verifying the
// signature. Returns the parsed claims object, or null for missing/malformed
// tokens. Node's base64 decoder accepts the URL-safe alphabet directly.
function decodeJwtPayload(token) {
  if (!token) return null;
  try {
    const segments = token.split('.');
    if (segments.length < 2) return null;
    // Restore base64 padding dropped by the JWT encoding.
    const unpadded = segments[1];
    const padding = '='.repeat((4 - unpadded.length % 4) % 4);
    const json = Buffer.from(unpadded + padding, 'base64').toString('utf-8');
    return JSON.parse(json);
  } catch { return null; }
}
466
+
467
// Build a plan/identity snapshot for Codex from the JWT claims embedded in
// auth.json (no network call). Returns null when credentials are absent or
// carry neither a plan type nor an email.
async function getUsage() {
  const auth = getCodexAuth();
  if (!auth?.tokens) return null;

  const idClaims = decodeJwtPayload(auth.tokens.id_token);
  const accessClaims = decodeJwtPayload(auth.tokens.access_token);

  // Prefer the id_token's claims; fall back to the access token's.
  const claim = (key) => idClaims?.[key] || accessClaims?.[key] || {};
  const authInfo = claim('https://api.openai.com/auth');
  const profile = claim('https://api.openai.com/profile');

  const planType = authInfo.chatgpt_plan_type || null;
  const email = profile.email || null;

  if (!planType && !email) return null;

  return {
    source: 'codex',
    plan: {
      name: planType,
      subscriptionStart: authInfo.chatgpt_subscription_active_start || null,
      subscriptionEnd: authInfo.chatgpt_subscription_active_until || null,
    },
    user: {
      email,
    },
    authMode: auth.auth_mode || null,
  };
}
497
+
438
498
  const labels = { 'codex': 'Codex' };
439
499
 
440
500
  module.exports = {
@@ -442,4 +502,5 @@ module.exports = {
442
502
  labels,
443
503
  getChats,
444
504
  getMessages,
505
+ getUsage,
445
506
  };