getaimeter 0.11.2 → 0.11.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/package.json +1 -1
  2. package/watcher.js +14 -31
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "getaimeter",
3
- "version": "0.11.2",
3
+ "version": "0.11.4",
4
4
  "description": "Track AI coding costs across Claude, Cursor, Codex, Copilot, and Gemini. MCP server, billing blocks, optimization recommendations.",
5
5
  "bin": {
6
6
  "aimeter": "cli.js",
package/watcher.js CHANGED
@@ -207,6 +207,11 @@ function getConversationMeta(filePath) {
207
207
  projectPath = obj.payload.cwd;
208
208
  break;
209
209
  }
210
+ // Copilot: session.start stores cwd at data.context.cwd
211
+ if (obj.type === 'session.start' && obj.data?.context?.cwd) {
212
+ projectPath = obj.data.context.cwd;
213
+ break;
214
+ }
210
215
  } catch {}
211
216
  }
212
217
  } catch {}
@@ -343,30 +348,6 @@ function extractNewUsage(filePath) {
343
348
  continue;
344
349
  }
345
350
 
346
- // ── Copilot VS Code agent: real-time per-turn output tracking ──
347
- // assistant.message fires after every AI response with data.outputTokens.
348
- // Input tokens are not available per-turn — they come from session.shutdown.
349
- if (obj.type === 'assistant.message' && obj.data?.outputTokens > 0) {
350
- const msgId = obj.data.messageId;
351
- const hashKey = `${filePath}:copilot-msg:${msgId}`;
352
- const hash = crypto.createHash('md5').update(hashKey).digest('hex');
353
- if (isDuplicate(hash)) continue;
354
-
355
- usageEvents.push({
356
- provider: 'github',
357
- model: 'copilot',
358
- source: detectSource(filePath),
359
- inputTokens: 0,
360
- outputTokens: obj.data.outputTokens,
361
- thinkingTokens: 0,
362
- cacheReadTokens: 0,
363
- cacheWriteTokens: 0,
364
- conversationId: convMeta.conversationId,
365
- projectPath: convMeta.projectPath,
366
- });
367
- continue;
368
- }
369
-
370
351
  // ── Copilot format (old: token_usage, new: session.shutdown with modelMetrics) ──
371
352
  if (obj.type === 'token_usage' && (obj.input_tokens !== undefined || obj.output_tokens !== undefined)) {
372
353
  const copilotModel = obj.model || 'copilot';
@@ -390,18 +371,20 @@ function extractNewUsage(filePath) {
390
371
  }
391
372
 
392
373
  // GitHub Copilot agent: session.shutdown reports the per-model session totals.
393
- // Output tokens were already reported turn-by-turn via assistant.message above,
394
- // so here we only emit input tokens (+ cache) to avoid double-counting output.
374
+ // We report full tokens (input + output) here with real model names.
375
+ // assistant.message per-turn events are intentionally skipped — they don't
376
+ // include a model field, which would cause all output to be misattributed
377
+ // to a generic "copilot" entry instead of the actual model used.
395
378
  if (obj.type === 'session.shutdown' && obj.data?.modelMetrics) {
396
379
  for (const [model, metrics] of Object.entries(obj.data.modelMetrics)) {
397
380
  const u = metrics.usage || {};
398
- const inputTokens = u.inputTokens || 0;
381
+ const inputTokens = u.inputTokens || 0;
382
+ const outputTokens = u.outputTokens || 0;
399
383
  const cacheReadTokens = u.cacheReadTokens || 0;
400
384
  const cacheWriteTokens = u.cacheWriteTokens || 0;
401
- // Skip if no input data (output was already handled per-turn)
402
- if (inputTokens === 0 && cacheReadTokens === 0) continue;
385
+ if (inputTokens === 0 && outputTokens === 0 && cacheReadTokens === 0) continue;
403
386
 
404
- const hashKey = `${filePath}:copilot-shutdown-input:${model}:${inputTokens}`;
387
+ const hashKey = `${filePath}:copilot-shutdown:${model}:${inputTokens}:${outputTokens}`;
405
388
  const hash = crypto.createHash('md5').update(hashKey).digest('hex');
406
389
  if (isDuplicate(hash)) continue;
407
390
 
@@ -416,7 +399,7 @@ function extractNewUsage(filePath) {
416
399
  model,
417
400
  source: detectSource(filePath),
418
401
  inputTokens,
419
- outputTokens: 0, // already reported per assistant.message turn
402
+ outputTokens,
420
403
  thinkingTokens: 0,
421
404
  cacheReadTokens,
422
405
  cacheWriteTokens,