getaimeter 0.1.7 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/package.json (+5 −2)
  2. package/watcher.js (+22 −4)
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "getaimeter",
-  "version": "0.1.7",
+  "version": "0.1.9",
   "description": "Track your Claude AI usage across CLI, VS Code, and Desktop App. One command to start.",
   "bin": {
     "aimeter": "cli.js"
@@ -38,5 +38,8 @@
   },
   "homepage": "https://getaimeter.com",
   "author": "Alejandro Ceja",
-  "preferGlobal": true
+  "preferGlobal": true,
+  "dependencies": {
+    "systray2": "^2.1.4"
+  }
 }
package/watcher.js CHANGED
@@ -73,6 +73,7 @@ function extractNewUsage(filePath) {
 
   const usageEvents = [];
   let lineOffset = lastOffset;
+  let pendingThinkingChars = 0; // Track thinking chars from streaming progress messages
 
   for (const line of lines) {
     const trimmed = line.trim();
@@ -84,13 +85,30 @@ function extractNewUsage(filePath) {
 
     if (obj.type !== 'assistant' || !obj.message || !obj.message.usage) continue;
 
-    // Skip streaming progress messages (stop_reason: null = not yet complete)
-    // Only count final responses with a real stop_reason
-    if (obj.message.stop_reason === null || obj.message.stop_reason === undefined) continue;
+    // Skip synthetic/internal messages
+    if (obj.message.model === '<synthetic>') continue;
+
+    // Check for thinking content blocks (appear in streaming progress messages)
+    const contentBlocks = obj.message.content || [];
+    for (const block of contentBlocks) {
+      if (block.type === 'thinking' && block.thinking) {
+        pendingThinkingChars = Math.max(pendingThinkingChars, block.thinking.length);
+      }
+    }
+
+    // Skip streaming progress messages (stop_reason: null/undefined/missing = not yet complete)
+    if (!obj.message.stop_reason) continue;
 
     const u = obj.message.usage;
     const model = obj.message.model || 'unknown';
 
+    // Estimate thinking tokens: ~4 chars per token (conservative estimate)
+    // The API doesn't separate thinking_tokens in the JSONL usage field
+    const estimatedThinkingTokens = pendingThinkingChars > 0
+      ? Math.ceil(pendingThinkingChars / 4)
+      : 0;
+    pendingThinkingChars = 0; // Reset for next turn
+
     // Build dedup hash — include line offset for uniqueness
     const hash = crypto.createHash('md5')
       .update(`${filePath}:${lineOffset}:${model}:${u.input_tokens || 0}:${u.output_tokens || 0}`)
@@ -104,7 +122,7 @@ function extractNewUsage(filePath) {
       source: detectSource(filePath),
       inputTokens: u.input_tokens || 0,
       outputTokens: u.output_tokens || 0,
-      thinkingTokens: u.thinking_tokens || 0,
+      thinkingTokens: estimatedThinkingTokens,
       cacheReadTokens: u.cache_read_input_tokens || 0,
       cacheWriteTokens: u.cache_creation_input_tokens || 0,
     });