getaimeter 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/package.json (+1 −1)
  2. package/watcher.js (+21 −1)
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "getaimeter",
-  "version": "0.1.6",
+  "version": "0.1.8",
   "description": "Track your Claude AI usage across CLI, VS Code, and Desktop App. One command to start.",
   "bin": {
     "aimeter": "cli.js"
package/watcher.js CHANGED
@@ -73,6 +73,7 @@ function extractNewUsage(filePath) {
 
   const usageEvents = [];
   let lineOffset = lastOffset;
+  let pendingThinkingChars = 0; // Track thinking chars from streaming progress messages
 
   for (const line of lines) {
     const trimmed = line.trim();
@@ -84,9 +85,28 @@ function extractNewUsage(filePath) {
 
     if (obj.type !== 'assistant' || !obj.message || !obj.message.usage) continue;
 
+    // Check for thinking content blocks (appear in streaming progress messages)
+    const contentBlocks = obj.message.content || [];
+    for (const block of contentBlocks) {
+      if (block.type === 'thinking' && block.thinking) {
+        pendingThinkingChars = Math.max(pendingThinkingChars, block.thinking.length);
+      }
+    }
+
+    // Skip streaming progress messages (stop_reason: null = not yet complete)
+    // Only count final responses with a real stop_reason
+    if (obj.message.stop_reason === null || obj.message.stop_reason === undefined) continue;
+
     const u = obj.message.usage;
     const model = obj.message.model || 'unknown';
 
+    // Estimate thinking tokens: ~4 chars per token (conservative estimate)
+    // The API doesn't separate thinking_tokens in the JSONL usage field
+    const estimatedThinkingTokens = pendingThinkingChars > 0
+      ? Math.ceil(pendingThinkingChars / 4)
+      : 0;
+    pendingThinkingChars = 0; // Reset for next turn
+
     // Build dedup hash — include line offset for uniqueness
     const hash = crypto.createHash('md5')
       .update(`${filePath}:${lineOffset}:${model}:${u.input_tokens || 0}:${u.output_tokens || 0}`)
@@ -100,7 +120,7 @@ function extractNewUsage(filePath) {
       source: detectSource(filePath),
       inputTokens: u.input_tokens || 0,
       outputTokens: u.output_tokens || 0,
-      thinkingTokens: u.thinking_tokens || 0,
+      thinkingTokens: estimatedThinkingTokens,
       cacheReadTokens: u.cache_read_input_tokens || 0,
       cacheWriteTokens: u.cache_creation_input_tokens || 0,
     });