getaimeter 0.1.7 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/watcher.js +17 -1
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "getaimeter",
-  "version": "0.1.7",
+  "version": "0.1.8",
   "description": "Track your Claude AI usage across CLI, VS Code, and Desktop App. One command to start.",
   "bin": {
     "aimeter": "cli.js"
package/watcher.js CHANGED
@@ -73,6 +73,7 @@ function extractNewUsage(filePath) {
 
   const usageEvents = [];
   let lineOffset = lastOffset;
+  let pendingThinkingChars = 0; // Track thinking chars from streaming progress messages
 
   for (const line of lines) {
     const trimmed = line.trim();
@@ -84,6 +85,14 @@ function extractNewUsage(filePath) {
 
     if (obj.type !== 'assistant' || !obj.message || !obj.message.usage) continue;
 
+    // Check for thinking content blocks (appear in streaming progress messages)
+    const contentBlocks = obj.message.content || [];
+    for (const block of contentBlocks) {
+      if (block.type === 'thinking' && block.thinking) {
+        pendingThinkingChars = Math.max(pendingThinkingChars, block.thinking.length);
+      }
+    }
+
     // Skip streaming progress messages (stop_reason: null = not yet complete)
     // Only count final responses with a real stop_reason
     if (obj.message.stop_reason === null || obj.message.stop_reason === undefined) continue;
@@ -91,6 +100,13 @@ function extractNewUsage(filePath) {
     const u = obj.message.usage;
     const model = obj.message.model || 'unknown';
 
+    // Estimate thinking tokens: ~4 chars per token (conservative estimate)
+    // The API doesn't separate thinking_tokens in the JSONL usage field
+    const estimatedThinkingTokens = pendingThinkingChars > 0
+      ? Math.ceil(pendingThinkingChars / 4)
+      : 0;
+    pendingThinkingChars = 0; // Reset for next turn
+
     // Build dedup hash — include line offset for uniqueness
     const hash = crypto.createHash('md5')
       .update(`${filePath}:${lineOffset}:${model}:${u.input_tokens || 0}:${u.output_tokens || 0}`)
@@ -104,7 +120,7 @@ function extractNewUsage(filePath) {
       source: detectSource(filePath),
       inputTokens: u.input_tokens || 0,
       outputTokens: u.output_tokens || 0,
-      thinkingTokens: u.thinking_tokens || 0,
+      thinkingTokens: estimatedThinkingTokens,
       cacheReadTokens: u.cache_read_input_tokens || 0,
       cacheWriteTokens: u.cache_creation_input_tokens || 0,
     });