modelmix 4.3.0 β†’ 4.3.2

package/README.md CHANGED
@@ -92,6 +92,7 @@ This pattern allows you to:
  - Chain multiple models together
  - Automatically fall back to the next model if one fails
  - Get structured JSON responses when needed
+ - Track token usage across all providers
  - Keep your code clean and maintainable
 
  ## 🔧 Model Context Protocol (MCP) Integration
@@ -291,6 +292,24 @@ const result = await model.json(
 
  These options give you fine-grained control over how much guidance you provide to the model for generating properly formatted JSON responses.
 
+ ## 📊 Token Usage Tracking
+
+ ModelMix automatically tracks token usage for all requests across different providers, providing a unified format regardless of the underlying API.
+
+ ### How it works
+
+ Every response from `raw()` now includes a `tokens` object with the following structure:
+
+ ```javascript
+ {
+     tokens: {
+         input: 150,  // Number of tokens in the prompt/input
+         output: 75,  // Number of tokens in the completion/output
+         total: 225   // Total tokens used (input + output)
+     }
+ }
+ ```
+
  ## 🐛 Enabling Debug Mode
 
  To activate debug mode in ModelMix and view detailed request information, follow these two steps:
@@ -375,7 +394,12 @@ new ModelMix(args = { options: {}, config: {} })
  - `addImage(filePath, config = { role: "user" })`: Adds an image message from a file path.
  - `addImageFromUrl(url, config = { role: "user" })`: Adds an image message from URL.
  - `message()`: Sends the message and returns the response.
- - `raw()`: Sends the message and returns the raw response data.
+ - `raw()`: Sends the message and returns the complete response data including:
+   - `message`: The text response from the model
+   - `think`: Reasoning/thinking content (if available)
+   - `toolCalls`: Array of tool calls made by the model (if any)
+   - `tokens`: Object with `input`, `output`, and `total` token counts
+   - `response`: The raw API response
  - `stream(callback)`: Sends the message and streams the response, invoking the callback with each streamed part.
  - `json(schemaExample, descriptions = {})`: Forces the model to return a response in a specific JSON format.
    - `schemaExample`: Optional example of the JSON structure to be returned.
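Because `tokens` has the same shape for every provider, downstream accounting code stays provider-agnostic. A minimal cost-estimation sketch against the documented `raw()` shape; the per-million-token prices are placeholders, not real rates:

```javascript
import { ModelMix } from 'modelmix';

// Placeholder prices per million tokens; substitute your provider's actual rates.
const PRICE = { inputPerM: 0.05, outputPerM: 0.40 };

const result = await ModelMix.new()
    .gpt5nano()
    .addText('Summarize Hamlet in one sentence.')
    .raw();

// Works identically whichever provider served the request.
const cost = (result.tokens.input * PRICE.inputPerM +
    result.tokens.output * PRICE.outputPerM) / 1_000_000;

console.log(`Used ${result.tokens.total} tokens, estimated cost $${cost.toFixed(6)}`);
```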
@@ -0,0 +1,18 @@
+ process.loadEnvFile();
+ import { ModelMix } from '../index.js';
+
+ // Simple example: get token usage information
+ const model = ModelMix.new()
+     .gpt5nano()
+     .addText('What is 2+2?');
+
+ const result = await model.raw();
+
+ console.log('\n📊 Token Usage Information:');
+ console.log('━'.repeat(50));
+ console.log(`Input tokens: ${result.tokens.input}`);
+ console.log(`Output tokens: ${result.tokens.output}`);
+ console.log(`Total tokens: ${result.tokens.total}`);
+ console.log('━'.repeat(50));
+ console.log('\n💬 Response:', result.message);
+ console.log();
package/demo/tokens.js ADDED
@@ -0,0 +1,109 @@
+ process.loadEnvFile();
+ import { ModelMix } from '../index.js';
+
+ console.log('\n🔢 Token Usage Tracking Demo\n');
+ console.log('='.repeat(60));
+
+ // Example 1: Get token usage from a simple request
+ console.log('\n📝 Example 1: Basic token usage tracking');
+ console.log('-'.repeat(60));
+
+ const model1 = ModelMix.new({ config: { debug: 1 } })
+     .gpt5nano()
+     .addText('What is 2+2?');
+
+ const result1 = await model1.raw();
+ console.log('\n📊 Token Usage:');
+ console.log('  Input tokens:', result1.tokens.input);
+ console.log('  Output tokens:', result1.tokens.output);
+ console.log('  Total tokens:', result1.tokens.total);
+ console.log('\n💬 Response:', result1.message);
+
+ // Example 2: Compare token usage across different providers
+ console.log('\n\n📝 Example 2: Token usage across providers');
+ console.log('-'.repeat(60));
+
+ const providers = [
+     { name: 'OpenAI GPT-5-nano', fn: (m) => m.gpt5nano() },
+     { name: 'Anthropic Haiku', fn: (m) => m.haiku35() },
+     { name: 'Google Gemini', fn: (m) => m.gemini25flash() }
+ ];
+
+ const prompt = 'Explain quantum computing in one sentence.';
+
+ for (const provider of providers) {
+     try {
+         const model = ModelMix.new({ config: { debug: 0 } });
+         provider.fn(model).addText(prompt);
+
+         const result = await model.raw();
+
+         console.log(`\n🤖 ${provider.name}`);
+         console.log(`  Input: ${result.tokens.input} | Output: ${result.tokens.output} | Total: ${result.tokens.total}`);
+         console.log(`  Response: ${result.message.substring(0, 80)}...`);
+     } catch (error) {
+         console.log(`\n❌ ${provider.name}: ${error.message}`);
+     }
+ }
+
+ // Example 3: Track tokens in a conversation
+ console.log('\n\n📝 Example 3: Token usage in conversation history');
+ console.log('-'.repeat(60));
+
+ const conversation = ModelMix.new({ config: { debug: 0, max_history: 10 } })
+     .gpt5nano();
+
+ let totalInput = 0;
+ let totalOutput = 0;
+
+ // First message
+ conversation.addText('Hi! My name is Alice.');
+ let result = await conversation.raw();
+ totalInput += result.tokens.input;
+ totalOutput += result.tokens.output;
+ console.log(`\n💬 Turn 1: ${result.tokens.input} in, ${result.tokens.output} out`);
+
+ // Second message (includes history)
+ conversation.addText('What is my name?');
+ result = await conversation.raw();
+ totalInput += result.tokens.input;
+ totalOutput += result.tokens.output;
+ console.log(`💬 Turn 2: ${result.tokens.input} in, ${result.tokens.output} out`);
+
+ // Third message (includes more history)
+ conversation.addText('Tell me a joke about my name.');
+ result = await conversation.raw();
+ totalInput += result.tokens.input;
+ totalOutput += result.tokens.output;
+ console.log(`💬 Turn 3: ${result.tokens.input} in, ${result.tokens.output} out`);
+
+ console.log('\n📊 Conversation totals:');
+ console.log(`  Total input tokens: ${totalInput}`);
+ console.log(`  Total output tokens: ${totalOutput}`);
+ console.log(`  Grand total: ${totalInput + totalOutput}`);
+
+ // Example 4: JSON response with token tracking
+ console.log('\n\n📝 Example 4: JSON response with token tracking');
+ console.log('-'.repeat(60));
+
+ const jsonModel = ModelMix.new({ config: { debug: 0 } })
+     .gpt5nano()
+     .addText('List 3 programming languages');
+
+ const jsonResult = await jsonModel.json(
+     { languages: [{ name: '', year: 0 }] }
+ );
+
+ // Get raw result for token info
+ const rawJsonModel = ModelMix.new({ config: { debug: 0 } })
+     .gpt5nano()
+     .addText('List 3 programming languages');
+
+ const rawJsonResult = await rawJsonModel.raw();
+
+ console.log('\n📊 Token Usage for JSON response:');
+ console.log(`  Input: ${rawJsonResult.tokens.input} | Output: ${rawJsonResult.tokens.output} | Total: ${rawJsonResult.tokens.total}`);
+ console.log('\n📋 JSON Result:', jsonResult);
+
+ console.log('\n' + '='.repeat(60));
+ console.log('✅ Token tracking demo complete!\n');
package/index.js CHANGED
@@ -1135,7 +1135,8 @@ class MixCustom {
              response: raw,
              message: message.trim(),
              toolCalls: [],
-             think: null
+             think: null,
+             tokens: raw.length > 0 ? MixCustom.extractTokens(raw[raw.length - 1]) : { input: 0, output: 0, total: 0 }
          }));
          response.data.on('error', reject);
      });
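In the streaming path above, usage is read off the last accumulated chunk. That matches how OpenAI-compatible APIs deliver it: when usage is reported for streams at all (OpenAI, for instance, exposes it behind `stream_options: { include_usage: true }`), it arrives on the final chunk only. A self-contained sketch of that normalization, mirroring the ternary above; the sample chunks are illustrative:

```javascript
// Illustrative SSE chunks as an OpenAI-compatible stream might deliver them.
const raw = [
    { choices: [{ delta: { content: 'Hello' } }] },
    { choices: [{ delta: { content: ' world' } }] },
    // Final chunk: usage is only populated here.
    { choices: [], usage: { prompt_tokens: 12, completion_tokens: 2, total_tokens: 14 } }
];

// Mirrors MixCustom.extractTokens from the hunk below.
const extractTokens = (data) => data.usage
    ? { input: data.usage.prompt_tokens || 0, output: data.usage.completion_tokens || 0, total: data.usage.total_tokens || 0 }
    : { input: 0, output: 0, total: 0 };

const tokens = raw.length > 0
    ? extractTokens(raw[raw.length - 1])
    : { input: 0, output: 0, total: 0 };

console.log(tokens); // { input: 12, output: 2, total: 14 }
```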
@@ -1181,11 +1182,28 @@ class MixCustom {
          })) || []
      }
 
+     static extractTokens(data) {
+         // OpenAI/Groq/Together/Lambda/Cerebras/Fireworks format
+         if (data.usage) {
+             return {
+                 input: data.usage.prompt_tokens || 0,
+                 output: data.usage.completion_tokens || 0,
+                 total: data.usage.total_tokens || 0
+             };
+         }
+         return {
+             input: 0,
+             output: 0,
+             total: 0
+         };
+     }
+
      processResponse(response) {
          return {
              message: MixCustom.extractMessage(response.data),
              think: MixCustom.extractThink(response.data),
              toolCalls: MixCustom.extractToolCalls(response.data),
+             tokens: MixCustom.extractTokens(response.data),
              response: response.data
          }
      }
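The providers named in the comment all emit the same `usage` block, which is why a single base-class implementation covers them, and responses without one degrade to zeros rather than throwing. A self-contained sketch mirroring the logic above on illustrative payloads:

```javascript
// Mirrors MixCustom.extractTokens above; payload values are illustrative.
function extractTokens(data) {
    if (data.usage) {
        return {
            input: data.usage.prompt_tokens || 0,
            output: data.usage.completion_tokens || 0,
            total: data.usage.total_tokens || 0
        };
    }
    return { input: 0, output: 0, total: 0 };
}

const withUsage = { usage: { prompt_tokens: 150, completion_tokens: 75, total_tokens: 225 } };
console.log(extractTokens(withUsage)); // { input: 150, output: 75, total: 225 }

// A body with no usage block normalizes to zeros instead of throwing.
console.log(extractTokens({ choices: [] })); // { input: 0, output: 0, total: 0 }
```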
@@ -1478,11 +1496,28 @@ class MixAnthropic extends MixCustom {
          return data.content[0]?.signature || null;
      }
 
+     static extractTokens(data) {
+         // Anthropic format
+         if (data.usage) {
+             return {
+                 input: data.usage.input_tokens || 0,
+                 output: data.usage.output_tokens || 0,
+                 total: (data.usage.input_tokens || 0) + (data.usage.output_tokens || 0)
+             };
+         }
+         return {
+             input: 0,
+             output: 0,
+             total: 0
+         };
+     }
+
      processResponse(response) {
          return {
              message: MixAnthropic.extractMessage(response.data),
              think: MixAnthropic.extractThink(response.data),
              toolCalls: MixAnthropic.extractToolCalls(response.data),
+             tokens: MixAnthropic.extractTokens(response.data),
              response: response.data,
              signature: MixAnthropic.extractSignature(response.data)
          }
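Anthropic's Messages API reports `usage.input_tokens` and `usage.output_tokens` but no combined count, which is why `total` is computed as their sum in this override. A self-contained sketch with an illustrative payload:

```javascript
// Mirrors MixAnthropic.extractTokens above; the payload is illustrative.
function extractAnthropicTokens(data) {
    if (data.usage) {
        return {
            input: data.usage.input_tokens || 0,
            output: data.usage.output_tokens || 0,
            total: (data.usage.input_tokens || 0) + (data.usage.output_tokens || 0)
        };
    }
    return { input: 0, output: 0, total: 0 };
}

const anthropicResponse = {
    content: [{ type: 'text', text: 'Hi there!' }],
    usage: { input_tokens: 150, output_tokens: 75 } // no total field in this format
};
console.log(extractAnthropicTokens(anthropicResponse)); // { input: 150, output: 75, total: 225 }
```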
@@ -1706,6 +1741,7 @@ class MixLMStudio extends MixCustom {
              message: MixLMStudio.extractMessage(response.data),
              think: MixLMStudio.extractThink(response.data),
              toolCalls: MixCustom.extractToolCalls(response.data),
+             tokens: MixCustom.extractTokens(response.data),
              response: response.data
          };
      }
@@ -1926,6 +1962,7 @@ class MixGoogle extends MixCustom {
              message: MixGoogle.extractMessage(response.data),
              think: null,
              toolCalls: MixGoogle.extractToolCalls(response.data),
+             tokens: MixGoogle.extractTokens(response.data),
              response: response.data
          }
      }
@@ -1951,6 +1988,22 @@
          return data.candidates?.[0]?.content?.parts?.[0]?.text;
      }
 
+     static extractTokens(data) {
+         // Google Gemini format
+         if (data.usageMetadata) {
+             return {
+                 input: data.usageMetadata.promptTokenCount || 0,
+                 output: data.usageMetadata.candidatesTokenCount || 0,
+                 total: data.usageMetadata.totalTokenCount || 0
+             };
+         }
+         return {
+             input: 0,
+             output: 0,
+             total: 0
+         };
+     }
+
      static getOptionsTools(tools) {
          const functionDeclarations = [];
          for (const tool in tools) {
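Gemini nests its counts under `usageMetadata` with camelCase keys and does include a combined `totalTokenCount`, so the total is read directly; on some models that figure may exceed the sum of the other two (thinking tokens are reported separately), which makes reading it safer than summing. A self-contained sketch with an illustrative payload:

```javascript
// Mirrors MixGoogle.extractTokens above; the payload is illustrative.
function extractGeminiTokens(data) {
    if (data.usageMetadata) {
        return {
            input: data.usageMetadata.promptTokenCount || 0,
            output: data.usageMetadata.candidatesTokenCount || 0,
            total: data.usageMetadata.totalTokenCount || 0
        };
    }
    return { input: 0, output: 0, total: 0 };
}

const geminiResponse = {
    candidates: [{ content: { parts: [{ text: 'Hi!' }] } }],
    usageMetadata: { promptTokenCount: 150, candidatesTokenCount: 75, totalTokenCount: 225 }
};
console.log(extractGeminiTokens(geminiResponse)); // { input: 150, output: 75, total: 225 }
```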
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "modelmix",
-     "version": "4.3.0",
+     "version": "4.3.2",
      "description": "🧬 Reliable interface with automatic fallback for AI LLMs.",
      "main": "index.js",
      "repository": {
@@ -46,7 +46,7 @@
      },
      "homepage": "https://github.com/clasen/ModelMix#readme",
      "dependencies": {
-         "@modelcontextprotocol/sdk": "^1.25.3",
+         "@modelcontextprotocol/sdk": "^1.26.0",
          "axios": "^1.12.2",
          "bottleneck": "^2.19.5",
          "file-type": "^16.5.4",
@@ -69,6 +69,7 @@
          "test:images": "mocha test/images.test.js --timeout 10000 --require test/setup.js",
          "test:bottleneck": "mocha test/bottleneck.test.js --timeout 10000 --require test/setup.js",
          "test:live": "mocha test/live.test.js --timeout 10000 --require dotenv/config --require test/setup.js",
-         "test:live.mcp": "mocha test/live.mcp.js --timeout 60000 --require dotenv/config --require test/setup.js"
+         "test:live.mcp": "mocha test/live.mcp.js --timeout 60000 --require dotenv/config --require test/setup.js",
+         "test:tokens": "mocha test/tokens.test.js --timeout 10000 --require dotenv/config --require test/setup.js"
      }
  }
package/test/tokens.test.js ADDED
@@ -0,0 +1,135 @@
+ import { expect } from 'chai';
+ import { ModelMix } from '../index.js';
+
+ describe('Token Usage Tracking', () => {
+
+     it('should track tokens in OpenAI response', async function () {
+         this.timeout(30000);
+
+         const model = ModelMix.new()
+             .gpt5nano()
+             .addText('Say hi');
+
+         const result = await model.raw();
+
+         expect(result).to.have.property('tokens');
+         expect(result.tokens).to.have.property('input');
+         expect(result.tokens).to.have.property('output');
+         expect(result.tokens).to.have.property('total');
+
+         expect(result.tokens.input).to.be.a('number');
+         expect(result.tokens.output).to.be.a('number');
+         expect(result.tokens.total).to.be.a('number');
+
+         expect(result.tokens.input).to.be.greaterThan(0);
+         expect(result.tokens.output).to.be.greaterThan(0);
+         expect(result.tokens.total).to.be.greaterThan(0);
+     });
+
+     it('should track tokens in Anthropic response', async function () {
+         this.timeout(30000);
+
+         const model = ModelMix.new()
+             .haiku35()
+             .addText('Say hi');
+
+         const result = await model.raw();
+
+         expect(result).to.have.property('tokens');
+         expect(result.tokens).to.have.property('input');
+         expect(result.tokens).to.have.property('output');
+         expect(result.tokens).to.have.property('total');
+
+         expect(result.tokens.input).to.be.greaterThan(0);
+         expect(result.tokens.output).to.be.greaterThan(0);
+         expect(result.tokens.total).to.equal(result.tokens.input + result.tokens.output);
+     });
+
+     it('should track tokens in Google Gemini response', async function () {
+         this.timeout(30000);
+
+         const model = ModelMix.new()
+             .gemini25flash()
+             .addText('Say hi');
+
+         const result = await model.raw();
+
+         expect(result).to.have.property('tokens');
+         expect(result.tokens).to.have.property('input');
+         expect(result.tokens).to.have.property('output');
+         expect(result.tokens).to.have.property('total');
+
+         expect(result.tokens.input).to.be.greaterThan(0);
+         expect(result.tokens.output).to.be.greaterThan(0);
+         expect(result.tokens.total).to.be.greaterThan(0);
+     });
+
+     it('should accumulate tokens across conversation turns', async function () {
+         this.timeout(60000);
+
+         const conversation = ModelMix.new({ config: { max_history: 10 } })
+             .gpt5nano();
+
+         // First turn
+         conversation.addText('My name is Alice');
+         const result1 = await conversation.raw();
+
+         expect(result1.tokens.input).to.be.greaterThan(0);
+         expect(result1.tokens.output).to.be.greaterThan(0);
+
+         // Second turn (should have more input tokens due to history)
+         conversation.addText('What is my name?');
+         const result2 = await conversation.raw();
+
+         expect(result2.tokens.input).to.be.greaterThan(result1.tokens.input);
+         expect(result2.tokens.output).to.be.greaterThan(0);
+
+         // Verify both results have valid token counts
+         expect(result1.tokens.total).to.equal(result1.tokens.input + result1.tokens.output);
+         expect(result2.tokens.total).to.be.greaterThan(0);
+     });
+
+     it('should track tokens with JSON responses', async function () {
+         this.timeout(30000);
+
+         const model = ModelMix.new()
+             .gpt5nano()
+             .addText('Return a simple greeting');
+
+         // Using raw() to get token info
+         const result = await model.raw();
+
+         expect(result).to.have.property('tokens');
+         expect(result.tokens.input).to.be.greaterThan(0);
+         expect(result.tokens.output).to.be.greaterThan(0);
+         expect(result.tokens.total).to.be.greaterThan(0);
+     });
+
+     it('should have consistent token format across providers', async function () {
+         this.timeout(90000);
+
+         const providers = [
+             { name: 'OpenAI', create: (m) => m.gpt5nano() },
+             { name: 'Anthropic', create: (m) => m.haiku35() },
+             { name: 'Google', create: (m) => m.gemini25flash() }
+         ];
+
+         for (const provider of providers) {
+             const model = ModelMix.new();
+             provider.create(model).addText('Hi');
+
+             const result = await model.raw();
+
+             // Verify consistent structure
+             expect(result.tokens, `${provider.name} should have tokens object`).to.exist;
+             expect(result.tokens.input, `${provider.name} should have input`).to.be.a('number');
+             expect(result.tokens.output, `${provider.name} should have output`).to.be.a('number');
+             expect(result.tokens.total, `${provider.name} should have total`).to.be.a('number');
+
+             // Verify values are positive
+             expect(result.tokens.input, `${provider.name} input should be > 0`).to.be.greaterThan(0);
+             expect(result.tokens.output, `${provider.name} output should be > 0`).to.be.greaterThan(0);
+             expect(result.tokens.total, `${provider.name} total should be > 0`).to.be.greaterThan(0);
+         }
+     });
+ });