modelmix 4.3.6 → 4.4.2

This diff shows the changes between publicly released versions of this package, as published to one of the supported public registries. It is provided for informational purposes only.
package/README.md CHANGED
@@ -18,11 +18,9 @@ Ever found yourself wanting to integrate AI models into your projects but worrie
18
18
  ## 🛠️ Usage
19
19
 
20
20
  1. **Install the ModelMix package:**
21
- Recommended: install dotenv to manage environment variables
22
21
  ```bash
23
- npm install modelmix dotenv
22
+ npm install modelmix
24
23
  ```
25
-
26
24
  > **AI Skill**: You can also add ModelMix as a skill for AI agentic development:
27
25
  > ```bash
28
26
  > npx skills add https://github.com/clasen/ModelMix --skill modelmix
@@ -39,6 +37,8 @@ MINIMAX_API_KEY="your-minimax-key..."
39
37
  GEMINI_API_KEY="AIza..."
40
38
  ```
41
39
 
40
+ For environment variables, use `dotenv` or Node's built-in `process.loadEnvFile()`.
41
+
42
42
  3. **Create and configure your models**:
43
43
 
44
44
  ```javascript
@@ -159,7 +159,7 @@ Here's a comprehensive list of available methods:
159
159
  | `grok41[think]()` | Grok | grok-4-1-fast | [\$0.20 / \$0.50][6] |
160
160
  | `deepseekV32()` | Fireworks | fireworks/models/deepseek-v3p2 | [\$0.56 / \$1.68][10] |
161
161
  | `GLM47()` | Fireworks | fireworks/models/glm-4p7 | [\$0.55 / \$2.19][10] |
162
- | `minimaxM21()` | MiniMax | MiniMax-M2.1 | [\$0.30 / \$1.20][9] |
162
+ | `minimaxM25()` | MiniMax | MiniMax-M2.5 | [\$0.30 / \$1.20][9] |
163
163
  | `sonar()` | Perplexity | sonar | [\$1.00 / \$1.00][4] |
164
164
  | `sonarPro()` | Perplexity | sonar-pro | [\$3.00 / \$15.00][4] |
165
165
  | `scout()` | Groq | Llama-4-Scout-17B-16E-Instruct | [\$0.11 / \$0.34][5] |
@@ -177,7 +177,7 @@ Here's a comprehensive list of available methods:
177
177
  [6]: https://docs.x.ai/docs/models "xAI"
178
178
  [7]: https://www.together.ai/pricing "Together AI"
179
179
  [8]: https://lambda.ai/inference "Lambda Pricing"
180
- [9]: https://www.minimax.io/price "MiniMax Pricing"
180
+ [9]: https://platform.minimax.io/docs/api-reference/anthropic-api-compatible-cache#supported-models-and-pricing "MiniMax Pricing"
181
181
  [10]: https://fireworks.ai/pricing#serverless-pricing "Fireworks Pricing"
182
182
 
183
183
  Each method accepts optional `options` and `config` parameters to customize the model's behavior. For example:
@@ -400,12 +400,12 @@ The `cost` field is the estimated cost in USD based on the model's pricing per 1
400
400
 
401
401
  To activate debug mode in ModelMix and view detailed request information, follow these two steps:
402
402
 
403
- 1. In the ModelMix constructor, include `debug: 3` in the configuration:
403
+ 1. In the ModelMix constructor, include a `debug` level in the configuration:
404
404
 
405
405
  ```javascript
406
406
  const mix = ModelMix.new({
407
407
  config: {
408
- debug: 3
408
+ debug: 4 // 0=silent, 1=minimal, 2=summary, 3=full (no truncate), 4=verbose (raw details)
409
409
  // ... other configuration options ...
410
410
  }
411
411
  });
package/demo/gpt51.js CHANGED
@@ -3,7 +3,7 @@ import { ModelMix } from '../index.js';
3
3
 
4
4
  const mmix = new ModelMix({
5
5
  config: {
6
- debug: 2,
6
+ debug: 3,
7
7
  }
8
8
  });
9
9
 
package/demo/minimax.js CHANGED
@@ -7,7 +7,7 @@ const main = async () => {
7
7
 
8
8
  const bot = ModelMix
9
9
  .new({ config: { debug: 3 } })
10
- .minimaxM21()
10
+ .minimaxM25()
11
11
  .setSystem('You are a helpful assistant.');
12
12
 
13
13
  bot.addText('What is the capital of France?');
package/index.js CHANGED
@@ -48,8 +48,10 @@ const MODEL_PRICING = {
48
48
  'accounts/fireworks/models/deepseek-v3p2': [0.56, 1.68],
49
49
  'accounts/fireworks/models/glm-4p7': [0.55, 2.19],
50
50
  'accounts/fireworks/models/kimi-k2p5': [0.50, 2.80],
51
+ 'fireworks/glm-5': [1.00, 3.20],
51
52
  // MiniMax
52
53
  'MiniMax-M2.1': [0.30, 1.20],
54
+ 'MiniMax-M2.5': [0.30, 1.20],
53
55
  // Perplexity
54
56
  'sonar': [1.00, 1.00],
55
57
  'sonar-pro': [3.00, 15.00],
@@ -110,7 +112,7 @@ class ModelMix {
110
112
  this.config = {
111
113
  system: 'You are an assistant.',
112
114
  max_history: 1, // Default max history
113
- debug: 0, // 0=silent, 1=minimal, 2=readable summary, 3=full details
115
+ debug: 0, // 0=silent, 1=minimal, 2=readable summary, 3=full (no truncate), 4=verbose (raw details)
114
116
  bottleneck: defaultBottleneckConfig,
115
117
  roundRobin: false, // false=fallback mode, true=round robin rotation
116
118
  ...config
@@ -170,7 +172,7 @@ class ModelMix {
170
172
  return (tokens.input * inputPerMillion / 1_000_000) + (tokens.output * outputPerMillion / 1_000_000);
171
173
  }
172
174
 
173
- static formatInputSummary(messages, system) {
175
+ static formatInputSummary(messages, system, debug = 2) {
174
176
  const lastMessage = messages[messages.length - 1];
175
177
  let inputText = '';
176
178
 
@@ -181,38 +183,39 @@ class ModelMix {
181
183
  inputText = lastMessage.content;
182
184
  }
183
185
 
184
- const systemStr = `System: ${ModelMix.truncate(system, 500)}`;
185
- const inputStr = `Input: ${ModelMix.truncate(inputText, 1200)}`;
186
+ const noTruncate = debug >= 3;
187
+ const systemStr = noTruncate ? (system || '') : ModelMix.truncate(system, 500);
188
+ const inputStr = noTruncate ? inputText : ModelMix.truncate(inputText, 1200);
186
189
  const msgCount = `(${messages.length} msg${messages.length !== 1 ? 's' : ''})`;
187
190
 
188
- return `${systemStr} \n| ${inputStr} ${msgCount}`;
191
+ return `| SYSTEM\n${systemStr}\n| INPUT ${msgCount}\n${inputStr}`;
189
192
  }
190
193
 
191
194
  static formatOutputSummary(result, debug) {
192
195
  const parts = [];
196
+ const noTruncate = debug >= 3;
193
197
  if (result.message) {
194
198
  // Try to parse as JSON for better formatting
195
199
  try {
196
200
  const parsed = JSON.parse(result.message.trim());
197
201
  // If it's valid JSON and debug >= 2, show it formatted
198
202
  if (debug >= 2) {
199
- parts.push(`Output (JSON):\n${ModelMix.formatJSON(parsed)}`);
203
+ parts.push(`| OUTPUT (JSON)\n${ModelMix.formatJSON(parsed)}`);
200
204
  } else {
201
- parts.push(`Output: ${ModelMix.truncate(result.message, 1500)}`);
205
+ parts.push(`| OUTPUT\n${ModelMix.truncate(result.message, 1500)}`);
202
206
  }
203
207
  } catch (e) {
204
- // Not JSON, show truncated as before
205
- parts.push(`Output: ${ModelMix.truncate(result.message, 1500)}`);
208
+ parts.push(`| OUTPUT\n${noTruncate ? result.message : ModelMix.truncate(result.message, 1500)}`);
206
209
  }
207
210
  }
208
211
  if (result.think) {
209
- parts.push(`Think: ${ModelMix.truncate(result.think, 800)}`);
212
+ parts.push(`| THINK\n${noTruncate ? result.think : ModelMix.truncate(result.think, 800)}`);
210
213
  }
211
214
  if (result.toolCalls && result.toolCalls.length > 0) {
212
215
  const toolNames = result.toolCalls.map(t => t.function?.name || t.name).join(', ');
213
- parts.push(`Tools: ${toolNames}`);
216
+ parts.push(`| TOOLS\n${toolNames}`);
214
217
  }
215
- return parts.join(' | ');
218
+ return parts.join('\n');
216
219
  }
217
220
 
218
221
  attach(key, provider) {
@@ -435,6 +438,10 @@ class ModelMix {
435
438
  return this;
436
439
  }
437
440
 
441
+ minimaxM25({ options = {}, config = {} } = {}) {
442
+ return this.attach('MiniMax-M2.5', new MixMiniMax({ options, config }));
443
+ }
444
+
438
445
  minimaxM2Stable({ options = {}, config = {} } = {}) {
439
446
  return this.attach('MiniMax-M2-Stable', new MixMiniMax({ options, config }));
440
447
  }
@@ -446,6 +453,12 @@ class ModelMix {
446
453
  return this;
447
454
  }
448
455
 
456
+ GLM5({ options = {}, config = {}, mix = { fireworks: true } } = {}) {
457
+ mix = { ...this.mix, ...mix };
458
+ if (mix.fireworks) this.attach('fireworks/glm-5', new MixFireworks({ options, config }));
459
+ return this;
460
+ }
461
+
449
462
  GLM47({ options = {}, config = {}, mix = { fireworks: true } } = {}) {
450
463
  mix = { ...this.mix, ...mix };
451
464
  if (mix.fireworks) this.attach('accounts/fireworks/models/glm-4p7', new MixFireworks({ options, config }));
@@ -842,7 +855,7 @@ class ModelMix {
842
855
  const header = `\n${prefix} [${providerName}:${currentModelKey}] #${originalIndex + 1}${suffix}`;
843
856
 
844
857
  if (currentConfig.debug >= 2) {
845
- console.log(`${header} | ${ModelMix.formatInputSummary(this.messages, currentConfig.system)}`);
858
+ console.log(`${header}\n${ModelMix.formatInputSummary(this.messages, currentConfig.system, currentConfig.debug)}`);
846
859
  } else {
847
860
  console.log(header);
848
861
  }
@@ -897,11 +910,14 @@ class ModelMix {
897
910
 
898
911
  // debug level 2: Readable summary of output
899
912
  if (currentConfig.debug >= 2) {
900
- console.log(`✓ ${ModelMix.formatOutputSummary(result, currentConfig.debug).trim()}`);
913
+ const tokenInfo = result.tokens
914
+ ? ` ${result.tokens.input} → ${result.tokens.output} tok` + (result.tokens.cost != null ? ` $${result.tokens.cost.toFixed(4)}` : '')
915
+ : '';
916
+ console.log(`✓${tokenInfo}\n${ModelMix.formatOutputSummary(result, currentConfig.debug).trim()}`);
901
917
  }
902
918
 
903
- // debug level 3 (debug): Full response details
904
- if (currentConfig.debug >= 3) {
919
+ // debug level 4 (verbose): Full response details
920
+ if (currentConfig.debug >= 4) {
905
921
  if (result.response) {
906
922
  console.log('\n[RAW RESPONSE]');
907
923
  console.log(ModelMix.formatJSON(result.response));
@@ -1148,8 +1164,8 @@ class MixCustom {
1148
1164
 
1149
1165
  options.messages = this.convertMessages(options.messages, config);
1150
1166
 
1151
- // debug level 3 (debug): Full request details
1152
- if (config.debug >= 3) {
1167
+ // debug level 4 (verbose): Full request details
1168
+ if (config.debug >= 4) {
1153
1169
  console.log('\n[REQUEST DETAILS]');
1154
1170
 
1155
1171
  console.log('\n[CONFIG]');
@@ -2032,8 +2048,8 @@ class MixGoogle extends MixCustom {
2032
2048
  };
2033
2049
 
2034
2050
  try {
2035
- // debug level 3 (debug): Full request details
2036
- if (config.debug >= 3) {
2051
+ // debug level 4 (verbose): Full request details
2052
+ if (config.debug >= 4) {
2037
2053
  console.log('\n[REQUEST DETAILS - GOOGLE]');
2038
2054
 
2039
2055
  console.log('\n[CONFIG]');
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "modelmix",
3
- "version": "4.3.6",
3
+ "version": "4.4.2",
4
4
  "description": "🧬 Reliable interface with automatic fallback for AI LLMs.",
5
5
  "main": "index.js",
6
6
  "repository": {
(NOTE: the hunk below appears mis-attributed — its content is Markdown (skill documentation), not JSON, so it likely belongs to a separate SKILL.md-style file whose `CHANGED` header was dropped from this diff; verify against the published package contents)
@@ -20,6 +20,23 @@ Do NOT use this skill for:
20
20
  - Python or non-Node.js projects
21
21
  - Direct HTTP calls to LLM APIs (use ModelMix instead)
22
22
 
23
+ ## Common Tasks
24
+
25
+ - [Get a text response](#get-a-text-response)
26
+ - [Get structured JSON](#get-structured-json)
27
+ - [Stream a response](#stream-a-response)
28
+ - [Get raw response (tokens, thinking, tool calls)](#get-raw-response-tokens-thinking-tool-calls)
29
+ - [Access full response after `message()` or `json()` with `lastRaw`](#access-full-response-after-message-or-json-with-lastraw)
30
+ - [Add images](#add-images)
31
+ - [Use templates with placeholders](#use-templates-with-placeholders)
32
+ - [Round-robin load balancing](#round-robin-load-balancing)
33
+ - [MCP integration (external tools)](#mcp-integration-external-tools)
34
+ - [Custom local tools (addTool)](#custom-local-tools-addtool)
35
+ - [Rate limiting (Bottleneck)](#rate-limiting-bottleneck)
36
+ - [Debug mode](#debug-mode)
37
+ - [Use free-tier models](#use-free-tier-models)
38
+ - [Conversation history](#conversation-history)
39
+
23
40
  ## Installation
24
41
 
25
42
  ```bash
@@ -46,7 +63,7 @@ const model = ModelMix.new({
46
63
  config: {
47
64
  system: "You are a helpful assistant.",
48
65
  max_history: 5,
49
- debug: 0, // 0=silent, 1=minimal, 2=summary, 3=full
66
+ debug: 0, // 0=silent, 1=minimal, 2=summary, 3=full (no truncate), 4=verbose
50
67
  roundRobin: false // false=fallback, true=rotate models
51
68
  }
52
69
  });
@@ -229,7 +246,7 @@ const model = ModelMix.new({
229
246
 
230
247
  ```javascript
231
248
  const model = ModelMix.new({
232
- config: { debug: 2 } // 0=silent, 1=minimal, 2=summary, 3=full
249
+ config: { debug: 2 } // 0=silent, 1=minimal, 2=summary, 3=full (no truncate), 4=verbose
233
250
  }).gpt5mini();
234
251
  ```
235
252