modelmix 3.9.4 → 3.9.6

This diff represents the content of publicly available package versions released to a supported registry, and reflects the changes between those versions as they appear in the public registry.
package/MODELS.md CHANGED
@@ -342,4 +342,14 @@ All providers inherit from `MixCustom` base class which provides common function
  - **Base URL**: `https://api.groq.com/openai/v1/chat/completions`
  - **Input Format**: Same as OpenAI
  - **Output Format**: Same as OpenAI
- - **Special Notes**: Uses OpenAI-compatible format
+ - **Special Notes**: Uses OpenAI-compatible format
+
+ ### MiniMax (MixMiniMax)
+ - **Base URL**: `https://api.minimax.io/v1/chat/completions`
+ - **Input Format**: Same as OpenAI
+ - **Output Format**: Same as OpenAI
+ - **Special Notes**:
+   - Uses OpenAI-compatible API interface
+   - Requires `MINIMAX_API_KEY` environment variable
+   - Inherits all OpenAI functionality including tool calling
+   - Available models: `MiniMax-M2`
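Since the notes above describe the endpoint as OpenAI-compatible, a direct call to it can be sketched as below. This is a minimal sketch, assuming the usual OpenAI-style Bearer authorization header and chat-completions payload; neither detail is spelled out in this diff.

```js
// Hypothetical direct request to the documented MiniMax endpoint (Node 18+ global fetch).
// Assumption: "OpenAI-compatible" means Bearer auth and an OpenAI-shaped request/response body.
const askMiniMax = async (prompt) => {
    const res = await fetch('https://api.minimax.io/v1/chat/completions', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${process.env.MINIMAX_API_KEY}`
        },
        body: JSON.stringify({
            model: 'MiniMax-M2',
            messages: [{ role: 'user', content: prompt }]
        })
    });
    const data = await res.json();
    // "Output Format: Same as OpenAI" implies the reply text sits at choices[0].message.content.
    return data.choices?.[0]?.message?.content;
};

askMiniMax('Hello').then(console.log).catch(console.error);
```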
package/README.md CHANGED
@@ -27,6 +27,7 @@ Only the API keys you plan to use are required.
  ```plaintext
  ANTHROPIC_API_KEY="sk-ant-..."
  OPENAI_API_KEY="sk-proj-..."
+ MINIMAX_API_KEY="your-minimax-key..."
  ...
  GOOGLE_API_KEY="AIza..."
  ```
@@ -136,6 +137,7 @@ Here's a comprehensive list of available methods:
  | `grok3()` | Grok | grok-3 | [\$3.00 / \$15.00][6] |
  | `grok3mini()` | Grok | grok-3-mini | [\$0.30 / \$0.50][6] |
  | `grok4()` | Grok | grok-4-0709 | [\$3.00 / \$15.00][6] |
+ | `minimaxM2()` | MiniMax | MiniMax-M2 | [\$0.30 / \$1.20][9] |
  | `sonar()` | Perplexity | sonar | [\$1.00 / \$1.00][4] |
  | `sonarPro()` | Perplexity | sonar-pro | [\$3.00 / \$15.00][4] |
  | `scout()` | Groq | Llama-4-Scout-17B-16E-Instruct | [\$0.11 / \$0.34][5] |
@@ -153,6 +155,7 @@ Here's a comprehensive list of available methods:
  [6]: https://docs.x.ai/docs/models "xAI"
  [7]: https://www.together.ai/pricing "Together AI"
  [8]: https://lambda.ai/inference "Lambda Pricing"
+ [9]: https://www.minimax.io/price "MiniMax Pricing"
 
  Each method accepts optional `options` and `config` parameters to customize the model's behavior. For example:
 
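As a sketch of that "For example:" applied to the new provider, the snippet below combines the `minimaxM2({ options, config })` signature added in `index.js` with the `setSystem`/`addText`/`raw` helpers shown in the bundled example file; the `temperature` and `max_tokens` keys are assumed pass-through options for an OpenAI-compatible backend and do not come from this diff.

```js
import { ModelMix } from 'modelmix';
process.loadEnvFile(); // expects MINIMAX_API_KEY in .env

const bot = ModelMix
    .new({ config: { debug: true } })
    // Assumption: `options` is forwarded into the OpenAI-compatible request body.
    .minimaxM2({ options: { temperature: 0.2, max_tokens: 512 } })
    .setSystem('You are a helpful assistant.');

bot.addText('What is the capital of Japan?');

console.log(await bot.raw());
```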
@@ -0,0 +1,22 @@
+ import { ModelMix } from '../index.js';
+ process.loadEnvFile();
+
+
+
+ const main = async () => {
+
+     const bot = ModelMix
+         .new({ config: { debug: true } })
+         .minimaxM2()
+         .setSystem('You are a helpful assistant.');
+
+     bot.addText('What is the capital of France?');
+
+     const all = await bot.raw();
+
+     console.log('\n=== RESPONSE ===');
+     console.log(all);
+ }
+
+ main().catch(console.error);
+
package/index.js CHANGED
@@ -216,6 +216,14 @@ class ModelMix {
      return this.attach('lmstudio', new MixLMStudio({ options, config }));
  }
 
+ minimaxM2({ options = {}, config = {} } = {}) {
+     return this.attach('MiniMax-M2', new MixMiniMax({ options, config }));
+ }
+
+ minimaxM2Stable({ options = {}, config = {} } = {}) {
+     return this.attach('MiniMax-M2-Stable', new MixMiniMax({ options, config }));
+ }
+
  addText(text, { role = "user" } = {}) {
      const content = [{
          type: "text",
@@ -569,7 +577,7 @@ class ModelMix {
 
      const result = await providerInstance.create({ options: currentOptions, config: currentConfig });
 
-     if (result.toolCalls.length > 0) {
+     if (result.toolCalls && result.toolCalls.length > 0) {
 
          if (result.message) {
              if (result.signature) {
@@ -902,7 +910,12 @@ class MixCustom {
          }
      });
 
-     response.data.on('end', () => resolve({ response: raw, message: message.trim() }));
+     response.data.on('end', () => resolve({
+         response: raw,
+         message: message.trim(),
+         toolCalls: [],
+         think: null
+     }));
      response.data.on('error', reject);
  });
  }
@@ -1245,6 +1258,29 @@ class MixAnthropic extends MixCustom {
      }
  }
 
+ class MixMiniMax extends MixOpenAI {
+     getDefaultConfig(customConfig) {
+
+         if (!process.env.MINIMAX_API_KEY) {
+             throw new Error('MiniMax API key not found. Please provide it in config or set MINIMAX_API_KEY environment variable.');
+         }
+
+         return MixCustom.prototype.getDefaultConfig.call(this, {
+             url: 'https://api.minimax.io/v1/chat/completions',
+             apiKey: process.env.MINIMAX_API_KEY,
+             ...customConfig
+         });
+     }
+
+     extractDelta(data) {
+         // MiniMax might send different formats during streaming
+         if (data.choices && data.choices[0] && data.choices[0].delta && data.choices[0].delta.content) {
+             return data.choices[0].delta.content;
+         }
+         return '';
+     }
+ }
+
  class MixPerplexity extends MixCustom {
      getDefaultConfig(customConfig) {
 
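For context on the `extractDelta` override added above, the snippet below runs it against a hand-written chunk in the OpenAI streaming shape (`choices[0].delta.content`), which is what an OpenAI-compatible endpoint is expected to emit; the sample payload is illustrative, not captured from MiniMax.

```js
const { MixMiniMax } = require('modelmix');

// Illustrative parsed streaming chunk in the OpenAI format (not a real MiniMax capture).
const chunk = {
    object: 'chat.completion.chunk',
    choices: [{ index: 0, delta: { content: 'Par' }, finish_reason: null }]
};

// extractDelta does not use `this`, so it can be exercised off the prototype
// without constructing a provider (which would require MINIMAX_API_KEY to be set).
const extractDelta = MixMiniMax.prototype.extractDelta;
console.log(extractDelta(chunk));                        // 'Par'
console.log(extractDelta({ choices: [{ delta: {} }] })); // '' — the guarded fallback
```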
@@ -1662,4 +1698,4 @@ class MixGoogle extends MixCustom {
      }
  }
 
- module.exports = { MixCustom, ModelMix, MixAnthropic, MixOpenAI, MixPerplexity, MixOllama, MixLMStudio, MixGroq, MixTogether, MixGrok, MixCerebras, MixGoogle };
+ module.exports = { MixCustom, ModelMix, MixAnthropic, MixMiniMax, MixOpenAI, MixPerplexity, MixOllama, MixLMStudio, MixGroq, MixTogether, MixGrok, MixCerebras, MixGoogle };
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "modelmix",
-   "version": "3.9.4",
+   "version": "3.9.6",
    "description": "🧬 ModelMix - Unified API for Diverse AI LLM.",
    "main": "index.js",
    "repository": {
@@ -26,7 +26,7 @@
    "opus",
    "sonnet",
    "multimodal",
-   "groq",
+   "m2",
    "gemini",
    "ollama",
    "lmstudio",
@@ -35,7 +35,7 @@
    "oss",
    "k2",
    "reasoning",
-   "bottleneck",
+   "minimax",
    "cerebras",
    "thinking",
    "clasen"