modelmix 4.2.0 → 4.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/demo/package.json CHANGED
@@ -12,6 +12,7 @@
   "dependencies": {
     "@anthropic-ai/sdk": "^0.20.9",
     "dotenv": "^17.2.3",
+    "isolated-vm": "^6.0.2",
     "lemonlog": "^1.1.4"
   }
 }
@@ -0,0 +1,77 @@
+process.loadEnvFile();
+import { ModelMix } from '../index.js';
+import ivm from 'isolated-vm';
+
+console.log('🧬 ModelMix - JavaScript REPL Tool Demo');
+
+// Create the isolate once (reusable across calls)
+const isolate = new ivm.Isolate({ memoryLimit: 128 }); // 128 MB maximum
+
+// Simple example: a JavaScript REPL for computing powers of 2
+async function replPowersExample() {
+    console.log('\n=== JavaScript REPL - Powers of 2 ===\n');
+    const gptArgs = { options: { reasoning_effort: "none", verbosity: null } };
+    const mmix = ModelMix.new({ config: { debug: true, max_history: 10 } })
+        .gpt41nano()
+        .gpt52(gptArgs)
+        .gemini3flash()
+        .setSystem('You are a helpful assistant with access to a JavaScript REPL. When you use the REPL and get results, always show them to the user in your response.');
+
+    // Variable to capture the tool result
+    let toolResult = null;
+
+    // Register the custom REPL tool
+    mmix.addTool({
+        name: "javascript_repl",
+        description: "Execute JavaScript code in a REPL environment. You can run any valid JavaScript code and get the result.",
+        inputSchema: {
+            type: "object",
+            properties: {
+                code: {
+                    type: "string",
+                    description: "JavaScript code to execute"
+                }
+            },
+            required: ["code"]
+        }
+    }, async ({ code }) => {
+        console.log('🔧 Executing JavaScript code:');
+        console.log('─'.repeat(50));
+        console.log(code);
+        console.log('─'.repeat(50));
+
+        try {
+            const context = await isolate.createContext();
+            const result = await context.eval(`JSON.stringify(eval(${JSON.stringify(code)}))`, { timeout: 10000 });
+            toolResult = JSON.parse(result);
+            console.log('\n✅ Result:', toolResult);
+            return result;
+        } catch (error) {
+            console.log('\n❌ Error:', error.message);
+            return `Error: ${error.message}`;
+        }
+    });
+
+    // Ask the model to compute 100 powers of 2
+    mmix.addText('Compute the first 100 powers of 2 (2^0 through 2^99). After running the code, mention a few values from the result, such as the first 5 and the last 5 powers.');
+
+    const result = await mmix.message();
+    console.log('\n💬 Model response:');
+    console.log(result);
+
+    // Show a sample of the result if available
+    if (toolResult && Array.isArray(toolResult)) {
+        console.log('\n📊 Result sample (first 10 and last 10):');
+        console.log('First 10:', toolResult.slice(0, 10));
+        console.log('Last 10:', toolResult.slice(-10));
+        console.log(`\nTotal: ${toolResult.length} powers computed`);
+    }
+}
+
+try {
+    await replPowersExample();
+    console.log('\n✅ Example completed');
+} catch (error) {
+    console.error('❌ Error:', error);
+}
+
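Note on the tool callback above: wrapping the model-supplied code as JSON.stringify(eval(...)) is what lets the result cross the sandbox boundary, since isolated-vm only transfers copyable primitives such as strings. A minimal standalone sketch of the same pattern (variable names here are illustrative, not part of the package):

// Evaluate untrusted code inside an isolated-vm sandbox and get a JSON string back.
import ivm from 'isolated-vm';

const isolate = new ivm.Isolate({ memoryLimit: 128 }); // cap the sandbox heap at 128 MB
const context = await isolate.createContext();
const untrusted = 'Array.from({ length: 5 }, (_, i) => 2 ** i)';
// eval() runs inside the isolate; JSON.stringify turns the result into a transferable string.
const json = await context.eval(`JSON.stringify(eval(${JSON.stringify(untrusted)}))`, { timeout: 10000 });
console.log(JSON.parse(json)); // [ 1, 2, 4, 8, 16 ]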
package/index.js CHANGED
@@ -12,7 +12,7 @@ const { MCPToolsManager } = require('./mcp-tools');

 class ModelMix {

-    constructor({ options = {}, config = {} } = {}) {
+    constructor({ options = {}, config = {}, mix = {} } = {}) {
         this.models = [];
         this.messages = [];
         this.tools = {};
@@ -20,7 +20,7 @@ class ModelMix {
         this.mcp = {};
         this.mcpToolsManager = new MCPToolsManager();
         this.options = {
-            max_tokens: 5000,
+            max_tokens: 8192,
             temperature: 1, // 1 --> More creative, 0 --> More deterministic.
             ...options
         };
@@ -35,9 +35,12 @@ class ModelMix {
             system: 'You are an assistant.',
             max_history: 1, // Default max history
             debug: false,
+            verbose: 2, // 0=silent, 1=minimal, 2=readable summary, 3=full details
             bottleneck: defaultBottleneckConfig,
             ...config
         }
+        const freeMix = { openrouter: true, cerebras: true, groq: true, together: false, lambda: false };
+        this.mix = { ...freeMix, ...mix };

         this.limiter = new Bottleneck(this.config.bottleneck);

@@ -49,12 +52,12 @@ class ModelMix {
         return this;
     }

-    static new({ options = {}, config = {} } = {}) {
-        return new ModelMix({ options, config });
+    static new({ options = {}, config = {}, mix = {} } = {}) {
+        return new ModelMix({ options, config, mix });
     }

-    new() {
-        return new ModelMix({ options: this.options, config: this.config });
+    new({ options = {}, config = {}, mix = {} } = {}) {
+        return new ModelMix({ options: { ...this.options, ...options }, config: { ...this.config, ...config }, mix: { ...this.mix, ...mix } });
     }

     static formatJSON(obj) {
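With this change every instance carries a mix map of provider preferences, seeded from the free-tier freeMix defaults above, and the model helpers merge it with any per-call override. A hedged usage sketch, assuming the relevant provider API keys are set:

// Instance-wide provider preferences; per-call `mix` arguments still take precedence.
const { ModelMix } = require('modelmix');

const mmix = ModelMix.new({ mix: { openrouter: false, cerebras: false } })
    .gptOss()                                   // with these flags, only the Groq variant attaches
    .deepseekR1({ mix: { openrouter: true } }); // per-call override re-enables OpenRouter here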
@@ -77,6 +80,57 @@ class ModelMix {
         }
     }

+    // Verbose logging helpers
+    static truncate(str, maxLen = 100) {
+        if (!str || typeof str !== 'string') return str;
+        return str.length > maxLen ? str.substring(0, maxLen) + '...' : str;
+    }
+
+    static getVerboseLevel(config) {
+        // debug=true acts as verbose level 3
+        return config.debug ? 3 : (config.verbose || 0);
+    }
+
+    static verboseLog(level, config, ...args) {
+        const verboseLevel = ModelMix.getVerboseLevel(config);
+        if (verboseLevel >= level) {
+            console.log(...args);
+        }
+    }
+
+    static formatInputSummary(messages, system) {
+        const lastMessage = messages[messages.length - 1];
+        let inputText = '';
+
+        if (lastMessage && Array.isArray(lastMessage.content)) {
+            const textContent = lastMessage.content.find(c => c.type === 'text');
+            if (textContent) inputText = textContent.text;
+        } else if (lastMessage && typeof lastMessage.content === 'string') {
+            inputText = lastMessage.content;
+        }
+
+        const lines = [];
+        lines.push(`   📝 System: ${ModelMix.truncate(system, 60)}`);
+        lines.push(`   💬 Input: ${ModelMix.truncate(inputText, 150)}`);
+        lines.push(`   📊 Messages: ${messages.length}`);
+        return lines.join('\n');
+    }
+
+    static formatOutputSummary(result) {
+        const lines = [];
+        if (result.message) {
+            lines.push(`   📤 Output: ${ModelMix.truncate(result.message, 200)}`);
+        }
+        if (result.think) {
+            lines.push(`   🧠 Thinking: ${ModelMix.truncate(result.think, 100)}`);
+        }
+        if (result.toolCalls && result.toolCalls.length > 0) {
+            const toolNames = result.toolCalls.map(t => t.function?.name || t.name).join(', ');
+            lines.push(`   🔧 Tools: ${toolNames}`);
+        }
+        return lines.join('\n');
+    }
+
     attach(key, provider) {

         if (this.models.some(model => model.key === key)) {
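The new verbose setting layers on top of debug: level 1 prints the attempt and success markers, level 2 adds the input/output summaries built by the helpers above, and level 3 dumps full request and response details. A short sketch of selecting a level per instance:

// 0 = silent, 1 = minimal, 2 = readable summary (the default), 3 = full details (same as debug: true)
const quiet = ModelMix.new({ config: { verbose: 0 } }); // no per-request logging
const full = ModelMix.new({ config: { verbose: 3 } });  // raw request/response dumps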
@@ -100,9 +154,6 @@ class ModelMix {
     gpt41nano({ options = {}, config = {} } = {}) {
         return this.attach('gpt-4.1-nano', new MixOpenAI({ options, config }));
     }
-    gpt4o({ options = {}, config = {} } = {}) {
-        return this.attach('gpt-4o', new MixOpenAI({ options, config }));
-    }
     o4mini({ options = {}, config = {} } = {}) {
         return this.attach('o4-mini', new MixOpenAI({ options, config }));
     }
@@ -130,10 +181,12 @@ class ModelMix {
     gpt52chat({ options = {}, config = {} } = {}) {
         return this.attach('gpt-5.2-chat-latest', new MixOpenAI({ options, config }));
     }
-    gptOss({ options = {}, config = {}, mix = { together: false, cerebras: false, groq: true } } = {}) {
-        if (mix.together) return this.attach('openai/gpt-oss-120b', new MixTogether({ options, config }));
-        if (mix.cerebras) return this.attach('gpt-oss-120b', new MixCerebras({ options, config }));
-        if (mix.groq) return this.attach('openai/gpt-oss-120b', new MixGroq({ options, config }));
+    gptOss({ options = {}, config = {}, mix = {} } = {}) {
+        mix = { ...this.mix, ...mix };
+        if (mix.together) this.attach('openai/gpt-oss-120b', new MixTogether({ options, config }));
+        if (mix.cerebras) this.attach('gpt-oss-120b', new MixCerebras({ options, config }));
+        if (mix.groq) this.attach('openai/gpt-oss-120b', new MixGroq({ options, config }));
+        if (mix.openrouter) this.attach('openai/gpt-oss-120b:free', new MixOpenRouter({ options, config }));
         return this;
     }
     opus45({ options = {}, config = {} } = {}) {
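Because each truthy mix flag now attaches a provider instead of returning early, one helper call can register the same model on several providers, and execute() falls back through them in attach order. A hedged sketch using the default freeMix:

// Attaches the Cerebras, Groq and OpenRouter variants of gpt-oss-120b, in that order;
// if the primary provider fails, the next attached variant is tried automatically.
const mmix = ModelMix.new().gptOss();
mmix.addText('Summarize RFC 2119 in one sentence.').message().then(console.log);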
@@ -218,39 +271,50 @@ class ModelMix {
         return this;
     }

-    scout({ options = {}, config = {}, mix = { groq: true, together: false, cerebras: false } } = {}) {
+    scout({ options = {}, config = {}, mix = {} } = {}) {
+        mix = { ...this.mix, ...mix };
         if (mix.groq) this.attach('meta-llama/llama-4-scout-17b-16e-instruct', new MixGroq({ options, config }));
         if (mix.together) this.attach('meta-llama/Llama-4-Scout-17B-16E-Instruct', new MixTogether({ options, config }));
         if (mix.cerebras) this.attach('llama-4-scout-17b-16e-instruct', new MixCerebras({ options, config }));
         return this;
     }
-    maverick({ options = {}, config = {}, mix = { groq: true, together: false, lambda: false } } = {}) {
+    maverick({ options = {}, config = {}, mix = {} } = {}) {
+        mix = { ...this.mix, ...mix };
         if (mix.groq) this.attach('meta-llama/llama-4-maverick-17b-128e-instruct', new MixGroq({ options, config }));
         if (mix.together) this.attach('meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8', new MixTogether({ options, config }));
         if (mix.lambda) this.attach('llama-4-maverick-17b-128e-instruct-fp8', new MixLambda({ options, config }));
         return this;
     }

-    deepseekR1({ options = {}, config = {}, mix = { groq: true, together: false, cerebras: false } } = {}) {
+    deepseekR1({ options = {}, config = {}, mix = {} } = {}) {
+        mix = { ...this.mix, ...mix };
         if (mix.groq) this.attach('deepseek-r1-distill-llama-70b', new MixGroq({ options, config }));
         if (mix.together) this.attach('deepseek-ai/DeepSeek-R1', new MixTogether({ options, config }));
         if (mix.cerebras) this.attach('deepseek-r1-distill-llama-70b', new MixCerebras({ options, config }));
+        if (mix.openrouter) this.attach('deepseek/deepseek-r1-0528:free', new MixOpenRouter({ options, config }));
         return this;
     }

-    hermes3({ options = {}, config = {}, mix = { lambda: true } } = {}) {
-        this.attach('Hermes-3-Llama-3.1-405B-FP8', new MixLambda({ options, config }));
+    hermes3({ options = {}, config = {}, mix = {} } = {}) {
+        mix = { ...this.mix, ...mix };
+        if (mix.lambda) this.attach('Hermes-3-Llama-3.1-405B-FP8', new MixLambda({ options, config }));
+        if (mix.openrouter) this.attach('nousresearch/hermes-3-llama-3.1-405b:free', new MixOpenRouter({ options, config }));
         return this;
     }

-    kimiK2({ options = {}, config = {}, mix = { together: false, groq: true } } = {}) {
+    kimiK2({ options = {}, config = {}, mix = {} } = {}) {
+        mix = { ...this.mix, ...mix };
         if (mix.together) this.attach('moonshotai/Kimi-K2-Instruct-0905', new MixTogether({ options, config }));
         if (mix.groq) this.attach('moonshotai/kimi-k2-instruct-0905', new MixGroq({ options, config }));
+        if (mix.openrouter) this.attach('moonshotai/kimi-k2:free', new MixOpenRouter({ options, config }));
         return this;
     }

-    kimiK2think({ options = {}, config = {} } = {}) {
-        return this.attach('moonshotai/Kimi-K2-Thinking', new MixTogether({ options, config }));
+    kimiK2think({ options = {}, config = {}, mix = { together: true } } = {}) {
+        mix = { ...this.mix, ...mix };
+        if (mix.together) this.attach('moonshotai/Kimi-K2-Thinking', new MixTogether({ options, config }));
+        if (mix.openrouter) this.attach('moonshotai/kimi-k2-thinking', new MixOpenRouter({ options, config }));
+        return this;
     }

     lmstudio({ options = {}, config = {} } = {}) {
@@ -261,23 +325,43 @@
         return this.attach('MiniMax-M2', new MixMiniMax({ options, config }));
     }

-    minimaxM21({ options = {}, config = {} } = {}) {
-        return this.attach('MiniMax-M2.1', new MixMiniMax({ options, config }));
+    minimaxM21({ options = {}, config = {}, mix = { minimax: true } } = {}) {
+        mix = { ...this.mix, ...mix };
+        if (mix.minimax) this.attach('MiniMax-M2.1', new MixMiniMax({ options, config }));
+        if (mix.cerebras) this.attach('MiniMax-M2.1', new MixCerebras({ options, config }));
+        return this;
     }

     minimaxM2Stable({ options = {}, config = {} } = {}) {
         return this.attach('MiniMax-M2-Stable', new MixMiniMax({ options, config }));
     }

-    deepseekV32({ options = {}, config = {}, mix = { fireworks: true } } = {}) {
+    deepseekV32({ options = {}, config = {}, mix = {} } = {}) {
+        mix = { ...this.mix, ...mix };
         if (mix.fireworks) this.attach('accounts/fireworks/models/deepseek-v3p2', new MixFireworks({ options, config }));
+        if (mix.openrouter) this.attach('deepseek/deepseek-v3.2', new MixOpenRouter({ options, config }));
         return this;
     }

     GLM47({ options = {}, config = {}, mix = { fireworks: true } } = {}) {
+        mix = { ...this.mix, ...mix };
         if (mix.fireworks) this.attach('accounts/fireworks/models/glm-4p7', new MixFireworks({ options, config }));
+        if (mix.openrouter) this.attach('z-ai/glm-4.7', new MixOpenRouter({ options, config }));
+        if (mix.cerebras) this.attach('zai-glm-4.7', new MixCerebras({ options, config }));
+        return this;
+    }
+
+    GLM46({ options = {}, config = {}, mix = { cerebras: true } } = {}) {
+        mix = { ...this.mix, ...mix };
+        if (mix.cerebras) this.attach('zai-glm-4.6', new MixCerebras({ options, config }));
         return this;
-    }
+    }
+
+    GLM45({ options = {}, config = {}, mix = { openrouter: true } } = {}) {
+        mix = { ...this.mix, ...mix };
+        if (mix.openrouter) this.attach('z-ai/glm-4.5-air:free', new MixOpenRouter({ options, config }));
+        return this;
+    }

     addText(text, { role = "user" } = {}) {
         const content = [{
@@ -626,9 +710,16 @@
             ...config,
         };

-        if (currentConfig.debug) {
+        const verboseLevel = ModelMix.getVerboseLevel(currentConfig);
+
+        if (verboseLevel >= 1) {
             const isPrimary = i === 0;
-            log.debug(`[${currentModelKey}] Attempt #${i + 1}` + (isPrimary ? ' (Primary)' : ' (Fallback)'));
+            const tag = isPrimary ? '🚀' : '🔄';
+            console.log(`\n${tag} [${currentModelKey}] Attempt #${i + 1}` + (isPrimary ? '' : ' (Fallback)'));
+        }
+
+        if (verboseLevel >= 2) {
+            console.log(ModelMix.formatInputSummary(this.messages, currentConfig.system));
         }

         try {
@@ -662,27 +753,36 @@
             return this.execute();
         }

-        if (currentConfig.debug) {
-            console.log(`\nRequest successful: ${currentModelKey}`);
+        // Verbose level 1: Just success indicator
+        if (verboseLevel >= 1) {
+            console.log(`   ✅ Success`);
+        }
+
+        // Verbose level 2: Readable summary of output
+        if (verboseLevel >= 2) {
+            console.log(ModelMix.formatOutputSummary(result));
+        }

+        // Verbose level 3 (debug): Full response details
+        if (verboseLevel >= 3) {
             if (result.response) {
-                console.log('\nRAW RESPONSE:');
+                console.log('\n   📦 RAW RESPONSE:');
                 console.log(ModelMix.formatJSON(result.response));
             }

             if (result.message) {
-                console.log('\nMESSAGE:');
+                console.log('\n   💬 FULL MESSAGE:');
                 console.log(ModelMix.formatMessage(result.message));
             }

             if (result.think) {
-                console.log('\nTHINKING:');
+                console.log('\n   🧠 FULL THINKING:');
                 console.log(result.think);
             }
-
-            console.log('');
         }

+        if (verboseLevel >= 1) console.log('');
+
         return result;

     } catch (error) {
@@ -910,15 +1010,19 @@ class MixCustom {

         options.messages = this.convertMessages(options.messages, config);

-        if (config.debug) {
-            console.log('\nREQUEST:');
+        const verboseLevel = ModelMix.getVerboseLevel(config);
+
+        // Verbose level 3 (debug): Full request details
+        if (verboseLevel >= 3) {
+            console.log('\n   📡 REQUEST DETAILS:');

-            console.log('\nCONFIG:');
+            console.log('\n   ⚙️ CONFIG:');
             const configToLog = { ...config };
             delete configToLog.debug;
+            delete configToLog.verbose;
             console.log(ModelMix.formatJSON(configToLog));

-            console.log('\nOPTIONS:');
+            console.log('\n   📋 OPTIONS:');
             console.log(ModelMix.formatJSON(options));
         }

@@ -1157,6 +1261,21 @@ class MixOpenAI extends MixCustom {
     }
 }

+class MixOpenRouter extends MixOpenAI {
+    getDefaultConfig(customConfig) {
+
+        if (!process.env.OPENROUTER_API_KEY) {
+            throw new Error('OpenRouter API key not found. Please provide it in config or set OPENROUTER_API_KEY environment variable.');
+        }
+
+        return MixCustom.prototype.getDefaultConfig.call(this, {
+            url: 'https://openrouter.ai/api/v1/chat/completions',
+            apiKey: process.env.OPENROUTER_API_KEY,
+            ...customConfig
+        });
+    }
+}
+
 class MixAnthropic extends MixCustom {

     static thinkingOptions = {
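MixOpenRouter reuses the OpenAI-compatible request path and only swaps in the OpenRouter endpoint and key. A hedged sketch of attaching it directly (the class is exported from the package as of this release; the `{ options: {}, config: {} }` argument shape mirrors the other providers in this diff, and OPENROUTER_API_KEY must be set or getDefaultConfig() throws):

const { ModelMix, MixOpenRouter } = require('modelmix');

const mmix = ModelMix.new()
    .attach('deepseek/deepseek-r1-0528:free', new MixOpenRouter({ options: {}, config: {} }));
mmix.addText('Hello!').message().then(console.log);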
@@ -1635,7 +1754,8 @@ class MixGoogle extends MixCustom {
                 functionCall: {
                     name: toolCall.function.name,
                     args: JSON.parse(toolCall.function.arguments)
-                }
+                },
+                thought_signature: toolCall.thought_signature || ""
             }))
         }
     }
@@ -1721,15 +1841,19 @@ class MixGoogle extends MixCustom {
         };

         try {
-            if (config.debug) {
-                console.log('\nREQUEST (GOOGLE):');
+            const verboseLevel = ModelMix.getVerboseLevel(config);
+
+            // Verbose level 3 (debug): Full request details
+            if (verboseLevel >= 3) {
+                console.log('\n   📡 REQUEST DETAILS (GOOGLE):');

-                console.log('\nCONFIG:');
+                console.log('\n   ⚙️ CONFIG:');
                 const configToLog = { ...config };
                 delete configToLog.debug;
+                delete configToLog.verbose;
                 console.log(ModelMix.formatJSON(configToLog));

-                console.log('\nPAYLOAD:');
+                console.log('\n   📋 PAYLOAD:');
                 console.log(ModelMix.formatJSON(payload));
             }

@@ -1763,7 +1887,8 @@ class MixGoogle extends MixCustom {
                     function: {
                         name: part.functionCall.name,
                         arguments: JSON.stringify(part.functionCall.args)
-                    }
+                    },
+                    thought_signature: part.thoughtSignature || ""
                 };
             }
             return null;
@@ -1800,4 +1925,4 @@ class MixGoogle extends MixCustom {
     }
 }

-module.exports = { MixCustom, ModelMix, MixAnthropic, MixMiniMax, MixOpenAI, MixPerplexity, MixOllama, MixLMStudio, MixGroq, MixTogether, MixGrok, MixCerebras, MixGoogle, MixFireworks };
+module.exports = { MixCustom, ModelMix, MixAnthropic, MixMiniMax, MixOpenAI, MixOpenRouter, MixPerplexity, MixOllama, MixLMStudio, MixGroq, MixTogether, MixGrok, MixCerebras, MixGoogle, MixFireworks };
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelmix",
-  "version": "4.2.0",
-  "description": "🧬 ModelMix - Unified API for Diverse AI LLM.",
+  "version": "4.2.4",
+  "description": "🧬 Reliable interface with automatic fallback for AI LLMs.",
   "main": "index.js",
   "repository": {
     "type": "git",
@@ -25,9 +25,9 @@
     "gpt5",
     "opus",
     "sonnet",
-    "multimodal",
+    "openrouter",
     "gemini",
-    "ollama",
+    "glm",
     "lmstudio",
     "deepseek",
     "oss",
@@ -46,7 +46,7 @@
   },
   "homepage": "https://github.com/clasen/ModelMix#readme",
   "dependencies": {
-    "@modelcontextprotocol/sdk": "^1.23.0",
+    "@modelcontextprotocol/sdk": "^1.25.2",
     "axios": "^1.12.2",
     "bottleneck": "^2.19.5",
     "file-type": "^16.5.4",
@@ -72,7 +72,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
     it('should enforce minimum time between requests', async () => {
         const startTimes = [];

-        model.gpt4o();
+        model.gpt41();

         // Mock API responses
         nock('https://api.openai.com')
@@ -122,7 +122,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
            }
        });

-        model.gpt4o();
+        model.gpt41();

        // Mock API with delay to simulate concurrent requests
        nock('https://api.openai.com')
@@ -184,7 +184,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
    it('should apply rate limiting to OpenAI requests', async () => {
        const requestTimes = [];

-        model.gpt4o();
+        model.gpt41();

        nock('https://api.openai.com')
            .post('/v1/chat/completions')
@@ -267,7 +267,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
    });

    it('should handle rate limiting with API errors', async () => {
-        model.gpt4o();
+        model.gpt41();

        nock('https://api.openai.com')
            .post('/v1/chat/completions')
@@ -289,7 +289,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
    it('should continue rate limiting after errors', async () => {
        const requestTimes = [];

-        model.gpt4o();
+        model.gpt41();

        // First request fails
        nock('https://api.openai.com')
@@ -345,7 +345,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
            }
        });

-        model.gpt4o();
+        model.gpt41();

        let requestCount = 0;

@@ -392,7 +392,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
            }
        });

-        model.gpt4o();
+        model.gpt41();

        const results = [];

@@ -440,7 +440,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
            }
        });

-        model.gpt4o();
+        model.gpt41();

        nock('https://api.openai.com')
            .post('/v1/chat/completions')
@@ -489,7 +489,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
            done();
        });

-        model.gpt4o();
+        model.gpt41();

        nock('https://api.openai.com')
            .post('/v1/chat/completions')
@@ -25,7 +25,7 @@ describe('Image Processing and Multimodal Support Tests', () => {
    it('should handle base64 image data correctly', async () => {
        const base64Image = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8z8BQz0AEYBxVSF+FABJADveWkH6oAAAAAElFTkSuQmCC';

-        model.gpt4o()
+        model.gpt41()
            .addText('What do you see in this image?')
            .addImageFromUrl(base64Image);

package/test/json.test.js CHANGED
@@ -198,7 +198,7 @@ describe('JSON Schema and Structured Output Tests', () => {
        }]
    };

-    model.gpt4o().addText('List 3 countries');
+    model.gpt41().addText('List 3 countries');

    // Mock the API response
    nock('https://api.openai.com')
@@ -270,7 +270,7 @@ describe('JSON Schema and Structured Output Tests', () => {
    });

    it('should handle JSON parsing errors gracefully', async () => {
-        model.gpt4o().addText('Generate invalid JSON');
+        model.gpt41().addText('Generate invalid JSON');

        // Mock invalid JSON response
        nock('https://api.openai.com')
package/test/live.mcp.js CHANGED
@@ -110,8 +110,8 @@ describe('Live MCP Integration Tests', function () {
        }
    });

-    it('should use custom MCP tools with Gemini 2.5 Flash', async function () {
-        const model = ModelMix.new(setup).gemini25flash();
+    it('should use custom MCP tools with Gemini 3 Flash', async function () {
+        const model = ModelMix.new(setup).gemini3flash();

        // Add password generator tool
        model.addTool({
@@ -149,11 +149,13 @@ describe('Live MCP Integration Tests', function () {
        model.addText('Generate a secure password of 16 characters with symbols.');

        const response = await model.message();
-        console.log(`Gemini 2.5 Flash with MCP tools: ${response}`);
+        console.log(`Gemini 3 Flash with MCP tools: ${response}`);

        expect(response).to.be.a('string');
-        expect(response).to.include('password');
-        expect(response).to.include('16');
+        // Check password is mentioned and a generated password string is present
+        expect(response.toLowerCase()).to.include('password');
+        // Verify a generated password is in the response (at least 12 chars with mix of alphanumeric/symbols)
+        expect(response).to.match(/[a-zA-Z0-9!@#$%^&*()_+\-=\[\]{}|;:,.<>?]{12,}/);
    });

 });
@@ -374,8 +376,8 @@ describe('Live MCP Integration Tests', function () {
        expect(result.factorial_result).to.equal(120);
    });

-    it('should use MCP tools with JSON output using Gemini 2.5 Flash', async function () {
-        const model = ModelMix.new(setup).gemini25flash();
+    it('should use MCP tools with JSON output using Gemini 3 Flash', async function () {
+        const model = ModelMix.new(setup).gemini3flash();

        // Add system info tool
        model.addTool({
@@ -414,7 +416,7 @@ describe('Live MCP Integration Tests', function () {
        generated_at: ""
    });

-    console.log(`Gemini 2.5 Flash with MCP tools JSON result:`, result);
+    console.log(`Gemini 3 Flash with MCP tools JSON result:`, result);

    expect(result).to.be.an('object');
    expect(result.timestamp).to.be.a('number');