modelmix 2.9.0 → 2.9.4

package/README.md CHANGED
@@ -8,6 +8,7 @@
  - **Request Rate Control**: Manage the rate of requests to adhere to provider limitations using Bottleneck.
  - **Flexible Integration**: Easily integrate popular models like OpenAI, Anthropic, Perplexity, Groq, Together AI, Ollama, LM Studio or custom models.
  - **History Tracking**: Automatically logs the conversation history with model responses, allowing you to limit the number of historical messages with `max_history`.
+ - **Model Fallbacks**: Automatically try different models if one fails or is unavailable.

  ## 📦 Installation

@@ -82,6 +83,21 @@ Here's a quick example to get you started:

  3. **Generate responses from different models**:

+ #### Model Fallbacks
+ ```javascript
+ // Create a message handler with multiple fallback models
+ const handler = mmix.create(['grok-2-latest', 'claude-3-7-sonnet-20250219']);
+
+ // If the first model (grok-2-latest) fails or is unavailable,
+ // ModelMix will automatically try the next model (claude-3-7-sonnet)
+ const response = await handler.addText('do you like cats?').message();
+ ```
+
+ The `create()` method accepts either a single model name as a string or an array of model names. When an array is provided, ModelMix will attempt to use each model in order until a successful response is received. This is useful for:
+ - Implementing fallback options when a primary model is unavailable
+ - Load balancing across different providers
+ - Ensuring high availability in your application
+
  #### gpt-4o-mini
  ```javascript
  const gpt = mmix.create('gpt-4o-mini', { options: { temperature: 0 } });
@@ -247,7 +263,7 @@ new ModelMix(args = { options: {}, config: {} })
    - ...(Additional default options can be added as needed)
  - **config**: This object contains configuration settings that control the behavior of the `ModelMix` instance. These settings can also be overridden for specific model instances. Examples of configuration settings include:
    - `system`: Sets the default system message for the model, e.g., "You are an assistant."
-   - `max_history`: Limits the number of historical messages to retain, e.g., 5.
+   - `max_history`: Limits the number of historical messages to retain, e.g., 1.
    - `bottleneck`: Configures the rate limiting behavior using Bottleneck. For example:
      - `maxConcurrent`: Maximum number of concurrent requests
      - `minTime`: Minimum time between requests (in ms)
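To make the README's new fallback description concrete, here is a minimal sketch of how the string and array forms of `create()` relate, assuming a CommonJS consumer with `OPENAI_API_KEY` and `ANTHROPIC_API_KEY` set in the environment; the model names are illustrative:

```javascript
const { ModelMix, MixOpenAI, MixAnthropic } = require('modelmix');

const mmix = new ModelMix({
    options: { max_tokens: 2000 },
    config: { system: 'You are an assistant.', max_history: 1 }
});
mmix.attach(new MixOpenAI(), new MixAnthropic());

// Single string: one model, no fallback.
const single = mmix.create('gpt-4o-mini');

// Array: same handler API, but if the first model throws,
// the next one in the array is tried with the same messages.
const handler = mmix.create(['gpt-4o-mini', 'claude-3-7-sonnet-20250219']);

(async () => {
    const reply = await handler.addText('do you like cats?').message();
    console.log(reply);
})();
```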
package/demo/default.env CHANGED
@@ -1,4 +1,6 @@
  ANTHROPIC_API_KEY=""
  OPENAI_API_KEY=""
  PPLX_API_KEY=""
- GROQ_API_KEY=""
+ GROQ_API_KEY=""
+ TOGETHER_API_KEY=""
+ XAI_API_KEY=""
package/demo/grok.mjs ADDED
@@ -0,0 +1,18 @@
+ import 'dotenv/config'
+
+ import { ModelMix, MixGrok, MixAnthropic, MixOpenAI } from '../index.js';
+
+ const mmix = new ModelMix({
+     options: {
+         max_tokens: 2000,
+     },
+     config: {
+         system: 'You are ALF from Melmac.',
+         max_history: 2
+     }
+ });
+
+ mmix.attach(new MixGrok(), new MixAnthropic(), new MixOpenAI());
+
+ const r = await mmix.create(['claude-3-7-sonnet-20250219', 'o3-mini', 'grok-2-latest']).addText('do you like cats?').message();
+ console.log(r);
package/index.js CHANGED
@@ -23,7 +23,7 @@ class ModelMix {

          this.config = {
              system: 'You are an assistant.',
-             max_history: 5, // Default max history
+             max_history: 1, // Default max history
              debug: false,
              bottleneck: defaultBottleneckConfig,
              ...args.config
@@ -37,21 +37,39 @@ class ModelMix {
          return this;
      }

-     attach(modelInstance) {
-         const key = modelInstance.config.prefix.join("_");
-         this.models[key] = modelInstance;
+     attach(...modelInstances) {
+         for (const modelInstance of modelInstances) {
+             const key = modelInstance.config.prefix.join("_");
+             this.models[key] = modelInstance;
+         }
          return this;
      }

-     create(modelKey, args = { config: {}, options: {} }) {
+     create(modelKeys, args = { config: {}, options: {} }) {
+         // If modelKeys is a string, convert it to an array for backwards compatibility
+         const modelArray = Array.isArray(modelKeys) ? modelKeys : [modelKeys];
+
+         if (modelArray.length === 0) {
+             throw new Error('No model keys provided');
+         }
+
+         // Verify that all the requested models are available
+         const unavailableModels = modelArray.filter(modelKey => {
+             return !Object.values(this.models).some(entry =>
+                 entry.config.prefix.some(p => modelKey.startsWith(p))
+             );
+         });
+
+         if (unavailableModels.length > 0) {
+             throw new Error(`The following models are not available: ${unavailableModels.join(', ')}`);
+         }
+
+         // Once all models are verified as available, take the first one
+         const modelKey = modelArray[0];
          const modelEntry = Object.values(this.models).find(entry =>
              entry.config.prefix.some(p => modelKey.startsWith(p))
          );

-         if (!modelEntry) {
-             throw new Error(`Model with prefix matching ${modelKey} is not attached.`);
-         }
-
          const options = {
              ...this.defaultOptions,
              ...modelEntry.options,
@@ -65,7 +83,8 @@ class ModelMix {
              ...args.config
          };

-         return new MessageHandler(this, modelEntry, options, config);
+         // Pass remaining models array for fallback
+         return new MessageHandler(this, modelEntry, options, config, modelArray.slice(1));
      }

      setSystem(text) {
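Two behavioral notes on the hunk above: `attach()` is now variadic, and `create()` validates every model in the array up front, so a typo or an unattached provider fails immediately instead of surfacing mid-fallback. A small sketch of the new failure mode, assuming only `MixOpenAI` is attached and using placeholder model names:

```javascript
const { ModelMix, MixOpenAI } = require('modelmix');

const mmix = new ModelMix();
mmix.attach(new MixOpenAI()); // variadic: mmix.attach(a, b, c) also works now

try {
    // 'grok-2-latest' matches no attached prefix, so create() throws
    // before any request is made.
    mmix.create(['gpt-4o-mini', 'grok-2-latest']);
} catch (e) {
    console.error(e.message);
    // -> The following models are not available: grok-2-latest
}
```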
@@ -96,13 +115,13 @@
      }
  }

  class MessageHandler {
-     constructor(mix, modelEntry, options, config) {
+     constructor(mix, modelEntry, options, config, fallbackModels = []) {
          this.mix = mix;
          this.modelEntry = modelEntry;
          this.options = options;
          this.config = config;
          this.messages = [];
-
+         this.fallbackModels = fallbackModels;
          this.imagesToProcess = [];
      }

@@ -276,22 +295,56 @@ class MessageHandler {

      async execute() {
          return this.mix.limiter.schedule(async () => {
-             await this.processImageUrls();
-
-             this.applyTemplate();
-             this.messages = this.messages.slice(-this.config.max_history);
-             this.messages = this.groupByRoles(this.messages);
+             try {
+                 await this.processImageUrls();
+                 this.applyTemplate();
+                 this.messages = this.messages.slice(-this.config.max_history);
+                 this.messages = this.groupByRoles(this.messages);

-             if (this.messages.length === 0) {
-                 throw new Error("No user messages have been added. Use addText(prompt), addTextFromFile(filePath), addImage(filePath), or addImageFromUrl(url) to add a prompt.");
-             }
+                 if (this.messages.length === 0) {
+                     throw new Error("No user messages have been added. Use addText(prompt), addTextFromFile(filePath), addImage(filePath), or addImageFromUrl(url) to add a prompt.");
+                 }

-             this.options.messages = this.messages;
+                 this.options.messages = this.messages;

-             try {
-                 const result = await this.modelEntry.create({ options: this.options, config: this.config });
-                 this.messages.push({ role: "assistant", content: result.message });
-                 return result;
+                 try {
+                     const result = await this.modelEntry.create({ options: this.options, config: this.config });
+                     this.messages.push({ role: "assistant", content: result.message });
+                     return result;
+                 } catch (error) {
+                     // If there are fallback models available, try the next one
+                     if (this.fallbackModels.length > 0) {
+                         const nextModelKey = this.fallbackModels[0];
+                         log.warn(`Model ${this.options.model} failed, trying fallback model ${nextModelKey}...`);
+
+                         // Create a completely new handler with the fallback model
+                         const nextHandler = this.mix.create(
+                             [nextModelKey, ...this.fallbackModels.slice(1)],
+                             {
+                                 options: {
+                                     // Keep only generic options, not model-specific ones
+                                     max_tokens: this.options.max_tokens,
+                                     temperature: this.options.temperature,
+                                     top_p: this.options.top_p,
+                                     stream: this.options.stream
+                                 }
+                             }
+                         );
+
+                         // Copy all accumulated messages directly
+                         nextHandler.messages = [...this.messages];
+
+                         // Keep the same system message and replacements
+                         nextHandler.setSystem(this.config.system);
+                         if (this.config.replace) {
+                             nextHandler.replace(this.config.replace);
+                         }
+
+                         // Try with the next model
+                         return nextHandler.execute();
+                     }
+                     throw error;
+                 }
              } catch (error) {
                  throw error;
              }
@@ -581,6 +634,17 @@ class MixOllama extends MixCustom {
          }
      }

+ class MixGrok extends MixOpenAI {
+     getDefaultConfig(customConfig) {
+         return super.getDefaultConfig({
+             url: 'https://api.x.ai/v1/chat/completions',
+             prefix: ['grok'],
+             apiKey: process.env.XAI_API_KEY,
+             ...customConfig
+         });
+     }
+ }
+
  class MixLMStudio extends MixCustom {
      getDefaultConfig(customConfig) {
          return super.getDefaultConfig({
@@ -655,4 +719,4 @@ class MixTogether extends MixCustom {
      }
  }

- module.exports = { MixCustom, ModelMix, MixAnthropic, MixOpenAI, MixPerplexity, MixOllama, MixLMStudio, MixGroq, MixTogether };
+ module.exports = { MixCustom, ModelMix, MixAnthropic, MixOpenAI, MixPerplexity, MixOllama, MixLMStudio, MixGroq, MixTogether, MixGrok };
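Putting the new `MixGrok` provider and the fallback chain together, here is a sketch that mirrors the shipped demo, assuming an ESM context (for top-level await) and `XAI_API_KEY`/`ANTHROPIC_API_KEY` set in the environment:

```javascript
import 'dotenv/config';
import { ModelMix, MixGrok, MixAnthropic } from 'modelmix';

const mmix = new ModelMix({ options: { max_tokens: 2000 } });
mmix.attach(new MixGrok(), new MixAnthropic());

// If the Grok call throws, execute() logs a warning and retries the
// same conversation against the next model in the array.
const r = await mmix.create(['grok-2-latest', 'claude-3-7-sonnet-20250219'])
    .addText('do you like cats?')
    .message();
console.log(r);
```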
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "modelmix",
-     "version": "2.9.0",
+     "version": "2.9.4",
      "description": "🧬 ModelMix - Unified API for Diverse AI LLM.",
      "main": "index.js",
      "repository": {
@@ -15,6 +15,7 @@
      "anthropic",
      "agent",
      "perplexity",
+     "grok",
      "sonnet-3",
      "gpt",
      "claude",
@@ -28,6 +29,7 @@
      "together",
      "o1",
      "deepseek",
+     "fallback",
      "o3",
      "o3-mini",
      "nousresearch",