modelmix 3.1.8 → 3.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +92 -122
- package/demo/custom.mjs +1 -1
- package/demo/short.mjs +28 -0
- package/demo/stream.mjs +3 -3
- package/index.js +550 -148
- package/package.json +3 -2
package/README.md
CHANGED
@@ -2,13 +2,16 @@

 **ModelMix** is a versatile module that enables seamless integration of various language models from different providers through a unified interface. With ModelMix, you can effortlessly manage and utilize multiple AI models while controlling request rates to avoid provider restrictions.

+Are you one of those developers who wants to apply language models to everything? Do you need a reliable fallback system to ensure your application never fails? ModelMix is the answer! It allows you to chain multiple models together, automatically falling back to the next model if one fails, ensuring your application always gets a response.
+
 ## ✨ Features

 - **Unified Interface**: Interact with multiple AI models through a single, coherent API.
 - **Request Rate Control**: Manage the rate of requests to adhere to provider limitations using Bottleneck.
-- **Flexible Integration**: Easily integrate popular models like OpenAI, Anthropic, Perplexity, Groq, Together AI, Ollama, LM Studio or custom models.
+- **Flexible Integration**: Easily integrate popular models like OpenAI, Anthropic, Perplexity, Groq, Together AI, Ollama, LM Studio, Google Gemini or custom models.
 - **History Tracking**: Automatically logs the conversation history with model responses, allowing you to limit the number of historical messages with `max_history`.
 - **Model Fallbacks**: Automatically try different models if one fails or is unavailable.
+- **Chain Multiple Models**: Create powerful chains of models that work together, with automatic fallback if one fails.

 ## 📦 Installation

@@ -18,7 +21,7 @@ First, install the ModelMix package:
 npm install modelmix
 ```

-
+Recommended: install dotenv to manage environment variables:

 ```bash
 npm install dotenv
@@ -35,129 +38,49 @@ Here's a quick example to get you started:
 PPLX_API_KEY="your_perplexity_api_key"
 GROQ_API_KEY="your_groq_api_key"
 TOGETHER_API_KEY="your_together_api_key"
+GOOGLE_API_KEY="your_google_api_key"
 ```

 2. **Create and configure your models**:

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        url: 'http://localhost:11434/api/chat',
-        prefix: ['llava'],
-    },
-    options: {
-        temperature: 0.5,
-    }
-}));
-mmix.attach(new MixTogether());
-```
-
-3. **Generate responses from different models**:
-
-#### Model Fallbacks
-```javascript
-// Create a message handler with multiple fallback models
-const handler = mmix.create(['grok-2-latest', 'claude-3-7-sonnet-20250219']);
-
-// If the first model (grok-2-latest) fails or is unavailable,
-// ModelMix will automatically try the next model (claude-3-7-sonnet)
-const response = await handler.addText('do you like cats?').message();
-```
-
-The `create()` method accepts either a single model name as a string or an array of model names. When an array is provided, ModelMix will attempt to use each model in order until a successful response is received. This is useful for:
-- Implementing fallback options when a primary model is unavailable
-- Load balancing across different providers
-- Ensuring high availability in your application
-
-#### gpt-4o-mini
-```javascript
-const gpt = mmix.create('gpt-4o-mini', { options: { temperature: 0 } });
-gpt.addText("Have you ever eaten a {animal}?");
-gpt.replace({ '{animal}': 'cat' });
-console.log(await gpt.message());
-```
-
-#### gpt-4.1-nano (json)
-```javascript
-console.log("\n" + '--------| gpt-4.1-nano |--------');
-const gpt = mmix.create('gpt-4.1-nano', { options: { temperature: 0 } }).addText("Have you ever eaten a {animal}?");
-gpt.replace({ '{animal}': 'cat' });
-const schemaExample = { time: '24:00:00', message: 'Hello' };
-const schemaDescription = { time: 'Time in format HH:MM:SS' }; // optional
-console.log(await gpt.json(schemaExample, schemaDescription));
-```
-
-#### claude-3-5-sonnet-20240620 (writer)
-```javascript
-const writer = mmix.create('claude-3-5-sonnet-20240620', { options: { temperature: 0.5 } });
-writer.setSystem('You are a writer like Stephen King'); // or setSystemFromFile
-writer.replace({ '{story_title}': 'The Mysterious Package' })
-// or write.replaceKeyFromFile('{story_title}', './title.md');
-const story = await writer.addTextFromFile('./prompt.md').message();
-console.log(story);
-```
-#### claude-3-5-sonnet-20240620 (image)
-```javascript
-console.log("\n" + '--------| |--------');
-const claude = mmix.create('claude-3-5-sonnet-20240620', { options: { temperature: 0 } });
-claude.addImage("./watson.jpg"); // or claude.addImageFromUrl(url)
-const imageDescription = await claude.addText("Describe the image").message();
-console.log(imageDescription);
-```
-
-#### pplx-70b-online
-```javascript
-const pplx = mmix.create('pplx-70b-online', { config: { max_tokens: 500 } });
-pplx.addText('How much is ETH trading in USD?');
-const news = await pplx.addText('What are the 3 most recent Ethereum news?').message();
-console.log(news);
-```
-
-#### ollama (llava:latest)
-```javascript
-await mmix.create('llava:latest')
-    .addImage("./watson.jpg")
-    .addText("What is the predominant color?")
-    .stream((data) => { console.log(data.message); });
-```
+```javascript
+import 'dotenv/config';
+import { ModelMix } from 'modelmix';
+
+// Basic setup with system prompt and debug mode
+const setup = {
+    config: {
+        system: "You are ALF, if they ask your name, respond with 'ALF'.",
+        debug: true
+    }
+};
+
+// Chain multiple models with automatic fallback
+const result = await ModelMix.create(setup)
+    .sonnet37think()
+    .o4mini({ config: { temperature: 0 } })
+    .gemini25proExp()
+    .gpt41nano()
+    .grok3mini()
+    .addText("What's your name?")
+    .message();
+
+console.log(result);
+
+// Get structured JSON responses
+const jsonResult = await ModelMix.create()
+    .sonnet37()
+    .addText("Name and capital of 3 South American countries.")
+    .json({ countries: [{ name: "", capital: "" }] });
+
+console.log(jsonResult);
+```

-
-
-
-
-
-console.log(comparison);
-```
-
-4. Find the files for this example at: [/ModelMix/demo](https://github.com/clasen/ModelMix/tree/master/demo).
+This pattern allows you to:
+- Chain multiple models together
+- Automatically fall back to the next model if one fails
+- Get structured JSON responses when needed
+- Keep your code clean and maintainable

 ## 🔄 Templating Methods

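A minimal sketch of the same builder pattern with per-model tuning, based on the quick start above. The prompt, temperature, and model pairing here are illustrative, not part of the diff:

```javascript
import 'dotenv/config';
import { ModelMix } from 'modelmix';

// Each shorthand accepts its own { options, config }, so a fallback
// model can be tuned independently of the primary.
const reply = await ModelMix.create()
    .sonnet37({ options: { temperature: 0.2 } }) // primary attempt
    .gpt41mini()                                 // tried only if the primary fails
    .addText('Summarize Moby-Dick in one sentence.')
    .message();

console.log(reply);
```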
@@ -243,14 +166,14 @@ ModelMix now uses Bottleneck for efficient rate limiting of API requests. This i
 1. **Configuration**: Bottleneck is configured in the ModelMix constructor. You can customize the settings or use the default configuration:

 ```javascript
-const
+const setup = {
     config: {
         bottleneck: {
             maxConcurrent: 8, // Maximum number of concurrent requests
             minTime: 500 // Minimum time between requests (in ms)
         }
     }
-}
+};
 ```

 2. **Rate Limiting**: When you make a request using any of the attached models, Bottleneck automatically manages the request flow based on the configured settings.
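A sketch of wiring the Bottleneck settings from the snippet above into a ModelMix instance; the limit values are illustrative:

```javascript
import { ModelMix, MixOpenAI } from 'modelmix';

// Bottleneck settings ride in under config.bottleneck and feed
// the internal limiter that schedules every request.
const mmix = new ModelMix({
    config: {
        bottleneck: {
            maxConcurrent: 2, // at most two requests in flight
            minTime: 1000     // at least 1s between request starts
        }
    }
});
mmix.attach(new MixOpenAI());
```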
@@ -282,6 +205,43 @@ new ModelMix(args = { options: {}, config: {} })
 - `reservoirRefreshInterval`: Reservoir refresh interval
 - ...(Additional configuration parameters can be added as needed)

+### Shorthand Methods
+
+ModelMix provides convenient shorthand methods for quickly accessing different AI models. Here's a comprehensive list of available methods:
+
+| Method | Provider | Model | Description |
+| ------------------ | ----------- | ------------------------------ | -------------------------------------------- |
+| `gpt41()` | OpenAI | gpt-4.1 | OpenAI's GPT-4.1 model |
+| `gpt41mini()` | OpenAI | gpt-4.1-mini | OpenAI's GPT-4.1 Mini model |
+| `gpt41nano()` | OpenAI | gpt-4.1-nano | OpenAI's GPT-4.1 Nano model |
+| `gpt4o()` | OpenAI | gpt-4o | OpenAI's GPT-4 Optimized model |
+| `o4mini()` | OpenAI | o4-mini | OpenAI's O4 Mini model |
+| `o3()` | OpenAI | o3 | OpenAI's O3 model |
+| `sonnet37()` | Anthropic | claude-3-7-sonnet-20250219 | Anthropic's Claude 3.7 Sonnet model |
+| `sonnet37think()` | Anthropic | claude-3-7-sonnet-20250219 | Claude 3.7 Sonnet with thinking mode enabled |
+| `sonnet35()` | Anthropic | claude-3-5-sonnet-20241022 | Anthropic's Claude 3.5 Sonnet model |
+| `haiku35()` | Anthropic | claude-3-5-haiku-20241022 | Anthropic's Claude 3.5 Haiku model |
+| `gemini25flash()` | Google | gemini-2.5-flash-preview-04-17 | Google's Gemini 2.5 Flash model |
+| `gemini25proExp()` | Google | gemini-2.5-pro-exp-03-25 | Google's Gemini 2.5 Pro Experimental model |
+| `gemini25pro()` | Google | gemini-2.5-pro-preview-05-06 | Google's Gemini 2.5 Pro model |
+| `sonar()` | Perplexity | sonar-pro | Perplexity's Sonar Pro model |
+| `qwen3()` | Together AI | Qwen/Qwen3-235B-A22B-fp8-tput | Together AI's Qwen 3 model |
+| `grok2()` | Grok | grok-2-latest | Grok's latest version 2 model |
+| `grok3()` | Grok | grok-3-beta | Grok's version 3 beta model |
+| `grok3mini()` | Grok | grok-3-mini-beta | Grok's version 3 mini beta model |
+| `scout()` | Cerebras | llama-4-scout-17b-16e-instruct | Cerebras' Llama 4 Scout model |
+
+Each method accepts optional `options` and `config` parameters to customize the model's behavior. For example:
+
+```javascript
+const result = await ModelMix.create()
+    .sonnet37({
+        options: { temperature: 0.7 },
+        config: { system: "You are a helpful assistant" }
+    })
+    .message();
+```
+
 **Methods**

 - `attach(modelInstance)`: Attaches a model instance to the `ModelMix`.
@@ -404,6 +364,16 @@ new MixTogether(args = { config: {}, options: {} })
 - **config**: Specific configuration settings for Together AI, including the `apiKey`.
 - **options**: Default options for Together AI model instances.

+### MixGoogle Class Overview
+
+```javascript
+new MixGoogle(args = { config: {}, options: {} })
+```
+
+- **args**: Configuration object with `config` and `options` properties.
+- **config**: Specific configuration settings for Google Gemini, including the `apiKey`.
+- **options**: Default options for Google Gemini model instances.
+
 ## 🤝 Contributing

 Contributions are welcome! If you find any issues or have suggestions for improvement, please open an issue or submit a pull request on the [GitHub repository](https://github.com/clasen/ModelMix).
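A sketch of driving the new MixGoogle provider through the instance-level API described above; the model key relies on the 'gemini' prefix MixGoogle registers, and the prompt is illustrative:

```javascript
import 'dotenv/config';
import { ModelMix, MixGoogle } from 'modelmix';

const mmix = new ModelMix();
mmix.attach(new MixGoogle()); // reads GOOGLE_API_KEY from the environment

const answer = await mmix.create('gemini-2.5-pro-preview-05-06')
    .addText('Name the capital of Uruguay.')
    .message();
console.log(answer);
```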
package/demo/custom.mjs
CHANGED
package/demo/short.mjs
ADDED
@@ -0,0 +1,28 @@
+import 'dotenv/config'
+
+import { ModelMix } from '../index.js';
+
+const setup = {
+    config: {
+        system: "You are ALF, if they ask your name, answer 'ALF'.",
+        debug: true
+    }
+};
+
+const result = await ModelMix.create(setup)
+    .sonnet37think()
+    .o4mini({ config: { temperature: 0 } })
+    .gpt41nano()
+    .grok3mini()
+    .gemini25flash()
+    .addText("What's your name?")
+    .message();
+
+console.log(result);
+
+const jsonResult = await ModelMix.create({ config: { debug: false } })
+    .sonnet37()
+    .addText("Name and capital of 3 South American countries.")
+    .json({ countries: [{ name: "", capital: "" }] });
+
+console.log(jsonResult);
package/demo/stream.mjs
CHANGED
@@ -52,17 +52,17 @@ mmix.attach(new MixOllama({


 await mmix.create('gpt-4o')
-    .
+    .addImageFromUrl('https://pbs.twimg.com/media/F6-GsjraAAADDGy?format=jpg')
     .addText('describe')
     .stream((data) => { console.log(data.message); });

 await mmix.create('claude-3-haiku-20240307')
-    .
+    .addImageFromUrl('https://pbs.twimg.com/media/F6-GsjraAAADDGy?format=jpg')
     .addText('describe')
     .stream((data) => { console.log(data.message); });

 await mmix.create('llava:latest')
-    .
+    .addImageFromUrl('https://pbs.twimg.com/media/F6-GsjraAAADDGy?format=jpg')
     .addText('describe')
     .stream((data) => { console.log(data.message); });

package/index.js
CHANGED
@@ -6,14 +6,163 @@ const Bottleneck = require('bottleneck');
 const path = require('path');
 const generateJsonSchema = require('./schema');

+class ModelMixBuilder {
+    constructor(args = {}) {
+        this.models = []; // Array of { key: string, providerClass: class, options: {}, config: {} }
+        this.mix = new ModelMix(args);
+        this.handler = null;
+        this._messageHandlerMethods = [ // Methods to delegate after handler creation
+            'new', 'addText', 'addTextFromFile', 'setSystem', 'setSystemFromFile',
+            'addImage', 'addImageFromUrl', 'message', 'json', 'block', 'raw',
+            'stream', 'replace', 'replaceKeyFromFile'
+        ];
+    }
+
+    addModel(key, providerClass, { options = {}, config = {} } = {}) {
+        if (this.handler) {
+            throw new Error("Cannot add models after message generation has started.");
+        }
+
+        // Attach provider if not already attached
+        const providerInstance = new providerClass();
+        const mainPrefix = providerInstance.config.prefix[0];
+        if (!Object.values(this.mix.models).some(p => p.config.prefix.includes(mainPrefix))) {
+            this.mix.attach(providerInstance);
+        }
+
+        if (!key) {
+            throw new Error(`Model key is required when adding a model via ${providerClass.name}.`);
+        }
+        this.models.push({ key, providerClass, options, config });
+        return this;
+    }
+
+    _getHandler() {
+        if (!this.handler) {
+            if (!this.mix || this.models.length === 0) {
+                throw new Error("No models specified. Use methods like .gpt(), .sonnet() first.");
+            }
+
+            // Pass all model definitions. The create method will handle it appropriately
+            this.handler = this.mix.createByDef(this.models);
+
+            // Delegate chainable methods to the handler
+            this._messageHandlerMethods.forEach(methodName => {
+                if (typeof this.handler[methodName] === 'function') {
+                    this[methodName] = (...args) => {
+                        const result = this.handler[methodName](...args);
+                        // Return the handler instance for chainable methods, otherwise the result
+                        return result === this.handler ? this : result;
+                    };
+                }
+            });
+            // Special handling for async methods that return results
+            ['message', 'json', 'block', 'raw', 'stream'].forEach(asyncMethodName => {
+                if (typeof this.handler[asyncMethodName] === 'function') {
+                    this[asyncMethodName] = async (...args) => {
+                        return await this.handler[asyncMethodName](...args);
+                    };
+                }
+            });
+        }
+        return this.handler;
+    }
+
+    // --- Instance methods for adding models (primary or fallback) ---
+    // These will be mirrored by static methods on ModelMix
+    gpt41({ model = 'gpt-4.1', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixOpenAI, { options, config });
+    }
+    gpt41mini({ model = 'gpt-4.1-mini', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixOpenAI, { options, config });
+    }
+    gpt41nano({ model = 'gpt-4.1-nano', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixOpenAI, { options, config });
+    }
+    gpt4o({ model = 'gpt-4o', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixOpenAI, { options, config });
+    }
+    o4mini({ model = 'o4-mini', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixOpenAI, { options, config });
+    }
+    o3({ model = 'o3', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixOpenAI, { options, config });
+    }
+    sonnet37({ model = 'claude-3-7-sonnet-20250219', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixAnthropic, { options, config });
+    }
+    sonnet37think({ model = 'claude-3-7-sonnet-20250219', options = {
+        thinking: {
+            "type": "enabled",
+            "budget_tokens": 1024
+        },
+        temperature: 1
+    }, config = {} } = {}) {
+        return this.addModel(model, MixAnthropic, { options, config });
+    }
+    sonnet35({ model = 'claude-3-5-sonnet-20241022', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixAnthropic, { options, config });
+    }
+    haiku35({ model = 'claude-3-5-haiku-20241022', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixAnthropic, { options, config });
+    }
+    gemini25flash({ model = 'gemini-2.5-flash-preview-04-17', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixGoogle, { options, config });
+    }
+    gemini25proExp({ model = 'gemini-2.5-pro-exp-03-25', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixGoogle, { options, config });
+    }
+    gemini25pro({ model = 'gemini-2.5-pro-preview-05-06', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixGoogle, { options, config });
+    }
+    sonar({ model = 'sonar-pro', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixPerplexity, { options, config });
+    }
+    qwen3({ model = 'Qwen/Qwen3-235B-A22B-fp8-tput', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixTogether, { options, config });
+    }
+    grok2({ model = 'grok-2-latest', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixGrok, { options, config });
+    }
+    grok3({ model = 'grok-3-beta', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixGrok, { options, config });
+    }
+    grok3mini({ model = 'grok-3-mini-beta', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixGrok, { options, config });
+    }
+    scout({ model = 'llama-4-scout-17b-16e-instruct', options = {}, config = {} } = {}) {
+        return this.addModel(model, MixCerebras, { options, config });
+    }
+
+    // --- Methods delegated to MessageHandler after creation ---
+    // Define stubs that will call _getHandler first
+
+    new() { this._getHandler(); return this.new(...arguments); }
+    addText() { this._getHandler(); return this.addText(...arguments); }
+    addTextFromFile() { this._getHandler(); return this.addTextFromFile(...arguments); }
+    setSystem() { this._getHandler(); return this.setSystem(...arguments); }
+    setSystemFromFile() { this._getHandler(); return this.setSystemFromFile(...arguments); }
+    addImage() { this._getHandler(); return this.addImage(...arguments); }
+    addImageFromUrl() { this._getHandler(); return this.addImageFromUrl(...arguments); }
+    replace() { this._getHandler(); return this.replace(...arguments); }
+    replaceKeyFromFile() { this._getHandler(); return this.replaceKeyFromFile(...arguments); }
+
+    // Async methods need await
+    async message() { this._getHandler(); return await this.message(...arguments); }
+    async json() { this._getHandler(); return await this.json(...arguments); }
+    async block() { this._getHandler(); return await this.block(...arguments); }
+    async raw() { this._getHandler(); return await this.raw(...arguments); }
+    async stream() { this._getHandler(); return await this.stream(...arguments); }
+}
+
 class ModelMix {
-    constructor(
+    constructor({ options = {}, config = {} } = {}) {
         this.models = {};
         this.defaultOptions = {
-            max_tokens:
-            temperature: 1,
-            top_p: 1,
-            ...
+            max_tokens: 5000,
+            temperature: 1, // 1 --> More creative, 0 --> More deterministic.
+            top_p: 1, // 100% --> The model considers all possible tokens.
+            ...options
         };

         // Standard Bottleneck configuration
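A sketch of the call flow the new ModelMixBuilder enables, inferred from the class above; the models and prompt are illustrative:

```javascript
// Model-adding calls only queue definitions and attach providers;
// nothing is sent yet.
const builder = ModelMix.create()   // -> ModelMixBuilder
    .gpt41nano()                    // queues { key: 'gpt-4.1-nano', ... }
    .haiku35();                     // queues an Anthropic fallback

// The first MessageHandler-style call triggers _getHandler(), which
// builds the handler via createByDef() and delegates from then on.
const text = await builder.addText('ping').message();
```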
@@ -28,7 +177,7 @@ class ModelMix {
             max_history: 1, // Default max history
             debug: false,
             bottleneck: defaultBottleneckConfig,
-            ...
+            ...config
         }

         this.limiter = new Bottleneck(this.config.bottleneck);
@@ -47,16 +196,23 @@ class ModelMix {
         return this;
     }

-    create(
-
-
+    static create(args = {}) {
+        return new ModelMixBuilder(args);
+    }

-
-
+    createByDef(modelDefinitions, { config: explicitOverallConfig = {}, options: explicitOverallOptions = {} } = {}) {
+
+        // modelDefinitions is expected to be the array from ModelMixBuilder.models
+        // e.g., [{ key, providerClass, options, config }, ...]
+        const allModelsInfo = modelDefinitions;
+        const modelKeys = allModelsInfo.map(m => m.key);
+
+        if (modelKeys.length === 0) {
+            throw new Error('No model keys provided in modelDefinitions.');
         }

         // Verify that all models are available
-        const unavailableModels =
+        const unavailableModels = modelKeys.filter(modelKey => {
             return !Object.values(this.models).some(entry =>
                 entry.config.prefix.some(p => modelKey.startsWith(p))
             );
@@ -66,27 +222,74 @@ class ModelMix {
             throw new Error(`The following models are not available: ${unavailableModels.join(', ')}`);
         }

-        // Once all are verified as available, get the first model
-        const
-        const
-
+        // Once all are verified as available, get the first model (primary)
+        const primaryModelInfo = allModelsInfo[0];
+        const primaryModelKey = primaryModelInfo.key;
+        const primaryModelEntry = Object.values(this.models).find(entry =>
+            entry.config.prefix.some(p => primaryModelKey.startsWith(p))
         );

-
-
-
-
-
+        if (!primaryModelEntry) { // Should be caught by unavailableModels, but good for robustness
+            throw new Error(`Primary model provider for key ${primaryModelKey} not found or attached.`);
+        }
+
+        // Options/config for the MessageHandler instance (session-level)
+        // These are based on the primary model's specification.
+        const optionsHandler = {
+            ...this.defaultOptions, // ModelMix global defaults
+            ...(primaryModelEntry.options || {}), // Primary provider class defaults
+            ...(primaryModelInfo.options || {}), // Options from addModel for primary
+            ...explicitOverallOptions, // Explicit options to .create() if any
+            model: primaryModelKey // Ensure primary model key is set
         };

-        const
-            ...this.config,
-            ...
-            ...
+        const configHandler = {
+            ...this.config, // ModelMix global config
+            ...(primaryModelEntry.config || {}), // Primary provider class config
+            ...(primaryModelInfo.config || {}), // Config from addModel for primary
+            ...explicitOverallConfig // Explicit config to .create()
         };

-        // Pass
-        return new MessageHandler(this,
+        // Pass the entire allModelsInfo array for fallback/iteration
+        return new MessageHandler(this, primaryModelEntry, optionsHandler, configHandler, allModelsInfo);
+    }
+
+    create(modelKeys = [], { config = {}, options = {} } = {}) {
+
+        // Backward compatibility for string model keys
+        if (!modelKeys || (Array.isArray(modelKeys) && modelKeys.length === 0)) {
+            return new ModelMixBuilder({ config: { ...this.config, ...config }, options: { ...this.defaultOptions, ...options } });
+        }
+
+        // If modelKeys is a string, convert it to an array for backward compatibility
+        const modelArray = Array.isArray(modelKeys) ? modelKeys : [modelKeys];
+
+        if (modelArray.length === 0) {
+            throw new Error('No model keys provided');
+        }
+
+        // Create model definitions based on string keys
+        const modelDefinitions = modelArray.map(key => {
+            // Find the provider for this model key
+            const providerEntry = Object.values(this.models).find(entry =>
+                entry.config.prefix.some(p => key.startsWith(p))
+            );
+
+            if (!providerEntry) {
+                throw new Error(`Model provider not found for key: ${key}`);
+            }
+
+            // Return a synthesized model definition with just the key and options/config from the create call
+            return {
+                key,
+                providerClass: null, // Not needed for our purpose
+                options, // Use the options from create call for all models
+                config // Use the config from create call for all models
+            };
+        });
+
+        // Pass to the new implementation
+        return this.createByDef(modelDefinitions, { config, options });
     }

     setSystem(text) {
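The reworked create() keeps the pre-3.2 string and array forms; a sketch, assuming a provider is attached for each key:

```javascript
// Both forms are converted to model definitions and routed
// through createByDef() shown above.
const single = mmix.create('gpt-4o');                         // one model
const chained = mmix.create(['grok-3-beta', 'gpt-4.1-mini']); // with fallback

const out = await chained.addText('hello').message();
```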
@@ -100,10 +303,10 @@ class ModelMix {
         return this;
     }

-    readFile(filePath,
+    readFile(filePath, { encoding = 'utf8' } = {}) {
         try {
             const absolutePath = path.resolve(filePath);
-            return fs.readFileSync(absolutePath,
+            return fs.readFileSync(absolutePath, { encoding });
         } catch (error) {
             if (error.code === 'ENOENT') {
                 throw new Error(`File not found: ${filePath}`);
@@ -117,13 +320,13 @@
 }

 class MessageHandler {
-    constructor(mix, modelEntry, options, config,
+    constructor(mix, modelEntry, options, config, allModelsInfo = []) {
         this.mix = mix;
-        this.modelEntry = modelEntry;
-        this.options = options;
-        this.config = config;
+        this.modelEntry = modelEntry; // Primary model's provider instance
+        this.options = options; // Session-level options, based on primary
+        this.config = config; // Session-level config, based on primary
         this.messages = [];
-        this.
+        this.allModelsInfo = allModelsInfo; // Store the full info array [{ key, providerClass, options, config }, ...]
         this.imagesToProcess = [];
     }

@@ -142,9 +345,9 @@ class MessageHandler {
         return this;
     }

-    addTextFromFile(filePath,
+    addTextFromFile(filePath, { role = "user" } = {}) {
         const content = this.mix.readFile(filePath);
-        this.addText(content,
+        this.addText(content, { role });
         return this;
     }

@@ -159,7 +362,7 @@
         return this;
     }

-    addImage(filePath,
+    addImage(filePath, { role = "user" } = {}) {
         const imageBuffer = this.mix.readFile(filePath, { encoding: null });
         const mimeType = mime.lookup(filePath);

@@ -170,7 +373,7 @@
         const data = imageBuffer.toString('base64');

         const imageMessage = {
-            ...
+            ...{ role },
             content: [
                 {
                     type: "image",
@@ -230,18 +433,25 @@

     async message() {
         this.options.stream = false;
-
-
+        let raw = await this.execute();
+        if (!raw.message && raw.response?.content?.[1]?.text) {
+            return raw.response.content[1].text;
+        }
+
+        return raw.message;
     }

-    async json(schemaExample = null, schemaDescription = {}, { type = 'json_object', addExample = false } = {}) {
+    async json(schemaExample = null, schemaDescription = {}, { type = 'json_object', addExample = false, addSchema = true } = {}) {
         this.options.response_format = { type };
         if (schemaExample) {
-
-
+
+            if (addSchema) {
+                const schema = generateJsonSchema(schemaExample, schemaDescription);
+                this.config.systemExtra = "\nOutput JSON Schema: \n```\n" + JSON.stringify(schema) + "\n```";
+            }

             if (addExample) {
-                this.config.systemExtra += "\nOutput Example: \n```\n" + JSON.stringify(schemaExample) + "\n```";
+                this.config.systemExtra += "\nOutput JSON Example: \n```\n" + JSON.stringify(schemaExample) + "\n```";
             }
         }
         const response = await this.message();
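A sketch of the new addSchema flag in json() above (it defaults to true and injects a generated JSON Schema into the system prompt); the schema example and descriptions reuse the ones from the old README:

```javascript
const result = await mmix.create('gpt-4.1-nano')
    .addText('Give me one ISO time with a greeting.')
    .json(
        { time: '24:00:00', message: 'Hello' }, // schema example
        { time: 'Time in format HH:MM:SS' },    // field descriptions
        { addExample: true }                    // also append the literal example
    );
```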
@@ -334,70 +544,143 @@

     async execute() {
         return this.mix.limiter.schedule(async () => {
-
-            await this.prepareMessages();
+            await this.prepareMessages(); // Prepare messages once, outside the loop

-
-
+            if (this.messages.length === 0) {
+                throw new Error("No user messages have been added. Use addText(prompt), addTextFromFile(filePath), addImage(filePath), or addImageFromUrl(url) to add a prompt.");
+            }
+
+            let lastError = null;
+            const modelIterationList = this.allModelsInfo; // Use the full info for iteration
+
+            // Iterate through the models defined in the handler's list
+            for (let i = 0; i < modelIterationList.length; i++) {
+                const currentModelDetail = modelIterationList[i];
+                const currentModelKey = currentModelDetail.key;
+                const currentModelBuilderOptions = currentModelDetail.options || {};
+                const currentModelBuilderConfig = currentModelDetail.config || {};
+
+                // Find the corresponding model provider instance in the ModelMix instance
+                const currentModelProviderInstance = Object.values(this.mix.models).find(entry =>
+                    entry.config.prefix.some(p => currentModelKey.startsWith(p))
+                );
+
+                if (!currentModelProviderInstance) {
+                    log.warn(`Model provider not found or attached for key: ${currentModelKey}. Skipping.`);
+                    if (!lastError) {
+                        lastError = new Error(`Model provider not found for key: ${currentModelKey}`);
+                    }
+                    continue; // Try the next model
                }

+                // Construct effective options and config for THIS attempt
+                const attemptOptions = {
+                    ...this.mix.defaultOptions, // 1. ModelMix global defaults
+                    ...(currentModelProviderInstance.options || {}), // 2. Provider class defaults for current model
+                    ...this.options, // 3. MessageHandler current general options (from primary + handler changes)
+                    ...currentModelBuilderOptions, // 4. Specific options from addModel for THIS model
+                    model: currentModelKey // 5. Crucial: set current model key
+                };
+
+                const attemptConfig = {
+                    ...this.mix.config, // 1. ModelMix global config
+                    ...(currentModelProviderInstance.config || {}), // 2. Provider class config for current model
+                    ...this.config, // 3. MessageHandler current general config
+                    ...currentModelBuilderConfig // 4. Specific config from addModel for THIS model
+                };
+
+                // Determine the effective debug flag for this attempt (for logging and API call context)
+                // Precedence: model-specific builder config -> handler config -> mix config
+                const effectiveDebugForAttempt = attemptConfig.hasOwnProperty('debug') ? attemptConfig.debug :
+                    this.config.hasOwnProperty('debug') ? this.config.debug :
+                        this.mix.config.debug;
+
+                // Update attemptConfig with the finally resolved debug flag for the API call
+                const apiCallConfig = { ...attemptConfig, debug: effectiveDebugForAttempt };
+
+
+                if (effectiveDebugForAttempt) {
+                    const isPrimary = i === 0;
+                    log.debug(`Attempt #${i + 1}: Using model ${currentModelKey}` + (isPrimary ? ' (Primary)' : ' (Fallback)'));
+                    log.debug("Effective attemptOptions for " + currentModelKey + ":");
+                    log.inspect(attemptOptions);
+                    log.debug("Effective apiCallConfig for " + currentModelKey + ":");
+                    log.inspect(apiCallConfig);
+                }
+
+
+                // Apply model-specific adjustments to a copy of options for this attempt
+                let finalAttemptOptions = { ...attemptOptions };
+                if (currentModelProviderInstance instanceof MixOpenAI && finalAttemptOptions.model?.startsWith('o')) {
+                    delete finalAttemptOptions.max_tokens;
+                    delete finalAttemptOptions.temperature;
+                }
+                if (currentModelProviderInstance instanceof MixAnthropic) {
+                    if (finalAttemptOptions.thinking) {
+                        delete finalAttemptOptions.top_p;
+                        // if (finalAttemptOptions.temperature < 1) {
+                        //     finalAttemptOptions.temperature = 1;
+                        // }
+                    }
+                    delete finalAttemptOptions.response_format; // Anthropic doesn't use this top-level option
+                }
+                // ... add other potential model-specific option adjustments here ...

                 try {
-
-                    this.
-
-
-
-                    if (this.fallbackModels.length > 0) {
-                        const nextModelKey = this.fallbackModels[0];
-                        log.warn(`Model ${this.options.model} failed, trying fallback model ${nextModelKey}...`);
-                        error.details && log.warn(error.details);
-
-                        // Create a completely new handler with the fallback model
-                        const nextHandler = this.mix.create(
-                            [nextModelKey, ...this.fallbackModels.slice(1)],
-                            {
-                                options: {
-                                    // Keep only generic options, not model-specific ones
-                                    max_tokens: this.options.max_tokens,
-                                    temperature: this.options.temperature,
-                                    top_p: this.options.top_p,
-                                    stream: this.options.stream
-                                }
-                            }
-                        );
+                    // Attach the stream callback to the *current* model entry for this attempt
+                    // this.modelEntry is the primary model's provider instance where streamCallback was stored by MessageHandler.stream()
+                    if (finalAttemptOptions.stream && this.modelEntry && this.modelEntry.streamCallback) {
+                        currentModelProviderInstance.streamCallback = this.modelEntry.streamCallback;
+                    }

-
-
+                    // Pass the adjusted options/config for this specific attempt
+                    const result = await currentModelProviderInstance.create({ options: finalAttemptOptions, config: apiCallConfig });

-
-
-
-
-
-
+                    // Add successful response to history *before* returning
+                    let messageContentToAdd = result.message;
+                    if (currentModelProviderInstance instanceof MixAnthropic && result.response?.content?.[0]?.text) {
+                        messageContentToAdd = result.response.content[0].text;
+                    } else if (currentModelProviderInstance instanceof MixOllama && result.response?.message?.content) {
+                        messageContentToAdd = result.response.message.content;
+                    } // Add more cases if other providers have different structures

-
+                    this.messages.push({ role: "assistant", content: messageContentToAdd });

-
-
-
-
-
-
+                    if (effectiveDebugForAttempt) {
+                        log.debug(`Request successful with model: ${currentModelKey}`);
+                        log.inspect(result.response);
+                    }
+                    return result; // Success!
+                } catch (error) {
+                    lastError = error; // Store the most recent error
+                    log.warn(`Model ${currentModelKey} failed (Attempt #${i + 1}/${modelIterationList.length}).`);
+                    if (error.message) log.warn(`Error: ${error.message}`);
+                    if (error.statusCode) log.warn(`Status Code: ${error.statusCode}`);
+                    if (error.details) log.warn(`Details: ${JSON.stringify(error.details)}`);
+
+                    // Check if this is the last model in the list
+                    if (i === modelIterationList.length - 1) {
+                        log.error(`All ${modelIterationList.length} model(s) failed. Throwing last error from ${currentModelKey}.`);
+                        throw lastError; // Re-throw the last encountered error
+                    } else {
+                        const nextModelKey = modelIterationList[i + 1].key;
+                        log.info(`-> Proceeding to next model: ${nextModelKey}`);
                    }
-                    throw error;
                }
-            } catch (error) {
-                throw error;
            }
+
+            // This point should theoretically not be reached if there's at least one model key
+            // and the loop either returns a result or throws an error.
+            log.error("Fallback logic completed without success or throwing the final error.");
+            throw lastError || new Error("Failed to get response from any model, and no specific error was caught.");
        });
    }
 }
 class MixCustom {
-    constructor(
-        this.config = this.getDefaultConfig(
-        this.options = this.getDefaultOptions(
-        this.headers = this.getDefaultHeaders(
+    constructor({ config = {}, options = {}, headers = {} } = {}) {
+        this.config = this.getDefaultConfig(config);
+        this.options = this.getDefaultOptions(options);
+        this.headers = this.getDefaultHeaders(headers);
         this.streamCallback = null; // We define streamCallback here
     }

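The per-attempt merge in execute() resolves options in a fixed order; a condensed sketch of that precedence (the variable names here are shorthand for the values in the loop above, not actual identifiers):

```javascript
// Later spreads win, so per-model builder options beat session
// options, which beat provider defaults, which beat global defaults.
const attemptOptions = {
    ...mix.defaultOptions,          // 1. ModelMix global defaults
    ...providerInstance.options,    // 2. provider class defaults
    ...handler.options,             // 3. session options (primary-based)
    ...builderOptionsForThisModel,  // 4. per-model options from addModel
    model: currentModelKey          // 5. always the current key
};
```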
@@ -425,31 +708,31 @@ class MixCustom {
         };
     }

-    async create(
+    async create({ config = {}, options = {} } = {}) {
         try {
-            if (
+            if (config.debug) {
                 log.debug("config");
-                log.info(
+                log.info(config);
                 log.debug("options");
-                log.inspect(
+                log.inspect(options);
             }

-            if (
-                return this.processStream(await axios.post(this.config.url,
+            if (options.stream) {
+                return this.processStream(await axios.post(this.config.url, options, {
                     headers: this.headers,
                     responseType: 'stream'
                 }));
             } else {
-                return this.processResponse(await axios.post(this.config.url,
+                return this.processResponse(await axios.post(this.config.url, options, {
                     headers: this.headers
                 }));
             }
         } catch (error) {
-            throw this.handleError(error,
+            throw this.handleError(error, { config, options });
         }
     }

-    handleError(error,
+    handleError(error, { config, options }) {
         let errorMessage = 'An error occurred in MixCustom';
         let statusCode = null;
         let errorDetails = null;
@@ -465,8 +748,8 @@
             statusCode,
             details: errorDetails,
             stack: error.stack,
-            config:
-            options:
+            config: config,
+            options: options
         };

         return formattedError;
@@ -529,21 +812,21 @@ class MixOpenAI extends MixCustom {
         });
     }

-    create(
+    async create({ config = {}, options = {} } = {}) {
         if (!this.config.apiKey) {
             throw new Error('OpenAI API key not found. Please provide it in config or set OPENAI_API_KEY environment variable.');
         }

         // Remove max_tokens and temperature for o1/o3 models
-        if (
-            delete
-            delete
+        if (options.model?.startsWith('o')) {
+            delete options.max_tokens;
+            delete options.temperature;
         }

-        const content =
-
-
-        return super.create(
+        const content = config.system + config.systemExtra;
+        options.messages = [{ role: 'system', content }, ...options.messages || []];
+        options.messages = MixOpenAI.convertMessages(options.messages);
+        return super.create({ config, options });
     }

     static convertMessages(messages) {
@@ -577,15 +860,20 @@ class MixAnthropic extends MixCustom {
         });
     }

-    create(
+    async create({ config = {}, options = {} } = {}) {
         if (!this.config.apiKey) {
             throw new Error('Anthropic API key not found. Please provide it in config or set ANTHROPIC_API_KEY environment variable.');
         }

-
+        // Remove top_p for thinking
+        if (options.thinking) {
+            delete options.top_p;
+        }
+
+        delete options.response_format;

-
-        return super.create(
+        options.system = config.system + config.systemExtra;
+        return super.create({ config, options });
     }

     getDefaultHeaders(customHeaders) {
@@ -610,20 +898,20 @@ class MixPerplexity extends MixCustom {
     getDefaultConfig(customConfig) {
         return super.getDefaultConfig({
             url: 'https://api.perplexity.ai/chat/completions',
-            prefix: ['
+            prefix: ['sonar'],
             apiKey: process.env.PPLX_API_KEY,
             ...customConfig
         });
     }

-    create(
+    async create({ config = {}, options = {} } = {}) {
         if (!this.config.apiKey) {
             throw new Error('Perplexity API key not found. Please provide it in config or set PPLX_API_KEY environment variable.');
         }

-        const content =
-
-        return super.create(
+        const content = config.system + config.systemExtra;
+        options.messages = [{ role: 'system', content }, ...options.messages || []];
+        return super.create({ config, options });
     }
 }

@@ -647,12 +935,12 @@ class MixOllama extends MixCustom {
         return '';
     }

-    create(
+    async create({ config = {}, options = {} } = {}) {

-
-        const content =
-
-        return super.create(
+        options.messages = MixOllama.convertMessages(options.messages);
+        const content = config.system + config.systemExtra;
+        options.messages = [{ role: 'system', content }, ...options.messages || []];
+        return super.create({ config, options });
     }

     processResponse(response) {
@@ -700,11 +988,11 @@ class MixLMStudio extends MixCustom {
         });
     }

-    create(
-        const content =
-
-
-        return super.create(
+    async create({ config = {}, options = {} } = {}) {
+        const content = config.system + config.systemExtra;
+        options.messages = [{ role: 'system', content }, ...options.messages || []];
+        options.messages = MixOpenAI.convertMessages(options.messages);
+        return super.create({ config, options });
     }
 }

@@ -718,15 +1006,15 @@ class MixGroq extends MixCustom {
         });
     }

-    create(
+    async create({ config = {}, options = {} } = {}) {
         if (!this.config.apiKey) {
             throw new Error('Groq API key not found. Please provide it in config or set GROQ_API_KEY environment variable.');
         }

-        const content =
-
-
-        return super.create(
+        const content = config.system + config.systemExtra;
+        options.messages = [{ role: 'system', content }, ...options.messages || []];
+        options.messages = MixOpenAI.convertMessages(options.messages);
+        return super.create({ config, options });
     }
 }

@@ -734,7 +1022,7 @@ class MixTogether extends MixCustom {
     getDefaultConfig(customConfig) {
         return super.getDefaultConfig({
             url: 'https://api.together.xyz/v1/chat/completions',
-            prefix: ["meta-llama", "google", "NousResearch", "deepseek-ai"],
+            prefix: ["meta-llama", "google", "NousResearch", "deepseek-ai", "Qwen"],
             apiKey: process.env.TOGETHER_API_KEY,
             ...customConfig
         });
@@ -756,16 +1044,16 @@ class MixTogether extends MixCustom {
         });
     }

-    create(
+    async create({ config = {}, options = {} } = {}) {
         if (!this.config.apiKey) {
             throw new Error('Together API key not found. Please provide it in config or set TOGETHER_API_KEY environment variable.');
         }

-        const content =
-
-
+        const content = config.system + config.systemExtra;
+        options.messages = [{ role: 'system', content }, ...options.messages || []];
+        options.messages = MixTogether.convertMessages(options.messages);

-        return super.create(
+        return super.create({ config, options });
     }
 }

@@ -779,12 +1067,126 @@ class MixCerebras extends MixCustom {
         });
     }

-    create(
-        const content =
-
-
-        return super.create(
+    async create({ config = {}, options = {} } = {}) {
+        const content = config.system + config.systemExtra;
+        options.messages = [{ role: 'system', content }, ...options.messages || []];
+        options.messages = MixTogether.convertMessages(options.messages);
+        return super.create({ config, options });
+    }
+}
+
+class MixGoogle extends MixCustom {
+    getDefaultConfig(customConfig) {
+        return super.getDefaultConfig({
+            url: 'https://generativelanguage.googleapis.com/v1beta/models',
+            prefix: ['gemini'],
+            apiKey: process.env.GOOGLE_API_KEY,
+            ...customConfig
+        });
+    }
+
+    getDefaultHeaders(customHeaders) {
+        // Remove the authorization header as we'll use the API key as a query parameter
+        return {
+            'Content-Type': 'application/json',
+            ...customHeaders
+        };
+    }
+
+    getDefaultOptions(customOptions) {
+        return {
+            generationConfig: {
+                responseMimeType: "text/plain"
+            },
+            ...customOptions
+        };
+    }
+
+    static convertMessages(messages) {
+        return messages.map(message => {
+            const parts = [];
+
+            if (message.content instanceof Array) {
+                message.content.forEach(content => {
+                    if (content.type === 'text') {
+                        parts.push({ text: content.text });
+                    } else if (content.type === 'image') {
+                        parts.push({
+                            inline_data: {
+                                mime_type: content.source.media_type,
+                                data: content.source.data
+                            }
+                        });
+                    }
+                });
+            } else {
+                parts.push({ text: message.content });
+            }
+
+            return {
+                role: message.role === 'assistant' ? 'model' : 'user',
+                parts
+            };
+        });
+    }
+
+    async create({ config = {}, options = {} } = {}) {
+        if (!this.config.apiKey) {
+            throw new Error('Google API key not found. Please provide it in config or set GOOGLE_API_KEY environment variable.');
+        }
+
+        const modelId = options.model || 'gemini-2.5-flash-preview-04-17';
+        const generateContentApi = options.stream ? 'streamGenerateContent' : 'generateContent';
+
+        // Construct the full URL with model ID, API endpoint, and API key
+        const fullUrl = `${this.config.url}/${modelId}:${generateContentApi}?key=${this.config.apiKey}`;
+
+        // Convert messages to Gemini format
+        const contents = MixGoogle.convertMessages(options.messages);
+
+        // Add system message if present
+        if (config.system || config.systemExtra) {
+            contents.unshift({
+                role: 'user',
+                parts: [{ text: (config.system || '') + (config.systemExtra || '') }]
+            });
+        }
+
+        // Prepare the request payload
+        const payload = {
+            contents,
+            generationConfig: options.generationConfig || this.getDefaultOptions().generationConfig
+        };
+
+        try {
+            if (options.stream) {
+                throw new Error('Stream is not supported for Gemini');
+            } else {
+                return this.processResponse(await axios.post(fullUrl, payload, {
+                    headers: this.headers
+                }));
+            }
+        } catch (error) {
+            throw this.handleError(error, { config, options });
+        }
+    }
+
+    extractDelta(data) {
+        try {
+            const parsed = JSON.parse(data);
+            if (parsed.candidates?.[0]?.content?.parts?.[0]?.text) {
+                return parsed.candidates[0].content.parts[0].text;
+            }
+        } catch (e) {
+            // If parsing fails, return empty string
+        }
+        return '';
+    }
+
+    processResponse(response) {
+        const content = response.data.candidates?.[0]?.content?.parts?.[0]?.text || '';
+        return { response: response.data, message: content };
     }
 }

-module.exports = { MixCustom, ModelMix, MixAnthropic, MixOpenAI, MixPerplexity, MixOllama, MixLMStudio, MixGroq, MixTogether, MixGrok, MixCerebras };
+module.exports = { MixCustom, ModelMix, MixAnthropic, MixOpenAI, MixPerplexity, MixOllama, MixLMStudio, MixGroq, MixTogether, MixGrok, MixCerebras, MixGoogle };
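A sketch of what MixGoogle.convertMessages() above produces for a plain chat exchange ('assistant' maps to 'model', everything else to 'user'):

```javascript
const contents = MixGoogle.convertMessages([
    { role: 'user', content: 'Hi' },
    { role: 'assistant', content: 'Hello!' }
]);
// -> [ { role: 'user',  parts: [{ text: 'Hi' }] },
//      { role: 'model', parts: [{ text: 'Hello!' }] } ]
```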
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "modelmix",
-  "version": "3.
+  "version": "3.2.2",
   "description": "🧬 ModelMix - Unified API for Diverse AI LLM.",
   "main": "index.js",
   "repository": {
@@ -16,7 +16,6 @@
     "agent",
     "perplexity",
     "grok",
-    "sonnet-3",
     "gpt",
     "claude",
     "llama",
@@ -24,6 +23,7 @@
     "chat",
     "multimodal",
     "groq",
+    "gemini",
     "ollama",
     "lmstudio",
     "together",
@@ -31,6 +31,7 @@
     "deepseek",
     "o4",
     "4.1",
+    "qwen",
     "nousresearch",
     "reasoning",
     "bottleneck",