modelmix 2.9.8 → 3.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/demo/custom.mjs CHANGED
@@ -15,6 +15,6 @@ const mmix = new ModelMix({
 
 mmix.attach(new MixCerebras());
 
-let r = mmix.create('llama-4-scout-17b-16e-instruct').addText('what is the capital of the moon?');
+let r = mmix.create('llama-4-scout-17b-16e-instruct').addText('hi there');
 r = await r.addText('do you like cats?').message();
 console.log(r);
@@ -0,0 +1,38 @@
+import { ModelMix, MixOpenAI, MixAnthropic, MixGrok } from '../index.js';
+import dotenv from 'dotenv';
+dotenv.config();
+
+const mmix = new ModelMix({
+    config: {
+        max_history: 1,
+        debug: false,
+        bottleneck: {
+            minTime: 15000,
+            maxConcurrent: 1
+        }
+    },
+    options: {
+        max_tokens: 8192,
+    }
+});
+const an = new MixAnthropic();
+an.config.url = 'fail';
+mmix.attach(new MixOpenAI(), an, new MixGrok());
+
+
+const modelOptionsRef = ['claude-3-5-sonnet-20241022', 'gpt-4.1-nano'];
+
+async function main() {
+    const response = await generateThread(modelOptionsRef);
+    console.log(response);
+}
+
+async function generateThread(modelOptionsRef) {
+    const model = mmix.create(modelOptionsRef, { options: { temperature: 0.5 } });
+    model.addText('hola, como estas?');
+    const response = await model.message();
+
+    return response.split('---').map(section => section.trim());
+}
+
+main();
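
This new demo exercises the fallback path: MixAnthropic is attached with a deliberately broken endpoint (an.config.url = 'fail'), and mmix.create() receives an array of model keys, so the failing 'claude-3-5-sonnet-20241022' request falls through to 'gpt-4.1-nano'. A minimal sketch of the same pattern, assuming the consumer-facing 'modelmix' import and only the API calls shown in this diff:

    import { ModelMix, MixOpenAI, MixAnthropic } from 'modelmix';

    const mix = new ModelMix({ config: { max_history: 1 } });
    mix.attach(new MixAnthropic(), new MixOpenAI());

    // The first array entry is tried first; on a provider error,
    // ModelMix retries the same conversation with the next entry.
    const handler = mix.create(['claude-3-5-sonnet-20241022', 'gpt-4.1-nano']);
    handler.addText('hi there');
    console.log(await handler.message());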
package/index.js CHANGED
@@ -293,20 +293,23 @@ class MessageHandler {
         });
     }
 
+    async prepareMessages() {
+        await this.processImageUrls();
+        this.applyTemplate();
+        this.messages = this.messages.slice(-this.config.max_history);
+        this.messages = this.groupByRoles(this.messages);
+        this.options.messages = this.messages;
+    }
+
     async execute() {
         return this.mix.limiter.schedule(async () => {
             try {
-                await this.processImageUrls();
-                this.applyTemplate();
-                this.messages = this.messages.slice(-this.config.max_history);
-                this.messages = this.groupByRoles(this.messages);
+                await this.prepareMessages();
 
                 if (this.messages.length === 0) {
                     throw new Error("No user messages have been added. Use addText(prompt), addTextFromFile(filePath), addImage(filePath), or addImageFromUrl(url) to add a prompt.");
                 }
 
-                this.options.messages = this.messages;
-
                 try {
                     const result = await this.modelEntry.create({ options: this.options, config: this.config });
                     this.messages.push({ role: "assistant", content: result.message });
@@ -316,7 +319,7 @@ class MessageHandler {
                 if (this.fallbackModels.length > 0) {
                     const nextModelKey = this.fallbackModels[0];
                     log.warn(`Model ${this.options.model} failed, trying fallback model ${nextModelKey}...`);
-                    log.warn(error.details);
+                    error.details && log.warn(error.details);
 
                     // Create a completely new handler with the fallback model
                     const nextHandler = this.mix.create(
@@ -332,17 +335,23 @@ class MessageHandler {
                         }
                     );
 
-                    // Asignar directamente todos los mensajes
+                    // Assign all messages directly
                    nextHandler.messages = [...this.messages];
 
-                    // Mantener el mismo sistema y reemplazos
+                    // Keep same system and replacements
                    nextHandler.setSystem(this.config.system);
                    if (this.config.replace) {
                        nextHandler.replace(this.config.replace);
                    }
 
-                    // Try with next model
-                    return nextHandler.execute();
+                    await nextHandler.prepareMessages();
+
+                    const result = await nextHandler.modelEntry.create({
+                        options: nextHandler.options,
+                        config: nextHandler.config
+                    });
+                    nextHandler.messages.push({ role: "assistant", content: result.message });
+                    return result;
                }
                throw error;
            }
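
Note that the fallback branch no longer recurses into nextHandler.execute(): it calls the newly extracted prepareMessages() and then hits the provider entry directly. A plausible reason (an inference, not stated in this diff) is that execute() wraps its work in this.mix.limiter.schedule(), and re-entering the limiter from inside an already running job can never start under a maxConcurrent: 1 setup like the new demo's. A sketch of that re-entrancy hazard, assuming mix.limiter is a Bottleneck instance as the package's bottleneck config suggests:

    import Bottleneck from 'bottleneck';

    const limiter = new Bottleneck({ maxConcurrent: 1 });

    limiter.schedule(async () => {
        // The outer job holds the only execution slot, so this inner job
        // stays queued forever and the outer promise never settles.
        return limiter.schedule(async () => 'fallback result');
    });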
@@ -482,7 +491,7 @@ class MixOpenAI extends MixCustom {
    getDefaultConfig(customConfig) {
        return super.getDefaultConfig({
            url: 'https://api.openai.com/v1/chat/completions',
-            prefix: ['gpt', 'ft:', 'o3', 'o1'],
+            prefix: ['gpt', 'ft:', 'o'],
            apiKey: process.env.OPENAI_API_KEY,
            ...customConfig
        });
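
Collapsing the prefixes ['o3', 'o1'] into a bare 'o' lets MixOpenAI claim any o-series model name (o1, o3, o4, ...) without another release. The actual matching lives inside ModelMix; a hypothetical sketch of how such prefix routing typically works:

    const prefixes = ['gpt', 'ft:', 'o'];
    const handlesModel = (model) => prefixes.some((p) => model.startsWith(p));

    handlesModel('o4-mini');                    // true
    handlesModel('gpt-4.1-nano');               // true
    handlesModel('claude-3-5-sonnet-20241022'); // false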
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "modelmix",
-  "version": "2.9.8",
+  "version": "3.0.2",
   "description": "🧬 ModelMix - Unified API for Diverse AI LLM.",
   "main": "index.js",
   "repository": {
@@ -27,17 +27,17 @@
     "ollama",
     "lmstudio",
     "together",
-    "o1",
+    "nano",
     "deepseek",
-    "fallback",
-    "o3",
-    "o3-mini",
+    "o4",
+    "4.1",
     "nousresearch",
     "reasoning",
     "bottleneck",
     "claude-3-7-sonnet",
     "cerebras",
     "scout",
+    "fallback",
     "clasen"
   ],
   "author": "Martin Clasen",