modelmix 3.8.0 → 3.8.2

package/README.md CHANGED
@@ -123,7 +123,8 @@ Here's a comprehensive list of available methods:
  | `gpt4o()` | OpenAI | gpt-4o | [\$5.00 / \$20.00][1] |
  | `o4mini()` | OpenAI | o4-mini | [\$1.10 / \$4.40][1] |
  | `o3()` | OpenAI | o3 | [\$10.00 / \$40.00][1] |
- | `opus4[think]()` | Anthropic | claude-opus-4-20250514 | [\$15.00 / \$75.00][2] |
+ | `gptOss()` | Together | gpt-oss-120B | [\$0.15 / \$0.60][7] |
+ | `opus41[think]()` | Anthropic | claude-opus-4-1-20250805 | [\$15.00 / \$75.00][2] |
  | `sonnet4[think]()` | Anthropic | claude-sonnet-4-20250514 | [\$3.00 / \$15.00][2] |
  | `sonnet37[think]()`| Anthropic | claude-3-7-sonnet-20250219 | [\$3.00 / \$15.00][2] |
  | `sonnet35()` | Anthropic | claude-3-5-sonnet-20241022 | [\$3.00 / \$15.00][2] |
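The table now advertises `gptOss()` (listed under Together, priced per footnote [7]) and `opus41[think]()` for claude-opus-4-1-20250805. A minimal usage sketch, assuming `opus41think()` follows the same chainable pattern as the existing `opus4think()`; the prompt and option values here are illustrative:

```js
import 'dotenv/config'
import { ModelMix } from 'modelmix';

// Sketch only: opus41think() is inferred from the `opus41[think]()` table row;
// gptOss() is the fallback and is added to index.js further down in this diff.
const mix = await ModelMix.new({ config: { debug: true } })
    .opus41think()
    .gptOss()
    .addText("Name and capital of 3 South American countries.");

console.log(await mix.json({ countries: [{ name: "", capital: "" }] }));
```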
package/demo/json.mjs CHANGED
@@ -1,7 +1,9 @@
  import 'dotenv/config'
  import { ModelMix } from '../index.js';
 
- const model = await ModelMix.new({ config: { debug: true } })
+ const model = await ModelMix.new({ options: { max_tokens: 10000 }, config: { debug: true } })
+     .kimiK2()
+     .gptOss()
      .scout({ config: { temperature: 0 } })
      .o4mini()
      .sonnet37think()
@@ -9,5 +11,5 @@ const model = await ModelMix.new({ config: { debug: true } })
      .gemini25flash()
      .addText("Name and capital of 3 South American countries.")
 
- const jsonResult = await model.json({ countries: [{ name: "", capital: "" }] });
+ const jsonResult = await model.json({ countries: [{ name: "", capital: "" }] }, {}, { addNote: true });
  console.log(jsonResult);
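The demo now chains the new `kimiK2()` and `gptOss()` entries ahead of the existing models and passes a third argument, `{ addNote: true }`, to `model.json()`. Per the index.js changes below, both new methods accept a `mix` flag that selects the serving backend (Groq by default in 3.8.2). A hedged sketch of pinning them to Together instead, using only the flag names that appear in this diff:

```js
import 'dotenv/config'
import { ModelMix } from 'modelmix';

// Sketch only: passing `mix` overrides the 3.8.2 defaults (groq: true), so both
// models are served by Together here instead of Groq.
const model = await ModelMix.new({ options: { max_tokens: 10000 } })
    .gptOss({ mix: { together: true } })    // openai/gpt-oss-120b via MixTogether
    .kimiK2({ mix: { together: true } })    // moonshotai/Kimi-K2-Instruct via MixTogether
    .addText("Name and capital of 3 South American countries.");

console.log(await model.json({ countries: [{ name: "", capital: "" }] }));
```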
package/index.js CHANGED
@@ -90,8 +90,11 @@ class ModelMix {
      gpt45({ options = {}, config = {} } = {}) {
          return this.attach('gpt-4.5-preview', new MixOpenAI({ options, config }));
      }
-     gptOss({ options = {}, config = {}, mix = { together: true } } = {}) {
+     gptOss({ options = {}, config = {}, mix = { together: false, cerebras: false, groq: true, lmstudio: false } } = {}) {
          if (mix.together) return this.attach('openai/gpt-oss-120b', new MixTogether({ options, config }));
+         if (mix.cerebras) return this.attach('gpt-oss-120b', new MixCerebras({ options, config }));
+         if (mix.groq) return this.attach('openai/gpt-oss-120b', new MixGroq({ options, config }));
+         if (mix.lmstudio) return this.attach('openai/gpt-oss-120b', new MixLMStudio({ options, config }));
          return this;
      }
      opus4think({ options = {}, config = {} } = {}) {
@@ -185,8 +188,9 @@ class ModelMix {
          return this;
      }
 
-     kimiK2({ options = {}, config = {}} = {}) {
-         this.attach('moonshotai/Kimi-K2-Instruct', new MixTogether({ options, config }));
+     kimiK2({ options = {}, config = {}, mix = { together: false, groq: true } } = {}) {
+         if (mix.together) this.attach('moonshotai/Kimi-K2-Instruct', new MixTogether({ options, config }));
+         if (mix.groq) this.attach('moonshotai/kimi-k2-instruct', new MixGroq({ options, config }));
          return this;
      }
 
@@ -264,30 +268,30 @@ class ModelMix {
          for (let i = 0; i < this.messages.length; i++) {
              const message = this.messages[i];
              if (!message.content) continue;
-
+
              for (let j = 0; j < message.content.length; j++) {
                  const content = message.content[j];
                  if (content.type !== 'image' || content.source.type === 'base64') continue;
-
+
                  try {
                      let buffer, mimeType;
-
+
                      switch (content.source.type) {
                          case 'url':
                              const response = await axios.get(content.source.data, { responseType: 'arraybuffer' });
                              buffer = Buffer.from(response.data);
                              mimeType = response.headers['content-type'];
                              break;
-
+
                          case 'file':
                              buffer = this.readFile(content.source.data, { encoding: null });
                              break;
-
+
                          case 'buffer':
                              buffer = content.source.data;
                              break;
                      }
-
+
                      // Detect mimeType if not provided
                      if (!mimeType) {
                          const fileType = await fromBuffer(buffer);
@@ -296,7 +300,7 @@ class ModelMix {
                          }
                          mimeType = fileType.mime;
                      }
-
+
                      // Update the content with processed image
                      message.content[j] = {
                          type: "image",
@@ -306,7 +310,7 @@ class ModelMix {
                              data: buffer.toString('base64')
                          }
                      };
-
+
                  } catch (error) {
                      console.error(`Error processing image:`, error);
                      // Remove failed image from content
@@ -750,6 +754,8 @@ class MixCustom {
 
          if (data.choices[0].message?.reasoning_content) {
              return data.choices[0].message.reasoning_content;
+         } else if (data.choices[0].message?.reasoning) {
+             return data.choices[0].message.reasoning;
          }
 
          const message = data.choices[0].message?.content?.trim() || '';
@@ -1139,6 +1145,61 @@ class MixLMStudio extends MixCustom {
              ...customConfig
          });
      }
+
+     create({ config = {}, options = {} } = {}) {
+         if (config.schema) {
+             options.response_format = {
+                 type: 'json_schema',
+                 json_schema: { schema: config.schema }
+             };
+         }
+         return super.create({ config, options });
+     }
+
+     static extractThink(data) {
+         const message = data.choices[0].message?.content?.trim() || '';
+
+         // Check for LMStudio special tags
+         const startTag = '<|channel|>analysis<|message|>';
+         const endTag = '<|end|><|start|>assistant<|channel|>final<|message|>';
+
+         const startIndex = message.indexOf(startTag);
+         const endIndex = message.indexOf(endTag);
+
+         if (startIndex !== -1 && endIndex !== -1) {
+             // Extract content between the special tags
+             const thinkContent = message.substring(startIndex + startTag.length, endIndex).trim();
+             return thinkContent;
+         }
+
+         // Fall back to default extraction method
+         return MixCustom.extractThink(data);
+     }
+
+     static extractMessage(data) {
+         const message = data.choices[0].message?.content?.trim() || '';
+
+         // Check for LMStudio special tags and extract final message
+         const endTag = '<|end|><|start|>assistant<|channel|>final<|message|>';
+         const endIndex = message.indexOf(endTag);
+
+         if (endIndex !== -1) {
+             // Return only the content after the final message tag
+             return message.substring(endIndex + endTag.length).trim();
+         }
+
+         // Fall back to default extraction method
+         return MixCustom.extractMessage(data);
+     }
+
+     processResponse(response) {
+         return {
+             message: MixLMStudio.extractMessage(response.data),
+             think: MixLMStudio.extractThink(response.data),
+             toolCalls: MixCustom.extractToolCalls(response.data),
+             response: response.data
+         };
+     }
  }
 
  class MixGroq extends MixCustom {
@@ -1191,6 +1252,11 @@ class MixCerebras extends MixCustom {
              ...customConfig
          });
      }
+
+     create({ config = {}, options = {} } = {}) {
+         delete options.response_format;
+         return super.create({ config, options });
+     }
  }
 
  class MixGoogle extends MixCustom {
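The MixLMStudio additions above split gpt-oss style channel output into reasoning and final answer by looking for the `<|channel|>analysis<|message|>` and `<|end|><|start|>assistant<|channel|>final<|message|>` markers, falling back to the stock MixCustom extractors when the tags are absent. A standalone sketch of that parsing step; the tag strings come from the diff, while `splitHarmony()` and the sample input are illustrative:

```js
// Tag strings as used by MixLMStudio.extractThink / extractMessage above.
const START_TAG = '<|channel|>analysis<|message|>';
const END_TAG = '<|end|><|start|>assistant<|channel|>final<|message|>';

// Hypothetical helper: returns { think, message } the way processResponse() splits them.
function splitHarmony(raw) {
    const startIndex = raw.indexOf(START_TAG);
    const endIndex = raw.indexOf(END_TAG);

    if (startIndex === -1 || endIndex === -1) {
        // No channel tags: treat the whole completion as the final message.
        return { think: '', message: raw.trim() };
    }

    return {
        think: raw.substring(startIndex + START_TAG.length, endIndex).trim(),
        message: raw.substring(endIndex + END_TAG.length).trim()
    };
}

// Synthetic example of an LM Studio-style completion:
const sample = `${START_TAG}The user wants three countries.${END_TAG}Argentina, Brazil, Chile.`;
console.log(splitHarmony(sample));
// -> { think: 'The user wants three countries.', message: 'Argentina, Brazil, Chile.' }
```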
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "modelmix",
-   "version": "3.8.0",
+   "version": "3.8.2",
    "description": "🧬 ModelMix - Unified API for Diverse AI LLM.",
    "main": "index.js",
    "repository": {