modelmix 3.7.8 → 3.8.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -123,7 +123,8 @@ Here's a comprehensive list of available methods:
  | `gpt4o()` | OpenAI | gpt-4o | [\$5.00 / \$20.00][1] |
  | `o4mini()` | OpenAI | o4-mini | [\$1.10 / \$4.40][1] |
  | `o3()` | OpenAI | o3 | [\$10.00 / \$40.00][1] |
- | `opus4[think]()` | Anthropic | claude-opus-4-20250514 | [\$15.00 / \$75.00][2] |
+ | `gptOss()` | Together | gpt-oss-120B | [\$0.15 / \$0.60][7] |
+ | `opus41[think]()` | Anthropic | claude-opus-4-1-20250805 | [\$15.00 / \$75.00][2] |
  | `sonnet4[think]()` | Anthropic | claude-sonnet-4-20250514 | [\$3.00 / \$15.00][2] |
  | `sonnet37[think]()`| Anthropic | claude-3-7-sonnet-20250219 | [\$3.00 / \$15.00][2] |
  | `sonnet35()` | Anthropic | claude-3-5-sonnet-20241022 | [\$3.00 / \$15.00][2] |
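
The two rows added above correspond to the new `gptOss()` and `opus41[think]()` entry points in index.js. A minimal usage sketch assembled only from the method names and demo code in this diff; the `modelmix` import path and the prompt are illustrative assumptions:

```js
import 'dotenv/config'
import { ModelMix } from 'modelmix';

// Fallback chain built from the 3.8.x additions: Opus 4.1 (with Anthropic thinking
// options) first, then gpt-oss-120B, which index.js routes to Groq by default.
const model = await ModelMix.new({ config: { debug: true } })
    .opus41think()
    .gptOss()
    .addText("Name and capital of 3 South American countries.");

// Same json() call shape as demo/json.mjs, including the new { addNote: true } argument.
console.log(await model.json({ countries: [{ name: "", capital: "" }] }, {}, { addNote: true }));
```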
package/demo/json.mjs CHANGED
@@ -1,7 +1,9 @@
  import 'dotenv/config'
  import { ModelMix } from '../index.js';
 
- const model = await ModelMix.new({ config: { debug: true } })
+ const model = await ModelMix.new({ options: { max_tokens: 10000 }, config: { debug: true } })
+     .kimiK2()
+     .gptOss()
      .scout({ config: { temperature: 0 } })
      .o4mini()
      .sonnet37think()
@@ -9,5 +11,5 @@ const model = await ModelMix.new({ config: { debug: true } })
      .gemini25flash()
      .addText("Name and capital of 3 South American countries.")
 
- const jsonResult = await model.json({ countries: [{ name: "", capital: "" }] });
+ const jsonResult = await model.json({ countries: [{ name: "", capital: "" }] }, {}, { addNote: true });
  console.log(jsonResult);
package/index.js CHANGED
@@ -90,6 +90,13 @@ class ModelMix {
  gpt45({ options = {}, config = {} } = {}) {
      return this.attach('gpt-4.5-preview', new MixOpenAI({ options, config }));
  }
+ gptOss({ options = {}, config = {}, mix = { together: false, cerebras: false, groq: true, lmstudio: false } } = {}) {
+     if (mix.together) return this.attach('openai/gpt-oss-120b', new MixTogether({ options, config }));
+     if (mix.cerebras) return this.attach('gpt-oss-120b', new MixCerebras({ options, config }));
+     if (mix.groq) return this.attach('openai/gpt-oss-120b', new MixGroq({ options, config }));
+     if (mix.lmstudio) return this.attach('openai/gpt-oss-120b', new MixLMStudio({ options, config }));
+     return this;
+ }
  opus4think({ options = {}, config = {} } = {}) {
      options = { ...MixAnthropic.thinkingOptions, ...options };
      return this.attach('claude-opus-4-20250514', new MixAnthropic({ options, config }));
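
The `mix` parameter added above selects a backend by boolean flags checked in order (Together, Cerebras, Groq, LM Studio), with Groq as the only default. A hedged sketch of overriding it, using only the signature shown in this hunk; the chain setup mirrors demo/json.mjs and the `modelmix` import path is an assumption:

```js
import { ModelMix } from 'modelmix';

// Route gpt-oss-120B through Together instead of the Groq default; gptOss() returns
// on the first truthy flag, so at most one provider is attached per call.
const viaTogether = ModelMix.new({ config: { debug: true } })
    .gptOss({ mix: { together: true, cerebras: false, groq: false, lmstudio: false } });

// mix is a default parameter, so a partial object replaces the whole default:
// the unspecified flags are undefined (falsy), making this equivalent to the call above.
const alsoViaTogether = ModelMix.new({ config: { debug: true } })
    .gptOss({ mix: { together: true } });
```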
@@ -97,6 +104,13 @@ class ModelMix {
  opus4({ options = {}, config = {} } = {}) {
      return this.attach('claude-opus-4-20250514', new MixAnthropic({ options, config }));
  }
+ opus41({ options = {}, config = {} } = {}) {
+     return this.attach('claude-opus-4-1-20250805', new MixAnthropic({ options, config }));
+ }
+ opus41think({ options = {}, config = {} } = {}) {
+     options = { ...MixAnthropic.thinkingOptions, ...options };
+     return this.attach('claude-opus-4-1-20250805', new MixAnthropic({ options, config }));
+ }
  sonnet4({ options = {}, config = {} } = {}) {
      return this.attach('claude-sonnet-4-20250514', new MixAnthropic({ options, config }));
  }
@@ -174,8 +188,9 @@ class ModelMix {
      return this;
  }
 
- kimiK2({ options = {}, config = {}} = {}) {
-     this.attach('moonshotai/Kimi-K2-Instruct', new MixTogether({ options, config }));
+ kimiK2({ options = {}, config = {}, mix = { together: false, groq: true } } = {}) {
+     if (mix.together) this.attach('moonshotai/Kimi-K2-Instruct', new MixTogether({ options, config }));
+     if (mix.groq) this.attach('moonshotai/kimi-k2-instruct', new MixGroq({ options, config }));
      return this;
  }
 
@@ -253,30 +268,30 @@ class ModelMix {
  for (let i = 0; i < this.messages.length; i++) {
      const message = this.messages[i];
      if (!message.content) continue;
-
+
      for (let j = 0; j < message.content.length; j++) {
          const content = message.content[j];
          if (content.type !== 'image' || content.source.type === 'base64') continue;
-
+
          try {
              let buffer, mimeType;
-
+
              switch (content.source.type) {
                  case 'url':
                      const response = await axios.get(content.source.data, { responseType: 'arraybuffer' });
                      buffer = Buffer.from(response.data);
                      mimeType = response.headers['content-type'];
                      break;
-
+
                  case 'file':
                      buffer = this.readFile(content.source.data, { encoding: null });
                      break;
-
+
                  case 'buffer':
                      buffer = content.source.data;
                      break;
              }
-
+
              // Detect mimeType if not provided
              if (!mimeType) {
                  const fileType = await fromBuffer(buffer);
@@ -285,7 +300,7 @@ class ModelMix {
              }
              mimeType = fileType.mime;
          }
-
+
          // Update the content with processed image
          message.content[j] = {
              type: "image",
@@ -295,7 +310,7 @@ class ModelMix {
                  data: buffer.toString('base64')
              }
          };
-
+
      } catch (error) {
          console.error(`Error processing image:`, error);
          // Remove failed image from content
@@ -739,6 +754,8 @@ class MixCustom {
 
      if (data.choices[0].message?.reasoning_content) {
          return data.choices[0].message.reasoning_content;
+     } else if (data.choices[0].message?.reasoning) {
+         return data.choices[0].message.reasoning;
      }
 
      const message = data.choices[0].message?.content?.trim() || '';
@@ -889,6 +906,12 @@ class MixAnthropic extends MixCustom {
          delete options.top_p;
      }
 
+     if (options.model && options.model.includes('claude-opus-4-1')) {
+         if (options.temperature !== undefined && options.top_p !== undefined) {
+             delete options.top_p;
+         }
+     }
+
      delete options.response_format;
 
      options.system = config.system;
@@ -1122,6 +1145,61 @@ class MixLMStudio extends MixCustom {
          ...customConfig
      });
  }
+
+ create({ config = {}, options = {} } = {}) {
+     if (config.schema) {
+         options.response_format = {
+             type: 'json_schema',
+             json_schema: { schema: config.schema }
+         };
+     }
+     return super.create({ config, options });
+ }
+
+ static extractThink(data) {
+     const message = data.choices[0].message?.content?.trim() || '';
+
+     // Check for LMStudio special tags
+     const startTag = '<|channel|>analysis<|message|>';
+     const endTag = '<|end|><|start|>assistant<|channel|>final<|message|>';
+
+     const startIndex = message.indexOf(startTag);
+     const endIndex = message.indexOf(endTag);
+
+     if (startIndex !== -1 && endIndex !== -1) {
+         // Extract content between the special tags
+         const thinkContent = message.substring(startIndex + startTag.length, endIndex).trim();
+         return thinkContent;
+     }
+
+     // Fall back to default extraction method
+     return MixCustom.extractThink(data);
+ }
+
+ static extractMessage(data) {
+     const message = data.choices[0].message?.content?.trim() || '';
+
+     // Check for LMStudio special tags and extract final message
+     const endTag = '<|end|><|start|>assistant<|channel|>final<|message|>';
+     const endIndex = message.indexOf(endTag);
+
+     if (endIndex !== -1) {
+         // Return only the content after the final message tag
+         return message.substring(endIndex + endTag.length).trim();
+     }
+
+     // Fall back to default extraction method
+     return MixCustom.extractMessage(data);
+ }
+
+ processResponse(response) {
+     return {
+         message: MixLMStudio.extractMessage(response.data),
+         think: MixLMStudio.extractThink(response.data),
+         toolCalls: MixCustom.extractToolCalls(response.data),
+         response: response.data
+     };
+ }
  }
 
  class MixGroq extends MixCustom {
@@ -1174,6 +1252,11 @@ class MixCerebras extends MixCustom {
          ...customConfig
      });
  }
+
+ create({ config = {}, options = {} } = {}) {
+     delete options.response_format;
+     return super.create({ config, options });
+ }
  }
 
  class MixGoogle extends MixCustom {
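
The new MixLMStudio overrides split responses that embed channel markers into a `think` part and a final `message`. A small illustration of what the two static methods return for such a payload; the sample content string is invented, while the marker strings are the ones hard-coded in the hunk above:

```js
// Invented response body in the OpenAI-compatible shape that MixLMStudio parses.
const data = {
    choices: [{
        message: {
            content: '<|channel|>analysis<|message|>List three countries, then answer.' +
                '<|end|><|start|>assistant<|channel|>final<|message|>Peru, Chile, Argentina.'
        }
    }]
};

// With the methods added in this diff:
// MixLMStudio.extractThink(data)   -> 'List three countries, then answer.'
// MixLMStudio.extractMessage(data) -> 'Peru, Chile, Argentina.'
```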
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "modelmix",
-   "version": "3.7.8",
+   "version": "3.8.2",
    "description": "🧬 ModelMix - Unified API for Diverse AI LLM.",
    "main": "index.js",
    "repository": {
@@ -32,7 +32,7 @@
    "together",
    "nano",
    "deepseek",
-   "o4",
+   "oss",
    "4.1",
    "qwen",
    "nousresearch",