modelmix 4.4.6 → 4.4.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -47,7 +47,7 @@ import { ModelMix } from 'modelmix';
47
47
 
48
48
  // Get structured JSON responses
49
49
  const model = ModelMix.new()
50
- .sonnet45() // Anthropic claude-sonnet-4-20250514
50
+ .sonnet46() // Anthropic claude-sonnet-4-6
51
51
  .addText("Name and capital of 3 South American countries.");
52
52
 
53
53
  const outputExample = { countries: [{ name: "", capital: "" }] };
@@ -65,7 +65,7 @@ const setup = {
65
65
  };
66
66
 
67
67
  const model = await ModelMix.new(setup)
68
- .sonnet45() // (main model) Anthropic claude-sonnet-4-5-20250929
68
+ .sonnet46() // (main model) Anthropic claude-sonnet-4-6
69
69
  .gpt5mini() // (fallback 2) OpenAI gpt-5-mini
70
70
  .gemini3flash({ config: { temperature: 0 } }) // (fallback 3) Google gemini-3-flash
71
71
  .grok3mini() // (fallback 4) Grok grok-3-mini
@@ -146,9 +146,8 @@ Here's a comprehensive list of available methods:
146
146
  | `gptOss()` | Together | gpt-oss-120B | [\$0.15 / \$0.60][7] |
147
147
  | `opus46[think]()` | Anthropic | claude-opus-4-6 | [\$5.00 / \$25.00][2] |
148
148
  | `opus45[think]()` | Anthropic | claude-opus-4-5-20251101 | [\$5.00 / \$25.00][2] |
149
- | `opus41[think]()` | Anthropic | claude-opus-4-1-20250805 | [\$15.00 / \$75.00][2] |
149
+ | `sonnet46[think]()`| Anthropic | claude-sonnet-4-6 | [\$3.00 / \$15.00][2] |
150
150
  | `sonnet45[think]()`| Anthropic | claude-sonnet-4-5-20250929 | [\$3.00 / \$15.00][2] |
151
- | `sonnet4[think]()` | Anthropic | claude-sonnet-4-20250514 | [\$3.00 / \$15.00][2] |
152
151
  | `haiku35()` | Anthropic | claude-3-5-haiku-20241022 | [\$0.80 / \$4.00][2] |
153
152
  | `haiku45[think]()` | Anthropic | claude-haiku-4-5-20251001 | [\$1.00 / \$5.00][2] |
154
153
  | `gemini3pro()` | Google | gemini-3-pro-preview | [\$2.00 / \$12.00][3] |
@@ -208,7 +207,7 @@ ModelMix includes a simple but powerful templating system. You can write your sy
208
207
  ### Basic example with `replace`
209
208
 
210
209
  ```javascript
211
- const gpt = ModelMix.new().gpt5mini();
210
+ const gpt = ModelMix.new().gpt52();
212
211
 
213
212
  gpt.addText('Write a short story about a {animal} that lives in {place}.');
214
213
  gpt.replace({ '{animal}': 'cat', '{place}': 'a haunted castle' });
@@ -325,19 +324,14 @@ console.log(result);
325
324
 
326
325
  ### Adding field descriptions
327
326
 
328
- The second argument lets you describe each field so the model understands exactly what you expect:
327
+ The second argument lets you describe each field so the model understands exactly what you expect. Descriptions can be **strings** (simple) or **descriptor objects** (with metadata):
329
328
 
330
329
  ```javascript
331
- const model = ModelMix.new()
332
- .gpt5mini()
333
- .addText('Name and capital of 3 South American countries.');
334
-
335
330
  const result = await model.json(
336
331
  { countries: [{ name: "Argentina", capital: "BUENOS AIRES" }] },
337
332
  { countries: [{ name: "name of the country", capital: "capital of the country in uppercase" }] },
338
333
  { addNote: true }
339
334
  );
340
- console.log(result);
341
335
  // { countries: [
342
336
  // { name: "Brazil", capital: "BRASILIA" },
343
337
  // { name: "Colombia", capital: "BOGOTA" },
@@ -345,7 +339,40 @@ console.log(result);
345
339
  // ]}
346
340
  ```
347
341
 
348
- The example values (like `"Argentina"` and `"BUENOS AIRES"`) show the model the expected format, while the descriptions clarify what each field should contain.
342
+ ### Enhanced descriptors
343
+
344
+ Descriptions support **descriptor objects** with `description`, `required`, `enum`, and `default`:
345
+
346
+ ```javascript
347
+ const result = await model.json(
348
+ { name: 'martin', age: 22, sex: 'm' },
349
+ {
350
+ name: { description: 'Name of the actor', required: false },
351
+ age: 'Age of the actor', // string still works
352
+ sex: { description: 'Gender', enum: ['m', 'f', null], default: 'm' }
353
+ }
354
+ );
355
+ ```
356
+
357
+ | Property | Type | Default | Description |
358
+ | --- | --- | --- | --- |
359
+ | `description` | `string` | — | Field description for the model |
360
+ | `required` | `boolean` | `true` | If `false`, field is removed from `required` and type becomes nullable |
361
+ | `enum` | `array` | — | Allowed values. If includes `null`, type auto-becomes nullable |
362
+ | `default` | `any` | — | Default value for the field |
363
+
364
+ You can mix strings and descriptor objects freely in the same descriptions parameter.
365
+
366
+ ### Array auto-wrap
367
+
368
+ When you pass a top-level array as the example, ModelMix automatically wraps it for better LLM compatibility and unwraps the result transparently:
369
+
370
+ ```javascript
371
+ const result = await model.json([{ name: 'martin' }]);
372
+ // result is an array: [{ name: "Martin" }, { name: "Carlos" }, ...]
373
+ ```
374
+
375
+ Internally, the array is wrapped as `{ out: [...] }` so the model receives a proper object schema, then `result.out` is returned automatically.
349
376
 
350
377
  ### Options
351
378
 
@@ -491,15 +518,16 @@ new ModelMix(args = { options: {}, config: {} })
491
518
  - `tokens`: Object with `input`, `output`, and `total` token counts
492
519
  - `response`: The raw API response
493
520
  - `stream(callback)`: Sends the message and streams the response, invoking the callback with each streamed part.
494
- - `json(schemaExample, descriptions = {})`: Forces the model to return a response in a specific JSON format.
495
- - `schemaExample`: Optional example of the JSON structure to be returned.
496
- - `descriptions`: Optional descriptions for each field in the JSON structure
521
+ - `json(schemaExample, descriptions = {}, options = {})`: Forces the model to return a response in a specific JSON format.
522
+ - `schemaExample`: Example of the JSON structure to be returned. Top-level arrays are auto-wrapped for better LLM compatibility.
523
+ - `descriptions`: Descriptions for each field can be strings or descriptor objects with `{ description, required, enum, default }`.
524
+ - `options`: `{ addSchema: true, addExample: false, addNote: false }`
497
525
  - Returns a Promise that resolves to the structured JSON response
498
526
  - Example:
499
527
  ```javascript
500
528
  const response = await handler.json(
501
529
  { time: '24:00:00', message: 'Hello' },
502
- { time: 'Time in format HH:MM:SS' }
530
+ { time: 'Time in format HH:MM:SS', message: { description: 'Greeting', required: false } }
503
531
  );
504
532
  ```
505
533
  - `block({ addText = true })`: Forces the model to return a response in a specific block format.
package/demo/json.js CHANGED
@@ -2,7 +2,7 @@ process.loadEnvFile();
2
2
  import { ModelMix } from '../index.js';
3
3
 
4
4
  const model = await ModelMix.new({ options: { max_tokens: 10000 }, config: { debug: 3 } })
5
- .gemini3flash()
5
+ .sonnet46()
6
6
  // .gptOss()
7
7
  // .scout({ config: { temperature: 0 } })
8
8
  // .o4mini()
package/index.js CHANGED
@@ -31,6 +31,7 @@ const MODEL_PRICING = {
31
31
  'claude-opus-4-6': [5.00, 25.00],
32
32
  'claude-opus-4-5-20251101': [5.00, 25.00],
33
33
  'claude-opus-4-1-20250805': [15.00, 75.00],
34
+ 'claude-sonnet-4-6': [3.00, 15.00],
34
35
  'claude-sonnet-4-5-20250929': [3.00, 15.00],
35
36
  'claude-sonnet-4-20250514': [3.00, 15.00],
36
37
  'claude-3-5-haiku-20241022': [0.80, 4.00],
@@ -305,6 +306,14 @@ class ModelMix {
305
306
  options = { ...MixAnthropic.thinkingOptions, ...options };
306
307
  return this.attach('claude-sonnet-4-20250514', new MixAnthropic({ options, config }));
307
308
  }
309
+ sonnet46({ options = {}, config = {} } = {}) {
310
+ return this.attach('claude-sonnet-4-6', new MixAnthropic({ options, config }));
311
+ }
312
+ sonnet46think({ options = {}, config = {} } = {}) {
313
+ options = { ...MixAnthropic.thinkingOptions, ...options };
314
+ return this.attach('claude-sonnet-4-6', new MixAnthropic({ options, config }));
315
+ }
316
+
308
317
  sonnet45({ options = {}, config = {} } = {}) {
309
318
  return this.attach('claude-sonnet-4-5-20250929', new MixAnthropic({ options, config }));
310
319
  }
@@ -775,16 +784,13 @@ class ModelMix {
775
784
  if (this.config.max_history > 0) {
776
785
  let sliceStart = Math.max(0, this.messages.length - this.config.max_history);
777
786
 
778
- // If we're slicing and there's a tool message at the start,
779
- // ensure we include the preceding assistant message with tool_calls
780
- while (sliceStart > 0 &&
781
- sliceStart < this.messages.length &&
782
- this.messages[sliceStart].role === 'tool') {
783
- sliceStart--;
784
- // Also need to include the assistant message with tool_calls
785
- if (sliceStart > 0 &&
786
- this.messages[sliceStart].role === 'assistant' &&
787
- this.messages[sliceStart].tool_calls) {
787
+ // If we're slicing into the middle of a tool interaction,
788
+ // backtrack to include the full sequence (user → assistant/tool_calls tool results)
789
+ while (sliceStart > 0 && sliceStart < this.messages.length) {
790
+ const msg = this.messages[sliceStart];
791
+ if (msg.role === 'tool' || (msg.role === 'assistant' && msg.tool_calls)) {
792
+ sliceStart--;
793
+ } else {
788
794
  break;
789
795
  }
790
796
  }
@@ -913,11 +919,12 @@ class ModelMix {
913
919
  this.messages.push({
914
920
  role: 'tool',
915
921
  tool_call_id: toolResult.tool_call_id,
922
+ name: toolResult.name,
916
923
  content: toolResult.content
917
924
  });
918
925
  }
919
926
 
920
- return this.execute();
927
+ return this.execute({ options, config });
921
928
  }
922
929
 
923
930
  // debug level 1: Just success indicator
@@ -1567,6 +1574,18 @@ class MixAnthropic extends MixCustom {
1567
1574
 
1568
1575
  return filteredMessages.map(message => {
1569
1576
  if (message.role === 'tool') {
1577
+ // Handle new format: tool_call_id directly on message
1578
+ if (message.tool_call_id) {
1579
+ return {
1580
+ role: "user",
1581
+ content: [{
1582
+ type: "tool_result",
1583
+ tool_use_id: message.tool_call_id,
1584
+ content: message.content
1585
+ }]
1586
+ }
1587
+ }
1588
+ // Handle old format: content is an array
1570
1589
  return {
1571
1590
  role: "user",
1572
1591
  content: message.content.map(content => ({
@@ -1997,13 +2016,33 @@ class MixGoogle extends MixCustom {
1997
2016
  if (message.role === 'assistant' && message.tool_calls) {
1998
2017
  return {
1999
2018
  role: 'model',
2000
- parts: message.tool_calls.map(toolCall => ({
2001
- functionCall: {
2002
- name: toolCall.function.name,
2003
- args: JSON.parse(toolCall.function.arguments)
2004
- },
2005
- thought_signature: toolCall.thought_signature || ""
2006
- }))
2019
+ parts: message.tool_calls.map(toolCall => {
2020
+ const part = {
2021
+ functionCall: {
2022
+ name: toolCall.function.name,
2023
+ args: JSON.parse(toolCall.function.arguments)
2024
+ }
2025
+ };
2026
+ if (toolCall.thought_signature) {
2027
+ part.thoughtSignature = toolCall.thought_signature;
2028
+ }
2029
+ return part;
2030
+ })
2031
+ }
2032
+ }
2033
+
2034
+ // Handle new tool result format: tool_call_id and name directly on message
2035
+ if (message.role === 'tool' && message.name) {
2036
+ return {
2037
+ role: 'user',
2038
+ parts: [{
2039
+ functionResponse: {
2040
+ name: message.name,
2041
+ response: {
2042
+ output: message.content,
2043
+ },
2044
+ }
2045
+ }]
2007
2046
  }
2008
2047
  }
2009
2048
 
@@ -2011,6 +2050,7 @@ class MixGoogle extends MixCustom {
2011
2050
  const role = (message.role === 'assistant' || message.role === 'tool') ? 'model' : 'user'
2012
2051
 
2013
2052
  if (message.role === 'tool') {
2053
+ // Handle old format: content is an array of {name, content}
2014
2054
  return {
2015
2055
  role,
2016
2056
  parts: message.content.map(content => ({
@@ -2053,6 +2093,22 @@ class MixGoogle extends MixCustom {
2053
2093
  })
2054
2094
  }
2055
2095
  });
2096
+
2097
+ // Merge consecutive user messages containing only functionResponse parts
2098
+ // Google requires all function responses for a turn in a single message
2099
+ return converted.reduce((acc, msg) => {
2100
+ if (acc.length > 0) {
2101
+ const prev = acc[acc.length - 1];
2102
+ if (prev.role === 'user' && msg.role === 'user' &&
2103
+ prev.parts.every(p => p.functionResponse) &&
2104
+ msg.parts.every(p => p.functionResponse)) {
2105
+ prev.parts.push(...msg.parts);
2106
+ return acc;
2107
+ }
2108
+ }
2109
+ acc.push(msg);
2110
+ return acc;
2111
+ }, []);
2056
2112
  }
2057
2113
 
2058
2114
  async create({ config = {}, options = {} } = {}) {
@@ -2078,7 +2134,13 @@ class MixGoogle extends MixCustom {
2078
2134
  generationConfig.topP = options.top_p;
2079
2135
  }
2080
2136
 
2081
- generationConfig.responseMimeType = "text/plain";
2137
+ // Gemini does not support responseMimeType when function calling is used
2138
+ const hasTools = options.tools && options.tools.length > 0 &&
2139
+ options.tools.some(t => t.functionDeclarations && t.functionDeclarations.length > 0);
2140
+
2141
+ if (!hasTools) {
2142
+ generationConfig.responseMimeType = "text/plain";
2143
+ }
2082
2144
 
2083
2145
  const payload = {
2084
2146
  generationConfig,
@@ -2160,6 +2222,21 @@ class MixGoogle extends MixCustom {
2160
2222
  };
2161
2223
  }
2162
2224
 
2225
+ static stripUnsupportedSchemaProps(schema) {
2226
+ if (!schema || typeof schema !== 'object') return schema;
2227
+ const cleaned = { ...schema };
2228
+ delete cleaned.default;
2229
+ if (cleaned.properties) {
2230
+ cleaned.properties = Object.fromEntries(
2231
+ Object.entries(cleaned.properties).map(([key, value]) => [key, MixGoogle.stripUnsupportedSchemaProps(value)])
2232
+ );
2233
+ }
2234
+ if (cleaned.items) {
2235
+ cleaned.items = MixGoogle.stripUnsupportedSchemaProps(cleaned.items);
2236
+ }
2237
+ return cleaned;
2238
+ }
2239
+
2163
2240
  static getOptionsTools(tools) {
2164
2241
  const functionDeclarations = [];
2165
2242
  for (const tool in tools) {
@@ -2167,7 +2244,7 @@ class MixGoogle extends MixCustom {
2167
2244
  functionDeclarations.push({
2168
2245
  name: item.name,
2169
2246
  description: item.description,
2170
- parameters: item.inputSchema
2247
+ parameters: MixGoogle.stripUnsupportedSchemaProps(item.inputSchema)
2171
2248
  });
2172
2249
  }
2173
2250
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "modelmix",
3
- "version": "4.4.6",
3
+ "version": "4.4.8",
4
4
  "description": "🧬 Reliable interface with automatic fallback for AI LLMs.",
5
5
  "main": "index.js",
6
6
  "repository": {
@@ -48,7 +48,7 @@
48
48
  "homepage": "https://github.com/clasen/ModelMix#readme",
49
49
  "dependencies": {
50
50
  "@modelcontextprotocol/sdk": "^1.26.0",
51
- "axios": "^1.12.2",
51
+ "axios": "^1.13.5",
52
52
  "bottleneck": "^2.19.5",
53
53
  "file-type": "^16.5.4",
54
54
  "form-data": "^4.0.4",
@@ -73,4 +73,4 @@
73
73
  "test:live.mcp": "mocha test/live.mcp.js --timeout 60000 --require dotenv/config --require test/setup.js",
74
74
  "test:tokens": "mocha test/tokens.test.js --timeout 10000 --require dotenv/config --require test/setup.js"
75
75
  }
76
- }
76
+ }
package/schema.js CHANGED
@@ -1,4 +1,4 @@
1
- const META_KEYS = new Set(['description', 'required', 'enum', 'default']);
1
+ const META_KEYS = new Set(['description', 'required', 'enum', 'default', 'nullable']);
2
2
 
3
3
  function isDescriptor(value) {
4
4
  if (!value || typeof value !== 'object' || Array.isArray(value)) return false;
@@ -105,6 +105,9 @@ function generateJsonSchema(example, descriptions = {}) {
105
105
  isRequired = false;
106
106
  makeNullable(fieldSchema);
107
107
  }
108
+ if (desc.nullable === true) {
109
+ makeNullable(fieldSchema);
110
+ }
108
111
  if (desc.enum && desc.enum.includes(null)) {
109
112
  makeNullable(fieldSchema);
110
113
  }
@@ -75,8 +75,8 @@ Chain shorthand methods to attach providers. First model is primary; others are
75
75
 
76
76
  ```javascript
77
77
  const model = ModelMix.new()
78
- .sonnet45() // primary
79
- .gpt5mini() // fallback 1
78
+ .sonnet46() // primary
79
+ .gpt52() // fallback 1
80
80
  .gemini3flash() // fallback 2
81
81
  .addText("Hello!")
82
82
  ```
@@ -86,7 +86,7 @@ If `sonnet45` fails, it automatically tries `gpt5mini`, then `gemini3flash`.
86
86
  ## Available Model Shorthands
87
87
 
88
88
  - **OpenAI**: `gpt52` `gpt51` `gpt5` `gpt5mini` `gpt5nano` `gpt41` `gpt41mini` `gpt41nano`
89
- - **Anthropic**: `opus46` `opus45` `sonnet45` `sonnet4` `haiku45` `haiku35` (thinking variants: add `think` suffix)
89
+ - **Anthropic**: `opus46` `opus45` `sonnet46` `sonnet45` `haiku45` `haiku35` (thinking variants: add `think` suffix)
90
90
  - **Google**: `gemini3pro` `gemini3flash` `gemini25pro` `gemini25flash`
91
91
  - **Grok**: `grok4` `grok41` (thinking variant available)
92
92
  - **Perplexity**: `sonar` `sonarPro`
@@ -125,6 +125,36 @@ const result = await ModelMix.new()
125
125
 
126
126
  `json()` signature: `json(schemaExample, schemaDescription?, { addSchema, addExample, addNote }?)`
127
127
 
128
+ #### Enhanced descriptors
129
+
130
+ Descriptions can be **strings** or **descriptor objects** with metadata:
131
+
132
+ ```javascript
133
+ const result = await model.json(
134
+ { name: 'martin', age: 22, sex: 'Male' },
135
+ {
136
+ name: { description: 'Name of the actor', required: false },
137
+ age: 'Age of the actor', // string still works
138
+ sex: { description: 'Gender', enum: ['Male', 'Female', null] }
139
+ }
140
+ );
141
+ ```
142
+
143
+ Descriptor properties:
144
+ - `description` (string) — field description
145
+ - `required` (boolean, default `true`) — if `false`: removed from required array, type becomes nullable
146
+ - `enum` (array) — allowed values; if includes `null`, type auto-becomes nullable
147
+ - `default` (any) — default value
148
+
149
+ #### Array auto-wrap
150
+
151
+ Top-level arrays are auto-wrapped as `{ out: [...] }` for better LLM compatibility, and unwrapped on return:
152
+
153
+ ```javascript
154
+ const result = await model.json([{ name: 'martin' }]);
155
+ // result is an array: [{ name: "Martin" }, { name: "Carlos" }, ...]
156
+ ```
157
+
128
158
  ### Stream a response
129
159
 
130
160
  ```javascript
@@ -282,7 +312,7 @@ const reply = await chat.message(); // "Martin"
282
312
  - Store API keys in `.env` and load with `dotenv/config` or `process.loadEnvFile()`. Never hardcode keys.
283
313
  - Chain models for resilience: primary model first, fallbacks after.
284
314
  - When using MCP tools or `addTool()`, set `max_history` to at least 3.
285
- - Use `.json()` for structured output instead of parsing text manually.
315
+ - Use `.json()` for structured output instead of parsing text manually. Use descriptor objects `{ description, required, enum, default }` in descriptions for richer schema control.
286
316
  - Use `.message()` for simple text, `.raw()` when you need tokens/thinking/toolCalls.
287
317
  - For thinking models, append `think` to the method name (e.g. `sonnet45think()`).
288
318
  - Template placeholders use `{key}` syntax in both system prompts and user messages.
@@ -302,7 +332,7 @@ const reply = await chat.message(); // "Martin"
302
332
  | `.replace({})` | `this` | Set placeholder replacements |
303
333
  | `.replaceKeyFromFile(key, path)` | `this` | Replace placeholder with file content |
304
334
  | `.message()` | `Promise<string>` | Get text response |
305
- | `.json(example, desc?, opts?)` | `Promise<object>` | Get structured JSON |
335
+ | `.json(example, desc?, opts?)` | `Promise<object\|array>` | Get structured JSON. Descriptions support descriptor objects `{ description, required, enum, default }`. Top-level arrays auto-wrapped |
306
336
  | `.raw()` | `Promise<{message, think, toolCalls, tokens, response}>` | Full response |
307
337
  | `.lastRaw` | `object \| null` | Full response from last `message()`/`json()`/`block()`/`stream()` call |
308
338
  | `.stream(callback)` | `Promise` | Stream response |
@@ -72,7 +72,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
72
72
  it('should enforce minimum time between requests', async () => {
73
73
  const startTimes = [];
74
74
 
75
- model.gpt41();
75
+ model.gpt51();
76
76
 
77
77
  // Mock API responses
78
78
  nock('https://api.openai.com')
@@ -123,7 +123,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
123
123
  }
124
124
  });
125
125
 
126
- model.gpt41();
126
+ model.gpt51();
127
127
 
128
128
  // Mock API with delay to simulate concurrent requests
129
129
  nock('https://api.openai.com')
@@ -185,7 +185,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
185
185
  it('should apply rate limiting to OpenAI requests', async () => {
186
186
  const requestTimes = [];
187
187
 
188
- model.gpt41();
188
+ model.gpt51();
189
189
 
190
190
  nock('https://api.openai.com')
191
191
  .post('/v1/chat/completions')
@@ -216,7 +216,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
216
216
  it('should apply rate limiting to Anthropic requests', async () => {
217
217
  const requestTimes = [];
218
218
 
219
- model.sonnet4();
219
+ model.sonnet46();
220
220
 
221
221
  nock('https://api.anthropic.com')
222
222
  .post('/v1/messages')
@@ -268,7 +268,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
268
268
  });
269
269
 
270
270
  it('should handle rate limiting with API errors', async () => {
271
- model.gpt41();
271
+ model.gpt51();
272
272
 
273
273
  nock('https://api.openai.com')
274
274
  .post('/v1/chat/completions')
@@ -290,7 +290,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
290
290
  it('should continue rate limiting after errors', async () => {
291
291
  const requestTimes = [];
292
292
 
293
- model.gpt41();
293
+ model.gpt51();
294
294
 
295
295
  // First request fails
296
296
  nock('https://api.openai.com')
@@ -347,7 +347,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
347
347
  }
348
348
  });
349
349
 
350
- model.gpt41();
350
+ model.gpt51();
351
351
 
352
352
  let requestCount = 0;
353
353
 
@@ -395,7 +395,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
395
395
  }
396
396
  });
397
397
 
398
- model.gpt41();
398
+ model.gpt51();
399
399
 
400
400
  const results = [];
401
401
 
@@ -444,7 +444,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
444
444
  }
445
445
  });
446
446
 
447
- model.gpt41();
447
+ model.gpt51();
448
448
 
449
449
  nock('https://api.openai.com')
450
450
  .post('/v1/chat/completions')
@@ -493,7 +493,7 @@ describe('Rate Limiting with Bottleneck Tests', () => {
493
493
  done();
494
494
  });
495
495
 
496
- model.gpt41();
496
+ model.gpt51();
497
497
 
498
498
  nock('https://api.openai.com')
499
499
  .post('/v1/chat/completions')
@@ -25,7 +25,7 @@ describe('Provider Fallback Chain Tests', () => {
25
25
  });
26
26
 
27
27
  it('should use primary provider when available', async () => {
28
- model.gpt5mini().sonnet4().addText('Hello');
28
+ model.gpt5mini().sonnet46().addText('Hello');
29
29
 
30
30
  // Mock successful OpenAI response
31
31
  nock('https://api.openai.com')
@@ -45,7 +45,7 @@ describe('Provider Fallback Chain Tests', () => {
45
45
  });
46
46
 
47
47
  it('should fallback to secondary provider when primary fails', async () => {
48
- model.gpt5mini().sonnet4().addText('Hello');
48
+ model.gpt5mini().sonnet46().addText('Hello');
49
49
 
50
50
  // Mock failed OpenAI response (GPT-5 mini)
51
51
  nock('https://api.openai.com')
@@ -68,7 +68,7 @@ describe('Provider Fallback Chain Tests', () => {
68
68
  });
69
69
 
70
70
  it('should cascade through multiple fallbacks', async () => {
71
- model.gpt5mini().sonnet4().gemini25flash().addText('Hello');
71
+ model.gpt5mini().sonnet46().gemini3flash().addText('Hello');
72
72
 
73
73
  // Mock failed OpenAI response
74
74
  nock('https://api.openai.com')
@@ -99,7 +99,7 @@ describe('Provider Fallback Chain Tests', () => {
99
99
  });
100
100
 
101
101
  it('should throw error when all providers fail', async () => {
102
- model.gpt5mini().sonnet4().addText('Hello');
102
+ model.gpt5mini().sonnet46().addText('Hello');
103
103
 
104
104
  // Mock all providers failing
105
105
  nock('https://api.openai.com')
@@ -129,7 +129,7 @@ describe('Provider Fallback Chain Tests', () => {
129
129
  });
130
130
 
131
131
  it('should fallback from OpenAI to Anthropic', async () => {
132
- model.gpt5mini().sonnet4().addText('Test message');
132
+ model.gpt5mini().sonnet46().addText('Test message');
133
133
 
134
134
  // Mock OpenAI failure
135
135
  nock('https://api.openai.com')
@@ -152,7 +152,7 @@ describe('Provider Fallback Chain Tests', () => {
152
152
  });
153
153
 
154
154
  it('should fallback from Anthropic to Google', async () => {
155
- model.sonnet4().gemini25flash().addText('Test message');
155
+ model.sonnet46().gemini3flash().addText('Test message');
156
156
 
157
157
  // Mock Anthropic failure
158
158
  nock('https://api.anthropic.com')
@@ -178,7 +178,7 @@ describe('Provider Fallback Chain Tests', () => {
178
178
  });
179
179
 
180
180
  it('should handle network timeout fallback', async () => {
181
- model.gpt5mini().sonnet4().addText('Hello');
181
+ model.gpt5mini().sonnet46().addText('Hello');
182
182
 
183
183
  // Mock timeout error on first provider (using 408 Request Timeout)
184
184
  nock('https://api.openai.com')
@@ -212,7 +212,7 @@ describe('Provider Fallback Chain Tests', () => {
212
212
 
213
213
  it('should handle JSON fallback correctly', async () => {
214
214
  const schema = { name: 'Alice', age: 30 };
215
- model.gpt5mini().sonnet4().addText('Generate user data');
215
+ model.gpt5mini().sonnet46().addText('Generate user data');
216
216
 
217
217
  // Mock OpenAI failure
218
218
  nock('https://api.openai.com')
@@ -238,7 +238,7 @@ describe('Provider Fallback Chain Tests', () => {
238
238
  });
239
239
 
240
240
  it('should preserve message history through fallbacks', async () => {
241
- model.gpt5mini().sonnet4()
241
+ model.gpt5mini().sonnet46()
242
242
  .addText('First message')
243
243
  .addText('Second message');
244
244
 
@@ -272,7 +272,7 @@ describe('Provider Fallback Chain Tests', () => {
272
272
 
273
273
  // Configure with custom temperature for fallback
274
274
  model.gpt5mini({ options: { temperature: 0.6 } })
275
- .sonnet4({ options: { temperature: 0.7 } })
275
+ .sonnet46({ options: { temperature: 0.7 } })
276
276
  .addText('Creative response');
277
277
 
278
278
  // Mock first provider failure
@@ -305,7 +305,7 @@ describe('Provider Fallback Chain Tests', () => {
305
305
  });
306
306
 
307
307
  model.gpt5mini({ options: { max_tokens: 100 } })
308
- .sonnet4({ options: { max_tokens: 200 } })
308
+ .sonnet46({ options: { max_tokens: 200 } })
309
309
  .addText('Generate text');
310
310
 
311
311
  // Mock OpenAI failure
@@ -342,7 +342,7 @@ describe('Provider Fallback Chain Tests', () => {
342
342
  });
343
343
 
344
344
  it('should provide detailed error information when all fallbacks fail', async () => {
345
- model.gpt5mini().sonnet4().gemini25flash().addText('Test');
345
+ model.gpt5mini().sonnet46().gemini3flash().addText('Test');
346
346
 
347
347
  // Mock all providers failing with different errors
348
348
  nock('https://api.openai.com')
@@ -367,7 +367,7 @@ describe('Provider Fallback Chain Tests', () => {
367
367
  });
368
368
 
369
369
  it('should handle malformed responses in fallback', async () => {
370
- model.gpt5mini().sonnet4().addText('Test');
370
+ model.gpt5mini().sonnet46().addText('Test');
371
371
 
372
372
  // Mock malformed response from first provider
373
373
  nock('https://api.openai.com')
@@ -47,7 +47,7 @@ describe('Conversation History Tests', () => {
47
47
  });
48
48
 
49
49
  it('should add assistant response to message history after raw()', async () => {
50
- model.sonnet4().addText('Hello');
50
+ model.sonnet46().addText('Hello');
51
51
 
52
52
  nock('https://api.anthropic.com')
53
53
  .post('/v1/messages')
@@ -128,7 +128,7 @@ describe('Conversation History Tests', () => {
128
128
  const model = ModelMix.new({
129
129
  config: { debug: false, max_history: 10 }
130
130
  });
131
- model.sonnet4();
131
+ model.sonnet46();
132
132
 
133
133
  // First turn
134
134
  model.addText('Capital of France?');
@@ -426,7 +426,7 @@ describe('Conversation History Tests', () => {
426
426
  const model = ModelMix.new({
427
427
  config: { debug: false, max_history: 10 }
428
428
  });
429
- model.haiku35();
429
+ model.haiku45();
430
430
 
431
431
  model.addText('Hello');
432
432
  nock('https://api.anthropic.com')
@@ -460,7 +460,7 @@ describe('Conversation History Tests', () => {
460
460
  const model = ModelMix.new({
461
461
  config: { debug: false, max_history: 10 }
462
462
  });
463
- model.gemini25flash();
463
+ model.gemini3flash();
464
464
 
465
465
  model.addText('Hello');
466
466
  nock('https://generativelanguage.googleapis.com')
@@ -25,7 +25,7 @@ describe('Image Processing and Multimodal Support Tests', () => {
25
25
  it('should handle base64 image data correctly', async () => {
26
26
  const base64Image = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8z8BQz0AEYBxVSF+FABJADveWkH6oAAAAAElFTkSuQmCC';
27
27
 
28
- model.gpt41()
28
+ model.gpt52()
29
29
  .addText('What do you see in this image?')
30
30
  .addImageFromUrl(base64Image);
31
31
 
@@ -50,10 +50,10 @@ describe('Image Processing and Multimodal Support Tests', () => {
50
50
  expect(response).to.include('I can see a small test image');
51
51
  });
52
52
 
53
- it('should support multimodal with sonnet4()', async () => {
53
+ it('should support multimodal with sonnet46()', async () => {
54
54
  const base64Image = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8z8BQz0AEYBxVSF+FABJADveWkH6oAAAAAElFTkSuQmCC';
55
55
 
56
- model.sonnet4()
56
+ model.sonnet46()
57
57
  .addText('Describe this image')
58
58
  .addImageFromUrl(base64Image);
59
59
 
package/test/json.test.js CHANGED
@@ -355,7 +355,7 @@ describe('JSON Schema and Structured Output Tests', () => {
355
355
  }]
356
356
  };
357
357
 
358
- model.gpt41().addText('List 3 countries');
358
+ model.gpt52().addText('List 3 countries');
359
359
 
360
360
  // Mock the API response
361
361
  nock('https://api.openai.com')
@@ -406,7 +406,7 @@ describe('JSON Schema and Structured Output Tests', () => {
406
406
  }
407
407
  };
408
408
 
409
- model.sonnet4().addText('Generate user data');
409
+ model.sonnet46().addText('Generate user data');
410
410
 
411
411
  // Mock the API response
412
412
  nock('https://api.anthropic.com')
@@ -427,7 +427,7 @@ describe('JSON Schema and Structured Output Tests', () => {
427
427
  });
428
428
 
429
429
  it('should handle JSON parsing errors gracefully', async () => {
430
- model.gpt41().addText('Generate invalid JSON');
430
+ model.gpt52().addText('Generate invalid JSON');
431
431
 
432
432
  // Mock invalid JSON response
433
433
  nock('https://api.openai.com')
@@ -450,7 +450,7 @@ describe('JSON Schema and Structured Output Tests', () => {
450
450
  });
451
451
 
452
452
  it('should auto-wrap top-level array and unwrap on return', async () => {
453
- model.gpt41().addText('List 3 countries');
453
+ model.gpt52().addText('List 3 countries');
454
454
 
455
455
  nock('https://api.openai.com')
456
456
  .post('/v1/chat/completions')
package/test/live.mcp.js CHANGED
@@ -31,8 +31,8 @@ describe('Live MCP Integration Tests', function () {
31
31
 
32
32
  describe('Basic MCP Tool Integration', function () {
33
33
 
34
- it('should use custom MCP tools with GPT-4.1', async function () {
35
- const model = ModelMix.new(setup).gpt41();
34
+ it('should use custom MCP tools with GPT-5.2', async function () {
35
+ const model = ModelMix.new(setup).gpt52();
36
36
 
37
37
  // Add custom calculator tool
38
38
  model.addTool({
@@ -68,8 +68,8 @@ describe('Live MCP Integration Tests', function () {
68
68
  expect(response).to.include('345');
69
69
  });
70
70
 
71
- it('should use custom MCP tools with Claude Sonnet 4', async function () {
72
- const model = ModelMix.new(setup).sonnet4();
71
+ it('should use custom MCP tools with Claude Sonnet 4.6', async function () {
72
+ const model = ModelMix.new(setup).sonnet46();
73
73
 
74
74
  // Add time tool
75
75
  model.addTool({
@@ -505,7 +505,7 @@ describe('Live MCP Integration Tests', function () {
505
505
  const models = [
506
506
  { name: 'GPT-5 Mini', model: ModelMix.new(setup).gpt5mini() },
507
507
  { name: 'GPT-5 Nano', model: ModelMix.new(setup).gpt5nano() },
508
- { name: 'GPT-4.1', model: ModelMix.new(setup).gpt41() }
508
+ { name: 'GPT-5.2', model: ModelMix.new(setup).gpt52() }
509
509
  ];
510
510
 
511
511
  const results = [];
@@ -528,8 +528,8 @@ describe('Live MCP Integration Tests', function () {
528
528
 
529
529
  it('should work with same MCP tools across different Anthropic models', async function () {
530
530
  const models = [
531
- { name: 'Sonnet 4', model: ModelMix.new(setup).sonnet4() },
532
- { name: 'Sonnet 4.5', model: ModelMix.new(setup).sonnet45() },
531
+ { name: 'Sonnet 4.5', model: ModelMix.new(setup).sonnet45() },
532
+ { name: 'Sonnet 4.6', model: ModelMix.new(setup).sonnet46() },
533
533
  { name: 'Haiku 4.5', model: ModelMix.new(setup).haiku45() }
534
534
  ];
535
535
 
package/test/live.test.js CHANGED
@@ -41,14 +41,14 @@ describe('Live Integration Tests', function () {
41
41
 
42
42
  const response = await model.message();
43
43
 
44
- console.log(`OpenAI GPT-4o response: ${response}`);
44
+ console.log(`OpenAI GPT-5.2 response: ${response}`);
45
45
 
46
46
  expect(response).to.be.a('string');
47
47
  expect(response.toLowerCase()).to.include('blue');
48
48
  });
49
49
 
50
- it('should process images with Anthropic Claude', async function () {
51
- const model = ModelMix.new(setup).sonnet45();
50
+ it('should process images with Anthropic Sonnet 4.6', async function () {
51
+ const model = ModelMix.new(setup).sonnet46();
52
52
 
53
53
  model.addImageFromUrl(blueSquareBase64)
54
54
  .addText('What color is this image? Answer in one word only.');
@@ -61,7 +61,7 @@ describe('Live Integration Tests', function () {
61
61
  });
62
62
 
63
63
  it('should process images with Google Gemini', async function () {
64
- const model = ModelMix.new(setup).gemini25flash();
64
+ const model = ModelMix.new(setup).gemini3flash();
65
65
 
66
66
  model.addImageFromUrl(blueSquareBase64)
67
67
  .addText('What color is this image? Answer in one word only.');
@@ -99,8 +99,8 @@ describe('Live Integration Tests', function () {
99
99
  expect(result.skills).to.be.an('array');
100
100
  });
101
101
 
102
- it('should return structured JSON with Sonnet 4.5 thinking', async function () {
103
- const model = ModelMix.new(setup).sonnet45think();
102
+ it('should return structured JSON with Sonnet 4.6 thinking', async function () {
103
+ const model = ModelMix.new(setup).sonnet46think();
104
104
 
105
105
  model.addText('Generate information about a fictional city.');
106
106
 
@@ -122,7 +122,7 @@ describe('Live Integration Tests', function () {
122
122
  });
123
123
 
124
124
  it('should return structured JSON with Google Gemini', async function () {
125
- const model = ModelMix.new(setup).gemini25flash();
125
+ const model = ModelMix.new(setup).gemini3flash();
126
126
 
127
127
  model.addText('Generate information about a fictional city.');
128
128
 
@@ -152,7 +152,7 @@ describe('Live Integration Tests', function () {
152
152
  // Create a model chain: non-existent model -> Claude
153
153
  const model = ModelMix.new(setup)
154
154
  .attach('non-existent-model', new MixOpenAI())
155
- .sonnet4();
155
+ .sonnet46();
156
156
 
157
157
  model.addText('Say "fallback test successful" and nothing else.');
158
158
 
@@ -27,7 +27,7 @@ describe('Template and File Operations Tests', () => {
27
27
  });
28
28
 
29
29
  it('should replace simple template variables', async () => {
30
- model.gpt41()
30
+ model.gpt51()
31
31
  .replace({
32
32
  '{{name}}': 'Alice',
33
33
  '{{age}}': '30',
@@ -56,7 +56,7 @@ describe('Template and File Operations Tests', () => {
56
56
  });
57
57
 
58
58
  it('should handle multiple template replacements', async () => {
59
- model.gpt41()
59
+ model.gpt51()
60
60
  .replace({ '{{greeting}}': 'Hello' })
61
61
  .replace({ '{{name}}': 'Bob' })
62
62
  .replace({ '{{action}}': 'welcome' })
@@ -82,7 +82,7 @@ describe('Template and File Operations Tests', () => {
82
82
  });
83
83
 
84
84
  it('should handle nested template objects', async () => {
85
- model.gpt41()
85
+ model.gpt51()
86
86
  .replace({
87
87
  '{{user_name}}': 'Charlie',
88
88
  '{{user_role}}': 'admin',
@@ -111,7 +111,7 @@ describe('Template and File Operations Tests', () => {
111
111
  });
112
112
 
113
113
  it('should preserve unreplaced templates', async () => {
114
- model.gpt41()
114
+ model.gpt51()
115
115
  .replace({ '{{name}}': 'David' })
116
116
  .addText('Hello {{name}}, your ID is {{user_id}} and status is {{status}}');
117
117
 
@@ -135,7 +135,7 @@ describe('Template and File Operations Tests', () => {
135
135
  });
136
136
 
137
137
  it('should handle empty and special character replacements', async () => {
138
- model.gpt41()
138
+ model.gpt51()
139
139
  .replace({
140
140
  '{{empty}}': '',
141
141
  '{{special}}': 'Hello & "World" <test>',
@@ -175,7 +175,7 @@ describe('Template and File Operations Tests', () => {
175
175
  });
176
176
 
177
177
  it('should load and replace from template file', async () => {
178
- model.gpt41()
178
+ model.gpt51()
179
179
  .replaceKeyFromFile('{{template}}', path.join(fixturesPath, 'template.txt'))
180
180
  .replace({
181
181
  '{{name}}': 'Eve',
@@ -214,7 +214,7 @@ describe('Template and File Operations Tests', () => {
214
214
  });
215
215
 
216
216
  it('should load and process JSON data file', async () => {
217
- model.gpt41()
217
+ model.gpt51()
218
218
  .replaceKeyFromFile('{{data}}', path.join(fixturesPath, 'data.json'))
219
219
  .addText('Process this data: {{data}}');
220
220
 
@@ -246,7 +246,7 @@ describe('Template and File Operations Tests', () => {
246
246
  });
247
247
 
248
248
  it('should handle file loading errors gracefully', async () => {
249
- model.gpt41()
249
+ model.gpt51()
250
250
  .replaceKeyFromFile('{{missing}}', path.join(fixturesPath, 'nonexistent.txt'))
251
251
  .addText('This should contain: {{missing}}');
252
252
 
@@ -271,7 +271,7 @@ describe('Template and File Operations Tests', () => {
271
271
  });
272
272
 
273
273
  it('should handle multiple file replacements', async () => {
274
- model.gpt41()
274
+ model.gpt51()
275
275
  .replaceKeyFromFile('{{template}}', path.join(fixturesPath, 'template.txt'))
276
276
  .replaceKeyFromFile('{{data}}', path.join(fixturesPath, 'data.json'))
277
277
  .replace({
@@ -315,7 +315,7 @@ describe('Template and File Operations Tests', () => {
315
315
  it('should handle relative and absolute paths', async () => {
316
316
  const absolutePath = path.resolve(fixturesPath, 'template.txt');
317
317
 
318
- model.gpt41()
318
+ model.gpt51()
319
319
  .replaceKeyFromFile('{{absolute}}', absolutePath)
320
320
  .replace({
321
321
  '{{name}}': 'Grace',
@@ -362,7 +362,7 @@ describe('Template and File Operations Tests', () => {
362
362
  });
363
363
 
364
364
  it('should combine file loading with template replacement in complex scenarios', async () => {
365
- model.gpt41()
365
+ model.gpt51()
366
366
  .replaceKeyFromFile('{{user_data}}', path.join(fixturesPath, 'data.json'))
367
367
  .replace({
368
368
  '{{action}}': 'analyze',
@@ -402,7 +402,7 @@ describe('Template and File Operations Tests', () => {
402
402
  roles: ['admin', 'user']
403
403
  };
404
404
 
405
- model.gpt41()
405
+ model.gpt51()
406
406
  .replaceKeyFromFile('{{data}}', path.join(fixturesPath, 'data.json'))
407
407
  .replace({ '{{instruction}}': 'Count active users by role' })
408
408
  .addText('{{instruction}} from this data: {{data}}');
@@ -447,16 +447,16 @@ describe('Template and File Operations Tests', () => {
447
447
 
448
448
  it('should handle template replacement errors gracefully', () => {
449
449
  expect(() => {
450
- model.gpt41().replace(null);
450
+ model.gpt51().replace(null);
451
451
  }).to.not.throw();
452
452
 
453
453
  expect(() => {
454
- model.gpt41().replace(undefined);
454
+ model.gpt51().replace(undefined);
455
455
  }).to.not.throw();
456
456
  });
457
457
 
458
458
  it('should handle file reading errors without crashing', async () => {
459
- model.gpt41()
459
+ model.gpt51()
460
460
  .replaceKeyFromFile('{{bad_file}}', '/path/that/does/not/exist.txt')
461
461
  .addText('Content: {{bad_file}}');
462
462
 
@@ -1,8 +1,23 @@
1
1
  import { expect } from 'chai';
2
2
  import { ModelMix } from '../index.js';
3
+ import { createRequire } from 'module';
4
+
5
+ const require = createRequire(import.meta.url);
6
+ const nock = require('nock');
3
7
 
4
8
  describe('Token Usage Tracking', () => {
5
9
 
10
+ // Ensure nock doesn't interfere with live requests via MockHttpSocket
11
+ before(function() {
12
+ nock.cleanAll();
13
+ nock.restore();
14
+ });
15
+
16
+ after(function() {
17
+ // Re-activate nock for any subsequent test suites
18
+ nock.activate();
19
+ });
20
+
6
21
  it('should track tokens in OpenAI response', async function () {
7
22
  this.timeout(30000);
8
23
 
@@ -30,7 +45,7 @@ describe('Token Usage Tracking', () => {
30
45
  this.timeout(30000);
31
46
 
32
47
  const model = ModelMix.new()
33
- .haiku35()
48
+ .haiku45()
34
49
  .addText('Say hi');
35
50
 
36
51
  const result = await model.raw();
@@ -49,7 +64,7 @@ describe('Token Usage Tracking', () => {
49
64
  this.timeout(30000);
50
65
 
51
66
  const model = ModelMix.new()
52
- .gemini25flash()
67
+ .gemini3flash()
53
68
  .addText('Say hi');
54
69
 
55
70
  const result = await model.raw();
@@ -110,8 +125,8 @@ describe('Token Usage Tracking', () => {
110
125
 
111
126
  const providers = [
112
127
  { name: 'OpenAI', create: (m) => m.gpt5nano() },
113
- { name: 'Anthropic', create: (m) => m.haiku35() },
114
- { name: 'Google', create: (m) => m.gemini25flash() }
128
+ { name: 'Anthropic', create: (m) => m.haiku45() },
129
+ { name: 'Google', create: (m) => m.gemini3flash() }
115
130
  ];
116
131
 
117
132
  for (const provider of providers) {