modelmix 4.3.6 → 4.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -18,11 +18,9 @@ Ever found yourself wanting to integrate AI models into your projects but worrie
18
18
  ## 🛠️ Usage
19
19
 
20
20
  1. **Install the ModelMix package:**
21
- Recommended: install dotenv to manage environment variables
22
21
  ```bash
23
- npm install modelmix dotenv
22
+ npm install modelmix
24
23
  ```
25
-
26
24
  > **AI Skill**: You can also add ModelMix as a skill for AI agentic development:
27
25
  > ```bash
28
26
  > npx skills add https://github.com/clasen/ModelMix --skill modelmix
@@ -39,6 +37,8 @@ MINIMAX_API_KEY="your-minimax-key..."
39
37
  GEMINI_API_KEY="AIza..."
40
38
  ```
41
39
 
40
+ For environment variables, use `dotenv` or Node's built-in `process.loadEnvFile()`.
41
+
42
42
  3. **Create and configure your models**:
43
43
 
44
44
  ```javascript
@@ -400,12 +400,12 @@ The `cost` field is the estimated cost in USD based on the model's pricing per 1
400
400
 
401
401
  To activate debug mode in ModelMix and view detailed request information, follow these two steps:
402
402
 
403
- 1. In the ModelMix constructor, include `debug: 3` in the configuration:
403
+ 1. In the ModelMix constructor, include a `debug` level in the configuration:
404
404
 
405
405
  ```javascript
406
406
  const mix = ModelMix.new({
407
407
  config: {
408
- debug: 3
408
+ debug: 4 // 0=silent, 1=minimal, 2=summary, 3=full (no truncate), 4=verbose (raw details)
409
409
  // ... other configuration options ...
410
410
  }
411
411
  });
package/demo/gpt51.js CHANGED
@@ -3,7 +3,7 @@ import { ModelMix } from '../index.js';
3
3
 
4
4
  const mmix = new ModelMix({
5
5
  config: {
6
- debug: 2,
6
+ debug: 3,
7
7
  }
8
8
  });
9
9
 
package/index.js CHANGED
@@ -110,7 +110,7 @@ class ModelMix {
110
110
  this.config = {
111
111
  system: 'You are an assistant.',
112
112
  max_history: 1, // Default max history
113
- debug: 0, // 0=silent, 1=minimal, 2=readable summary, 3=full details
113
+ debug: 0, // 0=silent, 1=minimal, 2=readable summary, 3=full (no truncate), 4=verbose (raw details)
114
114
  bottleneck: defaultBottleneckConfig,
115
115
  roundRobin: false, // false=fallback mode, true=round robin rotation
116
116
  ...config
@@ -170,7 +170,7 @@ class ModelMix {
170
170
  return (tokens.input * inputPerMillion / 1_000_000) + (tokens.output * outputPerMillion / 1_000_000);
171
171
  }
172
172
 
173
- static formatInputSummary(messages, system) {
173
+ static formatInputSummary(messages, system, debug = 2) {
174
174
  const lastMessage = messages[messages.length - 1];
175
175
  let inputText = '';
176
176
 
@@ -181,38 +181,39 @@ class ModelMix {
181
181
  inputText = lastMessage.content;
182
182
  }
183
183
 
184
- const systemStr = `System: ${ModelMix.truncate(system, 500)}`;
185
- const inputStr = `Input: ${ModelMix.truncate(inputText, 1200)}`;
184
+ const noTruncate = debug >= 3;
185
+ const systemStr = noTruncate ? (system || '') : ModelMix.truncate(system, 500);
186
+ const inputStr = noTruncate ? inputText : ModelMix.truncate(inputText, 1200);
186
187
  const msgCount = `(${messages.length} msg${messages.length !== 1 ? 's' : ''})`;
187
188
 
188
- return `${systemStr} \n| ${inputStr} ${msgCount}`;
189
+ return `| SYSTEM\n${systemStr}\n| INPUT ${msgCount}\n${inputStr}`;
189
190
  }
190
191
 
191
192
  static formatOutputSummary(result, debug) {
192
193
  const parts = [];
194
+ const noTruncate = debug >= 3;
193
195
  if (result.message) {
194
196
  // Try to parse as JSON for better formatting
195
197
  try {
196
198
  const parsed = JSON.parse(result.message.trim());
197
199
  // If it's valid JSON and debug >= 2, show it formatted
198
200
  if (debug >= 2) {
199
- parts.push(`Output (JSON):\n${ModelMix.formatJSON(parsed)}`);
201
+ parts.push(`| OUTPUT (JSON)\n${ModelMix.formatJSON(parsed)}`);
200
202
  } else {
201
- parts.push(`Output: ${ModelMix.truncate(result.message, 1500)}`);
203
+ parts.push(`| OUTPUT\n${ModelMix.truncate(result.message, 1500)}`);
202
204
  }
203
205
  } catch (e) {
204
- // Not JSON, show truncated as before
205
- parts.push(`Output: ${ModelMix.truncate(result.message, 1500)}`);
206
+ parts.push(`| OUTPUT\n${noTruncate ? result.message : ModelMix.truncate(result.message, 1500)}`);
206
207
  }
207
208
  }
208
209
  if (result.think) {
209
- parts.push(`Think: ${ModelMix.truncate(result.think, 800)}`);
210
+ parts.push(`| THINK\n${noTruncate ? result.think : ModelMix.truncate(result.think, 800)}`);
210
211
  }
211
212
  if (result.toolCalls && result.toolCalls.length > 0) {
212
213
  const toolNames = result.toolCalls.map(t => t.function?.name || t.name).join(', ');
213
- parts.push(`Tools: ${toolNames}`);
214
+ parts.push(`| TOOLS\n${toolNames}`);
214
215
  }
215
- return parts.join(' | ');
216
+ return parts.join('\n');
216
217
  }
217
218
 
218
219
  attach(key, provider) {
@@ -842,7 +843,7 @@ class ModelMix {
842
843
  const header = `\n${prefix} [${providerName}:${currentModelKey}] #${originalIndex + 1}${suffix}`;
843
844
 
844
845
  if (currentConfig.debug >= 2) {
845
- console.log(`${header} | ${ModelMix.formatInputSummary(this.messages, currentConfig.system)}`);
846
+ console.log(`${header}\n${ModelMix.formatInputSummary(this.messages, currentConfig.system, currentConfig.debug)}`);
846
847
  } else {
847
848
  console.log(header);
848
849
  }
@@ -897,11 +898,14 @@ class ModelMix {
897
898
 
898
899
  // debug level 2: Readable summary of output
899
900
  if (currentConfig.debug >= 2) {
900
- console.log(`✓ ${ModelMix.formatOutputSummary(result, currentConfig.debug).trim()}`);
901
+ const tokenInfo = result.tokens
902
+ ? ` ${result.tokens.input}→${result.tokens.output} tok` + (result.tokens.cost != null ? ` $${result.tokens.cost.toFixed(4)}` : '')
903
+ : '';
904
+ console.log(`✓${tokenInfo}\n${ModelMix.formatOutputSummary(result, currentConfig.debug).trim()}`);
901
905
  }
902
906
 
903
- // debug level 3 (debug): Full response details
904
- if (currentConfig.debug >= 3) {
907
+ // debug level 4 (verbose): Full response details
908
+ if (currentConfig.debug >= 4) {
905
909
  if (result.response) {
906
910
  console.log('\n[RAW RESPONSE]');
907
911
  console.log(ModelMix.formatJSON(result.response));
@@ -1148,8 +1152,8 @@ class MixCustom {
1148
1152
 
1149
1153
  options.messages = this.convertMessages(options.messages, config);
1150
1154
 
1151
- // debug level 3 (debug): Full request details
1152
- if (config.debug >= 3) {
1155
+ // debug level 4 (verbose): Full request details
1156
+ if (config.debug >= 4) {
1153
1157
  console.log('\n[REQUEST DETAILS]');
1154
1158
 
1155
1159
  console.log('\n[CONFIG]');
@@ -2032,8 +2036,8 @@ class MixGoogle extends MixCustom {
2032
2036
  };
2033
2037
 
2034
2038
  try {
2035
- // debug level 3 (debug): Full request details
2036
- if (config.debug >= 3) {
2039
+ // debug level 4 (verbose): Full request details
2040
+ if (config.debug >= 4) {
2037
2041
  console.log('\n[REQUEST DETAILS - GOOGLE]');
2038
2042
 
2039
2043
  console.log('\n[CONFIG]');
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "modelmix",
3
- "version": "4.3.6",
3
+ "version": "4.4.0",
4
4
  "description": "🧬 Reliable interface with automatic fallback for AI LLMs.",
5
5
  "main": "index.js",
6
6
  "repository": {
package/SKILL.md CHANGED (NOTE: a file-header line appears to be missing from the rendered diff here — the hunks below contain Markdown skill documentation ("Do NOT use this skill for", "## Common Tasks", "## Installation") and cannot belong to package.json; the actual filename should be confirmed against the published package)
@@ -20,6 +20,23 @@ Do NOT use this skill for:
20
20
  - Python or non-Node.js projects
21
21
  - Direct HTTP calls to LLM APIs (use ModelMix instead)
22
22
 
23
+ ## Common Tasks
24
+
25
+ - [Get a text response](#get-a-text-response)
26
+ - [Get structured JSON](#get-structured-json)
27
+ - [Stream a response](#stream-a-response)
28
+ - [Get raw response (tokens, thinking, tool calls)](#get-raw-response-tokens-thinking-tool-calls)
29
+ - [Access full response after `message()` or `json()` with `lastRaw`](#access-full-response-after-message-or-json-with-lastraw)
30
+ - [Add images](#add-images)
31
+ - [Use templates with placeholders](#use-templates-with-placeholders)
32
+ - [Round-robin load balancing](#round-robin-load-balancing)
33
+ - [MCP integration (external tools)](#mcp-integration-external-tools)
34
+ - [Custom local tools (addTool)](#custom-local-tools-addtool)
35
+ - [Rate limiting (Bottleneck)](#rate-limiting-bottleneck)
36
+ - [Debug mode](#debug-mode)
37
+ - [Use free-tier models](#use-free-tier-models)
38
+ - [Conversation history](#conversation-history)
39
+
23
40
  ## Installation
24
41
 
25
42
  ```bash
@@ -46,7 +63,7 @@ const model = ModelMix.new({
46
63
  config: {
47
64
  system: "You are a helpful assistant.",
48
65
  max_history: 5,
49
- debug: 0, // 0=silent, 1=minimal, 2=summary, 3=full
66
+ debug: 0, // 0=silent, 1=minimal, 2=summary, 3=full (no truncate), 4=verbose
50
67
  roundRobin: false // false=fallback, true=rotate models
51
68
  }
52
69
  });
@@ -229,7 +246,7 @@ const model = ModelMix.new({
229
246
 
230
247
  ```javascript
231
248
  const model = ModelMix.new({
232
- config: { debug: 2 } // 0=silent, 1=minimal, 2=summary, 3=full
249
+ config: { debug: 2 } // 0=silent, 1=minimal, 2=summary, 3=full (no truncate), 4=verbose
233
250
  }).gpt5mini();
234
251
  ```
235
252