modelmix 4.2.4 → 4.2.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,239 @@
1
+ process.loadEnvFile();
2
+ import { ModelMix } from '../index.js';
3
+ import ivm from 'isolated-vm';
4
+
5
+ console.log('🧬 ModelMix - IVM + mmix Callback Demo');
6
+
7
+ // ═══════════════════════════════════════════════════════════════════════════
8
+ // UTILITIES: Describe data for the model (without exposing raw content)
9
+ // ═══════════════════════════════════════════════════════════════════════════
10
+
11
function describeData(data, name = 'data') {
    // Build a human-readable description of `data`'s shape for the LLM prompt.
    // Deliberately exposes only metadata (types, lengths, keys) — never raw content.
    const type = Array.isArray(data) ? 'array' : typeof data;

    if (Array.isArray(data)) {
        const itemDescriptions = data.map((item, i) => {
            if (typeof item === 'string') return `[${i}]: string, ${item.length} chars`;
            // Guard: typeof null === 'object', and Object.keys(null) would throw.
            if (item === null) return `[${i}]: null`;
            if (typeof item === 'object') return `[${i}]: object with keys [${Object.keys(item).join(', ')}]`;
            return `[${i}]: ${typeof item}`;
        });
        return `Variable '${name}' is an array with ${data.length} elements:\n ${itemDescriptions.join('\n ')}`;
    }

    if (typeof data === 'object' && data !== null) {
        const keys = Object.keys(data);
        return `Variable '${name}' is an object with keys: [${keys.join(', ')}]`;
    }

    if (typeof data === 'string') {
        return `Variable '${name}' is a string with ${data.length} characters`;
    }

    return `Variable '${name}' is of type ${type}`;
}
34
+
35
+ // ═══════════════════════════════════════════════════════════════════════════
36
+ // IVM CONTEXT: Setup isolated environment with mmix callback
37
+ // ═══════════════════════════════════════════════════════════════════════════
38
+
39
// Build an isolated-vm context that exposes every entry of `contextData` as a
// global variable plus an async `mmix.query()` bridge that proxies LLM calls
// back to the host process.
async function createIvmContext(isolate, contextData, mmixInstance) {
    const ctx = await isolate.createContext();
    const globals = ctx.global;

    // Copy each context variable into the isolate (structured-clone semantics).
    for (const [key, value] of Object.entries(contextData)) {
        await globals.set(key, new ivm.ExternalCopy(value).copyInto());
    }

    // Host-side bridge: receives (system, message, outputSchema-as-JSON-string),
    // runs the model, and hands a JSON string back across the isolate boundary.
    const bridge = new ivm.Reference(async (system, message, outputJson) => {
        const schema = JSON.parse(outputJson);
        const answer = await mmixInstance.new()
            .gpt41nano()
            .setSystem(system)
            .addText(message)
            .json(schema, schema);
        return new ivm.ExternalCopy(JSON.stringify(answer)).copyInto();
    });
    await globals.set('__mmixCallback', bridge);

    // Guest-side wrapper: serializes the schema, awaits the host reference as a
    // promise, and parses the JSON reply back into an object.
    await ctx.eval(`
        const mmix = {
            async query(system, message, output) {
                const outputJson = JSON.stringify(output);
                const resultJson = await __mmixCallback.apply(undefined, [system, message, outputJson], { result: { promise: true } });
                return JSON.parse(resultJson);
            }
        };
    `);

    return ctx;
}
73
+
74
+ // ═══════════════════════════════════════════════════════════════════════════
75
+ // MAIN: Execute model-generated code in IVM with mmix access
76
+ // ═══════════════════════════════════════════════════════════════════════════
77
+
78
/**
 * Ask `model` to generate JavaScript for `task`, then execute that code inside
 * an isolated-vm sandbox that exposes `contextData` as globals plus an async
 * `mmix` bridge for recursive LLM calls.
 *
 * @param {object} opts
 * @param {string} opts.task - Natural-language task for the code generator.
 * @param {object} opts.contextData - Variables copied into the isolate (only
 *   their metadata is shown to the model, never the raw content).
 * @param {object} opts.model - ModelMix builder used both for code generation
 *   and for the in-sandbox `mmix.query()` callbacks.
 * @param {number} [opts.maxChunkSize=1500] - Chunk-size hint in the prompt.
 * @returns {Promise<*>} The parsed value returned by the generated code.
 */
async function runIvmWithMmix({ task, contextData, model, maxChunkSize = 1500 }) {
    const isolate = new ivm.Isolate({ memoryLimit: 128 });

    try {
        // Describe the data for the model (metadata only, no raw content).
        const dataDescriptions = Object.entries(contextData)
            .map(([key, value]) => describeData(value, key))
            .join('\n');

        // System prompt explaining the sandboxed environment and its API.
        const systemPrompt = `You are a code generator. You have access to an isolated JavaScript environment with:

AVAILABLE DATA:
${dataDescriptions}

AVAILABLE API:
- mmix.query(system, message, outputSchema): async function that calls an LLM. Returns a JSON object.
  - system: the system prompt for the LLM
  - message: the user message/query
  - outputSchema: object defining expected output structure (keys are variable names, values are descriptions)

EXAMPLES:
// Single query - translation
const result = await mmix.query(
  'You are a professional translator',
  'Translate to Spanish: Hello world',
  { translation: 'text translated to spanish' }
);
// Returns: { translation: 'Hola mundo' }

// Single query - summary with metadata
const summary = await mmix.query(
  'You are a summarizer',
  'Summarize: ' + text,
  { summary: 'brief summary of the text', wordCount: 'number of words in summary' }
);
// Returns: { summary: '...', wordCount: 42 }

// Parallel queries (recommended for multiple independent operations)
const results = await Promise.all([
  mmix.query('Translator', 'Translate to Spanish: ' + paragraphs[0], { translation: 'text translated to spanish' }),
  mmix.query('Translator', 'Translate to Spanish: ' + paragraphs[1], { translation: 'text translated to spanish' })
]);

// Combine results
return results.map(r => r.translation).join('\\n\\n');

PARALLEL PROCESSING RULES:
- When processing large texts in parallel, split into chunks of maximum ${maxChunkSize} characters
- Use Promise.all() to process all chunks simultaneously
- Example splitting a text:
  const chunkSize = ${maxChunkSize};
  const chunks = [];
  for (let i = 0; i < input.length; i += chunkSize) {
    chunks.push(input.substring(i, i + chunkSize));
  }
  const results = await Promise.all(
    chunks.map(chunk => mmix.query('System prompt', chunk, outputSchema))
  );
  return results.map(r => r.translation).join('');

IMPORTANT:
- Write an async IIFE that returns the final result directly (string, array, or simple object)
- mmix.query() returns JSON objects, extract the fields you need (e.g., result.translation)
- Use Promise.all for parallel operations when possible
- When splitting text, prefer to split at natural boundaries (paragraphs, sentences) when near the ${maxChunkSize} limit
- Return ONLY the code, no explanations or markdown`;

        model.setSystem(systemPrompt);

        // Request code generation.
        const code = await model
            .addText(`Task: ${task}\n\nGenerate the JavaScript code. Return ONLY the async IIFE code, nothing else.`)
            .message();

        console.log('\n📄 Generated code:');
        console.log('─'.repeat(60));
        console.log(code);
        console.log('─'.repeat(60));

        // Create the IVM context with the data and the mmix callback.
        const context = await createIvmContext(isolate, contextData, model);

        // Execute the generated code (wrap in JSON.stringify for safe transfer
        // of the result across the isolate boundary).
        console.log('\n⚡ Executing in IVM...');
        const wrappedCode = `(async () => {
            const __result = await ${code};
            return JSON.stringify(__result);
        })()`;

        const resultJson = await context.eval(wrappedCode, {
            timeout: 60000, // 60s timeout for LLM calls
            promise: true
        });

        // Parse the result (handle both primitives and objects); if the
        // sandbox returned something that is not valid JSON, pass it through.
        try {
            return JSON.parse(resultJson);
        } catch {
            return resultJson;
        }

    } finally {
        // Always reclaim sandbox memory, even when generation/execution fails.
        isolate.dispose();
    }
}
184
+
185
+ // ═══════════════════════════════════════════════════════════════════════════
186
+ // DEMO: Summarize paragraphs example
187
+ // ═══════════════════════════════════════════════════════════════════════════
188
+
189
/**
 * Demo: translate paragraphs via model-generated code running in IVM.
 * The model never sees the paragraph content, only its metadata.
 *
 * @returns {Promise<*>} The final result produced inside the sandbox.
 */
async function demo() {
    console.log('\n=== Demo: Summarize Paragraphs via IVM + mmix ===\n');

    // Sample data: 6 paragraphs (3 unique texts, each duplicated to give the
    // generated code enough independent work to run in parallel).
    const paragraphs = [
        `Artificial intelligence has transformed numerous industries over the past decade. From healthcare diagnostics to autonomous vehicles, AI systems now perform tasks that were once thought to require human intelligence. Machine learning algorithms can analyze vast amounts of data, identify patterns, and make predictions with remarkable accuracy.`,

        `Climate change represents one of the most pressing challenges facing humanity today. Rising global temperatures are causing more frequent extreme weather events, melting polar ice caps, and threatening biodiversity across the planet. Scientists warn that immediate action is needed to reduce greenhouse gas emissions and transition to renewable energy sources.`,

        `The evolution of remote work has fundamentally changed how businesses operate. Companies have discovered that distributed teams can be highly productive while offering employees better work-life balance. However, this shift also presents challenges in maintaining company culture, ensuring effective communication, and managing across different time zones.`,

        `Artificial intelligence has transformed numerous industries over the past decade. From healthcare diagnostics to autonomous vehicles, AI systems now perform tasks that were once thought to require human intelligence. Machine learning algorithms can analyze vast amounts of data, identify patterns, and make predictions with remarkable accuracy.`,

        `Climate change represents one of the most pressing challenges facing humanity today. Rising global temperatures are causing more frequent extreme weather events, melting polar ice caps, and threatening biodiversity across the planet. Scientists warn that immediate action is needed to reduce greenhouse gas emissions and transition to renewable energy sources.`,

        `The evolution of remote work has fundamentally changed how businesses operate. Companies have discovered that distributed teams can be highly productive while offering employees better work-life balance. However, this shift also presents challenges in maintaining company culture, ensuring effective communication, and managing across different time zones.`
    ];

    // Base mmix instance used for both code generation and sandbox callbacks.
    const model = ModelMix.new({ config: { debug: 2, bottleneck: {} } })
        .gpt52({ options: { reasoning_effort: 'none', verbosity: null } })
        .gpt41nano()
        .gemini3flash();

    // Run the IVM task.
    const result = await runIvmWithMmix({
        task: 'Translate each paragraph to latin spanish, then return all joined by double newlines.',
        contextData: { paragraphs },
        model
    });

    console.log('\n✅ Final result from IVM:');
    console.log('═'.repeat(60));
    console.log(result);
    console.log('═'.repeat(60));

    return result;
}
229
+
230
+ // ═══════════════════════════════════════════════════════════════════════════
231
+ // RUN
232
+ // ═══════════════════════════════════════════════════════════════════════════
233
+
234
// Entry point: run the demo and report success/failure (top-level await).
try {
    await demo();
    console.log('\n✅ Demo completed successfully');
} catch (error) {
    console.error('❌ Error:', error);
}
@@ -0,0 +1,271 @@
1
+ process.loadEnvFile();
2
+ import { ModelMix } from '../index.js';
3
+ import ivm from 'isolated-vm';
4
+
5
+ console.log('🧬 ModelMix - RLM (Recursive Language Model) Demo');
6
+ console.log('šŸ“„ Basado en: https://arxiv.org/html/2512.24601v1\n');
7
+
8
+ // Simulamos un documento largo que serĆ­a demasiado grande para pasar directamente
9
+ const LONG_DOCUMENT = `
10
+ USERS DATABASE - System Report 2025
11
+ ====================================
12
+
13
+ USER_001: Alice Johnson
14
+ Role: Senior Developer
15
+ Department: Engineering
16
+ Location: San Francisco, CA
17
+ Skills: JavaScript, Python, React, Node.js
18
+ Projects: 15 completed, 3 in progress
19
+ Performance Score: 9.2/10
20
+ Last Active: 2025-01-09
21
+ Notes: Excellent team leader, mentors junior developers
22
+
23
+ USER_002: Bob Smith
24
+ Role: Data Scientist
25
+ Department: Analytics
26
+ Location: New York, NY
27
+ Skills: Python, R, TensorFlow, SQL
28
+ Projects: 8 completed, 2 in progress
29
+ Performance Score: 8.7/10
30
+ Last Active: 2025-01-08
31
+ Notes: Strong statistical background, innovative approaches
32
+
33
+ USER_003: Carol White
34
+ Role: Product Manager
35
+ Department: Product
36
+ Location: Austin, TX
37
+ Skills: Agile, Jira, User Research, SQL
38
+ Projects: 22 completed, 5 in progress
39
+ Performance Score: 9.5/10
40
+ Last Active: 2025-01-09
41
+ Notes: Exceptional stakeholder management
42
+
43
+ USER_004: David Chen
44
+ Role: Junior Developer
45
+ Department: Engineering
46
+ Location: San Francisco, CA
47
+ Skills: JavaScript, HTML, CSS, Git
48
+ Projects: 3 completed, 1 in progress
49
+ Performance Score: 7.8/10
50
+ Last Active: 2025-01-07
51
+ Notes: Fast learner, needs more experience with backend
52
+
53
+ USER_005: Emma Davis
54
+ Role: Senior Data Scientist
55
+ Department: Analytics
56
+ Location: Boston, MA
57
+ Skills: Python, Machine Learning, PyTorch, AWS
58
+ Projects: 12 completed, 4 in progress
59
+ Performance Score: 9.0/10
60
+ Last Active: 2025-01-09
61
+ Notes: Published 5 research papers, conference speaker
62
+
63
+ USER_006: Frank Martinez
64
+ Role: DevOps Engineer
65
+ Department: Engineering
66
+ Location: Seattle, WA
67
+ Skills: Docker, Kubernetes, AWS, Terraform
68
+ Projects: 18 completed, 2 in progress
69
+ Performance Score: 8.9/10
70
+ Last Active: 2025-01-09
71
+ Notes: Reduced deployment time by 60%
72
+
73
+ USER_007: Grace Lee
74
+ Role: UX Designer
75
+ Department: Design
76
+ Location: Los Angeles, CA
77
+ Skills: Figma, User Research, Prototyping, CSS
78
+ Projects: 25 completed, 6 in progress
79
+ Performance Score: 9.3/10
80
+ Last Active: 2025-01-08
81
+ Notes: Award-winning designer, excellent user empathy
82
+
83
+ USER_008: Henry Taylor
84
+ Role: Junior Data Analyst
85
+ Department: Analytics
86
+ Location: Chicago, IL
87
+ Skills: SQL, Excel, Tableau, Python
88
+ Projects: 5 completed, 2 in progress
89
+ Performance Score: 7.5/10
90
+ Last Active: 2025-01-06
91
+ Notes: Good attention to detail, improving Python skills
92
+ `;
93
+
94
// Isolate that serves as the sandboxed REPL for document inspection.
const isolate = new ivm.Isolate({ memoryLimit: 256 });

// Recursive-call bookkeeping (demonstrates bounded recursion).
let recursionDepth = 0;
const maxDepth = 3;
100
+
101
/**
 * RLM (Recursive Language Model) demo: the model answers a query about a
 * document that is too large to pass directly, by (1) inspecting it with
 * sandboxed JavaScript and (2) optionally recursing on focused sub-tasks.
 */
async function rlmExample() {
    console.log('=== RLM: Análisis de Documento Largo ===\n');
    console.log(`📊 Documento: ${LONG_DOCUMENT.length} caracteres`);
    console.log(`🎯 Tarea: Encontrar usuarios de Engineering con score > 8.5\n`);

    const gptArgs = { options: { reasoning_effort: "none", verbosity: null } };
    const mmix = ModelMix.new({ config: { debug: false, max_history: 15 } })
        .gpt41nano()
        .gpt52(gptArgs)
        .gemini3flash()
        .setSystem(`You are an RLM (Recursive Language Model) agent.

KEY PRINCIPLE: Instead of processing the entire document directly, you can:
1. Inspect the document structure using code
2. Break it into smaller chunks programmatically
3. Process each chunk recursively if needed

You have access to:
- inspect_document: Examine parts of the document via JavaScript. The DOCUMENT variable is available. Write code that returns a value (e.g., "return DOCUMENT.length" or just "DOCUMENT.length").
- recursive_call: Make a recursive call to yourself with a focused sub-task

Current recursion depth: ${recursionDepth}/${maxDepth}`);

    // Shared state: record of every inspection the model performs.
    let inspectionResults = [];

    // Tool 1: inspect the document programmatically inside the isolate.
    mmix.addTool({
        name: "inspect_document",
        description: "Execute JavaScript code to inspect and analyze the document. The document is available as 'DOCUMENT' variable. You can use string methods, regex, parsing, etc. The code should return a value (not just log it).",
        inputSchema: {
            type: "object",
            properties: {
                code: {
                    type: "string",
                    description: "JavaScript code to execute. The DOCUMENT variable contains the full text. Your code should return a value. Example: 'return DOCUMENT.split(\"\\n\").length' or just 'DOCUMENT.length'"
                },
                explanation: {
                    type: "string",
                    description: "Brief explanation of what this code does"
                }
            },
            required: ["code", "explanation"]
        }
    }, async ({ code, explanation }) => {
        console.log(`\n🔍 [Depth ${recursionDepth}] Inspecting document: ${explanation}`);
        console.log('─'.repeat(60));
        console.log(code);
        console.log('─'.repeat(60));

        try {
            const context = await isolate.createContext();

            // Inject the document into the context as a global variable.
            const jail = context.global;
            await jail.set('DOCUMENT', LONG_DOCUMENT);

            // Run the model's code and JSON-stringify the result inside the
            // context so it can cross the isolate boundary safely.
            const wrappedCode = `
                (function() {
                    const result = (function() {
                        ${code}
                    })();
                    return JSON.stringify(result);
                })()
            `;

            const script = await isolate.compileScript(wrappedCode);
            const jsonResult = await script.run(context, { timeout: 10000 });

            // Parse the JSON result back on the host side.
            const parsedResult = JSON.parse(jsonResult);

            inspectionResults.push({ explanation, result: parsedResult });

            console.log('✅ Resultado:', JSON.stringify(parsedResult, null, 2));
            return JSON.stringify(parsedResult);
        } catch (error) {
            // Feed the error back to the model so it can correct its code.
            console.log('❌ Error:', error.message);
            return `Error: ${error.message}`;
        }
    });

    // Tool 2: recursive call (simplified for this example).
    mmix.addTool({
        name: "recursive_call",
        description: "Make a recursive call to the RLM with a focused sub-task and optional document chunk. Use this to decompose complex queries into simpler ones.",
        inputSchema: {
            type: "object",
            properties: {
                sub_task: {
                    type: "string",
                    description: "The focused sub-task to solve recursively"
                },
                document_chunk: {
                    type: "string",
                    description: "Optional: A smaller chunk of the document to focus on"
                }
            },
            required: ["sub_task"]
        }
    }, async ({ sub_task, document_chunk }) => {
        recursionDepth++;

        // Bounded recursion: refuse and ask the model to solve it directly.
        if (recursionDepth > maxDepth) {
            recursionDepth--;
            return `Maximum recursion depth reached (${maxDepth}). Please solve this sub-task directly.`;
        }

        console.log(`\n🔄 [Depth ${recursionDepth}] Recursive call:`);
        console.log(`📝 Sub-task: ${sub_task}`);
        if (document_chunk) {
            console.log(`📄 Chunk size: ${document_chunk.length} chars`);
        }
        console.log('─'.repeat(60));

        // Fresh instance for the recursive call.
        const recursiveMmix = ModelMix.new({ config: { debug: false } })
            .gpt41nano()
            .setSystem(`You are processing a sub-task. Be concise and direct.
Recursion depth: ${recursionDepth}/${maxDepth}
${document_chunk ? 'Document chunk provided.' : 'No document chunk provided.'}`);

        if (document_chunk) {
            recursiveMmix.addText(`Document chunk:\n${document_chunk}\n\nTask: ${sub_task}`);
        } else {
            recursiveMmix.addText(sub_task);
        }

        const result = await recursiveMmix.message();

        recursionDepth--;

        console.log(`✅ [Depth ${recursionDepth + 1}] Result: ${result.substring(0, 100)}...`);
        return result;
    });

    // The main task.
    mmix.addText(`
Using the RLM approach, find all users in the Engineering department with a Performance Score greater than 8.5.

APPROACH:
1. First, use inspect_document to understand the document structure
2. Use inspect_document to extract all user entries
3. Use inspect_document to filter users by department and score
4. Optionally use recursive_call if you need to process sub-tasks

Finally, provide a clear summary with the user names and their scores.
`);

    const result = await mmix.message();

    console.log('\n' + '='.repeat(60));
    console.log('💬 FINAL ANSWER:');
    console.log('='.repeat(60));
    console.log(result);

    console.log('\n📊 INSPECTION SUMMARY:');
    console.log(`Total inspections: ${inspectionResults.length}`);
    inspectionResults.forEach((r, i) => {
        console.log(`${i + 1}. ${r.explanation}`);
    });
}
264
+
265
// Entry point: run the RLM example and report success/failure (top-level await).
try {
    await rlmExample();
    console.log('\n✅ RLM Ejemplo completado');
} catch (error) {
    console.error('❌ Error:', error);
    console.error(error.stack);
}
@@ -0,0 +1,26 @@
1
+ process.loadEnvFile();
2
+ import { ModelMix } from '../index.js';
3
+
4
console.log('\n=== Round Robin Simple Demo ===\n');

// Create an instance with round robin enabled.
const ai = ModelMix.new({
    config: {
        debug: 2, // Show which model is being used
        roundRobin: true
    },
    mix: { openrouter: false } // Exclude OpenRouter (free tier often rate-limited)
})
    .gptOss();

console.log('Making 6 requests with round robin enabled...\n');

// Make 6 requests to observe the rotation through all models (cerebras + groq).
// Deliberately sequential (await inside the loop) so the per-request model
// selection is printed in order.
for (let i = 1; i <= 6; i++) {
    const result = await ai.new()
        .addText(`Calculate ${i} * 2`)
        .message();
    console.log(` Result: ${result.trim()}\n`);
}

console.log('✓ Demo completed!\n');
@@ -0,0 +1,103 @@
1
+ process.loadEnvFile();
2
+ import { ModelMix, MixOpenAI } from '../index.js';
3
+
4
// Shared prompt reused by every verbosity demonstration below.
const prompt = "Say 'Hello World' in exactly 2 words.";

console.log('═══════════════════════════════════════════════════════════════');
console.log('DEMO: Verbose Modes in ModelMix');
console.log('═══════════════════════════════════════════════════════════════\n');


// ===================================================================
// VERBOSE LEVEL 0 - Silent Mode
// ===================================================================
console.log('─────────────────────────────────────────────────────────────');
console.log('1. VERBOSE LEVEL 0 - Silent Mode');
console.log(' No output at all, only the result');
console.log('─────────────────────────────────────────────────────────────\n');

await ModelMix
    .new({ config: { verbose: 0 } })
    .gpt41nano()
    .addText(prompt)
    .message();


// ===================================================================
// VERBOSE LEVEL 1 - Minimal Mode
// ===================================================================
console.log('─────────────────────────────────────────────────────────────');
console.log('2. VERBOSE LEVEL 1 - Minimal Mode');
console.log(' Shows: → [model] #N and ✓ Success');
console.log('─────────────────────────────────────────────────────────────\n');

await ModelMix
    .new({ config: { verbose: 1 } })
    .gpt41nano()
    .addText(prompt)
    .message();

// ===================================================================
// VERBOSE LEVEL 2 - Readable Summary (DEFAULT)
// ===================================================================
console.log('─────────────────────────────────────────────────────────────');
console.log('3. VERBOSE LEVEL 2 - Readable Summary (DEFAULT)');
console.log(' Shows: model, system prompt, input, message count, output');
console.log(' Everything in compact format on 2 lines');
console.log('─────────────────────────────────────────────────────────────\n');

await ModelMix
    .new({ config: { verbose: 2 } })
    .gpt41nano()
    .addText(prompt)
    .json({ message: 'string' });

// ===================================================================
// VERBOSE LEVEL 3 - Full Debug
// ===================================================================
console.log('─────────────────────────────────────────────────────────────');
console.log('4. VERBOSE LEVEL 3 - Full Debug Mode');
console.log(' Shows: everything from level 2 + raw response, full message,');
console.log(' request details, config, and options');
console.log('─────────────────────────────────────────────────────────────\n');

await ModelMix
    .new({ config: { verbose: 3 } })
    .gpt41nano()
    .addText(prompt)
    .message();

// ===================================================================
// FALLBACK EXAMPLE (with verbose 2)
// ===================================================================
console.log('─────────────────────────────────────────────────────────────');
console.log('5. FALLBACK EXAMPLE (Verbose Level 2)');
console.log(' Shows how fallback models are displayed');
console.log('─────────────────────────────────────────────────────────────\n');

try {
    // The bogus model is expected to fail; gpt41nano picks up as fallback.
    const resultFallback = await ModelMix
        .new({ config: { verbose: 2 } })
        .attach('fake-model-that-will-fail', new MixOpenAI())
        .gpt41nano() // This will be the fallback
        .addText(prompt)
        .message();

    console.log(`Result: ${resultFallback}\n`);
} catch (error) {
    console.log(`Error (should not happen): ${error.message}\n`);
}


// ===================================================================
// SUMMARY
// ===================================================================
console.log('═══════════════════════════════════════════════════════════════');
console.log('DEMO COMPLETED');
console.log('═══════════════════════════════════════════════════════════════\n');
console.log('Summary:');
console.log(' - Level 0: Silent, no logs');
console.log(' - Level 1: Minimal (→ model, ✓ Success)');
console.log(' - Level 2: Readable (1 line input + 1 line output) [DEFAULT]');
console.log(' - Level 3: Full debug (includes raw responses and configs)');
console.log('');