modelmix 4.2.4 → 4.2.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,479 @@
1
+ /**
2
+ * RECURSIVE LANGUAGE MODELS (RLM) - Generic Parallel Strategy
3
+ *
4
+ * Based on: https://arxiv.org/html/2512.24601v1
5
+ *
6
+ * This implements the RLM paradigm where:
7
+ * - Long prompts/data are treated as VARIABLES in an environment (not fed to the model)
8
+ * - The model programmatically inspects and decomposes data
9
+ * - The model CONSTRUCTS its own prompts for parallel ModelMix calls
10
+ * - Recursive refinement allows iterative improvement based on partial results
11
+ *
12
+ * ENVIRONMENT:
13
+ * - Variables can contain any data: text, structured data, arrays, etc.
14
+ * - The model uses tools to inspect and manipulate these variables
15
+ * - The model decides what prompts to create for parallel processing
16
+ *
17
+ * TOOLS:
18
+ * - inspect_variables: See what's available in the environment
19
+ * - execute_code: Run JavaScript to analyze/transform data (returns result)
20
+ * - parallel_modelmix: Execute parallel ModelMix calls with custom prompts
21
+ * - recursive_call: Make recursive calls with updated context
22
+ *
23
+ * This is GENERIC - works with any data structure, not hardcoded for specific use cases.
24
+ */
25
+
26
+ process.loadEnvFile();
27
+ import { ModelMix } from '../index.js';
28
+
29
+ console.log('🧬 ModelMix - RLM (Recursive Language Models) Demo');
30
+ console.log('🎯 Generic parallel strategy with environment variables\n');
31
+
32
+ // Example 1: Book with chapters (variable in environment)
33
+ const BOOK_DATA = {
34
+ title: "The Future of Artificial Intelligence",
35
+ type: "book",
36
+ chapters: [
37
+ {
38
+ id: 1,
39
+ title: "Introduction to AI",
40
+ content: `Artificial Intelligence represents one of the most transformative technologies of our time.
41
+ From its inception in the 1950s at Dartmouth College to today's large language models, AI has evolved
42
+ dramatically. Early symbolic AI systems could play chess and prove mathematical theorems, but struggled
43
+ with perception and common sense reasoning. The field went through multiple "AI winters" where progress
44
+ stalled and funding dried up. However, the advent of machine learning, particularly deep learning in the
45
+ 2010s, revolutionized the field. Neural networks with millions or billions of parameters can now understand
46
+ natural language, generate images, and even write code. This chapter explores the historical context and
47
+ foundational concepts that make modern AI possible.`
48
+ },
49
+ {
50
+ id: 2,
51
+ title: "Machine Learning Fundamentals",
52
+ content: `Machine learning is the backbone of modern AI systems. Unlike traditional programming where
53
+ rules are explicitly coded, machine learning algorithms learn patterns from data. Supervised learning uses
54
+ labeled examples to train models that can make predictions on new data. Unsupervised learning finds hidden
55
+ patterns without labels. Reinforcement learning trains agents through trial and error with reward signals.
56
+ The key innovation of deep learning is the use of neural networks with multiple layers that can learn
57
+ hierarchical representations. Convolutional Neural Networks (CNNs) excel at visual tasks by learning
58
+ spatial hierarchies. Recurrent Neural Networks (RNNs) and their modern variant, Transformers, process
59
+ sequential data like text and speech.`
60
+ },
61
+ {
62
+ id: 3,
63
+ title: "Natural Language Processing",
64
+ content: `Natural Language Processing (NLP) enables computers to understand and generate human language.
65
+ Early NLP systems relied on hand-crafted rules and linguistic features. The statistical revolution in the
66
+ 1990s brought probabilistic models that could learn from text corpora. Word embeddings like Word2Vec and
67
+ GloVe represented words as vectors, capturing semantic relationships. The transformer architecture, introduced
68
+ in 2017, revolutionized NLP with its attention mechanism. BERT pioneered bidirectional pretraining, while
69
+ GPT models showed the power of scaling and autoregressive generation. Today's large language models like
70
+ GPT-4 and Claude demonstrate remarkable capabilities in translation, summarization, and reasoning.`
71
+ },
72
+ {
73
+ id: 4,
74
+ title: "Computer Vision",
75
+ content: `Computer vision aims to give machines the ability to see and understand visual information.
76
+ Early systems used edge detection and template matching. The breakthrough came with Convolutional Neural
77
+ Networks (CNNs), particularly AlexNet's victory in ImageNet 2012. Object detection systems like YOLO can
78
+ identify and locate multiple objects in images. Segmentation models like U-Net precisely delineate object
79
+ boundaries. Recent advances include Vision Transformers and multimodal models like CLIP that connect vision
80
+ and language. Applications span autonomous vehicles, medical imaging, and facial recognition.`
81
+ },
82
+ {
83
+ id: 5,
84
+ title: "The Future",
85
+ content: `The trajectory of AI points toward increasingly capable and general systems. Artificial
86
+ General Intelligence (AGI) that matches human-level reasoning across all domains remains aspirational.
87
+ Scaling laws suggest that larger models with more data continue to improve. Multimodal AI that seamlessly
88
+ integrates text, vision, and audio is rapidly advancing. Few-shot and zero-shot learning reduce data
89
+ requirements. The convergence of AI with biotechnology and neuroscience could lead to transformative
90
+ breakthroughs. Society will need to adapt to increasingly automated work and AI-augmented capabilities.`
91
+ }
92
+ ]
93
+ };
94
+
95
+ // Example 2: Research papers dataset (variable in environment)
96
+ const PAPERS_DATA = {
97
+ dataset: "AI Research Papers 2024",
98
+ type: "papers",
99
+ papers: [
100
+ { id: 1, title: "Attention Is All You Need", topic: "transformers", citations: 95000, year: 2017 },
101
+ { id: 2, title: "BERT: Pre-training of Deep Bidirectional Transformers", topic: "nlp", citations: 75000, year: 2018 },
102
+ { id: 3, title: "GPT-3: Language Models are Few-Shot Learners", topic: "llm", citations: 45000, year: 2020 },
103
+ { id: 4, title: "ResNet: Deep Residual Learning", topic: "vision", citations: 120000, year: 2015 },
104
+ { id: 5, title: "DALL-E: Zero-Shot Text-to-Image Generation", topic: "multimodal", citations: 8000, year: 2021 }
105
+ ]
106
+ };
107
+
108
+ async function genericRLMExample(variableName, variableData, task) {
109
+ console.log('=== RLM: Generic Parallel Processing ===\n');
110
+ console.log(`📊 Variable: ${variableName}`);
111
+ console.log(`📋 Task: ${task}\n`);
112
+
113
+ // Environment - data is stored here, NOT in the model's context
114
+ let environment = {
115
+ variables: {
116
+ [variableName]: variableData
117
+ },
118
+ execution_log: [],
119
+ iteration: 0
120
+ };
121
+
122
+ const mmix = ModelMix.new({
123
+ config: {
124
+ debug: false,
125
+ max_history: 50,
126
+ bottleneck: {
127
+ maxConcurrent: 10,
128
+ minTime: 100
129
+ }
130
+ }
131
+ })
132
+ .gpt41nano()
133
+ .setSystem(`You are a Recursive Language Model (RLM) agent.
134
+
135
+ Data is in the ENVIRONMENT as variables, not in your context. You work programmatically:
136
+ - Inspect variables to understand what's available
137
+ - Execute code to analyze and transform data
138
+ - Construct prompts for parallel ModelMix calls
139
+ - Make recursive calls to refine results
140
+
141
+ You decide the strategy based on the data and task.`);
142
+
143
+ // Tool 1: Inspect environment variables
144
+ mmix.addTool({
145
+ name: "inspect_variables",
146
+ description: "Lists all variables in the environment with their types and basic info. Does not return the full content.",
147
+ inputSchema: {
148
+ type: "object",
149
+ properties: {},
150
+ required: []
151
+ }
152
+ }, async () => {
153
+ const info = {};
154
+ for (const [name, value] of Object.entries(environment.variables)) {
155
+ if (typeof value === 'object' && value !== null) {
156
+ if (Array.isArray(value)) {
157
+ info[name] = {
158
+ type: 'array',
159
+ length: value.length,
160
+ sample: value[0]
161
+ };
162
+ } else {
163
+ info[name] = {
164
+ type: 'object',
165
+ keys: Object.keys(value),
166
+ structure: typeof value.chapters !== 'undefined' ? 'book' :
167
+ typeof value.papers !== 'undefined' ? 'dataset' : 'generic'
168
+ };
169
+ }
170
+ } else {
171
+ info[name] = {
172
+ type: typeof value,
173
+ value: value
174
+ };
175
+ }
176
+ }
177
+
178
+ console.log('🔍 Environment variables:', JSON.stringify(info, null, 2));
179
+ return JSON.stringify(info);
180
+ });
181
+
182
+ // Tool 2: Execute code to analyze/transform data
183
+ mmix.addTool({
184
+ name: "execute_code",
185
+ description: "Executes JavaScript code in the environment. All variables are available. Return the result. Examples: 'return BOOK_DATA.chapters.length', 'return PAPERS_DATA.papers.map(p => ({title: p.title, content: p.content}))'",
186
+ inputSchema: {
187
+ type: "object",
188
+ properties: {
189
+ code: {
190
+ type: "string",
191
+ description: "JavaScript code to execute. Should return a value. All environment variables are in scope."
192
+ },
193
+ description: {
194
+ type: "string",
195
+ description: "Brief description of what this code does"
196
+ }
197
+ },
198
+ required: ["code", "description"]
199
+ }
200
+ }, async ({ code, description }) => {
201
+ console.log(`\n💻 Executing code: ${description}`);
202
+ console.log('─'.repeat(60));
203
+ console.log(code);
204
+ console.log('─'.repeat(60));
205
+
206
+ try {
207
+ // Create function with all environment variables in scope
208
+ const varNames = Object.keys(environment.variables);
209
+ const varValues = Object.values(environment.variables);
210
+
211
+ const func = new Function(...varNames, `
212
+ ${code}
213
+ `);
214
+
215
+ const result = func(...varValues);
216
+
217
+ // Limit display output but return full result
218
+ const displayResult = JSON.stringify(result, null, 2);
219
+ const truncated = displayResult.length > 1000 ? displayResult.substring(0, 1000) + '...(truncated)' : displayResult;
220
+ console.log('✅ Result:', truncated);
221
+
222
+ return JSON.stringify(result);
223
+ } catch (error) {
224
+ console.log('āŒ Error:', error.message);
225
+ return JSON.stringify({ error: error.message });
226
+ }
227
+ });
228
+
229
+ // Tool 3: Parallel ModelMix calls with CUSTOM prompts
230
+ mmix.addTool({
231
+ name: "parallel_modelmix",
232
+ description: "Executes multiple ModelMix calls in parallel. YOU construct the prompt and context for each call. The model processes each item independently and returns results.",
233
+ inputSchema: {
234
+ type: "object",
235
+ properties: {
236
+ calls: {
237
+ type: "array",
238
+ description: "Array of ModelMix calls to execute in parallel",
239
+ items: {
240
+ type: "object",
241
+ properties: {
242
+ id: {
243
+ type: "string",
244
+ description: "Unique identifier for this call"
245
+ },
246
+ system_prompt: {
247
+ type: "string",
248
+ description: "System prompt for this ModelMix instance (optional)"
249
+ },
250
+ user_prompt: {
251
+ type: "string",
252
+ description: "The main prompt/instruction for this call. YOU construct this."
253
+ },
254
+ context: {
255
+ type: "string",
256
+ description: "Additional context/data for this call. YOU provide the relevant data."
257
+ }
258
+ },
259
+ required: ["id", "user_prompt"]
260
+ }
261
+ }
262
+ },
263
+ required: ["calls"]
264
+ }
265
+ }, async ({ calls }) => {
266
+ console.log(`\n🚀 Parallel ModelMix execution: ${calls.length} calls`);
267
+ console.log('─'.repeat(70));
268
+
269
+ const startTime = Date.now();
270
+
271
+ const results = await Promise.all(
272
+ calls.map(async (call) => {
273
+ const taskStart = Date.now();
274
+ console.log(`šŸ“ [${call.id}] Starting...`);
275
+ console.log(` Prompt: ${call.user_prompt.substring(0, 80)}...`);
276
+
277
+ // Create a new ModelMix instance for this call
278
+ const callMmix = ModelMix.new({
279
+ config: {
280
+ debug: false,
281
+ max_history: 5
282
+ }
283
+ });
284
+
285
+ callMmix.gpt41mini();
286
+
287
+ if (call.system_prompt) {
288
+ callMmix.setSystem(call.system_prompt);
289
+ }
290
+
291
+ let fullPrompt = call.user_prompt;
292
+ if (call.context) {
293
+ fullPrompt += `\n\nCONTEXT:\n${call.context}`;
294
+ }
295
+
296
+ callMmix.addText(fullPrompt);
297
+
298
+ try {
299
+ const result = await callMmix.message();
300
+ const duration = Date.now() - taskStart;
301
+
302
+ console.log(`✅ [${call.id}] Completed in ${duration}ms`);
303
+
304
+ environment.execution_log.push({
305
+ type: 'parallel_call',
306
+ id: call.id,
307
+ duration,
308
+ success: true
309
+ });
310
+
311
+ return {
312
+ id: call.id,
313
+ success: true,
314
+ result: result
315
+ };
316
+ } catch (error) {
317
+ console.log(`āŒ [${call.id}] Error: ${error.message}`);
318
+
319
+ environment.execution_log.push({
320
+ type: 'parallel_call',
321
+ id: call.id,
322
+ duration: Date.now() - taskStart,
323
+ success: false,
324
+ error: error.message
325
+ });
326
+
327
+ return {
328
+ id: call.id,
329
+ success: false,
330
+ error: error.message
331
+ };
332
+ }
333
+ })
334
+ );
335
+
336
+ const totalDuration = Date.now() - startTime;
337
+ console.log('─'.repeat(70));
338
+ console.log(`⚡ Total: ${totalDuration}ms for ${calls.length} calls`);
339
+ console.log(`📊 Average: ${Math.round(totalDuration / calls.length)}ms per call\n`);
340
+
341
+ return JSON.stringify(results, null, 2);
342
+ });
343
+
344
+ // Tool 4: Recursive call for refinement
345
+ mmix.addTool({
346
+ name: "recursive_call",
347
+ description: "Makes a recursive ModelMix call to refine, synthesize, or further process results. Use this to combine partial results or iterate on the solution.",
348
+ inputSchema: {
349
+ type: "object",
350
+ properties: {
351
+ task: {
352
+ type: "string",
353
+ description: "The task for the recursive call"
354
+ },
355
+ context: {
356
+ type: "string",
357
+ description: "Context or data for the recursive call"
358
+ },
359
+ system_prompt: {
360
+ type: "string",
361
+ description: "Optional system prompt for this recursive call"
362
+ }
363
+ },
364
+ required: ["task", "context"]
365
+ }
366
+ }, async ({ task, context, system_prompt }) => {
367
+ environment.iteration++;
368
+
369
+ console.log(`\n🔄 Recursive call (iteration ${environment.iteration})`);
370
+ console.log(`📝 Task: ${task.substring(0, 100)}...`);
371
+ console.log('─'.repeat(70));
372
+
373
+ const recursiveMmix = ModelMix.new({
374
+ config: {
375
+ debug: false,
376
+ max_history: 10
377
+ }
378
+ })
379
+ .gpt41nano();
380
+
381
+ if (system_prompt) {
382
+ recursiveMmix.setSystem(system_prompt);
383
+ } else {
384
+ recursiveMmix.setSystem('You are a synthesis and analysis assistant. Be clear and concise.');
385
+ }
386
+
387
+ recursiveMmix.addText(`${task}\n\n${context}`);
388
+
389
+ try {
390
+ const result = await recursiveMmix.message();
391
+ console.log(`✅ Recursive call completed\n`);
392
+
393
+ environment.execution_log.push({
394
+ type: 'recursive_call',
395
+ iteration: environment.iteration,
396
+ task: task
397
+ });
398
+
399
+ return result;
400
+ } catch (error) {
401
+ console.log(`āŒ Recursive call error: ${error.message}\n`);
402
+ return `Error: ${error.message}`;
403
+ }
404
+ });
405
+
406
+ // Set the task for the agent
407
+ mmix.addText(`
408
+ ENVIRONMENT VARIABLE: ${variableName}
409
+
410
+ TASK: ${task}
411
+
412
+ STRATEGY (RLM paradigm):
413
+ 1. Use inspect_variables to see what's available
414
+ 2. Use execute_code to analyze the data structure and extract information
415
+ 3. Design your approach - decide what needs parallel processing
416
+ 4. Use parallel_modelmix to execute multiple ModelMix calls
417
+ - YOU construct the prompts for each call
418
+ - YOU provide the relevant context/data for each call
419
+ 5. Use recursive_call if you need to synthesize results
420
+
421
+ The data is in the ENVIRONMENT. Work programmatically. Be strategic and efficient.
422
+ `);
423
+
424
+ console.log('🤔 RLM agent analyzing task...\n');
425
+ const result = await mmix.message();
426
+
427
+ console.log('\n' + '='.repeat(70));
428
+ console.log('📊 FINAL RESULT:');
429
+ console.log('='.repeat(70));
430
+ console.log(result);
431
+
432
+ console.log('\n' + '='.repeat(70));
433
+ console.log('📈 EXECUTION STATISTICS:');
434
+ console.log('='.repeat(70));
435
+ console.log(`Total iterations: ${environment.iteration}`);
436
+ console.log(`Execution log entries: ${environment.execution_log.length}`);
437
+
438
+ const parallelCalls = environment.execution_log.filter(e => e.type === 'parallel_call');
439
+ if (parallelCalls.length > 0) {
440
+ const successful = parallelCalls.filter(e => e.success).length;
441
+ const avgDuration = parallelCalls.reduce((sum, e) => sum + (e.duration || 0), 0) / parallelCalls.length;
442
+ console.log(`Parallel ModelMix calls: ${successful}/${parallelCalls.length} successful`);
443
+ console.log(`Average call duration: ${Math.round(avgDuration)}ms`);
444
+ }
445
+
446
+ const recursiveCalls = environment.execution_log.filter(e => e.type === 'recursive_call');
447
+ if (recursiveCalls.length > 0) {
448
+ console.log(`Recursive calls: ${recursiveCalls.length}`);
449
+ }
450
+ }
451
+
452
+ // Run examples
453
+ (async () => {
454
+ try {
455
+ // Example 1: Book summarization
456
+ await genericRLMExample(
457
+ 'BOOK_DATA',
458
+ BOOK_DATA,
459
+ 'Provide a comprehensive summary of the book, focusing on key concepts in each chapter and overall themes.'
460
+ );
461
+
462
+ console.log('\n\n' + '='.repeat(70));
463
+ console.log('\n');
464
+
465
+ // Example 2: Research papers analysis
466
+ await genericRLMExample(
467
+ 'PAPERS_DATA',
468
+ PAPERS_DATA,
469
+ 'Analyze the research papers dataset. Identify trends, compare topics, and highlight the most influential papers.'
470
+ );
471
+
472
+ console.log('\n✅ RLM Demo completed');
473
+ process.exit(0);
474
+ } catch (error) {
475
+ console.error('āŒ Error:', error);
476
+ console.error(error.stack);
477
+ process.exit(1);
478
+ }
479
+ })();
@@ -0,0 +1,183 @@
1
+ process.loadEnvFile();
2
+ import { ModelMix } from '../index.js';
3
+
4
+ console.log('🧬 RLM Basic Demo - Recursive Language Model');
5
+ console.log('📖 Inspired by: https://arxiv.org/html/2512.24601v1\n');
6
+
7
+ /**
8
+ * RLM Concept: Instead of passing a huge prompt directly to the LLM,
9
+ * we treat it as an external "environment" that the LLM can:
10
+ * 1. Inspect programmatically (peek into parts)
11
+ * 2. Decompose into sub-tasks
12
+ * 3. Process recursively
13
+ */
14
+
15
+ // Simulamos un documento muy largo (en RLM real, esto podría ser millones de tokens)
16
+ const LARGE_DATASET = {
17
+ total_length: '1.5M tokens (simulated)',
18
+ data: [
19
+ { id: 1, name: 'Alice', dept: 'Engineering', score: 9.2, skills: ['JavaScript', 'Python', 'React'] },
20
+ { id: 2, name: 'Bob', dept: 'Analytics', score: 8.7, skills: ['Python', 'R', 'SQL'] },
21
+ { id: 3, name: 'Carol', dept: 'Product', score: 9.5, skills: ['Agile', 'SQL'] },
22
+ { id: 4, name: 'David', dept: 'Engineering', score: 7.8, skills: ['JavaScript', 'Git'] },
23
+ { id: 5, name: 'Emma', dept: 'Analytics', score: 9.0, skills: ['Python', 'ML', 'AWS'] },
24
+ { id: 6, name: 'Frank', dept: 'Engineering', score: 8.9, skills: ['Docker', 'Kubernetes'] },
25
+ { id: 7, name: 'Grace', dept: 'Design', score: 9.3, skills: ['Figma', 'CSS'] },
26
+ { id: 8, name: 'Henry', dept: 'Analytics', score: 7.5, skills: ['SQL', 'Python'] },
27
+ ]
28
+ };
29
+
30
+ let recursionCount = 0;
31
+
32
+ async function basicRLM() {
33
+ console.log('=== RLM: Procesamiento de Dataset Grande ===\n');
34
+ console.log(`📊 Dataset: ${LARGE_DATASET.total_length}`);
35
+ console.log(`👥 Records: ${LARGE_DATASET.data.length}`);
36
+ console.log('🎯 Query: "Find top Engineering talents with Python skills"\n');
37
+
38
+ const mmix = ModelMix.new({ config: { debug: false, max_history: 20 } })
39
+ .gpt41nano()
40
+ .setSystem(`You are an RLM (Recursive Language Model).
41
+
42
+ Instead of reading all data at once, you:
43
+ 1. INSPECT: Use 'query_data' to explore the dataset programmatically
44
+ 2. DECOMPOSE: Break complex queries into simpler sub-queries
45
+ 3. RECURSE: Use 'solve_subtask' for focused sub-problems
46
+
47
+ Be strategic: think about what information you need and query only that.`);
48
+
49
+ // Tool 1: Query the data environment (instead of loading all into context)
50
+ mmix.addTool({
51
+ name: "query_data",
52
+ description: "Query the large dataset without loading it entirely. You can filter, aggregate, or inspect specific fields.",
53
+ inputSchema: {
54
+ type: "object",
55
+ properties: {
56
+ operation: {
57
+ type: "string",
58
+ enum: ["filter", "count", "get_fields", "get_unique_values"],
59
+ description: "Operation to perform"
60
+ },
61
+ field: {
62
+ type: "string",
63
+ description: "Field name to query (e.g., 'dept', 'score', 'skills')"
64
+ },
65
+ condition: {
66
+ type: "object",
67
+ description: "Condition for filtering (e.g., {field: 'dept', value: 'Engineering'})"
68
+ }
69
+ },
70
+ required: ["operation"]
71
+ }
72
+ }, async ({ operation, field, condition }) => {
73
+ console.log(`šŸ” Query: ${operation}${field ? ` on "${field}"` : ''}${condition ? ` where ${JSON.stringify(condition)}` : ''}`);
74
+
75
+ switch (operation) {
76
+ case "count":
77
+ return JSON.stringify({ count: LARGE_DATASET.data.length });
78
+
79
+ case "get_fields":
80
+ const fields = Object.keys(LARGE_DATASET.data[0]);
81
+ return JSON.stringify({ fields });
82
+
83
+ case "get_unique_values":
84
+ if (!field) return JSON.stringify({ error: "field required" });
85
+ const unique = [...new Set(LARGE_DATASET.data.map(d => d[field]))];
86
+ return JSON.stringify({ field, unique_values: unique });
87
+
88
+ case "filter":
89
+ let filtered = LARGE_DATASET.data;
90
+ if (condition) {
91
+ filtered = filtered.filter(item => {
92
+ if (condition.operator === 'gt') {
93
+ return item[condition.field] > condition.value;
94
+ } else if (condition.operator === 'contains') {
95
+ return item[condition.field]?.includes(condition.value);
96
+ } else {
97
+ return item[condition.field] === condition.value;
98
+ }
99
+ });
100
+ }
101
+ console.log(` → Found ${filtered.length} records`);
102
+ return JSON.stringify({ count: filtered.length, data: filtered });
103
+
104
+ default:
105
+ return JSON.stringify({ error: "Unknown operation" });
106
+ }
107
+ });
108
+
109
+ // Tool 2: Recursive sub-task solver
110
+ mmix.addTool({
111
+ name: "solve_subtask",
112
+ description: "Recursively solve a focused sub-task. Use this to decompose complex queries.",
113
+ inputSchema: {
114
+ type: "object",
115
+ properties: {
116
+ task: {
117
+ type: "string",
118
+ description: "The sub-task to solve"
119
+ },
120
+ context: {
121
+ type: "string",
122
+ description: "Relevant context/data for this sub-task"
123
+ }
124
+ },
125
+ required: ["task", "context"]
126
+ }
127
+ }, async ({ task, context }) => {
128
+ recursionCount++;
129
+ console.log(`\n🔄 Recursion ${recursionCount}: ${task.substring(0, 60)}...`);
130
+
131
+ if (recursionCount > 3) {
132
+ recursionCount--;
133
+ return "Max recursion depth reached. Please solve directly.";
134
+ }
135
+
136
+ // Create a focused recursive call
137
+ const subMmix = ModelMix.new({ config: { debug: false } })
138
+ .gpt41nano()
139
+ .setSystem(`Solve this focused sub-task concisely. Recursion level: ${recursionCount}`);
140
+
141
+ subMmix.addText(`Context: ${context}\n\nTask: ${task}`);
142
+
143
+ const result = await subMmix.message();
144
+ recursionCount--;
145
+
146
+ console.log(` ✅ Sub-result: ${result.substring(0, 80)}...`);
147
+ return result;
148
+ });
149
+
150
+ // Main query
151
+ mmix.addText(`Find the top Engineering employees who have Python skills and score above 8.5.
152
+
153
+ Use the RLM approach:
154
+ 1. First explore what fields are available
155
+ 2. Filter by department
156
+ 3. Check for Python skills
157
+ 4. Apply score threshold
158
+ 5. Summarize the results
159
+
160
+ Be efficient - only query what you need!`);
161
+
162
+ console.log('🤖 LLM Processing with RLM tools...\n');
163
+ const result = await mmix.message();
164
+
165
+ console.log('\n' + '═'.repeat(70));
166
+ console.log('💡 FINAL ANSWER:');
167
+ console.log('═'.repeat(70));
168
+ console.log(result);
169
+ console.log('═'.repeat(70));
170
+
171
+ console.log(`\n📈 Statistics:`);
172
+ console.log(` - Total recursive calls: ${recursionCount}`);
173
+ console.log(` - Dataset never fully loaded into context ✓`);
174
+ console.log(` - Programmatic exploration used ✓`);
175
+ }
176
+
177
+ try {
178
+ await basicRLM();
179
+ console.log('\n✅ RLM Basic example completed successfully!');
180
+ } catch (error) {
181
+ console.error('āŒ Error:', error);
182
+ console.error(error.stack);
183
+ }