soma-lite 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,69 @@
1
+ # SOMA Lite
2
+
3
+ **~700 lines that give any LLM an infinite autonomous horizon.**
4
+
5
+ SOMA Lite is a minimal, zero-dependency implementation of the Sovereign Operating Memory Architecture. It provides persistent memory, terminal state, and tool execution for any LLM via JSON tool calls.
6
+
7
+ ## Quick Start
8
+
9
+ ```bash
10
+ # Install
11
+ npm install soma-lite
12
+
13
+ # Run
14
+ npx soma-lite --task "Create a hello.js that prints Hello World"
15
+ ```
16
+
17
+ ## Features
18
+
19
+ - **Memory Architecture (L1/L2/L3)**: Persistent context across turns
20
+ - **Stateful Terminal**: `cd`, `export`, and variables persist between commands
21
+ - **Pressure Memory (Pm)**: Automatic context management with checkpointing
22
+ - **Zero Dependencies**: Pure Node.js, ~700 lines of code
23
+
24
+ ## Architecture
25
+
26
+ ```
27
+ soma-lite.js → Kernel: memory, tools, terminal, prompt builder
28
+ run-agent-lite.js → Agent loop: LLM calls, JSON parsing, tool execution
29
+ ```
30
+
31
+ ## Usage
32
+
33
+ ```bash
34
+ node run-agent-lite.js --task "Your task here" --max-turns 10
35
+ ```
36
+
37
+ Options:
38
+ - `--task` : Task description
39
+ - `--max-turns` : Max turns (default: 5)
40
+ - `--provider` : `google`, `openrouter`, `anthropic`, `openai`
41
+ - `--model` : Model name
42
+ - `--workspace` : Working directory
43
+ - `--debug` : Show L1 memory content
44
+ - `--rpm` : Requests per minute limit
45
+
46
+ ## LLM Communication Protocol
47
+
48
+ SOMA Lite uses simple JSON tool calls:
49
+
50
+ ```json
51
+ {"tool": "execute_command", "args": {"command": "ls -la", "reason": "List files"}}
52
+ {"tool": "write_file", "args": {"path": "test.js", "content": "console.log(1)"}}
53
+ {"tool": "checkpoint", "args": {"description": "Core features done"}}
54
+ {"tool": "finish_task", "args": {"status": "success", "summary": "Task complete"}}
55
+ ```
56
+
57
+ ## Memory Layers
58
+
59
+ - **L1**: Current context (identity + dashboard + action log)
60
+ - **L2**: Episodic memory (session_log.jsonl + turn folders)
61
+ - **L3**: Persistent knowledge (identity.md, task.md, CHANGELOG.md)
62
+
63
+ ## License
64
+
65
+ MIT
66
+
67
+ ## Author
68
+
69
+ Mario Raúl Carbonell Martínez
package/package.json ADDED
@@ -0,0 +1,30 @@
1
+ {
2
+ "name": "soma-lite",
3
+ "version": "1.0.0",
4
+ "description": "SOMA Lite — Sovereign Operating Memory Architecture (Zero-Dependency)",
5
+ "author": "Mario Raúl Carbonell Martínez <marioraulcarbonell@gmail.com>",
6
+ "main": "soma-lite.js",
7
+ "bin": {
8
+ "soma-lite": "./run-agent-lite.js"
9
+ },
10
+ "keywords": [
11
+ "ai",
12
+ "agent",
13
+ "soma",
14
+ "autonomous",
15
+ "memory",
16
+ "llm"
17
+ ],
18
+ "license": "MIT",
19
+ "engines": {
20
+ "node": ">=18"
21
+ },
22
+ "scripts": {
23
+ "start": "node run-agent-lite.js",
24
+ "test": "node soma-lite.js"
25
+ },
26
+ "repository": {
27
+ "type": "git",
28
+ "url": "https://github.com/mcarbonell/soma-lite"
29
+ }
30
+ }
package/run-agent-lite.js ADDED
@@ -0,0 +1,534 @@
1
+ #!/usr/bin/env node
2
+
3
+ const fs = require('fs');
4
+ const path = require('path');
5
+ const { SOMALite } = require('./soma-lite');
6
+
7
// Load environment variables from a .env file in the current working
// directory (zero-dependency stand-in for the `dotenv` package).
// Existing process.env entries ARE overwritten, matching the original
// behavior of this script.
function loadEnvManually() {
  const envPath = path.join(process.cwd(), '.env');
  if (!fs.existsSync(envPath)) return;

  const content = fs.readFileSync(envPath, 'utf-8');
  for (const rawLine of content.split('\n')) {
    // Trim handles CRLF files, which previously left a trailing '\r' in values.
    const line = rawLine.trim();
    // Skip blanks, comments and lines without a '='.
    if (!line || line.startsWith('#') || !line.includes('=')) continue;

    const [k, ...vParts] = line.split('=');
    const key = k.trim();
    // Values may themselves contain '=' (e.g. base64), so re-join the rest.
    let value = vParts.join('=').trim();
    // Strip one pair of matching surrounding quotes: KEY="v" or KEY='v'.
    if (value.length >= 2 &&
        ((value.startsWith('"') && value.endsWith('"')) ||
         (value.startsWith("'") && value.endsWith("'")))) {
      value = value.slice(1, -1);
    }
    if (key) {
      process.env[key] = value;
    }
  }
}
25
+
26
/**
 * Extract the first JSON object or array embedded in an LLM reply.
 *
 * Strips a surrounding markdown code fence and common XML/thinking wrappers,
 * then scans for a balanced {...} or [...] span. The scan is string-aware,
 * so braces inside JSON string literals (e.g. {"cmd": "echo }"}) no longer
 * break the balance count, which was a bug in the previous implementation.
 *
 * @param {string} text - Raw model output.
 * @returns {object|Array|null} Parsed JSON value, or null when none found.
 */
function extractJson(text) {
  text = text.trim();

  // Remove a surrounding markdown code fence (``` or ```json).
  if (text.startsWith('```')) {
    const lines = text.split('\n');
    if (lines[0].startsWith('```')) {
      lines.shift();
    }
    // Guard against an empty array: input consisting only of "```" used to
    // crash here on undefined.startsWith.
    if (lines.length && lines[lines.length - 1].startsWith('```')) {
      lines.pop();
    }
    text = lines.join('\n').trim();
  }

  // Strip thinking / tool-call XML wrappers some models emit.
  text = text.replace(/<thinking>[\s\S]*?<\/thinking>/g, '');
  text = text.replace(/<tool_call>[\s\S]*?<\/tool_call>/g, '');
  text = text.replace(/<invoke>[\s\S]*?<\/invoke>/g, '');
  text = text.replace(/<function=[^>]*>[\s\S]*?<\/function>/g, '');
  text = text.replace(/<parameter=[^>]*>[\s\S]*?<\/parameter>/g, '');
  text = text.trim();

  try {
    // Find the first '{' or '[' — whichever comes first wins.
    const firstBrace = text.indexOf('{');
    const firstBracket = text.indexOf('[');

    let start = -1;
    let openChar, closeChar;

    if (firstBrace !== -1 && (firstBracket === -1 || firstBrace < firstBracket)) {
      start = firstBrace;
      openChar = '{';
      closeChar = '}';
    } else if (firstBracket !== -1) {
      start = firstBracket;
      openChar = '[';
      closeChar = ']';
    }

    if (start === -1) return null;

    // Balanced scan that ignores delimiters inside JSON string literals.
    let depth = 0;
    let end = -1;
    let inString = false;
    let escaped = false;
    for (let i = start; i < text.length; i++) {
      const ch = text[i];
      if (inString) {
        if (escaped) escaped = false;
        else if (ch === '\\') escaped = true;
        else if (ch === '"') inString = false;
        continue;
      }
      if (ch === '"') inString = true;
      else if (ch === openChar) depth++;
      else if (ch === closeChar) {
        depth--;
        if (depth === 0) {
          end = i;
          break;
        }
      }
    }

    if (end === -1) return null;

    return JSON.parse(text.substring(start, end + 1));
  } catch (e) {
    // Malformed JSON is an expected failure mode; caller handles null.
    return null;
  }
}
91
+
92
// Rough token estimate using the common ~4-characters-per-token heuristic.
function estimateTokens(text) {
  const APPROX_CHARS_PER_TOKEN = 4;
  return Math.floor(text.length / APPROX_CHARS_PER_TOKEN);
}
95
+
96
// Cap a prompt at maxTokens (≈4 chars/token). Prompts within budget are
// returned untouched; oversized ones are cut and annotated with how many
// characters were dropped.
function truncatePrompt(prompt, maxTokens) {
  const maxChars = maxTokens * 4;
  // Same heuristic as estimateTokens, inlined here.
  const withinBudget = Math.floor(prompt.length / 4) <= maxTokens;
  if (withinBudget) {
    return prompt;
  }

  const kept = prompt.substring(0, maxChars);
  const omitted = prompt.length - maxChars;
  return `${kept}\n\n[... CONTENIDO TRUNCADO: ${omitted} caracteres omitidos para ajustarse al contexto ...]`;
}
106
+
107
// Call OpenRouter's chat-completions endpoint. OpenRouter speaks the
// OpenAI dialect, so the parsed JSON response is returned as-is.
// Throws on any non-2xx HTTP status.
async function callOpenRouter(apiKey, model, messages, maxTokens) {
  const payload = {
    model: model,
    messages: messages,
    temperature: 0.0,
    max_tokens: maxTokens
  };

  const response = await fetch('https://openrouter.ai/api/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
      'HTTP-Referer': 'https://github.com/soma-agent',
      'X-Title': 'SOMA Lite Agent'
    },
    body: JSON.stringify(payload)
  });

  if (!response.ok) {
    throw new Error(`OpenRouter API error: ${response.status} - ${await response.text()}`);
  }

  return response.json();
}
131
+
132
// Call the Gemini generateContent endpoint. Gemini uses a different wire
// format: roles are 'user'/'model' and text lives under parts[]. Any
// message that is not role 'user' (including 'system') is sent as 'model',
// matching the original behavior. The response is normalized back to the
// OpenAI-style shape the agent loop expects.
async function callGoogle(apiKey, model, messages, maxTokens) {
  const contents = messages.map((msg) => ({
    role: msg.role === 'user' ? 'user' : 'model',
    parts: [{ text: msg.content }]
  }));

  const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`;
  const response = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      contents: contents,
      generationConfig: {
        temperature: 0.0,
        maxOutputTokens: maxTokens
      }
    })
  });

  if (!response.ok) {
    throw new Error(`Google API error: ${response.status} - ${await response.text()}`);
  }

  const data = await response.json();
  // Missing candidates (e.g. safety block) degrade to an empty reply.
  const text = data.candidates?.[0]?.content?.parts?.[0]?.text || '';
  return {
    choices: [{ message: { content: text } }]
  };
}
170
+
171
// Call the Anthropic Messages endpoint. Anthropic takes the system prompt
// as a dedicated top-level field rather than as a chat message, so system
// messages are split out (the last one wins) and the remaining conversation
// is forwarded. The response is normalized to the OpenAI-style shape.
async function callAnthropic(apiKey, model, messages, maxTokens) {
  const systemMessages = messages.filter((m) => m.role === 'system');
  const system = systemMessages.length
    ? systemMessages[systemMessages.length - 1].content
    : "";
  const conversation = messages
    .filter((m) => m.role !== 'system')
    .map((m) => ({ role: m.role, content: m.content }));

  const response = await fetch('https://api.anthropic.com/v1/messages', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01'
    },
    body: JSON.stringify({
      model: model,
      max_tokens: maxTokens,
      temperature: 0.0,
      system: system,
      messages: conversation
    })
  });

  if (!response.ok) {
    throw new Error(`Anthropic API error: ${response.status} - ${await response.text()}`);
  }

  const data = await response.json();
  return {
    choices: [{
      message: { content: data.content?.[0]?.text || '' }
    }]
  };
}
216
+
217
// Call the OpenAI chat-completions endpoint. The response is already in
// the shape the agent loop expects, so it is returned untouched.
// Throws on any non-2xx HTTP status.
async function callOpenAI(apiKey, model, messages, maxTokens) {
  const payload = JSON.stringify({
    model: model,
    messages: messages,
    temperature: 0.0,
    max_tokens: maxTokens
  });

  const response = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`
    },
    body: payload
  });

  if (response.ok) {
    return response.json();
  }

  const errorText = await response.text();
  throw new Error(`OpenAI API error: ${response.status} - ${errorText}`);
}
239
+
240
// Dispatch an inference request to the provider-specific client.
// Rejects with an error for unknown providers.
async function callInference(provider, apiKey, model, messages, maxTokens) {
  const handlers = {
    openrouter: callOpenRouter,
    google: callGoogle,
    anthropic: callAnthropic,
    openai: callOpenAI
  };

  const handler = handlers[provider];
  if (!handler) {
    throw new Error(`Proveedor no soportado: ${provider}`);
  }
  return handler(apiKey, model, messages, maxTokens);
}
254
+
255
// Resolve the API key for a provider from its conventional environment
// variable. Returns null for unknown providers or unset/empty variables.
function getApiKey(provider) {
  const PROVIDER_ENV_VARS = {
    'openrouter': 'OPENROUTER_API_KEY',
    'google': 'GOOGLE_API_KEY',
    'anthropic': 'ANTHROPIC_API_KEY',
    'openai': 'OPENAI_API_KEY'
  };

  const varName = PROVIDER_ENV_VARS[provider];
  if (!varName) return null;

  // Empty string counts as "not configured", like the original `|| null`.
  return process.env[varName] || null;
}
266
+
267
// Default context-window size (in tokens) per provider. The `model`
// parameter is accepted for future model-specific overrides but is
// currently unused; unknown providers fall back to 128k.
function getContextWindow(model, provider) {
  switch (provider) {
    case 'google':
      return 1000000;  // Gemini: 1M-token context
    case 'anthropic':
      return 200000;   // Claude 3
    case 'openrouter':
    case 'openai':
    default:
      return 128000;
  }
}
277
+
278
// Promise-based delay: resolves (with undefined) after `ms` milliseconds.
async function sleep(ms) {
  await new Promise((resolve) => setTimeout(resolve, ms));
}
281
+
282
/**
 * Main agent loop: parse CLI flags, validate the provider API key, prepare
 * the SOMA workspace/task files, then run up to `maxTurns` iterations of
 * build-prompt → call-LLM → parse-JSON → execute-tools.
 *
 * Exits the process with code 1 when no API key is configured; returns
 * normally when the agent calls finish_task, an API error occurs, or the
 * turn budget is exhausted.
 *
 * NOTE(review): depends on SOMALite (l3Path, l2Path, buildPrompt,
 * calculatePm, invokeTool, logEpisodicMemory, logToL2, terminal) defined in
 * ./soma-lite — semantics not visible from this file.
 */
async function runAgent() {
  loadEnvManually();

  // Parse CLI arguments manually (no external dependencies). These are the
  // effective defaults; flags below override them.
  const args = {
    task: null,
    maxTurns: 5,
    model: 'gemini-flash-lite-latest',
    provider: 'google',
    workspace: 'agent_workspace_test_2',
    debug: false,
    rpm: 5,
    contextWindow: null,
    maxTokens: 4096
  };

  for (let i = 2; i < process.argv.length; i++) {
    const arg = process.argv[i];
    const nextArg = process.argv[i + 1];

    // Value-taking flags consume the next argv entry (hence the extra i++).
    switch (arg) {
      case '--task':
        args.task = nextArg;
        i++;
        break;
      case '--max-turns':
        args.maxTurns = parseInt(nextArg);
        i++;
        break;
      case '--model':
        args.model = nextArg;
        i++;
        break;
      case '--provider':
        args.provider = nextArg;
        i++;
        break;
      case '--workspace':
        args.workspace = nextArg;
        i++;
        break;
      case '--debug':
        args.debug = true;
        break;
      case '--rpm':
        args.rpm = parseInt(nextArg);
        i++;
        break;
      case '--context-window':
        args.contextWindow = parseInt(nextArg);
        i++;
        break;
      case '--max-tokens':
        args.maxTokens = parseInt(nextArg);
        i++;
        break;
    }
  }

  // Delay between turns (ms) so the loop stays under the RPM budget.
  const turnDelay = args.rpm > 0 ? (60.0 / args.rpm) * 1000 : 1000;

  // Verify the API key for the chosen provider before doing any work.
  const apiKey = getApiKey(args.provider);
  if (!apiKey) {
    console.error(`❌ ERROR: API Key para ${args.provider} no encontrada.`);
    console.error(` Define ${args.provider.toUpperCase()}_API_KEY en tu archivo .env`);
    process.exit(1);
  }

  const contextWindow = args.contextWindow || getContextWindow(args.model, args.provider);
  console.log(`📏 Context window: ${contextWindow.toLocaleString()} tokens`);

  const workspacePath = path.resolve(args.workspace);
  console.log(`🚀 Iniciando SOMA Lite Agent Loop en: ${workspacePath}`);
  console.log(`🤖 Proveedor: ${args.provider} | Modelo: ${args.model} | Turnos: ${args.maxTurns}`);

  const soma = new SOMALite(workspacePath);

  // Set up the task file (L3 persistent memory). An explicit --task always
  // overwrites; otherwise a default demo task is written only if none exists.
  const taskFile = path.join(soma.l3Path, 'task.md');
  if (args.task) {
    const taskContent = `# OBJETIVO DE LA TAREA\n${args.task}\n`;
    fs.writeFileSync(taskFile, taskContent, 'utf-8');
  } else {
    if (!fs.existsSync(taskFile)) {
      const taskContent = `# OBJETIVO DE LA TAREA\nCrea un script 'hola.js' que imprima la fecha actual y ejecútalo.\n`;
      fs.writeFileSync(taskFile, taskContent, 'utf-8');
    }
  }
  const sysInstr = "You are SOMA Lite, an autonomous software engineer. Follow the protocol and rules defined in your <identity> context.";

  // Token budget left for the L1 prompt after reserving output tokens and
  // the system instruction.
  const availableContext = contextWindow - args.maxTokens - estimateTokens(sysInstr);
  console.log(`📏 Espacio disponible para L1: ${availableContext.toLocaleString()} tokens`);

  // Prepare the debug directory; previous L1 dumps are cleared each run.
  const debugDir = path.join(soma.l2Path, 'debug');
  if (args.debug) {
    if (!fs.existsSync(debugDir)) {
      fs.mkdirSync(debugDir, { recursive: true });
    }
    // Remove L1_* debug files left over from previous runs.
    for (const f of fs.readdirSync(debugDir)) {
      if (f.startsWith('L1_')) {
        fs.unlinkSync(path.join(debugDir, f));
      }
    }
  }

  for (let i = 0; i < args.maxTurns; i++) {
    const prompt = soma.buildPrompt();
    const pm = soma.calculatePm();

    if (args.debug) {
      // Dump the exact L1 memory the agent will see this turn.
      const debugFile = path.join(debugDir, `L1_${i}.txt`);
      const timestamp = new Date().toISOString().replace('T', ' ').substring(0, 19);
      const debugContent = `=== SOMA Lite Debug - Turno ${i} ===
Timestamp: ${timestamp}
Pm: ${pm.toFixed(2)}%
Turns since checkpoint: ${soma.turnsSinceCheckpoint}
CWD (Terminal): ${soma.terminal.cwd}

=== CONTENIDO L1 (Lo que ve el agente) ===

${prompt}

=== FIN L1 ===
`;
      fs.writeFileSync(debugFile, debugContent, 'utf-8');
      console.log(`\n💾 Debug: L1 guardado en ${debugFile}`);
      console.log('\n🔍 DEBUG: CONTENIDO MEMORIA L1 🔍');
      console.log('-'.repeat(50));
      console.log(prompt);
      console.log('-'.repeat(50));
      console.log(`📊 Pm Calculado: ${pm.toFixed(2)}%`);
      console.log('-'.repeat(50) + '\n');
    }

    console.log(`\n${'='.repeat(40)}`);
    console.log(`🔄 Turno ${i + 1}/${args.maxTurns} - Pensando (${args.provider})...`);

    let agentReply = "";
    let finalPrompt = prompt;
    try {
      const promptTokens = estimateTokens(prompt);

      // Truncate the prompt when it would overflow the model context.
      if (promptTokens > availableContext) {
        console.log(`⚠️ Prompt excede el contexto (${promptTokens} > ${availableContext} tokens). Truncando...`);
        finalPrompt = truncatePrompt(prompt, availableContext);
        console.log(`📏 Prompt truncado a ${estimateTokens(finalPrompt)} tokens`);
      }

      const messages = [
        { role: 'system', content: sysInstr },
        { role: 'user', content: finalPrompt }
      ];

      const response = await callInference(args.provider, apiKey, args.model, messages, args.maxTokens);
      agentReply = response.choices[0].message.content;

      console.log(`🤖 Respuesta:\n${agentReply}`);

      // Save L2 episodic memory (prompt sent + raw model reply).
      soma.logEpisodicMemory(finalPrompt, agentReply);
    } catch (err) {
      // API failures abort the whole run rather than burning more turns.
      console.error(`❌ Error de API: ${err.message}`);
      if (args.debug) {
        console.error(err.stack);
      }
      break;
    }

    if (args.debug) {
      const rawFile = path.join(debugDir, `RAW_turn_${i}.txt`);
      fs.writeFileSync(rawFile, `=== Turno ${i} - RESPUESTA RAW DEL MODELO ===\n\n${agentReply}\n\n=== FIN ===`, 'utf-8');
    }

    // Parse the JSON tool call; malformed output consumes a turn but the
    // error is fed back to the agent through L2 so it can self-correct.
    const action = extractJson(agentReply);
    if (!action) {
      console.log("❌ Error de parseo JSON. Reintentando...");
      if (args.debug) {
        const errorFile = path.join(debugDir, `JSON_ERROR_turn_${i}.txt`);
        fs.writeFileSync(errorFile, `=== Turno ${i} - JSON Parse Error ===\n\nRespuesta del agente:\n${agentReply}\n\n=== FIN ===`, 'utf-8');
        console.log(`💾 Respuesta malformada guardada en: ${errorFile}`);
      }
      soma.logToL2("JSON_ERROR", { raw_response: agentReply.substring(0, 200) }, "Error: Envía solo JSON válido.");
      continue;
    }

    // The model may send a single tool call or an array of them.
    const actions = Array.isArray(action) ? action : [action];
    let taskFinished = false;

    for (const act of actions) {
      const tool = act.tool;
      const argsTool = act.args || {};

      console.log(`🛠️ Ejecutando: ${tool}`);
      const { result, warning } = soma.invokeTool(tool, argsTool);

      console.log(`📄 Resultado (trunc): ${String(result).substring(0, 150)}...`);
      if (warning) {
        console.log(`⚠️ ${warning}`);
      }

      // finish_task ends the run immediately, skipping remaining actions.
      if (tool === "finish_task") {
        const status = (argsTool.status || 'N/A').toUpperCase();
        console.log(`\n✅ ¡TAREA FINALIZADA POR EL AGENTE! [STATUS: ${status}]`);
        console.log(`Resumen: ${argsTool.summary}`);
        taskFinished = true;
        break;
      }
    }

    if (taskFinished) break;

    // Respect the RPM budget before the next turn (no wait after the last).
    if (i < args.maxTurns - 1) {
      console.log(`⏳ Esperando ${(turnDelay / 1000).toFixed(1)}s para respetar límite de ${args.rpm} RPM...`);
      await sleep(turnDelay);
    }
  }
}
502
+
503
// Show help and exit before doing any work. Defaults shown here now match
// the actual defaults in runAgent (provider: google, model:
// gemini-flash-lite-latest) — the previous text claimed openrouter /
// stepfun/step-3.5-flash:free, which the code never used.
if (process.argv.includes('--help') || process.argv.includes('-h')) {
  console.log(`
SOMA Lite Agent Runner (Node.js)

Uso: node run-agent-lite.js [opciones]

Opciones:
  --task "descripción"   Tarea a realizar por el agente
  --max-turns N          Máximo de turnos (default: 5)
  --model "modelo"       Modelo a usar (default: gemini-flash-lite-latest)
  --provider PROV        Proveedor: openrouter|google|anthropic|openai (default: google)
  --workspace DIR        Directorio de trabajo (default: agent_workspace_test_2)
  --debug                Muestra contenido de memoria L1 y Pm en cada turno
  --rpm N                Máximo de peticiones por minuto (default: 5)
  --context-window N     Override del tamaño de contexto
  --max-tokens N         Máximo de tokens de salida (default: 4096)
  --help, -h             Muestra esta ayuda

Ejemplos:
  node run-agent-lite.js --task "Crea un script hola.js" --max-turns 5
  node run-agent-lite.js --provider google --model gemini-3.1-flash-lite-preview --task "Analiza archivos"
  node run-agent-lite.js --provider anthropic --model claude-3-5-haiku-20241022 --task "Refactoriza código"
`);
  process.exit(0);
}

// Entry point: run the agent loop and fail loudly on unhandled errors.
runAgent().catch(err => {
  console.error('Error fatal:', err);
  process.exit(1);
});