nlos 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/nlos.js +85 -5
- package/package.json +1 -1
package/bin/nlos.js
CHANGED
@@ -6,7 +6,8 @@
  * A model-agnostic kernel that turns any LLM into a cognitive operating system.
  *
  * Usage:
- *   nlos
+ *   nlos chat [options]      Interactive NL-OS chat session (recommended)
+ *   nlos boot [options]      Boot NL-OS and verify kernel loads
  *   nlos payload [options]   Generate portable kernel payloads
  *   nlos verify              Verify kernel files exist
  *   nlos tokens              Show token estimates
@@ -247,6 +248,79 @@ function boot(options = {}) {
   }
 }

+function chat(options = {}) {
+  const {
+    model = 'qwen2.5:3b',
+    full = false,
+    profile = null,
+  } = options;
+
+  // Resolve model based on profile
+  let selectedModel = model;
+  if (profile) {
+    const profiles = {
+      speed: 'qwen2.5:3b',
+      balanced: 'mistral:7b',
+      quality: 'llama3.1:8b',
+      memory_constrained: 'qwen2.5:3b',
+    };
+    selectedModel = profiles[profile] || model;
+  }
+
+  log('blue', `Starting NL-OS chat session...`);
+  log('cyan', `Model: ${selectedModel}`);
+  log('cyan', `Tier: ${full ? 'FULL' : 'MANDATORY'}`);
+  console.log();
+
+  // Generate the kernel payload
+  log('yellow', 'Building kernel payload...');
+  const payload = generatePayload(full ? 'full' : 'mandatory', 'markdown');
+
+  // Write to temp file (ollama --system has length limits, file is safer)
+  const tempPayloadPath = path.join(PACKAGE_ROOT, 'portable', '.kernel-payload-session.md');
+  fs.mkdirSync(path.dirname(tempPayloadPath), { recursive: true });
+  fs.writeFileSync(tempPayloadPath, payload);
+
+  const tokenEstimate = full ? '~15,500' : '~10,600';
+  log('green', `Kernel payload ready (${tokenEstimate} tokens)`);
+  console.log();
+
+  // Check if model exists locally
+  try {
+    execSync(`ollama list | grep -q "${selectedModel.split(':')[0]}"`, { stdio: 'pipe' });
+  } catch {
+    log('yellow', `Model ${selectedModel} not found locally. Pulling...`);
+    try {
+      execSync(`ollama pull ${selectedModel}`, { stdio: 'inherit' });
+    } catch (error) {
+      log('red', `Failed to pull model: ${error.message}`);
+      process.exit(1);
+    }
+  }
+
+  log('green', `Launching interactive session with ${selectedModel}...`);
+  log('cyan', '─'.repeat(60));
+  console.log();
+
+  // Spawn interactive ollama session with system prompt from file
+  const child = spawn('ollama', ['run', selectedModel, '--system', payload], {
+    stdio: 'inherit',
+  });
+
+  child.on('error', (error) => {
+    log('red', `Error: ${error.message}`);
+    log('yellow', 'Make sure Ollama is installed and running: https://ollama.ai');
+    process.exit(1);
+  });
+
+  child.on('exit', (code) => {
+    console.log();
+    log('cyan', '─'.repeat(60));
+    log('blue', 'NL-OS session ended.');
+    process.exit(code || 0);
+  });
+}
+
 function payload(options = {}) {
   const {
     tier = 'mandatory',
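A note on the availability check in the hunk above: piping ollama list through grep -q assumes a POSIX shell with grep on PATH, so on stock Windows the check always fails and falls through to a pull. A shell-independent variant could inspect the listing in Node instead. This is a sketch only, not part of the published package, and the helper name is hypothetical; it assumes only that ollama list prints one installed model name per line.

const { execSync } = require('child_process');

// Hypothetical helper: true if the `ollama list` output mentions the
// model's base name (tag-agnostic, matching the grep in the diff above).
function modelIsInstalled(model) {
  try {
    const out = execSync('ollama list', { stdio: 'pipe' }).toString();
    return out.includes(model.split(':')[0]);
  } catch {
    return false; // ollama missing or daemon not running
  }
}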
@@ -298,7 +372,8 @@ ${colors.yellow}Usage:${colors.reset}
   nlos <command> [options]

 ${colors.yellow}Commands:${colors.reset}
-
+  chat      Interactive NL-OS chat session (recommended)
+  boot      Boot NL-OS and verify kernel loads
   payload   Generate portable kernel payloads
   verify    Verify kernel files exist
   tokens    Show token estimates
@@ -318,9 +393,10 @@ ${colors.yellow}Payload Options:${colors.reset}
   --all     Generate all variants

 ${colors.yellow}Examples:${colors.reset}
-  nlos
-  nlos
-  nlos
+  nlos chat                            # Start interactive chat (recommended)
+  nlos chat --model llama3.1:8b        # Chat with specific model
+  nlos chat --profile quality --full   # Quality mode with full kernel
+  nlos boot                            # Verify kernel loads (one-shot)
   nlos boot --dry-run                  # Preview system prompt
   nlos payload                         # Generate default payload
   nlos payload --all                   # Generate all payloads
@@ -372,6 +448,10 @@ const command = args[0];
 const options = parseArgs(args.slice(1));

 switch (command) {
+  case 'chat':
+    chat(options);
+    break;
+
   case 'boot':
     boot(options);
     break;
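The new dispatch hands the parsed flags straight to chat(). parseArgs itself is outside this diff, so the shape below is an assumption inferred from the options chat() destructures and the flags shown in the Examples section; a minimal sketch of a compatible parser:

// Sketch only: the package's real parseArgs is not shown in this diff.
// Flag names come from the help text above; handling of unknown flags
// is an assumption.
function parseArgs(argv) {
  const options = {};
  for (let i = 0; i < argv.length; i++) {
    switch (argv[i]) {
      case '--full':    options.full = true; break;         // boolean flag
      case '--model':   options.model = argv[++i]; break;   // takes a value
      case '--profile': options.profile = argv[++i]; break; // takes a value
    }
  }
  return options;
}

// e.g. parseArgs(['--profile', 'quality', '--full'])
//   -> { profile: 'quality', full: true }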