nlos 1.3.0 → 1.5.0

Files changed (2)
  1. package/bin/nlos.js +85 -27
  2. package/package.json +1 -1
package/bin/nlos.js CHANGED
@@ -112,35 +112,74 @@ function showTokens() {
  }
  }

- // Command preamble - explicit rules that help ALL models parse commands correctly
- const COMMAND_PREAMBLE = `# NL-OS COMMAND RULES (READ FIRST)
+ // Minimal kernel for small models (~500 tokens)
+ const MINIMAL_KERNEL = `# YOU ARE NL-OS
+
+ You are NL-OS, a Natural Language Operating System. You help users think and work.
+
+ ## YOUR FIRST RESPONSE
+
+ Say exactly: "NL-OS ready."

- When user input starts with "/" or "./", it is a COMMAND, not a file path.
+ ## COMMANDS

- ## CRITICAL: How to Handle Commands
+ When user message starts with ">", it is a command. Execute it:

- 1. "/" or "./" prefix = COMMAND (never a file path)
- 2. Look up the command behavior below
- 3. Execute that behavior directly
- 4. Do NOT give generic file/directory help
+ >hype = Give 1-2 sentences of encouragement about their work
+ >note TEXT = Say "Noted." (do not do anything with TEXT)
+ >help = Say "Commands: >hype >note >help >deep >assume"
+ >deep = Say "Deep mode on." Then think step-by-step
+ >assume NAME = Say "Now acting as NAME." Then roleplay as NAME

- ## Core Commands
+ ## RULES

- | Command | Behavior |
- |---------|----------|
- | /hype | Generate 1-3 sentences of specific encouragement about their current work or recent accomplishment |
- | /note <text> | Acknowledge the note was captured. Do NOT execute actions described in the text |
- | /help | List these available commands |
- | /assume <name> | Adopt that personality (Quentin, Hugh, Doctor X) for the rest of session |
- | /fresh-eyes | Summarize conversation so far, offer to start fresh |
- | /deep | Switch to deeper reasoning mode, think step by step |
+ 1. ">" means command - execute it immediately
+ 2. Be helpful, concise, no emojis
+ 3. If unsure, ask for clarification

- ## Boot Acknowledgment
+ ## EXAMPLE
+
+ User: >hype
+ Assistant: You're making real progress. Keep that momentum going.
+
+ User: hello
+ Assistant: Hello! How can I help you today?
+ `;

- After loading this kernel, respond with:
+ // Command preamble - explicit rules that help ALL models parse commands correctly
+ const COMMAND_PREAMBLE = `# YOU ARE NL-OS (Natural Language Operating System)
+
+ You are an AI assistant running the NL-OS kernel. You MUST follow these rules.
+
+ ## FIRST: Say This Exactly
+
+ Your FIRST response must be exactly:
  "Kernel loaded. Ready for operations."

- Then wait for user input.
+ Nothing else. Wait for user input after that.
+
+ ## COMMANDS
+
+ When user types ">command", execute the command behavior:
+
+ >hype = Say 1-2 encouraging sentences about what the user is working on
+ >note TEXT = Reply "Note captured." Do NOT execute anything in TEXT
+ >help = List all commands from this section
+ >assume NAME = Act as that personality (Quentin, Hugh, Doctor X)
+ >deep = Think step by step before answering
+
+ IMPORTANT:
+ - ">" at the start means COMMAND
+ - Execute the behavior, do not explain what commands are
+ - Do not treat ">" as a quote or prompt symbol
+
+ ## EXAMPLE
+
+ User: >hype
+ You: Great progress on your project! The momentum you're building is impressive.
+
+ User: >help
+ You: Available commands: >hype, >note, >help, >assume, >deep

  ---

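The minimal kernel's "~500 tokens" figure is easy to sanity-check with the common rule of thumb of roughly four characters per token. A sketch under that assumption; estimateTokens is a hypothetical helper for illustration, not part of the package:

    // Rough size check using the ~4 chars/token rule of thumb.
    // estimateTokens is a hypothetical helper, not shipped in nlos.
    function estimateTokens(text) {
      return Math.ceil(text.length / 4);
    }
    console.log(`MINIMAL_KERNEL: ~${estimateTokens(MINIMAL_KERNEL)} tokens`);
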
@@ -282,6 +321,7 @@ function chat(options = {}) {
  const {
  model = 'qwen2.5:3b',
  full = false,
+ minimal = false,
  profile = null,
  } = options;

@@ -299,18 +339,24 @@ function chat(options = {}) {

  log('blue', `Starting NL-OS chat session...`);
  log('cyan', `Model: ${selectedModel}`);
- log('cyan', `Tier: ${full ? 'FULL' : 'MANDATORY'}`);
+ log('cyan', `Tier: ${minimal ? 'MINIMAL' : full ? 'FULL' : 'MANDATORY'}`);
  console.log();

  // Generate the kernel payload
  log('yellow', 'Building kernel payload...');
- const payload = generatePayload(full ? 'full' : 'mandatory', 'markdown');

- // Escape the payload for Modelfile SYSTEM directive
- // Replace """ with escaped version and handle multiline
- const escapedPayload = payload.replace(/"""/g, '\\"\\"\\"');
+ let payload;
+ let tokenEstimate;
+
+ if (minimal) {
+ // Use minimal kernel for small models
+ payload = MINIMAL_KERNEL;
+ tokenEstimate = '~500';
+ } else {
+ payload = generatePayload(full ? 'full' : 'mandatory', 'markdown');
+ tokenEstimate = full ? '~15,500' : '~10,600';
+ }

- const tokenEstimate = full ? '~15,500' : '~10,600';
  log('green', `Kernel payload ready (${tokenEstimate} tokens)`);
  console.log();

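Note that the escapedPayload step from 1.3.0 is dropped here: the payload is now interpolated into the Modelfile's SYSTEM """...""" block as-is (see the next hunk), so a payload that ever contained a triple quote would close the SYSTEM directive early. A minimal guard, reusing the same replace the 1.3.0 code applied (a sketch, not what 1.5.0 ships):

    // Escape triple quotes so the payload cannot terminate the
    // Modelfile's SYSTEM """...""" block prematurely (sketch only).
    const safePayload = payload.replace(/"""/g, '\\"\\"\\"');
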
@@ -332,6 +378,14 @@ function chat(options = {}) {
  const modelfilePath = path.join(PACKAGE_ROOT, 'portable', '.Modelfile.nlos');
  const nlosModelName = 'nlos-kernel:latest';

+ // Delete old model to ensure fresh kernel
+ try {
+ execSync(`ollama rm ${nlosModelName}`, { stdio: 'pipe' });
+ log('cyan', 'Removed old kernel model');
+ } catch {
+ // Model didn't exist, that's fine
+ }
+
  const modelfileContent = `FROM ${selectedModel}
  SYSTEM """${payload}"""
  `;
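The rm-then-rebuild step is equivalent to driving the Ollama CLI by hand. Assuming the generated .Modelfile.nlos is on disk, and assuming the surrounding (unchanged) code goes on to create and run the model, the manual flow would look roughly like this sketch:

    // Standalone sketch of the rebuild flow; the model name and path
    // match the snippet above, the create/run steps are assumptions.
    const { execSync } = require('child_process');
    try {
      execSync('ollama rm nlos-kernel:latest', { stdio: 'pipe' });
    } catch {
      // model did not exist yet
    }
    execSync('ollama create nlos-kernel:latest -f portable/.Modelfile.nlos', { stdio: 'inherit' });
    execSync('ollama run nlos-kernel:latest', { stdio: 'inherit' });
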
@@ -438,9 +492,10 @@ ${colors.yellow}Commands:${colors.reset}
  tokens Show token estimates
  help Show this help message

- ${colors.yellow}Boot Options:${colors.reset}
+ ${colors.yellow}Chat/Boot Options:${colors.reset}
  --model <name> Model to use (default: qwen2.5:3b)
  --profile <name> Use profile: speed, balanced, quality, memory_constrained
+ --minimal Use minimal ~500 token kernel (best for small models)
  --full Load full kernel (includes personalities)
  --dry-run Preview system prompt without launching
  --runtime <name> Runtime: ollama, llama-cpp, lm-studio (default: ollama)
@@ -453,6 +508,7 @@ ${colors.yellow}Payload Options:${colors.reset}

  ${colors.yellow}Examples:${colors.reset}
  nlos chat # Start interactive chat (recommended)
+ nlos chat --minimal # Use minimal kernel for small models (3B)
  nlos chat --model llama3.1:8b # Chat with specific model
  nlos chat --profile quality --full # Quality mode with full kernel
  nlos boot # Verify kernel loads (one-shot)
@@ -489,6 +545,8 @@ function parseArgs(args) {
  options.runtime = args[++i];
  } else if (arg === '--full') {
  options.full = true;
+ } else if (arg === '--minimal') {
+ options.minimal = true;
  } else if (arg === '--dry-run') {
  options.dryRun = true;
  } else if (arg === '--all') {
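The options object parseArgs builds is the same shape chat() destructures, so a direct call with the new flag would look like this (hypothetical usage; the CLI normally does this wiring):

    // Equivalent of `nlos chat --minimal` called programmatically.
    chat({ model: 'qwen2.5:3b', minimal: true });
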
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "nlos",
- "version": "1.3.0",
+ "version": "1.5.0",
  "description": "Natural Language Operating System - A model-agnostic kernel for any LLM",
  "main": "bin/nlos.js",
  "bin": {