naisys 1.5.0 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE.md CHANGED
File without changes
package/README.md CHANGED
@@ -1,13 +1,18 @@
1
1
  ## NAISYS (Node.js Autonomous Intelligence System)
2
2
 
3
- NAISYS acts as a proxy shell between LLM(s) and a real shell. The goal is to see how far a LLM can
4
- get into writing a website from scratch as well as work with other LLM agents on the same project. Trying to figure
5
- out what works and what doesn't when it comes to 'cognitive architectures' for autonomy. NAISYS isn't
6
- limited to websites, but it seemed like a good place to start.
3
+ NAISYS allows any LLM you want to operate a standard Linux shell given your instructions. You can control how much
4
+ to spend, the maximum number of tokens to use per session, how long to wait between commands, etc.. Between each command
5
+ NAISYS will wait a few seconds to accept any input you want to put in yourself in case you want to collaborate with the
6
+ LLM, give it hints, and/or diagnose the session. Once the LLM reaches the token max you specified for the session it
7
+ will wrap things up, and start a fresh shell for the LLM to continue on its work.
7
8
 
8
- Since the LLM has a limited context, NAISYS takes this into account and helps the LLM
9
- perform 'context friendly' operations. For example reading/writing a file can't use a typical editor like
10
- vim or nano so point the LLM to use cat to read/write files in a single operation.
9
+ NAISYS tries to be a minimal wrapper, just helping the LLM operate in the shell 'better'. Making commands 'context friendly'. For instance if a command is long running, NAISYS will interrupt it, show the LLM the current output, and ask the LLM what it wants to
10
+ do next - wait, kill, or send input. The custom command prompt helps the LLM keep track of its token usage during the session. The 'comment' command helps the LLM think out loud without putting invalid commands into the shell.
11
+
12
+ Some use cases are building websites, diagnosing a system for security concerns, mapping out the topology of the local
13
+ network, learning and performing arbitrary tasks, or just plain exploring the limits of autonomy. NAISYS has a built-in
14
+ system for inter-agent communication. You can manually start up multiple instances of NAISYS with different roles, or
15
+ you can allow agents to start their own sub-agents on demand with instructions defined by the LLM itself!
11
16
 
12
17
  [NPM](https://www.npmjs.com/package/naisys) | [Website](https://naisys.org) | [Discord](https://discord.gg/JBUPWSbaEt) | [Demo Video](https://www.youtube.com/watch?v=Ttya3ixjumo)
13
18
 
@@ -194,9 +199,18 @@ initialCommands:
194
199
  - To use NAISYS on Windows you need to run it locally from source (or from within WSL)
195
200
  - Use the above instructions to install locally, and then continue with the instructions below
196
201
  - Install WSL (Windows Subsystem for Linux)
202
+ - Install a Linux distribution, Ubuntu can easily be installed from the Microsoft Store
203
+ - Make sure the checked out code preserves the original line endings
204
+ - Files in the /bin folder should have LF endings only, not CRLF
197
205
  - The `NAISYS_FOLDER` and `WEBSITE_FOLDER` should be set to the WSL path
198
206
  - So `C:\var\naisys` should be `/mnt/c/var/naisys` in the `.env` file
199
207
 
208
+ #### Notes for MacOS users
209
+
210
+ - The browser llmynx requires `timeout` and `lynx`. Run these commands to install them:
211
+ - `brew install coreutils`
212
+ - `brew install lynx`
213
+
200
214
  #### Using NAISYS for a website
201
215
 
202
216
  - Many frameworks come with their own dev server
@@ -205,6 +219,7 @@ initialCommands:
205
219
 
206
220
  ## Changelog
207
221
 
222
+ - 1.6: Support for long running shell commands and full screen terminal output
208
223
  - 1.5: Allow agents to start their own parallel `subagents`
209
224
  - 1.4: `genimg` command for generating images
210
225
  - 1.3: Post-session 'dreaming' as well as a mail 'blackout' period
@@ -0,0 +1,2 @@
1
+ # Even on Windows, these bin commands are run by WSL so the line endings need to be LF, or bash will error out
2
+ * text eol=lf
@@ -0,0 +1,4 @@
1
+ #!/bin/bash
2
+
3
+ # ./src/command/commandHandler.ts has the same message
4
+ echo "Successful: Naisys fallback bash commands are being caught."
@@ -78,6 +78,10 @@ export async function processCommand(prompt, consoleInput) {
78
78
  if (!config.endSessionEnabled) {
79
79
  throw 'The "trimsession" command is not enabled in this environment.';
80
80
  }
81
+ if (shellCommand.isShellSuspended()) {
82
+ await contextManager.append("Session cannot be ended while a shell command is active.");
83
+ break;
84
+ }
81
85
  // Don't need to check end line as this is the last command in the context, just read to the end
82
86
  const endSessionNotes = utilities.trimChars(cmdArgs, '"');
83
87
  if (!endSessionNotes) {
@@ -158,8 +162,8 @@ export async function processCommand(prompt, consoleInput) {
158
162
  ? NextCommandAction.ExitApplication
159
163
  : NextCommandAction.Continue;
160
164
  }
161
- }
162
- }
165
+ } // End switch
166
+ } // End loop processing LLM response
163
167
  // display unprocessed lines to aid in debugging
164
168
  if (consoleInput.trim()) {
165
169
  await output.errorAndLog(`Unprocessed LLM response:\n${consoleInput}`);
@@ -223,7 +227,15 @@ async function splitMultipleInputCommands(nextInput) {
223
227
  else if (newLinePos > 0 &&
224
228
  (nextInput.startsWith("comment ") ||
225
229
  nextInput.startsWith("genimg ") ||
226
- nextInput.startsWith("trimsession "))) {
230
+ nextInput.startsWith("trimsession ") ||
231
+ nextInput.startsWith("pause "))) {
232
+ input = nextInput.slice(0, newLinePos);
233
+ nextInput = nextInput.slice(newLinePos).trim();
234
+ }
235
+ // If shell is suspended, the process can kill/wait the shell, and may run some commands after
236
+ else if (newLinePos > 0 &&
237
+ shellCommand.isShellSuspended() &&
238
+ (nextInput.startsWith("kill") || nextInput.startsWith("wait"))) {
227
239
  input = nextInput.slice(0, newLinePos);
228
240
  nextInput = nextInput.slice(newLinePos).trim();
229
241
  }
@@ -19,6 +19,7 @@ import * as utilities from "../utils/utilities.js";
19
19
  import * as commandHandler from "./commandHandler.js";
20
20
  import { NextCommandAction } from "./commandHandler.js";
21
21
  import * as promptBuilder from "./promptBuilder.js";
22
+ import * as shellCommand from "./shellCommand.js";
22
23
  const maxErrorCount = 5;
23
24
  export async function run() {
24
25
  // Show Agent Config exept the agent prompt
@@ -39,8 +40,7 @@ export async function run() {
39
40
  await output.commentAndLog("Starting Context:");
40
41
  const latestDream = await dreamMaker.goodmorning();
41
42
  if (latestDream) {
42
- await contextManager.append("Previous Session Notes:");
43
- await contextManager.append(latestDream);
43
+ await displayPreviousSessionNotes(latestDream, nextPromptIndex++);
44
44
  }
45
45
  for (const initialCommand of config.agent.initialCommands) {
46
46
  let prompt = await promptBuilder.getPrompt(0, false);
@@ -52,6 +52,9 @@ export async function run() {
52
52
  let pauseSeconds = config.agent.debugPauseSeconds;
53
53
  let wakeOnMessage = config.agent.wakeOnMessage;
54
54
  while (nextCommandAction == NextCommandAction.Continue) {
55
+ if (shellCommand.isShellSuspended()) {
56
+ await contextManager.append(`Command still running. Enter 'wait' to continue waiting. 'kill' to terminate. Other input will be sent to the process.`, ContentSource.Console);
57
+ }
55
58
  let prompt = await promptBuilder.getPrompt(pauseSeconds, wakeOnMessage);
56
59
  let consoleInput = "";
57
60
  // Debug command prompt
@@ -234,4 +237,13 @@ function setPromptIndex(prompt, index) {
234
237
  }
235
238
  return newPrompt;
236
239
  }
240
+ async function displayPreviousSessionNotes(prevSessionNotes, nextPromptIndex) {
241
+ let prompt = await promptBuilder.getPrompt(0, false);
242
+ prompt = setPromptIndex(prompt, ++nextPromptIndex);
243
+ await contextManager.append(prompt, ContentSource.ConsolePrompt, nextPromptIndex);
244
+ const prevSessionNotesCommand = "cat ~/prev_session_notes";
245
+ await contextManager.append(prevSessionNotesCommand, ContentSource.LlmPromptResponse);
246
+ output.write(prompt + chalk[OutputColor.llm](prevSessionNotesCommand));
247
+ await contextManager.append(prevSessionNotes);
248
+ }
237
249
  //# sourceMappingURL=commandLoop.js.map
File without changes
@@ -8,28 +8,34 @@ import * as inputMode from "../utils/inputMode.js";
8
8
  import { InputMode } from "../utils/inputMode.js";
9
9
  import * as output from "../utils/output.js";
10
10
  import * as shellWrapper from "./shellWrapper.js";
11
- // When actual output is entered by the user we want to cancel any auto-continue timers and/or wake on message
12
- // We don't want to cancel if the user is entering a chords like ctrl+b then down arrow, when using tmux
13
- // This is why we can't put the event listener on the standard process.stdin/keypress event.
14
- // There is no 'data entered' output event so this monkey patch does that
11
+ /**
12
+ * When actual output is entered by the user we want to cancel any auto-continue timers and/or wake on message
13
+ * We don't want to cancel if the user is entering chords like ctrl+b then down arrow, when using tmux
14
+ * This is why we can't put the event listener on the standard process.stdin/keypress event.
15
+ * There is no 'data entered' output event so this monkey patch does that
16
+ */
17
+ const _writeEventEmitter = new events.EventEmitter();
15
18
  const _writeEventName = "write";
16
- const _outputEmitter = new events.EventEmitter();
17
19
  const _originalWrite = process.stdout.write.bind(process.stdout);
18
20
  process.stdout.write = (...args) => {
19
- _outputEmitter.emit(_writeEventName, false, ...args);
20
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
21
+ _writeEventEmitter.emit(_writeEventName, false, ...args);
21
22
  return _originalWrite.apply(process.stdout, args);
22
23
  };
23
- const _readlineInterface = readline.createInterface({
24
+ /**
25
+ * Tried to make this local and have it cleaned up with close() after using it, but
26
+ * due to the terminal settings below there are bugs with both terminal true and false
27
+ * pause() actually is nice in that it queues up the input, and doesn't allow the user
28
+ * to enter anything while the LLM is working
29
+ */
30
+ const readlineInterface = readline.createInterface({
24
31
  input: process.stdin,
25
32
  output: process.stdout,
33
+ // With this set to true, after an abort the second input will not be processed, see:
34
+ // https://gist.github.com/swax/964a2488494048c8e03d05493d9370f8
35
+ // With this set to false, the stdout.write event above will not be triggered
36
+ terminal: true,
26
37
  });
27
- // Happens when ctrl+c is pressed
28
- let readlineInterfaceClosed = false;
29
- _readlineInterface.on("close", () => {
30
- readlineInterfaceClosed = true;
31
- output.error("Readline interface closed");
32
- });
38
+ readlineInterface.pause();
33
39
  export async function getPrompt(pauseSeconds, wakeOnMessage) {
34
40
  const promptSuffix = inputMode.current == InputMode.Debug ? "#" : "$";
35
41
  const tokenMax = config.agent.tokenMax;
@@ -60,27 +66,24 @@ export function getInput(commandPrompt, pauseSeconds, wakeOnMessage) {
60
66
  let timeout;
61
67
  let interval;
62
68
  let timeoutCancelled = false;
63
- if (readlineInterfaceClosed) {
64
- output.error("Hanging because readline interface is closed.");
65
- return;
69
+ function clearTimers() {
70
+ timeoutCancelled = true;
71
+ _writeEventEmitter.off(_writeEventName, cancelWaitingForUserInput);
72
+ clearTimeout(timeout);
73
+ clearInterval(interval);
66
74
  }
67
75
  /** Cancels waiting for user input */
68
- function onStdinWrite_cancelTimers(questionAborted, buffer) {
76
+ const cancelWaitingForUserInput = (questionAborted, buffer) => {
69
77
  // Don't allow console escape commands like \x1B[1G to cancel the timeout
70
78
  if (timeoutCancelled || (buffer && !/^[a-zA-Z0-9 ]+$/.test(buffer))) {
71
79
  return;
72
80
  }
73
- timeoutCancelled = true;
74
- _outputEmitter.off(_writeEventName, onStdinWrite_cancelTimers);
75
- clearTimeout(timeout);
76
- clearInterval(interval);
77
- timeout = undefined;
78
- interval = undefined;
81
+ clearTimers();
79
82
  if (questionAborted) {
80
83
  return;
81
84
  }
82
- // Else timeout interrupted by user input, clear out the timeout information from the prompt
83
- // to prevent the user from thinking the timeout still applies
85
+ // Else timeout interrupted by user input
86
+ // Clear out the timeout information from the prompt to prevent the user from thinking the timeout still applies
84
87
  let pausePos = commandPrompt.indexOf("[Paused:");
85
88
  pausePos =
86
89
  pausePos == -1 ? commandPrompt.indexOf("[WakeOnMsg]") : pausePos;
@@ -92,21 +95,22 @@ export function getInput(commandPrompt, pauseSeconds, wakeOnMessage) {
92
95
  process.stdout.write("-".repeat(charsBack - 3));
93
96
  readline.moveCursor(process.stdout, 3, 0);
94
97
  }
95
- }
96
- _readlineInterface.question(chalk.greenBright(commandPrompt), { signal: questionController.signal }, (answer) => {
98
+ };
99
+ readlineInterface.question(chalk.greenBright(commandPrompt), { signal: questionController.signal }, (answer) => {
100
+ clearTimers();
101
+ readlineInterface.pause();
97
102
  resolve(answer);
98
103
  });
99
104
  // If user starts typing in prompt, cancel any auto timeouts or wake on msg
100
- _outputEmitter.on(_writeEventName, onStdinWrite_cancelTimers);
101
- const abortQuestion = () => {
102
- onStdinWrite_cancelTimers(true);
105
+ _writeEventEmitter.on(_writeEventName, cancelWaitingForUserInput);
106
+ function abortQuestion() {
107
+ cancelWaitingForUserInput(true);
103
108
  questionController.abort();
109
+ readlineInterface.pause();
104
110
  resolve("");
105
- };
111
+ }
106
112
  if (pauseSeconds) {
107
- timeout = setTimeout(() => {
108
- abortQuestion();
109
- }, pauseSeconds * 1000);
113
+ timeout = setTimeout(abortQuestion, pauseSeconds * 1000);
110
114
  }
111
115
  if (wakeOnMessage) {
112
116
  // Break timeout if new message is received
@@ -133,7 +137,8 @@ export function getInput(commandPrompt, pauseSeconds, wakeOnMessage) {
133
137
  }
134
138
  export function getCommandConfirmation() {
135
139
  return new Promise((resolve) => {
136
- _readlineInterface.question(chalk.greenBright("Allow command to run? [y/n] "), (answer) => {
140
+ readlineInterface.question(chalk.greenBright("Allow command to run? [y/n] "), (answer) => {
141
+ readlineInterface.pause();
137
142
  resolve(answer);
138
143
  });
139
144
  });
@@ -4,27 +4,35 @@ import * as inputMode from "../utils/inputMode.js";
4
4
  import { InputMode } from "../utils/inputMode.js";
5
5
  import * as utilities from "../utils/utilities.js";
6
6
  import * as shellWrapper from "./shellWrapper.js";
7
+ export const isShellSuspended = () => shellWrapper.isShellSuspended();
7
8
  export async function handleCommand(input) {
8
9
  const cmdParams = input.split(" ");
9
- // Route user to context friendly edit commands that can read/write the entire file in one go
10
- // Having EOF in quotes is important as it prevents the shell from replacing $variables with bash values
11
- if (["nano", "vi", "vim"].includes(cmdParams[0])) {
12
- throw `${cmdParams[0]} not supported. Use \`cat\` to read a file and \`cat > filename << 'EOF'\` to write a file`;
13
- }
14
- if (cmdParams[0] == "lynx" && cmdParams[1] != "--dump") {
15
- throw `Interactive mode with lynx is not supported. Use --dump with lynx to view a website`;
16
- }
17
- if (cmdParams[0] == "exit") {
18
- if (inputMode.current == InputMode.LLM) {
19
- throw "Use 'endsession' to end the session and clear the console log.";
10
+ let response;
11
+ if (!isShellSuspended()) {
12
+ if (["nano", "vi", "vim"].includes(cmdParams[0])) {
13
+ // Route user to context friendly edit commands that can read/write the entire file in one go
14
+ // Having EOF in quotes is important as it prevents the shell from replacing $variables with bash values
15
+ throw `${cmdParams[0]} not supported. Use \`cat\` to read a file and \`cat > filename << 'EOF'\` to write a file`;
20
16
  }
21
- // Only the debug user is allowed to exit the shell
22
- else if (inputMode.current == InputMode.Debug) {
23
- await shellWrapper.terminate();
24
- return true;
17
+ if (cmdParams[0] == "lynx" && cmdParams[1] != "--dump") {
18
+ throw `Interactive mode with lynx is not supported. Use --dump with lynx to view a website`;
25
19
  }
20
+ if (cmdParams[0] == "exit") {
21
+ if (inputMode.current == InputMode.LLM) {
22
+ throw "Use 'endsession' to end the session and clear the console log.";
23
+ }
24
+ // Only the debug user is allowed to exit the shell
25
+ else if (inputMode.current == InputMode.Debug) {
26
+ await shellWrapper.terminate();
27
+ return true;
28
+ }
29
+ }
30
+ response = await shellWrapper.executeCommand(input);
31
+ }
32
+ // Else shell is suspended, continue
33
+ else {
34
+ response = await shellWrapper.continueCommand(input);
26
35
  }
27
- let response = await shellWrapper.executeCommand(input);
28
36
  let outputLimitExceeded = false;
29
37
  const tokenCount = utilities.getTokenCount(response);
30
38
  // Prevent too much output from blowing up the context
@@ -41,9 +49,9 @@ export async function handleCommand(input) {
41
49
  }
42
50
  if (response.endsWith(": command not found")) {
43
51
  response +=
44
- "Please enter a valid Linux or NAISYS command after the prompt. Use the 'comment' command for thoughts.";
52
+ "\nPlease enter a valid Linux or NAISYS command after the prompt. Use the 'comment' command for thoughts.";
45
53
  }
46
- // todo move this into the command handler to remove the context manager dependency
54
+ // TODO: move this into the command handler to remove the context manager dependency
47
55
  await contextManager.append(response);
48
56
  return false;
49
57
  }
@@ -1,8 +1,12 @@
1
+ import xterm from "@xterm/headless";
1
2
  import { spawn } from "child_process";
2
3
  import * as fs from "fs";
3
4
  import * as os from "os";
5
+ import stripAnsi from "strip-ansi";
6
+ import treeKill from "tree-kill";
4
7
  import * as config from "../config.js";
5
8
  import * as output from "../utils/output.js";
9
+ import * as pathService from "../utils/pathService.js";
6
10
  import { NaisysPath } from "../utils/pathService.js";
7
11
  var ShellEvent;
8
12
  (function (ShellEvent) {
@@ -14,31 +18,35 @@ let _process;
14
18
  let _currentProcessId;
15
19
  let _commandOutput = "";
16
20
  let _currentPath;
21
+ let _terminal;
22
+ let _bufferChangeEvent;
23
+ let _currentBufferType = "normal";
17
24
  let _resolveCurrentCommand;
18
25
  let _currentCommandTimeout;
19
- let _startTime;
20
26
  /** How we know the command has completed when running the command inside a shell like bash or wsl */
21
27
  const _commandDelimiter = "__COMMAND_END_X7YUTT__";
28
+ let _wrapperSuspended = false;
29
+ const _queuedOutput = [];
22
30
  async function ensureOpen() {
23
31
  if (_process) {
24
32
  return;
25
33
  }
26
34
  resetCommand();
27
- const spawnProcess = os.platform() === "win32" ? "wsl" : "bash";
28
- _process = spawn(spawnProcess, [], { stdio: "pipe" });
35
+ const spawnCmd = os.platform() === "win32" ? "wsl" : "bash";
36
+ _process = spawn(spawnCmd, [], { stdio: "pipe" });
29
37
  const pid = _process.pid;
30
38
  if (!pid) {
31
39
  throw "Shell process failed to start";
32
40
  }
33
41
  _currentProcessId = pid;
34
42
  _process.stdout.on("data", (data) => {
35
- processOutput(data.toString(), ShellEvent.Ouptput, pid);
43
+ processOutput(data, ShellEvent.Ouptput, pid);
36
44
  });
37
45
  _process.stderr.on("data", (data) => {
38
- processOutput(data.toString(), ShellEvent.Error, pid);
46
+ processOutput(data, ShellEvent.Error, pid);
39
47
  });
40
48
  _process.on("close", (code) => {
41
- processOutput(`${code}`, ShellEvent.Exit, pid);
49
+ processOutput(Buffer.from(`${code}`), ShellEvent.Exit, pid);
42
50
  });
43
51
  // Init users home dir on first run, on shell crash/rerun go back to the current path
44
52
  if (!_currentPath) {
@@ -61,7 +69,12 @@ function errorIfNotEmpty(response) {
61
69
  output.error(response);
62
70
  }
63
71
  }
64
- function processOutput(dataStr, eventType, pid) {
72
+ function processOutput(rawDataStr, eventType, pid) {
73
+ if (_wrapperSuspended) {
74
+ _queuedOutput.push({ rawDataStr, eventType, pid });
75
+ return;
76
+ }
77
+ let dataStr = stripAnsi(rawDataStr.toString());
65
78
  if (pid != _currentProcessId) {
66
79
  output.comment(`Ignoring '${eventType}' from old shell process ${pid}: ` + dataStr);
67
80
  return;
@@ -72,75 +85,173 @@ function processOutput(dataStr, eventType, pid) {
72
85
  return;
73
86
  }
74
87
  if (eventType === ShellEvent.Exit) {
75
- output.error("SHELL EXITED. PID: " + _process?.pid + " CODE: " + dataStr);
76
- const elapsedSeconds = _startTime
77
- ? Math.round((new Date().getTime() - _startTime.getTime()) / 1000)
78
- : -1;
79
- const outputWithError = _commandOutput.trim() +
80
- `\nNAISYS: Command hit time out limit after ${elapsedSeconds} seconds. If possible figure out how to run the command faster or break it up into smaller parts.`;
88
+ output.error(`SHELL EXIT. PID: ${_process?.pid}, CODE: ${rawDataStr}`);
89
+ let finalOutput = _currentBufferType == "alternate"
90
+ ? _getTerminalActiveBuffer()
91
+ : _commandOutput.trim();
92
+ if (finalOutput.endsWith("command not found")) {
93
+ finalOutput += `\nNAISYS: Make sure that you are using valid linux commands, and that any non-commands are prefixed with the 'commment' command.`;
94
+ }
95
+ finalOutput += `\nNAISYS: Command killed.`;
81
96
  resetProcess();
82
- _resolveCurrentCommand(outputWithError);
97
+ _completeCommand(finalOutput);
83
98
  return;
84
99
  }
85
- else {
86
- // Extend the timeout of the current command
87
- setOrExtendShellTimeout();
100
+ // Should only happen back in normal mode, so we don't need to modify the rawDataStr
101
+ let endDelimiterHit = false;
102
+ const endDelimiterPos = dataStr.indexOf(_commandDelimiter);
103
+ if (endDelimiterPos != -1 &&
104
+ // Quotes will only precede the delimiter if the echo command got in the output, so don't count it
105
+ // For example running nano or vi will cause this
106
+ dataStr[endDelimiterPos - 1] != '"') {
107
+ endDelimiterHit = true;
108
+ dataStr = dataStr.slice(0, endDelimiterPos);
109
+ // If it does happen somehow, log it so I can figure out why/how and what to do about it
110
+ if (_currentBufferType == "alternate") {
111
+ output.error("UNEXPECTED END DELIMITER IN ALTERNATE BUFFER: " + dataStr);
112
+ }
113
+ }
114
+ // If we're in alternate mode, just write the data to the terminal
115
+ // When the buffer changes back to normal, the output will be copied back to the command output
116
+ if (_currentBufferType == "normal") {
88
117
  _commandOutput += dataStr;
89
118
  }
90
- const delimiterIndex = _commandOutput.indexOf(_commandDelimiter);
91
- if (delimiterIndex != -1) {
92
- // trim everything after delimiter
93
- _commandOutput = _commandOutput.slice(0, delimiterIndex);
94
- const response = _commandOutput.trim();
119
+ // TODO: get token size of buffer, if too big, switch it front/middle/back
120
+ _terminal?.write(rawDataStr); // Not synchronous, second param takes a call back, don't need to handle it AFAIK
121
+ if (endDelimiterHit) {
122
+ const finalOutput = _commandOutput.trim();
95
123
  resetCommand();
96
- _resolveCurrentCommand(response);
124
+ _completeCommand(finalOutput);
97
125
  }
98
126
  }
99
127
  export async function executeCommand(command) {
128
+ if (_wrapperSuspended) {
129
+ throw "Use continueCommand to send input to a shell command in process";
130
+ }
131
+ command = command.trim();
132
+ _lastCommand = command; // Set here before it gets reset by the multi line script below
100
133
  await ensureOpen();
101
- if (_currentPath && command.trim().split("\n").length > 1) {
134
+ if (_currentPath && command.split("\n").length > 1) {
102
135
  command = await putMultilineCommandInAScript(command);
103
136
  }
104
137
  return new Promise((resolve, reject) => {
105
138
  _resolveCurrentCommand = resolve;
106
- const commandWithDelimiter = `${command.trim()}\necho "${_commandDelimiter} LINE:\${LINENO}"\n`;
107
139
  if (!_process) {
108
140
  reject("Shell process is not open");
109
141
  return;
110
142
  }
143
+ const commandWithDelimiter = `${command}\necho "${_commandDelimiter}"\n`;
111
144
  _process.stdin.write(commandWithDelimiter);
112
- _startTime = new Date();
113
- // If no response, kill and reset the shell, often hanging on some unescaped input
114
- setOrExtendShellTimeout();
145
+ // Set timeout to wait for response from command
146
+ setCommandTimeout();
115
147
  });
116
148
  }
117
- function setOrExtendShellTimeout() {
118
- // Don't extend if we've been waiting longer than the max timeout seconds
119
- const timeWaiting = new Date().getTime() - (_startTime?.getTime() || 0);
120
- if (!_process?.pid ||
121
- timeWaiting > config.shellCommand.maxTimeoutSeconds * 1000) {
122
- return;
149
+ /** The LLM made its decision on how it wants to continue with the shell that previously timed out */
150
+ export function continueCommand(command) {
151
+ if (!_wrapperSuspended) {
152
+ throw "Shell is not suspended, use execute command";
153
+ }
154
+ command = command.trim();
155
+ _wrapperSuspended = false;
156
+ let choice;
157
+ if (command != "wait" && command != "kill") {
158
+ choice = "input";
159
+ }
160
+ else {
161
+ choice = command;
162
+ }
163
+ return new Promise((resolve, reject) => {
164
+ _resolveCurrentCommand = resolve;
165
+ // If new output from the shell was queued while waiting for the LLM to decide what to do
166
+ if (_queuedOutput.length > 0) {
167
+ for (const output of _queuedOutput) {
168
+ processOutput(output.rawDataStr, output.eventType, output.pid);
169
+ }
170
+ _queuedOutput.length = 0;
171
+ // If processing queue resolved the command, then we're done
172
+ if (!_resolveCurrentCommand) {
173
+ return;
174
+ }
175
+ // Used to return here if LLM was sending if output was generated while waiting for the LLM
176
+ // In normal mode this would make the log confusing and out of order
177
+ // But since we only use the terminal in alternate mode, this is fine and works
178
+ // with commands like `mtr` changing the display type
179
+ }
180
+ // LLM wants to wait for more output
181
+ if (choice == "wait") {
182
+ setCommandTimeout();
183
+ return;
184
+ }
185
+ // Else LLM wants to kill the process
186
+ else if (choice == "kill") {
187
+ if (!_currentProcessId) {
188
+ reject("No process to kill");
189
+ }
190
+ else if (resetShell(_currentProcessId)) {
191
+ return; // Wait for exit event
192
+ }
193
+ else {
194
+ reject("Unable to kill. Process not found");
195
+ }
196
+ return;
197
+ }
198
+ // Else LLM wants to send input to the process
199
+ else {
200
+ if (!_process) {
201
+ reject("Shell process is not open");
202
+ return;
203
+ }
204
+ _process.stdin.write(command + "\n");
205
+ _lastCommand = command;
206
+ setCommandTimeout();
207
+ }
208
+ });
209
+ }
210
+ let _startCommandTime;
211
+ /** Pulled out because for commands like 'wait' we want to vary the run time based on the 'last command' */
212
+ let _lastCommand;
213
+ function setCommandTimeout() {
214
+ _startCommandTime = new Date();
215
+ let timeoutSeconds = config.shellCommand.timeoutSeconds;
216
+ if (config.shellCommand.longRunningCommands.some((cmd) => _lastCommand?.startsWith(cmd))) {
217
+ timeoutSeconds = config.shellCommand.longRunningTimeoutSeconds;
123
218
  }
124
- // Define the pid for use in the timeout closure, as _process.pid may change
125
- const pid = _process.pid;
126
- clearTimeout(_currentCommandTimeout);
127
219
  _currentCommandTimeout = setTimeout(() => {
128
- resetShell(pid);
129
- }, config.shellCommand.timeoutSeconds * 1000);
220
+ returnControlToNaisys();
221
+ }, timeoutSeconds * 1000);
222
+ }
223
+ function returnControlToNaisys() {
224
+ _wrapperSuspended = true;
225
+ _queuedOutput.length = 0;
226
+ // Flush the output to the console, and give the LLM instructions on how it might continue
227
+ let outputWithInstruction = _currentBufferType == "alternate"
228
+ ? _getTerminalActiveBuffer()
229
+ : _commandOutput.trim();
230
+ _commandOutput = "";
231
+ // Don't clear the alternate buffer, it's a special terminal full screen mode that the
232
+ // LLM might want to see updates too
233
+ if (_currentBufferType != "alternate") {
234
+ resetTerminal();
235
+ }
236
+ const waitSeconds = Math.round((new Date().getTime() - _startCommandTime.getTime()) / 1000);
237
+ outputWithInstruction += `\nNAISYS: Command interrupted after waiting ${waitSeconds} seconds.`;
238
+ _completeCommand(outputWithInstruction);
130
239
  }
131
240
  function resetShell(pid) {
132
241
  if (!_process || _process.pid != pid) {
133
242
  output.comment("Ignoring timeout for old shell process " + pid);
134
- return;
243
+ return false;
135
244
  }
136
- // There is still an issue here when running on linux where if a command like 'ping' is running
137
- // then kill() won't actually kill the 'bash' process hosting the ping, it will just hang here indefinitely
138
- // A not fail proof workaround is to tell the LLM to prefix long running commands with 'timeout 10s' or similar
139
- const killResponse = _process.kill();
140
- output.error(`KILL SIGNAL SENT TO PID: ${_process.pid}, RESPONSE: ${killResponse ? "SUCCESS" : "FAILED"}`);
245
+ output.error(`KILL-TREE SIGNAL SENT TO PID: ${_process.pid}`);
246
+ treeKill(pid, "SIGKILL");
141
247
  // Should trigger the process close event from here
248
+ return true;
142
249
  }
143
250
  export async function getCurrentPath() {
251
+ // If wrapper suspended just give the last known path
252
+ if (_wrapperSuspended) {
253
+ return _currentPath;
254
+ }
144
255
  await ensureOpen();
145
256
  _currentPath = await executeCommand("pwd");
146
257
  return _currentPath;
@@ -152,18 +263,40 @@ export async function terminate() {
152
263
  }
153
264
  function resetCommand() {
154
265
  _commandOutput = "";
155
- _startTime = undefined;
266
+ resetTerminal();
156
267
  clearTimeout(_currentCommandTimeout);
157
268
  }
269
+ function resetTerminal() {
270
+ _bufferChangeEvent?.dispose();
271
+ _terminal?.dispose();
272
+ _terminal = new xterm.Terminal({
273
+ allowProposedApi: true,
274
+ rows: process.stdout.rows,
275
+ cols: process.stdout.columns,
276
+ });
277
+ _currentBufferType = "normal";
278
+ _bufferChangeEvent = _terminal.buffer.onBufferChange((buffer) => {
279
+ // If changing back to normal, copy the alternate buffer back to the output
280
+ // so it shows up when the command is resolved
281
+ if (_currentBufferType == "alternate" && buffer.type == "normal") {
282
+ output.comment("NAISYS: BUFFER CHANGE BACK TO NORMAL");
283
+ _commandOutput += "\n" + _getTerminalActiveBuffer() + "\n";
284
+ }
285
+ _currentBufferType = buffer.type;
286
+ });
287
+ }
158
288
  function resetProcess() {
159
289
  resetCommand();
160
290
  _process?.removeAllListeners();
161
291
  _process = undefined;
292
+ _terminal?.dispose();
293
+ _terminal = undefined;
162
294
  }
163
295
  /** Wraps multi line commands in a script to make it easier to diagnose the source of errors based on line number
164
296
  * May also help with common escaping errors */
165
297
  function putMultilineCommandInAScript(command) {
166
- const scriptPath = new NaisysPath(`${config.naisysFolder}/home/${config.agent.username}/.command.tmp.sh`);
298
+ const scriptPath = new NaisysPath(`${config.naisysFolder}/agent-data/${config.agent.username}/multiline-command.sh`);
299
+ pathService.ensureFileDirExists(scriptPath);
167
300
  // set -e causes the script to exit on the first error
168
301
  const scriptContent = `#!/bin/bash
169
302
  set -e
@@ -176,4 +309,32 @@ ${command.trim()}`;
176
309
  // `source` will run the script in the current shell, so any change directories in the script will persist in the current shell
177
310
  return `PATH=${config.binPath}:$PATH source ${scriptPath.getNaisysPath()}`;
178
311
  }
312
+ function _completeCommand(output) {
313
+ if (!_resolveCurrentCommand) {
314
+ throw "No command to resolve";
315
+ }
316
+ _resolveCurrentCommand(output);
317
+ _resolveCurrentCommand = undefined;
318
+ }
319
+ export function isShellSuspended() {
320
+ return _wrapperSuspended;
321
+ }
322
+ /**
323
+ * The alternate/active buffer is a special terminal mode that runs full screen
324
+ * independent of the 'normal' buffer that is more like a log
325
+ */
326
+ function _getTerminalActiveBuffer() {
327
+ let output = "";
328
+ const bufferLineCount = _terminal?.buffer.normal?.length || 0;
329
+ for (let i = 0; i < bufferLineCount; i++) {
330
+ const line = _terminal?.buffer.alternate
331
+ ?.getLine(i)
332
+ ?.translateToString()
333
+ .trim();
334
+ if (line) {
335
+ output += line + "\n";
336
+ }
337
+ }
338
+ return output.trim();
339
+ }
179
340
  //# sourceMappingURL=shellWrapper.js.map
package/dist/config.js CHANGED
@@ -13,19 +13,25 @@ dotenv.config();
13
13
  export const hostname = "naisys";
14
14
  export const shellCommand = {
15
15
  /** Limits the size of files that can be read/wrote */
16
- outputTokenMax: 3000,
16
+ outputTokenMax: 5000,
17
17
  /** The time NAISYS will wait for new shell output before giving up */
18
18
  timeoutSeconds: 15,
19
- /** The max time NAISYS will wait for a shell command to complete */
20
- maxTimeoutSeconds: 60,
19
+ /** These commands have their own timeout so the LLM doesn't have to continually waste tokens on wait commands */
20
+ longRunningCommands: ["nmap", "traceroute", "tracepath", "mtr"],
21
+ longRunningTimeoutSeconds: 120,
21
22
  };
23
+ export const agent = loadAgentConfig();
22
24
  /** Web pages loaded with llmynx will be reduced down to around this number of tokens */
23
- export const webTokenMax = 2500;
25
+ export const webTokenMax = 3000;
26
+ /** Allows the LLM to end it's own session */
24
27
  export const endSessionEnabled = true;
25
- export const mailEnabled = true;
28
+ /** Inter agent communication */
29
+ export const mailEnabled = agent.mailEnabled || false;
30
+ /** The LLM optimized browser */
31
+ export const webEnabled = agent.webEnabled || false;
26
32
  /** Experimental, live updating spot in the context for the LLM to put files, to avoid having to continually cat */
27
33
  export const workspacesEnabled = false;
28
- /** Experimental, allow LLM to trim prompts from it's own session context */
34
+ /** Experimental, allow LLM to trim it's own session context to avoid having to restart the session */
29
35
  export const trimSessionEnabled = false;
30
36
  /* .env is used for global configs across naisys, while agent configs are for the specific agent */
31
37
  export const naisysFolder = getEnv("NAISYS_FOLDER", true);
@@ -35,7 +41,7 @@ export const localLlmName = getEnv("LOCAL_LLM_NAME");
35
41
  export const openaiApiKey = getEnv("OPENAI_API_KEY");
36
42
  export const googleApiKey = getEnv("GOOGLE_API_KEY");
37
43
  export const anthropicApiKey = getEnv("ANTHROPIC_API_KEY");
38
- export const agent = loadAgentConfig();
44
+ export const openRouterApiKey = getEnv("OPENROUTER_API_KEY");
39
45
  function loadAgentConfig() {
40
46
  const config = yaml.load(fs.readFileSync(program.args[0], "utf8"));
41
47
  config.hostpath = path.resolve(program.args[0]);
@@ -106,9 +112,7 @@ export function resolveConfigVars(templateString) {
106
112
  resolvedString = resolveTemplateVars(resolvedString, "env", process.env);
107
113
  return resolvedString;
108
114
  }
109
- function resolveTemplateVars(templateString, allowedVarString,
110
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
111
- mappedVar) {
115
+ function resolveTemplateVars(templateString, allowedVarString, mappedVar) {
112
116
  const pattern = new RegExp(`\\$\\{${allowedVarString}\\.([^}]+)\\}`, "g");
113
117
  return templateString.replace(pattern, (match, key) => {
114
118
  const value = valueFromString(mappedVar, key);
@@ -24,6 +24,10 @@ export async function handleCommand(args) {
24
24
  if (!filepath) {
25
25
  throw "Error: Filepath is required";
26
26
  }
27
+ // Validate path is fully qualified
28
+ if (!filepath.getNaisysPath().startsWith("/")) {
29
+ throw "Error: Filepath must be fully qualified";
30
+ }
27
31
  pathService.ensureFileDirExists(filepath);
28
32
  output.comment(`Generating image with ${config.agent.imageModel}...`);
29
33
  const openai = new OpenAI();
@@ -46,15 +50,14 @@ export async function handleCommand(args) {
46
50
  const hostPath = filepath.toHostPath();
47
51
  const fileExtension = path.extname(hostPath).substring(1);
48
52
  await sharp(imageBuffer)
49
- .resize(256, 256, {
50
- fit: "inside",
51
- })
52
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
53
+ /*.resize(512, 512, {
54
+ fit: "inside",
55
+ })*/
53
56
  .toFormat(fileExtension)
54
57
  .toFile(hostPath);
55
58
  // Record the cost
56
59
  await costTracker.recordCost(model.cost, "genimg", model.name);
57
- return "Image generated and saved to " + filepath.getNaisysPath();
60
+ return "1024x1024 Image generated and saved to " + filepath.getNaisysPath();
58
61
  }
59
62
  const imageModels = [
60
63
  {
@@ -19,6 +19,8 @@ async function init() {
19
19
  const newDbCreated = await dbUtils.initDatabase(_dbFilePath);
20
20
  await usingDatabase(async (db) => {
21
21
  if (newDbCreated) {
22
+ // For llmail to work, the usernames need to be unique
23
+ // The agentPaths also need to be unique so we know what configuration each agent should use when we restart/reload naisys
22
24
  const createTables = [
23
25
  `CREATE TABLE Users (
24
26
  id INTEGER PRIMARY KEY,
@@ -65,16 +67,22 @@ async function init() {
65
67
  ]);
66
68
  // If user not in database, add them
67
69
  if (!user) {
68
- const insertedUser = await db.run("INSERT INTO Users (username, title, agentPath, leadUsername) VALUES (?, ?, ?, ?)", [
69
- config.agent.username,
70
- config.agent.title,
71
- config.agent.hostpath,
72
- config.agent.leadAgent,
73
- ]);
74
- if (!insertedUser.lastID) {
75
- throw "Error adding local user to llmail database";
70
+ try {
71
+ const insertedUser = await db.run("INSERT INTO Users (username, title, agentPath, leadUsername) VALUES (?, ?, ?, ?)", [
72
+ config.agent.username,
73
+ config.agent.title,
74
+ config.agent.hostpath,
75
+ config.agent.leadAgent,
76
+ ]);
77
+ if (!insertedUser.lastID) {
78
+ throw "Error adding local user to llmail database";
79
+ }
80
+ _myUserId = insertedUser.lastID;
81
+ }
82
+ catch (e) {
83
+ throw (`A user already exists in the database with the agent path (${config.agent.hostpath})\n` +
84
+ `Either create a new agent config file, or delete the ${config.naisysFolder} folder to reset the database.`);
76
85
  }
77
- _myUserId = insertedUser.lastID;
78
86
  }
79
87
  // Else already exists, validate it's config path is correct
80
88
  else {
File without changes
@@ -46,6 +46,7 @@ export async function handleCommand(args) {
46
46
  if (!argParams[0]) {
47
47
  argParams[0] = "help";
48
48
  }
49
+ let errorText = "";
49
50
  switch (argParams[0]) {
50
51
  case "help": {
51
52
  let helpOutput = `subagent <command>
@@ -75,6 +76,11 @@ export async function handleCommand(args) {
75
76
  const newParams = argParams.slice(1).join(" ").split('"');
76
77
  const title = newParams[1];
77
78
  const task = newParams[3];
79
+ // Validate title and task set
80
+ if (!title || !task) {
81
+ errorText = "See valid 'create' syntax below:\n";
82
+ break;
83
+ }
78
84
  return await _createAgent(title, task);
79
85
  }
80
86
  case "start": {
@@ -90,10 +96,11 @@ export async function handleCommand(args) {
90
96
  _debugFlushContext(subagentId);
91
97
  return "";
92
98
  }
93
- default:
94
- return ("Error, unknown command. See valid commands below:\n" +
95
- (await handleCommand("help")));
99
+ default: {
100
+ errorText = "Error, unknown command. See valid commands below:\n";
101
+ }
96
102
  }
103
+ return errorText + (await handleCommand("help"));
97
104
  }
98
105
  export function getRunningSubagentNames() {
99
106
  return _subagents
@@ -112,10 +119,6 @@ export function unreadContextSummary() {
112
119
  .join(" | "));
113
120
  }
114
121
  async function _createAgent(title, taskDescription) {
115
- // Validate title and task set
116
- if (!title || !taskDescription) {
117
- throw "Title and task description must be set";
118
- }
119
122
  // Get available username
120
123
  const usernames = await llmail.getAllUserNames();
121
124
  let agentName = "";
@@ -218,6 +221,6 @@ function _debugFlushContext(subagentId) {
218
221
  subagent.log = "";
219
222
  }
220
223
  function _getSubagentDir() {
221
- return new NaisysPath(`${config.naisysFolder}/home/${config.agent.username}/.subagents`);
224
+ return new NaisysPath(`${config.naisysFolder}/agent-data/${config.agent.username}/subagents`);
222
225
  }
223
226
  //# sourceMappingURL=subagent.js.map
File without changes
File without changes
File without changes
File without changes
@@ -4,57 +4,82 @@ export var LlmApiType;
4
4
  LlmApiType["OpenAI"] = "openai";
5
5
  LlmApiType["Google"] = "google";
6
6
  LlmApiType["Anthropic"] = "anthropic";
7
+ LlmApiType["OpenRouter"] = "openrouter";
7
8
  })(LlmApiType || (LlmApiType = {}));
8
9
  const llmModels = [
9
10
  {
10
- key: "gpt4turbo",
11
- name: "gpt-4-0125-preview",
11
+ key: "local",
12
+ name: config.localLlmName || "local",
13
+ baseUrl: config.localLlmUrl,
12
14
  apiType: LlmApiType.OpenAI,
15
+ maxTokens: 8000,
16
+ // Prices are per 1M tokens
17
+ inputCost: 0,
18
+ outputCost: 0,
19
+ },
20
+ // Open Router
21
+ {
22
+ key: "llama3-405b",
23
+ name: "meta-llama/llama-3.1-405b-instruct",
24
+ baseUrl: "https://openrouter.ai/api/v1",
25
+ apiType: LlmApiType.OpenRouter,
13
26
  maxTokens: 128000,
14
27
  // Prices are per 1M tokens
15
- inputCost: 10,
16
- outputCost: 30,
28
+ inputCost: 2.7,
29
+ outputCost: 2.7,
17
30
  },
31
+ // OpenAI Models
32
+ // https://openai.com/api/pricing/
18
33
  {
19
- key: "gpt3turbo",
20
- name: "gpt-3.5-turbo-0125",
34
+ key: "gpto3mini",
35
+ name: "o3-mini",
21
36
  apiType: LlmApiType.OpenAI,
22
- maxTokens: 16000,
37
+ maxTokens: 200000,
23
38
  // Prices are per 1M tokens
24
- inputCost: 0.5,
25
- outputCost: 1.5,
39
+ inputCost: 1.1,
40
+ outputCost: 4.4,
26
41
  },
27
42
  {
28
- key: "local",
29
- name: config.localLlmName || "local",
30
- baseUrl: config.localLlmUrl,
43
+ key: "gpt4mini",
44
+ name: "gpt-4o-mini",
31
45
  apiType: LlmApiType.OpenAI,
32
- maxTokens: 8000,
46
+ maxTokens: 128000,
33
47
  // Prices are per 1M tokens
34
- inputCost: 0,
35
- outputCost: 0,
48
+ inputCost: 0.15,
49
+ outputCost: 0.6,
36
50
  },
51
+ {
52
+ key: "gpt4o",
53
+ name: "gpt-4o",
54
+ apiType: LlmApiType.OpenAI,
55
+ maxTokens: 128000,
56
+ // Prices are per 1M tokens
57
+ inputCost: 2.5,
58
+ outputCost: 10,
59
+ },
60
+ // Google Models
37
61
  {
38
62
  key: "gemini1.5",
39
63
  name: "gemini-1.5-pro-latest",
40
64
  apiType: LlmApiType.Google,
41
- maxTokens: 1048576,
65
+ maxTokens: 1000000,
42
66
  // 2 queries per minute free then the prices below are per 1000 characters
43
- inputCost: 7,
44
- outputCost: 21,
67
+ inputCost: 1.25,
68
+ outputCost: 5,
45
69
  },
46
70
  {
47
- key: "gemini1.0",
48
- name: "gemini-pro",
71
+ key: "gemini2.0flash",
72
+ name: "gemini-2.0-flash",
49
73
  apiType: LlmApiType.Google,
50
- maxTokens: 30720,
74
+ maxTokens: 1000000,
51
75
  // 60 queries per minute free then the prices below are per 1000 characters
52
- inputCost: 0.50,
53
- outputCost: 1.50,
76
+ inputCost: 0.1,
77
+ outputCost: 0.4,
54
78
  },
79
+ // Anthropic Models
55
80
  {
56
81
  key: "claude3opus",
57
- name: "claude-3-opus-20240229",
82
+ name: "claude-3-opus-latest",
58
83
  apiType: LlmApiType.Anthropic,
59
84
  maxTokens: 200000,
60
85
  // Prices are per 1M tokens
@@ -62,8 +87,8 @@ const llmModels = [
62
87
  outputCost: 75,
63
88
  },
64
89
  {
65
- key: "claude3sonnet",
66
- name: "claude-3-sonnet-20240229",
90
+ key: "claude3.7sonnet",
91
+ name: "claude-3-7-sonnet-latest",
67
92
  apiType: LlmApiType.Anthropic,
68
93
  maxTokens: 200000,
69
94
  // Prices are per 1M tokens
@@ -71,20 +96,24 @@ const llmModels = [
71
96
  outputCost: 15,
72
97
  },
73
98
  {
74
- key: "claude3haiku",
75
- name: "claude-3-haiku-20240307",
99
+ key: "claude3.5haiku",
100
+ name: "claude-3-5-haiku-latest",
76
101
  apiType: LlmApiType.Anthropic,
77
102
  maxTokens: 200000,
78
103
  // Prices are per 1M tokens
79
- inputCost: 0.25,
80
- outputCost: 1.25,
104
+ inputCost: 0.8,
105
+ outputCost: 4,
81
106
  },
82
107
  ];
83
- export function getLLModel(key) {
84
- const model = llmModels.find((m) => m.key === key);
108
+ export function getLLModel(keyName) {
109
+ const [key, name] = keyName.split("/");
110
+ const model = structuredClone(llmModels.find((m) => m.key === key));
85
111
  if (!model) {
86
112
  throw `Error, model not found: ${key}`;
87
113
  }
114
+ if (name) {
115
+ model.name = name;
116
+ }
88
117
  return model;
89
118
  }
90
119
  //# sourceMappingURL=llModels.js.map
File without changes
@@ -18,14 +18,18 @@ export async function query(modelKey, systemMessage, context, source) {
18
18
  else if (model.apiType == LlmApiType.Anthropic) {
19
19
  return sendWithAnthropic(modelKey, systemMessage, context, source);
20
20
  }
21
- else if (model.apiType == LlmApiType.OpenAI) {
22
- return sendWithOpenAiCompatible(modelKey, systemMessage, context, source);
21
+ else if (model.apiType == LlmApiType.OpenAI ||
22
+ model.apiType == LlmApiType.OpenRouter) {
23
+ const apiKey = model.apiType == LlmApiType.OpenAI
24
+ ? config.openaiApiKey
25
+ : config.openRouterApiKey;
26
+ return sendWithOpenAiCompatible(modelKey, systemMessage, context, source, apiKey);
23
27
  }
24
28
  else {
25
29
  throw `Error, unknown LLM API type ${model.apiType}`;
26
30
  }
27
31
  }
28
- async function sendWithOpenAiCompatible(modelKey, systemMessage, context, source) {
32
+ async function sendWithOpenAiCompatible(modelKey, systemMessage, context, source, apiKey) {
29
33
  const model = getLLModel(modelKey);
30
34
  if (model.key === "local") {
31
35
  if (!model.baseUrl) {
@@ -37,7 +41,7 @@ async function sendWithOpenAiCompatible(modelKey, systemMessage, context, source
37
41
  }
38
42
  const openAI = new OpenAI({
39
43
  baseURL: model.baseUrl,
40
- apiKey: config.openaiApiKey,
44
+ apiKey,
41
45
  });
42
46
  // Assert the last message on the context is a user message
43
47
  const lastMessage = context[context.length - 1];
@@ -48,7 +52,7 @@ async function sendWithOpenAiCompatible(modelKey, systemMessage, context, source
48
52
  model: model.name,
49
53
  messages: [
50
54
  {
51
- role: LlmRole.System,
55
+ role: LlmRole.System, // LlmRole.User, //
52
56
  content: systemMessage,
53
57
  },
54
58
  ...context.map((m) => ({
@@ -57,8 +61,11 @@ async function sendWithOpenAiCompatible(modelKey, systemMessage, context, source
57
61
  })),
58
62
  ],
59
63
  });
64
+ if (!model.inputCost && !model.outputCost) {
65
+ // Don't cost models with no costs
66
+ }
60
67
  // Total up costs, prices are per 1M tokens
61
- if (chatResponse.usage) {
68
+ else if (chatResponse.usage) {
62
69
  const cost = chatResponse.usage.prompt_tokens * model.inputCost +
63
70
  chatResponse.usage.completion_tokens * model.outputCost;
64
71
  await costTracker.recordCost(cost / 1000000, source, model.name);
@@ -118,7 +125,7 @@ async function sendWithGoogle(modelKey, systemMessage, context, source) {
118
125
  throw `Google API Request Blocked, ${result.response.promptFeedback.blockReason}`;
119
126
  }
120
127
  const responseText = result.response.text();
121
- // todo: take into account google allows 60 queries per minute for free for 1.0, 2 queries/min for 1.5
128
+ // TODO: take into account google allows 60 queries per minute for free for 1.0, 2 queries/min for 1.5
122
129
  // AFAIK Google API doesn't provide usage data, so we have to estimate it ourselves
123
130
  const inputTokenCount = getTokenCount(systemMessage) +
124
131
  context
@@ -18,6 +18,10 @@ let llmailCmd = "";
18
18
  if (config.mailEnabled) {
19
19
  llmailCmd = `\n llmail: A local mail system for communicating with your team`;
20
20
  }
21
+ let llmynxCmd = "";
22
+ if (config.webEnabled) {
23
+ llmynxCmd = `\n llmynx: A context optimized web browser. Enter 'llmynx help' to learn how to use it`;
24
+ }
21
25
  let workspaces = "";
22
26
  if (config.workspacesEnabled) {
23
27
  workspaces = `\nWorkspaces:`;
@@ -71,8 +75,7 @@ LINUX Commands:
71
75
  vi and nano are not supported
72
76
  Read files with cat. Write files with \`cat > filename << 'EOF'\`
73
77
  Do not input notes after the prompt. Only valid commands.
74
- NAISYS Commands: (cannot be used with other commands on the same prompt)${llmailCmd}${subagentNote}
75
- llmynx: A context optimized web browser. Enter 'llmynx help' to learn how to use it${genImgCmd}
78
+ NAISYS Commands: (cannot be used with other commands on the same prompt)${llmailCmd}${subagentNote}${llmynxCmd}${genImgCmd}
76
79
  comment "<thought>": Any non-command output like thinking out loud, prefix with the 'comment' command
77
80
  pause <seconds>: Pause for <seconds>${trimSession}${endsession}
78
81
  Tokens:
package/dist/naisys.js CHANGED
File without changes
File without changes
File without changes
File without changes
File without changes
@@ -91,7 +91,7 @@ export function roleToSource(role) {
91
91
  }
92
92
  /** Write entire context to a file in the users home directory */
93
93
  export function recordContext(contextLog) {
94
- const filePath = new NaisysPath(`${config.naisysFolder}/home/${config.agent.username}/.current-context.txt`);
94
+ const filePath = new NaisysPath(`${config.naisysFolder}/agent-data/${config.agent.username}/current-context.txt`);
95
95
  pathService.ensureFileDirExists(filePath);
96
96
  fs.writeFileSync(filePath.toHostPath(), contextLog);
97
97
  }
File without changes
File without changes
@@ -1,7 +1,5 @@
1
1
  import { get_encoding } from "tiktoken";
2
- export function valueFromString(
3
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
4
- obj, path, defaultValue) {
2
+ export function valueFromString(obj, path, defaultValue) {
5
3
  if (!path) {
6
4
  return obj;
7
5
  }
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "naisys",
3
3
  "description": "Node.js Autonomous Intelligence System",
4
- "version": "1.5.0",
4
+ "version": "1.6.1",
5
5
  "type": "module",
6
6
  "main": "dist/naisys.js",
7
7
  "preferGlobal": true,
@@ -9,16 +9,17 @@
9
9
  "naisys": "bin/naisys"
10
10
  },
11
11
  "scripts": {
12
- "compile/run/attachable": "tsc && node --inspect dist/naisys.js ./agents/assistant.yaml",
12
+ "compile/run/attachable": "tsc && node --inspect dist/naisys.js ./agents/solo-websites/webdev-fansite.yaml",
13
13
  "agent:assistant": "node dist/naisys.js ./agents/assistant.yaml",
14
14
  "agent:nightwatch": "node dist/naisys.js ./agents/nightwatch.yaml",
15
15
  "clean": "rm -rf dist",
16
16
  "compile": "tsc",
17
- "eslint": "npx eslint --rulesdir eslint-rules src",
17
+ "eslint": "npx eslint src",
18
18
  "test": "tsc && node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=dist/__tests__",
19
19
  "prettier": "npx prettier --write .",
20
20
  "dependency-graph": "madge --image dependency-graph.png dist",
21
21
  "detect-cycles": "madge --circular dist",
22
+ "gen-tuning": "tsc && node dist/__fine-tuning__/gen-jsonl.js",
22
23
  "updates:check": "npm-check-updates",
23
24
  "updates:apply": "npm-check-updates -u && npm install",
24
25
  "npm:publish:dryrun": "npm run clean && npm ci && npm run compile && npm publish --dry-run",
@@ -42,29 +43,33 @@
42
43
  "devDependencies": {
43
44
  "@types/escape-html": "1.0.4",
44
45
  "@types/js-yaml": "4.0.9",
45
- "@types/node": "20.12.4",
46
+ "@types/node": "22.13.5",
46
47
  "@types/text-table": "0.2.5",
47
- "@typescript-eslint/eslint-plugin": "7.5.0",
48
- "@typescript-eslint/parser": "7.5.0",
49
- "eslint": "8.57.0",
48
+ "@typescript-eslint/eslint-plugin": "8.25.0",
49
+ "@typescript-eslint/parser": "8.25.0",
50
+ "eslint": "9.21.0",
50
51
  "jest": "29.7.0",
51
- "prettier": "3.2.5",
52
+ "prettier": "3.5.2",
52
53
  "ts-node": "10.9.2",
53
- "typescript": "5.4.4"
54
+ "typescript": "5.7.3"
54
55
  },
55
56
  "dependencies": {
56
- "@anthropic-ai/sdk": "0.20.1",
57
- "@google/generative-ai": "0.5.0",
58
- "chalk": "5.3.0",
59
- "commander": "12.0.0",
60
- "dotenv": "16.4.5",
57
+ "@anthropic-ai/sdk": "0.38.0",
58
+ "@google/generative-ai": "0.22.0",
59
+ "@xterm/headless": "5.5.0",
60
+ "chalk": "5.4.1",
61
+ "commander": "13.1.0",
62
+ "dotenv": "16.4.7",
61
63
  "escape-html": "1.0.3",
62
64
  "js-yaml": "4.1.0",
63
- "openai": "4.33.0",
64
- "sharp": "0.33.3",
65
+ "openai": "4.86.1",
66
+ "sharp": "0.33.5",
65
67
  "sqlite": "5.1.1",
66
68
  "sqlite3": "5.1.7",
69
+ "strip-ansi": "7.1.0",
67
70
  "text-table": "0.2.0",
68
- "tiktoken": "1.0.13"
71
+ "tiktoken": "1.0.20",
72
+ "tree-kill": "1.2.2",
73
+ "web-streams-polyfill": "4.1.0"
69
74
  }
70
75
  }