naisys 1.1.0 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -11,6 +11,10 @@ vim or nano so point the LLM to use cat to read/write files in a single operatio
11
11
 
12
12
  [NPM](https://www.npmjs.com/package/naisys) | [Website](https://naisys.org) | [Discord](https://discord.gg/JBUPWSbaEt) | [Demo Video](https://www.youtube.com/watch?v=Ttya3ixjumo)
13
13
 
14
+ ```bash
15
+ npm install -g naisys
16
+ ```
17
+
14
18
  #### Node.js is used to create a simple proxy shell environment for the LLM that
15
19
 
16
20
  - Helps the LLM keep track of its current context size
@@ -61,10 +65,15 @@ title: Software Engineer
61
65
 
62
66
  # The model to use for console interactions
63
67
  # (gpt4turbo, gpt4turbo, gemini-pro, claude3sonnet, claude3opus, local)
64
- shellModel: claude3sonnet
68
+ shellModel: gpt4turbo
65
69
 
66
- # The model to use for llmynx, pre-processing websites to fit into a smaller context
67
- webModel: gpt3turbo
70
+ # Only used between sessions to provide guidance for the next session (use a more powerful model for this)
71
+ # defaults to the shellModel if omitted
72
+ dreamModel: claude3opus
73
+
74
+ # The model to use for llmynx, pre-processing websites to fit into a smaller context (use a cheaper model)
75
+ # defaults to the shellModel if omitted
76
+ webModel: gemini-pro
68
77
 
69
78
  # A system like prompt explaining the agent's role and responsibilities
70
79
  # You can use config variables in this string
@@ -85,20 +94,26 @@ tokenMax: 5000
85
94
  # No value or zero means wait indefinitely (debug driven)
86
95
  debugPauseSeconds: 5
87
96
 
88
- # If true, regardless of the debugPauseSeconds, the agent will not wake up on messages
89
- # With lots of agents this could be costly if they all end up mailing/replying each other in quick succession
97
+ # If true, regardless of the debugPauseSeconds, the agent will wake up on messages
98
+ # Useful for agents with long debugPauseSeconds, so that they can wake up and reply quickly
90
99
  wakeOnMessage: false
91
100
 
92
101
  # The maximum amount to spend on LLM interactions
93
102
  # Once reached the agent will stop and this value will need to be increased to continue
94
103
  spendLimitDollars: 2.00
95
104
 
96
- # None: Commands from the LLM run automatically, this is the default setting as well if the value is not set
97
- # Manual: Every command the LLM wants you run you have to approve [y/n]
98
- # Auto: All commands are run through the separate LLM instace, commands that look like they'll modify the system are blocked
99
- commandProtection: 'none'
100
-
101
- # Additional custom variables can be defined here and/or in the .env file to be loaded into the agent prompt
105
+ # Command Protection: Useful for agents you want to restrict from modifying the system
106
+ # None: Commands from the LLM run automatically, this is the default setting as well if the value is not set
107
+ # Manual: Every command the LLM wants to run has to be approved [y/n]. Not very autonomous.
108
+ # Auto: All commands are run through the separate LLM instace that will check to see if the command is safe
109
+ commandProtection: "none"
110
+
111
+ # Run these commands on session start, in the example below the agent will see how to use mail and a list of other agents
112
+ initialCommands:
113
+ - llmail users
114
+ - llmail help
115
+ - cat ${env.NAISYS_FOLDER}/home/${agent.username}/PLAN.md
116
+ # Additional custom variables can be defined here and/or in the agent config to be loaded into the agent prompt
102
117
  ```
103
118
 
104
119
  - Run `naisys <path to yaml or directory>`
package/bin/comment ADDED
@@ -0,0 +1,4 @@
1
+ #!/bin/bash
2
+
3
+ # ./src/command/commandHandler.ts has the same message
4
+ echo "Comment noted. Try running commands now to achieve your goal. ."
package/bin/endsession ADDED
@@ -0,0 +1,3 @@
1
+ #!/bin/bash
2
+
3
+ echo "'endsession' cannot be used with other commands on the same prompt."
package/bin/llmail ADDED
@@ -0,0 +1,3 @@
1
+ #!/bin/bash
2
+
3
+ echo "'llmail' cannot be used with other commands on the same prompt."
package/bin/llmynx ADDED
@@ -0,0 +1,3 @@
1
+ #!/bin/bash
2
+
3
+ echo "'llmynx' cannot be used with other commands on the same prompt."
@@ -12,7 +12,7 @@ fi
12
12
 
13
13
  # Resolves the location of naisys from the bin directory
14
14
  SCRIPT=$(readlink -f "$0" || echo "$0")
15
- SCRIPT_DIR=$(dirname "$SCRIPT")
15
+ SCRIPT_DIR=$(dirname "$SCRIPT")/..
16
16
 
17
17
  # if path is a yaml file then start a single agent
18
18
  if [ -f "$1" ]; then
package/bin/pause ADDED
@@ -0,0 +1,3 @@
1
+ #!/bin/bash
2
+
3
+ echo "'pause' cannot be used with other commands on the same prompt."
@@ -6,9 +6,8 @@ import * as utilities from "../utils/utilities.js";
6
6
  import { naisysToHostPath } from "../utils/utilities.js";
7
7
  const _dbFilePath = naisysToHostPath(`${config.naisysFolder}/lib/llmail.db`);
8
8
  let _myUserId = -1;
9
- // Implement maxes so that LLMs actively manage threads, archive, and create new ones
10
- const _threadTokenMax = config.agent.tokenMax / 2; // So 4000, would be 2000 thread max
11
- const _messageTokenMax = _threadTokenMax / 5; // Given the above a 400 token max, and 5 big messages per thread
9
+ /** Threading is not currently used so this doesn't matter */
10
+ const _threadTokenMax = config.mailMessageTokenMax * 5;
12
11
  /** The 'non-simple' version of this is a thread first mail system. Where agents can create threads, add users, and reply to threads, etc..
13
12
  * The problem with this was the agents were too chatty with so many mail commands, wasting context replying, reading threads, etc..
14
13
  * Simple mode only has two commands. It still requires db persistance to support offline agents. */
@@ -85,7 +84,9 @@ export async function handleCommand(args) {
85
84
  if (simpleMode) {
86
85
  return `llmail <command>
87
86
  users: Get list of users on the system
88
- send "<users>" "subject" "message": Send a message. ${_messageTokenMax} token max.`;
87
+ send "<users>" "subject" "message": Send a message. ${config.mailMessageTokenMax} token max.
88
+
89
+ * Attachments are not supported, use file paths to refence files in emails as all users are on the same machine`;
89
90
  }
90
91
  else {
91
92
  return `llmail <command>
@@ -138,7 +139,8 @@ export async function handleCommand(args) {
138
139
  await init();
139
140
  return "llmail database reset";
140
141
  default:
141
- return "Unknown llmail command: " + argParams[0];
142
+ return ("Error, unknown command. See valid commands below:\n" +
143
+ (await handleCommand("help")));
142
144
  }
143
145
  }
144
146
  export async function getUnreadThreads() {
@@ -339,8 +341,8 @@ async function getUser(db, username) {
339
341
  }
340
342
  function validateMsgTokenCount(message) {
341
343
  const msgTokenCount = utilities.getTokenCount(message);
342
- if (msgTokenCount > _messageTokenMax) {
343
- throw `Error: Message is ${msgTokenCount} tokens, exceeding the limit of ${_messageTokenMax} tokens`;
344
+ if (msgTokenCount > config.mailMessageTokenMax) {
345
+ throw `Error: Message is ${msgTokenCount} tokens, exceeding the limit of ${config.mailMessageTokenMax} tokens`;
344
346
  }
345
347
  return msgTokenCount;
346
348
  }
@@ -8,6 +8,7 @@ import * as output from "../utils/output.js";
8
8
  import * as utilities from "../utils/utilities.js";
9
9
  // A bad play on words, but this is like lynx but for LLMs..
10
10
  let debugMode = false;
11
+ const _contentCache = new Map();
11
12
  /** Links numbers are unique in the context so that `llmynx follow <linknum>` can be called on all previous output */
12
13
  const _globalLinkMap = new Map();
13
14
  const _globalUrlMap = new Map();
@@ -15,63 +16,106 @@ let _nextGlobalLinkNum = 1;
15
16
  export async function handleCommand(cmdArgs) {
16
17
  outputInDebugMode("LLMYNX DEBUG MODE IS ON");
17
18
  const argParams = cmdArgs.split(" ");
18
- const defualtTokenMax = config.agent.tokenMax / 8;
19
19
  if (!argParams[0]) {
20
20
  argParams[0] = "help";
21
21
  }
22
22
  switch (argParams[0]) {
23
23
  case "help":
24
- return `llmynx <command> (results will be reduced to around ${defualtTokenMax})
24
+ return `llmynx <command> (results will be reduced to around ${config.webTokenMax} tokens)
25
25
  search <query>: Search google for the given query
26
26
  open <url>: Opens the given url. Links are represented as numbers in brackets which prefix the word they are linking like [123]
27
27
  follow <link number>: Opens the given link number. Link numbers work across all previous outputs
28
- links <url> <page>: Lists only the links for the given url. Use the page number to get more links`;
28
+ links <url> <page>: Lists only the links for the given url. Use the page number to get more links
29
+
30
+ *llmynx does not support input. Use llmynx or curl to call APIs directly*`;
29
31
  case "search": {
30
32
  const query = argParams.slice(1).join(" ");
31
- return await loadUrl("https://www.google.com/search?q=" + encodeURIComponent(query), config.agent.tokenMax / 2, // Prevent form being reduced as google results are usually short anyways and we want to maintainq the links
32
- true, true);
33
+ return await loadUrlContent("https://www.google.com/search?q=" + encodeURIComponent(query), true, true);
33
34
  }
34
35
  case "open": {
35
36
  const url = argParams[1];
36
- const isNumber = !isNaN(parseInt(argParams[2]));
37
- const tokenMax = isNumber ? parseInt(argParams[2]) : defualtTokenMax;
38
- return await loadUrl(url, tokenMax, false, true);
37
+ return await loadUrlContent(url, false, true);
39
38
  }
40
39
  case "follow": {
41
40
  const linkNum = parseInt(argParams[1]);
42
- const isNumber = !isNaN(parseInt(argParams[2]));
43
- const tokenMax = isNumber ? parseInt(argParams[2]) : defualtTokenMax;
44
41
  const linkUrl = _globalLinkMap.get(linkNum);
45
42
  if (!linkUrl) {
46
43
  return "Link number not found";
47
44
  }
48
- return await loadUrl(linkUrl, tokenMax, true, false);
45
+ return await loadUrlContent(linkUrl, true, false);
49
46
  }
50
47
  case "links": {
51
48
  const url = argParams[1];
52
49
  const isNumber = !isNaN(parseInt(argParams[2]));
53
50
  const pageNumber = isNumber ? parseInt(argParams[2]) : 1;
54
- return await loadUrl(url, 600, false, false, pageNumber);
51
+ return await loadUrlLinks(url, pageNumber);
55
52
  }
56
53
  // Secret command to toggle debug mode
57
54
  case "debug":
58
55
  debugMode = !debugMode;
59
56
  return "Debug mode toggled " + (debugMode ? "on" : "off");
60
57
  default:
61
- return "Unknown llmynx command: " + argParams[0];
58
+ return ("Error, unknown command. See valid commands below:\n" +
59
+ (await handleCommand("help")));
62
60
  }
63
61
  }
64
- async function loadUrl(url, tokenMax, showUrl, showFollowHint, linkPageAsContent) {
62
+ /** The content here is not reduced by an LLM, just a paged list of global links is returned */
63
+ async function loadUrlLinks(url, linkPageAsContent) {
65
64
  let content = await runLynx(url);
66
65
  let links = "";
67
66
  // Reverse find 'References: ' and cut everything after it from the content
68
67
  const refPos = content.lastIndexOf("References\n");
68
+ if (refPos > 0) {
69
+ links = content.slice(refPos);
70
+ content = "";
71
+ }
72
+ else {
73
+ return "No Links Found";
74
+ }
75
+ // Iterate links and de-duplicate
76
+ const linkLines = links.split("\n");
77
+ const linkSet = new Set();
78
+ for (const linkLine of linkLines) {
79
+ const dotPos = linkLine.indexOf(".");
80
+ if (dotPos < 0) {
81
+ continue;
82
+ }
83
+ const url = linkLine.substring(dotPos + 1).trim();
84
+ if (!linkSet.has(url)) {
85
+ linkSet.add(url);
86
+ content += url + "\n";
87
+ }
88
+ }
89
+ // Get the token size of the output
90
+ const linksTokenSize = utilities.getTokenCount(content);
91
+ outputInDebugMode(`Links Token size: ${linksTokenSize}`);
92
+ // Reduce content using LLM if it's over the token max
93
+ if (linksTokenSize > config.webTokenMax) {
94
+ content = await reduceContent(url, content, linksTokenSize, linkPageAsContent);
95
+ }
96
+ else {
97
+ output.comment(`Link Content is already under ${config.webTokenMax} tokens.`);
98
+ content = globalizeLinkList(content);
99
+ }
100
+ return content;
101
+ }
102
+ async function loadUrlContent(url, showUrl, showFollowHint) {
103
+ const originalContent = await runLynx(url);
104
+ let content = originalContent;
105
+ let links = "";
106
+ // Reverse find 'References: ' and cut everything after it from the content
107
+ const refPos = content.lastIndexOf("References\n");
69
108
  if (refPos > 0) {
70
109
  links = content.slice(refPos);
71
110
  content = content.slice(0, refPos);
72
111
  }
73
- if (linkPageAsContent) {
74
- content = links;
112
+ let usingCachedContent = false;
113
+ if (_contentCache.has(url)) {
114
+ const cachedContent = _contentCache.get(url);
115
+ if (cachedContent.originalContent === originalContent) {
116
+ content = cachedContent.reducedContent;
117
+ usingCachedContent = true;
118
+ }
75
119
  }
76
120
  // Get the token size of the output
77
121
  const contentTokenSize = utilities.getTokenCount(content);
@@ -79,41 +123,18 @@ async function loadUrl(url, tokenMax, showUrl, showFollowHint, linkPageAsContent
79
123
  outputInDebugMode(`Content Token size: ${contentTokenSize}\n` +
80
124
  `Links Token size: ${linksTokenSize}`);
81
125
  // Reduce content using LLM if it's over the token max
82
- if (contentTokenSize > tokenMax) {
83
- const model = getLLModel(config.agent.webModel);
84
- // For example if context is 16k, and max tokens is 2k, 3k with 1.5x overrun
85
- // That would be 3k for the current compressed content, 10k for the chunk, and 3k for the output
86
- let tokenChunkSize = model.maxTokens - tokenMax * 2 * 1.5;
87
- if (linkPageAsContent) {
88
- tokenChunkSize = tokenMax;
89
- }
90
- outputInDebugMode(`Token max chunk size: ${tokenChunkSize}`);
91
- const pieceCount = Math.ceil(contentTokenSize / tokenChunkSize);
92
- const pieceSize = content.length / pieceCount;
93
- let reducedOutput = "";
94
- for (let i = 0; i < pieceCount; i++) {
95
- const startPos = i * pieceSize;
96
- const pieceStr = content.substring(startPos, startPos + pieceSize);
97
- if (linkPageAsContent) {
98
- if (linkPageAsContent === i + 1) {
99
- return formatLinkPiece(pieceStr);
100
- }
101
- continue;
102
- }
103
- output.comment(`Processing Piece ${i + 1} of ${pieceCount}...`);
104
- outputInDebugMode(` Reduced output tokens: ${utilities.getTokenCount(reducedOutput)}\n` +
105
- ` Current Piece tokens: ${utilities.getTokenCount(pieceStr)}`);
106
- reducedOutput = await llmReduce(url, reducedOutput, i + 1, pieceCount, pieceStr, tokenMax);
107
- }
108
- if (linkPageAsContent) {
109
- return "";
110
- }
111
- content = reducedOutput;
112
- const finalTokenSize = utilities.getTokenCount(reducedOutput);
113
- output.comment(`Content reduced from ${contentTokenSize} to ${finalTokenSize} tokens`);
126
+ if (usingCachedContent) {
127
+ output.comment("No changes detected, using already cached reduced content");
128
+ }
129
+ else if (contentTokenSize > config.webTokenMax) {
130
+ content = await reduceContent(url, content, contentTokenSize);
131
+ _contentCache.set(url, {
132
+ originalContent,
133
+ reducedContent: content,
134
+ });
114
135
  }
115
136
  else {
116
- output.comment(`Content is already under ${tokenMax} tokens.`);
137
+ output.comment(`Content is already under ${config.webTokenMax} tokens.`);
117
138
  }
118
139
  // Prefix content with url if following as otherwise the url is never shown
119
140
  if (showUrl) {
@@ -132,33 +153,70 @@ async function runLynx(url) {
132
153
  const modeParams = "";
133
154
  const ifWindows = os.platform() === "win32" ? "wsl " : "";
134
155
  exec(`${ifWindows}lynx -dump ${modeParams} "${url}"`, (error, stdout, stderr) => {
135
- if (error) {
136
- resolve(`error: ${error.message}`);
137
- return;
156
+ let output = "";
157
+ if (stdout) {
158
+ output += stdout;
159
+ }
160
+ // I've only seen either/or, but just in case
161
+ if (stdout && stderr) {
162
+ output += "\nError:\n";
138
163
  }
139
164
  if (stderr) {
140
- resolve(`stderr: ${stderr}`);
141
- return;
165
+ output += stderr;
142
166
  }
143
- resolve(stdout);
167
+ resolve(output);
144
168
  });
145
169
  });
146
170
  }
147
- async function llmReduce(url, reducedOutput, pieceNumber, pieceTotal, pieceStr, tokenMax) {
148
- const systemMessage = `You will be iteratively fed the web page ${url} broken into ${pieceTotal} sequential equally sized pieces.
149
- Each piece should be reduced into the final content in order to maintain the meaning of the page while reducing verbosity and duplication.
150
- The final output should be around ${tokenMax} tokens.
151
- Don't remove links which are represented as numbers in brackets which prefix the word they are linking like [123].
171
+ async function reduceContent(url, content, contentTokenSize, linkPageAsContent) {
172
+ const model = getLLModel(config.agent.webModel);
173
+ // For example if context is 16k, and max tokens is 2k, 3k with 1.5x overrun
174
+ // That would be 3k for the current compressed content, 10k for the chunk, and 3k for the output
175
+ let tokenChunkSize = model.maxTokens - config.webTokenMax * 2 * 1.5;
176
+ if (linkPageAsContent) {
177
+ tokenChunkSize = config.webTokenMax;
178
+ }
179
+ outputInDebugMode(`Token max chunk size: ${tokenChunkSize}`);
180
+ const pieceCount = Math.ceil(contentTokenSize / tokenChunkSize);
181
+ const pieceSize = content.length / pieceCount;
182
+ let reducedOutput = "";
183
+ for (let i = 0; i < pieceCount; i++) {
184
+ const startPos = i * pieceSize;
185
+ const pieceStr = content.substring(startPos, startPos + pieceSize);
186
+ if (linkPageAsContent) {
187
+ if (linkPageAsContent === i + 1) {
188
+ return globalizeLinkList(pieceStr);
189
+ }
190
+ continue;
191
+ }
192
+ output.comment(`Processing Piece ${i + 1} of ${pieceCount} with ${model.key}...`);
193
+ outputInDebugMode(` Reduced output tokens: ${utilities.getTokenCount(reducedOutput)}\n` +
194
+ ` Current Piece tokens: ${utilities.getTokenCount(pieceStr)}`);
195
+ reducedOutput = await llmReduce(url, reducedOutput, i + 1, pieceCount, pieceStr);
196
+ }
197
+ if (linkPageAsContent) {
198
+ return "";
199
+ }
200
+ const finalTokenSize = utilities.getTokenCount(reducedOutput);
201
+ output.comment(`Content reduced from ${contentTokenSize} to ${finalTokenSize} tokens`);
202
+ return reducedOutput;
203
+ }
204
+ async function llmReduce(url, reducedOutput, pieceNumber, pieceTotal, pieceStr) {
205
+ const systemMessage = `You will be iteratively fed the web page "${url}" broken into ${pieceTotal} pieces.
206
+ Each 'Web Page Piece' should be merged with the in order 'Current Reduced Content' to maintain the meaning of the page while reducing verbosity and duplication.
207
+ The final output should be around ${config.webTokenMax} tokens.
208
+ Links are represented as numbers in brackets, for example [4]. Try not to remove them in the 'Final Merged Content'
152
209
  Try to prioritize content of substance over advertising content.`;
153
- const content = `Web page piece ${pieceNumber} of ${pieceTotal}:
210
+ const content = `Web Page Piece ${pieceNumber} of ${pieceTotal}:
154
211
  ${pieceStr}
155
212
 
156
- Current reduced content:
213
+ Please merge the 'Web Page Piece' above into the 'Current Reduced Content' below while keeping the result to around ${config.webTokenMax} tokens.
214
+
215
+ Current Reduced Content:
157
216
  ${reducedOutput}
158
217
 
159
- Please merge the new piece into the existing reduced content above while keeping the result to around ${tokenMax} tokens.
160
218
 
161
- Merged reduced content:
219
+ Final Merged Content:
162
220
  `;
163
221
  const context = {
164
222
  role: LlmRole.User,
@@ -207,23 +265,19 @@ function registerUrl(url) {
207
265
  }
208
266
  return globalLinkNum;
209
267
  }
210
- function formatLinkPiece(pieceStr) {
268
+ function globalizeLinkList(pieceStr) {
211
269
  const alreadySeen = new Set();
212
270
  const linkLines = pieceStr.split("\n");
213
- let links = "";
271
+ let globalLinks = "";
214
272
  for (const linkLine of linkLines) {
215
- const dotPos = linkLine.indexOf(".");
216
- if (dotPos < 0) {
217
- continue;
218
- }
219
- const url = linkLine.substring(dotPos + 1).trim();
220
- if (alreadySeen.has(url)) {
273
+ const url = linkLine.trim();
274
+ if (!url || alreadySeen.has(url)) {
221
275
  continue;
222
276
  }
223
277
  alreadySeen.add(url);
224
278
  const globalLinkNum = registerUrl(url);
225
- links += `[${globalLinkNum}]${url}\n`;
279
+ globalLinks += `[${globalLinkNum}]${url}\n`;
226
280
  }
227
- return links;
281
+ return globalLinks;
228
282
  }
229
283
  //# sourceMappingURL=llmynx.js.map
@@ -5,9 +5,9 @@ import * as config from "../config.js";
5
5
  import * as contextManager from "../llm/contextManager.js";
6
6
  import { ContentSource } from "../llm/contextManager.js";
7
7
  import * as costTracker from "../llm/costTracker.js";
8
+ import * as dreamMaker from "../llm/dreamMaker.js";
8
9
  import * as inputMode from "../utils/inputMode.js";
9
10
  import { InputMode } from "../utils/inputMode.js";
10
- import * as logService from "../utils/logService.js";
11
11
  import * as output from "../utils/output.js";
12
12
  import { OutputColor } from "../utils/output.js";
13
13
  import * as utilities from "../utils/utilities.js";
@@ -20,7 +20,6 @@ export var NextCommandAction;
20
20
  NextCommandAction[NextCommandAction["EndSession"] = 1] = "EndSession";
21
21
  NextCommandAction[NextCommandAction["ExitApplication"] = 2] = "ExitApplication";
22
22
  })(NextCommandAction || (NextCommandAction = {}));
23
- export let previousSessionNotes = await logService.getPreviousEndSessionNote();
24
23
  export async function processCommand(prompt, consoleInput) {
25
24
  // We process the lines one at a time so we can support multiple commands with line breaks
26
25
  let firstLine = true;
@@ -61,16 +60,18 @@ export async function processCommand(prompt, consoleInput) {
61
60
  switch (cmdParams[0]) {
62
61
  case "comment": {
63
62
  // Important - Hint the LLM to turn their thoughts into accounts
63
+ // ./bin/comment shell script has the same message
64
64
  await contextManager.append("Comment noted. Try running commands now to achieve your goal.");
65
65
  break;
66
66
  }
67
67
  case "endsession": {
68
68
  // Don't need to check end line as this is the last command in the context, just read to the end
69
- previousSessionNotes = utilities.trimChars(cmdArgs, '"');
70
- if (!previousSessionNotes) {
69
+ const endSessionNotes = utilities.trimChars(cmdArgs, '"');
70
+ if (!endSessionNotes) {
71
71
  await contextManager.append(`End session notes are required. Use endsession "<notes>"`);
72
72
  break;
73
73
  }
74
+ await dreamMaker.goodnight();
74
75
  await output.commentAndLog("------------------------------------------------------");
75
76
  nextCommandAction = NextCommandAction.EndSession;
76
77
  processNextLLMpromptBlock = false;
@@ -5,6 +5,7 @@ import * as llmynx from "../apps/llmynx.js";
5
5
  import * as config from "../config.js";
6
6
  import * as contextManager from "../llm/contextManager.js";
7
7
  import { ContentSource } from "../llm/contextManager.js";
8
+ import * as dreamMaker from "../llm/dreamMaker.js";
8
9
  import { LlmRole } from "../llm/llmDtos.js";
9
10
  import * as llmService from "../llm/llmService.js";
10
11
  import * as inputMode from "../utils/inputMode.js";
@@ -33,11 +34,13 @@ export async function run() {
33
34
  while (nextCommandAction != NextCommandAction.ExitApplication) {
34
35
  inputMode.toggle(InputMode.LLM);
35
36
  await output.commentAndLog("Starting Context:");
36
- await contextManager.append("Previous Session Note:");
37
- await contextManager.append(commandHandler.previousSessionNotes || "None");
38
- if (await llmail.hasMultipleUsers()) {
39
- await commandHandler.processCommand(await promptBuilder.getPrompt(), "llmail help");
40
- await commandHandler.processCommand(await promptBuilder.getPrompt(), "llmail users");
37
+ const latestDream = await dreamMaker.goodmorning();
38
+ if (latestDream) {
39
+ await contextManager.append("Previous Session Notes:");
40
+ await contextManager.append(latestDream);
41
+ }
42
+ for (const initialCommand of config.agent.initialCommands) {
43
+ await commandHandler.processCommand(await promptBuilder.getPrompt(0, false), config.resolveConfigVars(initialCommand));
41
44
  }
42
45
  inputMode.toggle(InputMode.Debug);
43
46
  let pauseSeconds = config.agent.debugPauseSeconds;
@@ -38,7 +38,10 @@ export async function handleCommand(input) {
38
38
  if (tokenCount > config.shellOutputTokenMax) {
39
39
  outputLimitExceeded = true;
40
40
  const trimLength = (text.length * config.shellOutputTokenMax) / tokenCount;
41
- text = text.slice(0, trimLength);
41
+ text =
42
+ text.slice(0, trimLength / 2) +
43
+ "\n\n...\n\n" +
44
+ text.slice(-trimLength / 2);
42
45
  }
43
46
  await contextManager.append(text);
44
47
  if (outputLimitExceeded) {
@@ -170,14 +170,16 @@ function resetProcess() {
170
170
  * May also help with common escaping errors */
171
171
  function runCommandFromScript(command) {
172
172
  const scriptPath = `${config.naisysFolder}/home/${config.agent.username}/.command.tmp.sh`;
173
- // set -e causes the script to exit on any error
173
+ // set -e causes the script to exit on the first error
174
174
  const scriptContent = `#!/bin/bash
175
175
  set -e
176
176
  cd ${_currentPath}
177
177
  ${command.trim()}`;
178
178
  // create/writewrite file
179
179
  fs.writeFileSync(naisysToHostPath(scriptPath), scriptContent);
180
- // Source will run the script in the current shell, so any change directories in the script should persist in the current shell
181
- return `source ${scriptPath}`;
180
+ // `Path` is set to the ./bin folder because custom NAISYS commands that follow shell commands will be handled by the shell, which will fail
181
+ // so we need to remind the LLM that 'naisys commands cannot be used with other commands on the same prompt'
182
+ // `source` will run the script in the current shell, so any change directories in the script will persist in the current shell
183
+ return `PATH=${config.binPath}:$PATH source ${scriptPath}`;
182
184
  }
183
185
  //# sourceMappingURL=shellWrapper.js.map
package/dist/config.js CHANGED
@@ -8,8 +8,10 @@ program.argument("<agent-path>", "Path to agent configuration file").parse();
8
8
  dotenv.config();
9
9
  /** The system name that shows after the @ in the command prompt */
10
10
  export const hostname = "naisys";
11
- export const shellOutputTokenMax = 2500;
12
- export const shellCommmandTimeoutSeconds = 10;
11
+ export const shellOutputTokenMax = 2500; // Limits the size of files that can be read/wrote
12
+ export const shellCommmandTimeoutSeconds = 15; // The number of seconds NAISYS will wait for a shell command to complete
13
+ export const webTokenMax = 2500;
14
+ export const mailMessageTokenMax = 400;
13
15
  /* .env is used for global configs across naisys, while agent configs are for the specific agent */
14
16
  export const naisysFolder = getEnv("NAISYS_FOLDER", true);
15
17
  export const websiteFolder = getEnv("WEBSITE_FOLDER");
@@ -21,31 +23,44 @@ export const anthropicApiKey = getEnv("ANTHROPIC_API_KEY");
21
23
  export const agent = loadAgentConfig();
22
24
  function loadAgentConfig() {
23
25
  const agentPath = program.args[0];
24
- const checkAgentConfig = yaml.load(fs.readFileSync(agentPath, "utf8"));
26
+ const config = yaml.load(fs.readFileSync(agentPath, "utf8"));
25
27
  // throw if any property is undefined
26
28
  for (const key of [
27
29
  "username",
28
30
  "title",
29
31
  "shellModel",
30
- "webModel",
31
32
  "agentPrompt",
32
33
  "spendLimitDollars",
33
34
  "tokenMax",
34
35
  // other properties can be undefined
35
36
  ]) {
36
- if (!valueFromString(checkAgentConfig, key)) {
37
+ if (!valueFromString(config, key)) {
37
38
  throw `Agent config: Error, ${key} is not defined`;
38
39
  }
39
40
  }
40
- if (!checkAgentConfig.commandProtection) {
41
- checkAgentConfig.commandProtection = CommandProtection.None;
41
+ // Sanitize input
42
+ if (!config.initialCommands) {
43
+ config.initialCommands = [];
42
44
  }
43
- if (!Object.values(CommandProtection).includes(checkAgentConfig.commandProtection)) {
45
+ else if (!Array.isArray(config.initialCommands)) {
46
+ throw `Agent config: Error, 'initialCommands' is not an array`;
47
+ }
48
+ config.debugPauseSeconds = config.debugPauseSeconds
49
+ ? Number(config.debugPauseSeconds)
50
+ : 0;
51
+ config.wakeOnMessage = Boolean(config.wakeOnMessage);
52
+ config.webModel || (config.webModel = config.shellModel);
53
+ config.dreamModel || (config.dreamModel = config.shellModel);
54
+ if (!config.commandProtection) {
55
+ config.commandProtection = CommandProtection.None;
56
+ }
57
+ if (!Object.values(CommandProtection).includes(config.commandProtection)) {
44
58
  throw `Agent config: Error, 'commandProtection' is not a valid value`;
45
59
  }
46
- return checkAgentConfig;
60
+ return config;
47
61
  }
48
62
  export const packageVersion = await getVersion();
63
+ export const binPath = getBinPath();
49
64
  /** Can only get version from env variable when naisys is started with npm,
50
65
  * otherwise need to rip it from the package ourselves relative to where this file is located */
51
66
  async function getVersion() {
@@ -67,4 +82,28 @@ function getEnv(key, required) {
67
82
  }
68
83
  return value;
69
84
  }
85
+ export function resolveConfigVars(templateString) {
86
+ let resolvedString = templateString;
87
+ resolvedString = resolveTemplateVars(resolvedString, "agent", agent);
88
+ resolvedString = resolveTemplateVars(resolvedString, "env", process.env);
89
+ return resolvedString;
90
+ }
91
+ function resolveTemplateVars(templateString, allowedVarString, mappedVar) {
92
+ const pattern = new RegExp(`\\$\\{${allowedVarString}\\.([^}]+)\\}`, "g");
93
+ return templateString.replace(pattern, (match, key) => {
94
+ const value = valueFromString(mappedVar, key);
95
+ if (value === undefined) {
96
+ throw `Agent config: Error, ${key} is not defined`;
97
+ }
98
+ return value;
99
+ });
100
+ }
101
+ function getBinPath() {
102
+ // C:/git/naisys/dist/config.js
103
+ let binPath = new URL("../bin", import.meta.url).pathname;
104
+ if (binPath.startsWith("/C:")) {
105
+ binPath = "/mnt/c" + binPath.substring(3);
106
+ }
107
+ return binPath;
108
+ }
70
109
  //# sourceMappingURL=config.js.map
@@ -5,7 +5,6 @@ import * as logService from "../utils/logService.js";
5
5
  import * as output from "../utils/output.js";
6
6
  import { OutputColor } from "../utils/output.js";
7
7
  import * as utilities from "../utils/utilities.js";
8
- import { valueFromString } from "../utils/utilities.js";
9
8
  import { LlmRole } from "./llmDtos.js";
10
9
  export var ContentSource;
11
10
  (function (ContentSource) {
@@ -23,8 +22,7 @@ export function getSystemMessage() {
23
22
  // A lot of the stipulations in here are to prevent common LLM mistakes
24
23
  // Like we can't jump between standard and special commands in a single prompt, which the LLM will try to do if not warned
25
24
  let agentPrompt = config.agent.agentPrompt;
26
- agentPrompt = resolveTemplateVars(agentPrompt, "agent", config.agent);
27
- agentPrompt = resolveTemplateVars(agentPrompt, "env", process.env);
25
+ agentPrompt = config.resolveConfigVars(agentPrompt);
28
26
  const systemMessage = `${agentPrompt.trim()}
29
27
 
30
28
  This is a command line interface presenting you with the next command prompt.
@@ -38,12 +36,12 @@ NAISYS ${config.packageVersion} Shell
38
36
  Welcome back ${config.agent.username}!
39
37
  MOTD:
40
38
  Date: ${new Date().toLocaleString()}
41
- Commands:
42
- Standard Unix commands are available
39
+ LINUX Commands:
40
+ Standard Linux commands are available
43
41
  vi and nano are not supported
44
42
  Read files with cat. Write files with \`cat > filename << 'EOF'\`
45
43
  Do not input notes after the prompt. Only valid commands.
46
- Special Commands: (Don't mix with standard commands on the same prompt)
44
+ NAISYS Commands: (cannot be used with other commands on the same prompt)
47
45
  llmail: A local mail system for communicating with your team
48
46
  llmynx: A context optimized web browser. Enter 'llmynx help' to learn how to use it
49
47
  comment "<thought>": Any non-command output like thinking out loud, prefix with the 'comment' command
@@ -51,23 +49,12 @@ Special Commands: (Don't mix with standard commands on the same prompt)
51
49
  endsession "<note>": Ends this session, clears the console log and context.
52
50
  The note should help you find your bearings in the next session.
53
51
  The note should contain your next goal, and important things should you remember.
54
- Try to keep the note around 400 tokens.
55
52
  Tokens:
56
53
  The console log can only hold a certain number of 'tokens' that is specified in the prompt
57
54
  Make sure to call endsession before the limit is hit so you can continue your work with a fresh console`;
58
55
  _cachedSystemMessage = systemMessage;
59
56
  return systemMessage;
60
57
  }
61
- function resolveTemplateVars(templateString, allowedVarString, mappedVar) {
62
- const pattern = new RegExp(`\\$\\{${allowedVarString}\\.([^}]+)\\}`, "g");
63
- return templateString.replace(pattern, (match, key) => {
64
- const value = valueFromString(mappedVar, key);
65
- if (value === undefined) {
66
- throw `Agent config: Error, ${key} is not defined`;
67
- }
68
- return value;
69
- });
70
- }
71
58
  export let messages = [];
72
59
  export async function append(text, source = ContentSource.Console) {
73
60
  // Debug runs in a shadow mode where their activity is not recorded in the context
@@ -0,0 +1,71 @@
1
+ import * as config from "../config.js";
2
+ import * as dbUtils from "../utils/dbUtils.js";
3
+ import * as output from "../utils/output.js";
4
+ import { naisysToHostPath } from "../utils/utilities.js";
5
+ import * as contextManager from "./contextManager.js";
6
+ import { LlmRole } from "./llmDtos.js";
7
+ import * as llmService from "./llmService.js";
8
// Host-filesystem path to this install's dream database.
const _dbFilePath = naisysToHostPath(`${config.naisysFolder}/lib/dream.db`);

// Top-level await: guarantee the schema exists before any export is used.
await init();

/**
 * Creates the DreamLog table the first time the database file is created.
 * If the database already existed, return early without opening a
 * connection at all (the original opened one just to immediately return).
 */
async function init() {
  const newDbCreated = await dbUtils.initDatabase(_dbFilePath);
  if (!newDbCreated) {
    return;
  }
  await usingDatabase(async (db) => {
    await db.exec(`CREATE TABLE DreamLog (
          id INTEGER PRIMARY KEY, 
          username TEXT NOT NULL,
          date TEXT NOT NULL,
          dream TEXT NOT NULL
    )`);
  });
}
24
/**
 * Fetches the most recent dream recorded for the current agent, or
 * undefined if the agent has never dreamed before.
 */
export async function goodmorning() {
  return await usingDatabase(async (db) => {
    const latestDream = await db.get(
      `SELECT dream 
        FROM DreamLog 
        WHERE username = ? 
        ORDER BY date DESC LIMIT 1`,
      config.agent.username,
    );
    return latestDream?.dream;
  });
}
33
/**
 * Ends the day: condenses the session context into a "dream", persists it
 * to the DreamLog, and returns it to the caller.
 */
export async function goodnight() {
  output.comment("Dreaming about the session...");
  const sessionDream = await runDreamSequence();
  await storeDream(sessionDream);
  return sessionDream;
}
39
/**
 * Asks the dream model to distill the full session context (every message
 * in the context manager) into a compact set of notes — references, plans,
 * schemas, file locations, urls — that the agent can use to orient itself
 * at the start of the next session.
 * Fix: corrected "unconcious" -> "unconscious" in the system prompt sent to
 * the LLM.
 */
async function runDreamSequence() {
  const systemMessage = `You are ${config.agent.username}'s unconscious sleep process. You compile all ${config.agent.username}'s 
thoughts during the day and reduce them down to important things to remember - references, plans, project structure, schemas, 
file locations, urls, and more. You are the sleep process, and you are the most important process. Using your results, 
when ${config.agent.username} wakes up they'll know exactly what to do and how to do it.`;

  // Flatten the entire session context into a single user message
  const allTheThings = contextManager.messages.map((m) => m.content).join("\n");

  return await llmService.query(config.agent.dreamModel, systemMessage, [
    {
      role: LlmRole.User,
      content: allTheThings,
    },
    {
      role: LlmRole.Assistant,
      content: "We sure had an eventful day",
    },
    {
      role: LlmRole.User,
      content: `Dream on all these things and let me know what you come up with. Use what was done in the previous session as a guide 
for what's possible tomorrow. Don't overload yourself with too many thoughts and ideas. Keep important references for the future 
but don't go into any great detail of future plans unless it's happening soon. `,
    },
  ], "dream");
}
62
// Opens the dream database, runs the given callback against it, and
// ensures it is closed again afterwards (delegated to dbUtils).
async function usingDatabase(run) {
  return await dbUtils.usingDatabase(_dbFilePath, run);
}
65
// Persists a dream for the current agent, stamped with the database's
// current UTC time.
async function storeDream(dream) {
  await usingDatabase((db) =>
    db.run(
      `INSERT INTO DreamLog (username, date, dream) 
        VALUES (?, datetime('now'), ?)`,
      config.agent.username,
      dream,
    ),
  );
}
71
+ //# sourceMappingURL=dreamMaker.js.map
@@ -80,24 +80,37 @@ async function sendWithGoogle(modelKey, systemMessage, context, source) {
80
80
  if (lastMessage.role !== LlmRole.User) {
81
81
  throw "Error, last message on context is not a user message";
82
82
  }
83
+ const contextHistory = context
84
+ .filter((m) => m != lastMessage)
85
+ .map((m) => ({
86
+ role: m.role == LlmRole.Assistant ? "model" : "user",
87
+ parts: [
88
+ {
89
+ text: m.content,
90
+ },
91
+ ],
92
+ }));
83
93
  const history = [
84
94
  {
85
95
  role: LlmRole.User, // System role is not supported by Google API
86
- parts: systemMessage,
96
+ parts: [
97
+ {
98
+ text: systemMessage,
99
+ },
100
+ ],
87
101
  },
88
102
  {
89
103
  role: "model",
90
- parts: "Understood",
104
+ parts: [
105
+ {
106
+ text: "Understood",
107
+ },
108
+ ],
91
109
  },
92
- ...context
93
- .filter((m) => m != lastMessage)
94
- .map((m) => ({
95
- role: m.role == LlmRole.Assistant ? "model" : LlmRole.User,
96
- parts: m.content,
97
- })),
110
+ ...contextHistory,
98
111
  ];
99
112
  const chat = googleModel.startChat({
100
- history: history,
113
+ history,
101
114
  generationConfig: {
102
115
  maxOutputTokens: 2000,
103
116
  },
@@ -87,19 +87,6 @@ function appendToLogFile(filepath, message) {
87
87
  </td>
88
88
  </tr>`);
89
89
  }
90
- export function getPreviousEndSessionNote() {
91
- // Find the most recent message in the log that starts with 'endsession' for the local user
92
- return usingDatabase(async (db) => {
93
- const result = await db.get(`SELECT message
94
- FROM ContextLog
95
- WHERE username = ? AND message LIKE 'endsession %'
96
- ORDER BY id DESC
97
- LIMIT 1`, [config.agent.username]);
98
- const endSessionMsg = result === null || result === void 0 ? void 0 : result.message;
99
- // Trim endsession prefix
100
- return (endSessionMsg === null || endSessionMsg === void 0 ? void 0 : endSessionMsg.slice("endsession ".length)) || "";
101
- });
102
- }
103
90
// Opens the context-log database, runs the callback, and closes it again
// (delegated to dbUtils).
async function usingDatabase(run) {
  return await dbUtils.usingDatabase(_dbFilePath, run);
}
package/package.json CHANGED
@@ -1,19 +1,19 @@
1
1
  {
2
2
  "name": "naisys",
3
3
  "description": "Node.js Autonomous Intelligence System",
4
- "version": "1.1.0",
4
+ "version": "1.3.0",
5
5
  "type": "module",
6
6
  "main": "dist/naisys.js",
7
7
  "preferGlobal": true,
8
8
  "bin": {
9
- "naisys": "naisys.sh"
9
+ "naisys": "bin/naisys"
10
10
  },
11
11
  "scripts": {
12
12
  "compile/run/attachable": "tsc && node --inspect dist/naisys.js ./agents/webdev-fansite.yaml",
13
- "run agent:dev": "node dist/naisys.js ./agents/2-team/dev.yaml",
14
- "run agent:admin": "node dist/naisys.js ./agents/2-team/admin.yaml",
13
+ "agent:dev": "node dist/naisys.js ./agents/3-team-dev-db-content/dev.yaml",
14
+ "agent:db": "node dist/naisys.js ./agents/3-team-dev-db-content/db.yaml",
15
+ "agent:content": "node dist/naisys.js ./agents/3-team-dev-db-content/content.yaml",
15
16
  "clean": "rm -rf dist",
16
- "clean:win": "wsl rm -rf dist",
17
17
  "compile": "tsc",
18
18
  "eslint": "npx eslint --rulesdir eslint-rules src",
19
19
  "test": "tsc && node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=dist/__tests__",
@@ -21,8 +21,9 @@
21
21
  "dependency-graph": "madge --image dependency-graph.png dist",
22
22
  "detect-cycles": "madge --circular dist",
23
23
  "updates:check": "npm-check-updates",
24
- "updates:apply": "npm-check-updates -u && npm update",
25
- "npm:publish:dryrun": "npm run clean && npm run compile && npm publish --dry-run"
24
+ "updates:apply": "npm-check-updates -u && npm install",
25
+ "npm:publish:dryrun": "npm run clean && npm install && npm run compile && npm publish --dry-run",
26
+ "postinstall": "chmod +x ./bin/*"
26
27
  },
27
28
  "repository": {
28
29
  "type": "git",
@@ -42,25 +43,25 @@
42
43
  "devDependencies": {
43
44
  "@types/escape-html": "1.0.4",
44
45
  "@types/js-yaml": "4.0.9",
45
- "@types/node": "20.11.25",
46
+ "@types/node": "20.11.28",
46
47
  "@types/text-table": "0.2.5",
47
- "@typescript-eslint/eslint-plugin": "7.1.1",
48
- "@typescript-eslint/parser": "7.1.1",
48
+ "@typescript-eslint/eslint-plugin": "7.2.0",
49
+ "@typescript-eslint/parser": "7.2.0",
49
50
  "eslint": "8.57.0",
50
51
  "jest": "29.7.0",
51
52
  "prettier": "3.2.5",
52
53
  "ts-node": "10.9.2",
53
- "typescript": "5.3.3"
54
+ "typescript": "5.4.2"
54
55
  },
55
56
  "dependencies": {
56
- "@anthropic-ai/sdk": "0.16.1",
57
- "@google/generative-ai": "0.2.1",
57
+ "@anthropic-ai/sdk": "0.18.0",
58
+ "@google/generative-ai": "0.3.0",
58
59
  "chalk": "5.3.0",
59
60
  "commander": "12.0.0",
60
61
  "dotenv": "16.4.5",
61
62
  "escape-html": "1.0.3",
62
63
  "js-yaml": "4.1.0",
63
- "openai": "4.28.4",
64
+ "openai": "4.29.0",
64
65
  "sqlite": "5.1.1",
65
66
  "sqlite3": "5.1.7",
66
67
  "text-table": "0.2.0",