naisys 1.2.0 → 1.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -65,10 +65,15 @@ title: Software Engineer
65
65
 
66
66
  # The model to use for console interactions
67
67
  # (gpt4turbo, gemini-pro, claude3sonnet, claude3opus, local)
68
- shellModel: claude3sonnet
68
+ shellModel: gpt4turbo
69
69
 
70
- # The model to use for llmynx, pre-processing websites to fit into a smaller context
71
- webModel: gpt3turbo
70
+ # Only used between sessions to provide guidance for the next session (use a more powerful model for this)
71
+ # defaults to the shellModel if omitted
72
+ dreamModel: claude3opus
73
+
74
+ # The model to use for llmynx, pre-processing websites to fit into a smaller context (use a cheaper model)
75
+ # defaults to the shellModel if omitted
76
+ webModel: gemini-pro
72
77
 
73
78
  # A system like prompt explaining the agent's role and responsibilities
74
79
  # You can use config variables in this string
package/bin/comment CHANGED
File without changes
package/bin/endsession CHANGED
File without changes
package/bin/llmail CHANGED
File without changes
package/bin/llmynx CHANGED
File without changes
package/bin/naisys CHANGED
File without changes
package/bin/pause CHANGED
File without changes
@@ -69,6 +69,7 @@ async function init() {
69
69
  }
70
70
  });
71
71
  }
72
+ export const waitingForMailMessage = "Waiting for new mail messages...";
72
73
  export async function handleCommand(args) {
73
74
  const argParams = args.split(" ");
74
75
  if (!argParams[0]) {
@@ -84,13 +85,17 @@ export async function handleCommand(args) {
84
85
  if (simpleMode) {
85
86
  return `llmail <command>
86
87
  users: Get list of users on the system
87
- send "<users>" "subject" "message": Send a message. ${config.mailMessageTokenMax} token max.`;
88
+ send "<users>" "subject" "message": Send a message. ${config.mailMessageTokenMax} token max.
89
+ wait: Pause the session until a new mail message is received
90
+
91
 + * Attachments are not supported, use file paths to reference files in emails as all users are on the same machine`;
88
92
  }
89
93
  else {
90
94
  return `llmail <command>
91
95
  no params: List all active threads
92
96
  users: Get list of users on the system
93
97
  send "<users>" "subject" "message": Send a new mail, starting a new thread
98
+ wait: Pause the session until a new mail message is received
94
99
  read <id>: Read a thread
95
100
  reply <id> <message>: Reply to a thread
96
101
  adduser <id> <username>: Add a user to thread with id
@@ -104,6 +109,9 @@ export async function handleCommand(args) {
104
109
  const message = newParams[5];
105
110
  return await newThread(usernames, subject, message);
106
111
  }
112
+ case "wait": {
113
+ return waitingForMailMessage;
114
+ }
107
115
  case "read": {
108
116
  const threadId = parseInt(argParams[1]);
109
117
  return await readThread(threadId);
@@ -8,6 +8,7 @@ import * as output from "../utils/output.js";
8
8
  import * as utilities from "../utils/utilities.js";
9
9
  // A bad play on words, but this is like lynx but for LLMs..
10
10
  let debugMode = false;
11
+ const _contentCache = new Map();
11
12
  /** Links numbers are unique in the context so that `llmynx follow <linknum>` can be called on all previous output */
12
13
  const _globalLinkMap = new Map();
13
14
  const _globalUrlMap = new Map();
@@ -20,7 +21,7 @@ export async function handleCommand(cmdArgs) {
20
21
  }
21
22
  switch (argParams[0]) {
22
23
  case "help":
23
- return `llmynx <command> (results will be reduced to around ${config.webTokenMax})
24
+ return `llmynx <command> (results will be reduced to around ${config.webTokenMax} tokens)
24
25
  search <query>: Search google for the given query
25
26
  open <url>: Opens the given url. Links are represented as numbers in brackets which prefix the word they are linking like [123]
26
27
  follow <link number>: Opens the given link number. Link numbers work across all previous outputs
@@ -29,11 +30,11 @@ export async function handleCommand(cmdArgs) {
29
30
  *llmynx does not support input. Use llmynx or curl to call APIs directly*`;
30
31
  case "search": {
31
32
  const query = argParams.slice(1).join(" ");
32
- return await loadUrl("https://www.google.com/search?q=" + encodeURIComponent(query), true, true);
33
+ return await loadUrlContent("https://www.google.com/search?q=" + encodeURIComponent(query), true, true);
33
34
  }
34
35
  case "open": {
35
36
  const url = argParams[1];
36
- return await loadUrl(url, false, true);
37
+ return await loadUrlContent(url, false, true);
37
38
  }
38
39
  case "follow": {
39
40
  const linkNum = parseInt(argParams[1]);
@@ -41,13 +42,13 @@ export async function handleCommand(cmdArgs) {
41
42
  if (!linkUrl) {
42
43
  return "Link number not found";
43
44
  }
44
- return await loadUrl(linkUrl, true, false);
45
+ return await loadUrlContent(linkUrl, true, false);
45
46
  }
46
47
  case "links": {
47
48
  const url = argParams[1];
48
49
  const isNumber = !isNaN(parseInt(argParams[2]));
49
50
  const pageNumber = isNumber ? parseInt(argParams[2]) : 1;
50
- return await loadUrl(url, false, false, pageNumber);
51
+ return await loadUrlLinks(url, pageNumber);
51
52
  }
52
53
  // Secret command to toggle debug mode
53
54
  case "debug":
@@ -58,17 +59,63 @@ export async function handleCommand(cmdArgs) {
58
59
  (await handleCommand("help")));
59
60
  }
60
61
  }
61
- async function loadUrl(url, showUrl, showFollowHint, linkPageAsContent) {
62
+ /** The content here is not reduced by an LLM, just a paged list of global links is returned */
63
+ async function loadUrlLinks(url, linkPageAsContent) {
62
64
  let content = await runLynx(url);
63
65
  let links = "";
64
66
  // Reverse find 'References: ' and cut everything after it from the content
65
67
  const refPos = content.lastIndexOf("References\n");
68
+ if (refPos > 0) {
69
+ links = content.slice(refPos);
70
+ content = "";
71
+ }
72
+ else {
73
+ return "No Links Found";
74
+ }
75
+ // Iterate links and de-duplicate
76
+ const linkLines = links.split("\n");
77
+ const linkSet = new Set();
78
+ for (const linkLine of linkLines) {
79
+ const dotPos = linkLine.indexOf(".");
80
+ if (dotPos < 0) {
81
+ continue;
82
+ }
83
+ const url = linkLine.substring(dotPos + 1).trim();
84
+ if (!linkSet.has(url)) {
85
+ linkSet.add(url);
86
+ content += url + "\n";
87
+ }
88
+ }
89
+ // Get the token size of the output
90
+ const linksTokenSize = utilities.getTokenCount(content);
91
+ outputInDebugMode(`Links Token size: ${linksTokenSize}`);
92
+ // Reduce content using LLM if it's over the token max
93
+ if (linksTokenSize > config.webTokenMax) {
94
+ content = await reduceContent(url, content, linksTokenSize, linkPageAsContent);
95
+ }
96
+ else {
97
+ output.comment(`Link Content is already under ${config.webTokenMax} tokens.`);
98
+ content = globalizeLinkList(content);
99
+ }
100
+ return content;
101
+ }
102
+ async function loadUrlContent(url, showUrl, showFollowHint) {
103
+ const originalContent = await runLynx(url);
104
+ let content = originalContent;
105
+ let links = "";
106
+ // Reverse find 'References: ' and cut everything after it from the content
107
+ const refPos = content.lastIndexOf("References\n");
66
108
  if (refPos > 0) {
67
109
  links = content.slice(refPos);
68
110
  content = content.slice(0, refPos);
69
111
  }
70
- if (linkPageAsContent) {
71
- content = links;
112
+ let usingCachedContent = false;
113
+ if (_contentCache.has(url)) {
114
+ const cachedContent = _contentCache.get(url);
115
+ if (cachedContent.originalContent === originalContent) {
116
+ content = cachedContent.reducedContent;
117
+ usingCachedContent = true;
118
+ }
72
119
  }
73
120
  // Get the token size of the output
74
121
  const contentTokenSize = utilities.getTokenCount(content);
@@ -76,38 +123,15 @@ async function loadUrl(url, showUrl, showFollowHint, linkPageAsContent) {
76
123
  outputInDebugMode(`Content Token size: ${contentTokenSize}\n` +
77
124
  `Links Token size: ${linksTokenSize}`);
78
125
  // Reduce content using LLM if it's over the token max
79
- if (contentTokenSize > config.webTokenMax) {
80
- const model = getLLModel(config.agent.webModel);
81
- // For example if context is 16k, and max tokens is 2k, 3k with 1.5x overrun
82
- // That would be 3k for the current compressed content, 10k for the chunk, and 3k for the output
83
- let tokenChunkSize = model.maxTokens - config.webTokenMax * 2 * 1.5;
84
- if (linkPageAsContent) {
85
- tokenChunkSize = config.webTokenMax;
86
- }
87
- outputInDebugMode(`Token max chunk size: ${tokenChunkSize}`);
88
- const pieceCount = Math.ceil(contentTokenSize / tokenChunkSize);
89
- const pieceSize = content.length / pieceCount;
90
- let reducedOutput = "";
91
- for (let i = 0; i < pieceCount; i++) {
92
- const startPos = i * pieceSize;
93
- const pieceStr = content.substring(startPos, startPos + pieceSize);
94
- if (linkPageAsContent) {
95
- if (linkPageAsContent === i + 1) {
96
- return formatLinkPiece(pieceStr);
97
- }
98
- continue;
99
- }
100
- output.comment(`Processing Piece ${i + 1} of ${pieceCount} with ${model.key}...`);
101
- outputInDebugMode(` Reduced output tokens: ${utilities.getTokenCount(reducedOutput)}\n` +
102
- ` Current Piece tokens: ${utilities.getTokenCount(pieceStr)}`);
103
- reducedOutput = await llmReduce(url, reducedOutput, i + 1, pieceCount, pieceStr);
104
- }
105
- if (linkPageAsContent) {
106
- return "";
107
- }
108
- content = reducedOutput;
109
- const finalTokenSize = utilities.getTokenCount(reducedOutput);
110
- output.comment(`Content reduced from ${contentTokenSize} to ${finalTokenSize} tokens`);
126
+ if (usingCachedContent) {
127
+ output.comment("No changes detected, using already cached reduced content");
128
+ }
129
+ else if (contentTokenSize > config.webTokenMax) {
130
+ content = await reduceContent(url, content, contentTokenSize);
131
+ _contentCache.set(url, {
132
+ originalContent,
133
+ reducedContent: content,
134
+ });
111
135
  }
112
136
  else {
113
137
  output.comment(`Content is already under ${config.webTokenMax} tokens.`);
@@ -144,21 +168,55 @@ async function runLynx(url) {
144
168
  });
145
169
  });
146
170
  }
171
+ async function reduceContent(url, content, contentTokenSize, linkPageAsContent) {
172
+ const model = getLLModel(config.agent.webModel);
173
+ // For example if context is 16k, and max tokens is 2k, 3k with 1.5x overrun
174
+ // That would be 3k for the current compressed content, 10k for the chunk, and 3k for the output
175
+ let tokenChunkSize = model.maxTokens - config.webTokenMax * 2 * 1.5;
176
+ if (linkPageAsContent) {
177
+ tokenChunkSize = config.webTokenMax;
178
+ }
179
+ outputInDebugMode(`Token max chunk size: ${tokenChunkSize}`);
180
+ const pieceCount = Math.ceil(contentTokenSize / tokenChunkSize);
181
+ const pieceSize = content.length / pieceCount;
182
+ let reducedOutput = "";
183
+ for (let i = 0; i < pieceCount; i++) {
184
+ const startPos = i * pieceSize;
185
+ const pieceStr = content.substring(startPos, startPos + pieceSize);
186
+ if (linkPageAsContent) {
187
+ if (linkPageAsContent === i + 1) {
188
+ return globalizeLinkList(pieceStr);
189
+ }
190
+ continue;
191
+ }
192
+ output.comment(`Processing Piece ${i + 1} of ${pieceCount} with ${model.key}...`);
193
+ outputInDebugMode(` Reduced output tokens: ${utilities.getTokenCount(reducedOutput)}\n` +
194
+ ` Current Piece tokens: ${utilities.getTokenCount(pieceStr)}`);
195
+ reducedOutput = await llmReduce(url, reducedOutput, i + 1, pieceCount, pieceStr);
196
+ }
197
+ if (linkPageAsContent) {
198
+ return "";
199
+ }
200
+ const finalTokenSize = utilities.getTokenCount(reducedOutput);
201
+ output.comment(`Content reduced from ${contentTokenSize} to ${finalTokenSize} tokens`);
202
+ return reducedOutput;
203
+ }
147
204
  async function llmReduce(url, reducedOutput, pieceNumber, pieceTotal, pieceStr) {
148
- const systemMessage = `You will be iteratively fed the web page ${url} broken into ${pieceTotal} sequential equally sized pieces.
149
- Each piece should be reduced into the final content in order to maintain the meaning of the page while reducing verbosity and duplication.
205
+ const systemMessage = `You will be iteratively fed the web page "${url}" broken into ${pieceTotal} pieces.
206
+ Each 'Web Page Piece' should be merged with the in order 'Current Reduced Content' to maintain the meaning of the page while reducing verbosity and duplication.
150
207
  The final output should be around ${config.webTokenMax} tokens.
151
- Don't remove links which are represented as numbers in brackets which prefix the word they are linking like [123].
208
+ Links are represented as numbers in brackets, for example [4]. Try not to remove them in the 'Final Merged Content'
152
209
  Try to prioritize content of substance over advertising content.`;
153
- const content = `Web page piece ${pieceNumber} of ${pieceTotal}:
210
+ const content = `Web Page Piece ${pieceNumber} of ${pieceTotal}:
154
211
  ${pieceStr}
155
212
 
156
- Current reduced content:
213
+ Please merge the 'Web Page Piece' above into the 'Current Reduced Content' below while keeping the result to around ${config.webTokenMax} tokens.
214
+
215
+ Current Reduced Content:
157
216
  ${reducedOutput}
158
217
 
159
- Please merge the new piece into the existing reduced content above while keeping the result to around ${config.webTokenMax} tokens.
160
218
 
161
- Merged reduced content:
219
+ Final Merged Content:
162
220
  `;
163
221
  const context = {
164
222
  role: LlmRole.User,
@@ -207,23 +265,19 @@ function registerUrl(url) {
207
265
  }
208
266
  return globalLinkNum;
209
267
  }
210
- function formatLinkPiece(pieceStr) {
268
+ function globalizeLinkList(pieceStr) {
211
269
  const alreadySeen = new Set();
212
270
  const linkLines = pieceStr.split("\n");
213
- let links = "";
271
+ let globalLinks = "";
214
272
  for (const linkLine of linkLines) {
215
- const dotPos = linkLine.indexOf(".");
216
- if (dotPos < 0) {
217
- continue;
218
- }
219
- const url = linkLine.substring(dotPos + 1).trim();
220
- if (alreadySeen.has(url)) {
273
+ const url = linkLine.trim();
274
+ if (!url || alreadySeen.has(url)) {
221
275
  continue;
222
276
  }
223
277
  alreadySeen.add(url);
224
278
  const globalLinkNum = registerUrl(url);
225
- links += `[${globalLinkNum}]${url}\n`;
279
+ globalLinks += `[${globalLinkNum}]${url}\n`;
226
280
  }
227
- return links;
281
+ return globalLinks;
228
282
  }
229
283
  //# sourceMappingURL=llmynx.js.map
@@ -5,9 +5,9 @@ import * as config from "../config.js";
5
5
  import * as contextManager from "../llm/contextManager.js";
6
6
  import { ContentSource } from "../llm/contextManager.js";
7
7
  import * as costTracker from "../llm/costTracker.js";
8
+ import * as dreamMaker from "../llm/dreamMaker.js";
8
9
  import * as inputMode from "../utils/inputMode.js";
9
10
  import { InputMode } from "../utils/inputMode.js";
10
- import * as logService from "../utils/logService.js";
11
11
  import * as output from "../utils/output.js";
12
12
  import { OutputColor } from "../utils/output.js";
13
13
  import * as utilities from "../utils/utilities.js";
@@ -20,7 +20,6 @@ export var NextCommandAction;
20
20
  NextCommandAction[NextCommandAction["EndSession"] = 1] = "EndSession";
21
21
  NextCommandAction[NextCommandAction["ExitApplication"] = 2] = "ExitApplication";
22
22
  })(NextCommandAction || (NextCommandAction = {}));
23
- export let previousSessionNotes = await logService.getPreviousEndSessionNote();
24
23
  export async function processCommand(prompt, consoleInput) {
25
24
  // We process the lines one at a time so we can support multiple commands with line breaks
26
25
  let firstLine = true;
@@ -67,11 +66,12 @@ export async function processCommand(prompt, consoleInput) {
67
66
  }
68
67
  case "endsession": {
69
68
  // Don't need to check end line as this is the last command in the context, just read to the end
70
- previousSessionNotes = utilities.trimChars(cmdArgs, '"');
71
- if (!previousSessionNotes) {
69
+ const endSessionNotes = utilities.trimChars(cmdArgs, '"');
70
+ if (!endSessionNotes) {
72
71
  await contextManager.append(`End session notes are required. Use endsession "<notes>"`);
73
72
  break;
74
73
  }
74
+ await dreamMaker.goodnight();
75
75
  await output.commentAndLog("------------------------------------------------------");
76
76
  nextCommandAction = NextCommandAction.EndSession;
77
77
  processNextLLMpromptBlock = false;
@@ -90,15 +90,17 @@ export async function processCommand(prompt, consoleInput) {
90
90
  }
91
91
  break;
92
92
  }
93
- // With no argument, in debug mode, pause will pause forever,
94
- // in LLM mode it will pause until a message is receieved
95
- // Don't want the llm to hang itself, but it still can if it's the only agent or if all the agents pause..
96
- // The setting only lasts for the next command, next loop it uses the agent default
97
93
  case "pause": {
94
+ const pauseSeconds = cmdArgs ? parseInt(cmdArgs) : 0;
95
+ // Don't allow the LLM to hang itself
96
+ if (inputMode.current === InputMode.LLM && !pauseSeconds) {
97
 + await contextManager.append("Pause command requires a number of seconds to pause for");
98
+ break;
99
+ }
98
100
  return {
99
101
  nextCommandAction: NextCommandAction.Continue,
100
- pauseSeconds: cmdArgs ? parseInt(cmdArgs) : 0,
101
- wakeOnMessage: inputMode.current === InputMode.LLM,
102
+ pauseSeconds,
103
+ wakeOnMessage: false, // llmail has a 'wait' command that is useful in multi-agent situations
102
104
  };
103
105
  }
104
106
  case "cost": {
@@ -114,6 +116,13 @@ export async function processCommand(prompt, consoleInput) {
114
116
  case "llmail": {
115
117
  const mailResponse = await llmail.handleCommand(cmdArgs);
116
118
  await contextManager.append(mailResponse);
119
+ if (mailResponse == llmail.waitingForMailMessage) {
120
+ return {
121
+ nextCommandAction: NextCommandAction.Continue,
122
+ pauseSeconds: 0,
123
+ wakeOnMessage: true,
124
+ };
125
+ }
117
126
  break;
118
127
  }
119
128
  case "context":
@@ -5,6 +5,7 @@ import * as llmynx from "../apps/llmynx.js";
5
5
  import * as config from "../config.js";
6
6
  import * as contextManager from "../llm/contextManager.js";
7
7
  import { ContentSource } from "../llm/contextManager.js";
8
+ import * as dreamMaker from "../llm/dreamMaker.js";
8
9
  import { LlmRole } from "../llm/llmDtos.js";
9
10
  import * as llmService from "../llm/llmService.js";
10
11
  import * as inputMode from "../utils/inputMode.js";
@@ -33,10 +34,15 @@ export async function run() {
33
34
  while (nextCommandAction != NextCommandAction.ExitApplication) {
34
35
  inputMode.toggle(InputMode.LLM);
35
36
  await output.commentAndLog("Starting Context:");
36
- await contextManager.append("Previous Session Note:");
37
- await contextManager.append(commandHandler.previousSessionNotes || "None");
37
+ const latestDream = await dreamMaker.goodmorning();
38
+ if (latestDream) {
39
+ await contextManager.append("Previous Session Notes:");
40
+ await contextManager.append(latestDream);
41
+ }
38
42
  for (const initialCommand of config.agent.initialCommands) {
39
- await commandHandler.processCommand(await promptBuilder.getPrompt(0, false), config.resolveConfigVars(initialCommand));
43
+ const prompt = await promptBuilder.getPrompt(0, false);
44
+ await contextManager.append(prompt, ContentSource.ConsolePrompt);
45
+ await commandHandler.processCommand(prompt, config.resolveConfigVars(initialCommand));
40
46
  }
41
47
  inputMode.toggle(InputMode.Debug);
42
48
  let pauseSeconds = config.agent.debugPauseSeconds;
@@ -137,12 +143,22 @@ async function handleErrorAndSwitchToDebugMode(e, llmErrorCount, addToContext) {
137
143
  wakeOnMessage,
138
144
  };
139
145
  }
146
+ let mailBlackoutCountdown = 0;
140
147
  async function checkNewMailNotification() {
148
+ let supressMail = false;
149
+ if (mailBlackoutCountdown > 0) {
150
+ mailBlackoutCountdown--;
151
+ supressMail = true;
152
+ }
141
153
  // Check for unread threads
142
154
  const unreadThreads = await llmail.getUnreadThreads();
143
155
  if (!unreadThreads.length) {
144
156
  return;
145
157
  }
158
+ if (supressMail) {
159
+ await output.commentAndLog(`New mail notifications blackout in effect. ${mailBlackoutCountdown} cycles remaining.`);
160
+ return;
161
+ }
146
162
  // Get the new messages for each thread
147
163
  const newMessages = [];
148
164
  for (const { threadId, newMsgId } of unreadThreads) {
@@ -162,10 +178,11 @@ async function checkNewMailNotification() {
162
178
  for (const unreadThread of unreadThreads) {
163
179
  await llmail.markAsRead(unreadThread.threadId);
164
180
  }
181
+ mailBlackoutCountdown = config.mailBlackoutCycles;
165
182
  }
166
183
  else if (llmail.simpleMode) {
167
184
  await contextManager.append(`You have new mail, but not enough context to read them.\n` +
168
- `Finish up what you're doing. After you 'endsession' and the context resets, you will be able to read them.`, ContentSource.Console);
185
+ `After you 'endsession' and the context resets, you will be able to read them.`, ContentSource.Console);
169
186
  }
170
187
  // LLM will in many cases end the session here, when the new session starts
171
188
  // this code will run again, and show a full preview of the messages
package/dist/config.js CHANGED
@@ -8,10 +8,14 @@ program.argument("<agent-path>", "Path to agent configuration file").parse();
8
8
  dotenv.config();
9
9
  /** The system name that shows after the @ in the command prompt */
10
10
  export const hostname = "naisys";
11
- export const shellOutputTokenMax = 2500; // Limits the size of files that can be read/wrote
12
- export const shellCommmandTimeoutSeconds = 15; // The number of seconds NAISYS will wait for a shell command to complete
11
+ /** Limits the size of files that can be read/wrote */
12
 + export const shellOutputTokenMax = 2500;
13
+ /** The number of seconds NAISYS will wait for a shell command to complete */
14
+ export const shellCommmandTimeoutSeconds = 15;
13
15
  export const webTokenMax = 2500;
14
16
  export const mailMessageTokenMax = 400;
17
+ /** Used to prevent the agent from constantly responding to mail and not getting any work done */
18
+ export const mailBlackoutCycles = 3;
15
19
  /* .env is used for global configs across naisys, while agent configs are for the specific agent */
16
20
  export const naisysFolder = getEnv("NAISYS_FOLDER", true);
17
21
  export const websiteFolder = getEnv("WEBSITE_FOLDER");
@@ -29,7 +33,6 @@ function loadAgentConfig() {
29
33
  "username",
30
34
  "title",
31
35
  "shellModel",
32
- "webModel",
33
36
  "agentPrompt",
34
37
  "spendLimitDollars",
35
38
  "tokenMax",
@@ -50,6 +53,8 @@ function loadAgentConfig() {
50
53
  ? Number(config.debugPauseSeconds)
51
54
  : 0;
52
55
  config.wakeOnMessage = Boolean(config.wakeOnMessage);
56
+ config.webModel || (config.webModel = config.shellModel);
57
+ config.dreamModel || (config.dreamModel = config.shellModel);
53
58
  if (!config.commandProtection) {
54
59
  config.commandProtection = CommandProtection.None;
55
60
  }
@@ -45,7 +45,7 @@ NAISYS Commands: (cannot be used with other commands on the same prompt)
45
45
  llmail: A local mail system for communicating with your team
46
46
  llmynx: A context optimized web browser. Enter 'llmynx help' to learn how to use it
47
47
  comment "<thought>": Any non-command output like thinking out loud, prefix with the 'comment' command
48
- pause <seconds>: Pause for <seconds> or indeterminite if no argument is provided. Auto wake up on new mail message
48
+ pause <seconds>: Pause for <seconds>
49
49
  endsession "<note>": Ends this session, clears the console log and context.
50
50
  The note should help you find your bearings in the next session.
51
51
  The note should contain your next goal, and important things should you remember.
@@ -0,0 +1,71 @@
1
+ import * as config from "../config.js";
2
+ import * as dbUtils from "../utils/dbUtils.js";
3
+ import * as output from "../utils/output.js";
4
+ import { naisysToHostPath } from "../utils/utilities.js";
5
+ import * as contextManager from "./contextManager.js";
6
+ import { LlmRole } from "./llmDtos.js";
7
+ import * as llmService from "./llmService.js";
8
+ const _dbFilePath = naisysToHostPath(`${config.naisysFolder}/lib/dream.db`);
9
+ await init();
10
+ async function init() {
11
+ const newDbCreated = await dbUtils.initDatabase(_dbFilePath);
12
+ await usingDatabase(async (db) => {
13
+ if (!newDbCreated) {
14
+ return;
15
+ }
16
+ await db.exec(`CREATE TABLE DreamLog (
17
+ id INTEGER PRIMARY KEY,
18
+ username TEXT NOT NULL,
19
+ date TEXT NOT NULL,
20
+ dream TEXT NOT NULL
21
+ )`);
22
+ });
23
+ }
24
+ export async function goodmorning() {
25
+ return await usingDatabase(async (db) => {
26
+ const row = await db.get(`SELECT dream
27
+ FROM DreamLog
28
+ WHERE username = ?
29
+ ORDER BY date DESC LIMIT 1`, config.agent.username);
30
+ return row === null || row === void 0 ? void 0 : row.dream;
31
+ });
32
+ }
33
+ export async function goodnight() {
34
+ output.comment("Dreaming about the session...");
35
+ const dream = await runDreamSequence();
36
+ await storeDream(dream);
37
+ return dream;
38
+ }
39
+ async function runDreamSequence() {
40
 + const systemMessage = `You are ${config.agent.username}'s unconscious sleep process. You compile all ${config.agent.username}'s
41
+ thoughts during the day and reduce them down to important things to remember - references, plans, project structure, schemas,
42
+ file locations, urls, and more. You are the sleep process, and you are the most important process. Using your results,
43
+ when ${config.agent.username} wakes up they'll know exactly what to do and how to do it.`;
44
+ const allTheThings = contextManager.messages.map((m) => m.content).join("\n");
45
+ return await llmService.query(config.agent.dreamModel, systemMessage, [
46
+ {
47
+ role: LlmRole.User,
48
+ content: allTheThings,
49
+ },
50
+ {
51
+ role: LlmRole.Assistant,
52
+ content: "We sure had an eventful day",
53
+ },
54
+ {
55
+ role: LlmRole.User,
56
+ content: `Dream on all these things and let me know what you come up with. Use what was done in the previous session as a guide
57
+ for what's possible tomorrow. Don't overload yourself with too many thoughts and ideas. Keep important references for the future
58
+ but don't go into any great detail of future plans unless it's happening soon. `,
59
+ },
60
+ ], "dream");
61
+ }
62
+ async function usingDatabase(run) {
63
+ return dbUtils.usingDatabase(_dbFilePath, run);
64
+ }
65
+ async function storeDream(dream) {
66
+ await usingDatabase(async (db) => {
67
+ await db.run(`INSERT INTO DreamLog (username, date, dream)
68
+ VALUES (?, datetime('now'), ?)`, config.agent.username, dream);
69
+ });
70
+ }
71
+ //# sourceMappingURL=dreamMaker.js.map
@@ -87,19 +87,6 @@ function appendToLogFile(filepath, message) {
87
87
  </td>
88
88
  </tr>`);
89
89
  }
90
- export function getPreviousEndSessionNote() {
91
- // Find the most recent message in the log that starts with 'endsession' for the local user
92
- return usingDatabase(async (db) => {
93
- const result = await db.get(`SELECT message
94
- FROM ContextLog
95
- WHERE username = ? AND message LIKE 'endsession %'
96
- ORDER BY id DESC
97
- LIMIT 1`, [config.agent.username]);
98
- const endSessionMsg = result === null || result === void 0 ? void 0 : result.message;
99
- // Trim endsession prefix
100
- return (endSessionMsg === null || endSessionMsg === void 0 ? void 0 : endSessionMsg.slice("endsession ".length)) || "";
101
- });
102
- }
103
90
  async function usingDatabase(run) {
104
91
  return dbUtils.usingDatabase(_dbFilePath, run);
105
92
  }
package/package.json CHANGED
@@ -1,27 +1,28 @@
1
1
  {
2
2
  "name": "naisys",
3
3
  "description": "Node.js Autonomous Intelligence System",
4
- "version": "1.2.0",
4
+ "version": "1.3.1",
5
5
  "type": "module",
6
6
  "main": "dist/naisys.js",
7
7
  "preferGlobal": true,
8
8
  "bin": {
9
- "naisys": "./bin/naisys"
9
+ "naisys": "bin/naisys"
10
10
  },
11
11
  "scripts": {
12
- "compile/run/attachable": "tsc && node --inspect dist/naisys.js ./agents/webdev-fansite.yaml",
13
- "run agent:p1": "node dist/naisys.js ./agents/webdev-battle/player1.yaml",
14
- "run agent:p2": "node dist/naisys.js ./agents/webdev-battle/player2.yaml",
12
+ "compile/run/attachable": "tsc && node --inspect dist/naisys.js ./agents/juicer.yaml",
13
+ "agent:dev": "node dist/naisys.js ./agents/3-team-dev-db-content/dev.yaml",
14
+ "agent:db": "node dist/naisys.js ./agents/3-team-dev-db-content/db.yaml",
15
+ "agent:content": "node dist/naisys.js ./agents/3-team-dev-db-content/content.yaml",
15
16
  "clean": "rm -rf dist",
16
- "compile": "tsc --build --verbose",
17
+ "compile": "tsc",
17
18
  "eslint": "npx eslint --rulesdir eslint-rules src",
18
19
  "test": "tsc && node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=dist/__tests__",
19
20
  "prettier": "npx prettier --write .",
20
21
  "dependency-graph": "madge --image dependency-graph.png dist",
21
22
  "detect-cycles": "madge --circular dist",
22
23
  "updates:check": "npm-check-updates",
23
- "updates:apply": "npm-check-updates -u && npm update",
24
- "npm:publish:dryrun": "npm run clean && npm run compile && npm publish --dry-run",
24
+ "updates:apply": "npm-check-updates -u && npm install",
25
+ "npm:publish:dryrun": "npm run clean && npm install && npm run compile && npm publish --dry-run",
25
26
  "postinstall": "chmod +x ./bin/*"
26
27
  },
27
28
  "repository": {
@@ -42,7 +43,7 @@
42
43
  "devDependencies": {
43
44
  "@types/escape-html": "1.0.4",
44
45
  "@types/js-yaml": "4.0.9",
45
- "@types/node": "20.11.26",
46
+ "@types/node": "20.11.28",
46
47
  "@types/text-table": "0.2.5",
47
48
  "@typescript-eslint/eslint-plugin": "7.2.0",
48
49
  "@typescript-eslint/parser": "7.2.0",
@@ -53,14 +54,14 @@
53
54
  "typescript": "5.4.2"
54
55
  },
55
56
  "dependencies": {
56
- "@anthropic-ai/sdk": "0.17.2",
57
+ "@anthropic-ai/sdk": "0.18.0",
57
58
  "@google/generative-ai": "0.3.0",
58
59
  "chalk": "5.3.0",
59
60
  "commander": "12.0.0",
60
61
  "dotenv": "16.4.5",
61
62
  "escape-html": "1.0.3",
62
63
  "js-yaml": "4.1.0",
63
- "openai": "4.28.4",
64
+ "openai": "4.29.1",
64
65
  "sqlite": "5.1.1",
65
66
  "sqlite3": "5.1.7",
66
67
  "text-table": "0.2.0",