naisys 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -11,6 +11,10 @@ vim or nano so point the LLM to use cat to read/write files in a single operatio
11
11
 
12
12
  [NPM](https://www.npmjs.com/package/naisys) | [Website](https://naisys.org) | [Discord](https://discord.gg/JBUPWSbaEt) | [Demo Video](https://www.youtube.com/watch?v=Ttya3ixjumo)
13
13
 
14
+ ```bash
15
+ npm install -g naisys
16
+ ```
17
+
14
18
  #### Node.js is used to create a simple proxy shell environment for the LLM that
15
19
 
16
20
  - Helps the LLM keep track of its current context size
@@ -85,20 +89,26 @@ tokenMax: 5000
85
89
  # No value or zero means wait indefinitely (debug driven)
86
90
  debugPauseSeconds: 5
87
91
 
88
- # If true, regardless of the debugPauseSeconds, the agent will not wake up on messages
89
- # With lots of agents this could be costly if they all end up mailing/replying each other in quick succession
92
+ # If true, regardless of the debugPauseSeconds, the agent will wake up on messages
93
+ # Useful for agents with long debugPauseSeconds, so that they can wake up and reply quickly
90
94
  wakeOnMessage: false
91
95
 
92
96
  # The maximum amount to spend on LLM interactions
93
97
  # Once reached the agent will stop and this value will need to be increased to continue
94
98
  spendLimitDollars: 2.00
95
99
 
96
- # None: Commands from the LLM run automatically, this is the default setting as well if the value is not set
97
- # Manual: Every command the LLM wants you run you have to approve [y/n]
98
- # Auto: All commands are run through the separate LLM instace, commands that look like they'll modify the system are blocked
99
- commandProtection: 'none'
100
-
101
- # Additional custom variables can be defined here and/or in the .env file to be loaded into the agent prompt
100
+ # Command Protection: Useful for agents you want to restrict from modifying the system
101
+ # None: Commands from the LLM run automatically, this is the default setting as well if the value is not set
102
+ # Manual: Every command the LLM wants to run has to be approved [y/n]. Not very autonomous.
103
+ # Auto: All commands are run through the separate LLM instance that will check to see if the command is safe
104
+ commandProtection: "none"
105
+
106
+ # Run these commands on session start, in the example below the agent will see how to use mail and a list of other agents
107
+ initialCommands:
108
+ - llmail users
109
+ - llmail help
110
+ - cat ${env.NAISYS_FOLDER}/home/${agent.username}/PLAN.md
111
+ # Additional custom variables can be defined here and/or in the agent config to be loaded into the agent prompt
102
112
  ```
103
113
 
104
114
  - Run `naisys <path to yaml or directory>`
package/bin/comment ADDED
@@ -0,0 +1,4 @@
1
+ #!/bin/bash
2
+
3
+ # ./src/command/commandHandler.ts has the same message
4
+ echo "Comment noted. Try running commands now to achieve your goal. ."
package/bin/endsession ADDED
@@ -0,0 +1,3 @@
1
+ #!/bin/bash
2
+
3
+ echo "'endsession' cannot be used with other commands on the same prompt."
package/bin/llmail ADDED
@@ -0,0 +1,3 @@
1
+ #!/bin/bash
2
+
3
+ echo "'llmail' cannot be used with other commands on the same prompt."
package/bin/llmynx ADDED
@@ -0,0 +1,3 @@
1
+ #!/bin/bash
2
+
3
+ echo "'llmynx' cannot be used with other commands on the same prompt."
@@ -12,7 +12,7 @@ fi
12
12
 
13
13
  # Resolves the location of naisys from the bin directory
14
14
  SCRIPT=$(readlink -f "$0" || echo "$0")
15
- SCRIPT_DIR=$(dirname "$SCRIPT")
15
+ SCRIPT_DIR=$(dirname "$SCRIPT")/..
16
16
 
17
17
  # if path is a yaml file then start a single agent
18
18
  if [ -f "$1" ]; then
package/bin/pause ADDED
@@ -0,0 +1,3 @@
1
+ #!/bin/bash
2
+
3
+ echo "'pause' cannot be used with other commands on the same prompt."
@@ -6,9 +6,8 @@ import * as utilities from "../utils/utilities.js";
6
6
  import { naisysToHostPath } from "../utils/utilities.js";
7
7
  const _dbFilePath = naisysToHostPath(`${config.naisysFolder}/lib/llmail.db`);
8
8
  let _myUserId = -1;
9
- // Implement maxes so that LLMs actively manage threads, archive, and create new ones
10
- const _threadTokenMax = config.agent.tokenMax / 2; // So 4000, would be 2000 thread max
11
- const _messageTokenMax = _threadTokenMax / 5; // Given the above a 400 token max, and 5 big messages per thread
9
+ /** Threading is not currently used so this doesn't matter */
10
+ const _threadTokenMax = config.mailMessageTokenMax * 5;
12
11
  /** The 'non-simple' version of this is a thread first mail system. Where agents can create threads, add users, and reply to threads, etc..
13
12
  * The problem with this was the agents were too chatty with so many mail commands, wasting context replying, reading threads, etc..
14
13
  * Simple mode only has two commands. It still requires db persistence to support offline agents. */
@@ -85,7 +84,7 @@ export async function handleCommand(args) {
85
84
  if (simpleMode) {
86
85
  return `llmail <command>
87
86
  users: Get list of users on the system
88
- send "<users>" "subject" "message": Send a message. ${_messageTokenMax} token max.`;
87
+ send "<users>" "subject" "message": Send a message. ${config.mailMessageTokenMax} token max.`;
89
88
  }
90
89
  else {
91
90
  return `llmail <command>
@@ -138,7 +137,8 @@ export async function handleCommand(args) {
138
137
  await init();
139
138
  return "llmail database reset";
140
139
  default:
141
- return "Unknown llmail command: " + argParams[0];
140
+ return ("Error, unknown command. See valid commands below:\n" +
141
+ (await handleCommand("help")));
142
142
  }
143
143
  }
144
144
  export async function getUnreadThreads() {
@@ -339,8 +339,8 @@ async function getUser(db, username) {
339
339
  }
340
340
  function validateMsgTokenCount(message) {
341
341
  const msgTokenCount = utilities.getTokenCount(message);
342
- if (msgTokenCount > _messageTokenMax) {
343
- throw `Error: Message is ${msgTokenCount} tokens, exceeding the limit of ${_messageTokenMax} tokens`;
342
+ if (msgTokenCount > config.mailMessageTokenMax) {
343
+ throw `Error: Message is ${msgTokenCount} tokens, exceeding the limit of ${config.mailMessageTokenMax} tokens`;
344
344
  }
345
345
  return msgTokenCount;
346
346
  }
@@ -15,53 +15,50 @@ let _nextGlobalLinkNum = 1;
15
15
  export async function handleCommand(cmdArgs) {
16
16
  outputInDebugMode("LLMYNX DEBUG MODE IS ON");
17
17
  const argParams = cmdArgs.split(" ");
18
- const defualtTokenMax = config.agent.tokenMax / 8;
19
18
  if (!argParams[0]) {
20
19
  argParams[0] = "help";
21
20
  }
22
21
  switch (argParams[0]) {
23
22
  case "help":
24
- return `llmynx <command> (results will be reduced to around ${defualtTokenMax})
23
+ return `llmynx <command> (results will be reduced to around ${config.webTokenMax})
25
24
  search <query>: Search google for the given query
26
25
  open <url>: Opens the given url. Links are represented as numbers in brackets which prefix the word they are linking like [123]
27
26
  follow <link number>: Opens the given link number. Link numbers work across all previous outputs
28
- links <url> <page>: Lists only the links for the given url. Use the page number to get more links`;
27
+ links <url> <page>: Lists only the links for the given url. Use the page number to get more links
28
+
29
+ *llmynx does not support input. Use llmynx or curl to call APIs directly*`;
29
30
  case "search": {
30
31
  const query = argParams.slice(1).join(" ");
31
- return await loadUrl("https://www.google.com/search?q=" + encodeURIComponent(query), config.agent.tokenMax / 2, // Prevent form being reduced as google results are usually short anyways and we want to maintainq the links
32
- true, true);
32
+ return await loadUrl("https://www.google.com/search?q=" + encodeURIComponent(query), true, true);
33
33
  }
34
34
  case "open": {
35
35
  const url = argParams[1];
36
- const isNumber = !isNaN(parseInt(argParams[2]));
37
- const tokenMax = isNumber ? parseInt(argParams[2]) : defualtTokenMax;
38
- return await loadUrl(url, tokenMax, false, true);
36
+ return await loadUrl(url, false, true);
39
37
  }
40
38
  case "follow": {
41
39
  const linkNum = parseInt(argParams[1]);
42
- const isNumber = !isNaN(parseInt(argParams[2]));
43
- const tokenMax = isNumber ? parseInt(argParams[2]) : defualtTokenMax;
44
40
  const linkUrl = _globalLinkMap.get(linkNum);
45
41
  if (!linkUrl) {
46
42
  return "Link number not found";
47
43
  }
48
- return await loadUrl(linkUrl, tokenMax, true, false);
44
+ return await loadUrl(linkUrl, true, false);
49
45
  }
50
46
  case "links": {
51
47
  const url = argParams[1];
52
48
  const isNumber = !isNaN(parseInt(argParams[2]));
53
49
  const pageNumber = isNumber ? parseInt(argParams[2]) : 1;
54
- return await loadUrl(url, 600, false, false, pageNumber);
50
+ return await loadUrl(url, false, false, pageNumber);
55
51
  }
56
52
  // Secret command to toggle debug mode
57
53
  case "debug":
58
54
  debugMode = !debugMode;
59
55
  return "Debug mode toggled " + (debugMode ? "on" : "off");
60
56
  default:
61
- return "Unknown llmynx command: " + argParams[0];
57
+ return ("Error, unknown command. See valid commands below:\n" +
58
+ (await handleCommand("help")));
62
59
  }
63
60
  }
64
- async function loadUrl(url, tokenMax, showUrl, showFollowHint, linkPageAsContent) {
61
+ async function loadUrl(url, showUrl, showFollowHint, linkPageAsContent) {
65
62
  let content = await runLynx(url);
66
63
  let links = "";
67
64
  // Reverse find 'References: ' and cut everything after it from the content
@@ -79,13 +76,13 @@ async function loadUrl(url, tokenMax, showUrl, showFollowHint, linkPageAsContent
79
76
  outputInDebugMode(`Content Token size: ${contentTokenSize}\n` +
80
77
  `Links Token size: ${linksTokenSize}`);
81
78
  // Reduce content using LLM if it's over the token max
82
- if (contentTokenSize > tokenMax) {
79
+ if (contentTokenSize > config.webTokenMax) {
83
80
  const model = getLLModel(config.agent.webModel);
84
81
  // For example if context is 16k, and max tokens is 2k, 3k with 1.5x overrun
85
82
  // That would be 3k for the current compressed content, 10k for the chunk, and 3k for the output
86
- let tokenChunkSize = model.maxTokens - tokenMax * 2 * 1.5;
83
+ let tokenChunkSize = model.maxTokens - config.webTokenMax * 2 * 1.5;
87
84
  if (linkPageAsContent) {
88
- tokenChunkSize = tokenMax;
85
+ tokenChunkSize = config.webTokenMax;
89
86
  }
90
87
  outputInDebugMode(`Token max chunk size: ${tokenChunkSize}`);
91
88
  const pieceCount = Math.ceil(contentTokenSize / tokenChunkSize);
@@ -100,10 +97,10 @@ async function loadUrl(url, tokenMax, showUrl, showFollowHint, linkPageAsContent
100
97
  }
101
98
  continue;
102
99
  }
103
- output.comment(`Processing Piece ${i + 1} of ${pieceCount}...`);
100
+ output.comment(`Processing Piece ${i + 1} of ${pieceCount} with ${model.key}...`);
104
101
  outputInDebugMode(` Reduced output tokens: ${utilities.getTokenCount(reducedOutput)}\n` +
105
102
  ` Current Piece tokens: ${utilities.getTokenCount(pieceStr)}`);
106
- reducedOutput = await llmReduce(url, reducedOutput, i + 1, pieceCount, pieceStr, tokenMax);
103
+ reducedOutput = await llmReduce(url, reducedOutput, i + 1, pieceCount, pieceStr);
107
104
  }
108
105
  if (linkPageAsContent) {
109
106
  return "";
@@ -113,7 +110,7 @@ async function loadUrl(url, tokenMax, showUrl, showFollowHint, linkPageAsContent
113
110
  output.comment(`Content reduced from ${contentTokenSize} to ${finalTokenSize} tokens`);
114
111
  }
115
112
  else {
116
- output.comment(`Content is already under ${tokenMax} tokens.`);
113
+ output.comment(`Content is already under ${config.webTokenMax} tokens.`);
117
114
  }
118
115
  // Prefix content with url if following as otherwise the url is never shown
119
116
  if (showUrl) {
@@ -132,22 +129,25 @@ async function runLynx(url) {
132
129
  const modeParams = "";
133
130
  const ifWindows = os.platform() === "win32" ? "wsl " : "";
134
131
  exec(`${ifWindows}lynx -dump ${modeParams} "${url}"`, (error, stdout, stderr) => {
135
- if (error) {
136
- resolve(`error: ${error.message}`);
137
- return;
132
+ let output = "";
133
+ if (stdout) {
134
+ output += stdout;
135
+ }
136
+ // I've only seen either/or, but just in case
137
+ if (stdout && stderr) {
138
+ output += "\nError:\n";
138
139
  }
139
140
  if (stderr) {
140
- resolve(`stderr: ${stderr}`);
141
- return;
141
+ output += stderr;
142
142
  }
143
- resolve(stdout);
143
+ resolve(output);
144
144
  });
145
145
  });
146
146
  }
147
- async function llmReduce(url, reducedOutput, pieceNumber, pieceTotal, pieceStr, tokenMax) {
147
+ async function llmReduce(url, reducedOutput, pieceNumber, pieceTotal, pieceStr) {
148
148
  const systemMessage = `You will be iteratively fed the web page ${url} broken into ${pieceTotal} sequential equally sized pieces.
149
149
  Each piece should be reduced into the final content in order to maintain the meaning of the page while reducing verbosity and duplication.
150
- The final output should be around ${tokenMax} tokens.
150
+ The final output should be around ${config.webTokenMax} tokens.
151
151
  Don't remove links which are represented as numbers in brackets which prefix the word they are linking like [123].
152
152
  Try to prioritize content of substance over advertising content.`;
153
153
  const content = `Web page piece ${pieceNumber} of ${pieceTotal}:
@@ -156,7 +156,7 @@ ${pieceStr}
156
156
  Current reduced content:
157
157
  ${reducedOutput}
158
158
 
159
- Please merge the new piece into the existing reduced content above while keeping the result to around ${tokenMax} tokens.
159
+ Please merge the new piece into the existing reduced content above while keeping the result to around ${config.webTokenMax} tokens.
160
160
 
161
161
  Merged reduced content:
162
162
  `;
@@ -61,6 +61,7 @@ export async function processCommand(prompt, consoleInput) {
61
61
  switch (cmdParams[0]) {
62
62
  case "comment": {
63
63
  // Important - Hint the LLM to turn their thoughts into accounts
64
+ // ./bin/comment shell script has the same message
64
65
  await contextManager.append("Comment noted. Try running commands now to achieve your goal.");
65
66
  break;
66
67
  }
@@ -35,9 +35,8 @@ export async function run() {
35
35
  await output.commentAndLog("Starting Context:");
36
36
  await contextManager.append("Previous Session Note:");
37
37
  await contextManager.append(commandHandler.previousSessionNotes || "None");
38
- if (await llmail.hasMultipleUsers()) {
39
- await commandHandler.processCommand(await promptBuilder.getPrompt(), "llmail help");
40
- await commandHandler.processCommand(await promptBuilder.getPrompt(), "llmail users");
38
+ for (const initialCommand of config.agent.initialCommands) {
39
+ await commandHandler.processCommand(await promptBuilder.getPrompt(0, false), config.resolveConfigVars(initialCommand));
41
40
  }
42
41
  inputMode.toggle(InputMode.Debug);
43
42
  let pauseSeconds = config.agent.debugPauseSeconds;
@@ -38,7 +38,10 @@ export async function handleCommand(input) {
38
38
  if (tokenCount > config.shellOutputTokenMax) {
39
39
  outputLimitExceeded = true;
40
40
  const trimLength = (text.length * config.shellOutputTokenMax) / tokenCount;
41
- text = text.slice(0, trimLength);
41
+ text =
42
+ text.slice(0, trimLength / 2) +
43
+ "\n\n...\n\n" +
44
+ text.slice(-trimLength / 2);
42
45
  }
43
46
  await contextManager.append(text);
44
47
  if (outputLimitExceeded) {
@@ -170,14 +170,16 @@ function resetProcess() {
170
170
  * May also help with common escaping errors */
171
171
  function runCommandFromScript(command) {
172
172
  const scriptPath = `${config.naisysFolder}/home/${config.agent.username}/.command.tmp.sh`;
173
- // set -e causes the script to exit on any error
173
+ // set -e causes the script to exit on the first error
174
174
  const scriptContent = `#!/bin/bash
175
175
  set -e
176
176
  cd ${_currentPath}
177
177
  ${command.trim()}`;
178
178
  // create/write file
179
179
  fs.writeFileSync(naisysToHostPath(scriptPath), scriptContent);
180
- // Source will run the script in the current shell, so any change directories in the script should persist in the current shell
181
- return `source ${scriptPath}`;
180
+ // `Path` is set to the ./bin folder because custom NAISYS commands that follow shell commands will be handled by the shell, which will fail
181
+ // so we need to remind the LLM that 'naisys commands cannot be used with other commands on the same prompt'
182
+ // `source` will run the script in the current shell, so any change directories in the script will persist in the current shell
183
+ return `PATH=${config.binPath}:$PATH source ${scriptPath}`;
182
184
  }
183
185
  //# sourceMappingURL=shellWrapper.js.map
package/dist/config.js CHANGED
@@ -8,8 +8,10 @@ program.argument("<agent-path>", "Path to agent configuration file").parse();
8
8
  dotenv.config();
9
9
  /** The system name that shows after the @ in the command prompt */
10
10
  export const hostname = "naisys";
11
- export const shellOutputTokenMax = 2500;
12
- export const shellCommmandTimeoutSeconds = 10;
11
+ export const shellOutputTokenMax = 2500; // Limits the size of files that can be read/wrote
12
+ export const shellCommmandTimeoutSeconds = 15; // The number of seconds NAISYS will wait for a shell command to complete
13
+ export const webTokenMax = 2500;
14
+ export const mailMessageTokenMax = 400;
13
15
  /* .env is used for global configs across naisys, while agent configs are for the specific agent */
14
16
  export const naisysFolder = getEnv("NAISYS_FOLDER", true);
15
17
  export const websiteFolder = getEnv("WEBSITE_FOLDER");
@@ -21,7 +23,7 @@ export const anthropicApiKey = getEnv("ANTHROPIC_API_KEY");
21
23
  export const agent = loadAgentConfig();
22
24
  function loadAgentConfig() {
23
25
  const agentPath = program.args[0];
24
- const checkAgentConfig = yaml.load(fs.readFileSync(agentPath, "utf8"));
26
+ const config = yaml.load(fs.readFileSync(agentPath, "utf8"));
25
27
  // throw if any property is undefined
26
28
  for (const key of [
27
29
  "username",
@@ -33,19 +35,31 @@ function loadAgentConfig() {
33
35
  "tokenMax",
34
36
  // other properties can be undefined
35
37
  ]) {
36
- if (!valueFromString(checkAgentConfig, key)) {
38
+ if (!valueFromString(config, key)) {
37
39
  throw `Agent config: Error, ${key} is not defined`;
38
40
  }
39
41
  }
40
- if (!checkAgentConfig.commandProtection) {
41
- checkAgentConfig.commandProtection = CommandProtection.None;
42
+ // Sanitize input
43
+ if (!config.initialCommands) {
44
+ config.initialCommands = [];
42
45
  }
43
- if (!Object.values(CommandProtection).includes(checkAgentConfig.commandProtection)) {
46
+ else if (!Array.isArray(config.initialCommands)) {
47
+ throw `Agent config: Error, 'initialCommands' is not an array`;
48
+ }
49
+ config.debugPauseSeconds = config.debugPauseSeconds
50
+ ? Number(config.debugPauseSeconds)
51
+ : 0;
52
+ config.wakeOnMessage = Boolean(config.wakeOnMessage);
53
+ if (!config.commandProtection) {
54
+ config.commandProtection = CommandProtection.None;
55
+ }
56
+ if (!Object.values(CommandProtection).includes(config.commandProtection)) {
44
57
  throw `Agent config: Error, 'commandProtection' is not a valid value`;
45
58
  }
46
- return checkAgentConfig;
59
+ return config;
47
60
  }
48
61
  export const packageVersion = await getVersion();
62
+ export const binPath = getBinPath();
49
63
  /** Can only get version from env variable when naisys is started with npm,
50
64
  * otherwise need to rip it from the package ourselves relative to where this file is located */
51
65
  async function getVersion() {
@@ -67,4 +81,28 @@ function getEnv(key, required) {
67
81
  }
68
82
  return value;
69
83
  }
84
+ export function resolveConfigVars(templateString) {
85
+ let resolvedString = templateString;
86
+ resolvedString = resolveTemplateVars(resolvedString, "agent", agent);
87
+ resolvedString = resolveTemplateVars(resolvedString, "env", process.env);
88
+ return resolvedString;
89
+ }
90
+ function resolveTemplateVars(templateString, allowedVarString, mappedVar) {
91
+ const pattern = new RegExp(`\\$\\{${allowedVarString}\\.([^}]+)\\}`, "g");
92
+ return templateString.replace(pattern, (match, key) => {
93
+ const value = valueFromString(mappedVar, key);
94
+ if (value === undefined) {
95
+ throw `Agent config: Error, ${key} is not defined`;
96
+ }
97
+ return value;
98
+ });
99
+ }
100
+ function getBinPath() {
101
+ // C:/git/naisys/dist/config.js
102
+ let binPath = new URL("../bin", import.meta.url).pathname;
103
+ if (binPath.startsWith("/C:")) {
104
+ binPath = "/mnt/c" + binPath.substring(3);
105
+ }
106
+ return binPath;
107
+ }
70
108
  //# sourceMappingURL=config.js.map
@@ -5,7 +5,6 @@ import * as logService from "../utils/logService.js";
5
5
  import * as output from "../utils/output.js";
6
6
  import { OutputColor } from "../utils/output.js";
7
7
  import * as utilities from "../utils/utilities.js";
8
- import { valueFromString } from "../utils/utilities.js";
9
8
  import { LlmRole } from "./llmDtos.js";
10
9
  export var ContentSource;
11
10
  (function (ContentSource) {
@@ -23,8 +22,7 @@ export function getSystemMessage() {
23
22
  // A lot of the stipulations in here are to prevent common LLM mistakes
24
23
  // Like we can't jump between standard and special commands in a single prompt, which the LLM will try to do if not warned
25
24
  let agentPrompt = config.agent.agentPrompt;
26
- agentPrompt = resolveTemplateVars(agentPrompt, "agent", config.agent);
27
- agentPrompt = resolveTemplateVars(agentPrompt, "env", process.env);
25
+ agentPrompt = config.resolveConfigVars(agentPrompt);
28
26
  const systemMessage = `${agentPrompt.trim()}
29
27
 
30
28
  This is a command line interface presenting you with the next command prompt.
@@ -38,12 +36,12 @@ NAISYS ${config.packageVersion} Shell
38
36
  Welcome back ${config.agent.username}!
39
37
  MOTD:
40
38
  Date: ${new Date().toLocaleString()}
41
- Commands:
42
- Standard Unix commands are available
39
+ LINUX Commands:
40
+ Standard Linux commands are available
43
41
  vi and nano are not supported
44
42
  Read files with cat. Write files with \`cat > filename << 'EOF'\`
45
43
  Do not input notes after the prompt. Only valid commands.
46
- Special Commands: (Don't mix with standard commands on the same prompt)
44
+ NAISYS Commands: (cannot be used with other commands on the same prompt)
47
45
  llmail: A local mail system for communicating with your team
48
46
  llmynx: A context optimized web browser. Enter 'llmynx help' to learn how to use it
49
47
  comment "<thought>": Any non-command output like thinking out loud, prefix with the 'comment' command
@@ -51,23 +49,12 @@ Special Commands: (Don't mix with standard commands on the same prompt)
51
49
  endsession "<note>": Ends this session, clears the console log and context.
52
50
  The note should help you find your bearings in the next session.
53
51
  The note should contain your next goal, and important things should you remember.
54
- Try to keep the note around 400 tokens.
55
52
  Tokens:
56
53
  The console log can only hold a certain number of 'tokens' that is specified in the prompt
57
54
  Make sure to call endsession before the limit is hit so you can continue your work with a fresh console`;
58
55
  _cachedSystemMessage = systemMessage;
59
56
  return systemMessage;
60
57
  }
61
- function resolveTemplateVars(templateString, allowedVarString, mappedVar) {
62
- const pattern = new RegExp(`\\$\\{${allowedVarString}\\.([^}]+)\\}`, "g");
63
- return templateString.replace(pattern, (match, key) => {
64
- const value = valueFromString(mappedVar, key);
65
- if (value === undefined) {
66
- throw `Agent config: Error, ${key} is not defined`;
67
- }
68
- return value;
69
- });
70
- }
71
58
  export let messages = [];
72
59
  export async function append(text, source = ContentSource.Console) {
73
60
  // Debug runs in a shadow mode where their activity is not recorded in the context
@@ -80,24 +80,37 @@ async function sendWithGoogle(modelKey, systemMessage, context, source) {
80
80
  if (lastMessage.role !== LlmRole.User) {
81
81
  throw "Error, last message on context is not a user message";
82
82
  }
83
+ const contextHistory = context
84
+ .filter((m) => m != lastMessage)
85
+ .map((m) => ({
86
+ role: m.role == LlmRole.Assistant ? "model" : "user",
87
+ parts: [
88
+ {
89
+ text: m.content,
90
+ },
91
+ ],
92
+ }));
83
93
  const history = [
84
94
  {
85
95
  role: LlmRole.User, // System role is not supported by Google API
86
- parts: systemMessage,
96
+ parts: [
97
+ {
98
+ text: systemMessage,
99
+ },
100
+ ],
87
101
  },
88
102
  {
89
103
  role: "model",
90
- parts: "Understood",
104
+ parts: [
105
+ {
106
+ text: "Understood",
107
+ },
108
+ ],
91
109
  },
92
- ...context
93
- .filter((m) => m != lastMessage)
94
- .map((m) => ({
95
- role: m.role == LlmRole.Assistant ? "model" : LlmRole.User,
96
- parts: m.content,
97
- })),
110
+ ...contextHistory,
98
111
  ];
99
112
  const chat = googleModel.startChat({
100
- history: history,
113
+ history,
101
114
  generationConfig: {
102
115
  maxOutputTokens: 2000,
103
116
  },
package/package.json CHANGED
@@ -1,20 +1,19 @@
1
1
  {
2
2
  "name": "naisys",
3
3
  "description": "Node.js Autonomous Intelligence System",
4
- "version": "1.1.0",
4
+ "version": "1.2.0",
5
5
  "type": "module",
6
6
  "main": "dist/naisys.js",
7
7
  "preferGlobal": true,
8
8
  "bin": {
9
- "naisys": "naisys.sh"
9
+ "naisys": "./bin/naisys"
10
10
  },
11
11
  "scripts": {
12
12
  "compile/run/attachable": "tsc && node --inspect dist/naisys.js ./agents/webdev-fansite.yaml",
13
- "run agent:dev": "node dist/naisys.js ./agents/2-team/dev.yaml",
14
- "run agent:admin": "node dist/naisys.js ./agents/2-team/admin.yaml",
13
+ "run agent:p1": "node dist/naisys.js ./agents/webdev-battle/player1.yaml",
14
+ "run agent:p2": "node dist/naisys.js ./agents/webdev-battle/player2.yaml",
15
15
  "clean": "rm -rf dist",
16
- "clean:win": "wsl rm -rf dist",
17
- "compile": "tsc",
16
+ "compile": "tsc --build --verbose",
18
17
  "eslint": "npx eslint --rulesdir eslint-rules src",
19
18
  "test": "tsc && node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=dist/__tests__",
20
19
  "prettier": "npx prettier --write .",
@@ -22,7 +21,8 @@
22
21
  "detect-cycles": "madge --circular dist",
23
22
  "updates:check": "npm-check-updates",
24
23
  "updates:apply": "npm-check-updates -u && npm update",
25
- "npm:publish:dryrun": "npm run clean && npm run compile && npm publish --dry-run"
24
+ "npm:publish:dryrun": "npm run clean && npm run compile && npm publish --dry-run",
25
+ "postinstall": "chmod +x ./bin/*"
26
26
  },
27
27
  "repository": {
28
28
  "type": "git",
@@ -42,19 +42,19 @@
42
42
  "devDependencies": {
43
43
  "@types/escape-html": "1.0.4",
44
44
  "@types/js-yaml": "4.0.9",
45
- "@types/node": "20.11.25",
45
+ "@types/node": "20.11.26",
46
46
  "@types/text-table": "0.2.5",
47
- "@typescript-eslint/eslint-plugin": "7.1.1",
48
- "@typescript-eslint/parser": "7.1.1",
47
+ "@typescript-eslint/eslint-plugin": "7.2.0",
48
+ "@typescript-eslint/parser": "7.2.0",
49
49
  "eslint": "8.57.0",
50
50
  "jest": "29.7.0",
51
51
  "prettier": "3.2.5",
52
52
  "ts-node": "10.9.2",
53
- "typescript": "5.3.3"
53
+ "typescript": "5.4.2"
54
54
  },
55
55
  "dependencies": {
56
- "@anthropic-ai/sdk": "0.16.1",
57
- "@google/generative-ai": "0.2.1",
56
+ "@anthropic-ai/sdk": "0.17.2",
57
+ "@google/generative-ai": "0.3.0",
58
58
  "chalk": "5.3.0",
59
59
  "commander": "12.0.0",
60
60
  "dotenv": "16.4.5",