naisys 1.6.0 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE.md CHANGED
File without changes
package/README.md CHANGED
@@ -1,17 +1,17 @@
1
1
  ## NAISYS (Node.js Autonomous Intelligence System)
2
2
 
3
- NAISYS allows any LLM you want to operate a standard linux shell given your instructions. You can control how much
3
+ NAISYS allows any LLM you want to operate a standard Linux shell given your instructions. You can control how much
4
4
  to spend, the maximum number of tokens to use per session, how long to wait between commands, etc.. Between each command
5
- NAISYS will wait a few seconds to accept any input you want to put in yourself in case you want to colllaborate with the
6
- LLM, give it hints, and/or diagnose the session. Once the LLM reaches the token max you specified for the sesssion it
7
- will wrap things up, and start a fresh shell for the LLM to continue on its work.
5
+ NAISYS will wait a few seconds to accept any input you want to put in yourself in case you want to collaborate with the
6
+ LLM, give it hints, and/or diagnose the session. Once the LLM reaches the token max you specified for the session it
7
+ will wrap things up, and start a fresh shell for the LLM to continue on its work.
8
8
 
9
- NAISYS tries to be a minimal wrapper, just helping the LLM operate in the shell 'better'. Making commands 'context friendly'. For instace if a command is long running, NAISYS will interrupt it, show the LLM the current output, and ask the LLM what it wants to
10
- do next - wait, kill, or send input. The custom command prompt helps the LLM keep track of its token usage during the session. The 'comment' command helps the LLM think outloud without putting invalid commands into the shell.
9
+ NAISYS tries to be a minimal wrapper, just helping the LLM operate in the shell 'better'. Making commands 'context friendly'. For instance if a command is long running, NAISYS will interrupt it, show the LLM the current output, and ask the LLM what it wants to
10
+ do next - wait, kill, or send input. The custom command prompt helps the LLM keep track of its token usage during the session. The 'comment' command helps the LLM think out loud without putting invalid commands into the shell.
11
11
 
12
- Some use cases are building websites, diagnosing a system for security concerns, mapping out the topology of the local
13
- network, learning and performing arbitrary tasks, or just plain exploring the limits of autonomy. NAISYS has a built-in
14
- system for inter-agent communiation. You can manually startup mulitple instances of NAISYS with different roles, or
12
+ Some use cases are building websites, diagnosing a system for security concerns, mapping out the topology of the local
13
+ network, learning and performing arbitrary tasks, or just plain exploring the limits of autonomy. NAISYS has a built-in
14
+ system for inter-agent communication. You can manually start up multiple instances of NAISYS with different roles, or
15
15
  you can allow agents to start their own sub-agents on demand with instructions defined by the LLM itself!
16
16
 
17
17
  [NPM](https://www.npmjs.com/package/naisys) | [Website](https://naisys.org) | [Discord](https://discord.gg/JBUPWSbaEt) | [Demo Video](https://www.youtube.com/watch?v=Ttya3ixjumo)
@@ -200,6 +200,8 @@ initialCommands:
200
200
  - Use the above instructions to install locally, and then continue with the instructions below
201
201
  - Install WSL (Windows Subsystem for Linux)
202
202
  - Install a Linux distribution, Ubuntu can easily be installed from the Microsoft Store
203
+ - Make sure the checked out code preserves the original line endings
204
+ - Files in the /bin folder should have LF endings only, not CRLF
203
205
  - The `NAISYS_FOLDER` and `WEBSITE_FOLDER` should be set to the WSL path
204
206
  - So `C:\var\naisys` should be `/mnt/c/var/naisys` in the `.env` file
205
207
 
@@ -0,0 +1,2 @@
1
+ # Even on Windows, these bin commands are run by WSL so the line endings need to be LF, or bash will error out
2
+ * text eol=lf
@@ -0,0 +1,4 @@
1
+ #!/bin/bash
2
+
3
+ # ./src/command/commandHandler.ts has the same message
4
+ echo "Successful: Naisys fallback bash commands are being caught."
@@ -227,7 +227,8 @@ async function splitMultipleInputCommands(nextInput) {
227
227
  else if (newLinePos > 0 &&
228
228
  (nextInput.startsWith("comment ") ||
229
229
  nextInput.startsWith("genimg ") ||
230
- nextInput.startsWith("trimsession "))) {
230
+ nextInput.startsWith("trimsession ") ||
231
+ nextInput.startsWith("pause "))) {
231
232
  input = nextInput.slice(0, newLinePos);
232
233
  nextInput = nextInput.slice(newLinePos).trim();
233
234
  }
@@ -40,8 +40,7 @@ export async function run() {
40
40
  await output.commentAndLog("Starting Context:");
41
41
  const latestDream = await dreamMaker.goodmorning();
42
42
  if (latestDream) {
43
- await contextManager.append("Previous Session Notes:");
44
- await contextManager.append(latestDream);
43
+ await displayPreviousSessionNotes(latestDream, nextPromptIndex++);
45
44
  }
46
45
  for (const initialCommand of config.agent.initialCommands) {
47
46
  let prompt = await promptBuilder.getPrompt(0, false);
@@ -238,4 +237,13 @@ function setPromptIndex(prompt, index) {
238
237
  }
239
238
  return newPrompt;
240
239
  }
240
+ async function displayPreviousSessionNotes(prevSessionNotes, nextPromptIndex) {
241
+ let prompt = await promptBuilder.getPrompt(0, false);
242
+ prompt = setPromptIndex(prompt, ++nextPromptIndex);
243
+ await contextManager.append(prompt, ContentSource.ConsolePrompt, nextPromptIndex);
244
+ const prevSessionNotesCommand = "cat ~/prev_session_notes";
245
+ await contextManager.append(prevSessionNotesCommand, ContentSource.LlmPromptResponse);
246
+ output.write(prompt + chalk[OutputColor.llm](prevSessionNotesCommand));
247
+ await contextManager.append(prevSessionNotes);
248
+ }
241
249
  //# sourceMappingURL=commandLoop.js.map
File without changes
@@ -19,7 +19,6 @@ const _writeEventName = "write";
19
19
  const _originalWrite = process.stdout.write.bind(process.stdout);
20
20
  process.stdout.write = (...args) => {
21
21
  _writeEventEmitter.emit(_writeEventName, false, ...args);
22
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
23
22
  return _originalWrite.apply(process.stdout, args);
24
23
  };
25
24
  /**
File without changes
@@ -89,6 +89,9 @@ function processOutput(rawDataStr, eventType, pid) {
89
89
  let finalOutput = _currentBufferType == "alternate"
90
90
  ? _getTerminalActiveBuffer()
91
91
  : _commandOutput.trim();
92
+ if (finalOutput.endsWith("command not found")) {
93
+ finalOutput += `\nNAISYS: Make sure that you are using valid linux commands, and that any non-commands are prefixed with the 'comment' command.`;
94
+ }
92
95
  finalOutput += `\nNAISYS: Command killed.`;
93
96
  resetProcess();
94
97
  _completeCommand(finalOutput);
package/dist/config.js CHANGED
@@ -13,21 +13,25 @@ dotenv.config();
13
13
  export const hostname = "naisys";
14
14
  export const shellCommand = {
15
15
  /** Limits the size of files that can be read/wrote */
16
- outputTokenMax: 3000,
16
+ outputTokenMax: 5000,
17
17
  /** The time NAISYS will wait for new shell output before giving up */
18
18
  timeoutSeconds: 15,
19
19
  /** These commands have their own timeout so the LLM doesn't have to continually waste tokens on wait commands */
20
20
  longRunningCommands: ["nmap", "traceroute", "tracepath", "mtr"],
21
21
  longRunningTimeoutSeconds: 120,
22
22
  };
23
+ export const agent = loadAgentConfig();
23
24
  /** Web pages loaded with llmynx will be reduced down to around this number of tokens */
24
- export const webTokenMax = 2500;
25
+ export const webTokenMax = 3000;
26
+ /** Allows the LLM to end its own session */
25
27
  export const endSessionEnabled = true;
26
- export const mailEnabled = true;
27
- export const webEnabled = true;
28
+ /** Inter agent communication */
29
+ export const mailEnabled = agent.mailEnabled || false;
30
+ /** The LLM optimized browser */
31
+ export const webEnabled = agent.webEnabled || false;
28
32
  /** Experimental, live updating spot in the context for the LLM to put files, to avoid having to continually cat */
29
33
  export const workspacesEnabled = false;
30
- /** Experimental, allow LLM to trim prompts from it's own session context */
34
+ /** Experimental, allow LLM to trim its own session context to avoid having to restart the session */
31
35
  export const trimSessionEnabled = false;
32
36
  /* .env is used for global configs across naisys, while agent configs are for the specific agent */
33
37
  export const naisysFolder = getEnv("NAISYS_FOLDER", true);
@@ -37,7 +41,7 @@ export const localLlmName = getEnv("LOCAL_LLM_NAME");
37
41
  export const openaiApiKey = getEnv("OPENAI_API_KEY");
38
42
  export const googleApiKey = getEnv("GOOGLE_API_KEY");
39
43
  export const anthropicApiKey = getEnv("ANTHROPIC_API_KEY");
40
- export const agent = loadAgentConfig();
44
+ export const openRouterApiKey = getEnv("OPENROUTER_API_KEY");
41
45
  function loadAgentConfig() {
42
46
  const config = yaml.load(fs.readFileSync(program.args[0], "utf8"));
43
47
  config.hostpath = path.resolve(program.args[0]);
@@ -108,9 +112,7 @@ export function resolveConfigVars(templateString) {
108
112
  resolvedString = resolveTemplateVars(resolvedString, "env", process.env);
109
113
  return resolvedString;
110
114
  }
111
- function resolveTemplateVars(templateString, allowedVarString,
112
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
113
- mappedVar) {
115
+ function resolveTemplateVars(templateString, allowedVarString, mappedVar) {
114
116
  const pattern = new RegExp(`\\$\\{${allowedVarString}\\.([^}]+)\\}`, "g");
115
117
  return templateString.replace(pattern, (match, key) => {
116
118
  const value = valueFromString(mappedVar, key);
@@ -24,6 +24,10 @@ export async function handleCommand(args) {
24
24
  if (!filepath) {
25
25
  throw "Error: Filepath is required";
26
26
  }
27
+ // Validate path is fully qualified
28
+ if (!filepath.getNaisysPath().startsWith("/")) {
29
+ throw "Error: Filepath must be fully qualified";
30
+ }
27
31
  pathService.ensureFileDirExists(filepath);
28
32
  output.comment(`Generating image with ${config.agent.imageModel}...`);
29
33
  const openai = new OpenAI();
@@ -46,15 +50,14 @@ export async function handleCommand(args) {
46
50
  const hostPath = filepath.toHostPath();
47
51
  const fileExtension = path.extname(hostPath).substring(1);
48
52
  await sharp(imageBuffer)
49
- .resize(256, 256, {
50
- fit: "inside",
51
- })
52
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
53
+ /*.resize(512, 512, {
54
+ fit: "inside",
55
+ })*/
53
56
  .toFormat(fileExtension)
54
57
  .toFile(hostPath);
55
58
  // Record the cost
56
59
  await costTracker.recordCost(model.cost, "genimg", model.name);
57
- return "Image generated and saved to " + filepath.getNaisysPath();
60
+ return "1024x1024 Image generated and saved to " + filepath.getNaisysPath();
58
61
  }
59
62
  const imageModels = [
60
63
  {
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
@@ -4,57 +4,82 @@ export var LlmApiType;
4
4
  LlmApiType["OpenAI"] = "openai";
5
5
  LlmApiType["Google"] = "google";
6
6
  LlmApiType["Anthropic"] = "anthropic";
7
+ LlmApiType["OpenRouter"] = "openrouter";
7
8
  })(LlmApiType || (LlmApiType = {}));
8
9
  const llmModels = [
9
10
  {
10
- key: "gpt4turbo",
11
- name: "gpt-4-turbo",
11
+ key: "local",
12
+ name: config.localLlmName || "local",
13
+ baseUrl: config.localLlmUrl,
12
14
  apiType: LlmApiType.OpenAI,
15
+ maxTokens: 8000,
16
+ // Prices are per 1M tokens
17
+ inputCost: 0,
18
+ outputCost: 0,
19
+ },
20
+ // Open Router
21
+ {
22
+ key: "llama3-405b",
23
+ name: "meta-llama/llama-3.1-405b-instruct",
24
+ baseUrl: "https://openrouter.ai/api/v1",
25
+ apiType: LlmApiType.OpenRouter,
13
26
  maxTokens: 128000,
14
27
  // Prices are per 1M tokens
15
- inputCost: 10,
16
- outputCost: 30,
28
+ inputCost: 2.7,
29
+ outputCost: 2.7,
17
30
  },
31
+ // OpenAI Models
32
+ // https://openai.com/api/pricing/
18
33
  {
19
- key: "gpt3turbo",
20
- name: "gpt-3.5-turbo",
34
+ key: "gpto3mini",
35
+ name: "o3-mini",
21
36
  apiType: LlmApiType.OpenAI,
22
- maxTokens: 16000,
37
+ maxTokens: 200000,
23
38
  // Prices are per 1M tokens
24
- inputCost: 0.5,
25
- outputCost: 1.5,
39
+ inputCost: 1.1,
40
+ outputCost: 4.4,
26
41
  },
27
42
  {
28
- key: "local",
29
- name: config.localLlmName || "local",
30
- baseUrl: config.localLlmUrl,
43
+ key: "gpt4mini",
44
+ name: "gpt-4o-mini",
31
45
  apiType: LlmApiType.OpenAI,
32
- maxTokens: 8000,
46
+ maxTokens: 128000,
33
47
  // Prices are per 1M tokens
34
- inputCost: 0,
35
- outputCost: 0,
48
+ inputCost: 0.15,
49
+ outputCost: 0.6,
36
50
  },
51
+ {
52
+ key: "gpt4o",
53
+ name: "gpt-4o",
54
+ apiType: LlmApiType.OpenAI,
55
+ maxTokens: 128000,
56
+ // Prices are per 1M tokens
57
+ inputCost: 2.5,
58
+ outputCost: 10,
59
+ },
60
+ // Google Models
37
61
  {
38
62
  key: "gemini1.5",
39
63
  name: "gemini-1.5-pro-latest",
40
64
  apiType: LlmApiType.Google,
41
- maxTokens: 1048576,
65
+ maxTokens: 1000000,
42
66
  // 2 queries per minute free then the prices below are per 1000 characters
43
- inputCost: 7,
44
- outputCost: 21,
67
+ inputCost: 1.25,
68
+ outputCost: 5,
45
69
  },
46
70
  {
47
- key: "gemini1.0",
48
- name: "gemini-pro",
71
+ key: "gemini2.0flash",
72
+ name: "gemini-2.0-flash",
49
73
  apiType: LlmApiType.Google,
50
- maxTokens: 30720,
74
+ maxTokens: 1000000,
51
75
  // 60 queries per minute free then the prices below are per 1000 characters
52
- inputCost: 0.5,
53
- outputCost: 1.5,
76
+ inputCost: 0.1,
77
+ outputCost: 0.4,
54
78
  },
79
+ // Anthropic Models
55
80
  {
56
81
  key: "claude3opus",
57
- name: "claude-3-opus-20240229",
82
+ name: "claude-3-opus-latest",
58
83
  apiType: LlmApiType.Anthropic,
59
84
  maxTokens: 200000,
60
85
  // Prices are per 1M tokens
@@ -62,8 +87,8 @@ const llmModels = [
62
87
  outputCost: 75,
63
88
  },
64
89
  {
65
- key: "claude3sonnet",
66
- name: "claude-3-sonnet-20240229",
90
+ key: "claude3.7sonnet",
91
+ name: "claude-3-7-sonnet-latest",
67
92
  apiType: LlmApiType.Anthropic,
68
93
  maxTokens: 200000,
69
94
  // Prices are per 1M tokens
@@ -71,20 +96,24 @@ const llmModels = [
71
96
  outputCost: 15,
72
97
  },
73
98
  {
74
- key: "claude3haiku",
75
- name: "claude-3-haiku-20240307",
99
+ key: "claude3.5haiku",
100
+ name: "claude-3-5-haiku-latest",
76
101
  apiType: LlmApiType.Anthropic,
77
102
  maxTokens: 200000,
78
103
  // Prices are per 1M tokens
79
- inputCost: 0.25,
80
- outputCost: 1.25,
104
+ inputCost: 0.8,
105
+ outputCost: 4,
81
106
  },
82
107
  ];
83
- export function getLLModel(key) {
84
- const model = llmModels.find((m) => m.key === key);
108
+ export function getLLModel(keyName) {
109
+ const [key, name] = keyName.split("/");
110
+ const model = structuredClone(llmModels.find((m) => m.key === key));
85
111
  if (!model) {
86
112
  throw `Error, model not found: ${key}`;
87
113
  }
114
+ if (name) {
115
+ model.name = name;
116
+ }
88
117
  return model;
89
118
  }
90
119
  //# sourceMappingURL=llModels.js.map
File without changes
@@ -18,14 +18,18 @@ export async function query(modelKey, systemMessage, context, source) {
18
18
  else if (model.apiType == LlmApiType.Anthropic) {
19
19
  return sendWithAnthropic(modelKey, systemMessage, context, source);
20
20
  }
21
- else if (model.apiType == LlmApiType.OpenAI) {
22
- return sendWithOpenAiCompatible(modelKey, systemMessage, context, source);
21
+ else if (model.apiType == LlmApiType.OpenAI ||
22
+ model.apiType == LlmApiType.OpenRouter) {
23
+ const apiKey = model.apiType == LlmApiType.OpenAI
24
+ ? config.openaiApiKey
25
+ : config.openRouterApiKey;
26
+ return sendWithOpenAiCompatible(modelKey, systemMessage, context, source, apiKey);
23
27
  }
24
28
  else {
25
29
  throw `Error, unknown LLM API type ${model.apiType}`;
26
30
  }
27
31
  }
28
- async function sendWithOpenAiCompatible(modelKey, systemMessage, context, source) {
32
+ async function sendWithOpenAiCompatible(modelKey, systemMessage, context, source, apiKey) {
29
33
  const model = getLLModel(modelKey);
30
34
  if (model.key === "local") {
31
35
  if (!model.baseUrl) {
@@ -37,7 +41,7 @@ async function sendWithOpenAiCompatible(modelKey, systemMessage, context, source
37
41
  }
38
42
  const openAI = new OpenAI({
39
43
  baseURL: model.baseUrl,
40
- apiKey: config.openaiApiKey,
44
+ apiKey,
41
45
  });
42
46
  // Assert the last message on the context is a user message
43
47
  const lastMessage = context[context.length - 1];
@@ -48,7 +52,7 @@ async function sendWithOpenAiCompatible(modelKey, systemMessage, context, source
48
52
  model: model.name,
49
53
  messages: [
50
54
  {
51
- role: LlmRole.System,
55
+ role: LlmRole.System, // LlmRole.User, //
52
56
  content: systemMessage,
53
57
  },
54
58
  ...context.map((m) => ({
File without changes
package/dist/naisys.js CHANGED
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
@@ -1,7 +1,5 @@
1
1
  import { get_encoding } from "tiktoken";
2
- export function valueFromString(
3
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
4
- obj, path, defaultValue) {
2
+ export function valueFromString(obj, path, defaultValue) {
5
3
  if (!path) {
6
4
  return obj;
7
5
  }
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "naisys",
3
3
  "description": "Node.js Autonomous Intelligence System",
4
- "version": "1.6.0",
4
+ "version": "1.6.1",
5
5
  "type": "module",
6
6
  "main": "dist/naisys.js",
7
7
  "preferGlobal": true,
@@ -9,16 +9,17 @@
9
9
  "naisys": "bin/naisys"
10
10
  },
11
11
  "scripts": {
12
- "compile/run/attachable": "tsc && node --inspect dist/naisys.js ./agents/netmap.yaml",
12
+ "compile/run/attachable": "tsc && node --inspect dist/naisys.js ./agents/solo-websites/webdev-fansite.yaml",
13
13
  "agent:assistant": "node dist/naisys.js ./agents/assistant.yaml",
14
14
  "agent:nightwatch": "node dist/naisys.js ./agents/nightwatch.yaml",
15
15
  "clean": "rm -rf dist",
16
16
  "compile": "tsc",
17
- "eslint": "npx eslint --rulesdir eslint-rules src",
17
+ "eslint": "npx eslint src",
18
18
  "test": "tsc && node --experimental-vm-modules node_modules/jest/bin/jest.js --testPathPattern=dist/__tests__",
19
19
  "prettier": "npx prettier --write .",
20
20
  "dependency-graph": "madge --image dependency-graph.png dist",
21
21
  "detect-cycles": "madge --circular dist",
22
+ "gen-tuning": "tsc && node dist/__fine-tuning__/gen-jsonl.js",
22
23
  "updates:check": "npm-check-updates",
23
24
  "updates:apply": "npm-check-updates -u && npm install",
24
25
  "npm:publish:dryrun": "npm run clean && npm ci && npm run compile && npm publish --dry-run",
@@ -42,32 +43,33 @@
42
43
  "devDependencies": {
43
44
  "@types/escape-html": "1.0.4",
44
45
  "@types/js-yaml": "4.0.9",
45
- "@types/node": "20.12.7",
46
+ "@types/node": "22.13.5",
46
47
  "@types/text-table": "0.2.5",
47
- "@typescript-eslint/eslint-plugin": "7.7.0",
48
- "@typescript-eslint/parser": "7.7.0",
49
- "eslint": "8.56.0",
48
+ "@typescript-eslint/eslint-plugin": "8.25.0",
49
+ "@typescript-eslint/parser": "8.25.0",
50
+ "eslint": "9.21.0",
50
51
  "jest": "29.7.0",
51
- "prettier": "3.2.5",
52
+ "prettier": "3.5.2",
52
53
  "ts-node": "10.9.2",
53
- "typescript": "5.4.5"
54
+ "typescript": "5.7.3"
54
55
  },
55
56
  "dependencies": {
56
- "@anthropic-ai/sdk": "0.20.5",
57
- "@google/generative-ai": "0.7.1",
57
+ "@anthropic-ai/sdk": "0.38.0",
58
+ "@google/generative-ai": "0.22.0",
58
59
  "@xterm/headless": "5.5.0",
59
- "chalk": "5.3.0",
60
- "commander": "12.0.0",
61
- "dotenv": "16.4.5",
60
+ "chalk": "5.4.1",
61
+ "commander": "13.1.0",
62
+ "dotenv": "16.4.7",
62
63
  "escape-html": "1.0.3",
63
64
  "js-yaml": "4.1.0",
64
- "openai": "4.36.0",
65
- "sharp": "0.33.3",
65
+ "openai": "4.86.1",
66
+ "sharp": "0.33.5",
66
67
  "sqlite": "5.1.1",
67
68
  "sqlite3": "5.1.7",
68
69
  "strip-ansi": "7.1.0",
69
70
  "text-table": "0.2.0",
70
- "tiktoken": "1.0.14",
71
- "tree-kill": "1.2.2"
71
+ "tiktoken": "1.0.20",
72
+ "tree-kill": "1.2.2",
73
+ "web-streams-polyfill": "4.1.0"
72
74
  }
73
75
  }