omnibot3000 1.8.2 → 1.8.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,12 @@
+ # basic commit message validation
+
+ if ! head -1 "$1" | grep -qE '^\[(feat|fix|doc|style|perf|test|chore|misc)(\(.+\))?!?\]'; then
+   echo "❌ Commit message must follow format: [type] message"
+   echo ""
+   echo "Examples:"
+   echo "  [feat] add new feature"
+   echo "  [fix] resolve bug in component"
+   echo "  [doc] update README"
+   echo "  [chore] update dependencies"
+   exit 1
+ fi
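
The hook checks only the first line of the message (`head -1 "$1"`). A minimal TypeScript sketch of the same regular expression, handy for trying the pattern outside of git; the sample messages are illustrative, not from the package:

```ts
// Same pattern as the hook's grep: [type], optional (scope), optional "!".
const COMMIT_RE = /^\[(feat|fix|doc|style|perf|test|chore|misc)(\(.+\))?!?\]/;

const samples = [
  "[feat] add new feature", // ok
  "[fix(api)] resolve bug in component", // ok: optional scope
  "[chore!] drop old node version", // ok: optional "!" marker
  "feat: add new feature", // rejected: conventional-commits style, not [type]
];

for (const msg of samples) {
  console.log(COMMIT_RE.test(msg) ? "ok " : "rej", msg);
}
```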
@@ -0,0 +1 @@
+ pnpm lint && pnpm prettify
package/README.md CHANGED
@@ -6,4 +6,33 @@ YOUR OMNISCIENT SOURCE OF TRUTH
 
  ### Install
 
- ```pnpm i omnibot3000```
+ `pnpm i omnibot3000`
+
+ ### Configuration
+
+ Create a `.env` file in the root of your project with the following content:
+
+ ```env
+ DOMAIN=localhost
+ DEV_PORT=3000
+ API_PORT=3001
+ API_PATH=/api
+
+ MISTRAL_API_KEY=<your_mistral_api_key>
+
+ OPENAI_ORG_ID=<your_openai_org_id>
+ OPENAI_PROJECT_ID=<your_openai_project_id>
+ OPENAI_API_KEY=<your_openai_api_key>
+ ```
+
+ ### Package
+
+ https://www.npmjs.com/package/omnibot3000
+
+ ### Contributing
+
+ NO
+
+ ### License
+
+ NO
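
The server reads these variables via dotenv at startup (see `package/api/server.ts` below). A minimal, hypothetical fail-fast check for the two provider secrets, with the variable names taken from the `.env` template above:

```ts
import "dotenv/config";

// Fail fast if a provider key is missing; the list mirrors the .env
// template above and covers only the secrets the API calls need.
const required = ["MISTRAL_API_KEY", "OPENAI_API_KEY"] as const;

for (const name of required) {
  if (!process.env[name]) {
    console.error(`missing required env var: ${name}`);
    process.exit(1);
  }
}
```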
package/api/server.ts CHANGED
@@ -11,7 +11,13 @@ import {createServer, IncomingMessage, ServerResponse} from "http";
  import path from "path";
 
  import "dotenv/config";
+ import {Mistral} from "@mistralai/mistralai";
+ import type {
+   ChatCompletionRequest,
+   CompletionEvent,
+ } from "@mistralai/mistralai/models/components";
  import OpenAI from "openai";
+ import type {ChatCompletionCreateParamsNonStreaming} from "openai/resources/chat/completions";
  import type {ChatCompletionChunk} from "openai/resources/index.mjs";
  import type {Stream} from "openai/streaming";
 
@@ -21,12 +27,42 @@ type Package = {
    size: number;
  };
 
+ type Provider = "openai" | "mistral";
+
+ const MODEL: Provider = "openai";
+ const MAX_TOKENS = 1000;
+
  const DOMAIN = process.env.DOMAIN || "localhost";
  const API_PATH = process.env.API_PATH || "/api";
  const API_PORT = process.env.API_PORT || 3001;
  const BASE_PATH = process.cwd();
  const JSON_PATH = path.join(BASE_PATH, "dist", "packages.json");
 
+ type OpenAIConfig = Omit<ChatCompletionCreateParamsNonStreaming, "messages">;
+ type MistralConfig = Omit<ChatCompletionRequest, "messages">;
+
+ const API_CONFIG = {
+   openai: {
+     model: "gpt-4.1-mini",
+     //model: "gpt-5-mini",
+     temperature: 2.0 /* more creative */,
+     top_p: 0.3 /* use nucleus sampling */,
+     frequency_penalty: 1.5 /* avoid repetition */,
+     presence_penalty: 2.0 /* encourage new topics */,
+     max_completion_tokens: MAX_TOKENS,
+   } satisfies OpenAIConfig,
+   mistral: {
+     //model: "ministral-14b-latest",
+     model: "mistral-small-latest",
+     temperature: 1 /* creativity */,
+     topP: 0.3 /* nucleus sampling */,
+     frequencyPenalty: 1.0 /* avoid repetition */,
+     presencePenalty: 1.0 /* encourage new topics */,
+     maxTokens: MAX_TOKENS,
+     randomSeed: Math.round(Math.random() * 1e9),
+   } satisfies MistralConfig,
+ };
+
  const getFolderSize = (folder: string): number => {
    let total = 0;
    try {
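
The per-provider config objects above use `satisfies` so each block is type-checked against the SDK's request type without widening its literals. A minimal sketch of the pattern, with a hypothetical `Config` type standing in for the SDK types:

```ts
// Hypothetical stand-in for the SDK request types used above.
type Config = {model: string; temperature?: number; maxTokens?: number};

const config = {
  model: "mistral-small-latest",
  temperature: 1,
  maxTokens: 1000,
} satisfies Config;

// Unlike a `: Config` annotation, `satisfies` validates the object but
// keeps its literal type, so `config.model` stays "mistral-small-latest".
const model: "mistral-small-latest" = config.model; // compiles
```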
@@ -67,44 +103,97 @@ const server = createServer((req: IncomingMessage, res: ServerResponse) => {
        try {
          const {messages, stream} = JSON.parse(body);
 
-         const openai = new OpenAI({
-           apiKey: process.env.OPENAI_API_KEY,
-           organization: process.env.OPENAI_ORG_ID,
-           project: process.env.OPENAI_PROJECT_ID,
-         });
-
-         const response = await openai.chat.completions.create({
-           /* https://openai.com/api/pricing/ */
-           model: "gpt-4.1-mini",
-           messages,
-           max_completion_tokens: 1000,
-           temperature: 1.0, // lower temperature to get stricter completion (good for code)
-           //reasoning: {effort: "high"},
-           stream,
-         });
-
-         if (stream) {
-           /* server-sent events headers */
-           res.writeHead(200, {
-             "Content-Type": "text/event-stream",
-             "Cache-Control": "no-cache",
-             Connection: "keep-alive",
-           });
-           /* forward chunks to browser as SSE */
-           for await (const chunk of response as unknown as Stream<ChatCompletionChunk>) {
-             res.write(`data: ${JSON.stringify(chunk)}\n\n`);
-           }
-           /* end the SSE stream */
-           res.write("data: [DONE]\n\n");
-           res.end();
-         } else {
+         switch (MODEL as Provider) {
+           case "openai":
+             /* https://openai.com/api/pricing/ */
+             {
+               const openai = new OpenAI({
+                 apiKey: process.env.OPENAI_API_KEY,
+                 organization: process.env.OPENAI_ORG_ID,
+                 project: process.env.OPENAI_PROJECT_ID,
+               });
+               const response = await openai.chat.completions.create({
+                 ...API_CONFIG.openai,
+                 messages,
+                 stream,
+               });
+               if (stream) {
+                 /* server-sent events headers */
+                 res.writeHead(200, {
+                   "Content-Type": "text/event-stream",
+                   "Cache-Control": "no-cache",
+                   Connection: "keep-alive",
+                 });
+                 /* forward chunks to browser as SSE */
+                 for await (const chunk of response as unknown as Stream<ChatCompletionChunk>) {
+                   res.write(`data: ${JSON.stringify(chunk)}\n\n`);
+                 }
+                 /* end the SSE stream */
+                 res.write("data: [DONE]\n\n");
+                 res.end();
+               } else {
+                 res.writeHead(200, {"Content-Type": "application/json"});
+                 res.end(JSON.stringify(response));
+               }
+             }
+             break;
+           case "mistral":
+             /* https://mistral.ai/pricing#api-pricing */
+             {
+               const mistral = new Mistral({
+                 apiKey: process.env.MISTRAL_API_KEY,
+               });
+               if (stream) {
+                 /* server-sent events headers */
+                 res.writeHead(200, {
+                   "Content-Type": "text/event-stream",
+                   "Cache-Control": "no-cache",
+                   Connection: "keep-alive",
+                 });
+                 const response = await mistral.chat.stream({
+                   ...API_CONFIG.mistral,
+                   messages,
+                 });
+                 /* forward chunks to browser as SSE */
+                 for await (const chunk of response as AsyncIterable<CompletionEvent>) {
+                   res.write(`data: ${JSON.stringify(chunk)}\n\n`);
+                 }
+                 /* end the SSE stream */
+                 res.write("data: [DONE]\n\n");
+                 res.end();
+               } else {
+                 const response = await mistral.chat.complete({
+                   ...API_CONFIG.mistral,
+                   messages,
+                 });
+                 res.writeHead(200, {"Content-Type": "application/json"});
+                 res.end(JSON.stringify(response));
+               }
+             }
+             break;
+           default:
+             throw new Error("Invalid API specified");
+         }
+       } catch (error) {
+         console.error(
+           "API Error:",
+           error instanceof Error ? error.message : String(error),
+         );
+         /* Only send response if headers haven't been sent yet */
+         if (!res.headersSent) {
+           const response = {
+             choices: [
+               {
+                 message: {
+                   role: "assistant",
+                   content: "no signal",
+                 },
+               },
+             ],
+           };
            res.writeHead(200, {"Content-Type": "application/json"});
            res.end(JSON.stringify(response));
          }
-       } catch (err) {
-         const error = err instanceof Error ? err.message : "unknown error";
-         res.writeHead(500, {"Content-Type": "application/json"});
-         res.end(JSON.stringify({error}));
        }
      });
  } else if (url.startsWith(`${API_PATH}/packages`)) {
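
Both providers are forwarded to the browser as server-sent events terminated by a `data: [DONE]` sentinel. A hypothetical client-side consumer as a sketch; the diff confirms the `{messages, stream}` request body, but the `/api/chat` path is an assumption, and the chunk shape depends on the selected provider:

```ts
// Assumed endpoint path; the request body shape matches the server above.
const res = await fetch("/api/chat", {
  method: "POST",
  headers: {"Content-Type": "application/json"},
  body: JSON.stringify({messages: [{role: "user", content: "hi"}], stream: true}),
});

const reader = res.body!.getReader();
const decoder = new TextDecoder();
let buffer = "";

while (true) {
  const {done, value} = await reader.read();
  if (done) break;
  buffer += decoder.decode(value, {stream: true});
  const events = buffer.split("\n\n"); // SSE events end with a blank line
  buffer = events.pop() ?? ""; // keep any incomplete trailing event
  for (const event of events) {
    const data = event.replace(/^data: /, "");
    if (data === "[DONE]") continue; // end-of-stream sentinel
    console.log(JSON.parse(data)); // provider-specific completion chunk
  }
}
```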
@@ -137,6 +226,10 @@ const server = createServer((req: IncomingMessage, res: ServerResponse) => {
    }
  });
 
+ /* Increase max listeners to handle concurrent streaming requests */
+ server.setMaxListeners(0);
+ server.maxConnections = 100;
+
  server.listen(API_PORT, () => {
    console.log(
      "\n\x1b[1m\x1b[32m%s\x1b[0m %s \x1b[36m%s\x1b[0m",
package/bin/omnibot.js ADDED
@@ -0,0 +1,19 @@
+ #!/usr/bin/env node
+
+ import {spawn} from "child_process";
+ import path from "path";
+ import {fileURLToPath} from "url";
+ import process from "process";
+
+ const filename = fileURLToPath(import.meta.url);
+
+ const dir = path.dirname(filename);
+ const root = path.dirname(dir);
+
+ process.chdir(root);
+
+ const vite = spawn("pnpm", ["run", "start"], {stdio: "inherit"});
+
+ vite.on("close", (code) => {
+   process.exit(code);
+ });
package/package.json CHANGED
@@ -3,7 +3,7 @@
    "x-display-name": "OMNIBOT 3000",
    "description": "your omniscient source of truth",
    "private": false,
-   "version": "1.8.2",
+   "version": "1.8.6",
    "type": "module",
    "author": {
      "name": "rez",
@@ -11,6 +11,13 @@
      "url": "https://github.com/chiptune"
    },
    "homepage": "https://www.omnibot3000.com",
+   "repository": {
+     "url": "https://github.com/chiptune/omnibot3000",
+     "type": "github"
+   },
+   "bin": {
+     "omnibot": "./bin/omnibot.js"
+   },
    "scripts": {
      "start": "pnpm run-p start:dev start:api",
      "start:dev": "pnpm run dev",
@@ -23,19 +30,21 @@
      "build:client": "tsc -b && vite build",
      "build:api": "tsc --project tsconfig.api.json",
      "lint": "eslint --fix .",
-     "prettify": "prettier --write ."
+     "prettify": "prettier --write .",
+     "prepare": "husky"
    },
    "dependencies": {
-     "openai": "^6.10.0",
+     "@mistralai/mistralai": "^1.11.0",
+     "openai": "^6.15.0",
      "react": "^19.2.3",
      "react-dom": "^19.2.3",
      "react-markdown": "^10.1.0",
-     "react-router-dom": "^7.10.1",
+     "react-router-dom": "^7.11.0",
      "zustand": "^5.0.9"
    },
    "devDependencies": {
      "@eslint/js": "^9.39.2",
-     "@types/node": "^25.0.1",
+     "@types/node": "^25.0.3",
      "@types/react": "^19.2.7",
      "@types/react-dom": "^19.2.3",
      "@vitejs/plugin-react": "^5.1.2",
@@ -44,15 +53,16 @@
      "eslint": "^9.39.2",
      "eslint-plugin-import": "^2.32.0",
      "eslint-plugin-react-hooks": "^7.0.1",
-     "eslint-plugin-react-refresh": "^0.4.24",
+     "eslint-plugin-react-refresh": "^0.4.26",
      "eslint-plugin-simple-import-sort": "^12.1.1",
      "globals": "^16.5.0",
+     "husky": "^9.1.7",
      "nodemon": "^3.1.11",
      "npm-run-all": "^4.1.5",
      "prettier": "^3.7.4",
      "typescript": "^5.9.3",
-     "typescript-eslint": "^8.49.0",
-     "vite": "^7.2.7",
-     "vite-tsconfig-paths": "^5.1.4"
+     "typescript-eslint": "^8.50.0",
+     "vite": "^7.3.0",
+     "vite-tsconfig-paths": "^6.0.3"
    }
  }
package/src/App.tsx CHANGED
@@ -86,19 +86,23 @@ const Layout = () => {
 
    setWidth(Math.floor((vw - cw * 2) / cw) * cw);
    setHeight(Math.floor((vh - cw * 4) / lh) * lh + cw * 2);
+ };
+
+ useEffect(() => {
+   const vw = window.innerWidth;
+   const vh = window.innerHeight;
 
    let el = document.getElementById("debug-screen-size");
-   if (!el) {
-     el = document.createElement("div");
-     el.id = "debug-screen-size";
-     el.className = "debug-info";
-     document.body.appendChild(el);
-   }
-   el.innerHTML = `viewport: ${vw}x${vh} |\
-     char: ${format(cw)}x${format(lh)} |\
-     w: ${w} | h: ${h}`;
+   if (!el) el = document.createElement("div");
+
+   el.id = "debug-screen-size";
+   el.className = "debug-info";
+   document.body.appendChild(el);
+   el.innerHTML = `viewport: ${vw}x${vh} | \
+     char: ${format(cw)}x${format(lh)} | \
+     w: ${w} | h: ${h}`;
    el.style.display = debug ? "block" : "none";
- };
+ }, [w, h]);
 
  useLayoutEffect(() => {
    const resizeObserver = new ResizeObserver(update);
@@ -162,9 +166,9 @@ const Layout = () => {
      <div className={styles.content}>
        <Header darkMode={darkMode} onThemeToggle={themeSwitchHandler} />
        <Line variant="horizontal" className={styles["h-line"]} />
-       <div ref={bodyRef} className={styles.body}>
+       <main ref={bodyRef} className={styles.body}>
          <Outlet />
-       </div>
+       </main>
        <Line variant="horizontal" className={styles["h-line"]} />
        <Footer renderTime={renderTime} />
      </div>
@@ -35,6 +35,7 @@
  .hr {
    margin: 0;
    padding: 0;
+   padding-bottom: var(--line-height);
    opacity: var(--opacity-tertiary);
  }
 
@@ -4,7 +4,7 @@ import Markdown from "react-markdown";
  import styles from "@commons/OmnibotSpeak.module.css";
  import Caret from "@ui/Caret";
  import Line from "@ui/Line";
- import {formatText, sanitizeHTML} from "@utils/strings";
+ import {sanitizeHTML} from "@utils/strings";
 
  import cls from "classnames";
 
@@ -21,7 +21,7 @@ export const OmnibotSpeak = (props: {truth: string; hasCaret?: boolean}) => (
          return <Line char="*" className={styles["hr"]} />;
        },
      }}>
-     {formatText(sanitizeHTML(props.truth))}
+     {sanitizeHTML(props.truth)}
    </Markdown>
  </div>
  {props.hasCaret && <Caret />}