@alhisan/gac 1.0.0 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,35 @@
+ name: Publish Package to npmjs
+
+ on:
+   release:
+     # This triggers when you click 'Publish Release' in the GitHub UI
+     types: [published]
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - uses: actions/setup-node@v4
+         with:
+           node-version: 20
+       - run: npm ci
+       - run: npm test
+
+   publish-npm:
+     needs: build
+     runs-on: ubuntu-latest
+     # CRITICAL: These permissions are required for --provenance to work
+     permissions:
+       id-token: write
+       contents: read
+     steps:
+       - uses: actions/checkout@v4
+       - uses: actions/setup-node@v4
+         with:
+           node-version: 20
+           registry-url: https://registry.npmjs.org
+       - run: npm ci
+       - run: npm publish --provenance --access public
+         env:
+           NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
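
The `--provenance` flag in this workflow attaches a build attestation when publishing from CI. From the consumer side, something like the following should verify it (a sketch, assuming npm 9+; `npm audit signatures` checks registry signatures and provenance attestations for packages already installed in the current project):

```bash
npm install @alhisan/gac
npm audit signatures   # verifies registry signatures and provenance attestations
```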
package/README.md CHANGED
@@ -1,10 +1,10 @@
- # GPT4All CLI (gac)
+ # GAC CLI (gac)

- Terminal client for GPT4All running on localhost. Supports streaming responses, interactive chat, and configurable markdown rendering using `terminal-kit`.
+ Terminal client for OpenAI-compatible APIs (including GPT4All) and Ollama. Supports streaming responses, interactive chat, and configurable markdown rendering using `terminal-kit`.

  ## Installation

- Requirements: Node.js 18+ and a running GPT4All OpenAI-compatible server.
+ Requirements: Node.js 18+ and a running OpenAI-compatible server (like GPT4All) or Ollama.

  ```bash
  npm install -g @alhisan/gac
@@ -29,6 +29,7 @@ Single prompt:

  ```bash
  gac -a "Hello gpt4all, how are you doing today?"
+ gac "How do I push to GitHub?"
  gac suggest "How do I connect to ssh server on a custom port 5322?"
  gac explain "How do I use rsync?"
  gac suggest -d "Give me step-by-step instructions to set up an SSH server on port 5322"
@@ -68,6 +69,7 @@ View and edit:

  ```bash
  gac config
+ gac config tui
  gac config get baseUrl
  gac config set baseUrl http://localhost:4891
  gac config set model "Llama 3 8B Instruct"
@@ -77,11 +79,16 @@ gac config set detailedSuggest true

  ### Core settings

+ - `provider` (string): `openai` (default) or `ollama`
  - `baseUrl` (string): GPT4All server base, e.g. `http://localhost:4891`
+ - `ollamaBaseUrl` (string): Ollama base, e.g. `http://localhost:11434`
+ - `apiKey` (string): API key for OpenAI-compatible services (empty for local servers)
  - `model` (string): model ID from `/v1/models`
  - `temperature` (number)
  - `maxTokens` (number)
  - `stream` (boolean)
+ - `requestTimeoutMs` (number): request timeout in milliseconds (0 to disable). Useful for larger models or slower servers.
+ - `defaultAction` (string): default mode for direct prompts (`suggest`, `ask`, or `explain`).
  - `detailedSuggest` (boolean): when `true`, `suggest` mode returns more detailed, step-by-step suggestions.
  - `renderMarkdown` (boolean)
 
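Taken together, switching gac over to a local Ollama instance could look like the following, using the documented `gac config set` syntax (the model name `llama3` is a placeholder; pick one from `gac models`):

```bash
gac config set provider ollama
gac config set ollamaBaseUrl http://localhost:11434
gac config set model llama3
```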
@@ -136,9 +143,19 @@ Example:
  If you see connection errors, verify the server is reachable:

  ```bash
- curl http://localhost:4891/v1/models
+ curl http://[SERVER_ADDRESS]:[SERVER_PORT]/v1/models
+ ```
+
+ For Ollama:
+
+ ```bash
+ curl http://[SERVER_ADDRESS]:[SERVER_PORT]/api/tags
  ```

  ## License

  GNU General Public License v3.0. See `LICENSE`.
+
+ ## Disclaimer
+
+ This was mostly vibe coded; I'm treating it as a fun side project / tool that will likely continue to be improved and updated by agentic models.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@alhisan/gac",
-   "version": "1.0.0",
+   "version": "1.1.1",
    "description": "Terminal client for GPT4All running on localhost",
    "license": "GPL-3.0-only",
    "type": "module",
@@ -10,7 +10,14 @@
    "engines": {
      "node": ">=18.0.0"
    },
+   "scripts": {
+     "test": "echo \"No tests yet\""
+   },
    "dependencies": {
      "terminal-kit": "^3.1.0"
+   },
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/kwmx/gac"
    }
  }
package/src/cli.js CHANGED
@@ -2,12 +2,13 @@ import terminalKit from "terminal-kit";
  import { chatCompletion, listModels } from "./gpt4all.js";
  import { getConfigPath, loadConfig, setConfigValue } from "./config.js";
  import { createMarkdownRenderer } from "./markdown.js";
+ import fs from "fs";
  import os from "os";
  import process from "process";
  const { terminal: term } = terminalKit;

  function printHelp() {
-   term(`gac - GPT4All CLI\n\n`);
+   term(`gac - OpenAI-compatible & Ollama CLI\n\n`);
    term(`Options:\n`);
    term(`  -a             Single prompt mode (alias for ask)\n`);
    term(`  suggest        Suggestion mode\n`);
@@ -16,6 +17,7 @@ function printHelp() {
    term(`  chat           Interactive chat mode\n`);
    term(`  models         List models and set default\n`);
    term(`  config         View or edit configuration\n`);
+   term(`  config tui     Open interactive config editor\n`);
    term(`  --no-render    Disable markdown rendering\n`);
    term(`  --debug-render Show both rendered and raw output\n`);
    term(
@@ -31,12 +33,40 @@ function printHelp() {
    term(`  gac chat\n`);
    term(`  gac models\n`);
    term(`  gac config\n`);
+   term(`  gac config tui\n`);
    term(`  gac config get <key>\n`);
    term(`  gac config set <key> <value>\n`);
    term(`  gac --no-render -a "Raw markdown output"\n`);
    term(`  gac --debug-render -a "Show rendered and raw output"\n`);
    term(`\n`);
  }
+ function parseOsRelease(contents) {
+   const result = {};
+   const lines = contents.split("\n");
+   for (const line of lines) {
+     const trimmed = line.trim();
+     if (!trimmed || trimmed.startsWith("#") || !trimmed.includes("=")) continue;
+     const [key, ...rest] = trimmed.split("=");
+     let value = rest.join("=").trim();
+     if (value.startsWith('"') && value.endsWith('"')) {
+       value = value.slice(1, -1);
+     }
+     if (value) {
+       result[key] = value;
+     }
+   }
+   return result;
+ }
+
+ function readLinuxOsRelease() {
+   try {
+     const contents = fs.readFileSync("/etc/os-release", "utf8");
+     return parseOsRelease(contents);
+   } catch (err) {
+     return null;
+   }
+ }
+
  function getOSVersion() {
    const platform = os.platform();
    if (platform === "win32") {
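
For reference, `parseOsRelease` above reads the standard key=value format of `/etc/os-release`; an illustrative (Fedora-style) file showing the fields `getOSVersion` picks up:

```bash
$ cat /etc/os-release
NAME="Fedora Linux"
PRETTY_NAME="Fedora Linux 40 (Workstation Edition)"
ID=fedora
VERSION_ID=40
```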
@@ -63,7 +93,14 @@ function getOSVersion() {
    return "macOS";
  }
  if (platform === "linux") {
-   // Find which distro
+   const osRelease = readLinuxOsRelease();
+   if (osRelease && (osRelease.PRETTY_NAME || osRelease.NAME)) {
+     const pretty = osRelease.PRETTY_NAME || osRelease.NAME;
+     const id = osRelease.ID ? `; id=${osRelease.ID}` : "";
+     const idLike = osRelease.ID_LIKE ? `; id_like=${osRelease.ID_LIKE}` : "";
+     return `Linux (${pretty}${id}${idLike})`;
+   }
+   // Find which distro (fallbacks)
    if (process.env.OS_RELEASE) {
      return `${platform}: ${process.env.OS_RELEASE}`;
    } else if (process.env.OS) {
@@ -86,24 +123,33 @@ function getOSVersion() {
  }
  function buildSystemPrompt(mode, config) {
    const osInfo = getOSVersion();
+   const osGuidance = `The user is using a system with the following OS: ${osInfo}. When providing commands or package install steps, use the native tooling for that OS (e.g., dnf for Fedora, apt for Debian/Ubuntu). Avoid giving instructions for other distros unless explicitly requested.`;

    if (mode === "suggest") {
      if (config.detailedSuggest === true) {
-       return `You are an expert technical assistant. The user is using a system with the following OS: ${osInfo}. When providing suggestions, give detailed, step-by-step instructions that the user can follow to achieve their goals. Include relevant commands, code snippets, or configurations as needed. Avoid unnecessary explanations or background information. Tailor your suggestions to be relevant to the user's operating system and environment.
+       return `You are an expert technical assistant. ${osGuidance} When providing suggestions, give detailed, step-by-step instructions that the user can follow to achieve their goals. Include relevant commands, code snippets, or configurations as needed. Avoid unnecessary explanations or background information. Tailor your suggestions to be relevant to the user's operating system and environment.
  Attempt to make it a single line response where possible. Prefer commands and code snippets over lengthy explanations. Always leave commands and codes in their own line for easy copying.`;
      } else {
-       return `You are an expert technical assistant. The user is using a system with the following OS: ${osInfo}. Provide concise and practical suggestions to help the user accomplish their tasks efficiently. Focus on clarity and brevity, ensuring that your suggestions are easy to understand and implement. Tailor your suggestions to be relevant to the user's operating system and environment. Avoid lengthy explanations or unnecessary details prefer single line commands or codes if you must include explainations make sure the commands and codes are in their own line for easy copying.`;
+       return `You are an expert technical assistant. ${osGuidance} Provide concise and practical suggestions to help the user accomplish their tasks efficiently. Focus on clarity and brevity, ensuring that your suggestions are easy to understand and implement. Tailor your suggestions to be relevant to the user's operating system and environment. Avoid lengthy explanations or unnecessary details; prefer single-line commands or code. If you must include explanations, make sure the commands and code are on their own lines for easy copying.`;
      }
    }
    if (mode === "ask") {
-     return "Provide a helpful and accurate response to the user's question.";
+     return `Provide a helpful and accurate response to the user's question. ${osGuidance}`;
    }
    if (mode === "explain") {
-     return "Explain step-by-step with a short example if helpful.";
+     return `Explain step-by-step with a short example if helpful. ${osGuidance}`;
    }
    return null;
  }

+ function normalizeDefaultAction(action) {
+   const normalized = String(action || "").trim().toLowerCase();
+   if (normalized === "ask" || normalized === "suggest" || normalized === "explain") {
+     return normalized;
+   }
+   return "suggest";
+ }
+
  async function runSinglePrompt(mode, prompt, config) {
    const system = buildSystemPrompt(mode, config);
    const messages = [];
@@ -119,6 +165,11 @@ async function runSinglePrompt(mode, prompt, config) {
      term(`${reply}\n`);
    }
  }
+ if (!reply || !reply.trim()) {
+   term(
+     "No response from the model. The request may have timed out or returned empty content. Consider increasing requestTimeoutMs in the config or enabling streaming.\n"
+   );
+ }
  if (config.debugRender) {
    term(`\n--- RAW ---\n${reply}\n`);
  }
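
If that empty-reply warning traces back to a timeout, raising or disabling the limit is a one-liner via the documented `requestTimeoutMs` setting (600000 ms here is just an illustrative value):

```bash
gac config set requestTimeoutMs 600000   # 10 minutes
gac config set requestTimeoutMs 0        # or disable the timeout entirely
```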
@@ -139,6 +190,251 @@ async function inputLine(label) {
    });
  }

+ function formatConfigValue(value) {
+   if (typeof value === "string") {
+     if (!value) return "(empty)";
+     if (value.length > 48) return `${value.slice(0, 45)}...`;
+     return value;
+   }
+   return JSON.stringify(value);
+ }
+
+ function maskApiKey(apiKey) {
+   if (!apiKey) return "(empty)";
+   if (apiKey.length <= 6) return "***";
+   return `${apiKey.slice(0, 3)}...${apiKey.slice(-3)}`;
+ }
+
+ async function promptConfigValue(label, currentValue) {
+   term(`${label} [${formatConfigValue(currentValue)}]: `);
+   return new Promise((resolve) => {
+     term.inputField({ cancelable: true, default: String(currentValue ?? "") }, (error, input) => {
+       term("\n");
+       if (error || input === undefined || input === null) {
+         resolve(null);
+         return;
+       }
+       resolve(input.trim());
+     });
+   });
+ }
+
+ async function selectConfigProvider(config) {
+   const options = [
+     "OpenAI-compatible (includes GPT4All)",
+     "Ollama",
+   ];
+   const currentIndex = config.provider === "ollama" ? 1 : 0;
+   term("\nSelect provider:\n");
+   return new Promise((resolve) => {
+     term.singleColumnMenu(
+       options,
+       { cancelable: true, selectedIndex: currentIndex },
+       (error, response) => {
+         term("\n");
+         if (error || !response || response.canceled) {
+           resolve(null);
+           return;
+         }
+         resolve(response.selectedIndex === 1 ? "ollama" : "openai");
+       }
+     );
+   });
+ }
+
+ async function runConfigTui(config) {
+   term("Config editor (Esc to exit)\n\n");
+   let updatedConfig = { ...config };
+   term.grabInput({ mouse: "button" });
+   const cleanup = () => {
+     term.grabInput(false);
+     term.removeListener("key", onKey);
+     term.hideCursor(false);
+   };
+   const onKey = (name) => {
+     if (name === "CTRL_C") {
+       cleanup();
+       term("\nCanceled.\n");
+       term.processExit(0);
+     }
+   };
+   term.on("key", onKey);
+   const menuLoop = async () => {
+     const menuItems = [
+       `Provider: ${updatedConfig.provider === "ollama" ? "Ollama" : "OpenAI-compatible"}`,
+       `Base URL (OpenAI): ${formatConfigValue(updatedConfig.baseUrl)}`,
+       `Base URL (Ollama): ${formatConfigValue(updatedConfig.ollamaBaseUrl)}`,
+       `API Key: ${maskApiKey(updatedConfig.apiKey)}`,
+       `Model: ${formatConfigValue(updatedConfig.model)}`,
+       `Temperature: ${formatConfigValue(updatedConfig.temperature)}`,
+       `Max Tokens: ${formatConfigValue(updatedConfig.maxTokens)}`,
+       `Stream: ${formatConfigValue(updatedConfig.stream)}`,
+       `Request Timeout (ms): ${formatConfigValue(updatedConfig.requestTimeoutMs)}`,
+       `Default Action: ${formatConfigValue(updatedConfig.defaultAction)}`,
+       `Render Markdown: ${formatConfigValue(updatedConfig.renderMarkdown)}`,
+       `Debug Render: ${formatConfigValue(updatedConfig.debugRender)}`,
+       `Detailed Suggest: ${formatConfigValue(updatedConfig.detailedSuggest)}`,
+       "Save and exit",
+     ];
+
+     return new Promise((resolve) => {
+       term.singleColumnMenu(menuItems, { cancelable: true }, (error, response) => {
+         term("\n");
+         if (error || !response || response.canceled) {
+           resolve(false);
+           return;
+         }
+         resolve(response.selectedIndex);
+       });
+     });
+   };
+
+   while (true) {
+     const selection = await menuLoop();
+     if (selection === false) {
+       cleanup();
+       term("Config editor closed.\n");
+       break;
+     }
+
+     if (selection === 0) {
+       const provider = await selectConfigProvider(updatedConfig);
+       if (provider) {
+         setConfigValue("provider", provider);
+         updatedConfig.provider = provider;
+       }
+       continue;
+     }
+
+     if (selection === 1) {
+       const value = await promptConfigValue("OpenAI base URL", updatedConfig.baseUrl);
+       if (value !== null) {
+         setConfigValue("baseUrl", value);
+         updatedConfig.baseUrl = value;
+       }
+       continue;
+     }
+
+     if (selection === 2) {
+       const value = await promptConfigValue("Ollama base URL", updatedConfig.ollamaBaseUrl);
+       if (value !== null) {
+         setConfigValue("ollamaBaseUrl", value);
+         updatedConfig.ollamaBaseUrl = value;
+       }
+       continue;
+     }
+
+     if (selection === 3) {
+       term("API Key (leave empty to clear)\n");
+       const value = await promptConfigValue("API key", updatedConfig.apiKey);
+       if (value !== null) {
+         setConfigValue("apiKey", value);
+         updatedConfig.apiKey = value;
+       }
+       continue;
+     }
+
+     if (selection === 4) {
+       const value = await promptConfigValue("Model", updatedConfig.model);
+       if (value !== null) {
+         setConfigValue("model", value);
+         updatedConfig.model = value;
+       }
+       continue;
+     }
+
+     if (selection === 5) {
+       const value = await promptConfigValue("Temperature", updatedConfig.temperature);
+       if (value !== null) {
+         setConfigValue("temperature", value);
+         updatedConfig.temperature = value;
+       }
+       continue;
+     }
+
+     if (selection === 6) {
+       const value = await promptConfigValue("Max Tokens", updatedConfig.maxTokens);
+       if (value !== null) {
+         setConfigValue("maxTokens", value);
+         updatedConfig.maxTokens = value;
+       }
+       continue;
+     }
+
+     if (selection === 7) {
+       const value = await promptConfigValue("Stream (true/false)", updatedConfig.stream);
+       if (value !== null) {
+         setConfigValue("stream", value);
+         updatedConfig.stream = value;
+       }
+       continue;
+     }
+
+     if (selection === 8) {
+       const value = await promptConfigValue(
+         "Request timeout in ms (0 to disable)",
+         updatedConfig.requestTimeoutMs
+       );
+       if (value !== null) {
+         setConfigValue("requestTimeoutMs", value);
+         updatedConfig.requestTimeoutMs = value;
+       }
+       continue;
+     }
+
+     if (selection === 9) {
+       const value = await promptConfigValue(
+         "Default action (suggest/ask/explain)",
+         updatedConfig.defaultAction
+       );
+       if (value !== null) {
+         setConfigValue("defaultAction", value);
+         updatedConfig.defaultAction = value;
+       }
+       continue;
+     }
+
+     if (selection === 10) {
+       const value = await promptConfigValue(
+         "Render Markdown (true/false)",
+         updatedConfig.renderMarkdown
+       );
+       if (value !== null) {
+         setConfigValue("renderMarkdown", value);
+         updatedConfig.renderMarkdown = value;
+       }
+       continue;
+     }
+
+     if (selection === 11) {
+       const value = await promptConfigValue("Debug Render (true/false)", updatedConfig.debugRender);
+       if (value !== null) {
+         setConfigValue("debugRender", value);
+         updatedConfig.debugRender = value;
+       }
+       continue;
+     }
+
+     if (selection === 12) {
+       const value = await promptConfigValue(
+         "Detailed Suggest (true/false)",
+         updatedConfig.detailedSuggest
+       );
+       if (value !== null) {
+         setConfigValue("detailedSuggest", value);
+         updatedConfig.detailedSuggest = value;
+       }
+       continue;
+     }
+
+     if (selection === 13) {
+       cleanup();
+       term("Configuration saved.\n");
+       break;
+     }
+   }
+ }
+
  async function runChat(config) {
    term('Interactive chat. Type "exit" to quit.\n\n');
    const messages = [];
@@ -179,6 +475,11 @@ async function runChat(config) {
    if (config.debugRender) {
      term(`\n--- RAW ---\n${reply}\n`);
    }
+   if (!reply || !reply.trim()) {
+     term(
+       "\nNo response from the model. The request may have timed out or returned empty content. Consider increasing requestTimeoutMs in the config or enabling streaming."
+     );
+   }
    term("\n\n");
    messages.push({ role: "assistant", content: reply });
  }
@@ -187,20 +488,20 @@ async function runChat(config) {
  async function runModels(config) {
    let models;
    try {
-     models = await listModels(config.baseUrl);
+     models = await listModels(config);
    } catch (err) {
      term(`Error: ${err.message}\n`);
      term.processExit(1);
    }

    if (!models.length) {
-     term("No models found from GPT4All server.\n");
+     term("No models found from the configured provider.\n");
      term.processExit(0);
    }

    term("Available models:\n");
-   // Append 'Use default gpt4all model' option at the top
-   models.unshift("Use default gpt4all setting");
+   // Prepend an option to keep the current default at the top
+   models.unshift("Keep current default");
    models.forEach((model) => term(`- ${model}\n`));
    term("\nSelect a default model (use arrows + Enter, Esc to cancel):\n");

@@ -231,9 +532,10 @@ async function runModels(config) {
      term.processExit(0);
    }
    let selected = models[response.selectedIndex];
-   // if user selected the default model option, set to 'gpt4all'
-   if (selected === "Use default gpt4all setting") {
-     selected = "gpt4all";
+   if (selected === "Keep current default") {
+     cleanup();
+     term(`Default model unchanged ("${config.model}").\n`);
+     term.processExit(0);
    }
    setConfigValue("model", selected);
    config.model = selected;
@@ -277,6 +579,10 @@ export async function runCli(argv) {
    }

    if (args[0] === "config") {
+     if (args[1] === "tui") {
+       await runConfigTui(config);
+       return;
+     }
      if (args[1] === "get" && args[2]) {
        const key = args[2];
        term(`${config[key]}\n`);
@@ -324,6 +630,17 @@ export async function runCli(argv) {
      return;
    }

+   if (!args[0].startsWith("-")) {
+     const prompt = args.join(" ").trim();
+     if (!prompt) {
+       term("Error: missing prompt.\n");
+       term.processExit(1);
+     }
+     const defaultAction = normalizeDefaultAction(config.defaultAction);
+     await runSinglePrompt(defaultAction, prompt, config);
+     return;
+   }
+
    term("Unknown command.\n\n");
    printHelp();
  }
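
With that branch in place, a bare prompt is routed through the configured `defaultAction`; a couple of illustrative invocations (prompt text is arbitrary):

```bash
gac "compress a folder with tar"      # runs in suggest mode by default
gac config set defaultAction ask
gac "compress a folder with tar"      # now answered in ask mode
```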
package/src/config.js CHANGED
@@ -3,11 +3,16 @@ import path from "path";
  import os from "os";

  const DEFAULT_CONFIG = {
+   provider: "openai",
    baseUrl: "http://localhost:4891",
+   ollamaBaseUrl: "http://localhost:11434",
+   apiKey: "",
    model: "gpt4all",
    temperature: 0.7,
    maxTokens: 512,
    stream: true,
+   requestTimeoutMs: 300000,
+   defaultAction: "suggest",
    renderMarkdown: true,
    debugRender: false,
    detailedSuggest: false,
package/src/gpt4all.js CHANGED
@@ -1,37 +1,82 @@
- import terminalKit from 'terminal-kit';
- import { createMarkdownRenderer } from './markdown.js';
+ import terminalKit from "terminal-kit";
+ import { createMarkdownRenderer } from "./markdown.js";

  const { terminal: term } = terminalKit;

  function getContentDelta(chunk) {
-   if (!chunk || !chunk.choices || !chunk.choices[0]) return '';
+   if (!chunk || !chunk.choices || !chunk.choices[0]) return "";
    const choice = chunk.choices[0];
    if (choice.delta && choice.delta.content) return choice.delta.content;
    if (choice.message && choice.message.content) return choice.message.content;
    if (choice.text) return choice.text;
-   return '';
+   return "";
+ }
+
+ function getOllamaContentDelta(chunk) {
+   if (!chunk) return "";
+   if (chunk.message && chunk.message.content) return chunk.message.content;
+   if (chunk.response) return chunk.response;
+   return "";
+ }
+
+ function normalizeOpenAiBaseUrl(baseUrl) {
+   const trimmed = baseUrl.replace(/\/$/, "");
+   if (trimmed.endsWith("/v1")) return trimmed;
+   return `${trimmed}/v1`;
+ }
+
+ function normalizeOllamaBaseUrl(baseUrl) {
+   return baseUrl.replace(/\/$/, "");
+ }
+
+ function getProvider(config) {
+   return config.provider === "ollama" ? "ollama" : "openai";
+ }
+
+ function buildOpenAiHeaders(apiKey) {
+   const headers = { "Content-Type": "application/json" };
+   if (apiKey) {
+     headers.Authorization = `Bearer ${apiKey}`;
+   }
+   return headers;
+ }
+
+ function createTimeoutController(timeoutMs) {
+   if (!timeoutMs || Number.isNaN(timeoutMs) || timeoutMs <= 0) {
+     return null;
+   }
+   const controller = new AbortController();
+   const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
+   return { controller, timeoutId };
  }

  async function parseStream(response, onToken, renderer) {
    const reader = response.body.getReader();
-   const decoder = new TextDecoder('utf-8');
-   let buffer = '';
-   let fullText = '';
-   let lineBuffer = '';
+   const decoder = new TextDecoder("utf-8");
+   let buffer = "";
+   let fullText = "";
+   let lineBuffer = "";
+   const flushLineBuffer = () => {
+     if (renderer && lineBuffer) {
+       onToken(renderer.renderLine(lineBuffer));
+       lineBuffer = "";
+     }
+   };

    while (true) {
      const { value, done } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });

-     const lines = buffer.split('\n');
-     buffer = lines.pop() || '';
+     const lines = buffer.split("\n");
+     buffer = lines.pop() || "";

      for (const line of lines) {
        const trimmed = line.trim();
-       if (!trimmed || !trimmed.startsWith('data:')) continue;
-       const payload = trimmed.replace(/^data:\s*/, '');
-       if (payload === '[DONE]') {
+       if (!trimmed || !trimmed.startsWith("data:")) continue;
+       const payload = trimmed.replace(/^data:\s*/, "");
+       if (payload === "[DONE]") {
+         flushLineBuffer();
          return fullText;
        }

@@ -44,8 +89,8 @@ async function parseStream(response, onToken, renderer) {
          onToken(delta);
        } else {
          lineBuffer += delta;
-         const lines = lineBuffer.split('\n');
-         lineBuffer = lines.pop() || '';
+         const lines = lineBuffer.split("\n");
+         lineBuffer = lines.pop() || "";
          for (const line of lines) {
            onToken(`${renderer.renderLine(line)}\n`);
          }
@@ -64,76 +109,168 @@ async function parseStream(response, onToken, renderer) {
    return fullText;
  }

- function normalizeBaseUrl(baseUrl) {
-   const trimmed = baseUrl.replace(/\/$/, '');
-   if (trimmed.endsWith('/v1')) return trimmed;
-   return `${trimmed}/v1`;
+ async function parseOllamaStream(response, onToken, renderer) {
+   const reader = response.body.getReader();
+   const decoder = new TextDecoder("utf-8");
+   let buffer = "";
+   let fullText = "";
+   let lineBuffer = "";
+   const flushLineBuffer = () => {
+     if (renderer && lineBuffer) {
+       onToken(renderer.renderLine(lineBuffer));
+       lineBuffer = "";
+     }
+   };
+
+   while (true) {
+     const { value, done } = await reader.read();
+     if (done) break;
+     buffer += decoder.decode(value, { stream: true });
+
+     const lines = buffer.split("\n");
+     buffer = lines.pop() || "";
+
+     for (const line of lines) {
+       const trimmed = line.trim();
+       if (!trimmed) continue;
+       try {
+         const json = JSON.parse(trimmed);
+         if (json.done) {
+           flushLineBuffer();
+           return fullText;
+         }
+         const delta = getOllamaContentDelta(json);
+         if (delta) {
+           fullText += delta;
+           if (!renderer) {
+             onToken(delta);
+           } else {
+             lineBuffer += delta;
+             const lines = lineBuffer.split("\n");
+             lineBuffer = lines.pop() || "";
+             for (const line of lines) {
+               onToken(`${renderer.renderLine(line)}\n`);
+             }
+           }
+         }
+       } catch (err) {
+         // Ignore non-JSON payloads
+       }
+     }
+   }
+
+   if (renderer && lineBuffer) {
+     onToken(renderer.renderLine(lineBuffer));
+   }
+
+   return fullText;
  }

- async function fetchJson(url, payload) {
+ async function fetchJson(url, payload, errorLabel, timeoutMs) {
+   const timeout = createTimeoutController(timeoutMs);
    try {
-     const response = await fetch(url, payload);
+     const response = await fetch(url, {
+       ...payload,
+       signal: timeout ? timeout.controller.signal : undefined,
+     });
      if (!response.ok) {
        const text = await response.text();
-       throw new Error(`GPT4All error ${response.status}: ${text}`);
+       throw new Error(`${errorLabel} error ${response.status}: ${text}`);
      }
      return await response.json();
    } catch (err) {
-     if (err.message && err.message.startsWith('GPT4All error')) {
+     if (err.message && err.message.startsWith(`${errorLabel} error`)) {
        throw err;
      }
-     throw new Error(`Failed to connect to ${url}. Is GPT4All running and reachable? (${err.message})`);
+     if (err.name === "AbortError") {
+       throw new Error(
+         `${errorLabel} request timed out after ${timeoutMs}ms. Increase requestTimeoutMs in config if needed.`
+       );
+     }
+     throw new Error(`Failed to connect to ${url}. (${err.message})`);
+   } finally {
+     if (timeout) {
+       clearTimeout(timeout.timeoutId);
+     }
    }
  }

- export async function listModels(baseUrl) {
-   const url = `${normalizeBaseUrl(baseUrl)}/models`;
-   const json = await fetchJson(url, { method: 'GET' });
-   if (!json || !Array.isArray(json.data)) return [];
-   return json.data.map((model) => model.id).filter(Boolean);
- }
-
- async function fetchCompletion(url, payload) {
+ async function fetchCompletion(url, payload, headers, errorLabel, timeoutMs) {
+   const timeout = createTimeoutController(timeoutMs);
    try {
      return await fetch(url, {
-       method: 'POST',
-       headers: { 'Content-Type': 'application/json' },
-       body: JSON.stringify(payload)
+       method: "POST",
+       headers,
+       body: JSON.stringify(payload),
+       signal: timeout ? timeout.controller.signal : undefined,
      });
    } catch (err) {
-     throw new Error(`Failed to connect to ${url}. Is GPT4All running and reachable? (${err.message})`);
+     if (err.name === "AbortError") {
+       throw new Error(
+         `${errorLabel} request timed out after ${timeoutMs}ms. Increase requestTimeoutMs in config if needed.`
+       );
+     }
+     throw new Error(`Failed to connect to ${url}. (${err.message})`);
+   } finally {
+     if (timeout) {
+       clearTimeout(timeout.timeoutId);
+     }
    }
  }

- async function handleError(response) {
+ async function handleError(response, errorLabel) {
    const text = await response.text();
-   throw new Error(`GPT4All error ${response.status}: ${text}`);
+   throw new Error(`${errorLabel} error ${response.status}: ${text}`);
+ }
+
+ export async function listModels(config) {
+   const provider = getProvider(config);
+   const timeoutMs = Number(config.requestTimeoutMs);
+   if (provider === "ollama") {
+     const baseUrl = normalizeOllamaBaseUrl(config.ollamaBaseUrl);
+     const url = `${baseUrl}/api/tags`;
+     const json = await fetchJson(url, { method: "GET" }, "Ollama", timeoutMs);
+     if (!json || !Array.isArray(json.models)) return [];
+     return json.models.map((model) => model.name).filter(Boolean);
+   }
+
+   const url = `${normalizeOpenAiBaseUrl(config.baseUrl)}/models`;
+   const headers = buildOpenAiHeaders(config.apiKey);
+   const json = await fetchJson(url, { method: "GET", headers }, "OpenAI", timeoutMs);
+   if (!json || !Array.isArray(json.data)) return [];
+   return json.data.map((model) => model.id).filter(Boolean);
  }

- export async function chatCompletion(
-   { baseUrl, model, temperature, maxTokens, stream, renderMarkdown, markdownStyles },
-   messages
- ) {
-   const url = `${normalizeBaseUrl(baseUrl)}/chat/completions`;
+ async function openAiChatCompletion(config, messages) {
+   const url = `${normalizeOpenAiBaseUrl(config.baseUrl)}/chat/completions`;
+   const timeoutMs = Number(config.requestTimeoutMs);
    const payload = {
-     model,
+     model: config.model,
      messages,
-     temperature,
-     max_tokens: maxTokens,
-     stream: Boolean(stream)
+     temperature: config.temperature,
+     max_tokens: config.maxTokens,
+     stream: Boolean(config.stream),
    };

-   let response = await fetchCompletion(url, payload);
+   const headers = buildOpenAiHeaders(config.apiKey);
+   let response = await fetchCompletion(url, payload, headers, "OpenAI", timeoutMs);

-   const renderer = renderMarkdown ? createMarkdownRenderer(markdownStyles) : null;
+   const renderer = config.renderMarkdown
+     ? createMarkdownRenderer(config.markdownStyles)
+     : null;

    if (!response.ok) {
      const text = await response.text();
-     if (stream && response.status === 400 && text.includes('stream') && text.includes('not supported')) {
+     if (
+       config.stream &&
+       response.status === 400 &&
+       text.includes("stream") &&
+       text.includes("not supported")
+     ) {
        const retryPayload = { ...payload, stream: false };
-       response = await fetchCompletion(url, retryPayload);
+       response = await fetchCompletion(url, retryPayload, headers, "OpenAI", timeoutMs);
        if (!response.ok) {
-         await handleError(response);
+         await handleError(response, "OpenAI");
        }
        const json = await response.json();
        const content = getContentDelta(json);
@@ -145,19 +282,19 @@ export async function chatCompletion(
        return content;
      }

-     throw new Error(`GPT4All error ${response.status}: ${text}`);
+     throw new Error(`OpenAI error ${response.status}: ${text}`);
    }

-   if (stream) {
-     const contentType = response.headers.get('content-type') || '';
-     if (contentType.includes('text/event-stream')) {
+   if (config.stream) {
+     const contentType = response.headers.get("content-type") || "";
+     if (contentType.includes("text/event-stream")) {
        return parseStream(response, (chunk) => term(chunk), renderer);
      }
    }

    const json = await response.json();
    const content = getContentDelta(json);
-   if (stream) {
+   if (config.stream) {
      if (renderer) {
        term(renderer.renderText(content));
      } else {
@@ -166,3 +303,56 @@ export async function chatCompletion(
    }
    return content;
  }
+
+ async function ollamaChatCompletion(config, messages) {
+   const baseUrl = normalizeOllamaBaseUrl(config.ollamaBaseUrl);
+   const url = `${baseUrl}/api/chat`;
+   const timeoutMs = Number(config.requestTimeoutMs);
+   const payload = {
+     model: config.model,
+     messages,
+     stream: Boolean(config.stream),
+     options: {
+       temperature: config.temperature,
+       num_predict: config.maxTokens,
+     },
+   };
+
+   const renderer = config.renderMarkdown
+     ? createMarkdownRenderer(config.markdownStyles)
+     : null;
+   const response = await fetchCompletion(
+     url,
+     payload,
+     { "Content-Type": "application/json" },
+     "Ollama",
+     timeoutMs
+   );
+
+   if (!response.ok) {
+     await handleError(response, "Ollama");
+   }
+
+   if (config.stream) {
+     return parseOllamaStream(response, (chunk) => term(chunk), renderer);
+   }
+
+   const json = await response.json();
+   const content = getOllamaContentDelta(json);
+   if (config.stream) {
+     if (renderer) {
+       term(renderer.renderText(content));
+     } else {
+       term(content);
+     }
+   }
+   return content;
+ }
+
+ export async function chatCompletion(config, messages) {
+   const provider = getProvider(config);
+   if (provider === "ollama") {
+     return ollamaChatCompletion(config, messages);
+   }
+   return openAiChatCompletion(config, messages);
+ }
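
Unlike the SSE stream handled by `parseStream`, Ollama's `/api/chat` returns newline-delimited JSON, which is what `parseOllamaStream` consumes line by line. An illustrative exchange against a local Ollama (model name is a placeholder, output abridged):

```bash
curl -s http://localhost:11434/api/chat \
  -d '{"model": "llama3", "messages": [{"role": "user", "content": "hi"}]}'
# one JSON object per line, e.g.:
# {"message":{"content":"Hi"},"done":false}
# {"message":{"content":" there!"},"done":false}
# {"done":true}
```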
Binary file