@alhisan/gac 1.1.0 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,11 +1,9 @@
-# This workflow will run tests using node and then publish a package to GitHub Packages when a release is created
-# For more information see: https://docs.github.com/en/actions/publishing-packages/publishing-nodejs-packages
-
 name: Publish Package to npmjs
 
 on:
   release:
-    types: [created]
+    # This triggers when you click 'Publish Release' in the GitHub UI
+    types: [published]
 
 jobs:
   build:
@@ -21,13 +19,17 @@ jobs:
   publish-npm:
     needs: build
     runs-on: ubuntu-latest
+    # CRITICAL: These permissions are required for --provenance to work
+    permissions:
+      id-token: write
+      contents: read
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-node@v4
         with:
           node-version: 20
-          registry-url: https://registry.npmjs.org/
+          registry-url: https://registry.npmjs.org
       - run: npm ci
       - run: npm publish --provenance --access public
        env:
-          NODE_AUTH_TOKEN: ${{secrets.npm_token}}
+          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
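With the trigger switched from `created` to `published`, the workflow now runs when a release is actually published; a draft release does not trigger it until it is published. A small sketch of kicking off a publish from the command line, assuming the GitHub CLI (`gh`) is installed; the tag and notes are illustrative:

```bash
# Illustrative tag and notes: publishing the release fires the `release: published`
# trigger, which runs `npm publish --provenance` with the id-token permission above.
gh release create v1.1.1 --title "v1.1.1" --notes "Request timeout and default action support"
```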
package/README.md CHANGED
@@ -29,6 +29,7 @@ Single prompt:
 
 ```bash
 gac -a "Hello gpt4all, how are you doing today?"
+gac "How do I push to GitHub?"
 gac suggest "How do I connect to ssh server on a custom port 5322?"
 gac explain "How do I use rsync?"
 gac suggest -d "Give me step-by-step instructions to set up an SSH server on port 5322"
@@ -86,6 +87,8 @@ gac config set detailedSuggest true
 - `temperature` (number)
 - `maxTokens` (number)
 - `stream` (boolean)
+- `requestTimeoutMs` (number): request timeout in milliseconds (0 to disable). Useful for larger models or slower servers.
+- `defaultAction` (string): default mode for direct prompts (`suggest`, `ask`, or `explain`).
 - `detailedSuggest` (boolean): when `true`, `suggest` mode returns more detailed, step-by-step suggestions.
 - `renderMarkdown` (boolean)
 
@@ -146,7 +149,7 @@ curl http://[SERVER_ADDRESS]:[SERVER_PORT]/v1/models
 For Ollama:
 
 ```bash
-curl http://localhost:11434/api/tags
+curl http://[SERVER_ADDRESS]:[SERVER_PORT]/api/tags
 ```
 
 ## License
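The `requestTimeoutMs` key documented above can be set with the existing `gac config set` command; the values below are illustrative (per the README, 0 disables the timeout):

```bash
# Illustrative: allow up to 10 minutes for a slow local model
gac config set requestTimeoutMs 600000
# Or disable the request timeout entirely
gac config set requestTimeoutMs 0
```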
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@alhisan/gac",
-  "version": "1.1.0",
+  "version": "1.1.1",
   "description": "Terminal client for GPT4All running on localhost",
   "license": "GPL-3.0-only",
   "type": "module",
@@ -10,7 +10,14 @@
   "engines": {
     "node": ">=18.0.0"
   },
+  "scripts": {
+    "test": "echo \"No tests yet\""
+  },
   "dependencies": {
     "terminal-kit": "^3.1.0"
+  },
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/kwmx/gac"
   }
 }
package/src/cli.js CHANGED
@@ -2,6 +2,7 @@ import terminalKit from "terminal-kit";
 import { chatCompletion, listModels } from "./gpt4all.js";
 import { getConfigPath, loadConfig, setConfigValue } from "./config.js";
 import { createMarkdownRenderer } from "./markdown.js";
+import fs from "fs";
 import os from "os";
 import process from "process";
 const { terminal: term } = terminalKit;
@@ -39,6 +40,33 @@ function printHelp() {
   term(` gac --debug-render -a "Show rendered and raw output"\n`);
   term(`\n`);
 }
+function parseOsRelease(contents) {
+  const result = {};
+  const lines = contents.split("\n");
+  for (const line of lines) {
+    const trimmed = line.trim();
+    if (!trimmed || trimmed.startsWith("#") || !trimmed.includes("=")) continue;
+    const [key, ...rest] = trimmed.split("=");
+    let value = rest.join("=").trim();
+    if (value.startsWith('"') && value.endsWith('"')) {
+      value = value.slice(1, -1);
+    }
+    if (value) {
+      result[key] = value;
+    }
+  }
+  return result;
+}
+
+function readLinuxOsRelease() {
+  try {
+    const contents = fs.readFileSync("/etc/os-release", "utf8");
+    return parseOsRelease(contents);
+  } catch (err) {
+    return null;
+  }
+}
+
 function getOSVersion() {
   const platform = os.platform();
   if (platform === "win32") {
@@ -65,7 +93,14 @@ function getOSVersion() {
     return "macOS";
   }
   if (platform === "linux") {
-    // Find which distro
+    const osRelease = readLinuxOsRelease();
+    if (osRelease && (osRelease.PRETTY_NAME || osRelease.NAME)) {
+      const pretty = osRelease.PRETTY_NAME || osRelease.NAME;
+      const id = osRelease.ID ? `; id=${osRelease.ID}` : "";
+      const idLike = osRelease.ID_LIKE ? `; id_like=${osRelease.ID_LIKE}` : "";
+      return `Linux (${pretty}${id}${idLike})`;
+    }
+    // Find which distro (fallbacks)
     if (process.env.OS_RELEASE) {
       return `${platform}: ${process.env.OS_RELEASE}`;
     } else if (process.env.OS) {
@@ -88,24 +123,33 @@ function getOSVersion() {
 }
 function buildSystemPrompt(mode, config) {
   const osInfo = getOSVersion();
+  const osGuidance = `The user is using a system with the following OS: ${osInfo}. When providing commands or package install steps, use the native tooling for that OS (e.g., dnf for Fedora, apt for Debian/Ubuntu). Avoid giving instructions for other distros unless explicitly requested.`;
 
   if (mode === "suggest") {
     if (config.detailedSuggest === true) {
-      return `You are an expert technical assistant. The user is using a system with the following OS: ${osInfo}. When providing suggestions, give detailed, step-by-step instructions that the user can follow to achieve their goals. Include relevant commands, code snippets, or configurations as needed. Avoid unnecessary explanations or background information. Tailor your suggestions to be relevant to the user's operating system and environment.
+      return `You are an expert technical assistant. ${osGuidance} When providing suggestions, give detailed, step-by-step instructions that the user can follow to achieve their goals. Include relevant commands, code snippets, or configurations as needed. Avoid unnecessary explanations or background information. Tailor your suggestions to be relevant to the user's operating system and environment.
 Attempt to make it a single line response where possible. Prefer commands and code snippets over lengthy explanations. Always leave commands and codes in their own line for easy copying.`;
     } else {
-      return `You are an expert technical assistant. The user is using a system with the following OS: ${osInfo}. Provide concise and practical suggestions to help the user accomplish their tasks efficiently. Focus on clarity and brevity, ensuring that your suggestions are easy to understand and implement. Tailor your suggestions to be relevant to the user's operating system and environment. Avoid lengthy explanations or unnecessary details prefer single line commands or codes if you must include explainations make sure the commands and codes are in their own line for easy copying.`;
+      return `You are an expert technical assistant. ${osGuidance} Provide concise and practical suggestions to help the user accomplish their tasks efficiently. Focus on clarity and brevity, ensuring that your suggestions are easy to understand and implement. Tailor your suggestions to be relevant to the user's operating system and environment. Avoid lengthy explanations or unnecessary details prefer single line commands or codes if you must include explainations make sure the commands and codes are in their own line for easy copying.`;
     }
   }
   if (mode === "ask") {
-    return "Provide a helpful and accurate response to the user's question.";
+    return `Provide a helpful and accurate response to the user's question. ${osGuidance}`;
   }
   if (mode === "explain") {
-    return "Explain step-by-step with a short example if helpful.";
+    return `Explain step-by-step with a short example if helpful. ${osGuidance}`;
   }
   return null;
 }
 
+function normalizeDefaultAction(action) {
+  const normalized = String(action || "").trim().toLowerCase();
+  if (normalized === "ask" || normalized === "suggest" || normalized === "explain") {
+    return normalized;
+  }
+  return "suggest";
+}
+
 async function runSinglePrompt(mode, prompt, config) {
   const system = buildSystemPrompt(mode, config);
   const messages = [];
@@ -121,6 +165,11 @@ async function runSinglePrompt(mode, prompt, config) {
       term(`${reply}\n`);
     }
   }
+  if (!reply || !reply.trim()) {
+    term(
+      "No response from the model. The request may have timed out or returned empty content. Consider increasing requestTimeoutMs in the config or enabling streaming.\n"
+    );
+  }
   if (config.debugRender) {
     term(`\n--- RAW ---\n${reply}\n`);
   }
@@ -220,6 +269,8 @@ async function runConfigTui(config) {
     `Temperature: ${formatConfigValue(updatedConfig.temperature)}`,
     `Max Tokens: ${formatConfigValue(updatedConfig.maxTokens)}`,
     `Stream: ${formatConfigValue(updatedConfig.stream)}`,
+    `Request Timeout (ms): ${formatConfigValue(updatedConfig.requestTimeoutMs)}`,
+    `Default Action: ${formatConfigValue(updatedConfig.defaultAction)}`,
     `Render Markdown: ${formatConfigValue(updatedConfig.renderMarkdown)}`,
     `Debug Render: ${formatConfigValue(updatedConfig.debugRender)}`,
     `Detailed Suggest: ${formatConfigValue(updatedConfig.detailedSuggest)}`,
@@ -320,6 +371,30 @@ async function runConfigTui(config) {
     }
 
     if (selection === 8) {
+      const value = await promptConfigValue(
+        "Request timeout in ms (0 to disable)",
+        updatedConfig.requestTimeoutMs
+      );
+      if (value !== null) {
+        setConfigValue("requestTimeoutMs", value);
+        updatedConfig.requestTimeoutMs = value;
+      }
+      continue;
+    }
+
+    if (selection === 9) {
+      const value = await promptConfigValue(
+        "Default action (suggest/ask/explain)",
+        updatedConfig.defaultAction
+      );
+      if (value !== null) {
+        setConfigValue("defaultAction", value);
+        updatedConfig.defaultAction = value;
+      }
+      continue;
+    }
+
+    if (selection === 10) {
       const value = await promptConfigValue(
         "Render Markdown (true/false)",
         updatedConfig.renderMarkdown
@@ -331,7 +406,7 @@ async function runConfigTui(config) {
       continue;
     }
 
-    if (selection === 9) {
+    if (selection === 11) {
       const value = await promptConfigValue("Debug Render (true/false)", updatedConfig.debugRender);
       if (value !== null) {
         setConfigValue("debugRender", value);
@@ -340,7 +415,7 @@ async function runConfigTui(config) {
       continue;
     }
 
-    if (selection === 10) {
+    if (selection === 12) {
      const value = await promptConfigValue(
        "Detailed Suggest (true/false)",
        updatedConfig.detailedSuggest
@@ -352,7 +427,7 @@ async function runConfigTui(config) {
       continue;
     }
 
-    if (selection === 11) {
+    if (selection === 13) {
       cleanup();
       term("Configuration saved.\n");
       break;
@@ -400,6 +475,11 @@ async function runChat(config) {
     if (config.debugRender) {
       term(`\n--- RAW ---\n${reply}\n`);
     }
+    if (!reply || !reply.trim()) {
+      term(
+        "\nNo response from the model. The request may have timed out or returned empty content. Consider increasing requestTimeoutMs in the config or enabling streaming."
+      );
+    }
     term("\n\n");
     messages.push({ role: "assistant", content: reply });
   }
@@ -550,6 +630,17 @@ export async function runCli(argv) {
     return;
   }
 
+  if (!args[0].startsWith("-")) {
+    const prompt = args.join(" ").trim();
+    if (!prompt) {
+      term("Error: missing prompt.\n");
+      term.processExit(1);
+    }
+    const defaultAction = normalizeDefaultAction(config.defaultAction);
+    await runSinglePrompt(defaultAction, prompt, config);
+    return;
+  }
+
   term("Unknown command.\n\n");
   printHelp();
 }
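The new bare-prompt entry point above routes `gac "<prompt>"` through `normalizeDefaultAction`, so the mode comes from the `defaultAction` config key and falls back to `suggest`. A small usage sketch with illustrative values:

```bash
# Illustrative: make bare prompts run in explain mode instead of the default suggest
gac config set defaultAction explain
gac "How do I use rsync to mirror a directory?"
```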
package/src/config.js CHANGED
@@ -11,6 +11,8 @@ const DEFAULT_CONFIG = {
   temperature: 0.7,
   maxTokens: 512,
   stream: true,
+  requestTimeoutMs: 300000,
+  defaultAction: "suggest",
   renderMarkdown: true,
   debugRender: false,
   detailedSuggest: false,
package/src/gpt4all.js CHANGED
@@ -41,12 +41,27 @@ function buildOpenAiHeaders(apiKey) {
   return headers;
 }
 
+function createTimeoutController(timeoutMs) {
+  if (!timeoutMs || Number.isNaN(timeoutMs) || timeoutMs <= 0) {
+    return null;
+  }
+  const controller = new AbortController();
+  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
+  return { controller, timeoutId };
+}
+
 async function parseStream(response, onToken, renderer) {
   const reader = response.body.getReader();
   const decoder = new TextDecoder("utf-8");
   let buffer = "";
   let fullText = "";
   let lineBuffer = "";
+  const flushLineBuffer = () => {
+    if (renderer && lineBuffer) {
+      onToken(renderer.renderLine(lineBuffer));
+      lineBuffer = "";
+    }
+  };
 
   while (true) {
     const { value, done } = await reader.read();
@@ -61,6 +76,7 @@ async function parseStream(response, onToken, renderer) {
       if (!trimmed || !trimmed.startsWith("data:")) continue;
       const payload = trimmed.replace(/^data:\s*/, "");
       if (payload === "[DONE]") {
+        flushLineBuffer();
         return fullText;
       }
 
@@ -99,6 +115,12 @@ async function parseOllamaStream(response, onToken, renderer) {
   let buffer = "";
   let fullText = "";
   let lineBuffer = "";
+  const flushLineBuffer = () => {
+    if (renderer && lineBuffer) {
+      onToken(renderer.renderLine(lineBuffer));
+      lineBuffer = "";
+    }
+  };
 
   while (true) {
     const { value, done } = await reader.read();
@@ -114,6 +136,7 @@ async function parseOllamaStream(response, onToken, renderer) {
       try {
         const json = JSON.parse(trimmed);
         if (json.done) {
+          flushLineBuffer();
           return fullText;
         }
         const delta = getOllamaContentDelta(json);
@@ -143,9 +166,13 @@ async function parseOllamaStream(response, onToken, renderer) {
   return fullText;
 }
 
-async function fetchJson(url, payload, errorLabel) {
+async function fetchJson(url, payload, errorLabel, timeoutMs) {
+  const timeout = createTimeoutController(timeoutMs);
   try {
-    const response = await fetch(url, payload);
+    const response = await fetch(url, {
+      ...payload,
+      signal: timeout ? timeout.controller.signal : undefined,
+    });
     if (!response.ok) {
       const text = await response.text();
       throw new Error(`${errorLabel} error ${response.status}: ${text}`);
@@ -155,19 +182,39 @@ async function fetchJson(url, payload, errorLabel) {
     if (err.message && err.message.startsWith(`${errorLabel} error`)) {
       throw err;
     }
+    if (err.name === "AbortError") {
+      throw new Error(
+        `${errorLabel} request timed out after ${timeoutMs}ms. Increase requestTimeoutMs in config if needed.`
+      );
+    }
     throw new Error(`Failed to connect to ${url}. (${err.message})`);
+  } finally {
+    if (timeout) {
+      clearTimeout(timeout.timeoutId);
+    }
   }
 }
 
-async function fetchCompletion(url, payload, headers, errorLabel) {
+async function fetchCompletion(url, payload, headers, errorLabel, timeoutMs) {
+  const timeout = createTimeoutController(timeoutMs);
   try {
     return await fetch(url, {
       method: "POST",
       headers,
       body: JSON.stringify(payload),
+      signal: timeout ? timeout.controller.signal : undefined,
     });
   } catch (err) {
+    if (err.name === "AbortError") {
+      throw new Error(
+        `${errorLabel} request timed out after ${timeoutMs}ms. Increase requestTimeoutMs in config if needed.`
+      );
+    }
     throw new Error(`Failed to connect to ${url}. (${err.message})`);
+  } finally {
+    if (timeout) {
+      clearTimeout(timeout.timeoutId);
+    }
   }
 }
 
@@ -178,23 +225,25 @@ async function handleError(response, errorLabel) {
 
 export async function listModels(config) {
   const provider = getProvider(config);
+  const timeoutMs = Number(config.requestTimeoutMs);
   if (provider === "ollama") {
     const baseUrl = normalizeOllamaBaseUrl(config.ollamaBaseUrl);
     const url = `${baseUrl}/api/tags`;
-    const json = await fetchJson(url, { method: "GET" }, "Ollama");
+    const json = await fetchJson(url, { method: "GET" }, "Ollama", timeoutMs);
     if (!json || !Array.isArray(json.models)) return [];
     return json.models.map((model) => model.name).filter(Boolean);
   }
 
   const url = `${normalizeOpenAiBaseUrl(config.baseUrl)}/models`;
   const headers = buildOpenAiHeaders(config.apiKey);
-  const json = await fetchJson(url, { method: "GET", headers }, "OpenAI");
+  const json = await fetchJson(url, { method: "GET", headers }, "OpenAI", timeoutMs);
   if (!json || !Array.isArray(json.data)) return [];
   return json.data.map((model) => model.id).filter(Boolean);
 }
 
 async function openAiChatCompletion(config, messages) {
   const url = `${normalizeOpenAiBaseUrl(config.baseUrl)}/chat/completions`;
+  const timeoutMs = Number(config.requestTimeoutMs);
   const payload = {
     model: config.model,
     messages,
@@ -204,7 +253,7 @@ async function openAiChatCompletion(config, messages) {
   };
 
   const headers = buildOpenAiHeaders(config.apiKey);
-  let response = await fetchCompletion(url, payload, headers, "OpenAI");
+  let response = await fetchCompletion(url, payload, headers, "OpenAI", timeoutMs);
 
   const renderer = config.renderMarkdown
     ? createMarkdownRenderer(config.markdownStyles)
@@ -219,7 +268,7 @@ async function openAiChatCompletion(config, messages) {
       text.includes("not supported")
     ) {
       const retryPayload = { ...payload, stream: false };
-      response = await fetchCompletion(url, retryPayload, headers, "OpenAI");
+      response = await fetchCompletion(url, retryPayload, headers, "OpenAI", timeoutMs);
       if (!response.ok) {
         await handleError(response, "OpenAI");
       }
@@ -258,6 +307,7 @@ async function openAiChatCompletion(config, messages) {
 async function ollamaChatCompletion(config, messages) {
   const baseUrl = normalizeOllamaBaseUrl(config.ollamaBaseUrl);
   const url = `${baseUrl}/api/chat`;
+  const timeoutMs = Number(config.requestTimeoutMs);
   const payload = {
     model: config.model,
     messages,
@@ -275,7 +325,8 @@ async function ollamaChatCompletion(config, messages) {
     url,
     payload,
     { "Content-Type": "application/json" },
-    "Ollama"
+    "Ollama",
+    timeoutMs
   );
 
   if (!response.ok) {