@alhisan/gac 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,33 @@
+ # This workflow will run tests using Node and then publish the package to npm when a release is created
+ # For more information see: https://docs.github.com/en/actions/publishing-packages/publishing-nodejs-packages
+
+ name: Publish Package to npmjs
+
+ on:
+   release:
+     types: [created]
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - uses: actions/setup-node@v4
+         with:
+           node-version: 20
+       - run: npm ci
+       - run: npm test
+
+   publish-npm:
+     needs: build
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - uses: actions/setup-node@v4
+         with:
+           node-version: 20
+           registry-url: https://registry.npmjs.org/
+       - run: npm ci
+       - run: npm publish --provenance --access public
+         env:
+           NODE_AUTH_TOKEN: ${{secrets.npm_token}}
package/README.md CHANGED
@@ -1,10 +1,10 @@
- # GPT4All CLI (gac)
+ # GAC CLI (gac)

- Terminal client for GPT4All running on localhost. Supports streaming responses, interactive chat, and configurable markdown rendering using `terminal-kit`.
+ Terminal client for OpenAI-compatible APIs (including GPT4All) and Ollama. Supports streaming responses, interactive chat, and configurable markdown rendering using `terminal-kit`.

  ## Installation

- Requirements: Node.js 18+ and a running GPT4All OpenAI-compatible server.
+ Requirements: Node.js 18+ and a running OpenAI-compatible server (like GPT4All) or Ollama.

  ```bash
  npm install -g @alhisan/gac
@@ -68,6 +68,7 @@ View and edit:

  ```bash
  gac config
+ gac config tui
  gac config get baseUrl
  gac config set baseUrl http://localhost:4891
  gac config set model "Llama 3 8B Instruct"
@@ -77,7 +78,10 @@ gac config set detailedSuggest true

  ### Core settings

+ - `provider` (string): `openai` (default) or `ollama`
  - `baseUrl` (string): GPT4All server base, e.g. `http://localhost:4891`
+ - `ollamaBaseUrl` (string): Ollama base, e.g. `http://localhost:11434`
+ - `apiKey` (string): API key for OpenAI-compatible services (empty for local servers)
  - `model` (string): model ID from `/v1/models`
  - `temperature` (number)
  - `maxTokens` (number)
@@ -136,9 +140,19 @@ Example:
  If you see connection errors, verify the server is reachable:

  ```bash
- curl http://localhost:4891/v1/models
+ curl http://[SERVER_ADDRESS]:[SERVER_PORT]/v1/models
+ ```
+
+ For Ollama:
+
+ ```bash
+ curl http://localhost:11434/api/tags
  ```

  ## License

  GNU General Public License v3.0. See `LICENSE`.
+
+ ## Disclaimer
+
+ This was mostly vibe coded; I'm treating it as a fun side project / tool that will likely continue to be improved and updated by agentic models.
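The provider-related settings listed in the README above map directly onto the defaults this release adds to `src/config.js` (shown further down in this diff). As a rough illustration only, a saved config pointed at Ollama might look like the sketch below; the values are example overrides, not package defaults, and any config keys not visible in this diff are omitted.

```js
// Illustration only: keys match the DEFAULT_CONFIG additions in src/config.js;
// the values shown here are example overrides for an Ollama setup.
const exampleConfig = {
  provider: "ollama",                      // "openai" (default) or "ollama"
  baseUrl: "http://localhost:4891",        // OpenAI-compatible server, e.g. GPT4All
  ollamaBaseUrl: "http://localhost:11434", // Ollama server
  apiKey: "",                              // empty for local servers
  model: "llama3",                         // from /v1/models, or a name from Ollama's /api/tags
  temperature: 0.7,
  maxTokens: 512,
};
```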
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@alhisan/gac",
-   "version": "1.0.0",
+   "version": "1.1.0",
    "description": "Terminal client for GPT4All running on localhost",
    "license": "GPL-3.0-only",
    "type": "module",
package/src/cli.js CHANGED
@@ -7,7 +7,7 @@ import process from "process";
  const { terminal: term } = terminalKit;

  function printHelp() {
-   term(`gac - GPT4All CLI\n\n`);
+   term(`gac - OpenAI-compatible & Ollama CLI\n\n`);
    term(`Options:\n`);
    term(` -a Single prompt mode (alias for ask)\n`);
    term(` suggest Suggestion mode\n`);
@@ -16,6 +16,7 @@ function printHelp() {
    term(` chat Interactive chat mode\n`);
    term(` models List models and set default\n`);
    term(` config View or edit configuration\n`);
+   term(` config tui Open interactive config editor\n`);
    term(` --no-render Disable markdown rendering\n`);
    term(` --debug-render Show both rendered and raw output\n`);
    term(
@@ -31,6 +32,7 @@ function printHelp() {
    term(` gac chat\n`);
    term(` gac models\n`);
    term(` gac config\n`);
+   term(` gac config tui\n`);
    term(` gac config get <key>\n`);
    term(` gac config set <key> <value>\n`);
    term(` gac --no-render -a "Raw markdown output"\n`);
@@ -139,6 +141,225 @@ async function inputLine(label) {
    });
  }

+ function formatConfigValue(value) {
+   if (typeof value === "string") {
+     if (!value) return "(empty)";
+     if (value.length > 48) return `${value.slice(0, 45)}...`;
+     return value;
+   }
+   return JSON.stringify(value);
+ }
+
+ function maskApiKey(apiKey) {
+   if (!apiKey) return "(empty)";
+   if (apiKey.length <= 6) return "***";
+   return `${apiKey.slice(0, 3)}...${apiKey.slice(-3)}`;
+ }
+
+ async function promptConfigValue(label, currentValue) {
+   term(`${label} [${formatConfigValue(currentValue)}]: `);
+   return new Promise((resolve) => {
+     term.inputField({ cancelable: true, default: String(currentValue ?? "") }, (error, input) => {
+       term("\n");
+       if (error || input === undefined || input === null) {
+         resolve(null);
+         return;
+       }
+       resolve(input.trim());
+     });
+   });
+ }
+
+ async function selectConfigProvider(config) {
+   const options = [
+     "OpenAI-compatible (includes GPT4All)",
+     "Ollama",
+   ];
+   const currentIndex = config.provider === "ollama" ? 1 : 0;
+   term("\nSelect provider:\n");
+   return new Promise((resolve) => {
+     term.singleColumnMenu(
+       options,
+       { cancelable: true, selectedIndex: currentIndex },
+       (error, response) => {
+         term("\n");
+         if (error || !response || response.canceled) {
+           resolve(null);
+           return;
+         }
+         resolve(response.selectedIndex === 1 ? "ollama" : "openai");
+       }
+     );
+   });
+ }
+
+ async function runConfigTui(config) {
+   term("Config editor (Esc to exit)\n\n");
+   let updatedConfig = { ...config };
+   term.grabInput({ mouse: "button" });
+   const cleanup = () => {
+     term.grabInput(false);
+     term.removeListener("key", onKey);
+     term.hideCursor(false);
+   };
+   const onKey = (name) => {
+     if (name === "CTRL_C") {
+       cleanup();
+       term("\nCanceled.\n");
+       term.processExit(0);
+     }
+   };
+   term.on("key", onKey);
+   const menuLoop = async () => {
+     const menuItems = [
+       `Provider: ${updatedConfig.provider === "ollama" ? "Ollama" : "OpenAI-compatible"}`,
+       `Base URL (OpenAI): ${formatConfigValue(updatedConfig.baseUrl)}`,
+       `Base URL (Ollama): ${formatConfigValue(updatedConfig.ollamaBaseUrl)}`,
+       `API Key: ${maskApiKey(updatedConfig.apiKey)}`,
+       `Model: ${formatConfigValue(updatedConfig.model)}`,
+       `Temperature: ${formatConfigValue(updatedConfig.temperature)}`,
+       `Max Tokens: ${formatConfigValue(updatedConfig.maxTokens)}`,
+       `Stream: ${formatConfigValue(updatedConfig.stream)}`,
+       `Render Markdown: ${formatConfigValue(updatedConfig.renderMarkdown)}`,
+       `Debug Render: ${formatConfigValue(updatedConfig.debugRender)}`,
+       `Detailed Suggest: ${formatConfigValue(updatedConfig.detailedSuggest)}`,
+       "Save and exit",
+     ];
+
+     return new Promise((resolve) => {
+       term.singleColumnMenu(menuItems, { cancelable: true }, (error, response) => {
+         term("\n");
+         if (error || !response || response.canceled) {
+           resolve(false);
+           return;
+         }
+         resolve(response.selectedIndex);
+       });
+     });
+   };
+
+   while (true) {
+     const selection = await menuLoop();
+     if (selection === false) {
+       cleanup();
+       term("Config editor closed.\n");
+       break;
+     }
+
+     if (selection === 0) {
+       const provider = await selectConfigProvider(updatedConfig);
+       if (provider) {
+         setConfigValue("provider", provider);
+         updatedConfig.provider = provider;
+       }
+       continue;
+     }
+
+     if (selection === 1) {
+       const value = await promptConfigValue("OpenAI base URL", updatedConfig.baseUrl);
+       if (value !== null) {
+         setConfigValue("baseUrl", value);
+         updatedConfig.baseUrl = value;
+       }
+       continue;
+     }
+
+     if (selection === 2) {
+       const value = await promptConfigValue("Ollama base URL", updatedConfig.ollamaBaseUrl);
+       if (value !== null) {
+         setConfigValue("ollamaBaseUrl", value);
+         updatedConfig.ollamaBaseUrl = value;
+       }
+       continue;
+     }
+
+     if (selection === 3) {
+       term("API Key (leave empty to clear)\n");
+       const value = await promptConfigValue("API key", updatedConfig.apiKey);
+       if (value !== null) {
+         setConfigValue("apiKey", value);
+         updatedConfig.apiKey = value;
+       }
+       continue;
+     }
+
+     if (selection === 4) {
+       const value = await promptConfigValue("Model", updatedConfig.model);
+       if (value !== null) {
+         setConfigValue("model", value);
+         updatedConfig.model = value;
+       }
+       continue;
+     }
+
+     if (selection === 5) {
+       const value = await promptConfigValue("Temperature", updatedConfig.temperature);
+       if (value !== null) {
+         setConfigValue("temperature", value);
+         updatedConfig.temperature = value;
+       }
+       continue;
+     }
+
+     if (selection === 6) {
+       const value = await promptConfigValue("Max Tokens", updatedConfig.maxTokens);
+       if (value !== null) {
+         setConfigValue("maxTokens", value);
+         updatedConfig.maxTokens = value;
+       }
+       continue;
+     }
+
+     if (selection === 7) {
+       const value = await promptConfigValue("Stream (true/false)", updatedConfig.stream);
+       if (value !== null) {
+         setConfigValue("stream", value);
+         updatedConfig.stream = value;
+       }
+       continue;
+     }
+
+     if (selection === 8) {
+       const value = await promptConfigValue(
+         "Render Markdown (true/false)",
+         updatedConfig.renderMarkdown
+       );
+       if (value !== null) {
+         setConfigValue("renderMarkdown", value);
+         updatedConfig.renderMarkdown = value;
+       }
+       continue;
+     }
+
+     if (selection === 9) {
+       const value = await promptConfigValue("Debug Render (true/false)", updatedConfig.debugRender);
+       if (value !== null) {
+         setConfigValue("debugRender", value);
+         updatedConfig.debugRender = value;
+       }
+       continue;
+     }
+
+     if (selection === 10) {
+       const value = await promptConfigValue(
+         "Detailed Suggest (true/false)",
+         updatedConfig.detailedSuggest
+       );
+       if (value !== null) {
+         setConfigValue("detailedSuggest", value);
+         updatedConfig.detailedSuggest = value;
+       }
+       continue;
+     }
+
+     if (selection === 11) {
+       cleanup();
+       term("Configuration saved.\n");
+       break;
+     }
+   }
+ }
+
  async function runChat(config) {
    term('Interactive chat. Type "exit" to quit.\n\n');
    const messages = [];
@@ -187,20 +408,20 @@ async function runChat(config) {
  async function runModels(config) {
    let models;
    try {
-     models = await listModels(config.baseUrl);
+     models = await listModels(config);
    } catch (err) {
      term(`Error: ${err.message}\n`);
      term.processExit(1);
    }

    if (!models.length) {
-     term("No models found from GPT4All server.\n");
+     term("No models found from the configured provider.\n");
      term.processExit(0);
    }

    term("Available models:\n");
-   // Append 'Use default gpt4all model' option at the top
-   models.unshift("Use default gpt4all setting");
+   // Insert an option to keep the current default at the top
+   models.unshift("Keep current default");
    models.forEach((model) => term(`- ${model}\n`));
    term("\nSelect a default model (use arrows + Enter, Esc to cancel):\n");

@@ -231,9 +452,10 @@ async function runModels(config) {
        term.processExit(0);
      }
      let selected = models[response.selectedIndex];
-     // if user selected the default model option, set to 'gpt4all'
-     if (selected === "Use default gpt4all setting") {
-       selected = "gpt4all";
+     if (selected === "Keep current default") {
+       cleanup();
+       term(`Default model unchanged ("${config.model}").\n`);
+       term.processExit(0);
      }
      setConfigValue("model", selected);
      config.model = selected;
@@ -277,6 +499,10 @@ export async function runCli(argv) {
    }

    if (args[0] === "config") {
+     if (args[1] === "tui") {
+       await runConfigTui(config);
+       return;
+     }
      if (args[1] === "get" && args[2]) {
        const key = args[2];
        term(`${config[key]}\n`);
package/src/config.js CHANGED
@@ -3,7 +3,10 @@ import path from "path";
  import os from "os";

  const DEFAULT_CONFIG = {
+   provider: "openai",
    baseUrl: "http://localhost:4891",
+   ollamaBaseUrl: "http://localhost:11434",
+   apiKey: "",
    model: "gpt4all",
    temperature: 0.7,
    maxTokens: 512,
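Existing installs will have a config file written by 1.0.0 that lacks the three new keys. How `config.js` loads and merges the saved file is not part of this diff; the sketch below is only a hypothetical illustration of the usual spread-merge pattern that would let old config files pick up the new defaults. `CONFIG_PATH` and `loadConfig` are invented names, not the package's API.

```js
import fs from "fs";
import os from "os";
import path from "path";

// Hypothetical sketch (not from this package): merge a previously saved config
// over the defaults so configs written before 1.1.0 still get values for the
// new keys (provider, ollamaBaseUrl, apiKey).
const CONFIG_PATH = path.join(os.homedir(), ".gac-config.json"); // assumed location

export function loadConfig(defaults) {
  try {
    const saved = JSON.parse(fs.readFileSync(CONFIG_PATH, "utf-8"));
    return { ...defaults, ...saved };
  } catch {
    return { ...defaults }; // no saved config yet: fall back to defaults
  }
}
```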
package/src/gpt4all.js CHANGED
@@ -1,37 +1,66 @@
- import terminalKit from 'terminal-kit';
- import { createMarkdownRenderer } from './markdown.js';
+ import terminalKit from "terminal-kit";
+ import { createMarkdownRenderer } from "./markdown.js";

  const { terminal: term } = terminalKit;

  function getContentDelta(chunk) {
-   if (!chunk || !chunk.choices || !chunk.choices[0]) return '';
+   if (!chunk || !chunk.choices || !chunk.choices[0]) return "";
    const choice = chunk.choices[0];
    if (choice.delta && choice.delta.content) return choice.delta.content;
    if (choice.message && choice.message.content) return choice.message.content;
    if (choice.text) return choice.text;
-   return '';
+   return "";
+ }
+
+ function getOllamaContentDelta(chunk) {
+   if (!chunk) return "";
+   if (chunk.message && chunk.message.content) return chunk.message.content;
+   if (chunk.response) return chunk.response;
+   return "";
+ }
+
+ function normalizeOpenAiBaseUrl(baseUrl) {
+   const trimmed = baseUrl.replace(/\/$/, "");
+   if (trimmed.endsWith("/v1")) return trimmed;
+   return `${trimmed}/v1`;
+ }
+
+ function normalizeOllamaBaseUrl(baseUrl) {
+   return baseUrl.replace(/\/$/, "");
+ }
+
+ function getProvider(config) {
+   return config.provider === "ollama" ? "ollama" : "openai";
+ }
+
+ function buildOpenAiHeaders(apiKey) {
+   const headers = { "Content-Type": "application/json" };
+   if (apiKey) {
+     headers.Authorization = `Bearer ${apiKey}`;
+   }
+   return headers;
  }

  async function parseStream(response, onToken, renderer) {
    const reader = response.body.getReader();
-   const decoder = new TextDecoder('utf-8');
-   let buffer = '';
-   let fullText = '';
-   let lineBuffer = '';
+   const decoder = new TextDecoder("utf-8");
+   let buffer = "";
+   let fullText = "";
+   let lineBuffer = "";

    while (true) {
      const { value, done } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });

-     const lines = buffer.split('\n');
-     buffer = lines.pop() || '';
+     const lines = buffer.split("\n");
+     buffer = lines.pop() || "";

      for (const line of lines) {
        const trimmed = line.trim();
-       if (!trimmed || !trimmed.startsWith('data:')) continue;
-       const payload = trimmed.replace(/^data:\s*/, '');
-       if (payload === '[DONE]') {
+       if (!trimmed || !trimmed.startsWith("data:")) continue;
+       const payload = trimmed.replace(/^data:\s*/, "");
+       if (payload === "[DONE]") {
          return fullText;
        }

@@ -44,8 +73,8 @@ async function parseStream(response, onToken, renderer) {
            onToken(delta);
          } else {
            lineBuffer += delta;
-           const lines = lineBuffer.split('\n');
-           lineBuffer = lines.pop() || '';
+           const lines = lineBuffer.split("\n");
+           lineBuffer = lines.pop() || "";
            for (const line of lines) {
              onToken(`${renderer.renderLine(line)}\n`);
            }
@@ -64,76 +93,135 @@ async function parseStream(response, onToken, renderer) {
    return fullText;
  }

- function normalizeBaseUrl(baseUrl) {
-   const trimmed = baseUrl.replace(/\/$/, '');
-   if (trimmed.endsWith('/v1')) return trimmed;
-   return `${trimmed}/v1`;
+ async function parseOllamaStream(response, onToken, renderer) {
+   const reader = response.body.getReader();
+   const decoder = new TextDecoder("utf-8");
+   let buffer = "";
+   let fullText = "";
+   let lineBuffer = "";
+
+   while (true) {
+     const { value, done } = await reader.read();
+     if (done) break;
+     buffer += decoder.decode(value, { stream: true });
+
+     const lines = buffer.split("\n");
+     buffer = lines.pop() || "";
+
+     for (const line of lines) {
+       const trimmed = line.trim();
+       if (!trimmed) continue;
+       try {
+         const json = JSON.parse(trimmed);
+         if (json.done) {
+           return fullText;
+         }
+         const delta = getOllamaContentDelta(json);
+         if (delta) {
+           fullText += delta;
+           if (!renderer) {
+             onToken(delta);
+           } else {
+             lineBuffer += delta;
+             const lines = lineBuffer.split("\n");
+             lineBuffer = lines.pop() || "";
+             for (const line of lines) {
+               onToken(`${renderer.renderLine(line)}\n`);
+             }
+           }
+         }
+       } catch (err) {
+         // Ignore non-JSON payloads
+       }
+     }
+   }
+
+   if (renderer && lineBuffer) {
+     onToken(renderer.renderLine(lineBuffer));
+   }
+
+   return fullText;
  }

- async function fetchJson(url, payload) {
+ async function fetchJson(url, payload, errorLabel) {
    try {
      const response = await fetch(url, payload);
      if (!response.ok) {
        const text = await response.text();
-       throw new Error(`GPT4All error ${response.status}: ${text}`);
+       throw new Error(`${errorLabel} error ${response.status}: ${text}`);
      }
      return await response.json();
    } catch (err) {
-     if (err.message && err.message.startsWith('GPT4All error')) {
+     if (err.message && err.message.startsWith(`${errorLabel} error`)) {
        throw err;
      }
-     throw new Error(`Failed to connect to ${url}. Is GPT4All running and reachable? (${err.message})`);
+     throw new Error(`Failed to connect to ${url}. (${err.message})`);
    }
  }

- export async function listModels(baseUrl) {
-   const url = `${normalizeBaseUrl(baseUrl)}/models`;
-   const json = await fetchJson(url, { method: 'GET' });
-   if (!json || !Array.isArray(json.data)) return [];
-   return json.data.map((model) => model.id).filter(Boolean);
- }
-
- async function fetchCompletion(url, payload) {
+ async function fetchCompletion(url, payload, headers, errorLabel) {
    try {
      return await fetch(url, {
-       method: 'POST',
-       headers: { 'Content-Type': 'application/json' },
-       body: JSON.stringify(payload)
+       method: "POST",
+       headers,
+       body: JSON.stringify(payload),
      });
    } catch (err) {
-     throw new Error(`Failed to connect to ${url}. Is GPT4All running and reachable? (${err.message})`);
+     throw new Error(`Failed to connect to ${url}. (${err.message})`);
    }
  }

- async function handleError(response) {
+ async function handleError(response, errorLabel) {
    const text = await response.text();
-   throw new Error(`GPT4All error ${response.status}: ${text}`);
+   throw new Error(`${errorLabel} error ${response.status}: ${text}`);
+ }
+
+ export async function listModels(config) {
+   const provider = getProvider(config);
+   if (provider === "ollama") {
+     const baseUrl = normalizeOllamaBaseUrl(config.ollamaBaseUrl);
+     const url = `${baseUrl}/api/tags`;
+     const json = await fetchJson(url, { method: "GET" }, "Ollama");
+     if (!json || !Array.isArray(json.models)) return [];
+     return json.models.map((model) => model.name).filter(Boolean);
+   }
+
+   const url = `${normalizeOpenAiBaseUrl(config.baseUrl)}/models`;
+   const headers = buildOpenAiHeaders(config.apiKey);
+   const json = await fetchJson(url, { method: "GET", headers }, "OpenAI");
+   if (!json || !Array.isArray(json.data)) return [];
+   return json.data.map((model) => model.id).filter(Boolean);
  }

- export async function chatCompletion(
-   { baseUrl, model, temperature, maxTokens, stream, renderMarkdown, markdownStyles },
-   messages
- ) {
-   const url = `${normalizeBaseUrl(baseUrl)}/chat/completions`;
+ async function openAiChatCompletion(config, messages) {
+   const url = `${normalizeOpenAiBaseUrl(config.baseUrl)}/chat/completions`;
    const payload = {
-     model,
+     model: config.model,
      messages,
-     temperature,
-     max_tokens: maxTokens,
-     stream: Boolean(stream)
+     temperature: config.temperature,
+     max_tokens: config.maxTokens,
+     stream: Boolean(config.stream),
    };

-   let response = await fetchCompletion(url, payload);
+   const headers = buildOpenAiHeaders(config.apiKey);
+   let response = await fetchCompletion(url, payload, headers, "OpenAI");

-   const renderer = renderMarkdown ? createMarkdownRenderer(markdownStyles) : null;
+   const renderer = config.renderMarkdown
+     ? createMarkdownRenderer(config.markdownStyles)
+     : null;

    if (!response.ok) {
      const text = await response.text();
-     if (stream && response.status === 400 && text.includes('stream') && text.includes('not supported')) {
+     if (
+       config.stream &&
+       response.status === 400 &&
+       text.includes("stream") &&
+       text.includes("not supported")
+     ) {
        const retryPayload = { ...payload, stream: false };
-       response = await fetchCompletion(url, retryPayload);
+       response = await fetchCompletion(url, retryPayload, headers, "OpenAI");
        if (!response.ok) {
-         await handleError(response);
+         await handleError(response, "OpenAI");
        }
        const json = await response.json();
        const content = getContentDelta(json);
@@ -145,19 +233,62 @@ export async function chatCompletion(
        return content;
      }

-     throw new Error(`GPT4All error ${response.status}: ${text}`);
+     throw new Error(`OpenAI error ${response.status}: ${text}`);
    }

-   if (stream) {
-     const contentType = response.headers.get('content-type') || '';
-     if (contentType.includes('text/event-stream')) {
+   if (config.stream) {
+     const contentType = response.headers.get("content-type") || "";
+     if (contentType.includes("text/event-stream")) {
        return parseStream(response, (chunk) => term(chunk), renderer);
      }
    }

    const json = await response.json();
    const content = getContentDelta(json);
-   if (stream) {
+   if (config.stream) {
+     if (renderer) {
+       term(renderer.renderText(content));
+     } else {
+       term(content);
+     }
+   }
+   return content;
+ }
+
+ async function ollamaChatCompletion(config, messages) {
+   const baseUrl = normalizeOllamaBaseUrl(config.ollamaBaseUrl);
+   const url = `${baseUrl}/api/chat`;
+   const payload = {
+     model: config.model,
+     messages,
+     stream: Boolean(config.stream),
+     options: {
+       temperature: config.temperature,
+       num_predict: config.maxTokens,
+     },
+   };
+
+   const renderer = config.renderMarkdown
+     ? createMarkdownRenderer(config.markdownStyles)
+     : null;
+   const response = await fetchCompletion(
+     url,
+     payload,
+     { "Content-Type": "application/json" },
+     "Ollama"
+   );
+
+   if (!response.ok) {
+     await handleError(response, "Ollama");
+   }
+
+   if (config.stream) {
+     return parseOllamaStream(response, (chunk) => term(chunk), renderer);
+   }
+
+   const json = await response.json();
+   const content = getOllamaContentDelta(json);
+   if (config.stream) {
      if (renderer) {
        term(renderer.renderText(content));
      } else {
@@ -166,3 +297,11 @@ export async function chatCompletion(
    }
    return content;
  }
+
+ export async function chatCompletion(config, messages) {
+   const provider = getProvider(config);
+   if (provider === "ollama") {
+     return ollamaChatCompletion(config, messages);
+   }
+   return openAiChatCompletion(config, messages);
+ }
Binary file
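For orientation, here is a minimal usage sketch (not part of the package) of how the reworked `src/gpt4all.js` entry points are driven after this change: both `listModels` and `chatCompletion` now take the whole config object and dispatch on `config.provider`. The model name and other values below are illustrative assumptions, and the import path assumes a local checkout of the package.

```js
// Minimal sketch only: drive the refactored provider-aware API from an ESM script.
import { listModels, chatCompletion } from "./src/gpt4all.js";

const config = {
  provider: "ollama",                 // or "openai" for GPT4All-style servers
  baseUrl: "http://localhost:4891",
  ollamaBaseUrl: "http://localhost:11434",
  apiKey: "",
  model: "llama3",                    // example model name
  temperature: 0.7,
  maxTokens: 512,
  stream: false,                      // non-streaming keeps the sketch simple
  renderMarkdown: false,
};

// listModels hits /api/tags for Ollama and /v1/models for OpenAI-compatible servers.
const models = await listModels(config);
console.log("models:", models);

// chatCompletion routes to the Ollama or OpenAI code path based on config.provider.
const reply = await chatCompletion(config, [
  { role: "user", content: "Say hello in one short sentence." },
]);
console.log("reply:", reply);
```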