@alhisan/gac 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +674 -0
- package/README.md +144 -0
- package/alhisan-gac-1.0.0.tgz +0 -0
- package/bin/gac.js +10 -0
- package/package.json +16 -0
- package/src/cli.js +329 -0
- package/src/config.js +129 -0
- package/src/gpt4all.js +168 -0
- package/src/markdown.js +326 -0
package/README.md
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
# GPT4All CLI (gac)
|
|
2
|
+
|
|
3
|
+
Terminal client for GPT4All running on localhost. Supports streaming responses, interactive chat, and configurable markdown rendering using `terminal-kit`.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
Requirements: Node.js 18+ and a running GPT4All OpenAI-compatible server.
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
npm install -g @alhisan/gac
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
Or, if you don't want to install globally:
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
npm install
|
|
17
|
+
node bin/gac.js --help
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
Test if it works:
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
gac --help
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
## Usage
|
|
27
|
+
|
|
28
|
+
Single prompt:
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
gac -a "Hello gpt4all, how are you doing today?"
|
|
32
|
+
gac suggest "How do I connect to ssh server on a custom port 5322?"
|
|
33
|
+
gac explain "How do I use rsync?"
|
|
34
|
+
gac suggest -d "Give me step-by-step instructions to set up an SSH server on port 5322"
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
List models and set a default:
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
gac models
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
This opens an interactive selector. Use arrow keys + Enter to choose a model, or Ctrl+C/Esc to cancel.
|
|
44
|
+
|
|
45
|
+
Interactive mode:
|
|
46
|
+
|
|
47
|
+
```bash
|
|
48
|
+
gac chat
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
Exit chat with `exit`, `quit`, or Ctrl+C.
|
|
52
|
+
|
|
53
|
+
Flags:
|
|
54
|
+
|
|
55
|
+
- `--no-render` disables markdown styling for that run.
|
|
56
|
+
- `--debug-render` prints the raw model output after the rendered response.
|
|
57
|
+
|
|
58
|
+
- `-d, --detailed-suggest` enable more detailed, step-by-step suggestions in `suggest` mode (can also be set via config key `detailedSuggest`).
|
|
59
|
+
|
|
60
|
+
## Configuration
|
|
61
|
+
|
|
62
|
+
Config file is created on first run:
|
|
63
|
+
|
|
64
|
+
- Primary: `~/.gac/config.json`
|
|
65
|
+
- Fallback: `.gac/config.json` (when home is not writable)
|
|
66
|
+
|
|
67
|
+
View and edit:
|
|
68
|
+
|
|
69
|
+
```bash
|
|
70
|
+
gac config
|
|
71
|
+
gac config get baseUrl
|
|
72
|
+
gac config set baseUrl http://localhost:4891
|
|
73
|
+
gac config set model "Llama 3 8B Instruct"
|
|
74
|
+
gac config set markdownStyles.codeStyles '["#8be9fd"]'
|
|
75
|
+
gac config set detailedSuggest true
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
### Core settings
|
|
79
|
+
|
|
80
|
+
- `baseUrl` (string): GPT4All server base, e.g. `http://localhost:4891`
|
|
81
|
+
- `model` (string): model ID from `/v1/models`
|
|
82
|
+
- `temperature` (number)
|
|
83
|
+
- `maxTokens` (number)
|
|
84
|
+
- `stream` (boolean)
|
|
85
|
+
- `detailedSuggest` (boolean): when `true`, `suggest` mode returns more detailed, step-by-step suggestions.
|
|
86
|
+
- `renderMarkdown` (boolean)
|
|
87
|
+
|
|
88
|
+
### Markdown styling
|
|
89
|
+
|
|
90
|
+
All markdown options live under `markdownStyles`:
|
|
91
|
+
|
|
92
|
+
- `headerStyles` (array of styles)
|
|
93
|
+
- `headerStylesByLevel` (object, keys `1`–`6` → array of styles)
|
|
94
|
+
- `headerUnderline` (boolean)
|
|
95
|
+
- `headerUnderlineLevels` (array of levels to underline)
|
|
96
|
+
- `headerUnderlineStyle` (array of styles)
|
|
97
|
+
- `headerUnderlineChar` (string, single character)
|
|
98
|
+
- `codeStyles` (array of styles)
|
|
99
|
+
- `codeBackground` (array of styles)
|
|
100
|
+
- `codeBorder` (boolean)
|
|
101
|
+
- `codeBorderStyle` (array of styles)
|
|
102
|
+
- `codeGutter` (string)
|
|
103
|
+
- `codeBorderChars` (object: `topLeft`, `top`, `topRight`, `bottomLeft`, `bottom`, `bottomRight`)
|
|
104
|
+
|
|
105
|
+
Style values can be:
|
|
106
|
+
|
|
107
|
+
- Terminal-kit style names like `bold`, `underline`, `dim`, `brightWhite`
|
|
108
|
+
- Foreground hex colors: `"#ffcc00"`
|
|
109
|
+
- Background hex colors: `"bg:#202020"` or `"bg#202020"`
|
|
110
|
+
- Default/transparent: `"default"` (fg) or `"bg:default"`
|
|
111
|
+
|
|
112
|
+
Example:
|
|
113
|
+
|
|
114
|
+
```json
|
|
115
|
+
{
|
|
116
|
+
"markdownStyles": {
|
|
117
|
+
"headerStylesByLevel": {
|
|
118
|
+
"1": ["bold", "brightWhite"],
|
|
119
|
+
"2": ["bold"],
|
|
120
|
+
"3": ["bold"],
|
|
121
|
+
"4": ["dim"],
|
|
122
|
+
"5": ["dim"],
|
|
123
|
+
"6": ["dim"]
|
|
124
|
+
},
|
|
125
|
+
"headerUnderline": true,
|
|
126
|
+
"headerUnderlineLevels": [1],
|
|
127
|
+
"codeStyles": ["#8be9fd"],
|
|
128
|
+
"codeBackground": ["bg:default"],
|
|
129
|
+
"codeBorderStyle": ["#444444"]
|
|
130
|
+
}
|
|
131
|
+
}
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
## Troubleshooting
|
|
135
|
+
|
|
136
|
+
If you see connection errors, verify the server is reachable:
|
|
137
|
+
|
|
138
|
+
```bash
|
|
139
|
+
curl http://localhost:4891/v1/models
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
## License
|
|
143
|
+
|
|
144
|
+
GNU General Public License v3.0. See `LICENSE`.
|
|
Binary file
|
package/bin/gac.js
ADDED
package/package.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@alhisan/gac",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Terminal client for GPT4All running on localhost",
|
|
5
|
+
"license": "GPL-3.0-only",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"bin": {
|
|
8
|
+
"gac": "bin/gac.js"
|
|
9
|
+
},
|
|
10
|
+
"engines": {
|
|
11
|
+
"node": ">=18.0.0"
|
|
12
|
+
},
|
|
13
|
+
"dependencies": {
|
|
14
|
+
"terminal-kit": "^3.1.0"
|
|
15
|
+
}
|
|
16
|
+
}
|
package/src/cli.js
ADDED
|
@@ -0,0 +1,329 @@
|
|
|
1
|
+
import terminalKit from "terminal-kit";
|
|
2
|
+
import { chatCompletion, listModels } from "./gpt4all.js";
|
|
3
|
+
import { getConfigPath, loadConfig, setConfigValue } from "./config.js";
|
|
4
|
+
import { createMarkdownRenderer } from "./markdown.js";
|
|
5
|
+
import os from "os";
|
|
6
|
+
import process from "process";
|
|
7
|
+
const { terminal: term } = terminalKit;
|
|
8
|
+
|
|
9
|
+
// Print the CLI usage/help text to the terminal.
// Covers every subcommand (ask/suggest/explain/chat/models/config),
// the global flags, and example invocations. Output goes through
// terminal-kit's term() so it respects the active terminal.
function printHelp() {
  term(`gac - GPT4All CLI\n\n`);
  term(`Options:\n`);
  term(` -a Single prompt mode (alias for ask)\n`);
  term(` suggest Suggestion mode\n`);
  term(` explain Explanation mode\n`);
  term(` ask Ask mode\n`);
  term(` chat Interactive chat mode\n`);
  term(` models List models and set default\n`);
  term(` config View or edit configuration\n`);
  term(` --no-render Disable markdown rendering\n`);
  term(` --debug-render Show both rendered and raw output\n`);
  term(
    ` -d, --detailed-suggest Provide more detailed suggestions (only in suggest mode)\n`
  );
  term(` -h, --help Show this help message\n`);
  term(`\n`);
  term(`Usage:\n`);
  term(` gac -a "Hello gpt4all"\n`);
  term(` gac suggest "How do I connect to ssh server on port 5322"\n`);
  term(` gac explain "How do I use rsync?"\n`);
  term(` gac ask "What is the best way to learn JavaScript?"\n`);
  term(` gac chat\n`);
  term(` gac models\n`);
  term(` gac config\n`);
  term(` gac config get <key>\n`);
  term(` gac config set <key> <value>\n`);
  term(` gac --no-render -a "Raw markdown output"\n`);
  term(` gac --debug-render -a "Show rendered and raw output"\n`);
  term(`\n`);
}
|
|
40
|
+
// Build a short OS description string for the system prompt.
// Returns "platform: detail" when one of the probed environment
// variables is set, otherwise a human-readable platform name.
function getOSVersion() {
  const platform = os.platform();

  // Environment variables to probe per platform, in priority order;
  // the first one with a truthy value wins.
  const envProbes = {
    win32: ["OS_VERSION", "OS_RELEASE", "OS"],
    darwin: ["OS_VERSION", "OS_RELEASE", "OS"],
    linux: ["OS_RELEASE", "OS", "LINUX_DISTRO"],
  };

  // Friendly fallback names when no env hint is available.
  const fallbackNames = {
    win32: "Windows",
    darwin: "macOS",
    linux: "Linux",
    freebsd: "FreeBSD",
    sunos: "SunOS",
    aix: "AIX",
  };

  for (const envKey of envProbes[platform] ?? []) {
    const detail = process.env[envKey];
    if (detail) {
      return `${platform}: ${detail}`;
    }
  }
  return fallbackNames[platform] ?? "Unknown OS";
}
|
|
87
|
+
// Construct the system prompt for a given mode.
// Modes: "suggest" (OS-tailored, terse or step-by-step depending on
// config.detailedSuggest), "ask", "explain". Returns null for any
// other mode, in which case no system message is sent.
function buildSystemPrompt(mode, config) {
  // Embed the detected OS so suggestions match the user's environment.
  const osInfo = getOSVersion();

  if (mode === "suggest") {
    if (config.detailedSuggest === true) {
      return `You are an expert technical assistant. The user is using a system with the following OS: ${osInfo}. When providing suggestions, give detailed, step-by-step instructions that the user can follow to achieve their goals. Include relevant commands, code snippets, or configurations as needed. Avoid unnecessary explanations or background information. Tailor your suggestions to be relevant to the user's operating system and environment.
Attempt to make it a single line response where possible. Prefer commands and code snippets over lengthy explanations. Always leave commands and codes in their own line for easy copying.`;
    } else {
      return `You are an expert technical assistant. The user is using a system with the following OS: ${osInfo}. Provide concise and practical suggestions to help the user accomplish their tasks efficiently. Focus on clarity and brevity, ensuring that your suggestions are easy to understand and implement. Tailor your suggestions to be relevant to the user's operating system and environment. Avoid lengthy explanations or unnecessary details prefer single line commands or codes if you must include explainations make sure the commands and codes are in their own line for easy copying.`;
    }
  }
  if (mode === "ask") {
    return "Provide a helpful and accurate response to the user's question.";
  }
  if (mode === "explain") {
    return "Explain step-by-step with a short example if helpful.";
  }
  // Unknown mode: caller omits the system message entirely.
  return null;
}
|
|
106
|
+
|
|
107
|
+
// Send a single prompt to the model and print the reply.
// `mode` selects the system prompt via buildSystemPrompt(); `config`
// controls streaming, markdown rendering, and debug output.
async function runSinglePrompt(mode, prompt, config) {
  const systemPrompt = buildSystemPrompt(mode, config);
  const messages = systemPrompt
    ? [{ role: "system", content: systemPrompt }]
    : [];
  messages.push({ role: "user", content: prompt });

  const reply = await chatCompletion(config, messages);

  // When streaming is enabled the reply was presumably already written
  // by chatCompletion as chunks arrived (confirm in gpt4all.js); only
  // print it here for the non-streaming path.
  if (!config.stream) {
    const output = config.renderMarkdown
      ? createMarkdownRenderer(config.markdownStyles).renderText(reply)
      : reply;
    term(`${output}\n`);
  }
  if (config.debugRender) {
    term(`\n--- RAW ---\n${reply}\n`);
  }
  term(`\n`);
}
|
|
127
|
+
|
|
128
|
+
// Prompt with `label` and read one line from the terminal.
// Resolves with the trimmed input, or "" on error/cancel so callers
// can treat an empty string as "nothing entered".
async function inputLine(label) {
  term(label);
  return new Promise((resolve) => {
    term.inputField({ cancelable: true }, (error, input) => {
      term("\n");
      const canceled = Boolean(error) || input === undefined || input === null;
      resolve(canceled ? "" : input.trim());
    });
  });
}
|
|
141
|
+
|
|
142
|
+
// Interactive REPL-style chat loop. Keeps the full message history in
// `messages` so the model sees prior turns. Exits on "exit"/"quit" or
// Ctrl+C (handled by the key listener installed below).
async function runChat(config) {
  term('Interactive chat. Type "exit" to quit.\n\n');
  const messages = [];
  // Grab raw input so Ctrl+C reaches our listener instead of killing
  // the process before the terminal state is restored.
  term.grabInput({ mouse: "button" });
  const cleanupChatInput = () => {
    term.grabInput(false);
    term.removeListener("key", onKey);
  };
  const onKey = (name) => {
    if (name === "CTRL_C") {
      cleanupChatInput();
      term("\nBye.\n");
      term.processExit(0);
    }
  };
  term.on("key", onKey);

  while (true) {
    const prompt = await inputLine("You> ");
    // Blank/canceled input: just re-prompt.
    if (!prompt) continue;
    if (prompt.toLowerCase() === "exit" || prompt.toLowerCase() === "quit") {
      cleanupChatInput();
      term("Bye.\n");
      break;
    }

    messages.push({ role: "user", content: prompt });
    term("A.I> ");
    const reply = await chatCompletion(config, messages);
    // When streaming, the reply is presumably printed by chatCompletion
    // as chunks arrive (confirm in gpt4all.js); only echo it here for
    // the non-streaming path.
    if (!config.stream) {
      if (config.renderMarkdown) {
        const renderer = createMarkdownRenderer(config.markdownStyles);
        term(renderer.renderText(reply));
      } else {
        term(reply);
      }
    }
    if (config.debugRender) {
      term(`\n--- RAW ---\n${reply}\n`);
    }
    term("\n\n");
    // Record the assistant turn so the next request has full context.
    messages.push({ role: "assistant", content: reply });
  }
}
|
|
186
|
+
|
|
187
|
+
// Fetch the model list from the GPT4All server, show an interactive
// single-column menu, and persist the chosen model as the default.
// Exits the process (via term.processExit) in every path.
async function runModels(config) {
  let models;
  try {
    models = await listModels(config.baseUrl);
  } catch (err) {
    term(`Error: ${err.message}\n`);
    term.processExit(1);
    // BUG FIX: term.processExit restores the terminal asynchronously,
    // so without this return we would fall through and read
    // `models.length` on undefined.
    return;
  }

  if (!models.length) {
    term("No models found from GPT4All server.\n");
    term.processExit(0);
    return; // same rationale as above
  }

  term("Available models:\n");
  // Prepend a pseudo-entry that maps back to the "gpt4all" default.
  models.unshift("Use default gpt4all setting");
  models.forEach((model) => term(`- ${model}\n`));
  term("\nSelect a default model (use arrows + Enter, Esc to cancel):\n");

  // Preselect the currently configured model when it is in the list.
  const currentIndex = Math.max(models.indexOf(config.model), 0);
  term.grabInput({ mouse: "button" });
  const cleanup = () => {
    term.grabInput(false);
    term.removeListener("key", onKey);
  };
  const onKey = (name) => {
    if (name === "CTRL_C") {
      cleanup();
      term("\nCanceled.\n");
      term.processExit(0);
    }
  };
  term.on("key", onKey);

  await new Promise((resolve) => {
    term.singleColumnMenu(
      models,
      { cancelable: true, selectedIndex: currentIndex },
      (error, response) => {
        term("\n");
        if (error || !response || response.canceled) {
          cleanup();
          term("Selection canceled.\n");
          term.processExit(0);
          resolve();
          // BUG FIX: previously fell through and indexed `models`
          // with an undefined selectedIndex after cancel.
          return;
        }
        let selected = models[response.selectedIndex];
        // The pseudo-entry maps back to the server's default model.
        if (selected === "Use default gpt4all setting") {
          selected = "gpt4all";
        }
        setConfigValue("model", selected);
        config.model = selected;
        cleanup();
        term(`Default model set to "${selected}".\n`);
        term.processExit(0);
        resolve();
      }
    );
  });
}
|
|
247
|
+
|
|
248
|
+
/**
 * CLI entry point: parse argv, strip global flags, and dispatch to the
 * matching subcommand. `argv` is the full process.argv-style array
 * (first two entries are node + script path).
 */
export async function runCli(argv) {
  const args = argv.slice(2);
  const config = loadConfig();

  // Global flags are removed from `args` before command dispatch.
  const noRenderIndex = args.indexOf("--no-render");
  if (noRenderIndex !== -1) {
    config.renderMarkdown = false;
    args.splice(noRenderIndex, 1);
  }
  const debugRenderIndex = args.indexOf("--debug-render");
  if (debugRenderIndex !== -1) {
    config.debugRender = true;
    args.splice(debugRenderIndex, 1);
  }
  // NOTE(review): a bare "-d" anywhere in argv is treated as this flag,
  // even if the user meant it as part of an unquoted prompt.
  const detailedSuggestIndex = args.indexOf("--detailed-suggest");
  const shortDetailedSuggestIndex = args.indexOf("-d");
  if (detailedSuggestIndex !== -1 || shortDetailedSuggestIndex !== -1) {
    config.detailedSuggest = true;
    // BUG FIX: splice the higher index first so the earlier removal
    // does not shift the second index onto the wrong element when
    // both flag spellings are present.
    const toRemove = [detailedSuggestIndex, shortDetailedSuggestIndex]
      .filter((i) => i !== -1)
      .sort((a, b) => b - a);
    for (const i of toRemove) {
      args.splice(i, 1);
    }
  }

  if (args.length === 0 || args.includes("-h") || args.includes("--help")) {
    printHelp();
    return;
  }

  if (args[0] === "config") {
    if (args[1] === "get" && args[2]) {
      const key = args[2];
      term(`${config[key]}\n`);
      return;
    }
    if (args[1] === "set" && args[2] && args[3] !== undefined) {
      // Join the remaining args so unquoted multi-word values work.
      const updated = setConfigValue(args[2], args.slice(3).join(" "));
      term(`Updated ${args[2]} in ${getConfigPath()}\n`);
      term(`${JSON.stringify(updated, null, 2)}\n`);
      return;
    }

    // Bare `gac config`: dump the current configuration.
    term(`Config file: ${getConfigPath()}\n`);
    term(`${JSON.stringify(config, null, 2)}\n`);
    return;
  }

  if (args[0] === "chat") {
    await runChat(config);
    return;
  }

  if (args[0] === "models") {
    await runModels(config);
    return;
  }

  if (args[0] === "-a") {
    const prompt = args.slice(1).join(" ").trim();
    if (!prompt) {
      term("Error: missing prompt after -a.\n");
      term.processExit(1);
      // BUG FIX: term.processExit is asynchronous; without this return
      // we would still call runSinglePrompt with an empty prompt.
      return;
    }
    await runSinglePrompt("ask", prompt, config);
    return;
  }

  if (args[0] === "suggest" || args[0] === "explain" || args[0] === "ask") {
    const prompt = args.slice(1).join(" ").trim();
    if (!prompt) {
      term(`Error: missing prompt after ${args[0]}.\n`);
      term.processExit(1);
      return; // BUG FIX: same rationale as the -a branch above
    }
    await runSinglePrompt(args[0], prompt, config);
    return;
  }

  term("Unknown command.\n\n");
  printHelp();
}
|
package/src/config.js
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
import fs from "fs";
|
|
2
|
+
import path from "path";
|
|
3
|
+
import os from "os";
|
|
4
|
+
|
|
5
|
+
// Baseline configuration. Written to disk on first run and used to
// fill in any keys missing from the user's config file (see
// loadConfig/saveConfig below).
const DEFAULT_CONFIG = {
  baseUrl: "http://localhost:4891", // GPT4All OpenAI-compatible server
  model: "gpt4all", // server-side default model alias
  temperature: 0.7,
  maxTokens: 512,
  stream: true, // stream tokens as they arrive
  renderMarkdown: true, // style replies with the markdown renderer
  debugRender: false, // also print the raw model output
  detailedSuggest: false, // terse suggestions unless enabled
  // Styling for the terminal markdown renderer; presumably consumed by
  // markdown.js — confirm individual key semantics there.
  markdownStyles: {
    headerStyles: ["bold"],
    headerStylesByLevel: {
      1: ["bold", "brightWhite"],
      2: ["bold"],
      3: ["bold"],
      4: ["dim"],
      5: ["dim"],
      6: ["dim"],
    },
    headerUnderline: true,
    headerUnderlineLevels: [1], // only H1 headers get an underline
    headerUnderlineStyle: ["dim"],
    headerUnderlineChar: "─",
    codeStyles: ["cyan"],
    codeBackground: ["bgBlack"],
    codeBorder: true,
    codeBorderStyle: ["dim"],
    codeGutter: "│ ",
    // Box-drawing characters for fenced code block borders.
    codeBorderChars: {
      topLeft: "┌",
      top: "─",
      topRight: "┐",
      bottomLeft: "└",
      bottom: "─",
      bottomRight: "┘",
    },
  },
};
|
|
43
|
+
|
|
44
|
+
// Preferred config location in the user's home directory.
const HOME_CONFIG_DIR = path.join(os.homedir(), ".gac");
// Fallback used only when the home directory is not writable.
const FALLBACK_CONFIG_DIR = path.join(process.cwd(), ".gac");
// Memoized result of resolveConfigDir(); null until first resolution.
let resolvedConfigDir = null;
|
|
47
|
+
|
|
48
|
+
// Pick (and memoize) the directory that holds config.json.
// Tries ~/.gac first, then ./.gac when home is not writable. If
// neither directory can be created, still reports the home path so a
// later read/write surfaces a meaningful filesystem error.
function resolveConfigDir() {
  if (resolvedConfigDir) return resolvedConfigDir;

  for (const candidate of [HOME_CONFIG_DIR, FALLBACK_CONFIG_DIR]) {
    try {
      fs.mkdirSync(candidate, { recursive: true });
      resolvedConfigDir = candidate;
      return resolvedConfigDir;
    } catch (err) {
      // Candidate not creatable — try the next one.
    }
  }

  resolvedConfigDir = HOME_CONFIG_DIR;
  return resolvedConfigDir;
}
|
|
68
|
+
|
|
69
|
+
// Absolute path of the active config.json (inside the resolved dir).
export function getConfigPath() {
  const dir = resolveConfigDir();
  return path.join(dir, "config.json");
}
|
|
72
|
+
|
|
73
|
+
/**
 * Load the config from disk, creating it with defaults on first run.
 * Missing top-level keys fall back to DEFAULT_CONFIG. The nested
 * `markdownStyles` object is merged key-by-key — previously a shallow
 * spread replaced the whole object, so a hand-edited partial
 * `markdownStyles` silently dropped every other style default.
 * Returns pure defaults when the file is unreadable or not valid JSON.
 */
export function loadConfig() {
  const configPath = getConfigPath();
  if (!fs.existsSync(configPath)) {
    saveConfig(DEFAULT_CONFIG);
    return { ...DEFAULT_CONFIG };
  }

  try {
    const raw = fs.readFileSync(configPath, "utf8");
    const parsed = JSON.parse(raw);
    return {
      ...DEFAULT_CONFIG,
      ...parsed,
      markdownStyles: {
        ...DEFAULT_CONFIG.markdownStyles,
        ...(parsed.markdownStyles ?? {}),
      },
    };
  } catch (err) {
    // Corrupt or unreadable config: run with defaults rather than crash.
    return { ...DEFAULT_CONFIG };
  }
}
|
|
88
|
+
|
|
89
|
+
// Persist the config to disk, filling in any missing keys from the
// defaults first (pretty-printed with 2-space indent).
export function saveConfig(config) {
  const merged = Object.assign({}, DEFAULT_CONFIG, config);
  fs.writeFileSync(getConfigPath(), JSON.stringify(merged, null, 2));
}
|
|
94
|
+
|
|
95
|
+
// Interpret a CLI-supplied string as JSON, a boolean, null, or a
// number when it looks like one; otherwise return the original
// (untrimmed) string unchanged.
function coerceValue(value) {
  const trimmed = value.trim();

  const looksLikeJson =
    (trimmed.startsWith("{") && trimmed.endsWith("}")) ||
    (trimmed.startsWith("[") && trimmed.endsWith("]"));
  if (looksLikeJson) {
    try {
      return JSON.parse(trimmed);
    } catch (err) {
      // Not actually valid JSON — fall through to the scalar checks.
    }
  }

  switch (trimmed) {
    case "true":
      return true;
    case "false":
      return false;
    case "null":
      return null;
    default:
      break;
  }

  const asNumber = Number(trimmed);
  if (trimmed !== "" && !Number.isNaN(asNumber)) {
    return asNumber;
  }
  return value;
}
|
|
113
|
+
|
|
114
|
+
/**
 * Set a (possibly dot-separated, e.g. "markdownStyles.codeStyles")
 * config key to a coerced value, persist the result, and return the
 * updated config object. Intermediate non-object path segments are
 * replaced with fresh objects.
 *
 * SECURITY FIX: the key comes straight from CLI input, so path
 * segments that would walk onto Object.prototype ("__proto__",
 * "constructor", "prototype") are rejected to prevent prototype
 * pollution.
 */
export function setConfigValue(key, value) {
  const config = loadConfig();
  const normalizedValue = coerceValue(value);
  const parts = key.split(".");

  const forbidden = new Set(["__proto__", "constructor", "prototype"]);
  if (parts.some((part) => forbidden.has(part))) {
    throw new Error(`Refusing to set unsafe config key: ${key}`);
  }

  let cursor = config;
  for (let i = 0; i < parts.length - 1; i += 1) {
    const part = parts[i];
    // Create/replace intermediate containers so deep keys always land
    // on a plain object.
    if (typeof cursor[part] !== "object" || cursor[part] === null) {
      cursor[part] = {};
    }
    cursor = cursor[part];
  }
  cursor[parts[parts.length - 1]] = normalizedValue;
  saveConfig(config);
  return config;
}
|