@ramusriram/versus 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/LICENSE +21 -0
- package/README.md +210 -0
- package/bin/versus.js +8 -0
- package/package.json +40 -0
- package/src/backends/gemini.js +57 -0
- package/src/backends/index.js +66 -0
- package/src/backends/mock.js +23 -0
- package/src/backends/ollama.js +29 -0
- package/src/backends/openai.js +40 -0
- package/src/cache.js +82 -0
- package/src/cli.js +341 -0
- package/src/config.js +30 -0
- package/src/engine.js +143 -0
- package/src/introspect.js +165 -0
- package/src/prompt.js +57 -0
- package/src/status.js +125 -0
- package/src/util/argv.js +16 -0
- package/src/util/markdown.js +206 -0
- package/src/util/sanitize.js +28 -0
- package/src/util/spinner.js +47 -0
- package/src/util/style.js +46 -0
- package/src/util/text.js +61 -0
- package/src/util/time.js +93 -0
- package/src/util/timing.js +7 -0
- package/src/util/view.js +107 -0
- package/test/argv.test.js +12 -0
- package/test/markdown.test.js +32 -0
- package/test/prompt.test.js +20 -0
- package/test/sanitize.test.js +16 -0
- package/test/text.test.js +16 -0
- package/test/time.test.js +12 -0
package/CHANGELOG.md
ADDED
@@ -0,0 +1,18 @@
+# Changelog
+
+## 0.1.0
+
+Initial public release.
+
+- Main compare command: `versus <left> <right>` (with backends: `auto|openai|gemini|ollama|mock`).
+- Markdown output is **rendered in the terminal by default** (TTY only) for readability.
+- Use `--raw` or `--format markdown` to print raw Markdown.
+- Use `--format json` for machine-readable output.
+- Added `versus prompt <left> <right>` to view the full generated LLM prompt (opens a pager by default; optional `--editor` / `--stdout`).
+- Improved mock backend prompt preview: truncates at word boundaries (no mid-word cuts) and points users to `versus prompt`.
+- CLI flag parsing supports both `--flag value` and `--flag=value` (e.g. `--backend=gemini`).
+- Added an animated loading indicator (spinner) for long operations.
+- Added a cached-response notice when output comes from the local cache.
+- Added `--color auto|always|never` (alias: `--no-color`) and improved terminal Markdown rendering for more consistent visuals across terminal themes.
+- Improved Gemini/OpenAI network error hints (includes a WSL DNS workaround for `fetch failed`).
+- Added GitHub Actions CI workflow running tests on Node 20/22.
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,210 @@
+# Versus CLI (`versus`)
+
+Compare two Linux commands or concepts (A vs B) from inside your terminal, grounded in your machine’s local documentation (man pages, `--help`, `info`) and summarized by an LLM backend.
+
+
+
+## Demo
+
+```bash
+versus nano vim
+versus curl wget --backend gemini
+versus "git pull" "git fetch" --level beginner
+```
+
+## Why this exists
+
+When you’re learning Linux, you constantly ask:
+“What’s the difference between X and Y?”
+
+Versus answers that in a structured way, using **local docs as grounding** so the model is less likely to hallucinate.
+
+## Features
+
+* **Grounded comparisons** using `man`, `--help`, and `info` (best-effort)
+* **Multiple backends:** `gemini`, `openai`, `ollama`, `mock`
+* **Markdown rendering in terminal** by default (TTY). Use `--raw` for raw Markdown
+* **TTL cache** to reduce repeated API calls and speed up repeat runs
+* **Prompt inspection:** view the exact prompt without calling any backend
+* **CI (GitHub Actions)** runs tests on every push/PR
+
+## Requirements
+
+* Node.js 20+ (matches CI and the `engines` field in `package.json`)
+* Linux/WSL (macOS likely works too if `man` exists)
+
+## Install
+
+### From npm (recommended)
+
+```bash
+npm install -g @ramusriram/versus
+```
+
+> Note: this command works once the package has been published to npm.
+
+Then run:
+
+```bash
+versus nano vim
+```
+
+### From source (for development)
+
+```bash
+git clone https://github.com/RamuSriram/versus-ai-cli.git
+cd versus-ai-cli
+npm install
+npm link
+```
+
+## Quickstart
+
+Backend selection order (when `--backend auto`):
+
+* Uses **OpenAI** if `OPENAI_API_KEY` is set
+* Else uses **Gemini** if `GEMINI_API_KEY` is set
+* Else uses **Ollama** if it’s running locally
+* Else falls back to the **Mock** backend
+
+Run without API keys (force the mock backend):
+
+```bash
+versus nano vim --backend mock
+```
+
+Gemini:
+
+```bash
+export GEMINI_API_KEY="your_key_here"
+versus nano vim --backend gemini
+```
+
+OpenAI:
+
+```bash
+export OPENAI_API_KEY="your_key_here"
+versus nano vim --backend openai
+```
+
+Ollama (local):
+
+```bash
+# install + run ollama first
+versus nano vim --backend ollama --model llama3
+```
+
+## Usage
+
+```bash
+versus <left> <right> [options]
+```
+
+Common options:
+
+* `-b, --backend <backend>`: `auto|openai|gemini|ollama|mock`
+* `-m, --model <model>`: provider-specific model name
+* `--level <level>`: `beginner|intermediate|advanced`
+* `--mode <mode>`: `summary|cheatsheet|table`
+* `--format <format>`: `rendered|markdown|json`
+* `--raw`: output raw Markdown (disable terminal rendering)
+* `--color <mode>`: `auto|always|never` (alias: `--no-color`; also respects `NO_COLOR=1`)
+* `--no-cache`: bypass the cache
+* `--ttl-hours <n>`: cache TTL in hours (default: `720` = 30 days)
+* `--max-doc-chars <n>`: max local-docs characters per side
+* `--no-docs`: don’t read local docs (LLM general knowledge only)
+* `-d, --debug`: print debug metadata
+
+Tip: flags can be passed as `--backend gemini` or `--backend=gemini`.
+
+## Helpful subcommands
+
+### `versus status` (alias: `versus doctor`)
+
+Checks your environment (Node, `man`, cache path) and backend configuration.
+
+```bash
+versus status
+versus doctor
+```
+
+### `versus cache`
+
+Inspect or clear your local cache:
+
+```bash
+versus cache
+versus cache --clear
+```
+
+### `versus prompt`
+
+View the full prompt that would be sent to the backend (no API call).
+Opens in a pager by default to avoid dumping huge text.
+
+```bash
+versus prompt nano vim
+```
+
+Other modes:
+
+```bash
+versus prompt nano vim --stdout
+versus prompt nano vim --editor
+versus prompt nano vim --output prompt.txt
+```
+
+## Configuration
+
+Optional config file locations (first found wins):
+
+* `${XDG_CONFIG_HOME:-~/.config}/versus/config.json`
+* `~/.versusrc.json`
+
+Example:
+
+```json
+{
+  "backend": "gemini",
+  "model": "gemini-2.5-flash",
+  "level": "intermediate",
+  "mode": "summary",
+  "ttlHours": 720,
+  "maxDocChars": 6000
+}
+```
+
+## Caching
+
+Cache file:
+
+* `~/.cache/versus/cache.json`
+
+Use `--no-cache` to force a fresh response.
+
+## Privacy / Data
+
+Versus sends your inputs (and, by default, any collected local docs like `man`/`--help`/`info`) to the selected backend.
+
+* For **zero network calls**, use `--backend mock`
+* To include **no local docs**, use `--no-docs`
+
+## Troubleshooting
+
+### WSL: `Error: fetch failed`
+
+Some WSL setups prefer IPv6 DNS resolution, which can cause `fetch failed` errors.
+
+```bash
+export NODE_OPTIONS="--dns-result-order=ipv4first"
+```
+
+## Development
+
+```bash
+npm test
+```
+
+## License
+
+MIT
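The `--format json` flag documented above makes the output scriptable. A minimal sketch of consuming it from Node follows; the JSON schema is produced by `src/engine.js` and is not documented here, so the result is logged as-is rather than assuming any field names:

```js
import { execFile } from "node:child_process";
import { promisify } from "node:util";

const run = promisify(execFile);

// Assumes the `versus` binary is on PATH (npm install -g @ramusriram/versus).
const { stdout } = await run("versus", ["nano", "vim", "--format", "json"]);
const result = JSON.parse(stdout);
console.log(result);
```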
package/bin/versus.js
ADDED
package/package.json
ADDED
@@ -0,0 +1,40 @@
+{
+  "name": "@ramusriram/versus",
+  "version": "0.1.0",
+  "description": "AI-powered CLI to compare two Linux commands or concepts, grounded in local docs (man, --help, info).",
+  "type": "module",
+  "bin": {
+    "versus": "bin/versus.js"
+  },
+  "engines": {
+    "node": ">=20"
+  },
+  "scripts": {
+    "start": "node ./bin/versus.js --help",
+    "test": "node --test",
+    "prepublishOnly": "node --test"
+  },
+  "dependencies": {
+    "commander": "^12.1.0",
+    "openai": "^6.8.0",
+    "picocolors": "^1.1.0"
+  },
+  "keywords": [
+    "cli",
+    "linux",
+    "man",
+    "ai",
+    "llm",
+    "compare",
+    "developer-tools"
+  ],
+  "license": "MIT",
+  "files": [
+    "bin",
+    "src",
+    "test",
+    "README.md",
+    "CHANGELOG.md",
+    "LICENSE"
+  ]
+}
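The `"test": "node --test"` script relies on Node's built-in test runner, which auto-discovers `*.test.js` files; no test framework appears in `dependencies`. The test files listed above are not included in this diff, so the following is only an illustrative sketch of the style such a file would use (the file name and assertion are hypothetical):

```js
// test/example.test.js (hypothetical; the package's real tests are not shown in this diff)
import test from "node:test";
import assert from "node:assert/strict";

test("--flag=value syntax splits into flag and value", () => {
  const [flag, value] = "--backend=gemini".split("=");
  assert.equal(flag, "--backend");
  assert.equal(value, "gemini");
});
```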
package/src/backends/gemini.js
ADDED
@@ -0,0 +1,57 @@
+export async function generateGemini({ prompt, model, apiKey }) {
+  const key = apiKey || process.env.GEMINI_API_KEY;
+  if (!key) {
+    const err = new Error("GEMINI_API_KEY is not set.");
+    err.hint = 'Export your API key: export GEMINI_API_KEY="..."';
+    throw err;
+  }
+
+  const m = model || "gemini-2.5-flash";
+  const url = `https://generativelanguage.googleapis.com/v1beta/models/${encodeURIComponent(m)}:generateContent`;
+
+  let res;
+  try {
+    res = await fetch(url, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        "x-goog-api-key": key,
+      },
+      body: JSON.stringify({
+        contents: [
+          {
+            role: "user",
+            parts: [{ text: prompt }],
+          },
+        ],
+      }),
+    });
+  } catch {
+    const err = new Error("Failed to reach Gemini API (network error).");
+    err.hint =
+      "Check your internet connection and DNS.\n" +
+      "If you're on WSL and see 'fetch failed', try:\n" +
+      ' export NODE_OPTIONS="--dns-result-order=ipv4first"';
+    throw err;
+  }
+
+  const data = await res.json().catch(() => ({}));
+
+  if (!res.ok) {
+    const msg =
+      data?.error?.message ||
+      data?.message ||
+      (typeof data === "string" ? data : "") ||
+      `HTTP ${res.status}`;
+
+    const err = new Error(`Gemini error: ${msg}`);
+    err.hint =
+      `Check that your model name is valid (e.g. ${m}) and that GEMINI_API_KEY is correct.`;
+    throw err;
+  }
+
+  const parts = data?.candidates?.[0]?.content?.parts;
+  const text = Array.isArray(parts) ? parts.map((p) => p.text || "").join("") : "";
+
+  return String(text).trim();
+}
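Note the error convention here: failures are thrown as `Error` objects carrying an extra non-standard `hint` property with remediation advice. A minimal sketch of a caller surfacing both fields; the CLI's actual error handling lives in `src/cli.js`, which is not shown in this diff:

```js
import { generateGemini } from "./src/backends/gemini.js";

try {
  const text = await generateGemini({ prompt: "Compare nano and vim." });
  console.log(text);
} catch (err) {
  // Backends attach a human-readable hint (e.g. the WSL DNS workaround).
  console.error(err.message);
  if (err.hint) console.error(`Hint: ${err.hint}`);
}
```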
package/src/backends/index.js
ADDED
@@ -0,0 +1,66 @@
+import { generateOpenAI } from "./openai.js";
+import { generateGemini } from "./gemini.js";
+import { generateOllama } from "./ollama.js";
+import { generateMock } from "./mock.js";
+
+async function isOllamaUp(baseUrl) {
+  const urlBase = baseUrl || process.env.OLLAMA_BASE_URL || "http://localhost:11434";
+  const url = `${urlBase.replace(/\/$/, "")}/api/version`;
+  try {
+    const res = await fetch(url);
+    if (!res.ok) return false;
+    return true;
+  } catch {
+    return false;
+  }
+}
+
+function hasOpenAIKey() {
+  return Boolean(process.env.OPENAI_API_KEY);
+}
+
+function hasGeminiKey() {
+  return Boolean(process.env.GEMINI_API_KEY);
+}
+
+export async function generateText({ backend, prompt, model }) {
+  const want = backend || "auto";
+
+  if (want === "auto") {
+    if (hasOpenAIKey()) {
+      const text = await generateOpenAI({ prompt, model });
+      return { text, backendUsed: "openai", modelUsed: model || "gpt-5.2" };
+    }
+    if (hasGeminiKey()) {
+      const text = await generateGemini({ prompt, model });
+      return { text, backendUsed: "gemini", modelUsed: model || "gemini-2.5-flash" };
+    }
+    if (await isOllamaUp()) {
+      const text = await generateOllama({ prompt, model });
+      return { text, backendUsed: "ollama", modelUsed: model || "llama3.2" };
+    }
+    const text = await generateMock({ prompt });
+    return { text, backendUsed: "mock", modelUsed: "mock" };
+  }
+
+  if (want === "openai") {
+    const text = await generateOpenAI({ prompt, model });
+    return { text, backendUsed: "openai", modelUsed: model || "gpt-5.2" };
+  }
+  if (want === "gemini") {
+    const text = await generateGemini({ prompt, model });
+    return { text, backendUsed: "gemini", modelUsed: model || "gemini-2.5-flash" };
+  }
+  if (want === "ollama") {
+    const text = await generateOllama({ prompt, model });
+    return { text, backendUsed: "ollama", modelUsed: model || "llama3.2" };
+  }
+  if (want === "mock") {
+    const text = await generateMock({ prompt });
+    return { text, backendUsed: "mock", modelUsed: "mock" };
+  }
+
+  const err = new Error(`Unknown backend: ${want}`);
+  err.hint = "Use --backend auto|openai|gemini|ollama|mock";
+  throw err;
+}
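`generateText` is the single dispatch point for all backends and reports which backend and model actually served the request. A short usage sketch (the prompt text is illustrative):

```js
import { generateText } from "./src/backends/index.js";

// With backend "auto", the fallback order is: OPENAI_API_KEY -> GEMINI_API_KEY ->
// a locally running Ollama -> the offline mock backend.
const { text, backendUsed, modelUsed } = await generateText({
  backend: "auto",
  prompt: "Compare curl and wget.",
});

console.log(`answered by ${backendUsed} (${modelUsed})`);
console.log(text);
```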
package/src/backends/mock.js
ADDED
@@ -0,0 +1,23 @@
+import { truncateAtWordBoundary } from "../util/text.js";
+
+export async function generateMock({ prompt }) {
+  const maxPreviewChars = 900;
+  const preview = truncateAtWordBoundary(prompt, maxPreviewChars);
+  const wasTruncated = String(prompt ?? "").length > maxPreviewChars;
+
+  return [
+    "Mock backend selected.",
+    "",
+    "Set one of:",
+    "- OPENAI_API_KEY (and use --backend=openai)",
+    "- GEMINI_API_KEY (and use --backend=gemini)",
+    "- Install Ollama (and use --backend=ollama)",
+    "",
+    `Prompt preview${wasTruncated ? " (truncated)" : ""}:`,
+    preview,
+    "",
+    "Tip: view the full prompt with:",
+    "  versus prompt <left> <right>           # opens in a pager",
+    "  versus prompt --stdout <left> <right>  # print to stdout",
+  ].join("\n");
+}
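`src/util/text.js` is not included in this diff, so the exact implementation of `truncateAtWordBoundary` is unknown. A plausible sketch consistent with how `mock.js` uses it (cut to a maximum length at the last word boundary, never mid-word; truncation is flagged separately by the caller):

```js
// Hypothetical reconstruction; the real helper lives in src/util/text.js (not shown).
export function truncateAtWordBoundary(input, maxChars) {
  const s = String(input ?? "");
  if (s.length <= maxChars) return s;
  const cut = s.slice(0, maxChars);
  const lastSpace = cut.lastIndexOf(" ");
  // Fall back to a hard cut when there is no space to break on.
  return lastSpace > 0 ? cut.slice(0, lastSpace) : cut;
}
```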
package/src/backends/ollama.js
ADDED
@@ -0,0 +1,29 @@
+export async function generateOllama({ prompt, model, baseUrl }) {
+  const urlBase = baseUrl || process.env.OLLAMA_BASE_URL || "http://localhost:11434";
+  const url = `${urlBase.replace(/\/$/, "")}/api/generate`;
+
+  const res = await fetch(url, {
+    method: "POST",
+    headers: { "Content-Type": "application/json" },
+    body: JSON.stringify({
+      model: model || "llama3.2",
+      prompt,
+      stream: false,
+    }),
+  }).catch((e) => {
+    const err = new Error(`Failed to reach Ollama at ${urlBase}`);
+    err.hint = "Start Ollama and make sure the API is reachable on http://localhost:11434";
+    throw err;
+  });
+
+  if (!res.ok) {
+    const body = await res.text().catch(() => "");
+    const err = new Error(`Ollama error (${res.status}): ${body.slice(0, 200)}`);
+    err.hint = "Check that the model is installed: ollama pull llama3.2";
+    throw err;
+  }
+
+  const data = await res.json();
+  const text = data?.response ?? "";
+  return String(text).trim();
+}
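`generateOllama` accepts an optional `baseUrl`, falling back to `OLLAMA_BASE_URL` and then `http://localhost:11434`. A short sketch pointing at a non-default host (the address is illustrative; the model must already be pulled on that host):

```js
import { generateOllama } from "./src/backends/ollama.js";

const text = await generateOllama({
  prompt: "Compare ls and tree.",
  model: "llama3.2",
  baseUrl: "http://192.168.1.50:11434",
});
console.log(text);
```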
package/src/backends/openai.js
ADDED
@@ -0,0 +1,40 @@
+import OpenAI from "openai";
+
+export async function generateOpenAI({ prompt, model, apiKey, baseUrl }) {
+  const key = apiKey || process.env.OPENAI_API_KEY;
+  if (!key) {
+    const err = new Error("OPENAI_API_KEY is not set.");
+    err.hint = 'Export your API key: export OPENAI_API_KEY="..."';
+    throw err;
+  }
+
+  const client = new OpenAI({
+    apiKey: key,
+    baseURL: baseUrl || process.env.OPENAI_BASE_URL,
+  });
+
+  try {
+    const response = await client.responses.create({
+      model: model || "gpt-5.2",
+      input: prompt,
+    });
+
+    const text = response?.output_text ?? "";
+    return String(text).trim();
+  } catch (e) {
+    const msg = e?.message || String(e);
+    const err = new Error(`OpenAI request failed: ${msg}`);
+
+    const lower = String(msg).toLowerCase();
+    if (lower.includes("fetch failed")) {
+      err.hint =
+        "Check your internet connection and DNS.\n" +
+        "If you're on WSL and see 'fetch failed', try:\n" +
+        ' export NODE_OPTIONS="--dns-result-order=ipv4first"';
+    } else if (lower.includes("401") || lower.includes("unauthorized")) {
+      err.hint = "Check that OPENAI_API_KEY is valid.";
+    }
+
+    throw err;
+  }
+}
package/src/cache.js
ADDED
@@ -0,0 +1,82 @@
+import fs from "node:fs/promises";
+import path from "node:path";
+import os from "node:os";
+
+function cacheFilePath() {
+  const home = os.homedir();
+  const xdg = process.env.XDG_CACHE_HOME;
+  const base = xdg ? xdg : path.join(home, ".cache");
+  return path.join(base, "versus", "cache.json");
+}
+
+async function ensureDir(dir) {
+  await fs.mkdir(dir, { recursive: true });
+}
+
+async function readCache() {
+  const file = cacheFilePath();
+  try {
+    const raw = await fs.readFile(file, "utf8");
+    const data = JSON.parse(raw);
+    if (!data || typeof data !== "object") return { version: 1, entries: {} };
+    if (!data.entries || typeof data.entries !== "object") data.entries = {};
+    return data;
+  } catch (err) {
+    if (err?.code === "ENOENT") return { version: 1, entries: {} };
+    // If corrupted, keep a backup and start fresh.
+    if (err instanceof SyntaxError) {
+      const backup = file + ".corrupt";
+      try {
+        await fs.rename(file, backup);
+      } catch {}
+      return { version: 1, entries: {} };
+    }
+    throw err;
+  }
+}
+
+async function writeCache(data) {
+  const file = cacheFilePath();
+  const dir = path.dirname(file);
+  await ensureDir(dir);
+  await fs.writeFile(file, JSON.stringify(data, null, 2), "utf8");
+}
+
+export async function getCacheInfo() {
+  const file = cacheFilePath();
+  const data = await readCache();
+  const entries = Object.keys(data.entries || {}).length;
+  return { file, entries };
+}
+
+export async function cacheGet(key) {
+  const data = await readCache();
+  const entry = data.entries?.[key];
+  if (!entry) return null;
+
+  if (entry.expiresAt) {
+    const exp = Date.parse(entry.expiresAt);
+    if (Number.isFinite(exp) && Date.now() > exp) {
+      // expired: delete
+      delete data.entries[key];
+      await writeCache(data);
+      return null;
+    }
+  }
+
+  return entry;
+}
+
+export async function cacheSet(key, entry) {
+  const data = await readCache();
+  data.entries[key] = entry;
+  await writeCache(data);
+}
+
+export async function clearCache() {
+  const data = await readCache();
+  const count = Object.keys(data.entries || {}).length;
+  data.entries = {};
+  await writeCache(data);
+  return count;
+}
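Expiry is driven entirely by an optional ISO-8601 `expiresAt` field on each entry, which `cacheGet` checks via `Date.parse`. A minimal sketch of storing and reading a TTL-bound entry (the key format and the fields besides `expiresAt` are assumptions; the real entry shape is set by `src/engine.js`, not shown in this diff):

```js
import { cacheGet, cacheSet } from "./src/cache.js";

const ttlHours = 720; // the CLI's default TTL (30 days)

await cacheSet("versus:nano:vim", {
  markdown: "# nano vs vim ...", // hypothetical payload field
  expiresAt: new Date(Date.now() + ttlHours * 3600 * 1000).toISOString(),
});

// Returns null once expiresAt has passed (the stale entry is also deleted on read).
const hit = await cacheGet("versus:nano:vim");
if (hit) console.log(hit.markdown);
```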