@ramusriram/versus 0.1.0 → 0.1.2

This diff shows the changes between publicly released versions of this package as they appear in its public registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
  # Changelog
 
+ ## 0.1.2
+
+ - Added demo GIF to README (cropped for cleaner display).
+ - **UX improvement:** Added real-time progress status messages ("Reading local docs for curl...", "Querying gemini...") during comparisons.
+ - Centralized default model names in `src/backends/defaults.js` for easier maintenance.
+
+ ## 0.1.1
+
+ - Published to npm as `@ramusriram/versus`.
+ - Added npm version, license, and Node.js version badges to README.
+ - Added package metadata: `author`, `repository`, `homepage`, `bugs` fields.
+ - Added CLI integration test with mock backend.
+
  ## 0.1.0
 
  Initial public release.
package/README.md CHANGED
@@ -1,14 +1,19 @@
  # Versus CLI (`versus`)
 
- Compare two Linux commands or concepts (A vs B) from inside your terminal, grounded in your machine’s local documentation (man pages, `--help`, `info`) and summarized by an LLM backend.
+ [![npm version](https://img.shields.io/npm/v/@ramusriram/versus)](https://www.npmjs.com/package/@ramusriram/versus)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+ [![Node.js Version](https://img.shields.io/node/v/@ramusriram/versus)](https://nodejs.org/)
+ [![CI](https://github.com/RamuSriram/versus-ai-cli/actions/workflows/ci.yml/badge.svg)](https://github.com/RamuSriram/versus-ai-cli/actions/workflows/ci.yml)
 
- ![CI](https://github.com/RamuSriram/versus-ai-cli/actions/workflows/ci.yml/badge.svg)
+ Compare two Linux commands or concepts (A vs B) from inside your terminal, grounded in your machine’s local documentation (man pages, `--help`, `info`) and summarized by an LLM backend.
 
  ## Demo
 
+ ![versus demo](.github/assets/demo.gif)
+
  ```bash
- versus nano vim
- versus curl wget --backend gemini
+ versus curl wget
+ versus nano vim --backend gemini
  versus "git pull" "git fetch" --level beginner
  ```
 
@@ -41,8 +46,6 @@ Versus answers that in a structured way, using **local docs as grounding** so th
  npm install -g @ramusriram/versus
  ```
 
- > Note: this command will work after the first npm publish.
-
  Then run:
 
  ```bash
package/package.json CHANGED
@@ -1,7 +1,16 @@
  {
    "name": "@ramusriram/versus",
-   "version": "0.1.0",
+   "version": "0.1.2",
    "description": "AI-powered CLI to compare two Linux commands or concepts, grounded in local docs (man, --help, info).",
+   "author": "Ramu Sriram",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/RamuSriram/versus-ai-cli.git"
+   },
+   "homepage": "https://github.com/RamuSriram/versus-ai-cli#readme",
+   "bugs": {
+     "url": "https://github.com/RamuSriram/versus-ai-cli/issues"
+   },
    "type": "module",
    "bin": {
      "versus": "bin/versus.js"
@@ -37,4 +46,4 @@
      "CHANGELOG.md",
      "LICENSE"
    ]
- }
+ }
package/src/backends/defaults.js ADDED
@@ -0,0 +1,10 @@
+ /**
+  * Default model names for each backend.
+  * Centralized here to avoid repetition and make updates easier.
+  */
+ export const DEFAULT_MODELS = {
+   openai: "gpt-5.2",
+   gemini: "gemini-2.5-flash",
+   ollama: "llama3.2",
+   mock: "mock",
+ };
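
Every backend resolves its model the same way: an explicit `model` argument wins, otherwise the shared default applies. A minimal sketch of that resolution pattern (the `resolveModel` helper is illustrative, not part of the package, and the relative import assumes the snippet sits alongside `defaults.js`):

```js
import { DEFAULT_MODELS } from "./defaults.js";

// Hypothetical helper showing the fallback each backend applies inline.
function resolveModel(backend, model) {
  // An explicit model always wins; otherwise use the centralized default.
  return model || DEFAULT_MODELS[backend];
}

resolveModel("gemini", undefined); // -> "gemini-2.5-flash"
resolveModel("gemini", "my-custom-model"); // -> caller's choice is respected
```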
package/src/backends/gemini.js CHANGED
@@ -1,3 +1,5 @@
+ import { DEFAULT_MODELS } from "./defaults.js";
+
  export async function generateGemini({ prompt, model, apiKey }) {
    const key = apiKey || process.env.GEMINI_API_KEY;
    if (!key) {
@@ -6,7 +8,7 @@ export async function generateGemini({ prompt, model, apiKey }) {
      throw err;
    }
 
-   const m = model || "gemini-2.5-flash";
+   const m = model || DEFAULT_MODELS.gemini;
    const url = `https://generativelanguage.googleapis.com/v1beta/models/${encodeURIComponent(m)}:generateContent`;
 
    let res;
package/src/backends/index.js CHANGED
@@ -2,6 +2,7 @@ import { generateOpenAI } from "./openai.js";
  import { generateGemini } from "./gemini.js";
  import { generateOllama } from "./ollama.js";
  import { generateMock } from "./mock.js";
+ import { DEFAULT_MODELS } from "./defaults.js";
 
  async function isOllamaUp(baseUrl) {
    const urlBase = baseUrl || process.env.OLLAMA_BASE_URL || "http://localhost:11434";
@@ -29,38 +30,39 @@ export async function generateText({ backend, prompt, model }) {
    if (want === "auto") {
      if (hasOpenAIKey()) {
        const text = await generateOpenAI({ prompt, model });
-       return { text, backendUsed: "openai", modelUsed: model || "gpt-5.2" };
+       return { text, backendUsed: "openai", modelUsed: model || DEFAULT_MODELS.openai };
      }
      if (hasGeminiKey()) {
        const text = await generateGemini({ prompt, model });
-       return { text, backendUsed: "gemini", modelUsed: model || "gemini-2.5-flash" };
+       return { text, backendUsed: "gemini", modelUsed: model || DEFAULT_MODELS.gemini };
      }
      if (await isOllamaUp()) {
        const text = await generateOllama({ prompt, model });
-       return { text, backendUsed: "ollama", modelUsed: model || "llama3.2" };
+       return { text, backendUsed: "ollama", modelUsed: model || DEFAULT_MODELS.ollama };
      }
      const text = await generateMock({ prompt });
-     return { text, backendUsed: "mock", modelUsed: "mock" };
+     return { text, backendUsed: "mock", modelUsed: DEFAULT_MODELS.mock };
    }
 
    if (want === "openai") {
      const text = await generateOpenAI({ prompt, model });
-     return { text, backendUsed: "openai", modelUsed: model || "gpt-5.2" };
+     return { text, backendUsed: "openai", modelUsed: model || DEFAULT_MODELS.openai };
    }
    if (want === "gemini") {
      const text = await generateGemini({ prompt, model });
-     return { text, backendUsed: "gemini", modelUsed: model || "gemini-2.5-flash" };
+     return { text, backendUsed: "gemini", modelUsed: model || DEFAULT_MODELS.gemini };
    }
    if (want === "ollama") {
      const text = await generateOllama({ prompt, model });
-     return { text, backendUsed: "ollama", modelUsed: model || "llama3.2" };
+     return { text, backendUsed: "ollama", modelUsed: model || DEFAULT_MODELS.ollama };
    }
    if (want === "mock") {
      const text = await generateMock({ prompt });
-     return { text, backendUsed: "mock", modelUsed: "mock" };
+     return { text, backendUsed: "mock", modelUsed: DEFAULT_MODELS.mock };
    }
 
    const err = new Error(`Unknown backend: ${want}`);
    err.hint = "Use --backend auto|openai|gemini|ollama|mock";
    throw err;
  }
+
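
With `--backend auto`, the dispatcher above probes providers in order: an OpenAI key, then a Gemini key, then a local Ollama server, and finally the mock backend. A minimal caller sketch (the import path assumes the dispatcher lives at `src/backends/index.js`, which this diff does not name explicitly):

```js
import { generateText } from "./src/backends/index.js";

// backend "auto" picks the first available provider: openai -> gemini -> ollama -> mock.
const { text, backendUsed, modelUsed } = await generateText({
  backend: "auto",
  prompt: "Compare curl and wget in two sentences.",
});

console.log(`[${backendUsed}/${modelUsed}]`, text);
```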
package/src/backends/ollama.js CHANGED
@@ -1,3 +1,5 @@
+ import { DEFAULT_MODELS } from "./defaults.js";
+
  export async function generateOllama({ prompt, model, baseUrl }) {
    const urlBase = baseUrl || process.env.OLLAMA_BASE_URL || "http://localhost:11434";
    const url = `${urlBase.replace(/\/$/, "")}/api/generate`;
@@ -6,7 +8,7 @@ export async function generateOllama({ prompt, model, baseUrl }) {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
-       model: model || "llama3.2",
+       model: model || DEFAULT_MODELS.ollama,
        prompt,
        stream: false,
      }),
package/src/backends/openai.js CHANGED
@@ -1,4 +1,5 @@
  import OpenAI from "openai";
+ import { DEFAULT_MODELS } from "./defaults.js";
 
  export async function generateOpenAI({ prompt, model, apiKey, baseUrl }) {
    const key = apiKey || process.env.OPENAI_API_KEY;
@@ -15,7 +16,7 @@ export async function generateOpenAI({ prompt, model, apiKey, baseUrl }) {
 
    try {
      const response = await client.responses.create({
-       model: model || "gpt-5.2",
+       model: model || DEFAULT_MODELS.openai,
        input: prompt,
      });
 
package/src/cache.js CHANGED
@@ -28,7 +28,7 @@ async function readCache() {
      const backup = file + ".corrupt";
      try {
        await fs.rename(file, backup);
-     } catch {}
+     } catch { }
      return { version: 1, entries: {} };
    }
    throw err;
package/src/cli.js CHANGED
@@ -263,7 +263,12 @@ export async function main(argv) {
 
    if (shouldShowSpinner(options)) spinner = createSpinner({ text: "Comparing" });
 
-   const result = await runComparison(left, right, options);
+   // Status callback updates spinner text for real-time progress
+   const onStatus = (msg) => {
+     if (spinner) spinner.update(msg);
+   };
+
+   const result = await runComparison(left, right, options, onStatus);
 
    spinner?.stop();
 
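The new `onStatus` callback only assumes a spinner object exposing `update` and `stop`; `createSpinner` itself is not shown in this diff. A stand-in that satisfies the same interface (an assumption, not the package's implementation):

```js
// Hypothetical spinner compatible with the calls in cli.js:
// createSpinner({ text }), spinner.update(msg), spinner.stop().
function createSpinner({ text }) {
  process.stderr.write(`${text}...\n`);
  return {
    update(msg) {
      process.stderr.write(`${msg}...\n`);
    },
    stop() {
      // A real spinner would clear its line here; this stand-in prints nothing to clear.
    },
  };
}
```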
package/src/engine.js CHANGED
@@ -20,18 +20,22 @@ function computeCacheKey({ left, right, options, leftDocs, rightDocs }) {
    return sha256(JSON.stringify(payload));
  }
 
- export async function buildComparisonPrompt(left, right, options) {
+ export async function buildComparisonPrompt(left, right, options, onStatus) {
    const includeDocs = options.includeDocs !== false;
+   const status = typeof onStatus === "function" ? onStatus : () => { };
 
-   const [leftInfo, rightInfo] = includeDocs
-     ? await Promise.all([
-         collectDocs(left, { maxChars: options.maxDocChars, debug: options.debug }),
-         collectDocs(right, { maxChars: options.maxDocChars, debug: options.debug }),
-       ])
-     : [
-         { docs: "", sources: [], skipped: "docs disabled" },
-         { docs: "", sources: [], skipped: "docs disabled" },
-       ];
+   let leftInfo, rightInfo;
+
+   if (includeDocs) {
+     status(`Reading local docs for ${left}`);
+     leftInfo = await collectDocs(left, { maxChars: options.maxDocChars, debug: options.debug });
+
+     status(`Reading local docs for ${right}`);
+     rightInfo = await collectDocs(right, { maxChars: options.maxDocChars, debug: options.debug });
+   } else {
+     leftInfo = { docs: "", sources: [], skipped: "docs disabled" };
+     rightInfo = { docs: "", sources: [], skipped: "docs disabled" };
+   }
 
    const leftDocs = leftInfo.docs || "";
    const rightDocs = rightInfo.docs || "";
@@ -48,13 +52,15 @@ export async function buildComparisonPrompt(left, right, options) {
    return { prompt, leftInfo, rightInfo, leftDocs, rightDocs };
  }
 
- export async function runComparison(left, right, options) {
+ export async function runComparison(left, right, options, onStatus) {
    const t0 = hrtimeMs();
+   const status = typeof onStatus === "function" ? onStatus : () => { };
 
    const { prompt, leftInfo, rightInfo, leftDocs, rightDocs } = await buildComparisonPrompt(
      left,
      right,
-     options
+     options,
+     onStatus
    );
 
    const key = computeCacheKey({ left, right, options, leftDocs, rightDocs });
@@ -92,6 +98,10 @@ export async function runComparison(left, right, options) {
      }
    }
 
+   // Show which backend we're querying
+   const backendName = options.backend === "auto" ? "LLM" : options.backend;
+   status(`Querying ${backendName}`);
+
    const tLLM0 = hrtimeMs();
    const gen = await generateText({
      backend: options.backend,
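
Because `status` falls back to a no-op when `onStatus` is not a function, existing callers of `runComparison(left, right, options)` keep working unchanged. A sketch of a caller that surfaces the new progress messages (import path assumed):

```js
import { runComparison } from "./src/engine.js";

// The fourth argument is optional; omitting it silently disables progress updates.
const result = await runComparison("curl", "wget", { backend: "mock" }, (msg) => {
  console.error(`status: ${msg}`); // e.g. "Reading local docs for curl", "Querying mock"
});
```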
package/src/introspect.js CHANGED
@@ -30,13 +30,13 @@ function runCommand(cmd, args, { timeoutMs = DEFAULT_TIMEOUT_MS } = {}) {
    const killTimer =
      timeoutMs && timeoutMs > 0
        ? setTimeout(() => {
-           if (done) return;
-           done = true;
-           try {
-             child.kill("SIGKILL");
-           } catch {}
-           resolve({ ok: false, stdout, stderr, timedOut: true });
-         }, timeoutMs)
+         if (done) return;
+         done = true;
+         try {
+           child.kill("SIGKILL");
+         } catch { }
+         resolve({ ok: false, stdout, stderr, timedOut: true });
+       }, timeoutMs)
        : null;
 
    child.stdout.on("data", (d) => (stdout += d.toString("utf8")));
@@ -0,0 +1,77 @@
+ import test from "node:test";
+ import assert from "node:assert/strict";
+ import { spawn } from "node:child_process";
+ import path from "node:path";
+ import { fileURLToPath } from "node:url";
+
+ const __dirname = path.dirname(fileURLToPath(import.meta.url));
+ const binPath = path.join(__dirname, "..", "bin", "versus.js");
+
+ function runCLI(args, { timeout = 5000 } = {}) {
+   return new Promise((resolve) => {
+     const child = spawn(process.execPath, [binPath, ...args], {
+       stdio: ["ignore", "pipe", "pipe"],
+       env: { ...process.env, NO_COLOR: "1" },
+     });
+
+     let stdout = "";
+     let stderr = "";
+     let done = false;
+
+     const timer = setTimeout(() => {
+       if (done) return;
+       done = true;
+       try {
+         child.kill("SIGKILL");
+       } catch { }
+       resolve({ stdout, stderr, code: null, timedOut: true });
+     }, timeout);
+
+     child.stdout.on("data", (d) => (stdout += d.toString("utf8")));
+     child.stderr.on("data", (d) => (stderr += d.toString("utf8")));
+
+     child.on("close", (code) => {
+       if (done) return;
+       done = true;
+       clearTimeout(timer);
+       resolve({ stdout, stderr, code, timedOut: false });
+     });
+
+     child.on("error", () => {
+       if (done) return;
+       done = true;
+       clearTimeout(timer);
+       resolve({ stdout, stderr, code: 1, timedOut: false });
+     });
+   });
+ }
+
+ test("CLI --help exits 0 and shows usage", async () => {
+   const { stdout, code } = await runCLI(["--help"]);
+   assert.equal(code, 0);
+   assert.match(stdout, /versus/i);
+   assert.match(stdout, /Compare/i);
+ });
+
+ test("CLI --version exits 0 and shows version", async () => {
+   const { stdout, code } = await runCLI(["--version"]);
+   assert.equal(code, 0);
+   assert.match(stdout, /\d+\.\d+\.\d+/);
+ });
+
+ test("CLI runs comparison with mock backend", async () => {
+   const { stdout, code } = await runCLI(["curl", "wget", "--backend", "mock", "--no-cache"]);
+   assert.equal(code, 0);
+   assert.match(stdout, /curl vs wget/i);
+   assert.match(stdout, /mock/i);
+ });
+
+ test("CLI status command exits 0", async () => {
+   const { code } = await runCLI(["status"]);
+   assert.equal(code, 0);
+ });
+
+ test("CLI cache command exits 0", async () => {
+   const { code } = await runCLI(["cache"]);
+   assert.equal(code, 0);
+ });
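
The tests above use only `node:test` and `node:assert/strict`, so they run under Node's built-in test runner; the package's actual test script is not shown in this diff, but an invocation along these lines should work (test directory assumed):

```bash
node --test test/
```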