bridgerapi 1.3.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +153 -0
  2. package/dist/cli.js +110 -221
  3. package/package.json +1 -1
package/README.md ADDED
@@ -0,0 +1,153 @@
1
+ # bridgerapi
2
+
3
+ Turn any AI CLI into an OpenAI-compatible API — no API keys, no billing.
4
+
5
+ ```
6
+ npm install -g bridgerapi
7
+ bridgerapi
8
+ ```
9
+
10
+ ---
11
+
12
+ ## What it does
13
+
14
+ bridgerapi runs a local HTTP server that speaks the OpenAI API format (`/v1/chat/completions`, `/v1/models`, `/health`). Any app that supports OpenAI-compatible endpoints — Goose, Cursor, Continue, Open WebUI, and others — can point at it and use whichever AI CLI you have authenticated on your machine.
15
+
16
+ You keep using the free tier or the subscription you already pay for. No Anthropic API key, no OpenAI API key, no extra cost.
17
+
18
+ ---
19
+
20
+ ## Supported backends
21
+
22
+ | CLI | Install | Auth |
23
+ |-----|---------|------|
24
+ | **Claude Code** | [claude.ai/download](https://claude.ai/download) | `claude login` |
25
+ | **Gemini CLI** | `npm i -g @google/gemini-cli` | `gemini auth` |
26
+ | **Codex CLI** | `npm i -g @openai/codex` | `codex auth` |
27
+ | **GitHub Copilot** | `gh extension install github/gh-copilot` | `gh auth login` |
28
+ | **Droid (Factory.ai)** | [factory.ai](https://factory.ai) | `export FACTORY_API_KEY=fk-...` |
29
+
30
+ Install any one of these and bridgerapi will pick it up automatically.
31
+
32
+ ---
33
+
34
+ ## Usage
35
+
36
+ ### Interactive setup (recommended)
37
+
38
+ ```
39
+ bridgerapi
40
+ ```
41
+
42
+ Detects installed backends, asks for a port, and gives you the choice to run in the foreground or install as a background service that auto-starts on login.
43
+
44
+ ### Manual commands
45
+
46
+ ```
47
+ bridgerapi start # run in foreground (default port 8082)
48
+ bridgerapi start --port 9000 # custom port
49
+ bridgerapi install # install as background service
50
+ bridgerapi uninstall # remove background service
51
+ bridgerapi status # check if running
52
+ bridgerapi chat # interactive chat session in terminal
53
+ ```
54
+
55
+ ### Chat mode
56
+
57
+ ```
58
+ bridgerapi chat
59
+ ```
60
+
61
+ Opens an interactive REPL. Type messages, get streamed responses. Keeps conversation history across turns.
62
+
63
+ ---
64
+
65
+ ## Connecting an app
66
+
67
+ Once running, point any OpenAI-compatible app at:
68
+
69
+ | Setting | Value |
70
+ |---------|-------|
71
+ | **Base URL** | `http://127.0.0.1:8082/v1` |
72
+ | **API Key** | `local` (any non-empty string) |
73
+ | **Model** | any model your CLI supports |
74
+
75
+ ### Goose
76
+
77
+ ```yaml
78
+ # ~/.config/goose/config.yaml
79
+ GOOSE_PROVIDER: openai
80
+ GOOSE_MODEL: claude-sonnet-4-6
81
+ OPENAI_HOST: http://127.0.0.1:8082
82
+ ```
83
+
84
+ ### Continue (VS Code / JetBrains)
85
+
86
+ ```json
87
+ {
88
+ "models": [{
89
+ "title": "bridgerapi",
90
+ "provider": "openai",
91
+ "model": "claude-sonnet-4-6",
92
+ "apiBase": "http://127.0.0.1:8082/v1",
93
+ "apiKey": "local"
94
+ }]
95
+ }
96
+ ```
97
+
98
+ ### Open WebUI
99
+
100
+ Set **OpenAI API Base URL** to `http://127.0.0.1:8082/v1` and **API Key** to `local`.
101
+
102
+ ---
103
+
104
+ ## How it works
105
+
106
+ ```
107
+ Your app → POST /v1/chat/completions
108
+
109
+ bridgerapi (local HTTP server)
110
+
111
+ claude / gemini / codex / gh copilot (subprocess)
112
+
113
+ streamed response back to your app
114
+ ```
115
+
116
+ bridgerapi converts OpenAI message format to a plain prompt, spawns the appropriate CLI as a subprocess using your existing auth, and streams the output back as SSE — exactly what the OpenAI streaming format expects.
117
+
118
+ Model routing is automatic by prefix:
119
+
120
+ - `claude-*` → Claude Code CLI
121
+ - `gemini-*` → Gemini CLI
122
+ - `gpt-*`, `o1`, `o3`, `o4` → Codex CLI
123
+ - `copilot` → GitHub Copilot CLI
124
+ - `glm-*`, `kimi-*`, `minimax-*`, `droid` → Droid CLI (Factory.ai)
125
+
126
+ If the requested backend isn't available, it falls back to the first one that is.
127
+
128
+ ---
129
+
130
+ ## Background service
131
+
132
+ On macOS, `bridgerapi install` creates a LaunchAgent that starts automatically on login and restarts if it crashes. Logs go to `~/.bridgerapi/server.log`.
133
+
134
+ On Linux, it creates a systemd user service (`systemctl --user`).
135
+
136
+ ```
137
+ bridgerapi install # installs and starts
138
+ bridgerapi status # check pid and port
139
+ bridgerapi uninstall # stops and removes
140
+ ```
141
+
142
+ ---
143
+
144
+ ## Requirements
145
+
146
+ - Node.js 18+
147
+ - At least one AI CLI installed and authenticated
148
+
149
+ ---
150
+
151
+ ## License
152
+
153
+ MIT — [teodorwaltervido](https://github.com/teodorwaltervido)
package/dist/cli.js CHANGED
@@ -59,7 +59,6 @@ function messagesToPrompt(messages) {
59
59
  var import_child_process = require("child_process");
60
60
  var import_fs = require("fs");
61
61
  var import_os = require("os");
62
- var import_https = require("https");
63
62
  var HOME = (0, import_os.homedir)();
64
63
  function which(cmd2) {
65
64
  try {
@@ -68,45 +67,18 @@ function which(cmd2) {
68
67
  return "";
69
68
  }
70
69
  }
71
- function httpsGetJson(url, headers) {
72
- return new Promise((resolve, reject) => {
73
- const req = (0, import_https.request)(url, { headers }, (res) => {
74
- const chunks = [];
75
- res.on("data", (c) => chunks.push(c));
76
- res.on("end", () => {
77
- try {
78
- resolve(JSON.parse(Buffer.concat(chunks).toString()));
79
- } catch (e) {
80
- reject(e);
81
- }
82
- });
83
- });
84
- req.on("error", reject);
85
- req.setTimeout(6e3, () => {
86
- req.destroy();
87
- reject(new Error("timeout"));
88
- });
89
- req.end();
90
- });
91
- }
92
- async function* spawnStream(cmd2, args, stdin, env) {
70
+ async function* spawnStream(cmd2, args, stdin) {
93
71
  const proc = (0, import_child_process.spawn)(cmd2, args, {
94
- env: env ?? process.env,
72
+ env: process.env,
95
73
  stdio: ["pipe", "pipe", "pipe"]
96
74
  });
97
- if (stdin) {
98
- proc.stdin.end(stdin);
99
- }
100
- for await (const chunk2 of proc.stdout) {
101
- yield chunk2;
102
- }
75
+ if (stdin) proc.stdin.end(stdin);
76
+ for await (const chunk2 of proc.stdout) yield chunk2;
103
77
  }
104
- var CLAUDE_FALLBACK = ["claude-opus-4-6", "claude-sonnet-4-6", "claude-haiku-4-5"];
105
78
  var ClaudeBackend = class {
106
79
  constructor() {
107
80
  this.name = "claude";
108
81
  this.prefixes = ["claude"];
109
- this.models = [...CLAUDE_FALLBACK];
110
82
  }
111
83
  get bin() {
112
84
  return process.env.CLAUDE_BIN ?? `${HOME}/.local/bin/claude`;
@@ -114,20 +86,6 @@ var ClaudeBackend = class {
114
86
  available() {
115
87
  return (0, import_fs.existsSync)(this.bin) || Boolean(which("claude"));
116
88
  }
117
- async fetchLiveModels() {
118
- const key = process.env.ANTHROPIC_API_KEY;
119
- if (!key) return [...CLAUDE_FALLBACK];
120
- try {
121
- const data = await httpsGetJson("https://api.anthropic.com/v1/models", {
122
- "x-api-key": key,
123
- "anthropic-version": "2023-06-01"
124
- });
125
- const ids = (data.data ?? []).map((m) => String(m.id)).filter((id) => id.startsWith("claude-"));
126
- return ids.length ? ids : [...CLAUDE_FALLBACK];
127
- } catch {
128
- return [...CLAUDE_FALLBACK];
129
- }
130
- }
131
89
  async runBlocking(prompt, model2) {
132
90
  const bin = which("claude") || this.bin;
133
91
  let out;
@@ -148,12 +106,10 @@ var ClaudeBackend = class {
148
106
  yield* spawnStream(bin, ["-p", "--output-format", "text", "--model", model2], prompt);
149
107
  }
150
108
  };
151
- var GEMINI_FALLBACK = ["gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.0-flash", "gemini-1.5-pro"];
152
109
  var GeminiBackend = class {
153
110
  constructor() {
154
111
  this.name = "gemini";
155
112
  this.prefixes = ["gemini"];
156
- this.models = [...GEMINI_FALLBACK];
157
113
  }
158
114
  get bin() {
159
115
  return process.env.GEMINI_BIN ?? which("gemini") ?? "/opt/homebrew/bin/gemini";
@@ -161,61 +117,39 @@ var GeminiBackend = class {
161
117
  available() {
162
118
  return Boolean(which("gemini")) || (0, import_fs.existsSync)(this.bin);
163
119
  }
164
- async fetchLiveModels() {
165
- const key = process.env.GEMINI_API_KEY;
166
- if (!key) return [...GEMINI_FALLBACK];
167
- try {
168
- const data = await httpsGetJson(
169
- `https://generativelanguage.googleapis.com/v1beta/models?key=${key}&pageSize=50`,
170
- {}
171
- );
172
- const ids = (data.models ?? []).map((m) => String(m.name).replace("models/", "")).filter((id) => /^gemini-/.test(id) && !id.includes("embedding") && !id.includes("aqa"));
173
- return ids.length ? ids : [...GEMINI_FALLBACK];
174
- } catch {
175
- return [...GEMINI_FALLBACK];
176
- }
177
- }
178
120
  async runBlocking(prompt, model2) {
179
121
  const bin = which("gemini") || this.bin;
180
122
  let out;
181
123
  try {
182
- out = (0, import_child_process.execFileSync)(
183
- bin,
184
- ["--output-format", "json", "--model", model2, "--approval-mode", "yolo"],
185
- { input: prompt, encoding: "utf8", timeout: 3e5, env: process.env }
186
- );
124
+ out = (0, import_child_process.execFileSync)(bin, ["--output-format", "json", "--model", model2, "--approval-mode", "yolo"], {
125
+ input: prompt,
126
+ encoding: "utf8",
127
+ timeout: 3e5,
128
+ env: process.env
129
+ });
187
130
  } catch (e) {
188
131
  const err = e.stderr?.trim() ?? "";
189
- if (/auth|login|sign.?in/i.test(err)) {
132
+ if (/auth|login|sign.?in/i.test(err))
190
133
  throw new Error(`Gemini not authenticated. Run: gemini auth OR export GEMINI_API_KEY=<key>`);
191
- }
192
134
  throw new Error(err || `gemini exited non-zero`);
193
135
  }
194
136
  const raw = out.trim();
195
137
  try {
196
138
  const data = JSON.parse(raw);
197
- const text = String(data.response ?? data.result ?? data.text ?? raw);
198
- const usage = data.tokenCount ?? data.usage ?? null;
199
- return [text, usage];
139
+ return [String(data.response ?? data.result ?? data.text ?? raw), data.tokenCount ?? data.usage ?? null];
200
140
  } catch {
201
141
  return [raw, null];
202
142
  }
203
143
  }
204
144
  async *stream(prompt, model2) {
205
145
  const bin = which("gemini") || this.bin;
206
- yield* spawnStream(
207
- bin,
208
- ["--output-format", "text", "--model", model2, "--approval-mode", "yolo"],
209
- prompt
210
- );
146
+ yield* spawnStream(bin, ["--output-format", "text", "--model", model2, "--approval-mode", "yolo"], prompt);
211
147
  }
212
148
  };
213
- var CODEX_FALLBACK = ["o3", "o4-mini", "gpt-4.1", "gpt-4o", "gpt-4o-mini"];
214
149
  var CodexBackend = class {
215
150
  constructor() {
216
151
  this.name = "codex";
217
152
  this.prefixes = ["gpt", "o3", "o4", "o1"];
218
- this.models = [...CODEX_FALLBACK];
219
153
  }
220
154
  get bin() {
221
155
  return process.env.CODEX_BIN ?? which("codex") ?? "codex";
@@ -223,20 +157,6 @@ var CodexBackend = class {
223
157
  available() {
224
158
  return Boolean(which("codex"));
225
159
  }
226
- async fetchLiveModels() {
227
- const key = process.env.OPENAI_API_KEY;
228
- if (!key) return [...CODEX_FALLBACK];
229
- try {
230
- const data = await httpsGetJson("https://api.openai.com/v1/models", {
231
- Authorization: `Bearer ${key}`
232
- });
233
- const EXCLUDE = /instruct|audio|realtime|transcribe|tts|image|search|embed|diariz|whisper|babbage|davinci|curie|ada/i;
234
- const ids = (data.data ?? []).map((m) => String(m.id)).filter((id) => /^(gpt-[^i]|o[0-9])/.test(id) && !EXCLUDE.test(id)).sort();
235
- return ids.length ? ids : [...CODEX_FALLBACK];
236
- } catch {
237
- return [...CODEX_FALLBACK];
238
- }
239
- }
240
160
  async runBlocking(prompt, model2) {
241
161
  let out;
242
162
  try {
@@ -257,7 +177,6 @@ var CopilotBackend = class {
257
177
  constructor() {
258
178
  this.name = "copilot";
259
179
  this.prefixes = ["copilot", "github-copilot"];
260
- this.models = ["copilot", "github-copilot"];
261
180
  }
262
181
  get bin() {
263
182
  return process.env.GH_BIN ?? which("gh") ?? "gh";
@@ -271,9 +190,6 @@ var CopilotBackend = class {
271
190
  return false;
272
191
  }
273
192
  }
274
- async fetchLiveModels() {
275
- return this.models;
276
- }
277
193
  async runBlocking(prompt, model2) {
278
194
  let out;
279
195
  try {
@@ -290,32 +206,51 @@ var CopilotBackend = class {
290
206
  yield* spawnStream(this.bin, ["copilot", "suggest", "-t", "general", prompt]);
291
207
  }
292
208
  };
209
+ var DroidBackend = class {
210
+ constructor() {
211
+ this.name = "droid";
212
+ // Route Droid-exclusive model families + explicit "droid" prefix
213
+ this.prefixes = ["droid", "glm", "kimi", "minimax"];
214
+ }
215
+ get bin() {
216
+ return process.env.DROID_BIN ?? which("droid") ?? `${HOME}/.local/bin/droid`;
217
+ }
218
+ available() {
219
+ return (0, import_fs.existsSync)(this.bin) || Boolean(which("droid"));
220
+ }
221
+ async runBlocking(prompt, model2) {
222
+ const bin = which("droid") || this.bin;
223
+ let out;
224
+ try {
225
+ out = (0, import_child_process.execFileSync)(bin, ["exec", "--output-format", "text", "--model", model2, "-"], {
226
+ input: prompt,
227
+ encoding: "utf8",
228
+ timeout: 3e5
229
+ });
230
+ } catch (e) {
231
+ throw new Error(e.stderr?.trim() || `droid exited non-zero`);
232
+ }
233
+ return [out.trim(), null];
234
+ }
235
+ async *stream(prompt, model2) {
236
+ const bin = which("droid") || this.bin;
237
+ yield* spawnStream(bin, ["exec", "--output-format", "text", "--model", model2, "-"], prompt);
238
+ }
239
+ };
293
240
  var BACKENDS = [
294
241
  new ClaudeBackend(),
295
242
  new GeminiBackend(),
296
243
  new CodexBackend(),
297
- new CopilotBackend()
244
+ new CopilotBackend(),
245
+ new DroidBackend()
298
246
  ];
299
247
  function pickBackend(model2) {
300
248
  const m = model2.toLowerCase();
301
249
  for (const b of BACKENDS) {
302
- if (b.prefixes.some((p) => m.startsWith(p))) {
303
- if (b.available()) return b;
304
- }
250
+ if (b.prefixes.some((p) => m.startsWith(p)) && b.available()) return b;
305
251
  }
306
252
  return BACKENDS.find((b) => b.available()) ?? BACKENDS[0];
307
253
  }
308
- function allModels() {
309
- return BACKENDS.filter((b) => b.available()).flatMap((b) => [...b.models]);
310
- }
311
- async function refreshModels() {
312
- const available = BACKENDS.filter((b) => b.available());
313
- await Promise.all(
314
- available.map(async (b) => {
315
- b.models = await b.fetchLiveModels();
316
- })
317
- );
318
- }
319
254
 
320
255
  // src/server.ts
321
256
  function sse(data) {
@@ -368,9 +303,15 @@ async function readBody(req) {
368
303
  }
369
304
  function handleModels(res) {
370
305
  const ts = Math.floor(Date.now() / 1e3);
306
+ const available = BACKENDS.filter((b) => b.available());
371
307
  sendJson(res, 200, {
372
308
  object: "list",
373
- data: allModels().map((id) => ({ id, object: "model", created: ts, owned_by: "bridge" }))
309
+ data: available.map((b) => ({
310
+ id: b.name,
311
+ object: "model",
312
+ created: ts,
313
+ owned_by: "bridgerapi"
314
+ }))
374
315
  });
375
316
  }
376
317
  function handleHealth(res, port2) {
@@ -613,6 +554,13 @@ var import_path2 = require("path");
613
554
  var import_readline = require("readline");
614
555
  var PORT = parseInt(process.env.BRIDGERAPI_PORT ?? "8082");
615
556
  var LOG_DIR = (0, import_path2.join)((0, import_os3.homedir)(), ".bridgerapi");
557
+ var INSTALL_HINTS = {
558
+ claude: "claude login (Claude Code \u2014 claude.ai/download)",
559
+ gemini: "gemini auth (Gemini CLI \u2014 npm i -g @google/gemini-cli)",
560
+ codex: "codex auth (Codex CLI \u2014 npm i -g @openai/codex)",
561
+ copilot: "gh auth login (GitHub Copilot \u2014 gh extension install github/gh-copilot)",
562
+ droid: "export FACTORY_API_KEY=fk-... (Droid \u2014 factory.ai)"
563
+ };
616
564
  function ask(question) {
617
565
  const rl = (0, import_readline.createInterface)({ input: process.stdin, output: process.stdout });
618
566
  return new Promise((resolve) => {
@@ -622,96 +570,55 @@ function ask(question) {
622
570
  });
623
571
  });
624
572
  }
625
- var INSTALL_HINTS = {
626
- claude: "Install Claude Code: https://claude.ai/download \u2192 then sign in with: claude login",
627
- gemini: "Install Gemini CLI: npm install -g @google/gemini-cli \u2192 then: gemini auth",
628
- codex: "Install Codex CLI: npm install -g @openai/codex \u2192 then: codex auth",
629
- copilot: "Install Copilot: gh extension install github/gh-copilot \u2192 then: gh auth login"
630
- };
631
573
  async function cmdSetup() {
632
574
  console.log();
633
575
  console.log(" bridgerapi \u2014 OpenAI-compatible API bridge for AI CLI tools");
634
576
  console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
635
577
  console.log();
636
- console.log(" Checking installed backends\u2026");
637
- await refreshModels();
638
- console.log();
639
578
  const available = BACKENDS.filter((b) => b.available());
640
579
  const missing = BACKENDS.filter((b) => !b.available());
580
+ console.log(" Backends detected:\n");
641
581
  for (const b of BACKENDS) {
642
582
  const ok = b.available();
643
- const icon = ok ? "\u2713" : "\u2717";
644
- const note = ok ? b.models.join(", ") : "not found";
645
- console.log(` ${icon} ${b.name.padEnd(10)} ${note}`);
583
+ console.log(` ${ok ? "\u2713" : "\u2717"} ${b.name}`);
584
+ if (!ok) console.log(` \u2192 ${INSTALL_HINTS[b.name]}`);
646
585
  }
647
586
  console.log();
648
587
  if (available.length === 0) {
649
- console.log(" No CLI backends found. Install at least one:\n");
650
- for (const b of missing) console.log(` ${INSTALL_HINTS[b.name]}`);
651
- console.log();
652
- console.log(" Then re-run: bridgerapi");
588
+ console.log(" No backends found. Install at least one CLI above, then re-run: bridgerapi");
653
589
  process.exit(1);
654
590
  }
655
- if (missing.length) {
656
- console.log(" Optional \u2014 to enable missing backends:");
657
- for (const b of missing) console.log(` ${INSTALL_HINTS[b.name]}`);
658
- console.log();
659
- }
660
591
  const portAnswer = await ask(` Port [${PORT}]: `);
661
592
  const port2 = portAnswer ? parseInt(portAnswer) || PORT : PORT;
662
593
  console.log();
663
594
  console.log(" How do you want to run bridgerapi?");
664
- console.log(" 1 Start now in the foreground (stops when you close terminal)");
665
- console.log(" 2 Install as a background service (auto-starts on login)");
595
+ console.log(" 1 Foreground (stops when terminal closes)");
596
+ console.log(" 2 Background service (auto-starts on login)");
666
597
  console.log();
667
- const modeAnswer = await ask(" Choose [1/2]: ");
668
- const mode = modeAnswer === "2" ? "install" : "start";
598
+ const choice = await ask(" Choose [1/2]: ");
669
599
  console.log();
670
- if (mode === "install") {
600
+ if (choice === "2") {
671
601
  cmdInstall(port2);
672
602
  } else {
673
603
  cmdStart(port2);
674
604
  }
675
605
  }
676
- function parseArgs() {
677
- const args = process.argv.slice(2);
678
- const cmd2 = args[0] ?? "";
679
- let port2 = PORT;
680
- let model2;
681
- for (let i = 1; i < args.length; i++) {
682
- if ((args[i] === "--port" || args[i] === "-p") && args[i + 1]) {
683
- port2 = parseInt(args[++i]);
684
- } else if ((args[i] === "--model" || args[i] === "-m") && args[i + 1]) {
685
- model2 = args[++i];
686
- }
687
- }
688
- return { cmd: cmd2, port: port2, model: model2 };
689
- }
690
606
  function cmdStart(port2) {
691
607
  (0, import_fs3.mkdirSync)(LOG_DIR, { recursive: true });
692
608
  const available = BACKENDS.filter((b) => b.available());
693
- const unavailable = BACKENDS.filter((b) => !b.available());
694
- console.log(` bridgerapi \u2192 http://127.0.0.1:${port2}`);
695
- console.log(` backends : ${available.map((b) => b.name).join(", ") || "none!"}`);
696
- if (unavailable.length) {
697
- console.log(` missing : ${unavailable.map((b) => b.name).join(", ")}`);
698
- }
699
- console.log(` logs : ${LOG_DIR}/server.log`);
700
- console.log();
701
609
  if (available.length === 0) {
702
- console.error(" Error: no CLI backends found. Run: bridgerapi to see setup instructions.");
610
+ console.error(" No CLI backends found. Run: bridgerapi to see setup instructions.");
703
611
  process.exit(1);
704
612
  }
705
613
  const server = createBridgeServer(port2);
706
614
  server.listen(port2, "127.0.0.1", () => {
707
- console.log(` GET /v1/models`);
708
- console.log(` POST /v1/chat/completions (streaming + blocking)`);
709
- console.log(` GET /health`);
615
+ console.log(` bridgerapi is running`);
616
+ console.log();
617
+ console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
618
+ console.log(` API Key : local`);
710
619
  console.log();
711
- console.log(" OpenAI-compatible config:");
712
- console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
713
- console.log(` API Key : local`);
714
- console.log(` Model : ${available[0].models[0]}`);
620
+ console.log(` Backends : ${available.map((b) => b.name).join(", ")}`);
621
+ console.log(` Logs : ${LOG_DIR}/server.log`);
715
622
  console.log();
716
623
  console.log(" Ctrl+C to stop.");
717
624
  });
@@ -738,12 +645,10 @@ function cmdInstall(port2) {
738
645
  if (res.statusCode === 200) {
739
646
  clearInterval(poll);
740
647
  console.log();
741
- console.log(` \u2713 bridgerapi is running on http://127.0.0.1:${port2}`);
648
+ console.log(` bridgerapi is running`);
742
649
  console.log();
743
- console.log(" OpenAI-compatible config:");
744
- console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
745
- console.log(` API Key : local`);
746
- console.log(` Model : ${allModels()[0] ?? "claude-sonnet-4-6"}`);
650
+ console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
651
+ console.log(` API Key : local`);
747
652
  console.log();
748
653
  console.log(` Logs : tail -f ${LOG_DIR}/server.log`);
749
654
  console.log(` Stop : bridgerapi uninstall`);
@@ -776,31 +681,14 @@ function cmdUninstall() {
776
681
  function cmdStatus(port2) {
777
682
  const { running, pid } = serviceStatus();
778
683
  if (running) {
779
- console.log(` \u2713 bridgerapi is running${pid ? ` (pid ${pid})` : ""} on port ${port2}`);
684
+ console.log(` bridgerapi is running${pid ? ` (pid ${pid})` : ""}`);
685
+ console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
686
+ console.log(` API Key : local`);
780
687
  } else {
781
688
  console.log(" bridgerapi is not running.");
782
- console.log(" Run: bridgerapi \u2192 interactive setup");
783
- console.log(" Run: bridgerapi start \u2192 start in foreground");
784
- console.log(" Run: bridgerapi install \u2192 install background service");
785
- }
786
- }
787
- async function cmdBackends() {
788
- process.stdout.write("\n Fetching live model lists\u2026");
789
- await refreshModels();
790
- process.stdout.write(" done.\n\n CLI backends:\n\n");
791
- for (const b of BACKENDS) {
792
- const ok = b.available();
793
- const icon = ok ? "\u2713" : "\u2717";
794
- const models = ok ? b.models.join(", ") : "(not installed)";
795
- console.log(` ${icon} ${b.name.padEnd(10)} ${models}`);
796
- if (!ok) console.log(` ${INSTALL_HINTS[b.name]}`);
797
- }
798
- console.log();
799
- const available = BACKENDS.filter((b) => b.available());
800
- if (available.length) {
801
- console.log(` All available models:
802
- ${allModels().join(", ")}
803
- `);
689
+ console.log(" Run: bridgerapi \u2192 setup wizard");
690
+ console.log(" Run: bridgerapi start \u2192 start in foreground");
691
+ console.log(" Run: bridgerapi install \u2192 install background service");
804
692
  }
805
693
  }
806
694
  async function cmdChat(model2) {
@@ -809,15 +697,19 @@ async function cmdChat(model2) {
809
697
  console.error(" No backends found. Run: bridgerapi to see setup instructions.");
810
698
  process.exit(1);
811
699
  }
812
- const resolvedModel = model2 ?? available[0].models[0];
700
+ const resolvedModel = model2 ?? `${available[0].name}-default`;
813
701
  const backend = pickBackend(resolvedModel);
814
702
  console.log();
815
- console.log(` bridgerapi chat \u2014 ${backend.name} \u2014 ${resolvedModel}`);
816
- console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
817
- console.log(" Type your message and press Enter. Ctrl+C to exit.");
703
+ console.log(` bridgerapi chat \u2014 ${backend.name}`);
704
+ console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
705
+ console.log(" Type a message and press Enter. Ctrl+C to exit.");
818
706
  console.log();
819
707
  const history = [];
820
708
  const rl = (0, import_readline.createInterface)({ input: process.stdin, output: process.stdout });
709
+ rl.on("close", () => {
710
+ console.log("\n Goodbye.");
711
+ process.exit(0);
712
+ });
821
713
  const prompt = () => {
822
714
  rl.question("You: ", async (input) => {
823
715
  const text = input.trim();
@@ -844,10 +736,6 @@ async function cmdChat(model2) {
844
736
  prompt();
845
737
  });
846
738
  };
847
- rl.on("close", () => {
848
- console.log("\n Goodbye.");
849
- process.exit(0);
850
- });
851
739
  prompt();
852
740
  }
853
741
  function showHelp() {
@@ -855,21 +743,25 @@ function showHelp() {
855
743
  bridgerapi \u2014 OpenAI-compatible API bridge for AI CLI tools
856
744
 
857
745
  Usage:
858
- bridgerapi Interactive setup wizard
859
- bridgerapi chat [--model <name>] Interactive chat session in terminal
860
- bridgerapi start [--port n] Start API server in the foreground
861
- bridgerapi install [--port n] Install as a background service
862
- bridgerapi uninstall Remove background service
863
- bridgerapi status Show service status
864
- bridgerapi backends List detected backends
865
-
866
- Supported backends (auto-detected):
867
- claude-* \u2192 Claude Code CLI (claude login)
868
- gemini-* \u2192 Gemini CLI (gemini auth)
869
- gpt-*, o3 \u2192 Codex CLI (codex auth)
870
- copilot \u2192 GitHub Copilot (gh auth login)
746
+ bridgerapi Interactive setup wizard
747
+ bridgerapi chat [--model m] Chat directly in the terminal
748
+ bridgerapi start [--port n] Start API server in the foreground
749
+ bridgerapi install [--port n] Install as a background service
750
+ bridgerapi uninstall Remove background service
751
+ bridgerapi status Show service status
871
752
  `.trim());
872
753
  }
754
+ function parseArgs() {
755
+ const args = process.argv.slice(2);
756
+ const cmd2 = args[0] ?? "";
757
+ let port2 = PORT;
758
+ let model2;
759
+ for (let i = 1; i < args.length; i++) {
760
+ if ((args[i] === "--port" || args[i] === "-p") && args[i + 1]) port2 = parseInt(args[++i]);
761
+ if ((args[i] === "--model" || args[i] === "-m") && args[i + 1]) model2 = args[++i];
762
+ }
763
+ return { cmd: cmd2, port: port2, model: model2 };
764
+ }
873
765
  var { cmd, port, model } = parseArgs();
874
766
  switch (cmd) {
875
767
  case "":
@@ -891,9 +783,6 @@ switch (cmd) {
891
783
  case "status":
892
784
  cmdStatus(port);
893
785
  break;
894
- case "backends":
895
- cmdBackends().catch(console.error);
896
- break;
897
786
  case "help":
898
787
  case "--help":
899
788
  case "-h":
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "bridgerapi",
3
- "version": "1.3.0",
3
+ "version": "1.5.0",
4
4
  "description": "Turn any AI CLI (Claude Code, Gemini, Codex, GitHub Copilot) into an OpenAI-compatible API — no API keys needed",
5
5
  "keywords": [
6
6
  "claude",