bridgerapi 1.2.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +153 -0
  2. package/dist/cli.js +110 -140
  3. package/package.json +1 -1
package/README.md ADDED
@@ -0,0 +1,153 @@
1
+ # bridgerapi
2
+
3
+ Turn any AI CLI into an OpenAI-compatible API — no API keys, no billing.
4
+
5
+ ```
6
+ npm install -g bridgerapi
7
+ bridgerapi
8
+ ```
9
+
10
+ ---
11
+
12
+ ## What it does
13
+
14
+ bridgerapi runs a local HTTP server that speaks the OpenAI API format (`/v1/chat/completions`, `/v1/models`, `/health`). Any app that supports OpenAI-compatible endpoints — Goose, Cursor, Continue, Open WebUI, and others — can point at it and use whichever AI CLI you have authenticated on your machine.
15
+
16
+ You keep using the free tier or the subscription you already pay for. No Anthropic API key, no OpenAI API key, no extra cost.
17
+
18
+ ---
19
+
20
+ ## Supported backends
21
+
22
+ | CLI | Install | Auth |
23
+ |-----|---------|------|
24
+ | **Claude Code** | [claude.ai/download](https://claude.ai/download) | `claude login` |
25
+ | **Gemini CLI** | `npm i -g @google/gemini-cli` | `gemini auth` |
26
+ | **Codex CLI** | `npm i -g @openai/codex` | `codex auth` |
27
+ | **GitHub Copilot** | `gh extension install github/gh-copilot` | `gh auth login` |
28
+ | **Droid (Factory.ai)** | [factory.ai](https://factory.ai) | `export FACTORY_API_KEY=fk-...` |
29
+
30
+ Install any one of these and bridgerapi will pick it up automatically.
31
+
32
+ ---
33
+
34
+ ## Usage
35
+
36
+ ### Interactive setup (recommended)
37
+
38
+ ```
39
+ bridgerapi
40
+ ```
41
+
42
+ Detects installed backends, asks for a port, and gives you the choice to run in the foreground or install as a background service that auto-starts on login.
43
+
44
+ ### Manual commands
45
+
46
+ ```
47
+ bridgerapi start # run in foreground (default port 8082)
48
+ bridgerapi start --port 9000 # custom port
49
+ bridgerapi install # install as background service
50
+ bridgerapi uninstall # remove background service
51
+ bridgerapi status # check if running
52
+ bridgerapi chat # interactive chat session in terminal
53
+ ```
54
+
55
+ ### Chat mode
56
+
57
+ ```
58
+ bridgerapi chat
59
+ ```
60
+
61
+ Opens an interactive REPL. Type messages, get streamed responses. Keeps conversation history across turns.
62
+
63
+ ---
64
+
65
+ ## Connecting an app
66
+
67
+ Once running, point any OpenAI-compatible app at:
68
+
69
+ | Setting | Value |
70
+ |---------|-------|
71
+ | **Base URL** | `http://127.0.0.1:8082/v1` |
72
+ | **API Key** | `local` (any non-empty string) |
73
+ | **Model** | any model your CLI supports |
74
+
75
+ ### Goose
76
+
77
+ ```yaml
78
+ # ~/.config/goose/config.yaml
79
+ GOOSE_PROVIDER: openai
80
+ GOOSE_MODEL: claude-sonnet-4-6
81
+ OPENAI_HOST: http://127.0.0.1:8082
82
+ ```
83
+
84
+ ### Continue (VS Code / JetBrains)
85
+
86
+ ```json
87
+ {
88
+ "models": [{
89
+ "title": "bridgerapi",
90
+ "provider": "openai",
91
+ "model": "claude-sonnet-4-6",
92
+ "apiBase": "http://127.0.0.1:8082/v1",
93
+ "apiKey": "local"
94
+ }]
95
+ }
96
+ ```
97
+
98
+ ### Open WebUI
99
+
100
+ Set **OpenAI API Base URL** to `http://127.0.0.1:8082/v1` and **API Key** to `local`.
101
+
102
+ ---
103
+
104
+ ## How it works
105
+
106
+ ```
107
+ Your app → POST /v1/chat/completions
108
+
109
+ bridgerapi (local HTTP server)
110
+
111
+ claude / gemini / codex / gh copilot (subprocess)
112
+
113
+ streamed response back to your app
114
+ ```
115
+
116
+ bridgerapi converts OpenAI message format to a plain prompt, spawns the appropriate CLI as a subprocess using your existing auth, and streams the output back as SSE — exactly what the OpenAI streaming format expects.
117
+
118
+ Model routing is automatic by prefix:
119
+
120
+ - `claude-*` → Claude Code CLI
121
+ - `gemini-*` → Gemini CLI
122
+ `gpt-*`, `o1*`, `o3*`, `o4*` → Codex CLI
123
+ `copilot`, `github-copilot` → GitHub Copilot CLI
124
+ - `glm-*`, `kimi-*`, `minimax-*`, `droid` → Droid CLI (Factory.ai)
125
+
126
+ If the requested backend isn't available, it falls back to the first one that is.
127
+
128
+ ---
129
+
130
+ ## Background service
131
+
132
+ On macOS, `bridgerapi install` creates a LaunchAgent that starts automatically on login and restarts if it crashes. Logs go to `~/.bridgerapi/server.log`.
133
+
134
+ On Linux, it creates a systemd user service (`systemctl --user`).
135
+
136
+ ```
137
+ bridgerapi install # installs and starts
138
+ bridgerapi status # check pid and port
139
+ bridgerapi uninstall # stops and removes
140
+ ```
141
+
142
+ ---
143
+
144
+ ## Requirements
145
+
146
+ - Node.js 18+
147
+ - At least one AI CLI installed and authenticated
148
+
149
+ ---
150
+
151
+ ## License
152
+
153
+ MIT — [teodorwaltervido](https://github.com/teodorwaltervido)
package/dist/cli.js CHANGED
@@ -67,22 +67,17 @@ function which(cmd2) {
67
67
  return "";
68
68
  }
69
69
  }
70
- async function* spawnStream(cmd2, args, stdin, env) {
70
+ async function* spawnStream(cmd2, args, stdin) {
71
71
  const proc = (0, import_child_process.spawn)(cmd2, args, {
72
- env: env ?? process.env,
72
+ env: process.env,
73
73
  stdio: ["pipe", "pipe", "pipe"]
74
74
  });
75
- if (stdin) {
76
- proc.stdin.end(stdin);
77
- }
78
- for await (const chunk2 of proc.stdout) {
79
- yield chunk2;
80
- }
75
+ if (stdin) proc.stdin.end(stdin);
76
+ for await (const chunk2 of proc.stdout) yield chunk2;
81
77
  }
82
78
  var ClaudeBackend = class {
83
79
  constructor() {
84
80
  this.name = "claude";
85
- this.models = ["claude-opus-4-6", "claude-sonnet-4-6", "claude-haiku-4-5"];
86
81
  this.prefixes = ["claude"];
87
82
  }
88
83
  get bin() {
@@ -114,7 +109,6 @@ var ClaudeBackend = class {
114
109
  var GeminiBackend = class {
115
110
  constructor() {
116
111
  this.name = "gemini";
117
- this.models = ["gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.0-flash", "gemini-1.5-pro"];
118
112
  this.prefixes = ["gemini"];
119
113
  }
120
114
  get bin() {
@@ -127,41 +121,34 @@ var GeminiBackend = class {
127
121
  const bin = which("gemini") || this.bin;
128
122
  let out;
129
123
  try {
130
- out = (0, import_child_process.execFileSync)(
131
- bin,
132
- ["--output-format", "json", "--model", model2, "--approval-mode", "yolo"],
133
- { input: prompt, encoding: "utf8", timeout: 3e5, env: process.env }
134
- );
124
+ out = (0, import_child_process.execFileSync)(bin, ["--output-format", "json", "--model", model2, "--approval-mode", "yolo"], {
125
+ input: prompt,
126
+ encoding: "utf8",
127
+ timeout: 3e5,
128
+ env: process.env
129
+ });
135
130
  } catch (e) {
136
131
  const err = e.stderr?.trim() ?? "";
137
- if (/auth|login|sign.?in/i.test(err)) {
132
+ if (/auth|login|sign.?in/i.test(err))
138
133
  throw new Error(`Gemini not authenticated. Run: gemini auth OR export GEMINI_API_KEY=<key>`);
139
- }
140
134
  throw new Error(err || `gemini exited non-zero`);
141
135
  }
142
136
  const raw = out.trim();
143
137
  try {
144
138
  const data = JSON.parse(raw);
145
- const text = String(data.response ?? data.result ?? data.text ?? raw);
146
- const usage = data.tokenCount ?? data.usage ?? null;
147
- return [text, usage];
139
+ return [String(data.response ?? data.result ?? data.text ?? raw), data.tokenCount ?? data.usage ?? null];
148
140
  } catch {
149
141
  return [raw, null];
150
142
  }
151
143
  }
152
144
  async *stream(prompt, model2) {
153
145
  const bin = which("gemini") || this.bin;
154
- yield* spawnStream(
155
- bin,
156
- ["--output-format", "text", "--model", model2, "--approval-mode", "yolo"],
157
- prompt
158
- );
146
+ yield* spawnStream(bin, ["--output-format", "text", "--model", model2, "--approval-mode", "yolo"], prompt);
159
147
  }
160
148
  };
161
149
  var CodexBackend = class {
162
150
  constructor() {
163
151
  this.name = "codex";
164
- this.models = ["o3", "o4-mini", "gpt-4.1", "gpt-4o"];
165
152
  this.prefixes = ["gpt", "o3", "o4", "o1"];
166
153
  }
167
154
  get bin() {
@@ -189,7 +176,6 @@ var CodexBackend = class {
189
176
  var CopilotBackend = class {
190
177
  constructor() {
191
178
  this.name = "copilot";
192
- this.models = ["copilot", "github-copilot"];
193
179
  this.prefixes = ["copilot", "github-copilot"];
194
180
  }
195
181
  get bin() {
@@ -220,24 +206,51 @@ var CopilotBackend = class {
220
206
  yield* spawnStream(this.bin, ["copilot", "suggest", "-t", "general", prompt]);
221
207
  }
222
208
  };
209
+ var DroidBackend = class {
210
+ constructor() {
211
+ this.name = "droid";
212
+ // Route Droid-exclusive model families + explicit "droid" prefix
213
+ this.prefixes = ["droid", "glm", "kimi", "minimax"];
214
+ }
215
+ get bin() {
216
+ return process.env.DROID_BIN ?? which("droid") ?? `${HOME}/.local/bin/droid`;
217
+ }
218
+ available() {
219
+ return (0, import_fs.existsSync)(this.bin) || Boolean(which("droid"));
220
+ }
221
+ async runBlocking(prompt, model2) {
222
+ const bin = which("droid") || this.bin;
223
+ let out;
224
+ try {
225
+ out = (0, import_child_process.execFileSync)(bin, ["exec", "--output-format", "text", "--model", model2, "-"], {
226
+ input: prompt,
227
+ encoding: "utf8",
228
+ timeout: 3e5
229
+ });
230
+ } catch (e) {
231
+ throw new Error(e.stderr?.trim() || `droid exited non-zero`);
232
+ }
233
+ return [out.trim(), null];
234
+ }
235
+ async *stream(prompt, model2) {
236
+ const bin = which("droid") || this.bin;
237
+ yield* spawnStream(bin, ["exec", "--output-format", "text", "--model", model2, "-"], prompt);
238
+ }
239
+ };
223
240
  var BACKENDS = [
224
241
  new ClaudeBackend(),
225
242
  new GeminiBackend(),
226
243
  new CodexBackend(),
227
- new CopilotBackend()
244
+ new CopilotBackend(),
245
+ new DroidBackend()
228
246
  ];
229
247
  function pickBackend(model2) {
230
248
  const m = model2.toLowerCase();
231
249
  for (const b of BACKENDS) {
232
- if (b.prefixes.some((p) => m.startsWith(p))) {
233
- if (b.available()) return b;
234
- }
250
+ if (b.prefixes.some((p) => m.startsWith(p)) && b.available()) return b;
235
251
  }
236
252
  return BACKENDS.find((b) => b.available()) ?? BACKENDS[0];
237
253
  }
238
- function allModels() {
239
- return BACKENDS.filter((b) => b.available()).flatMap((b) => [...b.models]);
240
- }
241
254
 
242
255
  // src/server.ts
243
256
  function sse(data) {
@@ -290,9 +303,15 @@ async function readBody(req) {
290
303
  }
291
304
  function handleModels(res) {
292
305
  const ts = Math.floor(Date.now() / 1e3);
306
+ const available = BACKENDS.filter((b) => b.available());
293
307
  sendJson(res, 200, {
294
308
  object: "list",
295
- data: allModels().map((id) => ({ id, object: "model", created: ts, owned_by: "bridge" }))
309
+ data: available.map((b) => ({
310
+ id: b.name,
311
+ object: "model",
312
+ created: ts,
313
+ owned_by: "bridgerapi"
314
+ }))
296
315
  });
297
316
  }
298
317
  function handleHealth(res, port2) {
@@ -535,6 +554,13 @@ var import_path2 = require("path");
535
554
  var import_readline = require("readline");
536
555
  var PORT = parseInt(process.env.BRIDGERAPI_PORT ?? "8082");
537
556
  var LOG_DIR = (0, import_path2.join)((0, import_os3.homedir)(), ".bridgerapi");
557
+ var INSTALL_HINTS = {
558
+ claude: "claude login (Claude Code \u2014 claude.ai/download)",
559
+ gemini: "gemini auth (Gemini CLI \u2014 npm i -g @google/gemini-cli)",
560
+ codex: "codex auth (Codex CLI \u2014 npm i -g @openai/codex)",
561
+ copilot: "gh auth login (GitHub Copilot \u2014 gh extension install github/gh-copilot)",
562
+ droid: "export FACTORY_API_KEY=fk-... (Droid \u2014 factory.ai)"
563
+ };
538
564
  function ask(question) {
539
565
  const rl = (0, import_readline.createInterface)({ input: process.stdin, output: process.stdout });
540
566
  return new Promise((resolve) => {
@@ -544,95 +570,55 @@ function ask(question) {
544
570
  });
545
571
  });
546
572
  }
547
- var INSTALL_HINTS = {
548
- claude: "Install Claude Code: https://claude.ai/download \u2192 then sign in with: claude login",
549
- gemini: "Install Gemini CLI: npm install -g @google/gemini-cli \u2192 then: gemini auth",
550
- codex: "Install Codex CLI: npm install -g @openai/codex \u2192 then: codex auth",
551
- copilot: "Install Copilot: gh extension install github/gh-copilot \u2192 then: gh auth login"
552
- };
553
573
  async function cmdSetup() {
554
574
  console.log();
555
575
  console.log(" bridgerapi \u2014 OpenAI-compatible API bridge for AI CLI tools");
556
576
  console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
557
577
  console.log();
558
- console.log(" Checking installed backends\u2026");
559
- console.log();
560
578
  const available = BACKENDS.filter((b) => b.available());
561
579
  const missing = BACKENDS.filter((b) => !b.available());
580
+ console.log(" Backends detected:\n");
562
581
  for (const b of BACKENDS) {
563
582
  const ok = b.available();
564
- const icon = ok ? "\u2713" : "\u2717";
565
- const note = ok ? b.models.join(", ") : "not found";
566
- console.log(` ${icon} ${b.name.padEnd(10)} ${note}`);
583
+ console.log(` ${ok ? "\u2713" : "\u2717"} ${b.name}`);
584
+ if (!ok) console.log(` \u2192 ${INSTALL_HINTS[b.name]}`);
567
585
  }
568
586
  console.log();
569
587
  if (available.length === 0) {
570
- console.log(" No CLI backends found. Install at least one:\n");
571
- for (const b of missing) console.log(` ${INSTALL_HINTS[b.name]}`);
572
- console.log();
573
- console.log(" Then re-run: bridgerapi");
588
+ console.log(" No backends found. Install at least one CLI above, then re-run: bridgerapi");
574
589
  process.exit(1);
575
590
  }
576
- if (missing.length) {
577
- console.log(" Optional \u2014 to enable missing backends:");
578
- for (const b of missing) console.log(` ${INSTALL_HINTS[b.name]}`);
579
- console.log();
580
- }
581
591
  const portAnswer = await ask(` Port [${PORT}]: `);
582
592
  const port2 = portAnswer ? parseInt(portAnswer) || PORT : PORT;
583
593
  console.log();
584
594
  console.log(" How do you want to run bridgerapi?");
585
- console.log(" 1 Start now in the foreground (stops when you close terminal)");
586
- console.log(" 2 Install as a background service (auto-starts on login)");
595
+ console.log(" 1 Foreground (stops when terminal closes)");
596
+ console.log(" 2 Background service (auto-starts on login)");
587
597
  console.log();
588
- const modeAnswer = await ask(" Choose [1/2]: ");
589
- const mode = modeAnswer === "2" ? "install" : "start";
598
+ const choice = await ask(" Choose [1/2]: ");
590
599
  console.log();
591
- if (mode === "install") {
600
+ if (choice === "2") {
592
601
  cmdInstall(port2);
593
602
  } else {
594
603
  cmdStart(port2);
595
604
  }
596
605
  }
597
- function parseArgs() {
598
- const args = process.argv.slice(2);
599
- const cmd2 = args[0] ?? "";
600
- let port2 = PORT;
601
- let model2;
602
- for (let i = 1; i < args.length; i++) {
603
- if ((args[i] === "--port" || args[i] === "-p") && args[i + 1]) {
604
- port2 = parseInt(args[++i]);
605
- } else if ((args[i] === "--model" || args[i] === "-m") && args[i + 1]) {
606
- model2 = args[++i];
607
- }
608
- }
609
- return { cmd: cmd2, port: port2, model: model2 };
610
- }
611
606
  function cmdStart(port2) {
612
607
  (0, import_fs3.mkdirSync)(LOG_DIR, { recursive: true });
613
608
  const available = BACKENDS.filter((b) => b.available());
614
- const unavailable = BACKENDS.filter((b) => !b.available());
615
- console.log(` bridgerapi \u2192 http://127.0.0.1:${port2}`);
616
- console.log(` backends : ${available.map((b) => b.name).join(", ") || "none!"}`);
617
- if (unavailable.length) {
618
- console.log(` missing : ${unavailable.map((b) => b.name).join(", ")}`);
619
- }
620
- console.log(` logs : ${LOG_DIR}/server.log`);
621
- console.log();
622
609
  if (available.length === 0) {
623
- console.error(" Error: no CLI backends found. Run: bridgerapi to see setup instructions.");
610
+ console.error(" No CLI backends found. Run: bridgerapi to see setup instructions.");
624
611
  process.exit(1);
625
612
  }
626
613
  const server = createBridgeServer(port2);
627
614
  server.listen(port2, "127.0.0.1", () => {
628
- console.log(` GET /v1/models`);
629
- console.log(` POST /v1/chat/completions (streaming + blocking)`);
630
- console.log(` GET /health`);
615
+ console.log(` bridgerapi is running`);
616
+ console.log();
617
+ console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
618
+ console.log(` API Key : local`);
631
619
  console.log();
632
- console.log(" OpenAI-compatible config:");
633
- console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
634
- console.log(` API Key : local`);
635
- console.log(` Model : ${available[0].models[0]}`);
620
+ console.log(` Backends : ${available.map((b) => b.name).join(", ")}`);
621
+ console.log(` Logs : ${LOG_DIR}/server.log`);
636
622
  console.log();
637
623
  console.log(" Ctrl+C to stop.");
638
624
  });
@@ -659,12 +645,10 @@ function cmdInstall(port2) {
659
645
  if (res.statusCode === 200) {
660
646
  clearInterval(poll);
661
647
  console.log();
662
- console.log(` \u2713 bridgerapi is running on http://127.0.0.1:${port2}`);
648
+ console.log(` bridgerapi is running`);
663
649
  console.log();
664
- console.log(" OpenAI-compatible config:");
665
- console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
666
- console.log(` API Key : local`);
667
- console.log(` Model : ${allModels()[0] ?? "claude-sonnet-4-6"}`);
650
+ console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
651
+ console.log(` API Key : local`);
668
652
  console.log();
669
653
  console.log(` Logs : tail -f ${LOG_DIR}/server.log`);
670
654
  console.log(` Stop : bridgerapi uninstall`);
@@ -697,29 +681,14 @@ function cmdUninstall() {
697
681
  function cmdStatus(port2) {
698
682
  const { running, pid } = serviceStatus();
699
683
  if (running) {
700
- console.log(` \u2713 bridgerapi is running${pid ? ` (pid ${pid})` : ""} on port ${port2}`);
684
+ console.log(` bridgerapi is running${pid ? ` (pid ${pid})` : ""}`);
685
+ console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
686
+ console.log(` API Key : local`);
701
687
  } else {
702
688
  console.log(" bridgerapi is not running.");
703
- console.log(" Run: bridgerapi \u2192 interactive setup");
704
- console.log(" Run: bridgerapi start \u2192 start in foreground");
705
- console.log(" Run: bridgerapi install \u2192 install background service");
706
- }
707
- }
708
- function cmdBackends() {
709
- console.log("\n CLI backends:\n");
710
- for (const b of BACKENDS) {
711
- const ok = b.available();
712
- const icon = ok ? "\u2713" : "\u2717";
713
- const models = ok ? b.models.join(", ") : "(not installed)";
714
- console.log(` ${icon} ${b.name.padEnd(10)} ${models}`);
715
- if (!ok) console.log(` ${INSTALL_HINTS[b.name]}`);
716
- }
717
- console.log();
718
- const available = BACKENDS.filter((b) => b.available());
719
- if (available.length) {
720
- console.log(` All available models:
721
- ${allModels().join(", ")}
722
- `);
689
+ console.log(" Run: bridgerapi \u2192 setup wizard");
690
+ console.log(" Run: bridgerapi start \u2192 start in foreground");
691
+ console.log(" Run: bridgerapi install \u2192 install background service");
723
692
  }
724
693
  }
725
694
  async function cmdChat(model2) {
@@ -728,15 +697,19 @@ async function cmdChat(model2) {
728
697
  console.error(" No backends found. Run: bridgerapi to see setup instructions.");
729
698
  process.exit(1);
730
699
  }
731
- const resolvedModel = model2 ?? available[0].models[0];
700
+ const resolvedModel = model2 ?? `${available[0].name}-default`;
732
701
  const backend = pickBackend(resolvedModel);
733
702
  console.log();
734
- console.log(` bridgerapi chat \u2014 ${backend.name} \u2014 ${resolvedModel}`);
735
- console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
736
- console.log(" Type your message and press Enter. Ctrl+C to exit.");
703
+ console.log(` bridgerapi chat \u2014 ${backend.name}`);
704
+ console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
705
+ console.log(" Type a message and press Enter. Ctrl+C to exit.");
737
706
  console.log();
738
707
  const history = [];
739
708
  const rl = (0, import_readline.createInterface)({ input: process.stdin, output: process.stdout });
709
+ rl.on("close", () => {
710
+ console.log("\n Goodbye.");
711
+ process.exit(0);
712
+ });
740
713
  const prompt = () => {
741
714
  rl.question("You: ", async (input) => {
742
715
  const text = input.trim();
@@ -763,10 +736,6 @@ async function cmdChat(model2) {
763
736
  prompt();
764
737
  });
765
738
  };
766
- rl.on("close", () => {
767
- console.log("\n Goodbye.");
768
- process.exit(0);
769
- });
770
739
  prompt();
771
740
  }
772
741
  function showHelp() {
@@ -774,21 +743,25 @@ function showHelp() {
774
743
  bridgerapi \u2014 OpenAI-compatible API bridge for AI CLI tools
775
744
 
776
745
  Usage:
777
- bridgerapi Interactive setup wizard
778
- bridgerapi chat [--model <name>] Interactive chat session in terminal
779
- bridgerapi start [--port n] Start API server in the foreground
780
- bridgerapi install [--port n] Install as a background service
781
- bridgerapi uninstall Remove background service
782
- bridgerapi status Show service status
783
- bridgerapi backends List detected backends
784
-
785
- Supported backends (auto-detected):
786
- claude-* \u2192 Claude Code CLI (claude login)
787
- gemini-* \u2192 Gemini CLI (gemini auth)
788
- gpt-*, o3 \u2192 Codex CLI (codex auth)
789
- copilot \u2192 GitHub Copilot (gh auth login)
746
+ bridgerapi Interactive setup wizard
747
+ bridgerapi chat [--model m] Chat directly in the terminal
748
+ bridgerapi start [--port n] Start API server in the foreground
749
+ bridgerapi install [--port n] Install as a background service
750
+ bridgerapi uninstall Remove background service
751
+ bridgerapi status Show service status
790
752
  `.trim());
791
753
  }
754
+ function parseArgs() {
755
+ const args = process.argv.slice(2);
756
+ const cmd2 = args[0] ?? "";
757
+ let port2 = PORT;
758
+ let model2;
759
+ for (let i = 1; i < args.length; i++) {
760
+ if ((args[i] === "--port" || args[i] === "-p") && args[i + 1]) port2 = parseInt(args[++i]);
761
+ if ((args[i] === "--model" || args[i] === "-m") && args[i + 1]) model2 = args[++i];
762
+ }
763
+ return { cmd: cmd2, port: port2, model: model2 };
764
+ }
792
765
  var { cmd, port, model } = parseArgs();
793
766
  switch (cmd) {
794
767
  case "":
@@ -810,9 +783,6 @@ switch (cmd) {
810
783
  case "status":
811
784
  cmdStatus(port);
812
785
  break;
813
- case "backends":
814
- cmdBackends();
815
- break;
816
786
  case "help":
817
787
  case "--help":
818
788
  case "-h":
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "bridgerapi",
3
- "version": "1.2.0",
3
+ "version": "1.5.0",
4
4
  "description": "Turn any AI CLI (Claude Code, Gemini, Codex, GitHub Copilot) into an OpenAI-compatible API — no API keys needed",
5
5
  "keywords": [
6
6
  "claude",