bridgerapi 1.0.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/cli.js +207 -84
  2. package/package.json +2 -2
package/dist/cli.js CHANGED
@@ -91,11 +91,11 @@ var ClaudeBackend = class {
91
91
  available() {
92
92
  return (0, import_fs.existsSync)(this.bin) || Boolean(which("claude"));
93
93
  }
94
- async runBlocking(prompt, model) {
94
+ async runBlocking(prompt, model2) {
95
95
  const bin = which("claude") || this.bin;
96
96
  let out;
97
97
  try {
98
- out = (0, import_child_process.execFileSync)(bin, ["-p", "--output-format", "json", "--model", model], {
98
+ out = (0, import_child_process.execFileSync)(bin, ["-p", "--output-format", "json", "--model", model2], {
99
99
  input: prompt,
100
100
  encoding: "utf8",
101
101
  timeout: 3e5
@@ -106,9 +106,9 @@ var ClaudeBackend = class {
106
106
  const data = JSON.parse(out.trim() || "{}");
107
107
  return [data.result ?? "", data.usage ?? null];
108
108
  }
109
- async *stream(prompt, model) {
109
+ async *stream(prompt, model2) {
110
110
  const bin = which("claude") || this.bin;
111
- yield* spawnStream(bin, ["-p", "--output-format", "text", "--model", model], prompt);
111
+ yield* spawnStream(bin, ["-p", "--output-format", "text", "--model", model2], prompt);
112
112
  }
113
113
  };
114
114
  var GeminiBackend = class {
@@ -123,13 +123,13 @@ var GeminiBackend = class {
123
123
  available() {
124
124
  return Boolean(which("gemini")) || (0, import_fs.existsSync)(this.bin);
125
125
  }
126
- async runBlocking(prompt, model) {
126
+ async runBlocking(prompt, model2) {
127
127
  const bin = which("gemini") || this.bin;
128
128
  let out;
129
129
  try {
130
130
  out = (0, import_child_process.execFileSync)(
131
131
  bin,
132
- ["--output-format", "json", "--model", model, "--approval-mode", "yolo"],
132
+ ["--output-format", "json", "--model", model2, "--approval-mode", "yolo"],
133
133
  { input: prompt, encoding: "utf8", timeout: 3e5, env: process.env }
134
134
  );
135
135
  } catch (e) {
@@ -149,11 +149,11 @@ var GeminiBackend = class {
149
149
  return [raw, null];
150
150
  }
151
151
  }
152
- async *stream(prompt, model) {
152
+ async *stream(prompt, model2) {
153
153
  const bin = which("gemini") || this.bin;
154
154
  yield* spawnStream(
155
155
  bin,
156
- ["--output-format", "text", "--model", model, "--approval-mode", "yolo"],
156
+ ["--output-format", "text", "--model", model2, "--approval-mode", "yolo"],
157
157
  prompt
158
158
  );
159
159
  }
@@ -170,10 +170,10 @@ var CodexBackend = class {
170
170
  available() {
171
171
  return Boolean(which("codex"));
172
172
  }
173
- async runBlocking(prompt, model) {
173
+ async runBlocking(prompt, model2) {
174
174
  let out;
175
175
  try {
176
- out = (0, import_child_process.execFileSync)(this.bin, ["-q", "--model", model, prompt], {
176
+ out = (0, import_child_process.execFileSync)(this.bin, ["-q", "--model", model2, prompt], {
177
177
  encoding: "utf8",
178
178
  timeout: 3e5
179
179
  });
@@ -182,8 +182,8 @@ var CodexBackend = class {
182
182
  }
183
183
  return [out.trim(), null];
184
184
  }
185
- async *stream(prompt, model) {
186
- yield* spawnStream(this.bin, ["-q", "--model", model, prompt]);
185
+ async *stream(prompt, model2) {
186
+ yield* spawnStream(this.bin, ["-q", "--model", model2, prompt]);
187
187
  }
188
188
  };
189
189
  var CopilotBackend = class {
@@ -204,7 +204,7 @@ var CopilotBackend = class {
204
204
  return false;
205
205
  }
206
206
  }
207
- async runBlocking(prompt, model) {
207
+ async runBlocking(prompt, model2) {
208
208
  let out;
209
209
  try {
210
210
  out = (0, import_child_process.execFileSync)(this.bin, ["copilot", "suggest", "-t", "general", prompt], {
@@ -216,7 +216,7 @@ var CopilotBackend = class {
216
216
  }
217
217
  return [out.trim(), null];
218
218
  }
219
- async *stream(prompt, model) {
219
+ async *stream(prompt, model2) {
220
220
  yield* spawnStream(this.bin, ["copilot", "suggest", "-t", "general", prompt]);
221
221
  }
222
222
  };
@@ -226,8 +226,8 @@ var BACKENDS = [
226
226
  new CodexBackend(),
227
227
  new CopilotBackend()
228
228
  ];
229
- function pickBackend(model) {
230
- const m = model.toLowerCase();
229
+ function pickBackend(model2) {
230
+ const m = model2.toLowerCase();
231
231
  for (const b of BACKENDS) {
232
232
  if (b.prefixes.some((p) => m.startsWith(p))) {
233
233
  if (b.available()) return b;
@@ -245,23 +245,23 @@ function sse(data) {
245
245
 
246
246
  `;
247
247
  }
248
- function chunk(id, ts, model, delta, finish) {
248
+ function chunk(id, ts, model2, delta, finish) {
249
249
  return sse({
250
250
  id,
251
251
  object: "chat.completion.chunk",
252
252
  created: ts,
253
- model,
253
+ model: model2,
254
254
  choices: [{ index: 0, delta, finish_reason: finish ?? null }]
255
255
  });
256
256
  }
257
- function completion(id, ts, model, text, usage) {
257
+ function completion(id, ts, model2, text, usage) {
258
258
  const pt = usage ? (usage.input_tokens ?? 0) + (usage.cache_creation_input_tokens ?? 0) + (usage.cache_read_input_tokens ?? 0) + (usage.promptTokenCount ?? 0) : 0;
259
259
  const ct = usage ? (usage.output_tokens ?? 0) + (usage.candidatesTokenCount ?? 0) : 0;
260
260
  return {
261
261
  id,
262
262
  object: "chat.completion",
263
263
  created: ts,
264
- model,
264
+ model: model2,
265
265
  choices: [{ index: 0, message: { role: "assistant", content: text }, finish_reason: "stop" }],
266
266
  usage: { prompt_tokens: pt, completion_tokens: ct, total_tokens: pt + ct }
267
267
  };
@@ -313,34 +313,34 @@ async function handleChat(req, res) {
313
313
  sendJson(res, 400, { error: { message: "messages required", type: "invalid_request_error" } });
314
314
  return;
315
315
  }
316
- const model = body.model ?? "claude-sonnet-4-6";
316
+ const model2 = body.model ?? "claude-sonnet-4-6";
317
317
  const streaming = Boolean(body.stream);
318
318
  const prompt = messagesToPrompt(messages);
319
- const backend = pickBackend(model);
319
+ const backend = pickBackend(model2);
320
320
  const id = `chatcmpl-${(0, import_crypto.randomUUID)().replace(/-/g, "").slice(0, 20)}`;
321
321
  const ts = Math.floor(Date.now() / 1e3);
322
- console.log(` ${backend.name} model=${model} stream=${streaming} turns=${messages.length}`);
322
+ console.log(` ${backend.name} model=${model2} stream=${streaming} turns=${messages.length}`);
323
323
  if (streaming) {
324
324
  cors(res, 200);
325
325
  res.setHeader("Content-Type", "text/event-stream");
326
326
  res.setHeader("Cache-Control", "no-cache");
327
327
  res.setHeader("X-Accel-Buffering", "no");
328
328
  res.flushHeaders();
329
- res.write(chunk(id, ts, model, { role: "assistant" }));
329
+ res.write(chunk(id, ts, model2, { role: "assistant" }));
330
330
  try {
331
- for await (const raw of backend.stream(prompt, model)) {
332
- res.write(chunk(id, ts, model, { content: raw.toString("utf8") }));
331
+ for await (const raw of backend.stream(prompt, model2)) {
332
+ res.write(chunk(id, ts, model2, { content: raw.toString("utf8") }));
333
333
  }
334
334
  } catch (err) {
335
335
  console.error(` stream error: ${err.message}`);
336
336
  }
337
- res.write(chunk(id, ts, model, {}, "stop"));
337
+ res.write(chunk(id, ts, model2, {}, "stop"));
338
338
  res.write("data: [DONE]\n\n");
339
339
  res.end();
340
340
  } else {
341
341
  try {
342
- const [text, usage] = await backend.runBlocking(prompt, model);
343
- sendJson(res, 200, completion(id, ts, model, text, usage));
342
+ const [text, usage] = await backend.runBlocking(prompt, model2);
343
+ sendJson(res, 200, completion(id, ts, model2, text, usage));
344
344
  } catch (err) {
345
345
  console.error(` error: ${err.message}`);
346
346
  sendJson(res, 500, { error: { message: err.message, type: "server_error" } });
@@ -532,74 +532,115 @@ function serviceStatus() {
532
532
  var import_fs3 = require("fs");
533
533
  var import_os3 = require("os");
534
534
  var import_path2 = require("path");
535
+ var import_readline = require("readline");
535
536
  var PORT = parseInt(process.env.BRIDGERAPI_PORT ?? "8082");
536
537
  var LOG_DIR = (0, import_path2.join)((0, import_os3.homedir)(), ".bridgerapi");
537
- var USAGE = `
538
- bridgerapi \u2014 OpenAI-compatible API bridge for AI CLI tools
539
-
540
- Commands:
541
- start [--port <n>] Start server in the foreground (default port: 8082)
542
- install [--port <n>] Install as a background service (auto-starts on login)
543
- uninstall Remove background service
544
- status Show whether the service is running
545
- backends List detected CLI backends and their status
546
-
547
- Examples:
548
- bridgerapi start
549
- bridgerapi start --port 9000
550
- bridgerapi install
551
- bridgerapi backends
552
-
553
- Supported backends (auto-detected):
554
- claude-* \u2192 Claude Code CLI (oauth via Claude Code login)
555
- gemini-* \u2192 Gemini CLI (run: gemini auth)
556
- gpt-*, o3 \u2192 Codex CLI (run: codex auth)
557
- copilot \u2192 GitHub Copilot (run: gh auth login)
558
-
559
- Logs: ${LOG_DIR}/server.log
560
- `.trim();
538
+ function ask(question) {
539
+ const rl = (0, import_readline.createInterface)({ input: process.stdin, output: process.stdout });
540
+ return new Promise((resolve) => {
541
+ rl.question(question, (answer) => {
542
+ rl.close();
543
+ resolve(answer.trim());
544
+ });
545
+ });
546
+ }
547
+ var INSTALL_HINTS = {
548
+ claude: "Install Claude Code: https://claude.ai/download \u2192 then sign in with: claude login",
549
+ gemini: "Install Gemini CLI: npm install -g @google/gemini-cli \u2192 then: gemini auth",
550
+ codex: "Install Codex CLI: npm install -g @openai/codex \u2192 then: codex auth",
551
+ copilot: "Install Copilot: gh extension install github/gh-copilot \u2192 then: gh auth login"
552
+ };
553
+ async function cmdSetup() {
554
+ console.log();
555
+ console.log(" bridgerapi \u2014 OpenAI-compatible API bridge for AI CLI tools");
556
+ console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
557
+ console.log();
558
+ console.log(" Checking installed backends\u2026");
559
+ console.log();
560
+ const available = BACKENDS.filter((b) => b.available());
561
+ const missing = BACKENDS.filter((b) => !b.available());
562
+ for (const b of BACKENDS) {
563
+ const ok = b.available();
564
+ const icon = ok ? "\u2713" : "\u2717";
565
+ const note = ok ? b.models.join(", ") : "not found";
566
+ console.log(` ${icon} ${b.name.padEnd(10)} ${note}`);
567
+ }
568
+ console.log();
569
+ if (available.length === 0) {
570
+ console.log(" No CLI backends found. Install at least one:\n");
571
+ for (const b of missing) console.log(` ${INSTALL_HINTS[b.name]}`);
572
+ console.log();
573
+ console.log(" Then re-run: bridgerapi");
574
+ process.exit(1);
575
+ }
576
+ if (missing.length) {
577
+ console.log(" Optional \u2014 to enable missing backends:");
578
+ for (const b of missing) console.log(` ${INSTALL_HINTS[b.name]}`);
579
+ console.log();
580
+ }
581
+ const portAnswer = await ask(` Port [${PORT}]: `);
582
+ const port2 = portAnswer ? parseInt(portAnswer) || PORT : PORT;
583
+ console.log();
584
+ console.log(" How do you want to run bridgerapi?");
585
+ console.log(" 1 Start now in the foreground (stops when you close terminal)");
586
+ console.log(" 2 Install as a background service (auto-starts on login)");
587
+ console.log();
588
+ const modeAnswer = await ask(" Choose [1/2]: ");
589
+ const mode = modeAnswer === "2" ? "install" : "start";
590
+ console.log();
591
+ if (mode === "install") {
592
+ cmdInstall(port2);
593
+ } else {
594
+ cmdStart(port2);
595
+ }
596
+ }
561
597
  function parseArgs() {
562
598
  const args = process.argv.slice(2);
563
- const cmd2 = args[0] ?? "help";
599
+ const cmd2 = args[0] ?? "";
564
600
  let port2 = PORT;
601
+ let model2;
565
602
  for (let i = 1; i < args.length; i++) {
566
603
  if ((args[i] === "--port" || args[i] === "-p") && args[i + 1]) {
567
604
  port2 = parseInt(args[++i]);
605
+ } else if ((args[i] === "--model" || args[i] === "-m") && args[i + 1]) {
606
+ model2 = args[++i];
568
607
  }
569
608
  }
570
- return { cmd: cmd2, port: port2 };
609
+ return { cmd: cmd2, port: port2, model: model2 };
571
610
  }
572
611
  function cmdStart(port2) {
573
612
  (0, import_fs3.mkdirSync)(LOG_DIR, { recursive: true });
574
613
  const available = BACKENDS.filter((b) => b.available());
575
614
  const unavailable = BACKENDS.filter((b) => !b.available());
576
- console.log(`bridgerapi http://127.0.0.1:${port2}`);
577
- console.log(` backends ready : ${available.map((b) => b.name).join(", ") || "none!"}`);
615
+ console.log(` bridgerapi \u2192 http://127.0.0.1:${port2}`);
616
+ console.log(` backends : ${available.map((b) => b.name).join(", ") || "none!"}`);
578
617
  if (unavailable.length) {
579
- console.log(` backends missing : ${unavailable.map((b) => b.name).join(", ")}`);
618
+ console.log(` missing : ${unavailable.map((b) => b.name).join(", ")}`);
580
619
  }
581
- console.log(` logs : ${LOG_DIR}/server.log`);
620
+ console.log(` logs : ${LOG_DIR}/server.log`);
582
621
  console.log();
583
622
  if (available.length === 0) {
584
- console.error("Error: no CLI backends found. Install claude / gemini / codex first.");
623
+ console.error(" Error: no CLI backends found. Run: bridgerapi to see setup instructions.");
585
624
  process.exit(1);
586
625
  }
587
626
  const server = createBridgeServer(port2);
588
627
  server.listen(port2, "127.0.0.1", () => {
589
628
  console.log(` GET /v1/models`);
590
- console.log(` POST /v1/chat/completions (stream + blocking)`);
629
+ console.log(` POST /v1/chat/completions (streaming + blocking)`);
591
630
  console.log(` GET /health`);
592
631
  console.log();
593
- console.log(` Goose provider config:`);
632
+ console.log(" OpenAI-compatible config:");
594
633
  console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
595
634
  console.log(` API Key : local`);
596
- console.log(` Model : claude-sonnet-4-6`);
635
+ console.log(` Model : ${available[0].models[0]}`);
636
+ console.log();
637
+ console.log(" Ctrl+C to stop.");
597
638
  });
598
639
  server.on("error", (err) => {
599
640
  if (err.code === "EADDRINUSE") {
600
- console.error(`Port ${port2} is already in use. Try: bridgerapi start --port 9000`);
641
+ console.error(` Port ${port2} is already in use. Try: bridgerapi start --port 9000`);
601
642
  } else {
602
- console.error("Server error:", err.message);
643
+ console.error(" Server error:", err.message);
603
644
  }
604
645
  process.exit(1);
605
646
  });
@@ -617,14 +658,13 @@ function cmdInstall(port2) {
617
658
  http.get(`http://127.0.0.1:${port2}/health`, (res) => {
618
659
  if (res.statusCode === 200) {
619
660
  clearInterval(poll);
620
- console.log(`
621
- \u2713 bridgerapi running on http://127.0.0.1:${port2}`);
622
661
  console.log();
623
- console.log(" Goose config:");
624
- console.log(` Provider : OpenAI Compatible`);
662
+ console.log(` \u2713 bridgerapi is running on http://127.0.0.1:${port2}`);
663
+ console.log();
664
+ console.log(" OpenAI-compatible config:");
625
665
  console.log(` Base URL : http://127.0.0.1:${port2}/v1`);
626
666
  console.log(` API Key : local`);
627
- console.log(` Model : claude-sonnet-4-6`);
667
+ console.log(` Model : ${allModels()[0] ?? "claude-sonnet-4-6"}`);
628
668
  console.log();
629
669
  console.log(` Logs : tail -f ${LOG_DIR}/server.log`);
630
670
  console.log(` Stop : bridgerapi uninstall`);
@@ -634,15 +674,15 @@ function cmdInstall(port2) {
634
674
  });
635
675
  } catch {
636
676
  }
637
- if (attempts >= 10) {
677
+ if (attempts >= 15) {
638
678
  clearInterval(poll);
639
679
  console.log(`
640
- Server did not start. Check: tail -f ${LOG_DIR}/server.log`);
680
+ Server did not respond. Check: tail -f ${LOG_DIR}/server.log`);
641
681
  process.exit(1);
642
682
  }
643
683
  }, 600);
644
684
  } catch (err) {
645
- console.error("Install failed:", err.message);
685
+ console.error(" Install failed:", err.message);
646
686
  process.exit(1);
647
687
  }
648
688
  }
@@ -650,36 +690,114 @@ function cmdUninstall() {
650
690
  try {
651
691
  uninstallService();
652
692
  } catch (err) {
653
- console.error("Uninstall failed:", err.message);
693
+ console.error(" Uninstall failed:", err.message);
654
694
  process.exit(1);
655
695
  }
656
696
  }
657
697
  function cmdStatus(port2) {
658
698
  const { running, pid } = serviceStatus();
659
699
  if (running) {
660
- console.log(`\u2713 bridgerapi is running${pid ? ` (pid ${pid})` : ""} on port ${port2}`);
700
+ console.log(` \u2713 bridgerapi is running${pid ? ` (pid ${pid})` : ""} on port ${port2}`);
661
701
  } else {
662
- console.log(" bridgerapi is not running");
663
- console.log(" Start with: bridgerapi start or bridgerapi install");
702
+ console.log(" bridgerapi is not running.");
703
+ console.log(" Run: bridgerapi \u2192 interactive setup");
704
+ console.log(" Run: bridgerapi start \u2192 start in foreground");
705
+ console.log(" Run: bridgerapi install \u2192 install background service");
664
706
  }
665
707
  }
666
708
  function cmdBackends() {
667
- console.log("CLI backends:\n");
709
+ console.log("\n CLI backends:\n");
668
710
  for (const b of BACKENDS) {
669
711
  const ok = b.available();
670
712
  const icon = ok ? "\u2713" : "\u2717";
671
713
  const models = ok ? b.models.join(", ") : "(not installed)";
672
- console.log(` ${icon} ${b.name.padEnd(10)} ${models}`);
714
+ console.log(` ${icon} ${b.name.padEnd(10)} ${models}`);
715
+ if (!ok) console.log(` ${INSTALL_HINTS[b.name]}`);
673
716
  }
674
717
  console.log();
675
718
  const available = BACKENDS.filter((b) => b.available());
676
719
  if (available.length) {
677
- console.log(`All available models:
678
- ${allModels().join(", ")}`);
720
+ console.log(` All available models:
721
+ ${allModels().join(", ")}
722
+ `);
723
+ }
724
+ }
725
+ async function cmdChat(model2) {
726
+ const available = BACKENDS.filter((b) => b.available());
727
+ if (available.length === 0) {
728
+ console.error(" No backends found. Run: bridgerapi to see setup instructions.");
729
+ process.exit(1);
679
730
  }
731
+ const resolvedModel = model2 ?? available[0].models[0];
732
+ const backend = pickBackend(resolvedModel);
733
+ console.log();
734
+ console.log(` bridgerapi chat \u2014 ${backend.name} \u2014 ${resolvedModel}`);
735
+ console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
736
+ console.log(" Type your message and press Enter. Ctrl+C to exit.");
737
+ console.log();
738
+ const history = [];
739
+ const rl = (0, import_readline.createInterface)({ input: process.stdin, output: process.stdout });
740
+ const prompt = () => {
741
+ rl.question("You: ", async (input) => {
742
+ const text = input.trim();
743
+ if (!text) {
744
+ prompt();
745
+ return;
746
+ }
747
+ history.push({ role: "user", content: text });
748
+ process.stdout.write("\n");
749
+ let reply = "";
750
+ try {
751
+ process.stdout.write(`${backend.name}: `);
752
+ for await (const chunk2 of backend.stream(messagesToPrompt(history), resolvedModel)) {
753
+ const piece = chunk2.toString("utf8");
754
+ process.stdout.write(piece);
755
+ reply += piece;
756
+ }
757
+ } catch (err) {
758
+ process.stdout.write(`
759
+ Error: ${err.message}`);
760
+ }
761
+ process.stdout.write("\n\n");
762
+ if (reply) history.push({ role: "assistant", content: reply });
763
+ prompt();
764
+ });
765
+ };
766
+ rl.on("close", () => {
767
+ console.log("\n Goodbye.");
768
+ process.exit(0);
769
+ });
770
+ prompt();
771
+ }
772
+ function showHelp() {
773
+ console.log(`
774
+ bridgerapi \u2014 OpenAI-compatible API bridge for AI CLI tools
775
+
776
+ Usage:
777
+ bridgerapi Interactive setup wizard
778
+ bridgerapi chat [--model <name>] Interactive chat session in terminal
779
+ bridgerapi start [--port n] Start API server in the foreground
780
+ bridgerapi install [--port n] Install as a background service
781
+ bridgerapi uninstall Remove background service
782
+ bridgerapi status Show service status
783
+ bridgerapi backends List detected backends
784
+
785
+ Supported backends (auto-detected):
786
+ claude-* \u2192 Claude Code CLI (claude login)
787
+ gemini-* \u2192 Gemini CLI (gemini auth)
788
+ gpt-*, o3 \u2192 Codex CLI (codex auth)
789
+ copilot \u2192 GitHub Copilot (gh auth login)
790
+ `.trim());
680
791
  }
681
- var { cmd, port } = parseArgs();
792
+ var { cmd, port, model } = parseArgs();
682
793
  switch (cmd) {
794
+ case "":
795
+ case "setup":
796
+ cmdSetup();
797
+ break;
798
+ case "chat":
799
+ cmdChat(model);
800
+ break;
683
801
  case "start":
684
802
  cmdStart(port);
685
803
  break;
@@ -695,7 +813,12 @@ switch (cmd) {
695
813
  case "backends":
696
814
  cmdBackends();
697
815
  break;
816
+ case "help":
817
+ case "--help":
818
+ case "-h":
819
+ showHelp();
820
+ break;
698
821
  default:
699
- console.log(USAGE);
700
- if (cmd !== "help") process.exit(1);
822
+ showHelp();
823
+ process.exit(1);
701
824
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "bridgerapi",
3
- "version": "1.0.0",
3
+ "version": "1.2.0",
4
4
  "description": "Turn any AI CLI (Claude Code, Gemini, Codex, GitHub Copilot) into an OpenAI-compatible API — no API keys needed",
5
5
  "keywords": [
6
6
  "claude",
@@ -21,7 +21,7 @@
21
21
  "author": "teodorwaltervido",
22
22
  "main": "dist/cli.js",
23
23
  "bin": {
24
- "bridgerapi": "./dist/cli.js"
24
+ "bridgerapi": "dist/cli.js"
25
25
  },
26
26
  "files": [
27
27
  "dist"