bridgerapi 1.1.0 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.js +179 -44
- package/package.json +1 -1
package/dist/cli.js
CHANGED
|
@@ -59,6 +59,7 @@ function messagesToPrompt(messages) {
|
|
|
59
59
|
var import_child_process = require("child_process");
|
|
60
60
|
var import_fs = require("fs");
|
|
61
61
|
var import_os = require("os");
|
|
62
|
+
var import_https = require("https");
|
|
62
63
|
var HOME = (0, import_os.homedir)();
|
|
63
64
|
function which(cmd2) {
|
|
64
65
|
try {
|
|
@@ -67,6 +68,27 @@ function which(cmd2) {
|
|
|
67
68
|
return "";
|
|
68
69
|
}
|
|
69
70
|
}
|
|
71
|
+
// Minimal HTTPS GET helper that resolves with the response body parsed
// as JSON. Rejects on network errors, non-2xx HTTP status codes,
// unparseable bodies, or when no response completes within 6 seconds.
// Callers (the fetchLiveModels implementations) catch any rejection and
// fall back to their static model lists.
function httpsGetJson(url, headers) {
  return new Promise((resolve, reject) => {
    const req = (0, import_https.request)(url, { headers }, (res) => {
      const chunks = [];
      res.on("data", (c) => chunks.push(c));
      res.on("end", () => {
        // Fix: previously an HTTP error response (401/403/429/5xx) was
        // resolved as if it were a model list; now it is an explicit
        // failure instead of silently looking like an empty payload.
        if (res.statusCode && (res.statusCode < 200 || res.statusCode >= 300)) {
          reject(new Error(`HTTP ${res.statusCode}`));
          return;
        }
        try {
          resolve(JSON.parse(Buffer.concat(chunks).toString()));
        } catch (e) {
          reject(e);
        }
      });
    });
    req.on("error", reject);
    req.setTimeout(6e3, () => {
      req.destroy();
      reject(new Error("timeout"));
    });
    req.end();
  });
}
|
|
70
92
|
async function* spawnStream(cmd2, args, stdin, env) {
|
|
71
93
|
const proc = (0, import_child_process.spawn)(cmd2, args, {
|
|
72
94
|
env: env ?? process.env,
|
|
@@ -79,11 +101,12 @@ async function* spawnStream(cmd2, args, stdin, env) {
|
|
|
79
101
|
yield chunk2;
|
|
80
102
|
}
|
|
81
103
|
}
|
|
104
|
+
var CLAUDE_FALLBACK = ["claude-opus-4-6", "claude-sonnet-4-6", "claude-haiku-4-5"];
|
|
82
105
|
var ClaudeBackend = class {
|
|
83
106
|
constructor() {
|
|
84
107
|
this.name = "claude";
|
|
85
|
-
this.models = ["claude-opus-4-6", "claude-sonnet-4-6", "claude-haiku-4-5"];
|
|
86
108
|
this.prefixes = ["claude"];
|
|
109
|
+
this.models = [...CLAUDE_FALLBACK];
|
|
87
110
|
}
|
|
88
111
|
get bin() {
|
|
89
112
|
return process.env.CLAUDE_BIN ?? `${HOME}/.local/bin/claude`;
|
|
@@ -91,11 +114,25 @@ var ClaudeBackend = class {
|
|
|
91
114
|
available() {
|
|
92
115
|
return (0, import_fs.existsSync)(this.bin) || Boolean(which("claude"));
|
|
93
116
|
}
|
|
94
|
-
async
|
|
117
|
+
async fetchLiveModels() {
|
|
118
|
+
const key = process.env.ANTHROPIC_API_KEY;
|
|
119
|
+
if (!key) return [...CLAUDE_FALLBACK];
|
|
120
|
+
try {
|
|
121
|
+
const data = await httpsGetJson("https://api.anthropic.com/v1/models", {
|
|
122
|
+
"x-api-key": key,
|
|
123
|
+
"anthropic-version": "2023-06-01"
|
|
124
|
+
});
|
|
125
|
+
const ids = (data.data ?? []).map((m) => String(m.id)).filter((id) => id.startsWith("claude-"));
|
|
126
|
+
return ids.length ? ids : [...CLAUDE_FALLBACK];
|
|
127
|
+
} catch {
|
|
128
|
+
return [...CLAUDE_FALLBACK];
|
|
129
|
+
}
|
|
130
|
+
}
|
|
131
|
+
async runBlocking(prompt, model2) {
|
|
95
132
|
const bin = which("claude") || this.bin;
|
|
96
133
|
let out;
|
|
97
134
|
try {
|
|
98
|
-
out = (0, import_child_process.execFileSync)(bin, ["-p", "--output-format", "json", "--model",
|
|
135
|
+
out = (0, import_child_process.execFileSync)(bin, ["-p", "--output-format", "json", "--model", model2], {
|
|
99
136
|
input: prompt,
|
|
100
137
|
encoding: "utf8",
|
|
101
138
|
timeout: 3e5
|
|
@@ -106,16 +143,17 @@ var ClaudeBackend = class {
|
|
|
106
143
|
const data = JSON.parse(out.trim() || "{}");
|
|
107
144
|
return [data.result ?? "", data.usage ?? null];
|
|
108
145
|
}
|
|
109
|
-
async *stream(prompt,
|
|
146
|
+
async *stream(prompt, model2) {
|
|
110
147
|
const bin = which("claude") || this.bin;
|
|
111
|
-
yield* spawnStream(bin, ["-p", "--output-format", "text", "--model",
|
|
148
|
+
yield* spawnStream(bin, ["-p", "--output-format", "text", "--model", model2], prompt);
|
|
112
149
|
}
|
|
113
150
|
};
|
|
151
|
+
var GEMINI_FALLBACK = ["gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.0-flash", "gemini-1.5-pro"];
|
|
114
152
|
var GeminiBackend = class {
|
|
115
153
|
constructor() {
|
|
116
154
|
this.name = "gemini";
|
|
117
|
-
this.models = ["gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.0-flash", "gemini-1.5-pro"];
|
|
118
155
|
this.prefixes = ["gemini"];
|
|
156
|
+
this.models = [...GEMINI_FALLBACK];
|
|
119
157
|
}
|
|
120
158
|
get bin() {
|
|
121
159
|
return process.env.GEMINI_BIN ?? which("gemini") ?? "/opt/homebrew/bin/gemini";
|
|
@@ -123,13 +161,27 @@ var GeminiBackend = class {
|
|
|
123
161
|
available() {
|
|
124
162
|
return Boolean(which("gemini")) || (0, import_fs.existsSync)(this.bin);
|
|
125
163
|
}
|
|
126
|
-
async
|
|
164
|
+
async fetchLiveModels() {
|
|
165
|
+
const key = process.env.GEMINI_API_KEY;
|
|
166
|
+
if (!key) return [...GEMINI_FALLBACK];
|
|
167
|
+
try {
|
|
168
|
+
const data = await httpsGetJson(
|
|
169
|
+
`https://generativelanguage.googleapis.com/v1beta/models?key=${key}&pageSize=50`,
|
|
170
|
+
{}
|
|
171
|
+
);
|
|
172
|
+
const ids = (data.models ?? []).map((m) => String(m.name).replace("models/", "")).filter((id) => /^gemini-/.test(id) && !id.includes("embedding") && !id.includes("aqa"));
|
|
173
|
+
return ids.length ? ids : [...GEMINI_FALLBACK];
|
|
174
|
+
} catch {
|
|
175
|
+
return [...GEMINI_FALLBACK];
|
|
176
|
+
}
|
|
177
|
+
}
|
|
178
|
+
async runBlocking(prompt, model2) {
|
|
127
179
|
const bin = which("gemini") || this.bin;
|
|
128
180
|
let out;
|
|
129
181
|
try {
|
|
130
182
|
out = (0, import_child_process.execFileSync)(
|
|
131
183
|
bin,
|
|
132
|
-
["--output-format", "json", "--model",
|
|
184
|
+
["--output-format", "json", "--model", model2, "--approval-mode", "yolo"],
|
|
133
185
|
{ input: prompt, encoding: "utf8", timeout: 3e5, env: process.env }
|
|
134
186
|
);
|
|
135
187
|
} catch (e) {
|
|
@@ -149,20 +201,21 @@ var GeminiBackend = class {
|
|
|
149
201
|
return [raw, null];
|
|
150
202
|
}
|
|
151
203
|
}
|
|
152
|
-
async *stream(prompt,
|
|
204
|
+
async *stream(prompt, model2) {
|
|
153
205
|
const bin = which("gemini") || this.bin;
|
|
154
206
|
yield* spawnStream(
|
|
155
207
|
bin,
|
|
156
|
-
["--output-format", "text", "--model",
|
|
208
|
+
["--output-format", "text", "--model", model2, "--approval-mode", "yolo"],
|
|
157
209
|
prompt
|
|
158
210
|
);
|
|
159
211
|
}
|
|
160
212
|
};
|
|
213
|
+
var CODEX_FALLBACK = ["o3", "o4-mini", "gpt-4.1", "gpt-4o", "gpt-4o-mini"];
|
|
161
214
|
var CodexBackend = class {
|
|
162
215
|
constructor() {
|
|
163
216
|
this.name = "codex";
|
|
164
|
-
this.models = ["o3", "o4-mini", "gpt-4.1", "gpt-4o"];
|
|
165
217
|
this.prefixes = ["gpt", "o3", "o4", "o1"];
|
|
218
|
+
this.models = [...CODEX_FALLBACK];
|
|
166
219
|
}
|
|
167
220
|
get bin() {
|
|
168
221
|
return process.env.CODEX_BIN ?? which("codex") ?? "codex";
|
|
@@ -170,10 +223,24 @@ var CodexBackend = class {
|
|
|
170
223
|
available() {
|
|
171
224
|
return Boolean(which("codex"));
|
|
172
225
|
}
|
|
173
|
-
async
|
|
226
|
+
async fetchLiveModels() {
|
|
227
|
+
const key = process.env.OPENAI_API_KEY;
|
|
228
|
+
if (!key) return [...CODEX_FALLBACK];
|
|
229
|
+
try {
|
|
230
|
+
const data = await httpsGetJson("https://api.openai.com/v1/models", {
|
|
231
|
+
Authorization: `Bearer ${key}`
|
|
232
|
+
});
|
|
233
|
+
const EXCLUDE = /instruct|audio|realtime|transcribe|tts|image|search|embed|diariz|whisper|babbage|davinci|curie|ada/i;
|
|
234
|
+
const ids = (data.data ?? []).map((m) => String(m.id)).filter((id) => /^(gpt-[^i]|o[0-9])/.test(id) && !EXCLUDE.test(id)).sort();
|
|
235
|
+
return ids.length ? ids : [...CODEX_FALLBACK];
|
|
236
|
+
} catch {
|
|
237
|
+
return [...CODEX_FALLBACK];
|
|
238
|
+
}
|
|
239
|
+
}
|
|
240
|
+
async runBlocking(prompt, model2) {
|
|
174
241
|
let out;
|
|
175
242
|
try {
|
|
176
|
-
out = (0, import_child_process.execFileSync)(this.bin, ["-q", "--model",
|
|
243
|
+
out = (0, import_child_process.execFileSync)(this.bin, ["-q", "--model", model2, prompt], {
|
|
177
244
|
encoding: "utf8",
|
|
178
245
|
timeout: 3e5
|
|
179
246
|
});
|
|
@@ -182,15 +249,15 @@ var CodexBackend = class {
|
|
|
182
249
|
}
|
|
183
250
|
return [out.trim(), null];
|
|
184
251
|
}
|
|
185
|
-
async *stream(prompt,
|
|
186
|
-
yield* spawnStream(this.bin, ["-q", "--model",
|
|
252
|
+
async *stream(prompt, model2) {
|
|
253
|
+
yield* spawnStream(this.bin, ["-q", "--model", model2, prompt]);
|
|
187
254
|
}
|
|
188
255
|
};
|
|
189
256
|
var CopilotBackend = class {
|
|
190
257
|
constructor() {
|
|
191
258
|
this.name = "copilot";
|
|
192
|
-
this.models = ["copilot", "github-copilot"];
|
|
193
259
|
this.prefixes = ["copilot", "github-copilot"];
|
|
260
|
+
this.models = ["copilot", "github-copilot"];
|
|
194
261
|
}
|
|
195
262
|
get bin() {
|
|
196
263
|
return process.env.GH_BIN ?? which("gh") ?? "gh";
|
|
@@ -204,7 +271,10 @@ var CopilotBackend = class {
|
|
|
204
271
|
return false;
|
|
205
272
|
}
|
|
206
273
|
}
|
|
207
|
-
async
|
|
274
|
+
async fetchLiveModels() {
|
|
275
|
+
return this.models;
|
|
276
|
+
}
|
|
277
|
+
async runBlocking(prompt, model2) {
|
|
208
278
|
let out;
|
|
209
279
|
try {
|
|
210
280
|
out = (0, import_child_process.execFileSync)(this.bin, ["copilot", "suggest", "-t", "general", prompt], {
|
|
@@ -216,7 +286,7 @@ var CopilotBackend = class {
|
|
|
216
286
|
}
|
|
217
287
|
return [out.trim(), null];
|
|
218
288
|
}
|
|
219
|
-
async *stream(prompt,
|
|
289
|
+
async *stream(prompt, model2) {
|
|
220
290
|
yield* spawnStream(this.bin, ["copilot", "suggest", "-t", "general", prompt]);
|
|
221
291
|
}
|
|
222
292
|
};
|
|
@@ -226,8 +296,8 @@ var BACKENDS = [
|
|
|
226
296
|
new CodexBackend(),
|
|
227
297
|
new CopilotBackend()
|
|
228
298
|
];
|
|
229
|
-
function pickBackend(
|
|
230
|
-
const m =
|
|
299
|
+
function pickBackend(model2) {
|
|
300
|
+
const m = model2.toLowerCase();
|
|
231
301
|
for (const b of BACKENDS) {
|
|
232
302
|
if (b.prefixes.some((p) => m.startsWith(p))) {
|
|
233
303
|
if (b.available()) return b;
|
|
@@ -238,6 +308,14 @@ function pickBackend(model) {
|
|
|
238
308
|
// Collect every available backend's model ids into one flat array.
// Each backend's list is copied, so callers cannot mutate backend state.
function allModels() {
  const out = [];
  for (const backend of BACKENDS) {
    if (backend.available()) out.push(...backend.models);
  }
  return out;
}
|
|
311
|
+
// Replace each available backend's static model list with the live list
// fetched from its provider (each fetchLiveModels handles its own
// failures by falling back). All backends refresh in parallel.
async function refreshModels() {
  const tasks = [];
  for (const backend of BACKENDS) {
    if (!backend.available()) continue;
    tasks.push(
      backend.fetchLiveModels().then((models) => {
        backend.models = models;
      })
    );
  }
  await Promise.all(tasks);
}
|
|
241
319
|
|
|
242
320
|
// src/server.ts
|
|
243
321
|
function sse(data) {
|
|
@@ -245,23 +323,23 @@ function sse(data) {
|
|
|
245
323
|
|
|
246
324
|
`;
|
|
247
325
|
}
|
|
248
|
-
function chunk(id, ts,
|
|
326
|
+
// Build a single OpenAI-compatible streaming delta chunk and return it
// already SSE-encoded (via sse). finish defaults to null while the
// stream is still open.
function chunk(id, ts, model2, delta, finish) {
  const payload = {
    id,
    object: "chat.completion.chunk",
    created: ts,
    model: model2,
    choices: [{ index: 0, delta, finish_reason: finish ?? null }]
  };
  return sse(payload);
}
|
|
257
|
-
function completion(id, ts,
|
|
335
|
+
// Build a non-streaming OpenAI-compatible chat completion response.
// Token counts are summed across the usage field names the different
// backends emit (Anthropic snake_case, Gemini camelCase); a null or
// absent usage yields all-zero counts.
function completion(id, ts, model2, text, usage) {
  let promptTokens = 0;
  let completionTokens = 0;
  if (usage) {
    promptTokens =
      (usage.input_tokens ?? 0) +
      (usage.cache_creation_input_tokens ?? 0) +
      (usage.cache_read_input_tokens ?? 0) +
      (usage.promptTokenCount ?? 0);
    completionTokens = (usage.output_tokens ?? 0) + (usage.candidatesTokenCount ?? 0);
  }
  const message = { role: "assistant", content: text };
  return {
    id,
    object: "chat.completion",
    created: ts,
    model: model2,
    choices: [{ index: 0, message, finish_reason: "stop" }],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens
    }
  };
}
|
|
@@ -313,34 +391,34 @@ async function handleChat(req, res) {
|
|
|
313
391
|
sendJson(res, 400, { error: { message: "messages required", type: "invalid_request_error" } });
|
|
314
392
|
return;
|
|
315
393
|
}
|
|
316
|
-
const
|
|
394
|
+
const model2 = body.model ?? "claude-sonnet-4-6";
|
|
317
395
|
const streaming = Boolean(body.stream);
|
|
318
396
|
const prompt = messagesToPrompt(messages);
|
|
319
|
-
const backend = pickBackend(
|
|
397
|
+
const backend = pickBackend(model2);
|
|
320
398
|
const id = `chatcmpl-${(0, import_crypto.randomUUID)().replace(/-/g, "").slice(0, 20)}`;
|
|
321
399
|
const ts = Math.floor(Date.now() / 1e3);
|
|
322
|
-
console.log(` ${backend.name} model=${
|
|
400
|
+
console.log(` ${backend.name} model=${model2} stream=${streaming} turns=${messages.length}`);
|
|
323
401
|
if (streaming) {
|
|
324
402
|
cors(res, 200);
|
|
325
403
|
res.setHeader("Content-Type", "text/event-stream");
|
|
326
404
|
res.setHeader("Cache-Control", "no-cache");
|
|
327
405
|
res.setHeader("X-Accel-Buffering", "no");
|
|
328
406
|
res.flushHeaders();
|
|
329
|
-
res.write(chunk(id, ts,
|
|
407
|
+
res.write(chunk(id, ts, model2, { role: "assistant" }));
|
|
330
408
|
try {
|
|
331
|
-
for await (const raw of backend.stream(prompt,
|
|
332
|
-
res.write(chunk(id, ts,
|
|
409
|
+
for await (const raw of backend.stream(prompt, model2)) {
|
|
410
|
+
res.write(chunk(id, ts, model2, { content: raw.toString("utf8") }));
|
|
333
411
|
}
|
|
334
412
|
} catch (err) {
|
|
335
413
|
console.error(` stream error: ${err.message}`);
|
|
336
414
|
}
|
|
337
|
-
res.write(chunk(id, ts,
|
|
415
|
+
res.write(chunk(id, ts, model2, {}, "stop"));
|
|
338
416
|
res.write("data: [DONE]\n\n");
|
|
339
417
|
res.end();
|
|
340
418
|
} else {
|
|
341
419
|
try {
|
|
342
|
-
const [text, usage] = await backend.runBlocking(prompt,
|
|
343
|
-
sendJson(res, 200, completion(id, ts,
|
|
420
|
+
const [text, usage] = await backend.runBlocking(prompt, model2);
|
|
421
|
+
sendJson(res, 200, completion(id, ts, model2, text, usage));
|
|
344
422
|
} catch (err) {
|
|
345
423
|
console.error(` error: ${err.message}`);
|
|
346
424
|
sendJson(res, 500, { error: { message: err.message, type: "server_error" } });
|
|
@@ -556,6 +634,7 @@ async function cmdSetup() {
|
|
|
556
634
|
console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
|
|
557
635
|
console.log();
|
|
558
636
|
console.log(" Checking installed backends\u2026");
|
|
637
|
+
await refreshModels();
|
|
559
638
|
console.log();
|
|
560
639
|
const available = BACKENDS.filter((b) => b.available());
|
|
561
640
|
const missing = BACKENDS.filter((b) => !b.available());
|
|
@@ -598,12 +677,15 @@ function parseArgs() {
|
|
|
598
677
|
const args = process.argv.slice(2);
|
|
599
678
|
const cmd2 = args[0] ?? "";
|
|
600
679
|
let port2 = PORT;
|
|
680
|
+
let model2;
|
|
601
681
|
for (let i = 1; i < args.length; i++) {
|
|
602
682
|
if ((args[i] === "--port" || args[i] === "-p") && args[i + 1]) {
|
|
603
683
|
port2 = parseInt(args[++i]);
|
|
684
|
+
} else if ((args[i] === "--model" || args[i] === "-m") && args[i + 1]) {
|
|
685
|
+
model2 = args[++i];
|
|
604
686
|
}
|
|
605
687
|
}
|
|
606
|
-
return { cmd: cmd2, port: port2 };
|
|
688
|
+
return { cmd: cmd2, port: port2, model: model2 };
|
|
607
689
|
}
|
|
608
690
|
function cmdStart(port2) {
|
|
609
691
|
(0, import_fs3.mkdirSync)(LOG_DIR, { recursive: true });
|
|
@@ -702,8 +784,10 @@ function cmdStatus(port2) {
|
|
|
702
784
|
console.log(" Run: bridgerapi install \u2192 install background service");
|
|
703
785
|
}
|
|
704
786
|
}
|
|
705
|
-
function cmdBackends() {
|
|
706
|
-
|
|
787
|
+
async function cmdBackends() {
|
|
788
|
+
process.stdout.write("\n Fetching live model lists\u2026");
|
|
789
|
+
await refreshModels();
|
|
790
|
+
process.stdout.write(" done.\n\n CLI backends:\n\n");
|
|
707
791
|
for (const b of BACKENDS) {
|
|
708
792
|
const ok = b.available();
|
|
709
793
|
const icon = ok ? "\u2713" : "\u2717";
|
|
@@ -719,17 +803,65 @@ function cmdBackends() {
|
|
|
719
803
|
`);
|
|
720
804
|
}
|
|
721
805
|
}
|
|
806
|
+
// Interactive terminal chat session. Picks a backend for the requested
// model (or the first model of the first available backend when none is
// given), then loops: read one user turn from stdin, stream the reply
// to stdout, and append both to the running history. Exits on Ctrl+C /
// stdin close.
async function cmdChat(model2) {
  const available = BACKENDS.filter((b) => b.available());
  if (available.length === 0) {
    console.error(" No backends found. Run: bridgerapi to see setup instructions.");
    process.exit(1);
  }
  const resolvedModel = model2 ?? available[0].models[0];
  const backend = pickBackend(resolvedModel);
  console.log();
  console.log(` bridgerapi chat \u2014 ${backend.name} \u2014 ${resolvedModel}`);
  console.log(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500");
  console.log(" Type your message and press Enter. Ctrl+C to exit.");
  console.log();
  const history = [];
  const rl = (0, import_readline.createInterface)({ input: process.stdin, output: process.stdout });
  rl.on("close", () => {
    console.log("\n Goodbye.");
    process.exit(0);
  });
  const askTurn = () => {
    rl.question("You: ", async (input) => {
      const trimmed = input.trim();
      if (!trimmed) {
        // Empty line: just re-prompt without touching the history.
        askTurn();
        return;
      }
      history.push({ role: "user", content: trimmed });
      process.stdout.write("\n");
      let reply = "";
      try {
        process.stdout.write(`${backend.name}: `);
        // Whole history is flattened into one prompt each turn so the
        // stateless CLI backend sees the conversation context.
        for await (const piece of backend.stream(messagesToPrompt(history), resolvedModel)) {
          const textPiece = piece.toString("utf8");
          process.stdout.write(textPiece);
          reply += textPiece;
        }
      } catch (err) {
        process.stdout.write(`
Error: ${err.message}`);
      }
      process.stdout.write("\n\n");
      // Only record the assistant turn if something actually came back.
      if (reply) history.push({ role: "assistant", content: reply });
      askTurn();
    });
  };
  askTurn();
}
|
|
722
853
|
function showHelp() {
|
|
723
854
|
console.log(`
|
|
724
855
|
bridgerapi \u2014 OpenAI-compatible API bridge for AI CLI tools
|
|
725
856
|
|
|
726
857
|
Usage:
|
|
727
|
-
bridgerapi
|
|
728
|
-
bridgerapi
|
|
729
|
-
bridgerapi
|
|
730
|
-
bridgerapi
|
|
731
|
-
bridgerapi
|
|
732
|
-
bridgerapi
|
|
858
|
+
bridgerapi Interactive setup wizard
|
|
859
|
+
bridgerapi chat [--model <name>] Interactive chat session in terminal
|
|
860
|
+
bridgerapi start [--port n] Start API server in the foreground
|
|
861
|
+
bridgerapi install [--port n] Install as a background service
|
|
862
|
+
bridgerapi uninstall Remove background service
|
|
863
|
+
bridgerapi status Show service status
|
|
864
|
+
bridgerapi backends List detected backends
|
|
733
865
|
|
|
734
866
|
Supported backends (auto-detected):
|
|
735
867
|
claude-* \u2192 Claude Code CLI (claude login)
|
|
@@ -738,12 +870,15 @@ function showHelp() {
|
|
|
738
870
|
copilot \u2192 GitHub Copilot (gh auth login)
|
|
739
871
|
`.trim());
|
|
740
872
|
}
|
|
741
|
-
var { cmd, port } = parseArgs();
|
|
873
|
+
var { cmd, port, model } = parseArgs();
|
|
742
874
|
switch (cmd) {
|
|
743
875
|
case "":
|
|
744
876
|
case "setup":
|
|
745
877
|
cmdSetup();
|
|
746
878
|
break;
|
|
879
|
+
case "chat":
|
|
880
|
+
cmdChat(model);
|
|
881
|
+
break;
|
|
747
882
|
case "start":
|
|
748
883
|
cmdStart(port);
|
|
749
884
|
break;
|
|
@@ -757,7 +892,7 @@ switch (cmd) {
|
|
|
757
892
|
cmdStatus(port);
|
|
758
893
|
break;
|
|
759
894
|
case "backends":
|
|
760
|
-
cmdBackends();
|
|
895
|
+
cmdBackends().catch(console.error);
|
|
761
896
|
break;
|
|
762
897
|
case "help":
|
|
763
898
|
case "--help":
|