@elvatis_com/openclaw-cli-bridge-elvatis 3.0.0 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  > OpenClaw plugin that bridges locally installed AI CLIs (Codex, Gemini, Claude Code, OpenCode, Pi) as model providers — with slash commands for instant model switching, restore, health testing, and model listing.
4
4
 
5
- **Current version:** `3.0.0`
5
+ **Current version:** `3.1.0`
6
6
 
7
7
  ---
8
8
 
@@ -406,6 +406,12 @@ npm run ci # lint + typecheck + test
406
406
 
407
407
  ## Changelog
408
408
 
409
+ ### v3.1.0
410
+ - **feat:** cross-provider fallback chains — Sonnet → Haiku → Gemini Flash → Codex (was single-model fallback only)
411
+ - **feat:** fallback chain loop — tries each model in order until one succeeds, logs each attempt
412
+ - **fix:** live logs newest-on-top — latest entries now appear at the top of the log viewer
413
+ - **feat:** SSE fallback notifications for each chain attempt so the user can see what's happening
414
+
409
415
  ### v3.0.0
410
416
  - **feat:** dashboard v2 — sidebar navigation with 9 sections (Overview, Providers, Active, Requests, Fallbacks, Sessions, Live Logs, Timeouts, Models)
411
417
  - **feat:** live log viewer — SSE-powered real-time log streaming with color-coded categories, auto-scroll, pause/resume, 500-line client buffer
package/SKILL.md CHANGED
@@ -68,4 +68,4 @@ On gateway restart, if any session has expired, a **WhatsApp alert** is sent aut
68
68
 
69
69
  See `README.md` for full configuration reference and architecture diagram.
70
70
 
71
- **Version:** 3.0.0
71
+ **Version:** 3.1.0
@@ -2,7 +2,7 @@
2
2
  "id": "openclaw-cli-bridge-elvatis",
3
3
  "slug": "openclaw-cli-bridge-elvatis",
4
4
  "name": "OpenClaw CLI Bridge",
5
- "version": "3.0.0",
5
+ "version": "3.1.0",
6
6
  "license": "MIT",
7
7
  "description": "Phase 1: openai-codex auth bridge. Phase 2: local HTTP proxy routing model calls through gemini/claude CLIs (vllm provider).",
8
8
  "providers": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@elvatis_com/openclaw-cli-bridge-elvatis",
3
- "version": "3.0.0",
3
+ "version": "3.1.0",
4
4
  "description": "Bridges gemini, claude, and codex CLI tools as OpenClaw model providers. Reads existing CLI auth without re-login.",
5
5
  "type": "module",
6
6
  "openclaw": {
package/src/config.ts CHANGED
@@ -138,15 +138,21 @@ export const DEFAULT_MODEL_TIMEOUTS: Record<string, number> = {
138
138
  // ──────────────────────────────────────────────────────────────────────────────
139
139
 
140
140
  /**
141
- * Default fallback chain: when a primary model fails (timeout, error),
142
- * retry once with the lighter variant.
141
+ * Default fallback chains: when a primary model fails (timeout, stale, error),
142
+ * try each fallback in order. Cross-provider chains ensure we use all available
143
+ * models instead of just falling back within one provider.
144
+ *
145
+ * Strategy: same-provider fast model first, then cross-provider alternatives.
143
146
  */
144
- export const DEFAULT_MODEL_FALLBACKS: Record<string, string> = {
145
- "cli-gemini/gemini-2.5-pro": "cli-gemini/gemini-2.5-flash",
146
- "cli-gemini/gemini-3-pro-preview": "cli-gemini/gemini-3-flash-preview",
147
- "cli-claude/claude-opus-4-6": "cli-claude/claude-sonnet-4-6",
148
- "cli-claude/claude-sonnet-4-6": "cli-claude/claude-haiku-4-5",
149
- "gemini-api/gemini-2.5-pro": "gemini-api/gemini-2.5-flash",
147
+ export const DEFAULT_MODEL_FALLBACKS: Record<string, string[]> = {
148
+ "cli-claude/claude-opus-4-6": ["cli-claude/claude-sonnet-4-6", "cli-gemini/gemini-2.5-pro", "cli-claude/claude-haiku-4-5"],
149
+ "cli-claude/claude-sonnet-4-6": ["cli-claude/claude-haiku-4-5", "cli-gemini/gemini-2.5-flash", "openai-codex/gpt-5.3-codex"],
150
+ "cli-claude/claude-haiku-4-5": ["cli-gemini/gemini-2.5-flash", "openai-codex/gpt-5.1-codex-mini"],
151
+ "cli-gemini/gemini-2.5-pro": ["cli-gemini/gemini-2.5-flash", "cli-claude/claude-haiku-4-5"],
152
+ "cli-gemini/gemini-3-pro-preview": ["cli-gemini/gemini-3-flash-preview", "cli-gemini/gemini-2.5-flash"],
153
+ "openai-codex/gpt-5.4": ["openai-codex/gpt-5.3-codex", "cli-claude/claude-haiku-4-5"],
154
+ "openai-codex/gpt-5.3-codex": ["openai-codex/gpt-5.1-codex-mini", "cli-gemini/gemini-2.5-flash"],
155
+ "gemini-api/gemini-2.5-pro": ["gemini-api/gemini-2.5-flash"],
150
156
  };
151
157
 
152
158
  // ──────────────────────────────────────────────────────────────────────────────
package/src/debug-log.ts CHANGED
@@ -62,7 +62,7 @@ export function getLogTail(lines = 100): string | null {
62
62
  try {
63
63
  const content = readFileSync(LOG_FILE, "utf8");
64
64
  const allLines = content.split("\n").filter(Boolean);
65
- return allLines.slice(-lines).join("\n");
65
+ return allLines.slice(-lines).reverse().join("\n");
66
66
  } catch {
67
67
  return null;
68
68
  }
@@ -117,7 +117,7 @@ export interface ProxyServerOptions {
117
117
  * When a CLI model fails (timeout, error), the request is retried with each
118
118
  * fallback in order until one succeeds. Example: "cli-gemini/gemini-2.5-pro" → ["cli-gemini/gemini-2.5-flash", …]
119
119
  */
120
- modelFallbacks?: Record<string, string>;
120
+ modelFallbacks?: Record<string, string | string[]>;
121
121
  /**
122
122
  * Per-model timeout overrides (ms). Keys are model IDs (without "vllm/" prefix).
123
123
  * Use this to give heavy models more time or limit fast models.
@@ -917,36 +917,55 @@ async function handleRequest(
917
917
  debugLog("FAIL", `${model} failed after ${(primaryDuration / 1000).toFixed(1)}s`, { isTimeout, error: msg.slice(0, 200) });
918
918
  // Record the run (with timeout flag) — session is preserved, not deleted
919
919
  providerSessions.recordRun(session.id, isTimeout);
920
- const fallbackModel = opts.modelFallbacks?.[model];
921
- if (fallbackModel) {
920
+ // ── Multi-model fallback chain: try each fallback in order ──────────
921
+ // Chains cross providers: Sonnet → Haiku → Gemini Flash → Codex
922
+ const rawFallbacks = opts.modelFallbacks?.[model];
923
+ const fallbackChain: string[] = Array.isArray(rawFallbacks) ? rawFallbacks
924
+ : typeof rawFallbacks === "string" ? [rawFallbacks]
925
+ : [];
926
+
927
+ if (fallbackChain.length > 0) {
922
928
  metrics.recordRequest(model, primaryDuration, false, estPromptTokens, undefined, promptPreview);
923
929
  const reason = isTimeout ? `timeout by supervisor, session=${session.id} preserved` : msg;
924
- opts.warn(`[cli-bridge] ${model} failed (${reason}), falling back to ${fallbackModel}`);
925
- debugLog("FALLBACK", `${model} → ${fallbackModel}`, { reason: isTimeout ? "timeout" : "error", primaryDuration: Math.round(primaryDuration / 1000) });
926
- // Notify the user via SSE that we're retrying with a different model
927
- if (sseHeadersSent) {
928
- res.write(`: fallback ${model} ${isTimeout ? "timed out" : "failed"} after ${Math.round(primaryDuration / 1000)}s, retrying with ${fallbackModel}\n\n`);
930
+ opts.warn(`[cli-bridge] ${model} failed (${reason}), trying fallback chain: ${fallbackChain.join(" → ")}`);
931
+
932
+ let chainSuccess = false;
933
+ for (const fallbackModel of fallbackChain) {
934
+ debugLog("FALLBACK", `${model} ${fallbackModel}`, { reason: isTimeout ? "timeout" : "error", primaryDuration: Math.round(primaryDuration / 1000), chain: fallbackChain });
935
+ if (sseHeadersSent) {
936
+ res.write(`: fallback — trying ${fallbackModel}\n\n`);
937
+ }
938
+ const fallbackStart = Date.now();
939
+ try {
940
+ result = await routeToCliRunner(fallbackModel, cleanMessages, effectiveTimeout, routeOpts);
941
+ const fbCompTokens = estimateTokens(result.content ?? "");
942
+ metrics.recordRequest(fallbackModel, Date.now() - fallbackStart, true, estPromptTokens, fbCompTokens, promptPreview);
943
+ metrics.recordFallback(model, fallbackModel, isTimeout ? "timeout" : "error", primaryDuration, true);
944
+ usedModel = fallbackModel;
945
+ debugLog("FALLBACK-OK", `${fallbackModel} succeeded in ${((Date.now() - fallbackStart) / 1000).toFixed(1)}s`, { toolCalls: result.tool_calls?.length ?? 0 });
946
+ opts.log(`[cli-bridge] fallback to ${fallbackModel} succeeded`);
947
+ chainSuccess = true;
948
+ break;
949
+ } catch (fallbackErr) {
950
+ const fbDuration = Date.now() - fallbackStart;
951
+ metrics.recordRequest(fallbackModel, fbDuration, false, estPromptTokens, undefined, promptPreview);
952
+ metrics.recordFallback(model, fallbackModel, isTimeout ? "timeout" : "error", primaryDuration, false);
953
+ const fallbackMsg = (fallbackErr as Error).message;
954
+ debugLog("FALLBACK-FAIL", `${fallbackModel} failed after ${(fbDuration / 1000).toFixed(1)}s`, { error: fallbackMsg.slice(0, 150) });
955
+ opts.warn(`[cli-bridge] fallback ${fallbackModel} failed: ${fallbackMsg.slice(0, 100)}`);
956
+ // Continue to next fallback in chain
957
+ }
929
958
  }
930
- const fallbackStart = Date.now();
931
- try {
932
- result = await routeToCliRunner(fallbackModel, cleanMessages, effectiveTimeout, routeOpts);
933
- const fbCompTokens = estimateTokens(result.content ?? "");
934
- metrics.recordRequest(fallbackModel, Date.now() - fallbackStart, true, estPromptTokens, fbCompTokens, promptPreview);
935
- metrics.recordFallback(model, fallbackModel, isTimeout ? "timeout" : "error", primaryDuration, true);
936
- usedModel = fallbackModel;
937
- opts.log(`[cli-bridge] fallback to ${fallbackModel} succeeded (response will report original model: ${model})`);
938
- } catch (fallbackErr) {
939
- metrics.recordRequest(fallbackModel, Date.now() - fallbackStart, false, estPromptTokens, undefined, promptPreview);
940
- metrics.recordFallback(model, fallbackModel, isTimeout ? "timeout" : "error", primaryDuration, false);
941
- const fallbackMsg = (fallbackErr as Error).message;
942
- opts.warn(`[cli-bridge] fallback ${fallbackModel} also failed: ${fallbackMsg}`);
959
+
960
+ if (!chainSuccess) {
961
+ const chainStr = fallbackChain.join(", ");
943
962
  if (sseHeadersSent) {
944
- res.write(`data: ${JSON.stringify({ error: { message: `${model}: ${msg} | fallback ${fallbackModel}: ${fallbackMsg}`, type: "cli_error" } })}\n\n`);
963
+ res.write(`data: ${JSON.stringify({ error: { message: `${model} and all fallbacks (${chainStr}) failed`, type: "cli_error" } })}\n\n`);
945
964
  res.write("data: [DONE]\n\n");
946
965
  res.end();
947
966
  } else {
948
967
  res.writeHead(500, { "Content-Type": "application/json" });
949
- res.end(JSON.stringify({ error: { message: `${model}: ${msg} | fallback ${fallbackModel}: ${fallbackMsg}`, type: "cli_error" } }));
968
+ res.end(JSON.stringify({ error: { message: `${model} and all fallbacks (${chainStr}) failed`, type: "cli_error" } }));
950
969
  }
951
970
  return;
952
971
  }
@@ -694,18 +694,20 @@ export function renderStatusPage(opts: StatusTemplateOptions): string {
694
694
  function appendLog(text) {
695
695
  if (!logOutput) return;
696
696
  var lines = text.split('\\n').filter(function(l) { return l.trim(); });
697
+ // Newest on top — prepend lines in reverse order
698
+ var html = '';
697
699
  lines.forEach(function(line) {
698
- logOutput.innerHTML += colorLogLine(line.replace(/</g, '&lt;').replace(/>/g, '&gt;')) + '\\n';
700
+ html = colorLogLine(line.replace(/</g, '&lt;').replace(/>/g, '&gt;')) + '\\n' + html;
699
701
  logLineCount++;
700
702
  });
701
- // Trim old lines
703
+ logOutput.innerHTML = html + logOutput.innerHTML;
704
+ // Trim old lines from bottom
702
705
  while (logLineCount > MAX_LOG_LINES) {
703
- var idx = logOutput.innerHTML.indexOf('\\n');
706
+ var idx = logOutput.innerHTML.lastIndexOf('\\n');
704
707
  if (idx === -1) break;
705
- logOutput.innerHTML = logOutput.innerHTML.slice(idx + 1);
708
+ logOutput.innerHTML = logOutput.innerHTML.slice(0, idx);
706
709
  logLineCount--;
707
710
  }
708
- if (autoScroll) logOutput.scrollTop = logOutput.scrollHeight;
709
711
  }
710
712
 
711
713
  function connectLog() {
@@ -71,11 +71,15 @@ describe("config.ts exports", () => {
71
71
  expect(DEFAULT_MODEL_TIMEOUTS["gemini-api/gemini-2.5-flash"]).toBe(180_000);
72
72
  });
73
73
 
74
- it("exports model fallback chains", () => {
75
- expect(DEFAULT_MODEL_FALLBACKS["cli-claude/claude-sonnet-4-6"]).toBe("cli-claude/claude-haiku-4-5");
76
- expect(DEFAULT_MODEL_FALLBACKS["cli-claude/claude-opus-4-6"]).toBe("cli-claude/claude-sonnet-4-6");
77
- expect(DEFAULT_MODEL_FALLBACKS["cli-gemini/gemini-2.5-pro"]).toBe("cli-gemini/gemini-2.5-flash");
78
- expect(DEFAULT_MODEL_FALLBACKS["gemini-api/gemini-2.5-pro"]).toBe("gemini-api/gemini-2.5-flash");
74
+ it("exports model fallback chains as arrays", () => {
75
+ expect(DEFAULT_MODEL_FALLBACKS["cli-claude/claude-sonnet-4-6"]).toEqual(["cli-claude/claude-haiku-4-5", "cli-gemini/gemini-2.5-flash", "openai-codex/gpt-5.3-codex"]);
76
+ expect(DEFAULT_MODEL_FALLBACKS["cli-claude/claude-opus-4-6"]).toContain("cli-claude/claude-sonnet-4-6");
77
+ expect(DEFAULT_MODEL_FALLBACKS["cli-gemini/gemini-2.5-pro"]).toContain("cli-gemini/gemini-2.5-flash");
78
+ expect(DEFAULT_MODEL_FALLBACKS["gemini-api/gemini-2.5-pro"]).toContain("gemini-api/gemini-2.5-flash");
79
+ // All values must be arrays
80
+ for (const chain of Object.values(DEFAULT_MODEL_FALLBACKS)) {
81
+ expect(Array.isArray(chain)).toBe(true);
82
+ }
79
83
  });
80
84
 
81
85
  it("exports path constants rooted in ~/.openclaw", () => {