@elvatis_com/openclaw-cli-bridge-elvatis 2.8.2 → 2.8.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,7 +2,7 @@
 
  > OpenClaw plugin that bridges locally installed AI CLIs (Codex, Gemini, Claude Code, OpenCode, Pi) as model providers — with slash commands for instant model switching, restore, health testing, and model listing.
 
- **Current version:** `2.8.2`
+ **Current version:** `2.8.3`
 
  ---
 
@@ -406,6 +406,9 @@ npm run ci # lint + typecheck + test
 
  ## Changelog
 
+ ### v2.8.3
+ - **fix:** transparent fallback — responses now always report the originally requested model, preventing clients from "sticking" on Haiku after a single Sonnet timeout. Fallback still happens internally for resilience, but is invisible to the client.
+
  ### v2.8.2
  - **fix:** increase Sonnet-4-6 and Opus-4-6 base timeout 300s→420s (7 min) to prevent premature timeout→Haiku fallback on large webchat sessions
  - **fix:** increase Haiku-4-5 base timeout 90s→120s for better reliability as fallback model
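The v2.8.3 changelog entry above describes the behavioral change in plain terms: the fallback path still runs, but the model name echoed to the client no longer switches. A minimal TypeScript sketch of that pattern; all names here (`runWithFallback`, `BridgeOutcome`, the `call` callback) are illustrative, not the plugin's actual API:

```ts
// Minimal sketch of the "transparent fallback" behavior described in v2.8.3.
// Identifiers are illustrative, not the plugin's real code.
interface BridgeOutcome {
  content: string;
  usedModel: string;     // the model that actually answered (logging/metrics only)
  reportedModel: string; // the model name echoed back to the client
}

async function runWithFallback(
  requestedModel: string,
  fallbackModel: string,
  call: (model: string) => Promise<string>,
): Promise<BridgeOutcome> {
  try {
    const content = await call(requestedModel);
    return { content, usedModel: requestedModel, reportedModel: requestedModel };
  } catch {
    // Fallback still happens internally for resilience, but the response keeps
    // reporting the originally requested model, so the client never "sticks"
    // on the fallback model after a single timeout.
    const content = await call(fallbackModel);
    return { content, usedModel: fallbackModel, reportedModel: requestedModel };
  }
}
```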
package/SKILL.md CHANGED
@@ -68,4 +68,4 @@ On gateway restart, if any session has expired, a **WhatsApp alert** is sent aut
 
  See `README.md` for full configuration reference and architecture diagram.
 
- **Version:** 2.8.2
+ **Version:** 2.8.3
@@ -2,7 +2,7 @@
  "id": "openclaw-cli-bridge-elvatis",
  "slug": "openclaw-cli-bridge-elvatis",
  "name": "OpenClaw CLI Bridge",
- "version": "2.8.2",
+ "version": "2.8.3",
  "license": "MIT",
  "description": "Phase 1: openai-codex auth bridge. Phase 2: local HTTP proxy routing model calls through gemini/claude CLIs (vllm provider).",
  "providers": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@elvatis_com/openclaw-cli-bridge-elvatis",
- "version": "2.8.2",
+ "version": "2.8.3",
  "description": "Bridges gemini, claude, and codex CLI tools as OpenClaw model providers. Reads existing CLI auth without re-login.",
  "type": "module",
  "openclaw": {
@@ -807,7 +807,7 @@ async function handleRequest(
  const fbCompTokens = estimateTokens(result.content ?? "");
  metrics.recordRequest(fallbackModel, Date.now() - fallbackStart, true, estPromptTokens, fbCompTokens);
  usedModel = fallbackModel;
- opts.log(`[cli-bridge] fallback to ${fallbackModel} succeeded`);
+ opts.log(`[cli-bridge] fallback to ${fallbackModel} succeeded (response will report original model: ${model})`);
  } catch (fallbackErr) {
  metrics.recordRequest(fallbackModel, Date.now() - fallbackStart, false, estPromptTokens);
  const fallbackMsg = (fallbackErr as Error).message;
@@ -851,7 +851,7 @@ async function handleRequest(
  const toolCalls = result.tool_calls!;
  // Role chunk with all tool_calls (name + empty arguments)
  sendSseChunk(res, {
- id, created, model: usedModel,
+ id, created, model,
  delta: {
  role: "assistant",
  tool_calls: toolCalls.map((tc, idx) => ({
@@ -864,7 +864,7 @@ async function handleRequest(
  // Arguments chunks (one per tool call)
  for (let idx = 0; idx < toolCalls.length; idx++) {
  sendSseChunk(res, {
- id, created, model: usedModel,
+ id, created, model,
  delta: {
  tool_calls: [{ index: idx, function: { arguments: toolCalls[idx].function.arguments } }],
  },
@@ -872,20 +872,20 @@ async function handleRequest(
  });
  }
  // Stop chunk
- sendSseChunk(res, { id, created, model: usedModel, delta: {}, finish_reason: "tool_calls" });
+ sendSseChunk(res, { id, created, model, delta: {}, finish_reason: "tool_calls" });
  } else {
  // Standard text streaming
- sendSseChunk(res, { id, created, model: usedModel, delta: { role: "assistant" }, finish_reason: null });
+ sendSseChunk(res, { id, created, model, delta: { role: "assistant" }, finish_reason: null });
  const content = result.content ?? "";
  const chunkSize = 50;
  for (let i = 0; i < content.length; i += chunkSize) {
  sendSseChunk(res, {
- id, created, model: usedModel,
+ id, created, model,
  delta: { content: content.slice(i, i + chunkSize) },
  finish_reason: null,
  });
  }
- sendSseChunk(res, { id, created, model: usedModel, delta: {}, finish_reason: "stop" });
+ sendSseChunk(res, { id, created, model, delta: {}, finish_reason: "stop" });
  }
 
  res.write("data: [DONE]\n\n");
@@ -903,7 +903,7 @@ async function handleRequest(
  id,
  object: "chat.completion",
  created,
- model: usedModel,
+ model,
  choices: [
  {
  index: 0,
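Taken together, the hunks above replace `model: usedModel` with `model` in every client-facing payload (the SSE role, content, and stop chunks as well as the non-streaming chat.completion object), while `usedModel` remains available for logging and metrics. A rough sketch of the resulting non-streaming payload shape, assuming OpenAI-compatible fields as shown in the diff; the helper name and parameter types are illustrative, not the plugin's actual code:

```ts
// Sketch of the non-streaming response after 2.8.3: `model` echoes what the
// client asked for; `usedModel` only shows up in logs.
function buildCompletion(
  id: string,
  created: number,
  model: string,     // originally requested model, echoed back to the client
  usedModel: string, // model that actually produced the text
  content: string,
) {
  console.log(`[cli-bridge] served via ${usedModel}, reporting ${model}`);
  return {
    id,
    object: "chat.completion",
    created,
    model, // 2.8.2 put `usedModel` here, which let clients "stick" on Haiku after one fallback
    choices: [
      { index: 0, message: { role: "assistant", content }, finish_reason: "stop" },
    ],
  };
}
```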