@elvatis_com/openclaw-cli-bridge-elvatis 2.8.1 → 2.8.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -1
- package/SKILL.md +1 -1
- package/openclaw.plugin.json +1 -1
- package/package.json +1 -1
- package/src/config.ts +6 -6
- package/src/proxy-server.ts +11 -11
- package/test/config.test.ts +3 -3
package/README.md
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
|
|
3
3
|
> OpenClaw plugin that bridges locally installed AI CLIs (Codex, Gemini, Claude Code, OpenCode, Pi) as model providers — with slash commands for instant model switching, restore, health testing, and model listing.
|
|
4
4
|
|
|
5
|
-
**Current version:** `2.8.1`
|
|
5
|
+
**Current version:** `2.8.3`
|
|
6
6
|
|
|
7
7
|
---
|
|
8
8
|
|
|
@@ -406,6 +406,13 @@ npm run ci # lint + typecheck + test
|
|
|
406
406
|
|
|
407
407
|
## Changelog
|
|
408
408
|
|
|
409
|
+
### v2.8.3
|
|
410
|
+
- **fix:** transparent fallback — responses now always report the originally requested model, preventing clients from "sticking" on Haiku after a single Sonnet timeout. Fallback still happens internally for resilience, but is invisible to the client.
|
|
411
|
+
|
|
412
|
+
### v2.8.2
|
|
413
|
+
- **fix:** increase Sonnet-4-6 and Opus-4-6 base timeout 300s→420s (7 min) to prevent premature timeout→Haiku fallback on large webchat sessions
|
|
414
|
+
- **fix:** increase Haiku-4-5 base timeout 90s→120s for better reliability as fallback model
|
|
415
|
+
|
|
409
416
|
### v2.8.1
|
|
410
417
|
- **fix:** increase Sonnet-4-6 base timeout from 180s to 300s to prevent premature SIGTERM kills causing FailoverError fallback to gpt-5.2-codex
|
|
411
418
|
|
package/SKILL.md
CHANGED
package/openclaw.plugin.json
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
"id": "openclaw-cli-bridge-elvatis",
|
|
3
3
|
"slug": "openclaw-cli-bridge-elvatis",
|
|
4
4
|
"name": "OpenClaw CLI Bridge",
|
|
5
|
-
"version": "2.8.1",
|
|
5
|
+
"version": "2.8.3",
|
|
6
6
|
"license": "MIT",
|
|
7
7
|
"description": "Phase 1: openai-codex auth bridge. Phase 2: local HTTP proxy routing model calls through gemini/claude CLIs (vllm provider).",
|
|
8
8
|
"providers": [
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@elvatis_com/openclaw-cli-bridge-elvatis",
|
|
3
|
-
"version": "2.8.1",
|
|
3
|
+
"version": "2.8.3",
|
|
4
4
|
"description": "Bridges gemini, claude, and codex CLI tools as OpenClaw model providers. Reads existing CLI auth without re-login.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"openclaw": {
|
package/src/config.ts
CHANGED
|
@@ -86,14 +86,14 @@ export const PROVIDER_SESSION_SWEEP_MS = 10 * 60 * 1_000; // 10 min
|
|
|
86
86
|
* Override via `modelTimeouts` in plugin config.
|
|
87
87
|
*
|
|
88
88
|
* Strategy:
|
|
89
|
-
* - Heavy/agentic models (Opus, GPT-5.4):
|
|
90
|
-
* - Standard interactive (Sonnet, Pro, GPT-5.3):
|
|
91
|
-
* - Fast/lightweight (Haiku, Flash, Mini):
|
|
89
|
+
* - Heavy/agentic models (Opus, GPT-5.4): 7 min — need time for tool use + large sessions
|
|
90
|
+
* - Standard interactive (Sonnet, Pro, GPT-5.3): 7 min — prevents premature fallback to Haiku
|
|
91
|
+
* - Fast/lightweight (Haiku, Flash, Mini): 120s
|
|
92
92
|
*/
|
|
93
93
|
export const DEFAULT_MODEL_TIMEOUTS: Record<string, number> = {
|
|
94
|
-
"cli-claude/claude-opus-4-6": 300_000,
|
|
95
|
-
"cli-claude/claude-sonnet-4-6": 300_000,
|
|
96
|
-
"cli-claude/claude-haiku-4-5": 90_000,
|
|
94
|
+
"cli-claude/claude-opus-4-6": 420_000, // 7 min
|
|
95
|
+
"cli-claude/claude-sonnet-4-6": 420_000, // 7 min — prevent timeout→Haiku fallback on large sessions
|
|
96
|
+
"cli-claude/claude-haiku-4-5": 120_000, // 2 min
|
|
97
97
|
"cli-gemini/gemini-2.5-pro": 300_000, // 5 min — image generation needs more time
|
|
98
98
|
"cli-gemini/gemini-2.5-flash": 180_000, // 3 min
|
|
99
99
|
"cli-gemini/gemini-3-pro-preview": 300_000, // 5 min — image generation needs more time
|
package/src/proxy-server.ts
CHANGED
|
@@ -104,9 +104,9 @@ export interface ProxyServerOptions {
|
|
|
104
104
|
*
|
|
105
105
|
* Example:
|
|
106
106
|
* {
|
|
107
|
-
* "cli-claude/claude-sonnet-4-6":
|
|
108
|
-
* "cli-claude/claude-opus-4-6":
|
|
109
|
-
* "cli-claude/claude-haiku-4-5":
|
|
107
|
+
* "cli-claude/claude-sonnet-4-6": 420_000, // 7 min for interactive chat
|
|
108
|
+
* "cli-claude/claude-opus-4-6": 420_000, // 7 min for heavy tasks
|
|
109
|
+
* "cli-claude/claude-haiku-4-5": 120_000, // 2 min for fast responses
|
|
110
110
|
* }
|
|
111
111
|
*
|
|
112
112
|
* When not set for a model, falls back to proxyTimeoutMs (default 300s base).
|
|
@@ -807,7 +807,7 @@ async function handleRequest(
|
|
|
807
807
|
const fbCompTokens = estimateTokens(result.content ?? "");
|
|
808
808
|
metrics.recordRequest(fallbackModel, Date.now() - fallbackStart, true, estPromptTokens, fbCompTokens);
|
|
809
809
|
usedModel = fallbackModel;
|
|
810
|
-
opts.log(`[cli-bridge] fallback to ${fallbackModel} succeeded`);
|
|
810
|
+
opts.log(`[cli-bridge] fallback to ${fallbackModel} succeeded (response will report original model: ${model})`);
|
|
811
811
|
} catch (fallbackErr) {
|
|
812
812
|
metrics.recordRequest(fallbackModel, Date.now() - fallbackStart, false, estPromptTokens);
|
|
813
813
|
const fallbackMsg = (fallbackErr as Error).message;
|
|
@@ -851,7 +851,7 @@ async function handleRequest(
|
|
|
851
851
|
const toolCalls = result.tool_calls!;
|
|
852
852
|
// Role chunk with all tool_calls (name + empty arguments)
|
|
853
853
|
sendSseChunk(res, {
|
|
854
|
-
id, created, model: usedModel,
|
|
854
|
+
id, created, model,
|
|
855
855
|
delta: {
|
|
856
856
|
role: "assistant",
|
|
857
857
|
tool_calls: toolCalls.map((tc, idx) => ({
|
|
@@ -864,7 +864,7 @@ async function handleRequest(
|
|
|
864
864
|
// Arguments chunks (one per tool call)
|
|
865
865
|
for (let idx = 0; idx < toolCalls.length; idx++) {
|
|
866
866
|
sendSseChunk(res, {
|
|
867
|
-
id, created, model: usedModel,
|
|
867
|
+
id, created, model,
|
|
868
868
|
delta: {
|
|
869
869
|
tool_calls: [{ index: idx, function: { arguments: toolCalls[idx].function.arguments } }],
|
|
870
870
|
},
|
|
@@ -872,20 +872,20 @@ async function handleRequest(
|
|
|
872
872
|
});
|
|
873
873
|
}
|
|
874
874
|
// Stop chunk
|
|
875
|
-
sendSseChunk(res, { id, created, model: usedModel, delta: {}, finish_reason: "tool_calls" });
|
|
875
|
+
sendSseChunk(res, { id, created, model, delta: {}, finish_reason: "tool_calls" });
|
|
876
876
|
} else {
|
|
877
877
|
// Standard text streaming
|
|
878
|
-
sendSseChunk(res, { id, created, model: usedModel, delta: { role: "assistant" }, finish_reason: null });
|
|
878
|
+
sendSseChunk(res, { id, created, model, delta: { role: "assistant" }, finish_reason: null });
|
|
879
879
|
const content = result.content ?? "";
|
|
880
880
|
const chunkSize = 50;
|
|
881
881
|
for (let i = 0; i < content.length; i += chunkSize) {
|
|
882
882
|
sendSseChunk(res, {
|
|
883
|
-
id, created, model: usedModel,
|
|
883
|
+
id, created, model,
|
|
884
884
|
delta: { content: content.slice(i, i + chunkSize) },
|
|
885
885
|
finish_reason: null,
|
|
886
886
|
});
|
|
887
887
|
}
|
|
888
|
-
sendSseChunk(res, { id, created, model: usedModel, delta: {}, finish_reason: "stop" });
|
|
888
|
+
sendSseChunk(res, { id, created, model, delta: {}, finish_reason: "stop" });
|
|
889
889
|
}
|
|
890
890
|
|
|
891
891
|
res.write("data: [DONE]\n\n");
|
|
@@ -903,7 +903,7 @@ async function handleRequest(
|
|
|
903
903
|
id,
|
|
904
904
|
object: "chat.completion",
|
|
905
905
|
created,
|
|
906
|
-
model: usedModel,
|
|
906
|
+
model,
|
|
907
907
|
choices: [
|
|
908
908
|
{
|
|
909
909
|
index: 0,
|
package/test/config.test.ts
CHANGED
|
@@ -61,9 +61,9 @@ describe("config.ts exports", () => {
|
|
|
61
61
|
});
|
|
62
62
|
|
|
63
63
|
it("exports per-model timeouts for all major models", () => {
|
|
64
|
-
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-opus-4-6"]).toBe(300_000);
|
|
65
|
-
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-sonnet-4-6"]).toBe(300_000);
|
|
66
|
-
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-haiku-4-5"]).toBe(90_000);
|
|
64
|
+
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-opus-4-6"]).toBe(420_000);
|
|
65
|
+
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-sonnet-4-6"]).toBe(420_000);
|
|
66
|
+
expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-haiku-4-5"]).toBe(120_000);
|
|
67
67
|
expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-pro"]).toBe(300_000);
|
|
68
68
|
expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-flash"]).toBe(180_000);
|
|
69
69
|
expect(DEFAULT_MODEL_TIMEOUTS["openai-codex/gpt-5.4"]).toBe(300_000);
|