@elvatis_com/openclaw-cli-bridge-elvatis 2.8.0 → 2.8.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  > OpenClaw plugin that bridges locally installed AI CLIs (Codex, Gemini, Claude Code, OpenCode, Pi) as model providers — with slash commands for instant model switching, restore, health testing, and model listing.
4
4
 
5
- **Current version:** `2.8.0`
5
+ **Current version:** `2.8.2`
6
6
 
7
7
  ---
8
8
 
@@ -406,6 +406,13 @@ npm run ci # lint + typecheck + test
406
406
 
407
407
  ## Changelog
408
408
 
409
+ ### v2.8.2
410
+ - **fix:** increase Sonnet-4-6 and Opus-4-6 base timeout 300s→420s (7 min) to prevent premature timeout→Haiku fallback on large webchat sessions
411
+ - **fix:** increase Haiku-4-5 base timeout 90s→120s for better reliability as fallback model
412
+
413
+ ### v2.8.1
414
+ - **fix:** increase Sonnet-4-6 base timeout from 180s to 300s to prevent premature SIGTERM kills causing FailoverError fallback to gpt-5.2-codex
415
+
409
416
  ### v2.8.0
410
417
  - **feat:** Gemini API provider (`gemini-api/gemini-2.5-flash`, `gemini-api/gemini-2.5-pro`) — direct Google Generative AI SDK integration with native **image generation** support via `responseModalities: ["TEXT", "IMAGE"]`. No CLI subprocess overhead, no browser needed.
411
418
  - **feat:** Images returned as base64 data URIs in OpenAI-compatible `content_parts` format — works with OpenClaw multimodal rendering
package/SKILL.md CHANGED
@@ -68,4 +68,4 @@ On gateway restart, if any session has expired, a **WhatsApp alert** is sent aut
68
68
 
69
69
  See `README.md` for full configuration reference and architecture diagram.
70
70
 
71
- **Version:** 2.8.0
71
+ **Version:** 2.8.2
package/(plugin manifest JSON — file header missing from the original diff; filename inferred from the manifest-style content: id/slug/providers — confirm against the published package) CHANGED
@@ -2,7 +2,7 @@
2
2
  "id": "openclaw-cli-bridge-elvatis",
3
3
  "slug": "openclaw-cli-bridge-elvatis",
4
4
  "name": "OpenClaw CLI Bridge",
5
- "version": "2.8.0",
5
+ "version": "2.8.2",
6
6
  "license": "MIT",
7
7
  "description": "Phase 1: openai-codex auth bridge. Phase 2: local HTTP proxy routing model calls through gemini/claude CLIs (vllm provider).",
8
8
  "providers": [
@@ -44,7 +44,7 @@
44
44
  },
45
45
  "default": {
46
46
  "cli-claude/claude-opus-4-6": 300000,
47
- "cli-claude/claude-sonnet-4-6": 180000,
47
+ "cli-claude/claude-sonnet-4-6": 300000,
48
48
  "cli-claude/claude-haiku-4-5": 90000,
49
49
  "cli-gemini/gemini-2.5-pro": 300000,
50
50
  "cli-gemini/gemini-2.5-flash": 180000,

> NOTE(review): this manifest hunk updates only sonnet-4-6 (180000→300000 — the v2.8.1 value). The v2.8.2 changelog and src/config.ts raise opus-4-6 and sonnet-4-6 to 420000 and haiku-4-5 to 120000, yet the manifest context lines above still show opus at 300000 and haiku at 90000. The plugin-manifest defaults appear out of sync with this release; verify which source of defaults takes precedence at runtime.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@elvatis_com/openclaw-cli-bridge-elvatis",
3
- "version": "2.8.0",
3
+ "version": "2.8.2",
4
4
  "description": "Bridges gemini, claude, and codex CLI tools as OpenClaw model providers. Reads existing CLI auth without re-login.",
5
5
  "type": "module",
6
6
  "openclaw": {
package/src/config.ts CHANGED
@@ -86,14 +86,14 @@ export const PROVIDER_SESSION_SWEEP_MS = 10 * 60 * 1_000; // 10 min
86
86
  * Override via `modelTimeouts` in plugin config.
87
87
  *
88
88
  * Strategy:
89
- * - Heavy/agentic models (Opus, GPT-5.4): 5 min — need time for tool use
90
- * - Standard interactive (Sonnet, Pro, GPT-5.3): 3 min
91
- * - Fast/lightweight (Haiku, Flash, Mini): 90s
89
+ * - Heavy/agentic models (Opus, GPT-5.4): 7 min — need time for tool use + large sessions
90
+ * - Standard interactive (Sonnet, Pro, GPT-5.3): 7 min — prevents premature fallback to Haiku
91
+ * - Fast/lightweight (Haiku, Flash, Mini): 120s
92
92
  */
93
93
  export const DEFAULT_MODEL_TIMEOUTS: Record<string, number> = {
94
- "cli-claude/claude-opus-4-6": 300_000, // 5 min
95
- "cli-claude/claude-sonnet-4-6": 180_000, // 3 min
96
- "cli-claude/claude-haiku-4-5": 90_000, // 90s
94
+ "cli-claude/claude-opus-4-6": 420_000, // 7 min
95
+ "cli-claude/claude-sonnet-4-6": 420_000, // 7 min — prevent timeout→Haiku fallback on large sessions
96
+ "cli-claude/claude-haiku-4-5": 120_000, // 2 min
97
97
  "cli-gemini/gemini-2.5-pro": 300_000, // 5 min — image generation needs more time
98
98
  "cli-gemini/gemini-2.5-flash": 180_000, // 3 min
99
99
  "cli-gemini/gemini-3-pro-preview": 300_000, // 5 min — image generation needs more time
@@ -104,9 +104,9 @@ export interface ProxyServerOptions {
104
104
  *
105
105
  * Example:
106
106
  * {
107
- * "cli-claude/claude-sonnet-4-6": 180_000, // 3 min for interactive chat
108
- * "cli-claude/claude-opus-4-6": 300_000, // 5 min for heavy tasks
109
- * "cli-claude/claude-haiku-4-5": 90_000, // 90s for fast responses
107
+ * "cli-claude/claude-sonnet-4-6": 420_000, // 7 min for interactive chat
108
+ * "cli-claude/claude-opus-4-6": 420_000, // 7 min for heavy tasks
109
+ * "cli-claude/claude-haiku-4-5": 120_000, // 2 min for fast responses
110
110
  * }
111
111
  *
112
112
  * When not set for a model, falls back to proxyTimeoutMs (default 300s base).
package/src/config.test.ts CHANGED (filename inferred — file header missing from the original diff; the hunk content is the test suite for config.ts exports)
@@ -61,9 +61,9 @@ describe("config.ts exports", () => {
61
61
  });
62
62
 
63
63
  it("exports per-model timeouts for all major models", () => {
64
- expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-opus-4-6"]).toBe(300_000);
65
- expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-sonnet-4-6"]).toBe(180_000);
66
- expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-haiku-4-5"]).toBe(90_000);
64
+ expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-opus-4-6"]).toBe(420_000);
65
+ expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-sonnet-4-6"]).toBe(420_000);
66
+ expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-haiku-4-5"]).toBe(120_000);
67
67
  expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-pro"]).toBe(300_000);
68
68
  expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-flash"]).toBe(180_000);
69
69
  expect(DEFAULT_MODEL_TIMEOUTS["openai-codex/gpt-5.4"]).toBe(300_000);