@elvatis_com/openclaw-cli-bridge-elvatis 2.7.2 → 2.7.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  > OpenClaw plugin that bridges locally installed AI CLIs (Codex, Gemini, Claude Code, OpenCode, Pi) as model providers — with slash commands for instant model switching, restore, health testing, and model listing.
4
4
 
5
- **Current version:** `2.7.2`
5
+ **Current version:** `2.7.3`
6
6
 
7
7
  ---
8
8
 
@@ -406,6 +406,11 @@ npm run ci # lint + typecheck + test
406
406
 
407
407
  ## Changelog
408
408
 
409
+ ### v2.7.3
410
+ - **fix:** Gemini image generation timeouts — base timeout for Gemini Pro models raised from 180s → 300s, and for Flash models from 90s → 180s. Image generation needs significantly more time than text completion.
411
+ - **tune:** Per-tool timeout bonus increased from 5s → 7s per tool definition (e.g., a request with 21 tools now gains a 147s bonus instead of 105s)
412
+ - **tune:** Max effective timeout cap raised from 600s (10 min) → 900s (15 min) to accommodate long-running image generation with many tools
413
+
409
414
  ### v2.7.2
410
415
  - **fix:** Self-heal plugin `modelOrder` still referenced `openai-codex/gpt-5.1` (not in bridge allowlist), causing failover errors. Updated to `vllm/openai-codex/gpt-5.2-codex`.
411
416
 
package/SKILL.md CHANGED
@@ -68,4 +68,4 @@ On gateway restart, if any session has expired, a **WhatsApp alert** is sent aut
68
68
 
69
69
  See `README.md` for full configuration reference and architecture diagram.
70
70
 
71
- **Version:** 2.7.2
71
+ **Version:** 2.7.3
@@ -2,7 +2,7 @@
2
2
  "id": "openclaw-cli-bridge-elvatis",
3
3
  "slug": "openclaw-cli-bridge-elvatis",
4
4
  "name": "OpenClaw CLI Bridge",
5
- "version": "2.7.2",
5
+ "version": "2.7.3",
6
6
  "license": "MIT",
7
7
  "description": "Phase 1: openai-codex auth bridge. Phase 2: local HTTP proxy routing model calls through gemini/claude CLIs (vllm provider).",
8
8
  "providers": [
@@ -46,10 +46,10 @@
46
46
  "cli-claude/claude-opus-4-6": 300000,
47
47
  "cli-claude/claude-sonnet-4-6": 180000,
48
48
  "cli-claude/claude-haiku-4-5": 90000,
49
- "cli-gemini/gemini-2.5-pro": 180000,
50
- "cli-gemini/gemini-2.5-flash": 90000,
51
- "cli-gemini/gemini-3-pro-preview": 180000,
52
- "cli-gemini/gemini-3-flash-preview": 90000,
49
+ "cli-gemini/gemini-2.5-pro": 300000,
50
+ "cli-gemini/gemini-2.5-flash": 180000,
51
+ "cli-gemini/gemini-3-pro-preview": 300000,
52
+ "cli-gemini/gemini-3-flash-preview": 180000,
53
53
  "openai-codex/gpt-5.4": 300000,
54
54
  "openai-codex/gpt-5.3-codex": 180000,
55
55
  "openai-codex/gpt-5.1-codex-mini": 90000
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@elvatis_com/openclaw-cli-bridge-elvatis",
3
- "version": "2.7.2",
3
+ "version": "2.7.3",
4
4
  "description": "Bridges gemini, claude, and codex CLI tools as OpenClaw model providers. Reads existing CLI auth without re-login.",
5
5
  "type": "module",
6
6
  "openclaw": {
package/src/config.ts CHANGED
@@ -26,13 +26,13 @@ export const DEFAULT_PROXY_API_KEY = "cli-bridge";
26
26
  export const DEFAULT_PROXY_TIMEOUT_MS = 300_000; // 5 min
27
27
 
28
28
  /** Maximum effective timeout after dynamic scaling (ms). */
29
- export const MAX_EFFECTIVE_TIMEOUT_MS = 600_000; // 10 min
29
+ export const MAX_EFFECTIVE_TIMEOUT_MS = 900_000; // 15 min
30
30
 
31
31
  /** Extra timeout per message beyond 10 in the conversation (ms). */
32
32
  export const TIMEOUT_PER_EXTRA_MSG_MS = 2_000;
33
33
 
34
34
  /** Extra timeout per tool definition in the request (ms). */
35
- export const TIMEOUT_PER_TOOL_MS = 5_000;
35
+ export const TIMEOUT_PER_TOOL_MS = 7_000;
36
36
 
37
37
  /** SSE keepalive interval — prevents OpenClaw read-timeout during long CLI runs (ms). */
38
38
  export const SSE_KEEPALIVE_INTERVAL_MS = 15_000;
@@ -94,10 +94,10 @@ export const DEFAULT_MODEL_TIMEOUTS: Record<string, number> = {
94
94
  "cli-claude/claude-opus-4-6": 300_000, // 5 min
95
95
  "cli-claude/claude-sonnet-4-6": 180_000, // 3 min
96
96
  "cli-claude/claude-haiku-4-5": 90_000, // 90s
97
- "cli-gemini/gemini-2.5-pro": 180_000,
98
- "cli-gemini/gemini-2.5-flash": 90_000,
99
- "cli-gemini/gemini-3-pro-preview": 180_000,
100
- "cli-gemini/gemini-3-flash-preview": 90_000,
97
+ "cli-gemini/gemini-2.5-pro": 300_000, // 5 min — image generation needs more time
98
+ "cli-gemini/gemini-2.5-flash": 180_000, // 3 min
99
+ "cli-gemini/gemini-3-pro-preview": 300_000, // 5 min — image generation needs more time
100
+ "cli-gemini/gemini-3-flash-preview": 180_000, // 3 min
101
101
  "openai-codex/gpt-5.4": 300_000,
102
102
  "openai-codex/gpt-5.3-codex": 180_000,
103
103
  "openai-codex/gpt-5.1-codex-mini": 90_000,
@@ -38,7 +38,7 @@ describe("config.ts exports", () => {
38
38
  expect(DEFAULT_PROXY_TIMEOUT_MS).toBe(300_000);
39
39
  expect(DEFAULT_CLI_TIMEOUT_MS).toBe(120_000);
40
40
  expect(TIMEOUT_GRACE_MS).toBe(5_000);
41
- expect(MAX_EFFECTIVE_TIMEOUT_MS).toBe(600_000);
41
+ expect(MAX_EFFECTIVE_TIMEOUT_MS).toBe(900_000);
42
42
  expect(SESSION_TTL_MS).toBe(30 * 60 * 1000);
43
43
  expect(CLEANUP_INTERVAL_MS).toBe(5 * 60 * 1000);
44
44
  expect(SESSION_KILL_GRACE_MS).toBe(5_000);
@@ -47,7 +47,7 @@ describe("config.ts exports", () => {
47
47
 
48
48
  it("exports dynamic timeout scaling factors", () => {
49
49
  expect(TIMEOUT_PER_EXTRA_MSG_MS).toBe(2_000);
50
- expect(TIMEOUT_PER_TOOL_MS).toBe(5_000);
50
+ expect(TIMEOUT_PER_TOOL_MS).toBe(7_000);
51
51
  });
52
52
 
53
53
  it("exports message limits", () => {
@@ -64,8 +64,8 @@ describe("config.ts exports", () => {
64
64
  expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-opus-4-6"]).toBe(300_000);
65
65
  expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-sonnet-4-6"]).toBe(180_000);
66
66
  expect(DEFAULT_MODEL_TIMEOUTS["cli-claude/claude-haiku-4-5"]).toBe(90_000);
67
- expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-pro"]).toBe(180_000);
68
- expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-flash"]).toBe(90_000);
67
+ expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-pro"]).toBe(300_000);
68
+ expect(DEFAULT_MODEL_TIMEOUTS["cli-gemini/gemini-2.5-flash"]).toBe(180_000);
69
69
  expect(DEFAULT_MODEL_TIMEOUTS["openai-codex/gpt-5.4"]).toBe(300_000);
70
70
  });
71
71