@elvatis_com/openclaw-cli-bridge-elvatis 3.1.1 → 3.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -1
- package/SKILL.md +1 -1
- package/openclaw.plugin.json +1 -1
- package/package.json +1 -1
- package/src/proxy-server.ts +9 -0
package/README.md
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
|
|
3
3
|
> OpenClaw plugin that bridges locally installed AI CLIs (Codex, Gemini, Claude Code, OpenCode, Pi) as model providers — with slash commands for instant model switching, restore, health testing, and model listing.
|
|
4
4
|
|
|
5
|
-
**Current version:** `3.1.1`
|
|
5
|
+
**Current version:** `3.1.2`
|
|
6
6
|
|
|
7
7
|
---
|
|
8
8
|
|
|
@@ -406,6 +406,10 @@ npm run ci # lint + typecheck + test
|
|
|
406
406
|
|
|
407
407
|
## Changelog
|
|
408
408
|
|
|
409
|
+
### v3.1.2
|
|
410
|
+
- **fix:** fallback models returning text instead of tool_calls in a tool loop now trigger the next model in the chain. Previously Haiku would say "Lass mich das starten:" as text but never call a tool — conversation died.
|
|
411
|
+
- **feat:** `[FALLBACK-NO-TOOLS]` debug log category for tool-format violations
|
|
412
|
+
|
|
409
413
|
### v3.1.1
|
|
410
414
|
- **fix:** empty-response detection — models returning zero content now trigger the next fallback instead of silently stopping the chain. Previously Haiku would return empty (0 bytes) and the bridge treated it as success, leaving the user with no response.
|
|
411
415
|
- **feat:** `[EMPTY]` and `[FALLBACK-EMPTY]` debug log categories for diagnosing empty model responses
|
package/SKILL.md
CHANGED
package/openclaw.plugin.json
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
"id": "openclaw-cli-bridge-elvatis",
|
|
3
3
|
"slug": "openclaw-cli-bridge-elvatis",
|
|
4
4
|
"name": "OpenClaw CLI Bridge",
|
|
5
|
-
"version": "3.1.1",
|
|
5
|
+
"version": "3.1.2",
|
|
6
6
|
"license": "MIT",
|
|
7
7
|
"description": "Phase 1: openai-codex auth bridge. Phase 2: local HTTP proxy routing model calls through gemini/claude CLIs (vllm provider).",
|
|
8
8
|
"providers": [
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@elvatis_com/openclaw-cli-bridge-elvatis",
|
|
3
|
-
"version": "3.1.1",
|
|
3
|
+
"version": "3.1.2",
|
|
4
4
|
"description": "Bridges gemini, claude, and codex CLI tools as OpenClaw model providers. Reads existing CLI auth without re-login.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"openclaw": {
|
package/src/proxy-server.ts
CHANGED
|
@@ -949,6 +949,15 @@ async function handleRequest(
|
|
|
949
949
|
debugLog("FALLBACK-EMPTY", `${fallbackModel} returned empty`, {});
|
|
950
950
|
throw new Error(`empty response from ${fallbackModel}`);
|
|
951
951
|
}
|
|
952
|
+
// If tools were requested and the last message was a tool result (gateway expects
|
|
953
|
+
// tool continuation), but the fallback model returned text instead of tool_calls —
|
|
954
|
+
// it ignored the JSON format. Try next model in chain.
|
|
955
|
+
const lastMsg = cleanMessages[cleanMessages.length - 1];
|
|
956
|
+
const inToolLoop = lastMsg?.role === "tool" || lastMsg?.role === "function";
|
|
957
|
+
if (hasTools && inToolLoop && !result.tool_calls?.length && result.content) {
|
|
958
|
+
debugLog("FALLBACK-NO-TOOLS", `${fallbackModel} returned text instead of tool_calls in tool loop`, { contentLen: result.content.length, preview: result.content.slice(0, 80) });
|
|
959
|
+
throw new Error(`${fallbackModel} returned text instead of tool_calls`);
|
|
960
|
+
}
|
|
952
961
|
const fbCompTokens = estimateTokens(result.content ?? "");
|
|
953
962
|
metrics.recordRequest(fallbackModel, Date.now() - fallbackStart, true, estPromptTokens, fbCompTokens, promptPreview);
|
|
954
963
|
metrics.recordFallback(model, fallbackModel, isTimeout ? "timeout" : "error", primaryDuration, true);
|