@elvatis_com/openclaw-cli-bridge-elvatis 3.1.0 → 3.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +9 -1
- package/SKILL.md +1 -1
- package/openclaw.plugin.json +1 -1
- package/package.json +1 -1
- package/src/proxy-server.ts +20 -0
package/README.md
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
|
|
3
3
|
> OpenClaw plugin that bridges locally installed AI CLIs (Codex, Gemini, Claude Code, OpenCode, Pi) as model providers — with slash commands for instant model switching, restore, health testing, and model listing.
|
|
4
4
|
|
|
5
|
-
**Current version:** `3.1.0`
|
|
5
|
+
**Current version:** `3.1.2`
|
|
6
6
|
|
|
7
7
|
---
|
|
8
8
|
|
|
@@ -406,6 +406,14 @@ npm run ci # lint + typecheck + test
|
|
|
406
406
|
|
|
407
407
|
## Changelog
|
|
408
408
|
|
|
409
|
+
### v3.1.2
|
|
410
|
+
- **fix:** fallback models returning text instead of tool_calls in a tool loop now trigger the next model in the chain. Previously Haiku would say "Lass mich das starten:" as text but never call a tool — conversation died.
|
|
411
|
+
- **feat:** `[FALLBACK-NO-TOOLS]` debug log category for tool-format violations
|
|
412
|
+
|
|
413
|
+
### v3.1.1
|
|
414
|
+
- **fix:** empty-response detection — models returning zero content now trigger the next fallback instead of silently stopping the chain. Previously Haiku would return empty (0 bytes) and the bridge treated it as success, leaving the user with no response.
|
|
415
|
+
- **feat:** `[EMPTY]` and `[FALLBACK-EMPTY]` debug log categories for diagnosing empty model responses
|
|
416
|
+
|
|
409
417
|
### v3.1.0
|
|
410
418
|
- **feat:** cross-provider fallback chains — Sonnet → Haiku → Gemini Flash → Codex (was single-model fallback only)
|
|
411
419
|
- **feat:** fallback chain loop — tries each model in order until one succeeds, logs each attempt
|
package/SKILL.md
CHANGED
package/openclaw.plugin.json
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
"id": "openclaw-cli-bridge-elvatis",
|
|
3
3
|
"slug": "openclaw-cli-bridge-elvatis",
|
|
4
4
|
"name": "OpenClaw CLI Bridge",
|
|
5
|
-
"version": "3.1.0",
|
|
5
|
+
"version": "3.1.2",
|
|
6
6
|
"license": "MIT",
|
|
7
7
|
"description": "Phase 1: openai-codex auth bridge. Phase 2: local HTTP proxy routing model calls through gemini/claude CLIs (vllm provider).",
|
|
8
8
|
"providers": [
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@elvatis_com/openclaw-cli-bridge-elvatis",
|
|
3
|
-
"version": "3.1.0",
|
|
3
|
+
"version": "3.1.2",
|
|
4
4
|
"description": "Bridges gemini, claude, and codex CLI tools as OpenClaw model providers. Reads existing CLI auth without re-login.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"openclaw": {
|
package/src/proxy-server.ts
CHANGED
|
@@ -905,6 +905,12 @@ async function handleRequest(
|
|
|
905
905
|
try {
|
|
906
906
|
result = await routeToCliRunner(usedModel, cleanMessages, effectiveTimeout, routeOpts);
|
|
907
907
|
const latencyMs = Date.now() - cliStart;
|
|
908
|
+
const hasContent = !!(result.content?.trim()) || !!(result.tool_calls?.length);
|
|
909
|
+
// Empty response = model returned nothing useful. Treat as error to trigger fallback.
|
|
910
|
+
if (!hasContent) {
|
|
911
|
+
debugLog("EMPTY", `${usedModel} returned empty after ${(latencyMs / 1000).toFixed(1)}s`, {});
|
|
912
|
+
throw new Error(`empty response: ${usedModel} returned no content and no tool_calls`);
|
|
913
|
+
}
|
|
908
914
|
const estCompletionTokens = estimateTokens(result.content ?? "");
|
|
909
915
|
metrics.recordRequest(usedModel, latencyMs, true, estPromptTokens, estCompletionTokens, promptPreview);
|
|
910
916
|
providerSessions.recordRun(session.id, false);
|
|
@@ -938,6 +944,20 @@ async function handleRequest(
|
|
|
938
944
|
const fallbackStart = Date.now();
|
|
939
945
|
try {
|
|
940
946
|
result = await routeToCliRunner(fallbackModel, cleanMessages, effectiveTimeout, routeOpts);
|
|
947
|
+
const fbHasContent = !!(result.content?.trim()) || !!(result.tool_calls?.length);
|
|
948
|
+
if (!fbHasContent) {
|
|
949
|
+
debugLog("FALLBACK-EMPTY", `${fallbackModel} returned empty`, {});
|
|
950
|
+
throw new Error(`empty response from ${fallbackModel}`);
|
|
951
|
+
}
|
|
952
|
+
// If tools were requested and the last message was a tool result (gateway expects
|
|
953
|
+
// tool continuation), but the fallback model returned text instead of tool_calls —
|
|
954
|
+
// it ignored the JSON format. Try next model in chain.
|
|
955
|
+
const lastMsg = cleanMessages[cleanMessages.length - 1];
|
|
956
|
+
const inToolLoop = lastMsg?.role === "tool" || lastMsg?.role === "function";
|
|
957
|
+
if (hasTools && inToolLoop && !result.tool_calls?.length && result.content) {
|
|
958
|
+
debugLog("FALLBACK-NO-TOOLS", `${fallbackModel} returned text instead of tool_calls in tool loop`, { contentLen: result.content.length, preview: result.content.slice(0, 80) });
|
|
959
|
+
throw new Error(`${fallbackModel} returned text instead of tool_calls`);
|
|
960
|
+
}
|
|
941
961
|
const fbCompTokens = estimateTokens(result.content ?? "");
|
|
942
962
|
metrics.recordRequest(fallbackModel, Date.now() - fallbackStart, true, estPromptTokens, fbCompTokens, promptPreview);
|
|
943
963
|
metrics.recordFallback(model, fallbackModel, isTimeout ? "timeout" : "error", primaryDuration, true);
|