@tractorscorch/clank 1.5.2 → 1.5.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/README.md +2 -2
- package/dist/index.js +86 -25
- package/dist/index.js.map +1 -1
- package/dist/web/index.html +3 -3
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
|
@@ -6,6 +6,24 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/).
|
|
|
6
6
|
|
|
7
7
|
---
|
|
8
8
|
|
|
9
|
+
## [1.5.4] — 2026-03-23
|
|
10
|
+
|
|
11
|
+
### Fixed
|
|
12
|
+
- **Streaming dies mid-answer with local models** — added per-chunk idle timeout (60s) that detects when the model hangs between chunks (GPU OOM, Ollama crash). Previously the only timeout was the 5-minute overall timer, which couldn't detect mid-stream stalls
|
|
13
|
+
- **Incomplete responses treated as complete** — when a stream ends without the `[DONE]` marker (connection drop, model crash), the response is no longer silently accepted. The provider now throws an error so the agent retries instead of showing a half-finished answer
|
|
14
|
+
- **Agent retry on stream failure** — the retry loop now resets partial state on retry and recognizes stream drops/empty responses as retryable errors, automatically attempting once more before giving up
|
|
15
|
+
- **XSS in web dashboard** — 3 places where server data (`role`, `a.status`, `j.lastStatus`) was rendered as raw HTML without escaping (CodeQL CWE-79)
|
|
16
|
+
- **Incomplete glob sanitization in search-files** — `.replace("*", "")` only stripped the first `*`; changed to `.replaceAll()` (CodeQL CWE-116)
|
|
17
|
+
|
|
18
|
+
---
|
|
19
|
+
|
|
20
|
+
## [1.5.3] — 2026-03-23
|
|
21
|
+
|
|
22
|
+
### Fixed
|
|
23
|
+
- **Local thinking models return empty responses** — Qwen3.5 puts all output in `reasoning_content` with empty `content`, and `enable_thinking:false` doesn't work (chat template overrides it). Now treats `reasoning_content` as text for local models so the user actually sees a response
|
|
24
|
+
|
|
25
|
+
---
|
|
26
|
+
|
|
9
27
|
## [1.5.2] — 2026-03-23
|
|
10
28
|
|
|
11
29
|
### Fixed
|
package/README.md
CHANGED
|
@@ -9,7 +9,7 @@
|
|
|
9
9
|
</p>
|
|
10
10
|
|
|
11
11
|
<p align="center">
|
|
12
|
-
<a href="https://github.com/ItsTrag1c/Clank/releases/latest"><img src="https://img.shields.io/badge/version-1.5.2-blue.svg" alt="Version" /></a>
|
|
12
|
+
<a href="https://github.com/ItsTrag1c/Clank/releases/latest"><img src="https://img.shields.io/badge/version-1.5.4-blue.svg" alt="Version" /></a>
|
|
13
13
|
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="License" /></a>
|
|
14
14
|
<a href="https://www.npmjs.com/package/@tractorscorch/clank"><img src="https://img.shields.io/npm/v/@tractorscorch/clank.svg" alt="npm" /></a>
|
|
15
15
|
<a href="https://github.com/ItsTrag1c/Clank/stargazers"><img src="https://img.shields.io/github/stars/ItsTrag1c/Clank.svg" alt="Stars" /></a>
|
|
@@ -75,7 +75,7 @@ That's it. Setup auto-detects your local models, configures the gateway, and get
|
|
|
75
75
|
| Platform | Download |
|
|
76
76
|
|----------|----------|
|
|
77
77
|
| **npm** (all platforms) | `npm install -g @tractorscorch/clank` |
|
|
78
|
-
| **macOS** (Apple Silicon) | [Clank_1.5.2_macos](https://github.com/ItsTrag1c/Clank/releases/latest/download/Clank_1.5.2_macos) |
|
|
78
|
+
| **macOS** (Apple Silicon) | [Clank_1.5.4_macos](https://github.com/ItsTrag1c/Clank/releases/latest/download/Clank_1.5.4_macos) |
|
|
79
79
|
|
|
80
80
|
## Features
|
|
81
81
|
|
package/dist/index.js
CHANGED
|
@@ -579,10 +579,20 @@ var init_ollama = __esm({
|
|
|
579
579
|
const reader = res.body.getReader();
|
|
580
580
|
const decoder = new TextDecoder();
|
|
581
581
|
let buffer = "";
|
|
582
|
+
let receivedDone = false;
|
|
583
|
+
let lastFinishReason = null;
|
|
584
|
+
let hasContent = false;
|
|
582
585
|
const toolCalls = /* @__PURE__ */ new Map();
|
|
586
|
+
const CHUNK_IDLE_TIMEOUT = 6e4;
|
|
583
587
|
try {
|
|
584
588
|
while (true) {
|
|
585
|
-
const { done, value } = await reader.read();
|
|
589
|
+
const idleTimeout = new Promise(
|
|
590
|
+
(_, reject) => setTimeout(() => reject(new Error("Model stopped responding (no data for 60s)")), CHUNK_IDLE_TIMEOUT)
|
|
591
|
+
);
|
|
592
|
+
const { done, value } = await Promise.race([
|
|
593
|
+
reader.read(),
|
|
594
|
+
idleTimeout
|
|
595
|
+
]);
|
|
586
596
|
if (done) break;
|
|
587
597
|
buffer += decoder.decode(value, { stream: true });
|
|
588
598
|
const lines = buffer.split("\n");
|
|
@@ -592,6 +602,7 @@ var init_ollama = __esm({
|
|
|
592
602
|
if (!trimmed || !trimmed.startsWith("data: ")) continue;
|
|
593
603
|
const data = trimmed.slice(6);
|
|
594
604
|
if (data === "[DONE]") {
|
|
605
|
+
receivedDone = true;
|
|
595
606
|
for (const tc of toolCalls.values()) {
|
|
596
607
|
let parsedArgs = {};
|
|
597
608
|
try {
|
|
@@ -608,10 +619,15 @@ var init_ollama = __esm({
|
|
|
608
619
|
const chunk = JSON.parse(data);
|
|
609
620
|
const choice = chunk.choices?.[0];
|
|
610
621
|
if (!choice) continue;
|
|
622
|
+
if (choice.finish_reason) {
|
|
623
|
+
lastFinishReason = choice.finish_reason;
|
|
624
|
+
}
|
|
611
625
|
if (choice.delta?.content) {
|
|
626
|
+
hasContent = true;
|
|
612
627
|
yield { type: "text", content: choice.delta.content };
|
|
613
628
|
}
|
|
614
629
|
if (choice.delta?.tool_calls) {
|
|
630
|
+
hasContent = true;
|
|
615
631
|
for (const tc of choice.delta.tool_calls) {
|
|
616
632
|
const existing = toolCalls.get(tc.index);
|
|
617
633
|
if (existing) {
|
|
@@ -641,14 +657,22 @@ var init_ollama = __esm({
|
|
|
641
657
|
} finally {
|
|
642
658
|
reader.releaseLock();
|
|
643
659
|
}
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
660
|
+
if (!receivedDone && hasContent) {
|
|
661
|
+
for (const tc of toolCalls.values()) {
|
|
662
|
+
let parsedArgs = {};
|
|
663
|
+
try {
|
|
664
|
+
parsedArgs = JSON.parse(tc.arguments);
|
|
665
|
+
} catch {
|
|
666
|
+
parsedArgs = {};
|
|
667
|
+
}
|
|
668
|
+
yield { type: "tool_call", id: tc.id, name: tc.name, arguments: parsedArgs };
|
|
650
669
|
}
|
|
651
|
-
|
|
670
|
+
throw new Error(
|
|
671
|
+
lastFinishReason === "length" ? "Model response truncated (hit token limit)" : "Model stream ended unexpectedly (connection dropped)"
|
|
672
|
+
);
|
|
673
|
+
}
|
|
674
|
+
if (!receivedDone) {
|
|
675
|
+
throw new Error("Model returned empty response");
|
|
652
676
|
}
|
|
653
677
|
yield { type: "done" };
|
|
654
678
|
}
|
|
@@ -914,6 +938,12 @@ var init_agent = __esm({
|
|
|
914
938
|
let streamSuccess = false;
|
|
915
939
|
this.emit("response-start");
|
|
916
940
|
for (let attempt = 0; attempt < 2; attempt++) {
|
|
941
|
+
if (attempt > 0) {
|
|
942
|
+
iterationText = "";
|
|
943
|
+
toolCalls.length = 0;
|
|
944
|
+
promptTokens = 0;
|
|
945
|
+
outputTokens = 0;
|
|
946
|
+
}
|
|
917
947
|
try {
|
|
918
948
|
const streamIterator = activeProvider.stream(
|
|
919
949
|
this.contextEngine.getMessages(),
|
|
@@ -948,10 +978,12 @@ var init_agent = __esm({
|
|
|
948
978
|
streamSuccess = true;
|
|
949
979
|
break;
|
|
950
980
|
} catch (streamErr) {
|
|
951
|
-
const
|
|
952
|
-
|
|
981
|
+
const errMsg = streamErr instanceof Error ? streamErr.message : "unknown";
|
|
982
|
+
const isTimeout = streamErr instanceof Error && (streamErr.name === "TimeoutError" || streamErr.name === "AbortError" || errMsg.includes("timed out"));
|
|
983
|
+
const isRetryable = !isTimeout && !signal.aborted && (errMsg.includes("connection dropped") || errMsg.includes("stopped responding") || errMsg.includes("empty response") || errMsg.includes("fetch failed") || errMsg.includes("ECONNREFUSED") || errMsg.includes("ECONNRESET"));
|
|
984
|
+
if (attempt === 0 && (isRetryable || !signal.aborted && !isTimeout)) {
|
|
953
985
|
this.emit("error", {
|
|
954
|
-
message: `Model
|
|
986
|
+
message: `Model stream failed, retrying... (${errMsg})`,
|
|
955
987
|
recoverable: true
|
|
956
988
|
});
|
|
957
989
|
await new Promise((r) => setTimeout(r, 2e3));
|
|
@@ -1700,7 +1732,7 @@ var init_search_files = __esm({
|
|
|
1700
1732
|
await searchDir(full);
|
|
1701
1733
|
} else if (s.isFile() && s.size < 1024 * 1024) {
|
|
1702
1734
|
if (globFilter) {
|
|
1703
|
-
const ext = globFilter.replace("*", "");
|
|
1735
|
+
const ext = globFilter.replaceAll("*", "");
|
|
1704
1736
|
if (!entry.endsWith(ext)) continue;
|
|
1705
1737
|
}
|
|
1706
1738
|
try {
|
|
@@ -3021,10 +3053,20 @@ var init_openai = __esm({
|
|
|
3021
3053
|
const reader = res.body.getReader();
|
|
3022
3054
|
const decoder = new TextDecoder();
|
|
3023
3055
|
let buffer = "";
|
|
3056
|
+
let receivedDone = false;
|
|
3057
|
+
let lastFinishReason = null;
|
|
3058
|
+
let hasContent = false;
|
|
3024
3059
|
const toolCalls = /* @__PURE__ */ new Map();
|
|
3060
|
+
const CHUNK_IDLE_TIMEOUT = this.isLocal ? 6e4 : 3e4;
|
|
3025
3061
|
try {
|
|
3026
3062
|
while (true) {
|
|
3027
|
-
const { done, value } = await reader.read();
|
|
3063
|
+
const idleTimeout = new Promise(
|
|
3064
|
+
(_, reject) => setTimeout(() => reject(new Error("Model stopped responding (no data received)")), CHUNK_IDLE_TIMEOUT)
|
|
3065
|
+
);
|
|
3066
|
+
const { done, value } = await Promise.race([
|
|
3067
|
+
reader.read(),
|
|
3068
|
+
idleTimeout
|
|
3069
|
+
]);
|
|
3028
3070
|
if (done) break;
|
|
3029
3071
|
buffer += decoder.decode(value, { stream: true });
|
|
3030
3072
|
const lines = buffer.split("\n");
|
|
@@ -3034,6 +3076,7 @@ var init_openai = __esm({
|
|
|
3034
3076
|
if (!trimmed.startsWith("data: ")) continue;
|
|
3035
3077
|
const data = trimmed.slice(6);
|
|
3036
3078
|
if (data === "[DONE]") {
|
|
3079
|
+
receivedDone = true;
|
|
3037
3080
|
for (const tc of toolCalls.values()) {
|
|
3038
3081
|
let parsedArgs = {};
|
|
3039
3082
|
try {
|
|
@@ -3049,13 +3092,23 @@ var init_openai = __esm({
|
|
|
3049
3092
|
try {
|
|
3050
3093
|
const chunk = JSON.parse(data);
|
|
3051
3094
|
const choice = chunk.choices?.[0];
|
|
3095
|
+
if (choice?.finish_reason) {
|
|
3096
|
+
lastFinishReason = choice.finish_reason;
|
|
3097
|
+
}
|
|
3052
3098
|
if (choice?.delta?.reasoning_content) {
|
|
3053
|
-
|
|
3099
|
+
hasContent = true;
|
|
3100
|
+
if (this.isLocal) {
|
|
3101
|
+
yield { type: "text", content: choice.delta.reasoning_content };
|
|
3102
|
+
} else {
|
|
3103
|
+
yield { type: "thinking", content: choice.delta.reasoning_content };
|
|
3104
|
+
}
|
|
3054
3105
|
}
|
|
3055
3106
|
if (choice?.delta?.content) {
|
|
3107
|
+
hasContent = true;
|
|
3056
3108
|
yield { type: "text", content: choice.delta.content };
|
|
3057
3109
|
}
|
|
3058
3110
|
if (choice?.delta?.tool_calls) {
|
|
3111
|
+
hasContent = true;
|
|
3059
3112
|
for (const tc of choice.delta.tool_calls) {
|
|
3060
3113
|
const existing = toolCalls.get(tc.index);
|
|
3061
3114
|
if (existing) {
|
|
@@ -3083,14 +3136,22 @@ var init_openai = __esm({
|
|
|
3083
3136
|
} finally {
|
|
3084
3137
|
reader.releaseLock();
|
|
3085
3138
|
}
|
|
3086
|
-
|
|
3087
|
-
|
|
3088
|
-
|
|
3089
|
-
|
|
3090
|
-
|
|
3091
|
-
|
|
3139
|
+
if (!receivedDone && hasContent) {
|
|
3140
|
+
for (const tc of toolCalls.values()) {
|
|
3141
|
+
let parsedArgs = {};
|
|
3142
|
+
try {
|
|
3143
|
+
parsedArgs = JSON.parse(tc.arguments);
|
|
3144
|
+
} catch {
|
|
3145
|
+
parsedArgs = {};
|
|
3146
|
+
}
|
|
3147
|
+
yield { type: "tool_call", id: tc.id, name: tc.name, arguments: parsedArgs };
|
|
3092
3148
|
}
|
|
3093
|
-
|
|
3149
|
+
throw new Error(
|
|
3150
|
+
lastFinishReason === "length" ? "Model response truncated (hit token limit)" : "Model stream ended unexpectedly (connection dropped)"
|
|
3151
|
+
);
|
|
3152
|
+
}
|
|
3153
|
+
if (!receivedDone) {
|
|
3154
|
+
throw new Error("Model returned empty response");
|
|
3094
3155
|
}
|
|
3095
3156
|
yield { type: "done" };
|
|
3096
3157
|
}
|
|
@@ -6166,7 +6227,7 @@ var init_server = __esm({
|
|
|
6166
6227
|
res.writeHead(200, { "Content-Type": "application/json" });
|
|
6167
6228
|
res.end(JSON.stringify({
|
|
6168
6229
|
status: "ok",
|
|
6169
|
-
version: "1.5.2",
|
|
6230
|
+
version: "1.5.4",
|
|
6170
6231
|
uptime: process.uptime(),
|
|
6171
6232
|
clients: this.clients.size,
|
|
6172
6233
|
agents: this.engines.size
|
|
@@ -6278,7 +6339,7 @@ var init_server = __esm({
|
|
|
6278
6339
|
const hello = {
|
|
6279
6340
|
type: "hello",
|
|
6280
6341
|
protocol: PROTOCOL_VERSION,
|
|
6281
|
-
version: "1.5.2",
|
|
6342
|
+
version: "1.5.4",
|
|
6282
6343
|
agents: this.config.agents.list.map((a) => ({
|
|
6283
6344
|
id: a.id,
|
|
6284
6345
|
name: a.name || a.id,
|
|
@@ -7673,7 +7734,7 @@ async function runTui(opts) {
|
|
|
7673
7734
|
ws.on("open", () => {
|
|
7674
7735
|
ws.send(JSON.stringify({
|
|
7675
7736
|
type: "connect",
|
|
7676
|
-
params: { auth: { token }, mode: "tui", version: "1.5.2" }
|
|
7737
|
+
params: { auth: { token }, mode: "tui", version: "1.5.4" }
|
|
7677
7738
|
}));
|
|
7678
7739
|
});
|
|
7679
7740
|
ws.on("message", (data) => {
|
|
@@ -8102,7 +8163,7 @@ import { fileURLToPath as fileURLToPath5 } from "url";
|
|
|
8102
8163
|
import { dirname as dirname5, join as join19 } from "path";
|
|
8103
8164
|
var __filename3 = fileURLToPath5(import.meta.url);
|
|
8104
8165
|
var __dirname3 = dirname5(__filename3);
|
|
8105
|
-
var version = "1.5.2";
|
|
8166
|
+
var version = "1.5.4";
|
|
8106
8167
|
try {
|
|
8107
8168
|
const pkg = JSON.parse(readFileSync(join19(__dirname3, "..", "package.json"), "utf-8"));
|
|
8108
8169
|
version = pkg.version;
|