@tractorscorch/clank 1.5.6 → 1.5.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +19 -0
- package/README.md +2 -2
- package/dist/index.js +36 -30
- package/dist/index.js.map +1 -1
- package/dist/web/index.html +27 -3
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
|
@@ -6,6 +6,25 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/).
|
|
|
6
6
|
|
|
7
7
|
---
|
|
8
8
|
|
|
9
|
+
## [1.5.8] — 2026-03-23
|
|
10
|
+
|
|
11
|
+
### Added
|
|
12
|
+
- **Collapsible thinking blocks** — model thinking/reasoning is now displayed in a separate clickable block above the response instead of being streamed into the message text. Click the "Thought" toggle to expand/collapse. Shows "Thinking..." while streaming, "Thought" when complete
|
|
13
|
+
|
|
14
|
+
### Fixed
|
|
15
|
+
- **Thinking events were disconnected** — the full thinking pipeline (provider → agent → gateway → frontend) was broken at 3 points: provider yielded thinking as text for local models, agent only emitted a one-shot start event without content, and gateway didn't forward thinking events to clients. All 3 fixed
|
|
16
|
+
- **Empty responses from thinking-only models** — when a model puts all output in `reasoning_content` with empty `content` (Qwen3.5), the thinking text is now used as the response instead of showing a blank message
|
|
17
|
+
|
|
18
|
+
---
|
|
19
|
+
|
|
20
|
+
## [1.5.7] — 2026-03-23
|
|
21
|
+
|
|
22
|
+
### Fixed
|
|
23
|
+
- **Tools completely broken for llama.cpp / LM Studio / vLLM models** — the PromptFallbackProvider (which injects tools into the system prompt as text) was only applied to Ollama models. All other local providers (llama.cpp, LM Studio, vLLM) sent tools via the API's `tools` parameter, which most local models can't handle — so they just ignored tools entirely. Now ALL local models that aren't in the known tool-capable list automatically get prompt-based tool injection
|
|
24
|
+
- **Tool-capable model detection shared across providers** — moved the `TOOL_CAPABLE_PATTERNS` list from the Ollama provider to a shared `supportsNativeTools()` function in types.ts, used by the agent engine for any local provider
|
|
25
|
+
|
|
26
|
+
---
|
|
27
|
+
|
|
9
28
|
## [1.5.6] — 2026-03-23
|
|
10
29
|
|
|
11
30
|
### Fixed
|
package/README.md
CHANGED
|
@@ -9,7 +9,7 @@
|
|
|
9
9
|
</p>
|
|
10
10
|
|
|
11
11
|
<p align="center">
|
|
12
|
-
<a href="https://github.com/ItsTrag1c/Clank/releases/latest"><img src="https://img.shields.io/badge/version-1.5.6-blue.svg" alt="Version" /></a>
|
|
12
|
+
<a href="https://github.com/ItsTrag1c/Clank/releases/latest"><img src="https://img.shields.io/badge/version-1.5.8-blue.svg" alt="Version" /></a>
|
|
13
13
|
<a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="License" /></a>
|
|
14
14
|
<a href="https://www.npmjs.com/package/@tractorscorch/clank"><img src="https://img.shields.io/npm/v/@tractorscorch/clank.svg" alt="npm" /></a>
|
|
15
15
|
<a href="https://github.com/ItsTrag1c/Clank/stargazers"><img src="https://img.shields.io/github/stars/ItsTrag1c/Clank.svg" alt="Stars" /></a>
|
|
@@ -75,7 +75,7 @@ That's it. Setup auto-detects your local models, configures the gateway, and get
|
|
|
75
75
|
| Platform | Download |
|
|
76
76
|
|----------|----------|
|
|
77
77
|
| **npm** (all platforms) | `npm install -g @tractorscorch/clank` |
|
|
78
|
-
| **macOS** (Apple Silicon) | [Clank_1.5.6_macos](https://github.com/ItsTrag1c/Clank/releases/latest/download/Clank_1.5.6_macos) |
|
|
78
|
+
| **macOS** (Apple Silicon) | [Clank_1.5.8_macos](https://github.com/ItsTrag1c/Clank/releases/latest/download/Clank_1.5.8_macos) |
|
|
79
79
|
|
|
80
80
|
## Features
|
|
81
81
|
|
package/dist/index.js
CHANGED
|
@@ -395,11 +395,27 @@ var init_auto_persist = __esm({
|
|
|
395
395
|
});
|
|
396
396
|
|
|
397
397
|
// src/providers/types.ts
|
|
398
|
-
|
|
398
|
+
function supportsNativeTools(model) {
|
|
399
|
+
const name = model.includes("/") ? model.split("/").pop() : model;
|
|
400
|
+
const baseName = name.split(":")[0];
|
|
401
|
+
return TOOL_CAPABLE_PATTERNS.some((p) => p.test(baseName));
|
|
402
|
+
}
|
|
403
|
+
var TOOL_CAPABLE_PATTERNS, BaseProvider;
|
|
399
404
|
var init_types = __esm({
|
|
400
405
|
"src/providers/types.ts"() {
|
|
401
406
|
"use strict";
|
|
402
407
|
init_esm_shims();
|
|
408
|
+
TOOL_CAPABLE_PATTERNS = [
|
|
409
|
+
/^llama3\.[1-9]/i,
|
|
410
|
+
/^llama-3\.[1-9]/i,
|
|
411
|
+
/^qwen[23]/i,
|
|
412
|
+
/^mistral-nemo/i,
|
|
413
|
+
/^mistral-large/i,
|
|
414
|
+
/^command-r/i,
|
|
415
|
+
/^firefunction/i,
|
|
416
|
+
/^hermes-[23]/i,
|
|
417
|
+
/^nemotron/i
|
|
418
|
+
];
|
|
403
419
|
BaseProvider = class {
|
|
404
420
|
/** Rough token estimate (~4 chars per token) */
|
|
405
421
|
estimateTokens(messages) {
|
|
@@ -411,23 +427,12 @@ var init_types = __esm({
|
|
|
411
427
|
});
|
|
412
428
|
|
|
413
429
|
// src/providers/ollama.ts
|
|
414
|
-
var TOOL_CAPABLE_PATTERNS, contextWindowCache, OllamaProvider;
|
|
430
|
+
var contextWindowCache, OllamaProvider;
|
|
415
431
|
var init_ollama = __esm({
|
|
416
432
|
"src/providers/ollama.ts"() {
|
|
417
433
|
"use strict";
|
|
418
434
|
init_esm_shims();
|
|
419
435
|
init_types();
|
|
420
|
-
TOOL_CAPABLE_PATTERNS = [
|
|
421
|
-
/^llama3\.[1-9]/i,
|
|
422
|
-
/^llama-3\.[1-9]/i,
|
|
423
|
-
/^qwen[23]/i,
|
|
424
|
-
/^mistral-nemo/i,
|
|
425
|
-
/^mistral-large/i,
|
|
426
|
-
/^command-r/i,
|
|
427
|
-
/^firefunction/i,
|
|
428
|
-
/^hermes-[23]/i,
|
|
429
|
-
/^nemotron/i
|
|
430
|
-
];
|
|
431
436
|
contextWindowCache = /* @__PURE__ */ new Map();
|
|
432
437
|
OllamaProvider = class _OllamaProvider extends BaseProvider {
|
|
433
438
|
name = "ollama";
|
|
@@ -497,8 +502,7 @@ var init_ollama = __esm({
|
|
|
497
502
|
}
|
|
498
503
|
/** Check if a model supports native tool calling */
|
|
499
504
|
static supportsTools(model) {
|
|
500
|
-
|
|
501
|
-
return TOOL_CAPABLE_PATTERNS.some((p) => p.test(baseName));
|
|
505
|
+
return supportsNativeTools(model);
|
|
502
506
|
}
|
|
503
507
|
contextWindow() {
|
|
504
508
|
return contextWindowCache.get(this.model) ?? 32768;
|
|
@@ -821,6 +825,7 @@ var init_agent = __esm({
|
|
|
821
825
|
init_esm_shims();
|
|
822
826
|
init_context_engine();
|
|
823
827
|
init_auto_persist();
|
|
828
|
+
init_types();
|
|
824
829
|
init_ollama();
|
|
825
830
|
init_prompt_fallback();
|
|
826
831
|
MAX_ITERATIONS = 50;
|
|
@@ -902,11 +907,8 @@ var init_agent = __esm({
|
|
|
902
907
|
const provider = this.resolvedProvider.provider;
|
|
903
908
|
const isLocal = this.resolvedProvider.isLocal;
|
|
904
909
|
let activeProvider = provider;
|
|
905
|
-
if (isLocal &&
|
|
906
|
-
|
|
907
|
-
if (!OllamaProvider.supportsTools(modelName)) {
|
|
908
|
-
activeProvider = new PromptFallbackProvider(provider);
|
|
909
|
-
}
|
|
910
|
+
if (isLocal && !supportsNativeTools(this.identity.model.primary)) {
|
|
911
|
+
activeProvider = new PromptFallbackProvider(provider);
|
|
910
912
|
}
|
|
911
913
|
let fullResponse = "";
|
|
912
914
|
let iterationCount = 0;
|
|
@@ -932,6 +934,7 @@ var init_agent = __esm({
|
|
|
932
934
|
denylist: this.identity.tools?.deny
|
|
933
935
|
});
|
|
934
936
|
let iterationText = "";
|
|
937
|
+
let thinkingText = "";
|
|
935
938
|
const toolCalls = [];
|
|
936
939
|
let promptTokens = 0;
|
|
937
940
|
let outputTokens = 0;
|
|
@@ -940,6 +943,7 @@ var init_agent = __esm({
|
|
|
940
943
|
for (let attempt = 0; attempt < 2; attempt++) {
|
|
941
944
|
if (attempt > 0) {
|
|
942
945
|
iterationText = "";
|
|
946
|
+
thinkingText = "";
|
|
943
947
|
toolCalls.length = 0;
|
|
944
948
|
promptTokens = 0;
|
|
945
949
|
outputTokens = 0;
|
|
@@ -958,7 +962,8 @@ var init_agent = __esm({
|
|
|
958
962
|
this.emit("token", { content: event.content });
|
|
959
963
|
break;
|
|
960
964
|
case "thinking":
|
|
961
|
-
|
|
965
|
+
thinkingText += event.content;
|
|
966
|
+
this.emit("thinking", { content: event.content });
|
|
962
967
|
break;
|
|
963
968
|
case "tool_call":
|
|
964
969
|
toolCalls.push({
|
|
@@ -1003,6 +1008,10 @@ var init_agent = __esm({
|
|
|
1003
1008
|
contextPercent: Math.round(this.contextEngine.utilizationPercent())
|
|
1004
1009
|
});
|
|
1005
1010
|
if (toolCalls.length === 0) {
|
|
1011
|
+
if (!iterationText && thinkingText) {
|
|
1012
|
+
iterationText = thinkingText;
|
|
1013
|
+
this.emit("token", { content: iterationText });
|
|
1014
|
+
}
|
|
1006
1015
|
fullResponse = iterationText;
|
|
1007
1016
|
this.contextEngine.ingest({ role: "assistant", content: iterationText });
|
|
1008
1017
|
this.emit("response-end", { text: iterationText });
|
|
@@ -3110,11 +3119,7 @@ var init_openai = __esm({
|
|
|
3110
3119
|
}
|
|
3111
3120
|
if (choice?.delta?.reasoning_content) {
|
|
3112
3121
|
hasContent = true;
|
|
3113
|
-
|
|
3114
|
-
yield { type: "text", content: choice.delta.reasoning_content };
|
|
3115
|
-
} else {
|
|
3116
|
-
yield { type: "thinking", content: choice.delta.reasoning_content };
|
|
3117
|
-
}
|
|
3122
|
+
yield { type: "thinking", content: choice.delta.reasoning_content };
|
|
3118
3123
|
}
|
|
3119
3124
|
if (choice?.delta?.content) {
|
|
3120
3125
|
hasContent = true;
|
|
@@ -6240,7 +6245,7 @@ var init_server = __esm({
|
|
|
6240
6245
|
res.writeHead(200, { "Content-Type": "application/json" });
|
|
6241
6246
|
res.end(JSON.stringify({
|
|
6242
6247
|
status: "ok",
|
|
6243
|
-
version: "1.5.6",
|
|
6248
|
+
version: "1.5.8",
|
|
6244
6249
|
uptime: process.uptime(),
|
|
6245
6250
|
clients: this.clients.size,
|
|
6246
6251
|
agents: this.engines.size
|
|
@@ -6352,7 +6357,7 @@ var init_server = __esm({
|
|
|
6352
6357
|
const hello = {
|
|
6353
6358
|
type: "hello",
|
|
6354
6359
|
protocol: PROTOCOL_VERSION,
|
|
6355
|
-
version: "1.5.6",
|
|
6360
|
+
version: "1.5.8",
|
|
6356
6361
|
agents: this.config.agents.list.map((a) => ({
|
|
6357
6362
|
id: a.id,
|
|
6358
6363
|
name: a.name || a.id,
|
|
@@ -6617,6 +6622,7 @@ var init_server = __esm({
|
|
|
6617
6622
|
wireEngineEvents(engine, client) {
|
|
6618
6623
|
const eventMap = {
|
|
6619
6624
|
"token": "token",
|
|
6625
|
+
"thinking": "thinking",
|
|
6620
6626
|
"response-start": "response-start",
|
|
6621
6627
|
"response-end": "response-end",
|
|
6622
6628
|
"tool-start": "tool-start",
|
|
@@ -7747,7 +7753,7 @@ async function runTui(opts) {
|
|
|
7747
7753
|
ws.on("open", () => {
|
|
7748
7754
|
ws.send(JSON.stringify({
|
|
7749
7755
|
type: "connect",
|
|
7750
|
-
params: { auth: { token }, mode: "tui", version: "1.5.6" }
|
|
7756
|
+
params: { auth: { token }, mode: "tui", version: "1.5.8" }
|
|
7751
7757
|
}));
|
|
7752
7758
|
});
|
|
7753
7759
|
ws.on("message", (data) => {
|
|
@@ -8176,7 +8182,7 @@ import { fileURLToPath as fileURLToPath5 } from "url";
|
|
|
8176
8182
|
import { dirname as dirname5, join as join19 } from "path";
|
|
8177
8183
|
var __filename3 = fileURLToPath5(import.meta.url);
|
|
8178
8184
|
var __dirname3 = dirname5(__filename3);
|
|
8179
|
-
var version = "1.5.6";
|
|
8185
|
+
var version = "1.5.8";
|
|
8180
8186
|
try {
|
|
8181
8187
|
const pkg = JSON.parse(readFileSync(join19(__dirname3, "..", "package.json"), "utf-8"));
|
|
8182
8188
|
version = pkg.version;
|