@tractorscorch/clank 1.5.1 → 1.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -6,6 +6,15 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/).
6
6
 
7
7
  ---
8
8
 
9
+ ## [1.5.2] — 2026-03-23
10
+
11
+ ### Fixed
12
+ - **Thinking models (Qwen3.5) exhaust tokens on reasoning** — the model generates `<think>` reasoning tokens that eat the entire context window, leaving nothing for actual content. Added default `max_tokens: 4096` for local models and `reasoning_effort: "low"` to reduce thinking overhead
13
+ - **Telegram shows nothing during model thinking** — added periodic "typing" indicator every 4 seconds so the bot doesn't appear dead while the model processes internally
14
+ - **Root cause found via direct API testing** — Qwen3.5-35B returns empty `content` with all output in `reasoning_content`; without a max_tokens cap, the model spends all its budget on thinking
15
+
16
+ ---
17
+
9
18
  ## [1.5.1] — 2026-03-23
10
19
 
11
20
  ### Fixed
package/README.md CHANGED
@@ -9,7 +9,7 @@
9
9
  </p>
10
10
 
11
11
  <p align="center">
12
- <a href="https://github.com/ItsTrag1c/Clank/releases/latest"><img src="https://img.shields.io/badge/version-1.5.1-blue.svg" alt="Version" /></a>
12
+ <a href="https://github.com/ItsTrag1c/Clank/releases/latest"><img src="https://img.shields.io/badge/version-1.5.2-blue.svg" alt="Version" /></a>
13
13
  <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="License" /></a>
14
14
  <a href="https://www.npmjs.com/package/@tractorscorch/clank"><img src="https://img.shields.io/npm/v/@tractorscorch/clank.svg" alt="npm" /></a>
15
15
  <a href="https://github.com/ItsTrag1c/Clank/stargazers"><img src="https://img.shields.io/github/stars/ItsTrag1c/Clank.svg" alt="Stars" /></a>
@@ -75,7 +75,7 @@ That's it. Setup auto-detects your local models, configures the gateway, and get
75
75
  | Platform | Download |
76
76
  |----------|----------|
77
77
  | **npm** (all platforms) | `npm install -g @tractorscorch/clank` |
78
- | **macOS** (Apple Silicon) | [Clank_1.5.1_macos](https://github.com/ItsTrag1c/Clank/releases/latest/download/Clank_1.5.1_macos) |
78
+ | **macOS** (Apple Silicon) | [Clank_1.5.2_macos](https://github.com/ItsTrag1c/Clank/releases/latest/download/Clank_1.5.2_macos) |
79
79
 
80
80
  ## Features
81
81
 
package/dist/index.js CHANGED
@@ -2987,11 +2987,16 @@ var init_openai = __esm({
2987
2987
  stream: true,
2988
2988
  stream_options: { include_usage: true }
2989
2989
  };
2990
+ if (this.isLocal) {
2991
+ body.reasoning_effort = "low";
2992
+ }
2990
2993
  if (tools.length > 0) {
2991
2994
  body.tools = this.formatTools(tools);
2992
2995
  }
2993
2996
  if (this.maxResponseTokens) {
2994
2997
  body.max_tokens = this.maxResponseTokens;
2998
+ } else if (this.isLocal) {
2999
+ body.max_tokens = 4096;
2995
3000
  }
2996
3001
  const headers = {
2997
3002
  "Content-Type": "application/json"
@@ -5298,6 +5303,10 @@ var init_telegram = __esm({
5298
5303
  try {
5299
5304
  console.log(` Telegram: processing message from ${userId} in ${chatId}`);
5300
5305
  await ctx.api.sendChatAction(chatId, "typing");
5306
+ var typingInterval2 = setInterval(() => {
5307
+ bot.api.sendChatAction(chatId, "typing").catch(() => {
5308
+ });
5309
+ }, 4e3);
5301
5310
  let streamMsgId = null;
5302
5311
  let sendingInitial = false;
5303
5312
  let accumulated = "";
@@ -5366,8 +5375,10 @@
5366
5375
  await ctx.api.sendMessage(chatId, chunk);
5367
5376
  }
5368
5377
  }
5378
+ clearInterval(typingInterval2);
5369
5379
  console.log(` Telegram: response complete (${response?.length || 0} chars)`);
5370
5380
  } catch (err) {
5381
+ clearInterval(typingInterval2);
5371
5382
  const errMsg = err instanceof Error ? err.message : String(err);
5372
5383
  console.error(` Telegram: message handler error \u2014 ${errMsg}`);
5373
5384
  await ctx.api.sendMessage(chatId, `Error: ${errMsg.slice(0, 200)}`).catch(() => {
@@ -6155,7 +6166,7 @@ var init_server = __esm({
6155
6166
  res.writeHead(200, { "Content-Type": "application/json" });
6156
6167
  res.end(JSON.stringify({
6157
6168
  status: "ok",
6158
- version: "1.5.1",
6169
+ version: "1.5.2",
6159
6170
  uptime: process.uptime(),
6160
6171
  clients: this.clients.size,
6161
6172
  agents: this.engines.size
@@ -6267,7 +6278,7 @@ var init_server = __esm({
6267
6278
  const hello = {
6268
6279
  type: "hello",
6269
6280
  protocol: PROTOCOL_VERSION,
6270
- version: "1.5.1",
6281
+ version: "1.5.2",
6271
6282
  agents: this.config.agents.list.map((a) => ({
6272
6283
  id: a.id,
6273
6284
  name: a.name || a.id,
@@ -7662,7 +7673,7 @@ async function runTui(opts) {
7662
7673
  ws.on("open", () => {
7663
7674
  ws.send(JSON.stringify({
7664
7675
  type: "connect",
7665
- params: { auth: { token }, mode: "tui", version: "1.5.1" }
7676
+ params: { auth: { token }, mode: "tui", version: "1.5.2" }
7666
7677
  }));
7667
7678
  });
7668
7679
  ws.on("message", (data) => {
@@ -8091,7 +8102,7 @@ import { fileURLToPath as fileURLToPath5 } from "url";
8091
8102
  import { dirname as dirname5, join as join19 } from "path";
8092
8103
  var __filename3 = fileURLToPath5(import.meta.url);
8093
8104
  var __dirname3 = dirname5(__filename3);
8094
- var version = "1.5.1";
8105
+ var version = "1.5.2";
8095
8106
  try {
8096
8107
  const pkg = JSON.parse(readFileSync(join19(__dirname3, "..", "package.json"), "utf-8"));
8097
8108
  version = pkg.version;