@vtstech/pi-status 1.1.2 → 1.1.4-dev

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +27 -23
  2. package/package.json +8 -4
  3. package/status.js +130 -189
package/README.md CHANGED
@@ -1,8 +1,8 @@
  # @vtstech/pi-status

- System monitor / status bar extension for the [Pi Coding Agent](https://github.com/badlogic/pi-mono).
+ System monitor extension for the [Pi Coding Agent](https://github.com/badlogic/pi-mono).

- Replaces the Pi footer with a unified status bar showing system metrics, model info, and generation params.
+ Adds composable named status items to the framework footer using `ctx.ui.setStatus()`. Each metric gets its own named slot so it coexists cleanly with other extensions' status items.

  ## Install

@@ -12,35 +12,39 @@ pi install "npm:@vtstech/pi-status"

  ## How It Works

- Automatically loaded — no commands needed. Displays a 2-line status bar at the bottom of the Pi interface.
+ Automatically loaded — no commands needed. Slots are rendered in the framework footer alongside framework items (model name, session tokens, context usage). All labels use dimmed coloring; all values use green highlighting.

- **Line 1 (conf):**
+ CPU/RAM/Swap are only shown when using a local Ollama provider (not for cloud/remote). For cloud providers, system metrics are omitted.
+
+ **Example (local Ollama):**
  ```
- qwen3.5:0.8b · ~/.pi/agent · medium · CPU 9%
+ CtxMax:41k RespMax:16.4k Resp 2m3s CPU 12% RAM 2.2G/15.1G Prompt: 2840 chr 393 tok pi:0.66.1
  ```

- **Line 2 (load):**
+ **Example (cloud provider):**
  ```
- qwen3.5:0.8b · M:33k · S:9.0%/128k · RAM 2.2G/15.1G · Resp 5m24s · temp:0.0 · max:16384
+ CtxMax:128k RespMax:16.4k Resp 1m22s Prompt: 2840 chr 393 tok pi:0.66.1
  ```

- CPU/RAM/Swap are only shown when using a local Ollama provider (not for cloud/remote).
+ ## Status Slots
+
+ Slots are updated every 5 seconds (1 second for active tool timing). Render order is deterministic — all slots are managed through `flushStatus()`.

- ## What's Displayed
+ | Slot | Description | Condition |
+ |------|-------------|-----------|
+ | **CtxMax** | Native model context window from Ollama `/api/show` (k-notation) | Local or remote Ollama |
+ | **RespMax** | Max response/completion tokens with k-notation (e.g., `16k`) | After first provider request |
+ | **Resp** | Agent loop duration (e.g., `2m3s`) | After first agent cycle |
+ | **CPU%** | Per-core CPU usage delta | Local Ollama only |
+ | **RAM** | Used/total system memory | Local Ollama only |
+ | **Swap** | Used/total swap space | Local only, when active |
+ | **Generation params** | Temperature, top_p, top_k, num_predict, context size, reasoning_effort (dimmed) | After first provider request |
+ | **SEC** | Session-scoped blocked tool count + 3s flash on block event | When blocks occur |
+ | **Active tool** | Live elapsed timer with `>` indicator | While a tool is running |
+ | **Prompt** | System prompt size as `chars chr tokens tok` | After first agent start |
+ | **Pi version** | `pi:0.66.1` (dimmed, always last) | Always shown |

- - **Working directory** compact `~`-relative path
- - **Git branch** — current branch name (cached)
- - **Active model** — the model Pi is currently using
- - **Thinking level** — shown when active (off is hidden)
- - **Context usage** — percentage and window size (`5.6%/128k`)
- - **CPU%** — per-core delta (updates every 3s)
- - **RAM** — used/total
- - **Swap** — shown only when active
- - **Loaded model** — Ollama model in memory via `/api/ps` (cached 15s)
- - **Response time** — agent loop duration
- - **Generation params** — temperature, top_p, top_k, max tokens, num_predict, context size
- - **Security indicator** — 3s flash on blocked tools + persistent blocked count
- - **Active tool timing** — live elapsed timer for running tool
+ All slots are cleared on `session_shutdown`. Metrics that the framework already provides (model name, session tokens, context usage, thinking level) are intentionally omitted to avoid duplication.

  ## Links

@@ -49,4 +53,4 @@ CPU/RAM/Swap are only shown when using a local Ollama provider (not for cloud/re

  ## License

- MIT — [VTSTech](https://www.vts-tech.org)
+ MIT — [VTSTech](https://www.vts-tech.org)
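
For context, a minimal sketch of the named-slot pattern the updated README describes, modeled on the `ctx.ui.setStatus()` calls visible in the status.js diff below; the `demoClock` extension and the `"demo-clock"` slot name are hypothetical examples, not part of this package.

```js
// Minimal sketch (assumes the setStatus() usage shown in status.js below):
// a named slot is set with a string and cleared by passing undefined.
export default function demoClock(pi) {
  let interval = null;
  pi.on("session_start", async (_event, ctx) => {
    const dim = (s) => ctx.ui.theme?.fg?.("dim", s) ?? s;
    // Each extension owns its own slot names, so "demo-clock" (hypothetical)
    // can coexist with pi-status slots such as "status-cpu".
    interval = setInterval(() => {
      ctx.ui.setStatus("demo-clock", `${dim("Time")} ${new Date().toLocaleTimeString()}`);
    }, 1000);
  });
  pi.on("session_shutdown", async (_event, ctx) => {
    if (interval) clearInterval(interval);
    // Clearing mirrors the shutdown handling in status.js: undefined removes the slot.
    ctx?.ui?.setStatus("demo-clock", undefined);
  });
}
```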
package/package.json CHANGED
@@ -1,9 +1,11 @@
  {
  "name": "@vtstech/pi-status",
- "version": "1.1.2",
+ "version": "1.1.4-dev",
  "description": "System monitor / status bar extension for Pi Coding Agent",
  "main": "status.js",
- "keywords": ["pi-extensions"],
+ "keywords": [
+ "pi-extensions"
+ ],
  "license": "MIT",
  "access": "public",
  "type": "module",
@@ -14,12 +16,14 @@
  "url": "https://github.com/VTSTech/pi-coding-agent"
  },
  "dependencies": {
- "@vtstech/pi-shared": "1.1.2"
+ "@vtstech/pi-shared": "1.1.4"
  },
  "peerDependencies": {
  "@mariozechner/pi-coding-agent": ">=0.66"
  },
  "pi": {
- "extensions": ["./status.js"]
+ "extensions": [
+ "./status.js"
+ ]
  }
  }
package/status.js CHANGED
@@ -1,33 +1,34 @@
  // .build-npm/status/status.temp.ts
+ import * as fs from "node:fs";
+ import { execSync } from "node:child_process";
  import os from "node:os";
- import { execSync as gitExecSync } from "node:child_process";
  import { getOllamaBaseUrl, fetchModelContextLength, readModelsJson } from "@vtstech/pi-shared/ollama";
  import { fmtBytes, fmtDur } from "@vtstech/pi-shared/format";
+ import { debugLog } from "@vtstech/pi-shared/debug";
+ var STATUS_UPDATE_INTERVAL_MS = 5e3;
+ var TOOL_TIMER_INTERVAL_MS = 1e3;
  function status_temp_default(pi) {
  let lastResponseTime = null;
  let agentStartTime = null;
  let updateInterval = null;
+ let toolTimerInterval = null;
  let currentCtx = null;
  let ctxUi = null;
+ let ctxTheme = null;
  let prevCpuInfo = getCpuSnapshot();
  let lastPayload = null;
- let tuiRef = null;
- let gitBranchCache = "";
  let cpuUsage = 0;
  let memUsed = 0;
  let memTotal = 0;
  let swapUsed = 0;
  let swapTotal = 0;
  let hasSwap = false;
- let ollamaLoaded = "";
  let footerModel = "";
- let footerThinking = "";
- let footerCtxPct = "";
  let footerNativeCtx = "";
  let nativeCtxModel = "";
  let isLocalProvider = true;
- let lastUpstream = 0;
- let lastDownstream = 0;
+ let versionsText = "";
+ let cachedPromptText = null;
  let securityFlashTool = "";
  let securityFlashUntil = 0;
  let activeTool = "";
@@ -65,18 +66,20 @@ function status_temp_default(pi) {
  return { used, total };
  }
  function getSwap() {
+ if (process.platform !== "linux") {
+ debugLog("status", "swap detection skipped: not a Linux platform");
+ return null;
+ }
  try {
- const out = gitExecSync("cat /proc/meminfo", { encoding: "utf-8", timeout: 3e3 });
+ const out = fs.readFileSync("/proc/meminfo", "utf-8");
  const swapTotal2 = Number(out.match(/SwapTotal:\s+(\d+)/)?.[1]) * 1024;
  const swapFree = Number(out.match(/SwapFree:\s+(\d+)/)?.[1]) * 1024;
  if (swapTotal2 > 0) return { used: swapTotal2 - swapFree, total: swapTotal2 };
- } catch {
+ } catch (err) {
+ debugLog("status", "failed to read /proc/meminfo", err);
  }
  return null;
  }
- let ollamaLoadedCache = "";
- let ollamaLoadedLastCheck = 0;
- const OLLAMA_LOADED_INTERVAL = 15e3;
  function detectLocalProvider(modelsJson) {
  const isLocalUrl = (url) => url.includes("localhost") || url.includes("127.0.0.1") || url.includes("0.0.0.0");
  try {
@@ -116,68 +119,70 @@ function status_temp_default(pi) {
  }
  return footerNativeCtx;
  }
- async function fetchOllamaLoadedModel() {
- try {
- const ollamaBase = getOllamaBaseUrl();
- const res = await fetch(`${ollamaBase}/api/ps`, {
- signal: AbortSignal.timeout(5e3)
- });
- if (!res.ok) return "";
- const data = await res.json();
- const models = data?.models || [];
- if (Array.isArray(models) && models.length > 0) {
- return models[0].name || models[0].model || "";
- }
- } catch {
- }
- return "";
- }
- function getOllamaLoadedModel() {
- const now = Date.now();
- if (now - ollamaLoadedLastCheck < OLLAMA_LOADED_INTERVAL) return ollamaLoadedCache;
- ollamaLoadedLastCheck = now;
- fetchOllamaLoadedModel().then((loaded) => {
- ollamaLoadedCache = loaded;
- }).catch(() => {
- ollamaLoadedCache = "";
- });
- return ollamaLoadedCache;
- }
  function extractParams(payload) {
  const params = [];
  if (payload.temperature !== void 0) params.push(`temp:${payload.temperature}`);
  if (payload.top_p !== void 0) params.push(`top_p:${payload.top_p}`);
  if (payload.top_k !== void 0) params.push(`top_k:${payload.top_k}`);
- if (payload.max_completion_tokens !== void 0) params.push(`max:${payload.max_completion_tokens}`);
- else if (payload.max_tokens !== void 0) params.push(`max:${payload.max_tokens}`);
  if (payload.num_predict !== void 0) params.push(`predict:${payload.num_predict}`);
  if (payload.num_ctx !== void 0) params.push(`ctx:${payload.num_ctx}`);
  if (payload.reasoning_effort !== void 0) params.push(`think:${payload.reasoning_effort}`);
  return params;
  }
- function fmtTk(n) {
- if (n >= 1e3) return `${(n / 1e3).toFixed(1)}k`;
- return String(n);
- }
- function getPwd() {
- const cwd = process.cwd();
- if (cwd.startsWith(os.homedir())) return "~" + cwd.slice(os.homedir().length);
- return cwd;
- }
- function getGitBranch() {
- if (gitBranchCache) return gitBranchCache;
- try {
- const branch = gitExecSync("git rev-parse --abbrev-ref HEAD 2>/dev/null", {
- encoding: "utf-8",
- timeout: 3e3
- }).trim();
- if (branch) gitBranchCache = branch;
- } catch {
+ function flushStatus() {
+ if (!ctxUi) return;
+ const theme = ctxTheme;
+ const dim2 = (s) => theme?.fg?.("dim", s) ?? s;
+ const green2 = (s) => theme?.fg?.("success", s) ?? s;
+ ctxUi.setStatus("status-cpu", isLocalProvider ? `${dim2("CPU")} ${green2(cpuUsage.toFixed(0) + "%")}` : void 0);
+ ctxUi.setStatus("status-ram", isLocalProvider ? `${dim2("RAM")} ${green2(fmtBytes(memUsed) + "/" + fmtBytes(memTotal))}` : void 0);
+ ctxUi.setStatus(
+ "status-swap",
+ isLocalProvider && hasSwap && swapUsed > 0 ? `${dim2("Swap")} ${green2(fmtBytes(swapUsed) + "/" + fmtBytes(swapTotal))}` : void 0
+ );
+ ctxUi.setStatus(
+ "status-native-ctx",
+ footerNativeCtx ? `${dim2("CtxMax:")}${green2(footerNativeCtx)}` : void 0
+ );
+ if (lastPayload) {
+ const rawMax = lastPayload.max_completion_tokens ?? lastPayload.max_tokens;
+ if (rawMax !== void 0) {
+ const formatted = rawMax >= 1e3 ? `${(rawMax / 1e3).toFixed(rawMax % 1e3 === 0 ? 0 : 1)}k` : String(rawMax);
+ ctxUi.setStatus("status-resp-max", `${dim2("RespMax:")}${green2(formatted)}`);
+ } else {
+ ctxUi.setStatus("status-resp-max", void 0);
+ }
+ } else {
+ ctxUi.setStatus("status-resp-max", void 0);
+ }
+ ctxUi.setStatus(
+ "status-resp",
+ lastResponseTime !== null ? `${dim2("Resp")} ${green2(fmtDur(lastResponseTime))}` : void 0
+ );
+ if (lastPayload) {
+ const params = extractParams(lastPayload);
+ ctxUi.setStatus("status-params", params.length > 0 ? dim2(params.join(" ")) : void 0);
+ } else {
+ ctxUi.setStatus("status-params", void 0);
+ }
+ const now = Date.now();
+ if (securityFlashTool && now < securityFlashUntil) {
+ ctxUi.setStatus("status-sec", `${dim2("SEC:")}${green2(String(blockedCount))} ${dim2("(blocked: " + securityFlashTool + ")")}`);
+ } else if (blockedCount > 0) {
+ ctxUi.setStatus("status-sec", `${dim2("SEC:")}${green2(String(blockedCount))}`);
+ } else {
+ ctxUi.setStatus("status-sec", void 0);
+ }
+ if (activeTool && activeToolStart > 0) {
+ const elapsed = performance.now() - activeToolStart;
+ ctxUi.setStatus("status-tool", `${green2(">")} ${dim2(activeTool + ":")} ${green2(fmtDur(elapsed))}`);
+ } else {
+ ctxUi.setStatus("status-tool", void 0);
+ }
+ ctxUi.setStatus("system-prompt", cachedPromptText ?? void 0);
+ if (versionsText) {
+ ctxUi.setStatus("status-versions", dim2(versionsText));
  }
- return gitBranchCache;
- }
- function incrementBlockedCount() {
- blockedCount++;
  }
  function updateMetrics() {
  cpuUsage = getCpuUsage();
@@ -192,154 +197,79 @@ function status_temp_default(pi) {
  } else {
  hasSwap = false;
  }
- ollamaLoaded = getOllamaLoadedModel();
  const modelsJson = readModelsJson();
  isLocalProvider = modelsJson ? detectLocalProvider(modelsJson) : false;
  if (currentCtx) {
  footerModel = currentCtx.model?.id || "";
- footerThinking = pi.getThinkingLevel?.() ?? "";
- const usage = currentCtx.getContextUsage?.();
- if (usage && usage.contextWindow > 0) {
- const pctVal = (usage.tokens / usage.contextWindow * 100).toFixed(1);
- footerCtxPct = `${pctVal}%/${(usage.contextWindow / 1e3).toFixed(0)}k`;
- } else {
- footerCtxPct = "";
- }
  const modelId = currentCtx.model?.id || "";
- if (modelId && isLocalProvider) {
+ if (modelId) {
  getNativeModelCtx(modelId);
  }
  }
+ flushStatus();
  }
  pi.on("session_start", async (_event, ctx) => {
  currentCtx = ctx;
  ctxUi = ctx.ui;
+ ctxTheme = ctx.ui.theme;
  prevCpuInfo = getCpuSnapshot();
+ try {
+ const out = execSync("pi -v 2>&1", { encoding: "utf-8", timeout: 5e3 }).trim();
+ if (out) versionsText = `pi:${out}`;
+ } catch {
+ }
  updateMetrics();
- ctx.ui.setFooter((tui, theme, footerData) => {
- tuiRef = tui;
- const dim = (s) => theme?.fg?.("dim", s) ?? s;
- const red = (s) => theme?.fg?.("error", s) ?? s;
- const yellow = (s) => theme?.fg?.("yellow", s) ?? s;
- const sep = dim(" \xB7 ");
- const truncateLine = (line, maxW) => {
- const ellipsis = dim("...");
- const visible = line.replace(/\x1b\[[0-9;]*m/g, "");
- if (visible.length > maxW) {
- let vis = 0, cut = 0;
- for (let i = 0; i < line.length && vis < maxW - 3; i++) {
- if (line[i] === "\x1B") {
- while (i < line.length && line[i] !== "m") i++;
- } else {
- vis++;
- }
- cut = i + 1;
- }
- return line.slice(0, cut) + ellipsis;
- }
- return line;
- };
- return {
- render(width) {
- const lines = [];
- let branch = "";
- try {
- branch = footerData?.getGitBranch?.() || "";
- } catch {
- }
- if (!branch) branch = getGitBranch();
- const line1Parts = [];
- if (footerModel) line1Parts.push(`conf:${footerModel}`);
- line1Parts.push(getPwd());
- if (footerThinking && footerThinking !== "off") line1Parts.push(dim(footerThinking));
- if (isLocalProvider) {
- line1Parts.push(dim(`CPU ${cpuUsage.toFixed(0)}%`));
- }
- let line1 = truncateLine(line1Parts.join(sep), width);
- lines.push(line1);
- const line2Parts = [];
- if (ollamaLoaded) line2Parts.push(`load:${ollamaLoaded}`);
- if (footerNativeCtx) line2Parts.push(`M:${footerNativeCtx}`);
- if (footerCtxPct) line2Parts.push(`S:${footerCtxPct}`);
- if (isLocalProvider) {
- line2Parts.push(`RAM ${fmtBytes(memUsed)}/${fmtBytes(memTotal)}`);
- if (hasSwap && swapUsed > 0) {
- line2Parts.push(`Swap ${fmtBytes(swapUsed)}/${fmtBytes(swapTotal)}`);
- }
- }
- if (lastUpstream > 0 || lastDownstream > 0) {
- line2Parts.push(dim(`\u2191${fmtTk(lastUpstream)} \u2193${fmtTk(lastDownstream)}`));
- }
- if (lastResponseTime !== null) line2Parts.push(`Resp ${fmtDur(lastResponseTime)}`);
- if (lastPayload) {
- const params = extractParams(lastPayload);
- if (params.length > 0) line2Parts.push(...params.map((p) => dim(p)));
- }
- const now = Date.now();
- if (securityFlashTool && now < securityFlashUntil) {
- line2Parts.push(red(`BLOCKED:${securityFlashTool}`));
- }
- if (blockedCount > 0) {
- line2Parts.push(red(`SEC:${blockedCount}`));
- }
- let line2 = truncateLine(line2Parts.join(sep), width);
- if (line2) lines.push(line2);
- if (activeTool && activeToolStart > 0) {
- const elapsed = performance.now() - activeToolStart;
- lines.push(`${yellow("\u23F3")} ${activeTool}: ${fmtDur(elapsed)}`);
- }
- return lines;
- },
- invalidate() {
- },
- dispose() {
- }
- };
- });
  if (updateInterval) clearInterval(updateInterval);
- updateInterval = setInterval(() => {
- updateMetrics();
- if (tuiRef) tuiRef.requestRender();
- }, 3e3);
+ updateInterval = setInterval(updateMetrics, STATUS_UPDATE_INTERVAL_MS);
  });
- pi.on("session_shutdown", async () => {
- if (updateInterval) clearInterval(updateInterval);
- updateInterval = null;
- tuiRef = null;
- if (ctxUi) {
- ctxUi.setFooter(void 0);
- ctxUi = null;
+ pi.on("session_shutdown", async (_event, ctx) => {
+ if (updateInterval) {
+ clearInterval(updateInterval);
+ updateInterval = null;
  }
+ if (toolTimerInterval) {
+ clearInterval(toolTimerInterval);
+ toolTimerInterval = null;
+ }
+ ctxUi = null;
  currentCtx = null;
+ const ui = ctx?.ui;
+ if (ui) {
+ ui.setStatus("status-cpu", void 0);
+ ui.setStatus("status-ram", void 0);
+ ui.setStatus("status-swap", void 0);
+ ui.setStatus("status-native-ctx", void 0);
+ ui.setStatus("status-resp", void 0);
+ ui.setStatus("status-resp-max", void 0);
+ ui.setStatus("status-params", void 0);
+ ui.setStatus("system-prompt", void 0);
+ ui.setStatus("status-sec", void 0);
+ ui.setStatus("status-tool", void 0);
+ ui.setStatus("status-versions", void 0);
+ }
  securityFlashTool = "";
  securityFlashUntil = 0;
  activeTool = "";
  activeToolStart = 0;
  blockedCount = 0;
- lastUpstream = 0;
- lastDownstream = 0;
+ lastResponseTime = null;
+ lastPayload = null;
+ versionsText = "";
+ cachedPromptText = null;
  });
  pi.on("before_provider_request", (event) => {
  lastPayload = event.payload;
  });
- function captureUsage(event) {
- if (event?.message?.role !== "assistant") return;
- const usage = event?.message?.usage ?? // normalised Pi usage
- event?.usage ?? // alternative path
- null;
- if (!usage) return;
- const inp = usage.input ?? usage.promptTokens ?? usage.prompt_tokens;
- const out = usage.output ?? usage.completionTokens ?? usage.completion_tokens;
- if (inp != null) lastUpstream = inp;
- if (out != null) lastDownstream = out;
- if (tuiRef) tuiRef.requestRender();
- }
- pi.on("message_end", captureUsage);
- pi.on("turn_end", captureUsage);
- pi.on("agent_start", async () => {
+ pi.on("agent_start", async (_event, ctx) => {
  agentStartTime = performance.now();
- lastUpstream = 0;
- lastDownstream = 0;
+ try {
+ const prompt = ctx.getSystemPrompt();
+ const chr = prompt.length;
+ const tok = prompt.split(/\s+/).filter(Boolean).length;
+ cachedPromptText = `${dim("Prompt:")} ${green(`${chr} chr ${tok} tok`)}`;
+ } catch {
+ }
+ flushStatus();
  });
  pi.on("agent_end", async () => {
  if (agentStartTime !== null) {
@@ -348,29 +278,40 @@ function status_temp_default(pi) {
  }
  activeTool = "";
  activeToolStart = 0;
+ stopToolTimer();
  updateMetrics();
- if (tuiRef) tuiRef.requestRender();
  });
+ function startToolTimer() {
+ if (toolTimerInterval) return;
+ toolTimerInterval = setInterval(flushStatus, TOOL_TIMER_INTERVAL_MS);
+ }
+ function stopToolTimer() {
+ if (toolTimerInterval) {
+ clearInterval(toolTimerInterval);
+ toolTimerInterval = null;
+ }
+ }
  pi.on("tool_call", (event) => {
  if (!event) return;
  const isBlocked = event.blocked === true || event.blocked === "true" || event.result?.blocked === true || event.error?.includes("blocked");
  if (isBlocked) {
  securityFlashTool = event.tool ?? event.name ?? "unknown";
  securityFlashUntil = Date.now() + 3e3;
- incrementBlockedCount();
- if (tuiRef) tuiRef.requestRender();
+ blockedCount++;
+ flushStatus();
  }
  });
  pi.on("tool_execution_start", (event) => {
  if (!event) return;
  activeTool = event.tool ?? event.name ?? "tool";
  activeToolStart = performance.now();
- if (tuiRef) tuiRef.requestRender();
+ startToolTimer();
  });
  pi.on("tool_execution_end", () => {
  activeTool = "";
  activeToolStart = 0;
- if (tuiRef) tuiRef.requestRender();
+ stopToolTimer();
+ flushStatus();
  });
  }
  export {