@vtstech/pi-status 1.1.3 → 1.1.4-dev

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +27 -23
  2. package/package.json +8 -4
  3. package/status.js +44 -23
package/README.md CHANGED
@@ -1,8 +1,8 @@
1
1
  # @vtstech/pi-status
2
2
 
3
- System monitor / status bar extension for the [Pi Coding Agent](https://github.com/badlogic/pi-mono).
3
+ System monitor extension for the [Pi Coding Agent](https://github.com/badlogic/pi-mono).
4
4
 
5
- Replaces the Pi footer with a unified status bar showing system metrics, model info, and generation params.
5
+ Adds composable named status items to the framework footer using `ctx.ui.setStatus()`. Each metric gets its own named slot so it coexists cleanly with other extensions' status items.
6
6
 
7
7
  ## Install
8
8
 
@@ -12,35 +12,39 @@ pi install "npm:@vtstech/pi-status"
12
12
 
13
13
  ## How It Works
14
14
 
15
- Automatically loaded — no commands needed. Displays a 2-line status bar at the bottom of the Pi interface.
15
+ Automatically loaded — no commands needed. Slots are rendered in the framework footer alongside framework items (model name, session tokens, context usage). All labels use dimmed coloring; all values use green highlighting.
16
16
 
17
- **Line 1 (conf):**
17
+ CPU/RAM/Swap are shown only when using a local Ollama provider; for cloud/remote providers these system metrics are omitted.
18
+
19
+ **Example (local Ollama):**
18
20
  ```
19
- qwen3.5:0.8b · ~/.pi/agent · medium · CPU 9%
21
+ CtxMax:41k RespMax:16.4k Resp 2m3s CPU 12% RAM 2.2G/15.1G Prompt: 2840 chr 393 tok pi:0.66.1
20
22
  ```
21
23
 
22
- **Line 2 (load):**
24
+ **Example (cloud provider):**
23
25
  ```
24
- qwen3.5:0.8b · M:33k · S:9.0%/128k · RAM 2.2G/15.1G · Resp 5m24s · temp:0.0 · max:16384
26
+ CtxMax:128k RespMax:16.4k Resp 1m22s Prompt: 2840 chr 393 tok pi:0.66.1
25
27
  ```
26
28
 
27
- CPU/RAM/Swap are only shown when using a local Ollama provider (not for cloud/remote).
29
+ ## Status Slots
30
+
31
+ Slots are updated every 5 seconds (1 second for active tool timing). Render order is deterministic — all slots are managed through `flushStatus()`.
28
32
 
29
- ## What's Displayed
33
+ | Slot | Description | Condition |
34
+ |------|-------------|-----------|
35
+ | **CtxMax** | Native model context window from Ollama `/api/show` (k-notation) | Local or remote Ollama |
36
+ | **RespMax** | Max response/completion tokens with k-notation (e.g., `16.4k`) | After first provider request |
37
+ | **Resp** | Agent loop duration (e.g., `2m3s`) | After first agent cycle |
38
+ | **CPU%** | Per-core CPU usage delta | Local Ollama only |
39
+ | **RAM** | Used/total system memory | Local Ollama only |
40
+ | **Swap** | Used/total swap space | Local only, when active |
41
+ | **Generation params** | Temperature, top_p, top_k, num_predict, context size, reasoning_effort (dimmed) | After first provider request |
42
+ | **SEC** | Session-scoped blocked tool count + 3s flash on block event | When blocks occur |
43
+ | **Active tool** | Live elapsed timer with `>` indicator | While a tool is running |
44
+ | **Prompt** | System prompt size as `chars chr tokens tok` | After first agent start |
45
+ | **Pi version** | `pi:0.66.1` (dimmed, always last) | When `pi -v` resolves at session start |
30
46
 
31
- - **Working directory** compact `~`-relative path
32
- - **Git branch** — current branch name (cached)
33
- - **Active model** — the model Pi is currently using
34
- - **Thinking level** — shown when active (off is hidden)
35
- - **Context usage** — percentage and window size (`5.6%/128k`)
36
- - **CPU%** — per-core delta (updates every 3s)
37
- - **RAM** — used/total
38
- - **Swap** — shown only when active
39
- - **Loaded model** — Ollama model in memory via `/api/ps` (cached 15s)
40
- - **Response time** — agent loop duration
41
- - **Generation params** — temperature, top_p, top_k, max tokens, num_predict, context size
42
- - **Security indicator** — 3s flash on blocked tools + persistent blocked count
43
- - **Active tool timing** — live elapsed timer for running tool
47
+ All slots are cleared on `session_shutdown`. Metrics that the framework already provides (model name, session tokens, context usage, thinking level) are intentionally omitted to avoid duplication.
44
48
 
45
49
  ## Links
46
50
 
@@ -49,4 +53,4 @@ CPU/RAM/Swap are only shown when using a local Ollama provider (not for cloud/re
49
53
 
50
54
  ## License
51
55
 
52
- MIT — [VTSTech](https://www.vts-tech.org)
56
+ MIT — [VTSTech](https://www.vts-tech.org)
package/package.json CHANGED
@@ -1,9 +1,11 @@
1
1
  {
2
2
  "name": "@vtstech/pi-status",
3
- "version": "1.1.3",
3
+ "version": "1.1.4-dev",
4
4
  "description": "System monitor / status bar extension for Pi Coding Agent",
5
5
  "main": "status.js",
6
- "keywords": ["pi-extensions"],
6
+ "keywords": [
7
+ "pi-extensions"
8
+ ],
7
9
  "license": "MIT",
8
10
  "access": "public",
9
11
  "type": "module",
@@ -14,12 +16,14 @@
14
16
  "url": "https://github.com/VTSTech/pi-coding-agent"
15
17
  },
16
18
  "dependencies": {
17
- "@vtstech/pi-shared": "1.1.3"
19
+ "@vtstech/pi-shared": "1.1.4"
18
20
  },
19
21
  "peerDependencies": {
20
22
  "@mariozechner/pi-coding-agent": ">=0.66"
21
23
  },
22
24
  "pi": {
23
- "extensions": ["./status.js"]
25
+ "extensions": [
26
+ "./status.js"
27
+ ]
24
28
  }
25
29
  }
package/status.js CHANGED
@@ -1,5 +1,6 @@
1
1
  // .build-npm/status/status.temp.ts
2
2
  import * as fs from "node:fs";
3
+ import { execSync } from "node:child_process";
3
4
  import os from "node:os";
4
5
  import { getOllamaBaseUrl, fetchModelContextLength, readModelsJson } from "@vtstech/pi-shared/ollama";
5
6
  import { fmtBytes, fmtDur } from "@vtstech/pi-shared/format";
@@ -13,6 +14,7 @@ function status_temp_default(pi) {
13
14
  let toolTimerInterval = null;
14
15
  let currentCtx = null;
15
16
  let ctxUi = null;
17
+ let ctxTheme = null;
16
18
  let prevCpuInfo = getCpuSnapshot();
17
19
  let lastPayload = null;
18
20
  let cpuUsage = 0;
@@ -22,10 +24,11 @@ function status_temp_default(pi) {
22
24
  let swapTotal = 0;
23
25
  let hasSwap = false;
24
26
  let footerModel = "";
25
- let footerThinking = "";
26
27
  let footerNativeCtx = "";
27
28
  let nativeCtxModel = "";
28
29
  let isLocalProvider = true;
30
+ let versionsText = "";
31
+ let cachedPromptText = null;
29
32
  let securityFlashTool = "";
30
33
  let securityFlashUntil = 0;
31
34
  let activeTool = "";
@@ -121,8 +124,6 @@ function status_temp_default(pi) {
121
124
  if (payload.temperature !== void 0) params.push(`temp:${payload.temperature}`);
122
125
  if (payload.top_p !== void 0) params.push(`top_p:${payload.top_p}`);
123
126
  if (payload.top_k !== void 0) params.push(`top_k:${payload.top_k}`);
124
- if (payload.max_completion_tokens !== void 0) params.push(`max:${payload.max_completion_tokens}`);
125
- else if (payload.max_tokens !== void 0) params.push(`max:${payload.max_tokens}`);
126
127
  if (payload.num_predict !== void 0) params.push(`predict:${payload.num_predict}`);
127
128
  if (payload.num_ctx !== void 0) params.push(`ctx:${payload.num_ctx}`);
128
129
  if (payload.reasoning_effort !== void 0) params.push(`think:${payload.reasoning_effort}`);
@@ -130,44 +131,58 @@ function status_temp_default(pi) {
130
131
  }
131
132
  function flushStatus() {
132
133
  if (!ctxUi) return;
133
- ctxUi.setStatus("status-cpu", isLocalProvider ? `CPU ${cpuUsage.toFixed(0)}%` : void 0);
134
- ctxUi.setStatus("status-ram", isLocalProvider ? `RAM ${fmtBytes(memUsed)}/${fmtBytes(memTotal)}` : void 0);
134
+ const theme = ctxTheme;
135
+ const dim2 = (s) => theme?.fg?.("dim", s) ?? s;
136
+ const green2 = (s) => theme?.fg?.("success", s) ?? s;
137
+ ctxUi.setStatus("status-cpu", isLocalProvider ? `${dim2("CPU")} ${green2(cpuUsage.toFixed(0) + "%")}` : void 0);
138
+ ctxUi.setStatus("status-ram", isLocalProvider ? `${dim2("RAM")} ${green2(fmtBytes(memUsed) + "/" + fmtBytes(memTotal))}` : void 0);
135
139
  ctxUi.setStatus(
136
140
  "status-swap",
137
- isLocalProvider && hasSwap && swapUsed > 0 ? `Swap ${fmtBytes(swapUsed)}/${fmtBytes(swapTotal)}` : void 0
141
+ isLocalProvider && hasSwap && swapUsed > 0 ? `${dim2("Swap")} ${green2(fmtBytes(swapUsed) + "/" + fmtBytes(swapTotal))}` : void 0
138
142
  );
139
143
  ctxUi.setStatus(
140
144
  "status-native-ctx",
141
- isLocalProvider && footerNativeCtx ? `M:${footerNativeCtx}` : void 0
142
- );
143
- ctxUi.setStatus(
144
- "status-thinking",
145
- footerThinking && footerThinking !== "off" ? footerThinking : void 0
145
+ footerNativeCtx ? `${dim2("CtxMax:")}${green2(footerNativeCtx)}` : void 0
146
146
  );
147
+ if (lastPayload) {
148
+ const rawMax = lastPayload.max_completion_tokens ?? lastPayload.max_tokens;
149
+ if (rawMax !== void 0) {
150
+ const formatted = rawMax >= 1e3 ? `${(rawMax / 1e3).toFixed(rawMax % 1e3 === 0 ? 0 : 1)}k` : String(rawMax);
151
+ ctxUi.setStatus("status-resp-max", `${dim2("RespMax:")}${green2(formatted)}`);
152
+ } else {
153
+ ctxUi.setStatus("status-resp-max", void 0);
154
+ }
155
+ } else {
156
+ ctxUi.setStatus("status-resp-max", void 0);
157
+ }
147
158
  ctxUi.setStatus(
148
159
  "status-resp",
149
- lastResponseTime !== null ? `Resp ${fmtDur(lastResponseTime)}` : void 0
160
+ lastResponseTime !== null ? `${dim2("Resp")} ${green2(fmtDur(lastResponseTime))}` : void 0
150
161
  );
151
162
  if (lastPayload) {
152
163
  const params = extractParams(lastPayload);
153
- ctxUi.setStatus("status-params", params.length > 0 ? params.join(" ") : void 0);
164
+ ctxUi.setStatus("status-params", params.length > 0 ? dim2(params.join(" ")) : void 0);
154
165
  } else {
155
166
  ctxUi.setStatus("status-params", void 0);
156
167
  }
157
168
  const now = Date.now();
158
169
  if (securityFlashTool && now < securityFlashUntil) {
159
- ctxUi.setStatus("status-sec", `SEC:${blockedCount} (blocked: ${securityFlashTool})`);
170
+ ctxUi.setStatus("status-sec", `${dim2("SEC:")}${green2(String(blockedCount))} ${dim2("(blocked: " + securityFlashTool + ")")}`);
160
171
  } else if (blockedCount > 0) {
161
- ctxUi.setStatus("status-sec", `SEC:${blockedCount}`);
172
+ ctxUi.setStatus("status-sec", `${dim2("SEC:")}${green2(String(blockedCount))}`);
162
173
  } else {
163
174
  ctxUi.setStatus("status-sec", void 0);
164
175
  }
165
176
  if (activeTool && activeToolStart > 0) {
166
177
  const elapsed = performance.now() - activeToolStart;
167
- ctxUi.setStatus("status-tool", `> ${activeTool}: ${fmtDur(elapsed)}`);
178
+ ctxUi.setStatus("status-tool", `${green2(">")} ${dim2(activeTool + ":")} ${green2(fmtDur(elapsed))}`);
168
179
  } else {
169
180
  ctxUi.setStatus("status-tool", void 0);
170
181
  }
182
+ ctxUi.setStatus("system-prompt", cachedPromptText ?? void 0);
183
+ if (versionsText) {
184
+ ctxUi.setStatus("status-versions", dim2(versionsText));
185
+ }
171
186
  }
172
187
  function updateMetrics() {
173
188
  cpuUsage = getCpuUsage();
@@ -186,9 +201,8 @@ function status_temp_default(pi) {
186
201
  isLocalProvider = modelsJson ? detectLocalProvider(modelsJson) : false;
187
202
  if (currentCtx) {
188
203
  footerModel = currentCtx.model?.id || "";
189
- footerThinking = pi.getThinkingLevel?.() ?? "";
190
204
  const modelId = currentCtx.model?.id || "";
191
- if (modelId && isLocalProvider) {
205
+ if (modelId) {
192
206
  getNativeModelCtx(modelId);
193
207
  }
194
208
  }
@@ -197,7 +211,13 @@ function status_temp_default(pi) {
197
211
  pi.on("session_start", async (_event, ctx) => {
198
212
  currentCtx = ctx;
199
213
  ctxUi = ctx.ui;
214
+ ctxTheme = ctx.ui.theme;
200
215
  prevCpuInfo = getCpuSnapshot();
216
+ try {
217
+ const out = execSync("pi -v 2>&1", { encoding: "utf-8", timeout: 5e3 }).trim();
218
+ if (out) versionsText = `pi:${out}`;
219
+ } catch {
220
+ }
201
221
  updateMetrics();
202
222
  if (updateInterval) clearInterval(updateInterval);
203
223
  updateInterval = setInterval(updateMetrics, STATUS_UPDATE_INTERVAL_MS);
@@ -219,12 +239,13 @@ function status_temp_default(pi) {
219
239
  ui.setStatus("status-ram", void 0);
220
240
  ui.setStatus("status-swap", void 0);
221
241
  ui.setStatus("status-native-ctx", void 0);
222
- ui.setStatus("status-thinking", void 0);
223
242
  ui.setStatus("status-resp", void 0);
243
+ ui.setStatus("status-resp-max", void 0);
224
244
  ui.setStatus("status-params", void 0);
225
245
  ui.setStatus("system-prompt", void 0);
226
246
  ui.setStatus("status-sec", void 0);
227
247
  ui.setStatus("status-tool", void 0);
248
+ ui.setStatus("status-versions", void 0);
228
249
  }
229
250
  securityFlashTool = "";
230
251
  securityFlashUntil = 0;
@@ -233,6 +254,8 @@ function status_temp_default(pi) {
233
254
  blockedCount = 0;
234
255
  lastResponseTime = null;
235
256
  lastPayload = null;
257
+ versionsText = "";
258
+ cachedPromptText = null;
236
259
  });
237
260
  pi.on("before_provider_request", (event) => {
238
261
  lastPayload = event.payload;
@@ -241,14 +264,12 @@ function status_temp_default(pi) {
241
264
  agentStartTime = performance.now();
242
265
  try {
243
266
  const prompt = ctx.getSystemPrompt();
244
- const theme = ctx.ui.theme;
245
267
  const chr = prompt.length;
246
268
  const tok = prompt.split(/\s+/).filter(Boolean).length;
247
- const label = theme.fg("dim", "Prompt:");
248
- const value = theme.fg("success", `${chr} chr ${tok} tok`);
249
- ctxUi?.setStatus("system-prompt", `${label} ${value}`);
269
+ cachedPromptText = `${dim("Prompt:")} ${green(`${chr} chr ${tok} tok`)}`;
250
270
  } catch {
251
271
  }
272
+ flushStatus();
252
273
  });
253
274
  pi.on("agent_end", async () => {
254
275
  if (agentStartTime !== null) {