@vtstech/pi-status 1.0.4 → 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +50 -0
  2. package/package.json +3 -3
  3. package/status.js +51 -6
package/README.md ADDED
@@ -0,0 +1,50 @@
1
+ # @vtstech/pi-status
2
+
3
+ System monitor / status bar extension for the [Pi Coding Agent](https://github.com/badlogic/pi-mono).
4
+
5
+ Replaces the Pi footer with a unified status bar showing system metrics, model info, and generation params.
6
+
7
+ ## Install
8
+
9
+ ```bash
10
+ pi install "npm:@vtstech/pi-status"
11
+ ```
12
+
13
+ ## How It Works
14
+
15
+ Automatically loaded — no commands needed. Displays a 2-line status bar at the bottom of the Pi interface.
16
+
17
+ **Line 1:**
18
+ ```
19
+ ~/.pi/agent · main · qwen3:0.6b · medium · 5.6%/128k · CPU 9% · RAM 2.2G/15.1G · Resp 5m24s · temp:0.0
20
+ ```
21
+
22
+ **Line 2:**
23
+ ```
24
+ ⏱ bash (12s)
25
+ ```
26
+
27
+ ## What's Displayed
28
+
29
+ - **Working directory** — compact `~`-relative path
30
+ - **Git branch** — current branch name (cached)
31
+ - **Active model** — the model Pi is currently using
32
+ - **Thinking level** — shown when active (off is hidden)
33
+ - **Context usage** — percentage and window size (`5.6%/128k`)
34
+ - **CPU%** — per-core delta (updates every 3s)
35
+ - **RAM** — used/total
36
+ - **Swap** — shown only when active
37
+ - **Loaded model** — Ollama model in memory via `/api/ps` (cached 15s)
38
+ - **Response time** — agent loop duration
39
+ - **Generation params** — temperature, top_p, top_k, max tokens, num_predict, context size
40
+ - **Security indicator** — 3s flash on blocked tools + persistent blocked count
41
+ - **Active tool timing** — live elapsed timer for running tool
42
+
43
+ ## Links
44
+
45
+ - [Full Documentation](https://github.com/VTSTech/pi-coding-agent#system-monitor-status-ts)
46
+ - [Changelog](https://github.com/VTSTech/pi-coding-agent/blob/main/CHANGELOG.md)
47
+
48
+ ## License
49
+
50
+ MIT — [VTSTech](https://www.vts-tech.org)
package/package.json CHANGED
@@ -1,9 +1,9 @@
1
1
  {
2
2
  "name": "@vtstech/pi-status",
3
- "version": "1.0.4",
3
+ "version": "1.0.6",
4
4
  "description": "System monitor / status bar extension for Pi Coding Agent",
5
5
  "main": "status.js",
6
- "keywords": ["pi-package", "pi-extensions"],
6
+ "keywords": ["pi-extensions"],
7
7
  "license": "MIT",
8
8
  "access": "public",
9
9
  "type": "module",
@@ -14,7 +14,7 @@
14
14
  "url": "https://github.com/VTSTech/pi-coding-agent"
15
15
  },
16
16
  "dependencies": {
17
- "@vtstech/pi-shared": "1.0.4"
17
+ "@vtstech/pi-shared": "1.0.6"
18
18
  },
19
19
  "peerDependencies": {
20
20
  "@mariozechner/pi-coding-agent": ">=0.66"
package/status.js CHANGED
@@ -1,5 +1,7 @@
1
1
  // .build-npm/status/status.temp.ts
2
2
  import os from "node:os";
3
+ import * as fs from "node:fs";
4
+ import * as path from "node:path";
3
5
  import { execSync } from "node:child_process";
4
6
  import { getOllamaBaseUrl } from "@vtstech/pi-shared/ollama";
5
7
  import { fmtBytes, fmtDur } from "@vtstech/pi-shared/format";
@@ -24,6 +26,7 @@ function status_temp_default(pi) {
24
26
  let footerModel = "";
25
27
  let footerThinking = "";
26
28
  let footerCtxPct = "";
29
+ let isLocalProvider = true;
27
30
  let securityFlashTool = "";
28
31
  let securityFlashUntil = 0;
29
32
  let activeTool = "";
@@ -73,6 +76,26 @@ function status_temp_default(pi) {
73
76
  let ollamaLoadedCache = "";
74
77
  let ollamaLoadedLastCheck = 0;
75
78
  const OLLAMA_LOADED_INTERVAL = 15e3;
79
+ function detectLocalProvider(modelsJson) {
80
+ try {
81
+ for (const provider of Object.values(modelsJson.providers || {})) {
82
+ const url = provider.baseUrl || "";
83
+ const hasLocalUrl = url.includes("localhost") || url.includes("127.0.0.1") || url.includes("0.0.0.0");
84
+ const modelId = footerModel || "";
85
+ if (modelId && (provider.models || []).some((m) => m.id === modelId)) {
86
+ return hasLocalUrl;
87
+ }
88
+ }
89
+ for (const [name, provider] of Object.entries(modelsJson.providers || {})) {
90
+ const url = provider.baseUrl || "";
91
+ if (url.includes("localhost") || url.includes("127.0.0.1") || url.includes("0.0.0.0") || name === "ollama") {
92
+ return true;
93
+ }
94
+ }
95
+ } catch {
96
+ }
97
+ return false;
98
+ }
76
99
  function getOllamaLoadedModel() {
77
100
  const now = Date.now();
78
101
  if (now - ollamaLoadedLastCheck < OLLAMA_LOADED_INTERVAL) return ollamaLoadedCache;
@@ -149,17 +172,37 @@ function status_temp_default(pi) {
149
172
  hasSwap = false;
150
173
  }
151
174
  ollamaLoaded = getOllamaLoadedModel();
175
+ let modelsJson = null;
176
+ try {
177
+ const raw = fs.readFileSync(
178
+ path.join(os.homedir(), ".pi", "agent", "models.json"),
179
+ "utf-8"
180
+ );
181
+ modelsJson = JSON.parse(raw);
182
+ } catch {
183
+ }
152
184
  if (currentCtx) {
153
185
  footerModel = currentCtx.model?.id || "";
154
186
  footerThinking = pi.getThinkingLevel?.() ?? "";
155
187
  const usage = currentCtx.getContextUsage?.();
156
188
  if (usage && usage.contextWindow > 0) {
157
- const pct = (usage.tokens / usage.contextWindow * 100).toFixed(1);
158
- footerCtxPct = `${pct}%/${(usage.contextWindow / 1e3).toFixed(0)}k`;
189
+ const pctVal = (usage.tokens / usage.contextWindow * 100).toFixed(1);
190
+ footerCtxPct = `${pctVal}%/${(usage.contextWindow / 1e3).toFixed(0)}k`;
159
191
  } else {
160
192
  footerCtxPct = "";
161
193
  }
194
+ const modelId = currentCtx.model?.id || "";
195
+ if (modelId && !footerCtxPct && modelsJson) {
196
+ for (const prov of Object.values(modelsJson.providers || {})) {
197
+ const match = (prov.models || []).find((m) => m.id === modelId);
198
+ if (match?.contextLength) {
199
+ footerCtxPct = `${(match.contextLength / 1e3).toFixed(0)}k ctx`;
200
+ break;
201
+ }
202
+ }
203
+ }
162
204
  }
205
+ isLocalProvider = modelsJson ? detectLocalProvider(modelsJson) : false;
163
206
  refreshBlockedCount();
164
207
  }
165
208
  pi.on("session_start", async (_event, ctx) => {
@@ -188,10 +231,12 @@ function status_temp_default(pi) {
188
231
  if (footerModel) parts.push(dim(footerModel));
189
232
  if (footerThinking && footerThinking !== "off") parts.push(dim(footerThinking));
190
233
  if (footerCtxPct) parts.push(footerCtxPct);
191
- parts.push(dim(`CPU ${cpuUsage.toFixed(0)}%`));
192
- parts.push(`RAM ${fmtBytes(memUsed)}/${fmtBytes(memTotal)}`);
193
- if (hasSwap && swapUsed > 0) {
194
- parts.push(`Swap ${fmtBytes(swapUsed)}/${fmtBytes(swapTotal)}`);
234
+ if (isLocalProvider) {
235
+ parts.push(dim(`CPU ${cpuUsage.toFixed(0)}%`));
236
+ parts.push(`RAM ${fmtBytes(memUsed)}/${fmtBytes(memTotal)}`);
237
+ if (hasSwap && swapUsed > 0) {
238
+ parts.push(`Swap ${fmtBytes(swapUsed)}/${fmtBytes(swapTotal)}`);
239
+ }
195
240
  }
196
241
  if (ollamaLoaded) parts.push(`${ollamaLoaded}`);
197
242
  if (lastResponseTime !== null) parts.push(`Resp ${fmtDur(lastResponseTime)}`);