termweb-dashboard 0.2.5 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +16 -5
- package/bin/cli.js +11 -1
- package/dist/app.bundle.js +34 -31
- package/dist/index.html +37 -6
- package/lib/metrics.js +540 -226
- package/lib/server.js +13 -4
- package/native/prebuilt/.gitkeep +0 -0
- package/native/prebuilt/metrics-darwin-arm64.node +0 -0
- package/native/prebuilt/metrics-darwin-x64.node +0 -0
- package/native/prebuilt/metrics-linux-arm64.node +0 -0
- package/native/prebuilt/metrics-linux-x64.node +0 -0
- package/package.json +4 -2
package/lib/metrics.js
CHANGED
```diff
@@ -1,15 +1,37 @@
 /**
- * System metrics collector using systeminformation
+ * System metrics collector using systeminformation + native Zig addon
+ * Native addon provides fastest path (direct OS API calls via Zig)
+ * Falls back to systeminformation when native is unavailable
  */
 const si = require('systeminformation');
 const { exec } = require('child_process');
-const { Worker, isMainThread, parentPort } = require('worker_threads');
-const path = require('path');

-//
+// Load native metrics from prebuilt binaries (platform-specific)
+let nativeMetrics = null;
+try {
+  const os = require('os');
+  const path = require('path');
+  const platform = process.platform; // darwin, linux
+  const arch = process.arch; // arm64, x64
+  const binaryName = `metrics-${platform}-${arch}.node`;
+
+  // Try prebuilt binary first (from npm package)
+  const prebuiltPath = path.join(__dirname, '..', 'native', 'prebuilt', binaryName);
+  try {
+    nativeMetrics = require(prebuiltPath);
+  } catch (e) {
+    // Try local dev build (zig-out)
+    const devPath = path.join(__dirname, '..', 'native', 'zig-out', 'lib', 'metrics.node');
+    nativeMetrics = require(devPath);
+  }
+} catch (e) {
+  // Native metrics not available, will use systeminformation fallback
+}
+
+// Cache for port info (expensive to fetch on macOS)
 let portCache = new Map();
 let portCacheTime = 0;
-const PORT_CACHE_TTL = 5000; //
+const PORT_CACHE_TTL = process.platform === 'linux' ? 5000 : 30000; // Linux: 5s, macOS: 30s (lsof is slow)

 // Cache for heavy metrics (collected in background)
 let metricsCache = null;
@@ -18,49 +40,211 @@ let metricsRefreshing = false;
 const METRICS_CACHE_TTL = 500; // 500ms - return cached data quickly

 /**
- * Get port info for processes (cached,
+ * Get port info for processes (cached, non-blocking on macOS)
+ * Linux: uses ss (fast, ~10ms)
+ * macOS: uses lsof which is very slow (5-20s), so we fetch in background
  */
 async function getPortInfo() {
   const now = Date.now();
+
+  // Return cache if fresh
   if (now - portCacheTime < PORT_CACHE_TTL && portCache.size > 0) {
     return portCache;
   }

+  const isLinux = process.platform === 'linux';
+
+  // On macOS, return stale cache and refresh in background (non-blocking)
+  // lsof is too slow (5-20 seconds) to block on every call
+  if (!isLinux) {
+    // Start background refresh if not already running
+    if (!portRefreshing) {
+      portRefreshing = true;
+      exec('lsof -iTCP -sTCP:LISTEN -P -n 2>/dev/null || true', {
+        encoding: 'utf-8',
+        timeout: 30000
+      }, (err, output) => {
+        portRefreshing = false;
+        if (!err && output) {
+          const newCache = new Map();
+          const lines = output.split('\n').slice(1);
+          for (const line of lines) {
+            const parts = line.split(/\s+/);
+            if (parts.length >= 9) {
+              const pid = parseInt(parts[1], 10);
+              const portMatch = parts[8]?.match(/:(\d+)$/);
+              if (pid && portMatch) {
+                const port = portMatch[1];
+                if (newCache.has(pid)) {
+                  newCache.get(pid).push(port);
+                } else {
+                  newCache.set(pid, [port]);
+                }
+              }
+            }
+          }
+          if (newCache.size > 0) {
+            portCache = newCache;
+            portCacheTime = Date.now();
+          }
+        }
+      });
+    }
+    return portCache; // Return immediately (may be stale or empty)
+  }
+
+  // Linux: ss is fast enough to wait for
   return new Promise((resolve) => {
-    exec('
+    exec('ss -tlnp 2>/dev/null || netstat -tlnp 2>/dev/null', {
       encoding: 'utf-8',
       timeout: 2000
     }, (err, output) => {
-      if (err) {
-        resolve(portCache);
+      if (err || !output) {
+        resolve(portCache);
         return;
       }

-
-      const lines = output.split('\n').slice(1);
+      const newCache = new Map();
+      const lines = output.split('\n').slice(1);
       for (const line of lines) {
-        const
-
-
-        const
-
-
-
-
-
-          portCache.set(pid, [port]);
-        }
+        const portMatch = line.match(/:(\d+)\s/);
+        const pidMatch = line.match(/pid=(\d+)/);
+        if (portMatch && pidMatch) {
+          const port = portMatch[1];
+          const pid = parseInt(pidMatch[1], 10);
+          if (newCache.has(pid)) {
+            newCache.get(pid).push(port);
+          } else {
+            newCache.set(pid, [port]);
          }
        }
      }
-
+
+      if (newCache.size > 0) {
+        portCache = newCache;
+        portCacheTime = Date.now();
+      }
      resolve(portCache);
    });
  });
 }

+let portRefreshing = false;
+
+/**
+ * Get fast disk stats using native addon or fallback to systeminformation
+ */
+async function getFastDisk() {
+  if (nativeMetrics) {
+    try {
+      const disks = nativeMetrics.getDiskStats();
+      return disks.map(d => ({
+        fs: d.fs,
+        mount: d.mount,
+        type: 'disk',
+        size: d.total,
+        used: d.used,
+        available: d.available,
+        usePercent: d.total > 0 ? (d.used / d.total) * 100 : 0
+      }));
+    } catch (e) {
+      // Fall through
+    }
+  }
+  const disk = await si.fsSize();
+  return disk
+    .filter(d => {
+      if (!d.size || isNaN(d.size) || d.size <= 0) return false;
+      if (d.mount.startsWith('/System/Volumes/')) return false;
+      if (d.mount.includes('/private/var/folders/')) return false;
+      if (d.size < 1024 * 1024 * 1024) return false;
+      return true;
+    })
+    .map(d => ({
+      fs: d.fs,
+      mount: d.mount,
+      type: d.type,
+      size: d.size,
+      used: d.used,
+      available: d.available,
+      usePercent: d.use
+    }));
+}
+
+/**
+ * Get fast network stats using native addon or fallback to systeminformation
+ */
+let prevNetStats = null;
+let prevNetTime = 0;
+
+async function getFastNetwork() {
+  if (nativeMetrics) {
+    try {
+      const nets = nativeMetrics.getNetStats();
+      const now = Date.now();
+      const elapsed = prevNetTime > 0 ? (now - prevNetTime) / 1000 : 1;
+
+      const result = nets.map(n => {
+        const prev = prevNetStats?.find(p => p.iface === n.iface);
+        const rx_sec = prev ? Math.max(0, (n.rxBytes - prev.rxBytes) / elapsed) : 0;
+        const tx_sec = prev ? Math.max(0, (n.txBytes - prev.txBytes) / elapsed) : 0;
+        return {
+          iface: n.iface,
+          rx_bytes: n.rxBytes,
+          tx_bytes: n.txBytes,
+          rx_sec: Math.round(rx_sec),
+          tx_sec: Math.round(tx_sec)
+        };
+      });
+
+      prevNetStats = nets;
+      prevNetTime = now;
+      return result;
+    } catch (e) {
+      // Fall through
+    }
+  }
+  const networkStats = await si.networkStats();
+  return networkStats.map(n => ({
+    iface: n.iface,
+    rx_bytes: n.rx_bytes,
+    tx_bytes: n.tx_bytes,
+    rx_sec: n.rx_sec,
+    tx_sec: n.tx_sec
+  }));
+}
+
+/**
+ * Get process stats using systeminformation
+ * Native addon has permission issues on macOS, so we use si.processes() which handles this correctly
+ */
+async function getFastProcesses() {
+  const ports = await getPortInfo();
+
+  const processes = await si.processes();
+  return {
+    all: processes.all,
+    running: processes.running,
+    blocked: processes.blocked,
+    sleeping: processes.sleeping,
+    list: processes.list
+      .sort((a, b) => b.cpu - a.cpu)
+      .slice(0, 50)
+      .map(p => ({
+        pid: p.pid,
+        name: p.name,
+        cpu: p.cpu,
+        mem: p.mem,
+        state: p.state,
+        user: p.user,
+        ports: ports.get(p.pid) || []
+      }))
+  };
+}
+
 /**
  * Collect all system metrics
+ * Uses native Zig addon when available for maximum performance
  * @returns {Promise<Object>} System metrics
  */
 async function collectMetrics() {
@@ -69,18 +253,18 @@ async function collectMetrics() {
     cpuLoad,
     mem,
     disk,
-
+    network,
     processes,
     temp,
     system,
     osInfo
   ] = await Promise.all([
     si.cpu(),
-
-
-
-
-
+    getFastCpuLoad(),
+    getFastMemory(),
+    getFastDisk(),
+    getFastNetwork(),
+    getFastProcesses(),
     si.cpuTemperature().catch(() => ({ main: null, cores: [] })),
     si.system(),
     si.osInfo()
@@ -94,8 +278,8 @@ async function collectMetrics() {
       cores: cpu.cores,
       physicalCores: cpu.physicalCores,
       speed: cpu.speed,
-      load: cpuLoad.
-      loadPerCore: cpuLoad.
+      load: cpuLoad.load,
+      loadPerCore: cpuLoad.loadPerCore
     },
     memory: {
       total: mem.total,
@@ -103,59 +287,12 @@ async function collectMetrics() {
       free: mem.free,
       active: mem.active,
       available: mem.available,
-      swapTotal: mem.
-      swapUsed: mem.
+      swapTotal: mem.swapTotal || 0,
+      swapUsed: mem.swapUsed || 0
     },
-    disk
-
-
-      // Skip if size is invalid
-      if (!d.size || isNaN(d.size) || d.size <= 0) return false;
-      // Skip macOS system volumes
-      if (d.mount.startsWith('/System/Volumes/')) return false;
-      // Skip snapshot/private volumes
-      if (d.mount.includes('/private/var/folders/')) return false;
-      // Skip tiny volumes (< 1GB)
-      if (d.size < 1024 * 1024 * 1024) return false;
-      return true;
-    })
-    .map(d => ({
-      fs: d.fs,
-      mount: d.mount,
-      type: d.type,
-      size: d.size,
-      used: d.used,
-      available: d.available,
-      usePercent: d.use
-    })),
-    network: networkStats.map(n => ({
-      iface: n.iface,
-      rx_bytes: n.rx_bytes,
-      tx_bytes: n.tx_bytes,
-      rx_sec: n.rx_sec,
-      tx_sec: n.tx_sec
-    })),
-    processes: await (async () => {
-      const ports = await getPortInfo();
-      return {
-        all: processes.all,
-        running: processes.running,
-        blocked: processes.blocked,
-        sleeping: processes.sleeping,
-        list: processes.list
-          .sort((a, b) => b.cpu - a.cpu)
-          .slice(0, 50)
-          .map(p => ({
-            pid: p.pid,
-            name: p.name,
-            cpu: p.cpu,
-            mem: p.mem,
-            state: p.state,
-            user: p.user,
-            ports: ports.get(p.pid) || []
-          }))
-      };
-    })(),
+    disk,
+    network,
+    processes,
     temperature: {
       main: temp.main,
       cores: temp.cores
@@ -174,29 +311,173 @@ async function collectMetrics() {
   };
 }

+// Fast CPU load reading - stores previous values for delta calculation
+let prevCpuTimes = null;
+let prevCoresTimes = null;
+
+/**
+ * Fast CPU load using native termweb SDK or /proc/stat fallback
+ */
+async function getFastCpuLoad() {
+  // Use native termweb SDK if available (fastest - direct Mach/proc calls)
+  if (nativeMetrics) {
+    try {
+      const stats = nativeMetrics.getCpuStats();
+      const cores = nativeMetrics.getCoreStats();
+
+      // Calculate load from delta (need previous sample)
+      const total = stats.user + stats.nice + stats.system + stats.idle + stats.iowait;
+      const busy = stats.user + stats.nice + stats.system;
+
+      let load = 0;
+      if (prevCpuTimes) {
+        const deltaTotal = total - prevCpuTimes.total;
+        const deltaBusy = busy - prevCpuTimes.busy;
+        load = deltaTotal > 0 ? (deltaBusy / deltaTotal) * 100 : 0;
+      }
+      prevCpuTimes = { total, busy };
+
+      // Per-core loads
+      const loadPerCore = cores.map((c, i) => {
+        const coreTotal = c.user + c.nice + c.system + c.idle + c.iowait;
+        const coreBusy = c.user + c.nice + c.system;
+        let coreLoad = 0;
+        if (prevCoresTimes && prevCoresTimes[i]) {
+          const prev = prevCoresTimes[i];
+          const deltaTotal = coreTotal - prev.total;
+          const deltaBusy = coreBusy - prev.busy;
+          coreLoad = deltaTotal > 0 ? (deltaBusy / deltaTotal) * 100 : 0;
+        }
+        return { total: coreTotal, busy: coreBusy, load: coreLoad };
+      });
+      prevCoresTimes = loadPerCore.map(c => ({ total: c.total, busy: c.busy }));
+
+      return { load, loadPerCore: loadPerCore.map(c => c.load) };
+    } catch (e) {
+      // Fall through to other methods
+    }
+  }
+
+  // Linux fallback: read /proc/stat directly
+  if (process.platform === 'linux') {
+    const fs = require('fs');
+    try {
+      const stat = fs.readFileSync('/proc/stat', 'utf8');
+      const lines = stat.split('\n');
+      const cpus = [];
+      let totalLoad = 0;
+
+      for (const line of lines) {
+        if (line.startsWith('cpu')) {
+          const parts = line.split(/\s+/);
+          const name = parts[0];
+          const times = parts.slice(1, 8).map(Number);
+          const [user, nice, system, idle, iowait, irq, softirq] = times;
+          const total = user + nice + system + idle + iowait + irq + softirq;
+          const busy = user + nice + system + irq + softirq;
+
+          if (prevCpuTimes && prevCpuTimes[name]) {
+            const prev = prevCpuTimes[name];
+            const deltaTotal = total - prev.total;
+            const deltaBusy = busy - prev.busy;
+            const load = deltaTotal > 0 ? (deltaBusy / deltaTotal) * 100 : 0;
+
+            if (name === 'cpu') {
+              totalLoad = load;
+            } else {
+              cpus.push(load);
+            }
+          }
+
+          if (!prevCpuTimes) prevCpuTimes = {};
+          prevCpuTimes[name] = { total, busy };
+        }
+      }
+
+      if (cpus.length > 0) {
+        return { load: totalLoad, loadPerCore: cpus };
+      }
+    } catch (e) {}
+  }
+
+  // Fallback to systeminformation
+  const cpuLoad = await si.currentLoad();
+  return { load: cpuLoad.currentLoad, loadPerCore: cpuLoad.cpus.map(c => c.load) };
+}
+
+/**
+ * Fast memory reading using native termweb SDK or /proc/meminfo fallback
+ */
+async function getFastMemory() {
+  // Use native termweb SDK if available (fastest - direct Mach/proc calls)
+  if (nativeMetrics) {
+    try {
+      const stats = nativeMetrics.getMemStats();
+      return {
+        total: stats.total,
+        free: stats.free,
+        used: stats.used,
+        active: stats.used, // Use 'used' as approximation for 'active'
+        available: stats.available,
+        swapTotal: stats.swapTotal,
+        swapUsed: stats.swapUsed
+      };
+    } catch (e) {
+      // Fall through to other methods
+    }
+  }
+
+  // Linux fallback: read /proc/meminfo directly
+  if (process.platform === 'linux') {
+    const fs = require('fs');
+    try {
+      const meminfo = fs.readFileSync('/proc/meminfo', 'utf8');
+      const values = {};
+      for (const line of meminfo.split('\n')) {
+        const match = line.match(/^(\w+):\s+(\d+)/);
+        if (match) {
+          values[match[1]] = parseInt(match[2], 10) * 1024; // Convert KB to bytes
+        }
+      }
+      return {
+        total: values.MemTotal || 0,
+        free: values.MemFree || 0,
+        used: (values.MemTotal || 0) - (values.MemAvailable || values.MemFree || 0),
+        active: values.Active || 0,
+        available: values.MemAvailable || values.MemFree || 0
+      };
+    } catch (e) {}
+  }
+
+  // Fallback to systeminformation
+  const mem = await si.mem();
+  return { total: mem.total, free: mem.free, used: mem.used, active: mem.active, available: mem.available };
+}
+
 /**
  * Collect lightweight metrics for frequent updates
+ * Uses native addon when available for maximum performance
  * @returns {Promise<Object>} Lightweight metrics
  */
 async function collectLightMetrics() {
-  const [
-
-
-
+  const [cpu, mem, network] = await Promise.all([
+    getFastCpuLoad(),
+    getFastMemory(),
+    getFastNetwork()
   ]);

   return {
     timestamp: Date.now(),
     cpu: {
-      load:
-      loadPerCore:
+      load: cpu.load,
+      loadPerCore: cpu.loadPerCore
     },
     memory: {
       used: mem.used,
       free: mem.free,
       active: mem.active
     },
-    network:
+    network: network.map(n => ({
       iface: n.iface,
       rx_sec: n.rx_sec,
       tx_sec: n.tx_sec
@@ -283,6 +564,16 @@ function killProcess(pid) {
   }
 }

+function deleteFolder(folderPath) {
+  try {
+    const fs = require('fs');
+    fs.rmSync(folderPath, { recursive: true, force: true });
+    return true;
+  } catch (e) {
+    return false;
+  }
+}
+
 // Cache for connections
 let connectionsCache = [];
 let connectionsCacheTime = 0;
@@ -301,9 +592,15 @@ const dnsCache = new Map();
 const DNS_CACHE_TTL = 300000; // 5 minutes

 /**
- * Get per-process network bytes using nettop (macOS)
+ * Get per-process network bytes using nettop (macOS only)
+ * On Linux, returns empty map (falls back to proportional distribution)
  */
 async function getProcessBytes() {
+  // nettop is macOS only
+  if (process.platform !== 'darwin') {
+    return new Map();
+  }
+
   const now = Date.now();
   if (now - processBytesTime < 2000 && processBytes.size > 0) {
     return processBytes;
@@ -397,125 +694,134 @@ async function getConnections() {
     return connectionsCache;
   }

-  //
-  const
-
-
-
-
-
-
-
-
-
-
-  }
-
-
-  const pidConnections = new Map(); // pid -> array of { host, port }
-  const lines = output.split('\n');
+  // Use netstat (fast, works on Linux and macOS) instead of lsof (slow)
+  const isLinux = process.platform === 'linux';
+  const netstatCmd = isLinux
+    ? 'ss -tn state established 2>/dev/null || netstat -tn 2>/dev/null | grep ESTABLISHED'
+    : 'netstat -an 2>/dev/null | grep ESTABLISHED';
+
+  const [procBytes, netstatOutput] = await Promise.all([
+    getProcessBytes(),
+    new Promise((res) => {
+      exec(netstatCmd, {
+        encoding: 'utf-8',
+        timeout: 2000,
+        maxBuffer: 512 * 1024
+      }, (err, output) => res(err ? '' : output));
+    })
+  ]);

-
-
-
-  const parts = line.trim().split(/\s+/);
-  if (parts.length < 9) continue;
+  if (!netstatOutput) {
+    return connectionsCache;
+  }

-
-
-
+  const hostMap = new Map();
+  const lines = netstatOutput.split('\n');
+
+  // Parse netstat output for established connections
+  for (const line of lines) {
+    const parts = line.trim().split(/\s+/);
+    if (parts.length < 4) continue;
+
+    let remoteAddr;
+    if (isLinux) {
+      // Linux ss: State Recv-Q Send-Q Local:Port Peer:Port
+      // Linux netstat: Proto Recv-Q Send-Q Local Addr Foreign Addr State
+      remoteAddr = parts[4] || parts[3];
+    } else {
+      // macOS netstat: Proto Recv-Q Send-Q Local Addr Foreign Addr (state)
+      // tcp4 0 0 192.168.2.46.53397 160.79.104.10.443 ESTABLISHED
+      remoteAddr = parts[4];
+    }

-
-
-
-
-
+    if (!remoteAddr) continue;
+
+    // Parse remote address - handle both IP.port and IP:port formats
+    let remoteHost, remotePort;
+
+    // IPv6 with brackets: [::1]:443
+    const ipv6Match = remoteAddr.match(/^\[([^\]]+)\][.:](\d+)$/);
+    if (ipv6Match) {
+      remoteHost = ipv6Match[1];
+      remotePort = ipv6Match[2];
+    } else {
+      // macOS uses IP.port, Linux uses IP:port
+      const lastDot = remoteAddr.lastIndexOf('.');
+      const lastColon = remoteAddr.lastIndexOf(':');
+      const sep = lastColon > lastDot ? lastColon : lastDot;
+      if (sep > 0) {
+        remoteHost = remoteAddr.substring(0, sep);
+        remotePort = remoteAddr.substring(sep + 1);
+      }
+    }

-
-
+    if (remoteHost && remotePort) {
+      // Skip localhost and link-local
+      if (remoteHost === '127.0.0.1' || remoteHost === '::1' || remoteHost === 'localhost') continue;
+      if (remoteHost.startsWith('fe80:') || remoteHost.startsWith('::ffff:127.')) continue;

-
-
-
-      hostMap.set(key, { host: remoteHost, bytes: 0, count: 0, ports: new Set(), processes: new Set() });
-    }
-    const entry = hostMap.get(key);
-    entry.count++;
-    entry.ports.add(remotePort);
-    entry.processes.add(process);
-
-    // Track per-pid connections
-    if (!pidConnections.has(pid)) {
-      pidConnections.set(pid, []);
-    }
-    pidConnections.get(pid).push(remoteHost);
-  }
+      const key = remoteHost;
+      if (!hostMap.has(key)) {
+        hostMap.set(key, { host: remoteHost, bytes: 0, count: 0, ports: new Set(), processes: new Set() });
      }
+      const entry = hostMap.get(key);
+      entry.count++;
+      entry.ports.add(remotePort);
+    }
+  }

-
-
-
-
-
-
-
-
-
-  }
+  // Note: netstat doesn't give us PID, so we can't map bytes to hosts accurately
+  // Just distribute total bytes proportionally by connection count
+  const totalBytes = Array.from(procBytes.values()).reduce((sum, p) => sum + p.total, 0);
+  const totalConns = Array.from(hostMap.values()).reduce((sum, h) => sum + h.count, 0);
+  if (totalConns > 0 && totalBytes > 0) {
+    for (const entry of hostMap.values()) {
+      entry.bytes = Math.floor((totalBytes * entry.count) / totalConns);
+    }
+  }

-
-
-
-
-  const entry = hostMap.get(host);
-  if (entry) {
-    entry.bytes += share;
-  }
-  }
-  }
+  // Add current sample to history
+  const currentSample = new Map();
+  hostMap.forEach((v, k) => currentSample.set(k, { bytes: v.bytes, count: v.count }));
+  connectionHistory.push({ time: now, hosts: currentSample });

-
-
-
-
+  // Remove old samples (older than 1 minute)
+  while (connectionHistory.length > 0 && now - connectionHistory[0].time > CONNECTION_HISTORY_TTL) {
+    connectionHistory.shift();
+  }

-
-
-
-
+  // Aggregate bytes over last 1 minute
+  const bytesMap = new Map();
+  for (const sample of connectionHistory) {
+    sample.hosts.forEach((data, ip) => {
+      const current = bytesMap.get(ip) || 0;
+      bytesMap.set(ip, Math.max(current, data.bytes));
+    });
+  }

-
-
-
-
-
-
-
-
+  // Build results - use cached hostname or IP
+  const results = Array.from(hostMap.values())
+    .map(h => ({
+      host: h.host,
+      hostname: dnsCache.get(h.host)?.hostname || h.host,
+      bytes: bytesMap.get(h.host) || h.bytes,
+      count: h.count,
+      ports: Array.from(h.ports).slice(0, 5),
+      processes: Array.from(h.processes).slice(0, 5)
+    }))
+    .sort((a, b) => b.bytes - a.bytes)
+    .slice(0, 20);

-
-
-
-
-
-
-      ports: Array.from(h.ports).slice(0, 5),
-      processes: Array.from(h.processes).slice(0, 5)
-    }))
-    .sort((a, b) => b.bytes - a.bytes)
-    .slice(0, 20);
-
-  // Resolve hostnames in parallel (cached)
-  await Promise.all(results.map(async (r) => {
-    r.hostname = await reverseDns(r.host);
-  }));
+  // Start DNS lookups in background (non-blocking)
+  for (const r of results) {
+    if (!dnsCache.has(r.host)) {
+      reverseDns(r.host);
+    }
+  }

-
-
-
-  });
-  });
+  connectionsCache = results;
+  connectionsCacheTime = now;
+  return connectionsCache;
 }

 // Cache for folder sizes (progressive scanning)
@@ -576,44 +882,51 @@ async function getFolderSizes(dir, onUpdate) {
     folderSizeCache.set(dir, { items: [...items], scanning: true, lastUpdate: Date.now() });
     resolve(items);

-    // Step 2: Scan actual sizes in
+    // Step 2: Scan actual sizes in parallel (multiple batches concurrently)
     const batchSize = 5;
+    const maxParallel = 4; // Run up to 4 du commands in parallel
+    const batches = [];
     for (let i = 0; i < items.length; i += batchSize) {
-
-
+      batches.push(items.slice(i, i + batchSize));
+    }
+
+    // Process batches in parallel groups
+    for (let g = 0; g < batches.length; g += maxParallel) {
+      const parallelBatches = batches.slice(g, g + maxParallel);

-
-      const
+      await Promise.all(parallelBatches.map(batch => {
+        const paths = batch.map(item => `"${item.path.replace(/"/g, '\\"')}"`).join(' ');
+        return new Promise((res) => {
          exec(`du -sk ${paths} 2>/dev/null || true`, {
            encoding: 'utf-8',
            timeout: 15000
-          }, (err, out) =>
-
-
-
-
-
-
-
-
-
-
-
-
+          }, (err, out) => {
+            if (!err && out) {
+              const sizeLines = out.trim().split('\n');
+              for (const line of sizeLines) {
+                const match = line.match(/^(\d+)\s+(.+)$/);
+                if (match) {
+                  const sizeKB = parseInt(match[1], 10);
+                  const path = match[2];
+                  const item = items.find(it => it.path === path);
+                  if (item) {
+                    item.size = sizeKB * 1024;
+                    item.confirmed = true;
+                  }
+                }
+              }
            }
-
-
+            res();
+          });
+        });
+      }));

-
-
-
+      // Update cache and notify after each parallel group
+      const sortedItems = [...items].sort((a, b) => b.size - a.size);
+      folderSizeCache.set(dir, { items: sortedItems, scanning: g + maxParallel < batches.length, lastUpdate: Date.now() });

-
-
-      }
-    } catch (e) {
-      // Continue with next batch
+      if (onUpdate) {
+        onUpdate(dir, sortedItems);
      }
    }

@@ -634,6 +947,7 @@ module.exports = {
   getMetricsCached,
   startBackgroundPolling,
   killProcess,
+  deleteFolder,
   getConnections,
   getFolderSizes
 };
```