termweb-dashboard 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/metrics.js ADDED
@@ -0,0 +1,639 @@
+ /**
+  * System metrics collector using systeminformation
+  */
+ const si = require('systeminformation');
+ const { exec } = require('child_process');
+ const { Worker, isMainThread, parentPort } = require('worker_threads');
+ const path = require('path');
+
+ // Cache for port info (expensive to fetch)
+ let portCache = new Map();
+ let portCacheTime = 0;
+ const PORT_CACHE_TTL = 5000; // 5 seconds
+
+ // Cache for heavy metrics (collected in background)
+ let metricsCache = null;
+ let metricsCacheTime = 0;
+ let metricsRefreshing = false;
+ const METRICS_CACHE_TTL = 500; // 500ms - return cached data quickly
+
+ /**
+  * Get port info for processes (cached, async)
+  */
+ async function getPortInfo() {
+   const now = Date.now();
+   if (now - portCacheTime < PORT_CACHE_TTL && portCache.size > 0) {
+     return portCache;
+   }
+
+   return new Promise((resolve) => {
+     exec('lsof -iTCP -sTCP:LISTEN -P -n 2>/dev/null || true', {
+       encoding: 'utf-8',
+       timeout: 2000
+     }, (err, output) => {
+       if (err) {
+         resolve(portCache); // Return old cache on error
+         return;
+       }
+
+       portCache = new Map();
+       const lines = output.split('\n').slice(1); // Skip header
+       for (const line of lines) {
+         const parts = line.split(/\s+/);
+         if (parts.length >= 9) {
+           const pid = parseInt(parts[1], 10);
+           const portMatch = parts[8]?.match(/:(\d+)$/);
+           if (pid && portMatch) {
+             const port = portMatch[1];
+             if (portCache.has(pid)) {
+               portCache.get(pid).push(port);
+             } else {
+               portCache.set(pid, [port]);
+             }
+           }
+         }
+       }
+       portCacheTime = now;
+       resolve(portCache);
+     });
+   });
+ }
+
+ /**
+  * Collect all system metrics
+  * @returns {Promise<Object>} System metrics
+  */
+ async function collectMetrics() {
+   const [
+     cpu,
+     cpuLoad,
+     mem,
+     disk,
+     networkStats,
+     processes,
+     temp,
+     system,
+     osInfo
+   ] = await Promise.all([
+     si.cpu(),
+     si.currentLoad(),
+     si.mem(),
+     si.fsSize(),
+     si.networkStats(),
+     si.processes(),
+     si.cpuTemperature().catch(() => ({ main: null, cores: [] })),
+     si.system(),
+     si.osInfo()
+   ]);
+
+   return {
+     timestamp: Date.now(),
+     cpu: {
+       manufacturer: cpu.manufacturer,
+       brand: cpu.brand,
+       cores: cpu.cores,
+       physicalCores: cpu.physicalCores,
+       speed: cpu.speed,
+       load: cpuLoad.currentLoad,
+       loadPerCore: cpuLoad.cpus.map(c => c.load)
+     },
+     memory: {
+       total: mem.total,
+       used: mem.used,
+       free: mem.free,
+       active: mem.active,
+       available: mem.available,
+       swapTotal: mem.swaptotal,
+       swapUsed: mem.swapused
+     },
+     disk: disk
+       // Filter out macOS system volumes and invalid entries
+       .filter(d => {
+         // Skip if size is invalid
+         if (!d.size || isNaN(d.size) || d.size <= 0) return false;
+         // Skip macOS system volumes
+         if (d.mount.startsWith('/System/Volumes/')) return false;
+         // Skip snapshot/private volumes
+         if (d.mount.includes('/private/var/folders/')) return false;
+         // Skip tiny volumes (< 1GB)
+         if (d.size < 1024 * 1024 * 1024) return false;
+         return true;
+       })
+       .map(d => ({
+         fs: d.fs,
+         mount: d.mount,
+         type: d.type,
+         size: d.size,
+         used: d.used,
+         available: d.available,
+         usePercent: d.use
+       })),
+     network: networkStats.map(n => ({
+       iface: n.iface,
+       rx_bytes: n.rx_bytes,
+       tx_bytes: n.tx_bytes,
+       rx_sec: n.rx_sec,
+       tx_sec: n.tx_sec
+     })),
+     processes: await (async () => {
+       const ports = await getPortInfo();
+       return {
+         all: processes.all,
+         running: processes.running,
+         blocked: processes.blocked,
+         sleeping: processes.sleeping,
+         list: processes.list
+           .sort((a, b) => b.cpu - a.cpu)
+           .slice(0, 50)
+           .map(p => ({
+             pid: p.pid,
+             name: p.name,
+             cpu: p.cpu,
+             mem: p.mem,
+             state: p.state,
+             user: p.user,
+             ports: ports.get(p.pid) || []
+           }))
+       };
+     })(),
+     temperature: {
+       main: temp.main,
+       cores: temp.cores
+     },
+     system: {
+       manufacturer: system.manufacturer,
+       model: system.model
+     },
+     os: {
+       platform: osInfo.platform,
+       distro: osInfo.distro,
+       release: osInfo.release,
+       hostname: osInfo.hostname,
+       uptime: si.time().uptime
+     }
+   };
+ }
+
+ /**
+  * Collect lightweight metrics for frequent updates
+  * @returns {Promise<Object>} Lightweight metrics
+  */
+ async function collectLightMetrics() {
+   const [cpuLoad, mem, networkStats] = await Promise.all([
+     si.currentLoad(),
+     si.mem(),
+     si.networkStats()
+   ]);
+
+   return {
+     timestamp: Date.now(),
+     cpu: {
+       load: cpuLoad.currentLoad,
+       loadPerCore: cpuLoad.cpus.map(c => c.load)
+     },
+     memory: {
+       used: mem.used,
+       free: mem.free,
+       active: mem.active
+     },
+     network: networkStats.map(n => ({
+       iface: n.iface,
+       rx_sec: n.rx_sec,
+       tx_sec: n.tx_sec
+     }))
+   };
+ }
+
+ /**
+  * Get metrics with caching - returns cached data immediately, refreshes in background
+  * This prevents UI lag by never blocking on slow si.processes() calls
+  * @returns {Promise<Object>} Cached or fresh metrics
+  */
+ async function getMetricsCached() {
+   const now = Date.now();
+
+   // If we have recent cache, return it immediately
+   if (metricsCache && (now - metricsCacheTime) < METRICS_CACHE_TTL) {
+     return metricsCache;
+   }
+
+   // If cache is stale but we're already refreshing, return stale cache
+   if (metricsCache && metricsRefreshing) {
+     return metricsCache;
+   }
+
+   // If no cache at all, we must wait for first fetch
+   if (!metricsCache) {
+     metricsCache = await collectMetrics();
+     metricsCacheTime = Date.now();
+     return metricsCache;
+   }
+
+   // Cache is stale - trigger background refresh and return stale data
+   metricsRefreshing = true;
+   collectMetrics().then(data => {
+     metricsCache = data;
+     metricsCacheTime = Date.now();
+     metricsRefreshing = false;
+   }).catch(() => {
+     metricsRefreshing = false;
+   });
+
+   return metricsCache;
+ }
+
+ /**
+  * Start background metrics polling
+  * Pre-fetches metrics so they're always cached and ready
+  * @param {number} interval - Polling interval in ms (default 2000)
+  */
+ function startBackgroundPolling(interval = 2000) {
+   // Initial fetch
+   collectMetrics().then(data => {
+     metricsCache = data;
+     metricsCacheTime = Date.now();
+   }).catch(() => {});
+
+   // Poll in background
+   setInterval(async () => {
+     if (!metricsRefreshing) {
+       metricsRefreshing = true;
+       try {
+         metricsCache = await collectMetrics();
+         metricsCacheTime = Date.now();
+       } catch (e) {
+         // Ignore errors, keep old cache
+       }
+       metricsRefreshing = false;
+     }
+   }, interval);
+ }
+
+ /**
+  * Kill a process by PID
+  * @param {number} pid - Process ID to kill
+  * @returns {boolean} - true if successful
+  */
+ function killProcess(pid) {
+   try {
+     process.kill(pid, 'SIGKILL');
+     return true;
+   } catch (e) {
+     return false;
+   }
+ }
+
+ // Cache for connections
+ let connectionsCache = [];
+ let connectionsCacheTime = 0;
+ const CONNECTIONS_CACHE_TTL = 2000;
+
+ // History for 1-minute aggregation: { time, hosts: Map<ip, { bytes, count }> }
+ const connectionHistory = [];
+ const CONNECTION_HISTORY_TTL = 60000; // 1 minute
+
+ // Cache for per-process bytes from nettop
+ let processBytes = new Map(); // pid -> { rx, tx }
+ let processBytesTime = 0;
+
+ // Cache for DNS reverse lookups (IP -> hostname)
+ const dnsCache = new Map();
+ const DNS_CACHE_TTL = 300000; // 5 minutes
+
+ /**
+  * Get per-process network bytes using nettop (macOS)
+  */
+ async function getProcessBytes() {
+   const now = Date.now();
+   if (now - processBytesTime < 2000 && processBytes.size > 0) {
+     return processBytes;
+   }
+
+   return new Promise((resolve) => {
+     // nettop CSV format: time,process.pid,interface,state,bytes_in,bytes_out,...
+     exec('nettop -P -L 1 -n -x 2>/dev/null || true', {
+       encoding: 'utf-8',
+       timeout: 3000,
+       maxBuffer: 1024 * 1024
+     }, (err, output) => {
+       if (err || !output) {
+         resolve(processBytes);
+         return;
+       }
+
+       const newMap = new Map();
+       const lines = output.split('\n');
+       for (const line of lines) {
+         // Skip header line
+         if (line.startsWith('time,')) continue;
+
+         const parts = line.split(',');
+         if (parts.length >= 6) {
+           const procPid = parts[1]; // e.g., "Google Chrome H.947"
+           const pidMatch = procPid.match(/\.(\d+)$/);
+           if (pidMatch) {
+             const pid = parseInt(pidMatch[1], 10);
+             const rx = parseInt(parts[4], 10) || 0; // bytes_in is column 4
+             const tx = parseInt(parts[5], 10) || 0; // bytes_out is column 5
+             // Accumulate if process has multiple entries
+             const existing = newMap.get(pid) || { rx: 0, tx: 0, total: 0 };
+             existing.rx += rx;
+             existing.tx += tx;
+             existing.total = existing.rx + existing.tx;
+             newMap.set(pid, existing);
+           }
+         }
+       }
+
+       if (newMap.size > 0) {
+         processBytes = newMap;
+         processBytesTime = now;
+       }
+       resolve(processBytes);
+     });
+   });
+ }
+
+ /**
+  * Reverse DNS lookup with caching
+  * @param {string} ip - IP address
+  * @returns {Promise<string>} - Hostname or original IP if lookup fails
+  */
+ async function reverseDns(ip) {
+   // Check cache
+   const cached = dnsCache.get(ip);
+   if (cached && Date.now() - cached.time < DNS_CACHE_TTL) {
+     return cached.hostname;
+   }
+
+   return new Promise((resolve) => {
+     // Use host command for reverse lookup (faster than nslookup)
+     exec(`host -W 1 ${ip} 2>/dev/null || true`, {
+       encoding: 'utf-8',
+       timeout: 1500
+     }, (err, output) => {
+       let hostname = ip; // Default to IP
+       if (!err && output) {
+         // Parse: "1.2.3.4.in-addr.arpa domain name pointer hostname.example.com."
+         const match = output.match(/pointer\s+(.+?)\.?\s*$/m);
+         if (match) {
+           hostname = match[1].replace(/\.$/, ''); // Remove trailing dot
+         }
+       }
+       dnsCache.set(ip, { hostname, time: Date.now() });
+       resolve(hostname);
+     });
+   });
+ }
+
+ /**
+  * Get active network connections grouped by remote host
+  * Shows actual bytes transferred (from nettop) aggregated over 1 minute
+  * @returns {Promise<Array>} - Array of { host, hostname, bytes, count, ports, processes }
+  */
+ async function getConnections() {
+   const now = Date.now();
+   if (now - connectionsCacheTime < CONNECTIONS_CACHE_TTL && connectionsCache.length > 0) {
+     return connectionsCache;
+   }
+
+   // Get per-process bytes first
+   const procBytes = await getProcessBytes();
+
+   return new Promise((resolve) => {
+     // Use lsof to get TCP connections with PID
+     exec('lsof -i -n -P 2>/dev/null | grep -E "TCP|UDP" || true', {
+       encoding: 'utf-8',
+       timeout: 3000,
+       maxBuffer: 1024 * 1024
+     }, async (err, output) => {
+       if (err) {
+         resolve(connectionsCache);
+         return;
+       }
+
+       const hostMap = new Map();
+       const pidConnections = new Map(); // pid -> array of { host, port }
+       const lines = output.split('\n');
+
+       // First pass: collect all connections
+       for (const line of lines) {
+         // Parse lsof output: COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
+         const parts = line.trim().split(/\s+/);
+         if (parts.length < 9) continue;
+
+         const process = parts[0];
+         const pid = parseInt(parts[1], 10);
+         const name = parts[parts.length - 1]; // Last column is NAME (connection info)
+
+         // Parse connection: local->remote or *:port (LISTEN)
+         const match = name.match(/->([^:]+):(\d+)/);
+         if (match) {
+           const remoteHost = match[1];
+           const remotePort = match[2];
+
+           // Skip localhost
+           if (remoteHost === '127.0.0.1' || remoteHost === '::1' || remoteHost === 'localhost') continue;
+
+           // Track per-host
+           const key = remoteHost;
+           if (!hostMap.has(key)) {
+             hostMap.set(key, { host: remoteHost, bytes: 0, count: 0, ports: new Set(), processes: new Set() });
+           }
+           const entry = hostMap.get(key);
+           entry.count++;
+           entry.ports.add(remotePort);
+           entry.processes.add(process);
+
+           // Track per-pid connections
+           if (!pidConnections.has(pid)) {
+             pidConnections.set(pid, []);
+           }
+           pidConnections.get(pid).push(remoteHost);
+         }
+       }
+
+       // Second pass: distribute process bytes proportionally among hosts
+       for (const [pid, hosts] of pidConnections) {
+         const pb = procBytes.get(pid);
+         if (!pb || pb.total === 0) continue;
+
+         // Count connections per host for this pid
+         const hostCounts = new Map();
+         for (const host of hosts) {
+           hostCounts.set(host, (hostCounts.get(host) || 0) + 1);
+         }
+
+         // Distribute bytes proportionally
+         const totalConns = hosts.length;
+         for (const [host, count] of hostCounts) {
+           const share = Math.floor((pb.total * count) / totalConns);
+           const entry = hostMap.get(host);
+           if (entry) {
+             entry.bytes += share;
+           }
+         }
+       }
+
+       // Add current sample to history (with bytes)
+       const currentSample = new Map();
+       hostMap.forEach((v, k) => currentSample.set(k, { bytes: v.bytes, count: v.count }));
+       connectionHistory.push({ time: now, hosts: currentSample });
+
+       // Remove old samples (older than 1 minute)
+       while (connectionHistory.length > 0 && now - connectionHistory[0].time > CONNECTION_HISTORY_TTL) {
+         connectionHistory.shift();
+       }
+
+       // Aggregate bytes over last 1 minute (use max seen, not sum, since bytes are cumulative)
+       const bytesMap = new Map();
+       for (const sample of connectionHistory) {
+         sample.hosts.forEach((data, ip) => {
+           const current = bytesMap.get(ip) || 0;
+           bytesMap.set(ip, Math.max(current, data.bytes));
+         });
+       }
+
+       // Merge with current hostMap data
+       const results = Array.from(hostMap.values())
+         .map(h => ({
+           host: h.host,
+           bytes: bytesMap.get(h.host) || h.bytes,
+           count: h.count,
+           ports: Array.from(h.ports).slice(0, 5),
+           processes: Array.from(h.processes).slice(0, 5)
+         }))
+         .sort((a, b) => b.bytes - a.bytes)
+         .slice(0, 20);
+
+       // Resolve hostnames in parallel (cached)
+       await Promise.all(results.map(async (r) => {
+         r.hostname = await reverseDns(r.host);
+       }));
+
+       connectionsCache = results;
+       connectionsCacheTime = now;
+       resolve(connectionsCache);
+     });
+   });
+ }
+
+ // Cache for folder sizes (progressive scanning)
+ const folderSizeCache = new Map(); // path -> { items, scanning, lastUpdate }
+
+ /**
+  * Get folder sizes for a directory (progressive scanning)
+  * Returns immediately with estimates, then scans in background
+  * @param {string} dir - Directory path
+  * @param {function} onUpdate - Callback when data updates (for WebSocket push)
+  * @returns {Promise<Array>} - Array of { name, path, size, confirmed }
+  */
+ async function getFolderSizes(dir, onUpdate) {
+   const safeDir = dir.replace(/"/g, '\\"');
+
+   // Check cache first
+   const cached = folderSizeCache.get(dir);
+   if (cached && Date.now() - cached.lastUpdate < 30000) {
+     return cached.items;
+   }
+
+   return new Promise((resolve) => {
+     // Step 1: Quick list of folders + total used space
+     exec(`ls -1 "${safeDir}" 2>/dev/null && df -k "${safeDir}" 2>/dev/null | tail -1`, {
+       encoding: 'utf-8',
+       timeout: 2000
+     }, async (err, output) => {
+       if (err) {
+         resolve([]);
+         return;
+       }
+
+       const lines = output.trim().split('\n');
+
+       // Parse df output (last line): Filesystem 1K-blocks Used Available Use% Mounted
+       const dfLine = lines[lines.length - 1];
+       const dfParts = dfLine.split(/\s+/);
+       const totalUsedKB = parseInt(dfParts[2], 10) || 0;
+
+       // Get folder names (all lines except df output)
+       const folderNames = lines.slice(0, -1).filter(n => n && !n.includes(' '));
+
+       if (folderNames.length === 0) {
+         resolve([]);
+         return;
+       }
+
+       // Create initial items with estimated sizes
+       const avgSize = Math.floor((totalUsedKB * 1024) / folderNames.length);
+       const items = folderNames.slice(0, 30).map(name => ({
+         name,
+         path: dir === '/' ? `/${name}` : `${dir}/${name}`,
+         size: avgSize,
+         confirmed: false
+       }));
+
+       // Return estimates immediately
+       folderSizeCache.set(dir, { items: [...items], scanning: true, lastUpdate: Date.now() });
+       resolve(items);
+
+       // Step 2: Scan actual sizes in background (batch of 5 at a time)
+       const batchSize = 5;
+       for (let i = 0; i < items.length; i += batchSize) {
+         const batch = items.slice(i, i + batchSize);
+         const paths = batch.map(item => `"${item.path.replace(/"/g, '\\"')}"`).join(' ');
+
+         try {
+           const result = await new Promise((res) => {
+             exec(`du -sk ${paths} 2>/dev/null || true`, {
+               encoding: 'utf-8',
+               timeout: 15000
+             }, (err, out) => res(err ? '' : out));
+           });
+
+           // Parse results and update items
+           const sizeLines = result.trim().split('\n');
+           for (const line of sizeLines) {
+             const match = line.match(/^(\d+)\s+(.+)$/);
+             if (match) {
+               const sizeKB = parseInt(match[1], 10);
+               const path = match[2];
+               const item = items.find(it => it.path === path);
+               if (item) {
+                 item.size = sizeKB * 1024;
+                 item.confirmed = true;
+               }
+             }
+           }
+
+           // Update cache and notify
+           const sortedItems = [...items].sort((a, b) => b.size - a.size);
+           folderSizeCache.set(dir, { items: sortedItems, scanning: i + batchSize < items.length, lastUpdate: Date.now() });
+
+           if (onUpdate) {
+             onUpdate(dir, sortedItems);
+           }
+         } catch (e) {
+           // Continue with next batch
+         }
+       }
+
+       // Final sort and cache update
+       const sortedItems = [...items].sort((a, b) => b.size - a.size);
+       folderSizeCache.set(dir, { items: sortedItems, scanning: false, lastUpdate: Date.now() });
+
+       if (onUpdate) {
+         onUpdate(dir, sortedItems);
+       }
+     });
+   });
+ }
+
+ module.exports = {
+   collectMetrics,
+   collectLightMetrics,
+   getMetricsCached,
+   startBackgroundPolling,
+   killProcess,
+   getConnections,
+   getFolderSizes
+ };