@covibes/zeroshot 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +167 -0
- package/LICENSE +21 -0
- package/README.md +364 -0
- package/cli/index.js +3990 -0
- package/cluster-templates/base-templates/debug-workflow.json +181 -0
- package/cluster-templates/base-templates/full-workflow.json +455 -0
- package/cluster-templates/base-templates/single-worker.json +48 -0
- package/cluster-templates/base-templates/worker-validator.json +131 -0
- package/cluster-templates/conductor-bootstrap.json +122 -0
- package/cluster-templates/conductor-junior-bootstrap.json +69 -0
- package/docker/zeroshot-cluster/Dockerfile +132 -0
- package/lib/completion.js +174 -0
- package/lib/id-detector.js +53 -0
- package/lib/settings.js +97 -0
- package/lib/stream-json-parser.js +236 -0
- package/package.json +121 -0
- package/src/agent/agent-config.js +121 -0
- package/src/agent/agent-context-builder.js +241 -0
- package/src/agent/agent-hook-executor.js +329 -0
- package/src/agent/agent-lifecycle.js +555 -0
- package/src/agent/agent-stuck-detector.js +256 -0
- package/src/agent/agent-task-executor.js +1034 -0
- package/src/agent/agent-trigger-evaluator.js +67 -0
- package/src/agent-wrapper.js +459 -0
- package/src/agents/git-pusher-agent.json +20 -0
- package/src/attach/attach-client.js +438 -0
- package/src/attach/attach-server.js +543 -0
- package/src/attach/index.js +35 -0
- package/src/attach/protocol.js +220 -0
- package/src/attach/ring-buffer.js +121 -0
- package/src/attach/socket-discovery.js +242 -0
- package/src/claude-task-runner.js +468 -0
- package/src/config-router.js +80 -0
- package/src/config-validator.js +598 -0
- package/src/github.js +103 -0
- package/src/isolation-manager.js +1042 -0
- package/src/ledger.js +429 -0
- package/src/logic-engine.js +223 -0
- package/src/message-bus-bridge.js +139 -0
- package/src/message-bus.js +202 -0
- package/src/name-generator.js +232 -0
- package/src/orchestrator.js +1938 -0
- package/src/schemas/sub-cluster.js +156 -0
- package/src/sub-cluster-wrapper.js +545 -0
- package/src/task-runner.js +28 -0
- package/src/template-resolver.js +347 -0
- package/src/tui/CHANGES.txt +133 -0
- package/src/tui/LAYOUT.md +261 -0
- package/src/tui/README.txt +192 -0
- package/src/tui/TWO-LEVEL-NAVIGATION.md +186 -0
- package/src/tui/data-poller.js +325 -0
- package/src/tui/demo.js +208 -0
- package/src/tui/formatters.js +123 -0
- package/src/tui/index.js +193 -0
- package/src/tui/keybindings.js +383 -0
- package/src/tui/layout.js +317 -0
- package/src/tui/renderer.js +194 -0
|
@@ -0,0 +1,325 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* DataPoller - Aggregates cluster data for TUI display
|
|
3
|
+
*
|
|
4
|
+
* Polls all data sources at appropriate intervals:
|
|
5
|
+
* - Cluster states (1s)
|
|
6
|
+
* - Resource stats via pidusage (2s)
|
|
7
|
+
* - New cluster detection (2s)
|
|
8
|
+
* - Ledger message streaming (500ms per cluster)
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
const pidusage = require('pidusage');
|
|
12
|
+
const Ledger = require('../ledger');
|
|
13
|
+
const path = require('path');
|
|
14
|
+
const os = require('os');
|
|
15
|
+
|
|
16
|
+
class DataPoller {
  /**
   * @param {Object} orchestrator - Must expose listClusters(), getStatus(id) and
   *   watchForNewClusters(cb, intervalMs); may expose storageDir (falls back to
   *   ~/.zeroshot when absent).
   * @param {Object} [options]
   * @param {Function} [options.onUpdate] - Receives update events of shape
   *   { type: 'cluster_state'|'resource_stats'|'new_cluster'|'new_message', ... }.
   */
  constructor(orchestrator, options = {}) {
    this.orchestrator = orchestrator;
    this.intervals = []; // setInterval handles owned by this poller
    this.ledgers = new Map(); // clusterId -> Ledger instance
    this.ledgerStopFns = new Map(); // clusterId -> stop function for pollForMessages
    this.onUpdate = options.onUpdate || (() => {}); // Callback for updates
    this.watchForNewClustersStopFn = null;
  }

  /**
   * Start all polling intervals.
   * Initial polls are deferred (50ms / 100ms) so the UI can render before any
   * potentially heavy data collection runs.
   */
  start() {
    // Poll cluster states (1s)
    this.intervals.push(setInterval(() => this._pollClusterStates(), 1000));

    // Poll resource stats (2s)
    this.intervals.push(setInterval(() => this._pollResourceStats(), 2000));

    // Watch for new clusters (2s) and hook up ledger message streaming
    this._watchForNewClusters();

    // Defer initial polls to avoid blocking UI startup
    setTimeout(() => this._pollClusterStates(), 50);
    setTimeout(() => this._pollResourceStats(), 100);
  }

  /**
   * Stop all polling intervals and clean up resources.
   * Safe to call multiple times; errors during ledger close are ignored.
   */
  stop() {
    // Clear all intervals
    for (const intervalId of this.intervals) {
      clearInterval(intervalId);
    }
    this.intervals = [];

    // Stop watching for new clusters
    if (this.watchForNewClustersStopFn) {
      this.watchForNewClustersStopFn();
      this.watchForNewClustersStopFn = null;
    }

    // Stop all ledger polling
    for (const stopFn of this.ledgerStopFns.values()) {
      stopFn();
    }
    this.ledgerStopFns.clear();

    // Close all ledger connections
    for (const ledger of this.ledgers.values()) {
      try {
        ledger.close();
      } catch {
        // Ignore errors during cleanup
      }
    }
    this.ledgers.clear();
  }

  /**
   * Poll cluster states (1s interval).
   * Gets all clusters and their agent states from orchestrator and emits a
   * 'cluster_state' update. A cluster whose getStatus() throws is reported
   * with state 'unknown' instead of aborting the whole poll.
   * @private
   */
  _pollClusterStates() {
    try {
      const clusters = this.orchestrator.listClusters();

      // Get detailed status for each cluster
      const clustersWithStatus = clusters.map((cluster) => {
        try {
          const status = this.orchestrator.getStatus(cluster.id);
          // Add agentCount for stats calculation
          return {
            ...status,
            agentCount: status.agents ? status.agents.length : 0,
          };
        } catch (error) {
          console.error(
            `[DataPoller] Failed to get status for cluster ${cluster.id}:`,
            error.message
          );
          // Placeholder keeps the cluster visible in the TUI even when broken
          return {
            id: cluster.id,
            state: 'unknown',
            createdAt: cluster.createdAt,
            agents: [],
            agentCount: 0,
            messageCount: 0,
          };
        }
      });

      this.onUpdate({
        type: 'cluster_state',
        clusters: clustersWithStatus,
      });
    } catch (error) {
      console.error('[DataPoller] _pollClusterStates error:', error.message);
    }
  }

  /**
   * Poll resource stats (2s interval) and emit a 'resource_stats' update.
   * Delegates to _collectResourceStats so a single dead PID only zeroes that
   * PID's stats instead of every agent's (the previous batch pidusage() call
   * rejected wholesale when any one process had died).
   * @private
   */
  async _pollResourceStats() {
    try {
      const stats = await this._collectResourceStats();
      this.onUpdate({
        type: 'resource_stats',
        stats,
      });
    } catch (error) {
      console.error('[DataPoller] _pollResourceStats error:', error.message);
    }
  }

  /**
   * Resolve the on-disk ledger database path for a cluster.
   * @param {string} clusterId
   * @returns {string} Absolute path to the cluster's SQLite-style ledger file
   * @private
   */
  _ledgerDbPath(clusterId) {
    const storageDir = this.orchestrator.storageDir || path.join(os.homedir(), '.zeroshot');
    return path.join(storageDir, `${clusterId}.db`);
  }

  /**
   * Lazily open and cache the ledger for a cluster, but only if its database
   * file already exists (avoids creating empty DB files for clusters that
   * have not written a ledger yet).
   * @param {string} clusterId
   * @returns {boolean} true when a ledger is cached for the cluster
   * @private
   */
  _ensureLedger(clusterId) {
    if (this.ledgers.has(clusterId)) {
      return true;
    }
    const fs = require('fs'); // lazy require keeps TUI startup light
    const dbPath = this._ledgerDbPath(clusterId);
    if (!fs.existsSync(dbPath)) {
      return false; // Skip non-existent ledgers
    }
    this.ledgers.set(clusterId, new Ledger(dbPath));
    return true;
  }

  /**
   * Watch for new clusters (2s interval).
   * Uses orchestrator.watchForNewClusters to detect new clusters and start
   * streaming their ledger messages. Also loads ledgers for all clusters that
   * already exist at startup. Both paths now share _ensureLedger, so existing
   * clusters get the same "database file must exist" guard the new-cluster
   * path always had (previously the existing-cluster path opened ledgers
   * unconditionally).
   * @private
   */
  _watchForNewClusters() {
    this.watchForNewClustersStopFn = this.orchestrator.watchForNewClusters((cluster) => {
      try {
        // Lazy load ledger only when we need to stream messages
        if (!this._ensureLedger(cluster.id)) {
          return;
        }

        // Start streaming messages
        this._streamLedgerMessages(cluster.id);

        // Emit update about new cluster
        this.onUpdate({
          type: 'new_cluster',
          cluster,
        });
      } catch (error) {
        console.error(
          `[DataPoller] Failed to load ledger for cluster ${cluster.id}:`,
          error.message
        );
      }
    }, 2000);

    // Also load ledgers for all existing clusters
    for (const cluster of this.orchestrator.listClusters()) {
      try {
        if (!this._ensureLedger(cluster.id)) {
          continue;
        }
        this._streamLedgerMessages(cluster.id);
      } catch (error) {
        console.error(
          `[DataPoller] Failed to load ledger for existing cluster ${cluster.id}:`,
          error.message
        );
      }
    }
  }

  /**
   * Stream ledger messages for a cluster (500ms interval).
   * Uses ledger.pollForMessages to get new messages; any previous poller for
   * the same cluster is stopped first so at most one runs per cluster.
   * @param {string} clusterId - Cluster ID to stream messages from
   * @private
   */
  _streamLedgerMessages(clusterId) {
    const ledger = this.ledgers.get(clusterId);
    if (!ledger) {
      console.error(`[DataPoller] No ledger found for cluster ${clusterId}`);
      return;
    }

    // Stop existing polling if any
    const existingStopFn = this.ledgerStopFns.get(clusterId);
    if (existingStopFn) {
      existingStopFn();
    }

    // Start polling for messages
    const stopFn = ledger.pollForMessages(
      clusterId,
      (message) => {
        this.onUpdate({
          type: 'new_message',
          clusterId,
          message,
        });
      },
      500, // Poll every 500ms
      50 // Show last 50 messages initially
    );

    this.ledgerStopFns.set(clusterId, stopFn);
  }

  /**
   * Collect resource stats for all agent PIDs.
   * PIDs are queried concurrently (previously one sequential await per PID);
   * a dead process simply reports zero usage.
   * @returns {Promise<Object>} Map of pid -> { cpu, memory }
   * @private
   */
  async _collectResourceStats() {
    const stats = {};
    const pids = [];

    for (const cluster of this.orchestrator.listClusters()) {
      try {
        const status = this.orchestrator.getStatus(cluster.id);
        for (const agent of status.agents || []) {
          if (agent.pid) {
            pids.push(agent.pid);
          }
        }
      } catch {
        // Skip clusters that error
        continue;
      }
    }

    await Promise.all(
      pids.map(async (pid) => {
        try {
          const pidStat = await pidusage(pid);
          stats[pid] = {
            cpu: pidStat.cpu || 0,
            memory: pidStat.memory || 0,
          };
        } catch {
          // Process died - set to zero
          stats[pid] = { cpu: 0, memory: 0 };
        }
      })
    );

    return stats;
  }
}

module.exports = DataPoller;
|
package/src/tui/demo.js
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
1
|
+
/**
 * TUI Dashboard Demo
 * Simple demonstration of the dashboard layout with mock data
 *
 * Run: node src/tui/demo.js
 * Press: [q] to quit
 */

const blessed = require('blessed');
const {
  createLayout,
  updateClustersTable,
  updateAgentsTable,
  updateStatsBox,
  addLogEntry,
} = require('./layout');
const { formatTimestamp } = require('./formatters');

// Main screen hosting the demo dashboard
const screen = blessed.screen({
  mouse: true,
  title: 'Cluster Dashboard - Demo',
  smartCSR: true,
});

// Build the widget layout once up front
const ui = createLayout(screen);

// --- Mock data -------------------------------------------------------------

const demoClusters = [
  {
    id: 'cluster-swift-falcon',
    status: 'running',
    agentCount: 5,
    config: 'default',
    uptime: formatTimestamp(2 * 60 * 60 * 1000 + 30 * 60 * 1000), // 2h 30m
  },
  {
    id: 'cluster-bold-panther',
    status: 'running',
    agentCount: 3,
    config: 'simple',
    uptime: formatTimestamp(45 * 60 * 1000), // 45m
  },
  {
    id: 'cluster-quick-eagle',
    status: 'stopped',
    agentCount: 0,
    config: 'default',
    uptime: '0s',
  },
];

const demoAgents = [
  {
    clusterId: 'cluster-swift-falcon',
    id: 'worker-1',
    role: 'worker',
    status: 'running',
    iteration: 3,
    cpu: '12.5%',
    memory: '245 MB',
  },
  {
    clusterId: 'cluster-swift-falcon',
    id: 'validator-req',
    role: 'validator',
    status: 'idle',
    iteration: 0,
    cpu: '0.1%',
    memory: '128 MB',
  },
  {
    clusterId: 'cluster-swift-falcon',
    id: 'validator-sec',
    role: 'validator',
    status: 'idle',
    iteration: 0,
    cpu: '0.2%',
    memory: '135 MB',
  },
  {
    clusterId: 'cluster-bold-panther',
    id: 'worker-2',
    role: 'worker',
    status: 'running',
    iteration: 1,
    cpu: '8.3%',
    memory: '189 MB',
  },
  {
    clusterId: 'cluster-bold-panther',
    id: 'validator-qa',
    role: 'validator',
    status: 'running',
    iteration: 1,
    cpu: '5.1%',
    memory: '156 MB',
  },
];

const demoStats = {
  activeClusters: 2,
  totalAgents: 5,
  usedMemory: '853 MB',
  totalMemory: '8 GB',
  totalCPU: '26.2%',
};

// Push the current mock data into every widget
const repaint = () => {
  updateClustersTable(ui.clustersTable, demoClusters);
  updateAgentsTable(ui.agentTable, demoAgents);
  updateStatsBox(ui.statsBox, demoStats);
};

// --- Keyboard shortcuts ----------------------------------------------------

screen.key(['q', 'C-c'], () => process.exit(0));

screen.key(['r'], () => {
  repaint();
  addLogEntry(ui.logsBox, 'Dashboard refreshed', 'info');
  screen.render();
});

// One-off simulated events, each bound to a single key
const simulatedEvents = [
  ['c', 'Cluster started: cluster-wandering-wolf', 'info'],
  ['k', 'Cluster killed: cluster-quick-eagle', 'warn'],
  ['s', 'Warning: High memory usage on cluster-swift-falcon', 'warn'],
];
for (const [key, message, level] of simulatedEvents) {
  screen.key([key], () => {
    addLogEntry(ui.logsBox, message, level);
    screen.render();
  });
}

// --- Startup ---------------------------------------------------------------

// Initialize with mock data
repaint();

// Add initial log entries
addLogEntry(ui.logsBox, 'Dashboard initialized', 'info');
addLogEntry(ui.logsBox, 'Monitoring 2 active clusters', 'info');
addLogEntry(ui.logsBox, 'System CPU: 26.2% | Memory: 853 MB / 8 GB', 'info');

// Simulate live updates every 3s
const tickTimer = setInterval(() => {
  // Update uptime for running clusters
  for (const cluster of demoClusters) {
    if (cluster.status === 'running') {
      cluster.uptime = formatTimestamp(Math.random() * 3 * 60 * 60 * 1000); // Random uptime
    }
  }

  // Simulate CPU/Memory changes
  for (const agent of demoAgents) {
    if (agent.status === 'running') {
      agent.cpu = `${(Math.random() * 20).toFixed(1)}%`;
      agent.memory = `${Math.floor(Math.random() * 200 + 100)} MB`;
    }
  }

  demoStats.totalCPU = `${(Math.random() * 50).toFixed(1)}%`;

  repaint();
  screen.render();
}, 3000);

// Display help shortly after startup
setTimeout(() => {
  addLogEntry(
    ui.logsBox,
    'Press [r] to refresh | [c] to add cluster | [k] to kill | [s] for warning | [q] to quit',
    'info'
  );
  screen.render();
}, 500);

// Cleanup on exit
process.on('exit', () => {
  clearInterval(tickTimer);
});

// Render initial screen
screen.render();

console.log(
  '\n' +
    '===============================================\n' +
    ' Cluster Dashboard - Demo Mode\n' +
    '===============================================\n' +
    'Keyboard shortcuts:\n' +
    ' [↑/↓] Navigate between widgets\n' +
    ' [Tab] Next widget\n' +
    ' [Shift+Tab] Previous widget\n' +
    ' [r] Refresh data\n' +
    ' [c] Simulate cluster start\n' +
    ' [k] Simulate cluster kill\n' +
    ' [s] Simulate warning\n' +
    ' [q] Quit\n' +
    '===============================================\n\n'
);
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TUI Display Formatters
|
|
3
|
+
* Converts raw values to human-readable formats for terminal display
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Convert milliseconds to human-readable uptime string
|
|
8
|
+
* @param {number} ms - Milliseconds
|
|
9
|
+
* @returns {string} Formatted uptime (e.g., "5m 23s", "2h 15m", "3d 4h")
|
|
10
|
+
*/
|
|
11
|
+
/**
 * Convert milliseconds to human-readable uptime string
 * @param {number} ms - Milliseconds
 * @returns {string} Formatted uptime (e.g., "5m 23s", "2h 15m", "3d 4h")
 */
const formatTimestamp = (ms) => {
  if (!ms || ms < 0) return '0s';

  // Precompute whole units top-down, then pick the two largest that apply.
  const totalSeconds = Math.floor(ms / 1000);
  const totalMinutes = Math.floor(totalSeconds / 60);
  const totalHours = Math.floor(totalMinutes / 60);
  const days = Math.floor(totalHours / 24);

  if (totalSeconds < 60) {
    return `${totalSeconds}s`;
  }

  if (totalMinutes < 60) {
    const secs = totalSeconds % 60;
    return secs > 0 ? `${totalMinutes}m ${secs}s` : `${totalMinutes}m`;
  }

  if (totalHours < 24) {
    const mins = totalMinutes % 60;
    return mins > 0 ? `${totalHours}h ${mins}m` : `${totalHours}h`;
  }

  const hrs = totalHours % 24;
  return hrs > 0 ? `${days}d ${hrs}h` : `${days}d`;
};
|
|
36
|
+
|
|
37
|
+
/**
|
|
38
|
+
* Convert bytes to human-readable size string
|
|
39
|
+
* @param {number} bytes - Number of bytes
|
|
40
|
+
* @returns {string} Formatted size (e.g., "245 MB", "1.2 GB", "512 KB")
|
|
41
|
+
*/
|
|
42
|
+
/**
 * Convert bytes to human-readable size string
 * @param {number} bytes - Number of bytes
 * @returns {string} Formatted size (e.g., "245 MB", "1.2 GB", "512 KB")
 */
const formatBytes = (bytes) => {
  if (!bytes || bytes < 0) return '0 B';

  const units = ['B', 'KB', 'MB', 'GB', 'TB'];

  // Scale down by 1000 until the value fits the unit (or we run out of units).
  let unitIndex = 0;
  let value = bytes;
  for (; value >= 1000 && unitIndex < units.length - 1; unitIndex += 1) {
    value /= 1000;
  }

  // One decimal for small magnitudes, whole numbers otherwise.
  const rendered = value < 10 ? value.toFixed(1) : Math.round(value);
  return `${rendered} ${units[unitIndex]}`;
};
|
|
57
|
+
|
|
58
|
+
/**
|
|
59
|
+
* Format CPU percentage with consistent precision
|
|
60
|
+
* @param {number} percent - CPU percentage (0-100)
|
|
61
|
+
* @returns {string} Formatted percentage (e.g., "12.3%", "0.1%")
|
|
62
|
+
*/
|
|
63
|
+
/**
 * Format CPU percentage with consistent precision
 * @param {number} percent - CPU percentage (0-100)
 * @returns {string} Formatted percentage (e.g., "12.3%", "0.1%")
 */
const formatCPU = (percent) => {
  if (typeof percent !== 'number' || percent < 0) return '0.0%';

  // Display is capped at 100% regardless of the raw reading.
  const clamped = percent > 100 ? 100 : percent;
  return `${clamped.toFixed(1)}%`;
};
|
|
69
|
+
|
|
70
|
+
/**
|
|
71
|
+
* Map cluster state to unicode icon
|
|
72
|
+
* @param {string} state - Cluster state (running, stopped, initializing, stopping, failed, killed)
|
|
73
|
+
* @returns {string} Unicode icon representing state
|
|
74
|
+
*/
|
|
75
|
+
/**
 * Map cluster state to unicode icon
 * @param {string} state - Cluster state (running, stopped, initializing, stopping, failed, killed)
 * @returns {string} Unicode icon representing state
 */
const stateIcon = (state) => {
  switch (state) {
    case 'running':
      return '●'; // filled circle (green)
    case 'stopped':
      return '○'; // hollow circle
    case 'initializing':
      return '◐'; // half circle
    case 'stopping':
      return '◑'; // half circle other way
    case 'failed':
    case 'killed':
      return '⚠'; // warning
    default:
      return '?';
  }
};
|
|
87
|
+
|
|
88
|
+
/**
|
|
89
|
+
* Truncate string with ellipsis if exceeds max length
|
|
90
|
+
* @param {string} str - String to truncate
|
|
91
|
+
* @param {number} maxLen - Maximum length
|
|
92
|
+
* @returns {string} Truncated string with "..." if needed
|
|
93
|
+
*/
|
|
94
|
+
/**
 * Truncate string with ellipsis if exceeds max length
 * @param {string} str - String to truncate
 * @param {number} maxLen - Maximum length of the returned string
 * @returns {string} Truncated string with "..." if needed
 */
const truncate = (str, maxLen) => {
  if (!str || typeof str !== 'string') return '';
  if (str.length <= maxLen) return str;

  // BUG FIX: for maxLen <= 3 the old code returned substring(0, maxLen - 3)
  // plus '...', producing output LONGER than maxLen (e.g. maxLen=2 -> '...').
  // With no room for an ellipsis, just hard-cut to the budget instead.
  if (maxLen <= 3) return str.substring(0, Math.max(0, maxLen));

  return str.substring(0, maxLen - 3) + '...';
};
|
|
100
|
+
|
|
101
|
+
/**
|
|
102
|
+
* Format duration between two timestamps
|
|
103
|
+
* @param {number} startMs - Start timestamp in milliseconds
|
|
104
|
+
* @param {number} endMs - End timestamp in milliseconds (null = now)
|
|
105
|
+
* @returns {string} Formatted duration (e.g., "5m 23s", "2h 15m")
|
|
106
|
+
*/
|
|
107
|
+
/**
 * Format duration between two timestamps
 * @param {number} startMs - Start timestamp in milliseconds
 * @param {number} endMs - End timestamp in milliseconds (null = now)
 * @returns {string} Formatted duration (e.g., "5m 23s", "2h 15m")
 */
const formatDuration = (startMs, endMs) => {
  if (!startMs || startMs < 0) return '0s';

  // A missing or non-positive end means "still running": measure to now.
  const effectiveEnd = endMs && endMs > 0 ? endMs : Date.now();
  const elapsed = Math.max(0, effectiveEnd - startMs);

  return formatTimestamp(elapsed);
};
|
|
115
|
+
|
|
116
|
+
// Public formatter API consumed by the TUI widgets (layout.js, demo.js).
module.exports = {
  formatTimestamp,
  formatBytes,
  formatCPU,
  stateIcon,
  truncate,
  formatDuration,
};
|