@gravito/zenith 1.1.2 → 1.1.6
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/README.md +95 -22
- package/README.zh-TW.md +88 -0
- package/dist/bin.js +54699 -39316
- package/dist/client/assets/index-C80c1frR.css +1 -0
- package/dist/client/assets/index-CrWem9u3.js +434 -0
- package/dist/client/index.html +2 -2
- package/dist/server/index.js +54699 -39316
- package/package.json +20 -9
- package/CHANGELOG.md +0 -47
- package/Dockerfile +0 -46
- package/Dockerfile.demo-worker +0 -29
- package/ECOSYSTEM_EXPANSION_RFC.md +0 -130
- package/bin/flux-console.ts +0 -2
- package/dist/client/assets/index-BSMp8oq_.js +0 -436
- package/dist/client/assets/index-BwxlHx-_.css +0 -1
- package/docker-compose.yml +0 -40
- package/docs/ALERTING_GUIDE.md +0 -71
- package/docs/DEPLOYMENT.md +0 -157
- package/docs/DOCS_INTERNAL.md +0 -73
- package/docs/LARAVEL_ZENITH_ROADMAP.md +0 -109
- package/docs/QUASAR_MASTER_PLAN.md +0 -140
- package/docs/QUICK_TEST_GUIDE.md +0 -72
- package/docs/ROADMAP.md +0 -85
- package/docs/integrations/LARAVEL.md +0 -207
- package/postcss.config.js +0 -6
- package/scripts/debug_redis_keys.ts +0 -24
- package/scripts/flood-logs.ts +0 -21
- package/scripts/seed.ts +0 -213
- package/scripts/verify-throttle.ts +0 -49
- package/scripts/worker.ts +0 -124
- package/specs/PULSE_SPEC.md +0 -86
- package/src/bin.ts +0 -6
- package/src/client/App.tsx +0 -72
- package/src/client/Layout.tsx +0 -672
- package/src/client/Sidebar.tsx +0 -112
- package/src/client/ThroughputChart.tsx +0 -144
- package/src/client/WorkerStatus.tsx +0 -226
- package/src/client/components/BrandIcons.tsx +0 -168
- package/src/client/components/ConfirmDialog.tsx +0 -126
- package/src/client/components/JobInspector.tsx +0 -554
- package/src/client/components/LogArchiveModal.tsx +0 -432
- package/src/client/components/NotificationBell.tsx +0 -212
- package/src/client/components/PageHeader.tsx +0 -47
- package/src/client/components/Toaster.tsx +0 -90
- package/src/client/components/UserProfileDropdown.tsx +0 -186
- package/src/client/contexts/AuthContext.tsx +0 -105
- package/src/client/contexts/NotificationContext.tsx +0 -128
- package/src/client/index.css +0 -174
- package/src/client/index.html +0 -12
- package/src/client/main.tsx +0 -15
- package/src/client/pages/LoginPage.tsx +0 -162
- package/src/client/pages/MetricsPage.tsx +0 -417
- package/src/client/pages/OverviewPage.tsx +0 -517
- package/src/client/pages/PulsePage.tsx +0 -488
- package/src/client/pages/QueuesPage.tsx +0 -379
- package/src/client/pages/SchedulesPage.tsx +0 -540
- package/src/client/pages/SettingsPage.tsx +0 -1020
- package/src/client/pages/WorkersPage.tsx +0 -394
- package/src/client/pages/index.ts +0 -8
- package/src/client/utils.ts +0 -15
- package/src/server/config/ServerConfigManager.ts +0 -90
- package/src/server/index.ts +0 -860
- package/src/server/middleware/auth.ts +0 -127
- package/src/server/services/AlertService.ts +0 -321
- package/src/server/services/CommandService.ts +0 -137
- package/src/server/services/LogStreamProcessor.ts +0 -93
- package/src/server/services/MaintenanceScheduler.ts +0 -78
- package/src/server/services/PulseService.ts +0 -91
- package/src/server/services/QueueMetricsCollector.ts +0 -138
- package/src/server/services/QueueService.ts +0 -631
- package/src/shared/types.ts +0 -198
- package/tailwind.config.js +0 -73
- package/tests/placeholder.test.ts +0 -7
- package/tsconfig.json +0 -38
- package/tsconfig.node.json +0 -12
- package/vite.config.ts +0 -27
package/src/server/index.ts
DELETED
@@ -1,860 +0,0 @@
-import fs from 'node:fs'
-import os from 'node:os'
-import path from 'node:path'
-import { fileURLToPath } from 'node:url'
-import { DB } from '@gravito/atlas'
-import { Photon } from '@gravito/photon'
-import { QuasarAgent } from '@gravito/quasar'
-import { MySQLPersistence, SQLitePersistence } from '@gravito/stream'
-import { serveStatic } from 'hono/bun'
-import { getCookie } from 'hono/cookie'
-import { streamSSE } from 'hono/streaming'
-import {
-  authMiddleware,
-  createSession,
-  destroySession,
-  isAuthEnabled,
-  verifyPassword,
-} from './middleware/auth'
-import { CommandService } from './services/CommandService'
-import { PulseService } from './services/PulseService'
-import { QueueService } from './services/QueueService'
-
-const app = new Photon()
-
-// Configuration
-const PORT = parseInt(process.env.PORT || '3000', 10)
-const REDIS_URL = process.env.REDIS_URL || 'redis://localhost:6379'
-const QUEUE_PREFIX = process.env.QUEUE_PREFIX || 'queue:'
-
-// Persistence Initialize
-let persistence:
-  | { adapter: any; archiveCompleted: boolean; archiveFailed: boolean; archiveEnqueued: boolean }
-  | undefined
-
-const dbDriver = process.env.DB_DRIVER || (process.env.DB_HOST ? 'mysql' : 'sqlite')
-
-if (dbDriver === 'sqlite' || process.env.DB_HOST) {
-  if (dbDriver === 'sqlite') {
-    DB.addConnection('default', {
-      driver: 'sqlite',
-      database: process.env.DB_NAME || 'flux.sqlite',
-    })
-  } else {
-    DB.addConnection('default', {
-      driver: dbDriver as any,
-      host: process.env.DB_HOST,
-      port: parseInt(process.env.DB_PORT || '3306', 10),
-      database: process.env.DB_NAME || 'flux',
-      username: process.env.DB_USER || 'root',
-      password: process.env.DB_PASSWORD || '',
-    })
-  }
-
-  const adapter =
-    dbDriver === 'sqlite'
-      ? new SQLitePersistence(DB.connection())
-      : new MySQLPersistence(DB.connection())
-  adapter.setupTable().catch((err) => console.error('[FluxConsole] SQL Archive Setup Error:', err))
-
-  persistence = {
-    adapter,
-    archiveCompleted: process.env.PERSIST_ARCHIVE_COMPLETED === 'true',
-    archiveFailed: process.env.PERSIST_ARCHIVE_FAILED !== 'false',
-    archiveEnqueued: process.env.PERSIST_ARCHIVE_ENQUEUED === 'true',
-  }
-  console.log(`[FluxConsole] SQL Archive enabled via ${dbDriver}`)
-}
-
-// Service Initialization
-const queueService = new QueueService(REDIS_URL, QUEUE_PREFIX, persistence)
-const pulseService = new PulseService(REDIS_URL)
-const commandService = new CommandService(REDIS_URL)
-
-queueService
-  .connect()
-  .then(() => pulseService.connect())
-  .then(() => commandService.connect())
-  .then(() => {
-    // Start Self-Monitoring (Quasar)
-    const agent = new QuasarAgent({
-      service: 'flux-console',
-      redisUrl: REDIS_URL,
-    })
-    agent.start().catch((err) => console.error('[FluxConsole] Quasar Agent Error:', err))
-
-    console.log(`[FluxConsole] Connected to Redis at ${REDIS_URL}`)
-    // Start background metrics recording (Reduced from 5s to 2s for better real-time feel)
-    const updateMetrics = async () => {
-      try {
-        const [pulseNodes, legacyWorkers] = await Promise.all([
-          pulseService.getNodes(),
-          queueService.listWorkers(),
-        ])
-
-        const pulseWorkers = Object.values(pulseNodes)
-          .flat()
-          .flatMap((node) => {
-            const mainNode = {
-              id: node.id,
-              service: node.service,
-              status: node.runtime.status || 'online',
-              pid: node.pid,
-              uptime: node.runtime.uptime,
-              metrics: {
-                cpu: node.cpu.process,
-                cores: node.cpu.cores,
-                ram: {
-                  rss: node.memory.process.rss,
-                  heapUsed: node.memory.process.heapUsed,
-                  total: node.memory.system.total,
-                },
-              },
-              queues: node.queues,
-              meta: node.meta,
-            }
-
-            const subWorkers: any[] = []
-            if (node.meta?.laravel?.workers && Array.isArray(node.meta.laravel.workers)) {
-              node.meta.laravel.workers.forEach((w: any) => {
-                subWorkers.push({
-                  id: `${node.id}-php-${w.pid}`,
-                  service: `${node.service} / LARAVEL`,
-                  status: w.status === 'running' || w.status === 'sleep' ? 'online' : 'idle',
-                  pid: w.pid,
-                  uptime: node.runtime.uptime,
-                  metrics: {
-                    cpu: w.cpu,
-                    cores: 1,
-                    ram: {
-                      rss: w.memory,
-                      heapUsed: w.memory,
-                      total: node.memory.system.total,
-                    },
-                  },
-                  meta: { isVirtual: true, cmdline: w.cmdline },
-                })
-              })
-            }
-            return [mainNode, ...subWorkers]
-          })
-
-        const formattedLegacy = legacyWorkers.map((w) => ({
-          id: w.id,
-          status: 'online',
-          pid: w.pid,
-          uptime: w.uptime,
-          metrics: {
-            cpu: (w.loadAvg[0] || 0) * 100,
-            cores: 0,
-            ram: {
-              rss: parseInt(w.memory.rss || '0', 10),
-              heapUsed: parseInt(w.memory.heapUsed || '0', 10),
-              total: 0,
-            },
-          },
-          queues: w.queues.map((q) => ({
-            name: q,
-            size: { waiting: 0, active: 0, failed: 0, delayed: 0 },
-          })),
-          meta: {},
-        }))
-
-        await queueService.recordStatusMetrics(pulseNodes, [...pulseWorkers, ...formattedLegacy])
-      } catch (err) {
-        console.error('[FluxConsole] Metrics Update Error:', err)
-      }
-    }
-
-    setInterval(updateMetrics, 2000)
-
-    // Start Scheduler Tick (Reduced from 10s to 5s)
-    setInterval(() => {
-      queueService.tickScheduler().catch(console.error)
-    }, 5000)
-
-    // Record initial snapshot
-    updateMetrics()
-  })
-  .catch((err) => {
-    console.error('[FluxConsole] Failed to connect to Redis', err)
-  })
-
-const api = new Photon()
-
-api.get('/health', (c) => c.json({ status: 'ok', time: new Date().toISOString() }))
-
-// Auth endpoints (no middleware protection)
-api.get('/auth/status', (c) => {
-  const token = getCookie(c, 'flux_session')
-  const isAuthenticated =
-    !isAuthEnabled() || (token && require('./middleware/auth').validateSession(token))
-  return c.json({
-    enabled: isAuthEnabled(),
-    authenticated: !!isAuthenticated,
-  })
-})
-
-api.post('/auth/login', async (c) => {
-  try {
-    const { password } = await c.req.json()
-
-    if (!verifyPassword(password)) {
-      return c.json({ success: false, error: 'Invalid password' }, 401)
-    }
-
-    createSession(c)
-    return c.json({ success: true })
-  } catch (_err) {
-    return c.json({ success: false, error: 'Login failed' }, 500)
-  }
-})
-
-api.post('/auth/logout', (c) => {
-  destroySession(c)
-  return c.json({ success: true })
-})
-
-// Apply auth middleware to all other API routes
-api.use('/*', authMiddleware)
-
-api.get('/queues', async (c) => {
-  try {
-    const queues = await queueService.listQueues()
-    return c.json({ queues })
-  } catch (err) {
-    console.error(err)
-    return c.json({ error: 'Failed to list queues' }, 500)
-  }
-})
-
-api.get('/search', async (c) => {
-  const query = c.req.query('q') || ''
-  const type = (c.req.query('type') as 'all' | 'waiting' | 'delayed' | 'failed') || 'all'
-  const limit = parseInt(c.req.query('limit') || '20', 10)
-
-  if (!query || query.length < 2) {
-    return c.json({ results: [], message: 'Query must be at least 2 characters' })
-  }
-
-  try {
-    const results = await queueService.searchJobs(query, { type, limit })
-    return c.json({ results, query, count: results.length })
-  } catch (err) {
-    console.error(err)
-    return c.json({ error: 'Search failed' }, 500)
-  }
-})
-
-api.get('/archive/search', async (c) => {
-  const query = c.req.query('q') || ''
-  const queue = c.req.query('queue')
-  const page = parseInt(c.req.query('page') || '1', 10)
-  const limit = parseInt(c.req.query('limit') || '50', 10)
-
-  if (!query) {
-    return c.json({ results: [] })
-  }
-
-  try {
-    const { jobs, total } = await queueService.searchArchive(query, { queue, page, limit })
-    return c.json({ results: jobs, query, count: total })
-  } catch (err) {
-    console.error(err)
-    return c.json({ error: 'Archive search failed' }, 500)
-  }
-})
-
-api.get('/logs/archive', async (c) => {
-  const level = c.req.query('level')
-  const workerId = c.req.query('workerId')
-  const queue = c.req.query('queue')
-  const search = c.req.query('search')
-
-  const startTime = c.req.query('startTime') ? new Date(c.req.query('startTime')!) : undefined
-  const endTime = c.req.query('endTime') ? new Date(c.req.query('endTime')!) : undefined
-  const page = parseInt(c.req.query('page') || '1', 10)
-  const limit = parseInt(c.req.query('limit') || '50', 10)
-
-  try {
-    const results = await queueService.getArchivedLogs({
-      level,
-      workerId,
-      queue,
-      search,
-      startTime,
-      endTime,
-      page,
-      limit,
-    })
-    return c.json(results)
-  } catch (err) {
-    console.error(err)
-    return c.json({ error: 'Failed to fetch archived logs' }, 500)
-  }
-})
-
-api.post('/queues/:name/retry-all', async (c) => {
-  const name = c.req.param('name')
-  try {
-    const count = await queueService.retryDelayedJob(name)
-    return c.json({ success: true, count })
-  } catch (_err) {
-    return c.json({ error: 'Failed to retry jobs' }, 500)
-  }
-})
-
-api.post('/queues/:name/retry-all-failed', async (c) => {
-  const name = c.req.param('name')
-  try {
-    const count = await queueService.retryAllFailedJobs(name)
-    return c.json({ success: true, count })
-  } catch (_err) {
-    return c.json({ error: 'Failed to retry failed jobs' }, 500)
-  }
-})
-
-api.post('/queues/:name/clear-failed', async (c) => {
-  const name = c.req.param('name')
-  try {
-    await queueService.clearFailedJobs(name)
-    return c.json({ success: true })
-  } catch (_err) {
-    return c.json({ error: 'Failed to clear failed jobs' }, 500)
-  }
-})
-
-api.post('/queues/:name/pause', async (c) => {
-  const name = c.req.param('name')
-  try {
-    await queueService.pauseQueue(name)
-    return c.json({ success: true, paused: true })
-  } catch (_err) {
-    return c.json({ error: 'Failed to pause queue' }, 500)
-  }
-})
-
-api.post('/queues/:name/resume', async (c) => {
-  const name = c.req.param('name')
-  try {
-    await queueService.resumeQueue(name)
-    return c.json({ success: true, paused: false })
-  } catch (_err) {
-    return c.json({ error: 'Failed to resume queue' }, 500)
-  }
-})
-
-api.get('/queues/:name/jobs', async (c) => {
-  const name = c.req.param('name')
-  const type = (c.req.query('type') as 'waiting' | 'delayed' | 'failed') || 'waiting'
-  try {
-    const jobs = await queueService.getJobs(name, type)
-    return c.json({ jobs })
-  } catch (err) {
-    console.error(err)
-    return c.json({ error: 'Failed to fetch jobs' }, 500)
-  }
-})
-
-api.get('/queues/:name/jobs/count', async (c) => {
-  const name = c.req.param('name')
-  const type = (c.req.query('type') as 'waiting' | 'delayed' | 'failed') || 'waiting'
-  try {
-    const count = await queueService.getJobCount(name, type)
-    return c.json({ count })
-  } catch (err) {
-    console.error(err)
-    return c.json({ error: 'Failed to count jobs' }, 500)
-  }
-})
-
-api.get('/queues/:name/archive', async (c) => {
-  const name = c.req.param('name')
-  const page = parseInt(c.req.query('page') || '1', 10)
-  const limit = parseInt(c.req.query('limit') || '50', 10)
-
-  const status = c.req.query('status') as 'completed' | 'failed' | undefined
-  const jobId = c.req.query('jobId')
-  const startTime = c.req.query('startTime') ? new Date(c.req.query('startTime')!) : undefined
-  const endTime = c.req.query('endTime') ? new Date(c.req.query('endTime')!) : undefined
-
-  try {
-    const { jobs, total } = await queueService.getArchiveJobs(name, page, limit, status, {
-      jobId,
-      startTime,
-      endTime,
-    })
-    return c.json({ jobs, total })
-  } catch (err) {
-    console.error(err)
-    return c.json({ error: 'Failed to fetch archived jobs' }, 500)
-  }
-})
-
-api.get('/throughput', async (c) => {
-  try {
-    const data = await queueService.getThroughputData()
-    return c.json({ data })
-  } catch (_err) {
-    return c.json({ error: 'Failed to fetch throughput' }, 500)
-  }
-})
-
-api.get('/workers', async (c) => {
-  try {
-    const [legacyWorkers, pulseNodes] = await Promise.all([
-      queueService.listWorkers(),
-      pulseService.getNodes(),
-    ])
-
-    // Transform PulseNodes to match the frontend Worker interface
-    const pulseWorkers = Object.values(pulseNodes)
-      .flat()
-      .flatMap((node) => {
-        // 1. The Main Agent Node
-        const mainNode = {
-          id: node.id,
-          service: node.service,
-          status: node.runtime.status || 'online',
-          pid: node.pid,
-          uptime: node.runtime.uptime,
-          metrics: {
-            cpu: node.cpu.process,
-            cores: node.cpu.cores,
-            ram: {
-              rss: node.memory.process.rss,
-              heapUsed: node.memory.process.heapUsed,
-              total: node.memory.system.total,
-            },
-          },
-          queues: node.queues,
-          meta: node.meta,
-        }
-
-        // 2. Virtual Child Workers (e.g. Laravel)
-        const subWorkers: any[] = []
-        if (node.meta?.laravel?.workers && Array.isArray(node.meta.laravel.workers)) {
-          node.meta.laravel.workers.forEach((w: any) => {
-            subWorkers.push({
-              id: `${node.id}-php-${w.pid}`,
-              service: `${node.service} / LARAVEL`, // Distinct service name
-              status: w.status === 'running' || w.status === 'sleep' ? 'online' : 'idle',
-              pid: w.pid,
-              uptime: node.runtime.uptime, // Inherit uptime for now, or 0
-              metrics: {
-                cpu: w.cpu, // Per-process CPU
-                cores: 1, // Single threaded PHP
-                ram: {
-                  rss: w.memory,
-                  heapUsed: w.memory,
-                  total: node.memory.system.total,
-                },
-              },
-              meta: {
-                // Tag it so UI can maybe style it differently?
-                isVirtual: true,
-                cmdline: w.cmdline,
-              },
-            })
-          })
-        }
-
-        return [mainNode, ...subWorkers]
-      })
-
-    // Transform Legacy Workers to match interface (best effort)
-    const formattedLegacy = legacyWorkers.map((w) => ({
-      id: w.id,
-      status: 'online',
-      pid: w.pid,
-      uptime: w.uptime,
-      metrics: {
-        cpu: (w.loadAvg[0] || 0) * 100, // Rough estimate
-        cores: 0,
-        ram: {
-          rss: parseInt(w.memory.rss || '0', 10),
-          heapUsed: parseInt(w.memory.heapUsed || '0', 10),
-          total: 0,
-        },
-      },
-      queues: w.queues.map((q) => ({
-        name: q,
-        size: { waiting: 0, active: 0, failed: 0, delayed: 0 },
-      })),
-      meta: {},
-    }))
-
-    return c.json({ workers: [...pulseWorkers, ...formattedLegacy] })
-  } catch (_err) {
-    console.error(_err)
-    return c.json({ error: 'Failed to fetch workers' }, 500)
-  }
-})
-
-api.get('/metrics/history', async (c) => {
-  try {
-    const metrics = ['waiting', 'delayed', 'failed', 'workers']
-    const history: Record<string, number[]> = {}
-
-    await Promise.all(
-      metrics.map(async (m) => {
-        history[m] = await queueService.getMetricHistory(m)
-      })
-    )
-
-    return c.json({ history })
-  } catch (_err) {
-    return c.json({ error: 'Failed to fetch metrics history' }, 500)
-  }
-})
-
-api.get('/system/status', (c) => {
-  const mem = process.memoryUsage()
-  const totalMem = os.totalmem()
-
-  // Find package.json (relative to this file in src/server/index.ts)
-  const __dirname = path.dirname(fileURLToPath(import.meta.url))
-  const pkgPath = path.resolve(__dirname, '../../package.json')
-  let pkg = { version: '0.1.0-unknown', name: '@gravito/zenith' }
-  try {
-    pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'))
-  } catch (_e) {
-    // fallback
-  }
-
-  return c.json({
-    node: process.version,
-    memory: {
-      rss: `${(mem.rss / 1024 / 1024).toFixed(2)} MB`,
-      heapUsed: `${(mem.heapUsed / 1024 / 1024).toFixed(2)} MB`,
-      total: `${(totalMem / 1024 / 1024 / 1024).toFixed(2)} GB`,
-    },
-    version: pkg.version,
-    package: pkg.name,
-    engine: `Zenith ${pkg.version}`,
-    uptime: process.uptime(),
-    env:
-      process.env.NODE_ENV === 'production'
-        ? `production (${os.hostname()})`
-        : `development (${os.hostname()})`,
-    redisUrl: process.env.REDIS_URL || 'redis://localhost:6379',
-  })
-})
-
-// --- Pulse Monitoring ---
-api.get('/pulse/nodes', async (c) => {
-  try {
-    const nodes = await pulseService.getNodes()
-    return c.json({ nodes })
-  } catch (_err) {
-    return c.json({ error: 'Failed to fetch pulse nodes' }, 500)
-  }
-})
-
-// --- Pulse Remote Control (Phase 3) ---
-api.post('/pulse/command', async (c) => {
-  try {
-    const { service, nodeId, type, queue, jobKey, driver, action } = await c.req.json()
-
-    // Validate required fields
-    if (!service || !nodeId || !type || !queue || !jobKey) {
-      return c.json({ error: 'Missing required fields: service, nodeId, type, queue, jobKey' }, 400)
-    }
-
-    // Validate command type
-    if (type !== 'RETRY_JOB' && type !== 'DELETE_JOB' && type !== 'LARAVEL_ACTION') {
-      return c.json(
-        { error: 'Invalid command type. Allowed: RETRY_JOB, DELETE_JOB, LARAVEL_ACTION' },
-        400
-      )
-    }
-
-    const commandId = await commandService.sendCommand(service, nodeId, type, {
-      queue,
-      jobKey,
-      driver: driver || 'redis',
-      action,
-    })
-
-    return c.json({
-      success: true,
-      commandId,
-      message: `Command ${type} sent to ${nodeId}. Observe job state for result.`,
-    })
-  } catch (err) {
-    console.error('[CommandService] Error:', err)
-    return c.json({ error: 'Failed to send command' }, 500)
-  }
-})
-
-api.post('/queues/:name/jobs/delete', async (c) => {
-  const queueName = c.req.param('name')
-  const { type, raw } = await c.req.json()
-  try {
-    const success = await queueService.deleteJob(queueName, type, raw)
-    return c.json({ success })
-  } catch (_err) {
-    return c.json({ error: 'Failed to delete job' }, 500)
-  }
-})
-
-api.post('/queues/:name/jobs/retry', async (c) => {
-  const queueName = c.req.param('name')
-  const { raw } = await c.req.json()
-  try {
-    const success = await queueService.retryJob(queueName, raw)
-    return c.json({ success })
-  } catch (_err) {
-    return c.json({ error: 'Failed to retry job' }, 500)
-  }
-})
-
-api.post('/queues/:name/jobs/bulk-delete', async (c) => {
-  const queueName = c.req.param('name')
-  const { type, raws } = await c.req.json()
-  try {
-    const deleted = await queueService.deleteJobs(queueName, type, raws)
-    return c.json({ success: true, count: deleted })
-  } catch (_err) {
-    return c.json({ error: 'Failed to bulk delete' }, 500)
-  }
-})
-
-api.post('/queues/:name/jobs/bulk-retry', async (c) => {
-  const queueName = c.req.param('name')
-  const { type, raws } = await c.req.json()
-  try {
-    const retried = await queueService.retryJobs(queueName, type, raws)
-    return c.json({ success: true, count: retried })
-  } catch (_err) {
-    return c.json({ error: 'Failed to bulk retry' }, 500)
-  }
-})
-
-api.post('/queues/:name/jobs/bulk-delete-all', async (c) => {
-  const queueName = c.req.param('name')
-  const { type } = await c.req.json()
-  try {
-    const deleted = await queueService.deleteAllJobs(queueName, type)
-    return c.json({ success: true, count: deleted })
-  } catch (_err) {
-    return c.json({ error: 'Failed to bulk delete all' }, 500)
-  }
-})
-
-api.post('/queues/:name/jobs/bulk-retry-all', async (c) => {
-  const queueName = c.req.param('name')
-  const { type } = await c.req.json()
-  try {
-    const retried = await queueService.retryAllJobs(queueName, type)
-    return c.json({ success: true, count: retried })
-  } catch (_err) {
-    return c.json({ error: 'Failed to bulk retry all' }, 500)
-  }
-})
-
-api.post('/maintenance/cleanup-archive', async (c) => {
-  const { days = 30 } = await c.req.json()
-  try {
-    const deleted = await queueService.cleanupArchive(days)
-    return c.json({ success: true, deleted })
-  } catch (_err) {
-    return c.json({ error: 'Failed to cleanup archive' }, 500)
-  }
-})
-
-api.post('/queues/:name/purge', async (c) => {
-  const name = c.req.param('name')
-  try {
-    await queueService.purgeQueue(name)
-    return c.json({ success: true })
-  } catch (_err) {
-    return c.json({ error: 'Failed to purge queue' }, 500)
-  }
-})
-
-api.get('/logs/stream', async (c) => {
-  return streamSSE(c, async (stream) => {
-    // 1. Send history first
-    const history = await queueService.getLogHistory()
-    for (const log of history) {
-      await stream.writeSSE({
-        data: JSON.stringify(log),
-        event: 'log',
-      })
-    }
-
-    // 2. Subscribe to new logs
-    const unsubscribeLogs = queueService.onLog(async (msg) => {
-      await stream.writeSSE({
-        data: JSON.stringify(msg),
-        event: 'log',
-      })
-    })
-
-    // 3. Subscribe to real-time stats
-    const unsubscribeStats = queueService.onStats(async (stats) => {
-      await stream.writeSSE({
-        data: JSON.stringify(stats),
-        event: 'stats',
-      })
-    })
-
-    // 4. Poll Pulse Nodes per client (simple polling for now)
-    const pulseInterval = setInterval(async () => {
-      try {
-        const nodes = await pulseService.getNodes()
-        await stream.writeSSE({
-          data: JSON.stringify({ nodes }),
-          event: 'pulse',
-        })
-      } catch (_err) {
-        // ignore errors
-      }
-    }, 2000)
-
-    stream.onAbort(() => {
-      unsubscribeLogs()
-      unsubscribeStats()
-      clearInterval(pulseInterval)
-    })
-
-    // Keep alive
-    while (true) {
-      await stream.sleep(5000)
-      await stream.writeSSE({ data: 'heartbeat', event: 'ping' })
-    }
-  })
-})
-
-// --- Schedules ---
-api.get('/schedules', async (c) => {
-  try {
-    const schedules = await queueService.listSchedules()
-    return c.json({ schedules })
-  } catch (_err) {
-    return c.json({ error: 'Failed to list schedules' }, 500)
-  }
-})
-
-api.post('/schedules', async (c) => {
-  const body = await c.req.json()
-  try {
-    await queueService.registerSchedule(body)
-    return c.json({ success: true })
-  } catch (_err) {
-    return c.json({ error: 'Failed to register schedule' }, 500)
-  }
-})
-
-api.post('/schedules/run/:id', async (c) => {
-  const id = c.req.param('id')
-  try {
-    await queueService.runScheduleNow(id)
-    return c.json({ success: true })
-  } catch (_err) {
-    return c.json({ error: 'Failed to run schedule' }, 500)
-  }
-})
-
-api.delete('/schedules/:id', async (c) => {
-  const id = c.req.param('id')
-  try {
-    await queueService.removeSchedule(id)
-    return c.json({ success: true })
-  } catch (_err) {
-    return c.json({ error: 'Failed to remove schedule' }, 500)
-  }
-})
-
-// --- Alerting ---
-api.get('/alerts/config', async (c) => {
-  return c.json({
-    rules: queueService.alerts.getRules(),
-    config: queueService.alerts.getConfig(),
-    // maintenance: await queueService.getMaintenanceConfig(),
-  })
-})
-
-// Maintenance API temporarily disabled - requires ServerConfigManager enhancement
-// api.post('/maintenance/config', async (c) => {
-//   const config = await c.req.json()
-//   try {
-//     // await queueService.saveMaintenanceConfig(config)
-//     return c.json({ success: true })
-//   } catch (_err) {
-//     return c.json({ error: 'Failed to save maintenance config' }, 500)
-//   }
-// })
-
-api.post('/alerts/config', async (c) => {
-  const config = await c.req.json()
-  try {
-    await queueService.alerts.saveConfig(config)
-    return c.json({ success: true })
-  } catch (_err) {
-    return c.json({ error: 'Failed to save alert config' }, 500)
-  }
-})
-
-api.post('/alerts/rules', async (c) => {
-  const rule = await c.req.json()
-  try {
-    await queueService.alerts.addRule(rule)
-    return c.json({ success: true })
-  } catch (_err) {
-    return c.json({ error: 'Failed to add rule' }, 500)
-  }
-})
-
-api.delete('/alerts/rules/:id', async (c) => {
-  const id = c.req.param('id')
-  try {
-    await queueService.alerts.deleteRule(id)
-    return c.json({ success: true })
-  } catch (_err) {
-    return c.json({ error: 'Failed to delete rule' }, 500)
-  }
-})
-
-api.post('/alerts/test', async (c) => {
-  try {
-    const nodes = await pulseService.getNodes()
-    queueService.alerts.check({
-      queues: [],
-      nodes,
-      workers: [
-        {
-          id: 'test-node',
-          hostname: 'localhost',
-          pid: 0,
-          uptime: 0,
-          memory: { rss: '0', heapTotal: '0', heapUsed: '0' },
-          queues: [],
-        },
-      ] as any,
-      totals: { waiting: 9999, delayed: 0, failed: 9999 },
-    })
-    return c.json({ success: true, message: 'Test alert dispatched' })
-  } catch (_err) {
-    return c.json({ error: 'Test failed' }, 500)
-  }
-})
-
-app.route('/api', api)
-
-app.use(
-  '/*',
-  serveStatic({
-    root: './dist/client',
-  })
-)
-
-app.get('*', serveStatic({ path: './dist/client/index.html' }))
-
-console.log(`[FluxConsole] Server starting on http://localhost:${PORT}`)
-
-export default {
-  port: PORT,
-  fetch: app.fetch,
-}