@karmaniverous/jeeves-runner 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +28 -0
- package/README.md +401 -0
- package/dist/cli/jeeves-runner/index.js +880 -0
- package/dist/db/migrations/001-initial.sql +61 -0
- package/dist/index.d.ts +270 -0
- package/dist/mjs/index.js +917 -0
- package/package.json +141 -0
|
@@ -0,0 +1,917 @@
|
|
|
1
|
+
import { z } from 'zod';
|
|
2
|
+
import { mkdirSync, readFileSync } from 'node:fs';
|
|
3
|
+
import { pino } from 'pino';
|
|
4
|
+
import Fastify from 'fastify';
|
|
5
|
+
import { dirname } from 'node:path';
|
|
6
|
+
import { DatabaseSync } from 'node:sqlite';
|
|
7
|
+
import { request } from 'node:https';
|
|
8
|
+
import { spawn } from 'node:child_process';
|
|
9
|
+
import { Cron } from 'croner';
|
|
10
|
+
|
|
11
|
+
/**
 * Runner configuration schema and types.
 *
 * Zod schemas that validate runner configuration and supply defaults.
 *
 * @module
 */
/** Notification configuration sub-schema. */
const notificationsSchema = z.object({
  // Path to a file containing the Slack bot token (read once at startup).
  slackTokenPath: z.string().optional(),
  // Default Slack channels for failure/success notifications; null disables.
  defaultOnFailure: z.string().nullable().default(null),
  defaultOnSuccess: z.string().nullable().default(null),
});
/** Log configuration sub-schema. */
const logSchema = z.object({
  level: z
    .enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal'])
    .default('info'),
  // Optional log file destination; when absent, pino logs to stdout.
  file: z.string().optional(),
});
/** Full runner configuration schema. Validates and provides defaults. */
const runnerConfigSchema = z.object({
  port: z.number().default(3100),                        // API server port (bound to 127.0.0.1)
  dbPath: z.string().default('./data/runner.sqlite'),
  maxConcurrency: z.number().default(4),                 // max simultaneously running jobs
  runRetentionDays: z.number().default(30),              // runs older than this are pruned
  cursorCleanupIntervalMs: z.number().default(3600000),  // maintenance sweep interval (1 hour)
  shutdownGraceMs: z.number().default(30000),            // grace window for running jobs at shutdown
  notifications: notificationsSchema.default({
    defaultOnFailure: null,
    defaultOnSuccess: null,
  }),
  log: logSchema.default({ level: 'info' }),
});

/**
 * Job definition schema and types.
 *
 * @module
 */
const jobSchema = z.object({
  id: z.string(),
  name: z.string(),
  schedule: z.string(),                                  // cron expression (croner syntax)
  script: z.string(),                                    // path of the Node script to spawn
  type: z.enum(['script', 'session']).default('script'),
  description: z.string().optional(),
  enabled: z.boolean().default(true),
  timeoutMs: z.number().optional(),                      // no timeout when absent
  // What to do when the schedule fires while a previous run is still in flight.
  overlapPolicy: z.enum(['skip', 'queue', 'allow']).default('skip'),
  // Per-job Slack channels for notifications; null disables.
  onFailure: z.string().nullable().default(null),
  onSuccess: z.string().nullable().default(null),
});

/**
 * Run record schema and types.
 *
 * @module
 */
const runStatusSchema = z.enum([
  'pending',
  'running',
  'ok',
  'error',
  'timeout',
  'skipped',
]);
const runTriggerSchema = z.enum(['schedule', 'manual', 'retry']);
const runSchema = z.object({
  id: z.number(),
  jobId: z.string(),
  status: runStatusSchema,
  startedAt: z.string().optional(),                      // SQLite datetime('now') strings
  finishedAt: z.string().optional(),
  durationMs: z.number().optional(),
  exitCode: z.number().optional(),
  tokens: z.number().optional(),                         // parsed from JR_RESULT stdout lines
  resultMeta: z.string().optional(),
  error: z.string().optional(),
  stdoutTail: z.string().optional(),                     // last captured lines of output
  stderrTail: z.string().optional(),
  trigger: runTriggerSchema.default('schedule'),
});
|
|
92
|
+
|
|
93
|
+
/**
 * Fastify API routes for job management and monitoring. Provides endpoints for
 * job CRUD, run history, manual triggers, and system stats.
 */
/**
 * Register all API routes on the Fastify instance.
 *
 * @param app - Fastify instance to attach routes to.
 * @param deps - Dependencies: `db` (SQLite handle) and `scheduler`.
 */
function registerRoutes(app, deps) {
  const { db, scheduler } = deps;
  /** GET /health — Health check. */
  app.get('/health', () => {
    return { ok: true, uptime: process.uptime() };
  });
  /** GET /jobs — List all jobs with last run status. */
  app.get('/jobs', () => {
    // Correlated subqueries fetch each job's most recent run status/time.
    const rows = db
      .prepare(`SELECT j.*,
(SELECT status FROM runs WHERE job_id = j.id ORDER BY started_at DESC LIMIT 1) as last_status,
(SELECT started_at FROM runs WHERE job_id = j.id ORDER BY started_at DESC LIMIT 1) as last_run
FROM jobs j`)
      .all();
    return { jobs: rows };
  });
  /** GET /jobs/:id — Single job detail; 404 when the id is unknown. */
  // NOTE(review): the `request` handler parameter shadows the `request`
  // import from node:https throughout these handlers — intentional, but
  // easy to trip on when editing.
  app.get('/jobs/:id', async (request, reply) => {
    const job = db
      .prepare('SELECT * FROM jobs WHERE id = ?')
      .get(request.params.id);
    if (!job) {
      reply.code(404);
      return { error: 'Job not found' };
    }
    return { job };
  });
  /** GET /jobs/:id/runs — Run history for a job (newest first; `limit` query param, default 50). */
  app.get('/jobs/:id/runs', (request) => {
    const limit = parseInt(request.query.limit ?? '50', 10);
    const runs = db
      .prepare('SELECT * FROM runs WHERE job_id = ? ORDER BY started_at DESC LIMIT ?')
      .all(request.params.id, limit);
    return { runs };
  });
  /** POST /jobs/:id/run — Trigger manual job run; responds after the run completes. */
  app.post('/jobs/:id/run', async (request, reply) => {
    try {
      const result = await scheduler.triggerJob(request.params.id);
      return { result };
    }
    catch (err) {
      // NOTE(review): every trigger failure (unknown job, and also the
      // concurrency-limit error thrown by the scheduler) maps to 404 here —
      // confirm whether the latter should be a 409/503 instead.
      reply.code(404);
      return { error: err instanceof Error ? err.message : 'Unknown error' };
    }
  });
  /** POST /jobs/:id/enable — Enable a job. 404 when no row was updated. */
  app.post('/jobs/:id/enable', (request, reply) => {
    const result = db
      .prepare('UPDATE jobs SET enabled = 1 WHERE id = ?')
      .run(request.params.id);
    if (result.changes === 0) {
      reply.code(404);
      return { error: 'Job not found' };
    }
    return { ok: true };
  });
  /** POST /jobs/:id/disable — Disable a job. 404 when no row was updated. */
  // NOTE(review): enable/disable only update the DB row; the scheduler loads
  // jobs in start(), so presumably the change takes effect after a restart —
  // confirm against scheduler behavior.
  app.post('/jobs/:id/disable', (request, reply) => {
    const result = db
      .prepare('UPDATE jobs SET enabled = 0 WHERE id = ?')
      .run(request.params.id);
    if (result.changes === 0) {
      reply.code(404);
      return { error: 'Job not found' };
    }
    return { ok: true };
  });
  /** GET /stats — Aggregate job statistics. */
  app.get('/stats', () => {
    const totalJobs = db
      .prepare('SELECT COUNT(*) as count FROM jobs')
      .get();
    // In-memory count of currently executing jobs, not a DB query.
    const runningCount = scheduler.getRunningJobs().length;
    const okLastHour = db
      .prepare(`SELECT COUNT(*) as count FROM runs
WHERE status = 'ok' AND started_at > datetime('now', '-1 hour')`)
      .get();
    const errorsLastHour = db
      .prepare(`SELECT COUNT(*) as count FROM runs
WHERE status IN ('error', 'timeout') AND started_at > datetime('now', '-1 hour')`)
      .get();
    return {
      totalJobs: totalJobs.count,
      running: runningCount,
      okLastHour: okLastHour.count,
      errorsLastHour: errorsLastHour.count,
    };
  });
}
|
|
189
|
+
|
|
190
|
+
/**
 * Fastify HTTP server for runner API. Creates server instance with logging,
 * registers routes, listens on configured port (localhost only).
 */
/**
 * Create and configure the Fastify server. Routes are registered but the
 * server is not started; the caller invokes listen().
 *
 * @param config - Runner configuration (log settings are used here).
 * @param deps - Route dependencies passed through to registerRoutes.
 * @returns Configured Fastify instance.
 */
function createServer(config, deps) {
  const loggerOptions = { level: config.log.level };
  // When a log file is configured, route pino output through the file transport.
  if (config.log.file) {
    loggerOptions.transport = {
      target: 'pino/file',
      options: { destination: config.log.file },
    };
  }
  const app = Fastify({ logger: loggerOptions });
  registerRoutes(app, deps);
  return app;
}
|
|
213
|
+
|
|
214
|
+
/**
|
|
215
|
+
* SQLite connection manager. Creates DB file with parent directories, enables WAL mode for concurrency.
|
|
216
|
+
*/
|
|
217
|
+
/**
|
|
218
|
+
* Create and configure a SQLite database connection.
|
|
219
|
+
* Ensures parent directories exist and enables WAL mode for better concurrency.
|
|
220
|
+
*/
|
|
221
|
+
function createConnection(dbPath) {
|
|
222
|
+
// Ensure parent directory exists
|
|
223
|
+
const dir = dirname(dbPath);
|
|
224
|
+
mkdirSync(dir, { recursive: true });
|
|
225
|
+
// Open database
|
|
226
|
+
const db = new DatabaseSync(dbPath);
|
|
227
|
+
// Enable WAL mode for better concurrency
|
|
228
|
+
db.exec('PRAGMA journal_mode = WAL;');
|
|
229
|
+
db.exec('PRAGMA foreign_keys = ON;');
|
|
230
|
+
return db;
|
|
231
|
+
}
|
|
232
|
+
/**
 * Close a database connection cleanly.
 *
 * @param db - An open database handle exposing close().
 */
function closeConnection(db) {
  db.close();
}
|
|
238
|
+
|
|
239
|
+
/**
 * Database maintenance tasks: run retention pruning and expired cursor cleanup.
 */
/**
 * Delete runs older than the configured retention period.
 *
 * @param db - SQLite database handle.
 * @param days - Retention window in days; runs started earlier are deleted.
 * @param logger - Pino-style logger used to report deletions.
 */
function pruneOldRuns(db, days, logger) {
  // Bind the datetime modifier as a parameter instead of interpolating `days`
  // into the SQL text, so an unexpected value cannot alter the statement.
  const result = db
    .prepare(`DELETE FROM runs WHERE started_at < datetime('now', ?)`)
    .run(`-${String(days)} days`);
  if (result.changes > 0) {
    logger.info({ deleted: result.changes }, 'Pruned old runs');
  }
}
|
|
251
|
+
/** Delete cursor rows whose expiry timestamp has passed. */
function cleanExpiredCursors(db, logger) {
  const stmt = db.prepare(
    `DELETE FROM cursors WHERE expires_at IS NOT NULL AND expires_at < datetime('now')`,
  );
  const { changes } = stmt.run();
  // Log only when something was actually removed.
  if (changes > 0) {
    logger.info({ deleted: changes }, 'Cleaned expired cursors');
  }
}
|
|
260
|
+
/**
 * Create the maintenance controller. Runs cleanup tasks once on startup and
 * then at the configured interval until stopped.
 *
 * @param db - SQLite database handle.
 * @param config - Needs `runRetentionDays` and `cursorCleanupIntervalMs`.
 * @param logger - Pino-style logger passed through to the cleanup tasks.
 */
function createMaintenance(db, config, logger) {
  let timer = null;
  // One sweep = retention pruning followed by expired-cursor cleanup.
  const sweep = () => {
    pruneOldRuns(db, config.runRetentionDays, logger);
    cleanExpiredCursors(db, logger);
  };
  return {
    /** Run cleanup immediately, then on every cursorCleanupIntervalMs tick. */
    start() {
      sweep();
      timer = setInterval(sweep, config.cursorCleanupIntervalMs);
    },
    /** Cancel the periodic sweep; safe to call when not started. */
    stop() {
      if (timer !== null) {
        clearInterval(timer);
        timer = null;
      }
    },
    /** Trigger a one-off sweep outside the regular schedule. */
    runNow() {
      sweep();
    },
  };
}
|
|
287
|
+
|
|
288
|
+
/**
 * Schema migration runner. Tracks applied migrations via schema_version table,
 * applies pending migrations idempotently.
 */
/** Initial schema migration SQL (embedded to avoid runtime file resolution issues). */
// Tables: jobs (definitions), runs (execution history), cursors (job state
// with optional expiry), queues (simple work queue). All DDL is IF NOT EXISTS
// so re-running the migration is harmless.
const MIGRATION_001 = `
CREATE TABLE IF NOT EXISTS jobs (
  id TEXT PRIMARY KEY,
  name TEXT NOT NULL,
  schedule TEXT NOT NULL,
  script TEXT NOT NULL,
  type TEXT DEFAULT 'script',
  description TEXT,
  enabled INTEGER DEFAULT 1,
  timeout_ms INTEGER,
  overlap_policy TEXT DEFAULT 'skip',
  on_failure TEXT,
  on_success TEXT,
  created_at TEXT DEFAULT (datetime('now')),
  updated_at TEXT DEFAULT (datetime('now'))
);

CREATE TABLE IF NOT EXISTS runs (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  job_id TEXT NOT NULL REFERENCES jobs(id),
  status TEXT NOT NULL,
  started_at TEXT,
  finished_at TEXT,
  duration_ms INTEGER,
  exit_code INTEGER,
  tokens INTEGER,
  result_meta TEXT,
  error TEXT,
  stdout_tail TEXT,
  stderr_tail TEXT,
  trigger TEXT DEFAULT 'schedule'
);

CREATE INDEX IF NOT EXISTS idx_runs_job_started ON runs(job_id, started_at DESC);
CREATE INDEX IF NOT EXISTS idx_runs_status ON runs(status);

CREATE TABLE IF NOT EXISTS cursors (
  namespace TEXT NOT NULL,
  key TEXT NOT NULL,
  value TEXT,
  expires_at TEXT,
  updated_at TEXT DEFAULT (datetime('now')),
  PRIMARY KEY (namespace, key)
);

CREATE INDEX IF NOT EXISTS idx_cursors_expires ON cursors(expires_at) WHERE expires_at IS NOT NULL;

CREATE TABLE IF NOT EXISTS queues (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  queue TEXT NOT NULL,
  payload TEXT NOT NULL,
  status TEXT DEFAULT 'pending',
  priority INTEGER DEFAULT 0,
  attempts INTEGER DEFAULT 0,
  max_attempts INTEGER DEFAULT 1,
  error TEXT,
  created_at TEXT DEFAULT (datetime('now')),
  claimed_at TEXT,
  finished_at TEXT
);

CREATE INDEX IF NOT EXISTS idx_queues_poll ON queues(queue, status, priority DESC, created_at);
`;
/** Registry of all migrations keyed by version number; runMigrations applies keys greater than the current version in ascending order. */
const MIGRATIONS = {
  1: MIGRATION_001,
};
|
|
359
|
+
/**
 * Run all pending migrations. Creates schema_version table if needed,
 * then applies every registered migration above the current version, in order.
 *
 * @param db - Open SQLite database handle.
 */
function runMigrations(db) {
  // Bootstrap the version-tracking table on first run.
  db.exec(`
CREATE TABLE IF NOT EXISTS schema_version (
  version INTEGER PRIMARY KEY,
  applied_at TEXT DEFAULT (datetime('now'))
);
`);
  // MAX(version) is NULL on a fresh database; treat that as version 0.
  const row = db
    .prepare('SELECT MAX(version) as version FROM schema_version')
    .get();
  const applied = row?.version ?? 0;
  const pending = Object.keys(MIGRATIONS)
    .map(Number)
    .filter((version) => version > applied)
    .sort((a, b) => a - b);
  for (const version of pending) {
    db.exec(MIGRATIONS[version]);
    db.prepare('INSERT INTO schema_version (version) VALUES (?)').run(version);
  }
}
|
|
385
|
+
|
|
386
|
+
/**
 * Slack notification module. Sends job completion/failure messages via Slack
 * Web API (chat.postMessage). Falls back gracefully if no token.
 */
/**
 * Post a message to Slack via the chat.postMessage API.
 *
 * Slack's Web API returns HTTP 200 even for API-level failures and reports
 * them via an `ok: false` field in the JSON body, so both the status code
 * and the response body are checked before resolving.
 *
 * @param token - Slack bot token (sent as a Bearer header).
 * @param channel - Target channel ID or name.
 * @param text - Message text.
 * @returns Promise that resolves on success and rejects with an Error otherwise.
 */
function postToSlack(token, channel, text) {
  return new Promise((resolve, reject) => {
    const payload = JSON.stringify({ channel, text });
    const req = request('https://slack.com/api/chat.postMessage', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${token}`,
        'Content-Length': Buffer.byteLength(payload),
      },
    }, (res) => {
      let body = '';
      res.on('data', (chunk) => {
        body += chunk.toString();
      });
      res.on('end', () => {
        if (res.statusCode !== 200) {
          reject(new Error(`Slack API returned ${String(res.statusCode)}: ${body}`));
          return;
        }
        // chat.postMessage reports API errors in the body with HTTP 200.
        try {
          const data = JSON.parse(body);
          if (data.ok) {
            resolve();
          }
          else {
            reject(new Error(`Slack API error: ${String(data.error ?? 'unknown')}`));
          }
        }
        catch {
          reject(new Error(`Slack API returned unparseable response: ${body}`));
        }
      });
    });
    req.on('error', reject);
    req.write(payload);
    req.end();
  });
}
|
|
419
|
+
/**
 * Create a notifier that sends Slack messages for job events. When no token
 * is configured, each call logs a warning and returns without sending.
 *
 * @param config - Object carrying `slackToken` (string or null).
 */
function createNotifier(config) {
  const { slackToken } = config;
  // Runs are timed in ms; Slack messages show seconds with one decimal.
  const formatSeconds = (durationMs) => (durationMs / 1000).toFixed(1);
  return {
    /** Announce a successful run in the given channel. */
    async notifySuccess(jobName, durationMs, channel) {
      if (!slackToken) {
        console.warn(`No Slack token configured — skipping success notification for ${jobName}`);
        return;
      }
      await postToSlack(slackToken, channel, `✅ *${jobName}* completed (${formatSeconds(durationMs)}s)`);
    },
    /** Announce a failed run, including the error text when present. */
    async notifyFailure(jobName, durationMs, error, channel) {
      if (!slackToken) {
        console.warn(`No Slack token configured — skipping failure notification for ${jobName}`);
        return;
      }
      const errorMsg = error ? `: ${error}` : '';
      await postToSlack(slackToken, channel, `⚠️ *${jobName}* failed (${formatSeconds(durationMs)}s)${errorMsg}`);
    },
  };
}
|
|
446
|
+
|
|
447
|
+
/**
|
|
448
|
+
* Job executor. Spawns job scripts as child processes, captures output, parses result metadata, enforces timeouts.
|
|
449
|
+
*/
|
|
450
|
+
/** Ring buffer for capturing the last N lines of output. */
class RingBuffer {
  maxLines;
  lines = [];
  constructor(maxLines) {
    this.maxLines = maxLines;
  }
  /** Append a line, evicting the oldest entries when over capacity. */
  append(line) {
    const buffered = this.lines;
    buffered.push(line);
    if (buffered.length > this.maxLines) {
      buffered.splice(0, buffered.length - this.maxLines);
    }
  }
  /** Join the retained lines into a single newline-separated string. */
  getAll() {
    return this.lines.join('\n');
  }
}
|
|
467
|
+
/** Parse JR_RESULT:\{json\} lines from stdout to extract tokens and resultMeta. */
function parseResultLines(stdout) {
  let tokens = null;
  let resultMeta = null;
  // Later JR_RESULT lines override earlier ones, field by field.
  for (const raw of stdout.split('\n')) {
    const match = /^JR_RESULT:(.+)$/.exec(raw.trim());
    if (!match)
      continue;
    try {
      const data = JSON.parse(match[1]);
      if (data.tokens !== undefined)
        tokens = data.tokens;
      if (data.meta !== undefined)
        resultMeta = data.meta;
    }
    catch {
      // Malformed JSON on a JR_RESULT line is ignored; other lines may still parse.
    }
  }
  return { tokens, resultMeta };
}
|
|
489
|
+
/**
 * Execute a job script as a child process. Captures output, parses metadata,
 * enforces timeout.
 *
 * The returned promise always resolves — spawn failures, non-zero exits, and
 * timeouts are reported through the result object's `status`/`error` fields
 * rather than by rejecting.
 *
 * @param options - `script` (path run via `node`), `dbPath`, `jobId`, `runId`,
 *   and optional `timeoutMs` (no timeout when absent).
 */
function executeJob(options) {
  const { script, dbPath, jobId, runId, timeoutMs } = options;
  const startTime = Date.now();
  return new Promise((resolve) => {
    // Retain only the last 100 lines of each stream for the run record.
    const stdoutBuffer = new RingBuffer(100);
    const stderrBuffer = new RingBuffer(100);
    const child = spawn('node', [script], {
      env: {
        ...process.env,
        // Job scripts receive their execution context via JR_* env vars.
        JR_DB_PATH: dbPath,
        JR_JOB_ID: jobId,
        JR_RUN_ID: String(runId),
      },
      stdio: ['ignore', 'pipe', 'pipe'],
    });
    let timedOut = false;
    let timeoutHandle = null;
    if (timeoutMs) {
      timeoutHandle = setTimeout(() => {
        timedOut = true;
        // SIGTERM first; escalate to SIGKILL if the child is still alive in 5s
        // (kill() on an already-exited child is a no-op).
        child.kill('SIGTERM');
        setTimeout(() => child.kill('SIGKILL'), 5000); // Force kill after 5s
      }, timeoutMs);
    }
    // NOTE(review): each data chunk is split on '\n' independently, so a line
    // straddling two chunks is stored as two buffer entries; a JR_RESULT line
    // split that way would not parse — confirm whether exact line reassembly
    // matters here.
    child.stdout.on('data', (chunk) => {
      const lines = chunk.toString().split('\n');
      for (const line of lines) {
        if (line.trim())
          stdoutBuffer.append(line);
      }
    });
    child.stderr.on('data', (chunk) => {
      const lines = chunk.toString().split('\n');
      for (const line of lines) {
        if (line.trim())
          stderrBuffer.append(line);
      }
    });
    // 'close' fires after the process exits AND both stdio streams end, so the
    // buffers are complete here.
    child.on('close', (exitCode) => {
      if (timeoutHandle)
        clearTimeout(timeoutHandle);
      const durationMs = Date.now() - startTime;
      const stdoutTail = stdoutBuffer.getAll();
      const stderrTail = stderrBuffer.getAll();
      // Metadata is parsed from the retained tail only (last 100 lines).
      const { tokens, resultMeta } = parseResultLines(stdoutTail);
      if (timedOut) {
        resolve({
          status: 'timeout',
          exitCode: null,
          durationMs,
          tokens: null,
          resultMeta: null,
          stdoutTail,
          stderrTail,
          error: `Job timed out after ${String(timeoutMs)}ms`,
        });
      }
      else if (exitCode === 0) {
        resolve({
          status: 'ok',
          exitCode,
          durationMs,
          tokens,
          resultMeta,
          stdoutTail,
          stderrTail,
          error: null,
        });
      }
      else {
        resolve({
          status: 'error',
          exitCode,
          durationMs,
          tokens,
          resultMeta,
          stdoutTail,
          stderrTail,
          // Prefer captured stderr as the error text; fall back to the exit code.
          error: stderrTail || `Exit code ${String(exitCode)}`,
        });
      }
    });
    // 'error' fires when the process could not be spawned at all.
    child.on('error', (err) => {
      if (timeoutHandle)
        clearTimeout(timeoutHandle);
      const durationMs = Date.now() - startTime;
      resolve({
        status: 'error',
        exitCode: null,
        durationMs,
        tokens: null,
        resultMeta: null,
        stdoutTail: stdoutBuffer.getAll(),
        stderrTail: stderrBuffer.getAll(),
        error: err.message,
      });
    });
  });
}
|
|
591
|
+
|
|
592
|
+
/**
|
|
593
|
+
* Croner-based job scheduler. Loads enabled jobs, creates cron instances, manages execution, respects overlap policies and concurrency limits.
|
|
594
|
+
*/
|
|
595
|
+
/**
|
|
596
|
+
* Create the job scheduler. Manages cron schedules, job execution, overlap policies, and notifications.
|
|
597
|
+
*/
|
|
598
|
+
function createScheduler(deps) {
|
|
599
|
+
const { db, executor, notifier, config, logger } = deps;
|
|
600
|
+
const crons = new Map();
|
|
601
|
+
const runningJobs = new Set();
|
|
602
|
+
/** Insert a run record and return its ID. */
|
|
603
|
+
function createRun(jobId, trigger) {
|
|
604
|
+
const result = db
|
|
605
|
+
.prepare(`INSERT INTO runs (job_id, status, started_at, trigger)
|
|
606
|
+
VALUES (?, 'running', datetime('now'), ?)`)
|
|
607
|
+
.run(jobId, trigger);
|
|
608
|
+
return result.lastInsertRowid;
|
|
609
|
+
}
|
|
610
|
+
/** Update run record with completion data. */
|
|
611
|
+
function finishRun(runId, execResult) {
|
|
612
|
+
db.prepare(`UPDATE runs SET status = ?, finished_at = datetime('now'), duration_ms = ?,
|
|
613
|
+
exit_code = ?, tokens = ?, result_meta = ?, error = ?, stdout_tail = ?, stderr_tail = ?
|
|
614
|
+
WHERE id = ?`).run(execResult.status, execResult.durationMs, execResult.exitCode, execResult.tokens, execResult.resultMeta, execResult.error, execResult.stdoutTail, execResult.stderrTail, runId);
|
|
615
|
+
}
|
|
616
|
+
/** Execute a job: create run record, run script, update record, send notifications. */
|
|
617
|
+
async function runJob(job, trigger) {
|
|
618
|
+
const { id, name, script, timeout_ms, on_success, on_failure } = job;
|
|
619
|
+
// Check concurrency limit
|
|
620
|
+
if (runningJobs.size >= config.maxConcurrency) {
|
|
621
|
+
logger.warn({ jobId: id }, 'Max concurrency reached, skipping job');
|
|
622
|
+
throw new Error('Max concurrency reached');
|
|
623
|
+
}
|
|
624
|
+
runningJobs.add(id);
|
|
625
|
+
const runId = createRun(id, trigger);
|
|
626
|
+
logger.info({ jobId: id, runId, trigger }, 'Starting job');
|
|
627
|
+
try {
|
|
628
|
+
const result = await executor({
|
|
629
|
+
script,
|
|
630
|
+
dbPath: config.dbPath,
|
|
631
|
+
jobId: id,
|
|
632
|
+
runId,
|
|
633
|
+
timeoutMs: timeout_ms ?? undefined,
|
|
634
|
+
});
|
|
635
|
+
finishRun(runId, result);
|
|
636
|
+
logger.info({ jobId: id, runId, status: result.status }, 'Job finished');
|
|
637
|
+
// Send notifications
|
|
638
|
+
if (result.status === 'ok' && on_success) {
|
|
639
|
+
await notifier
|
|
640
|
+
.notifySuccess(name, result.durationMs, on_success)
|
|
641
|
+
.catch((err) => {
|
|
642
|
+
logger.error({ jobId: id, err }, 'Notification failed');
|
|
643
|
+
});
|
|
644
|
+
}
|
|
645
|
+
else if (result.status !== 'ok' && on_failure) {
|
|
646
|
+
await notifier
|
|
647
|
+
.notifyFailure(name, result.durationMs, result.error, on_failure)
|
|
648
|
+
.catch((err) => {
|
|
649
|
+
logger.error({ jobId: id, err }, 'Notification failed');
|
|
650
|
+
});
|
|
651
|
+
}
|
|
652
|
+
return result;
|
|
653
|
+
}
|
|
654
|
+
finally {
|
|
655
|
+
runningJobs.delete(id);
|
|
656
|
+
}
|
|
657
|
+
}
|
|
658
|
+
/** Handle scheduled job fire. */
|
|
659
|
+
async function onScheduledRun(job) {
|
|
660
|
+
const { id, overlap_policy } = job;
|
|
661
|
+
// Check overlap policy
|
|
662
|
+
if (runningJobs.has(id)) {
|
|
663
|
+
if (overlap_policy === 'skip') {
|
|
664
|
+
logger.info({ jobId: id }, 'Job already running, skipping (overlap_policy=skip)');
|
|
665
|
+
return;
|
|
666
|
+
}
|
|
667
|
+
else if (overlap_policy === 'queue') {
|
|
668
|
+
logger.info({ jobId: id }, 'Job already running, queueing (overlap_policy=queue)');
|
|
669
|
+
// In a real implementation, we'd queue this. For now, just skip.
|
|
670
|
+
return;
|
|
671
|
+
}
|
|
672
|
+
// 'allow' policy: proceed
|
|
673
|
+
}
|
|
674
|
+
await runJob(job, 'schedule').catch((err) => {
|
|
675
|
+
logger.error({ jobId: id, err }, 'Job execution failed');
|
|
676
|
+
});
|
|
677
|
+
}
|
|
678
|
+
return {
|
|
679
|
+
start() {
|
|
680
|
+
// Load all enabled jobs
|
|
681
|
+
const jobs = db
|
|
682
|
+
.prepare('SELECT * FROM jobs WHERE enabled = 1')
|
|
683
|
+
.all();
|
|
684
|
+
logger.info({ count: jobs.length }, 'Loading jobs');
|
|
685
|
+
for (const job of jobs) {
|
|
686
|
+
try {
|
|
687
|
+
const cron = new Cron(job.schedule, () => {
|
|
688
|
+
void onScheduledRun(job);
|
|
689
|
+
});
|
|
690
|
+
crons.set(job.id, cron);
|
|
691
|
+
logger.info({ jobId: job.id, schedule: job.schedule }, 'Scheduled job');
|
|
692
|
+
}
|
|
693
|
+
catch (err) {
|
|
694
|
+
logger.error({ jobId: job.id, err }, 'Failed to schedule job');
|
|
695
|
+
}
|
|
696
|
+
}
|
|
697
|
+
},
|
|
698
|
+
stop() {
|
|
699
|
+
logger.info('Stopping scheduler');
|
|
700
|
+
// Stop all crons
|
|
701
|
+
for (const cron of crons.values()) {
|
|
702
|
+
cron.stop();
|
|
703
|
+
}
|
|
704
|
+
crons.clear();
|
|
705
|
+
// Wait for running jobs (simple poll with timeout)
|
|
706
|
+
const deadline = Date.now() + config.shutdownGraceMs;
|
|
707
|
+
const checkInterval = setInterval(() => {
|
|
708
|
+
if (runningJobs.size === 0 || Date.now() > deadline) {
|
|
709
|
+
clearInterval(checkInterval);
|
|
710
|
+
if (runningJobs.size > 0) {
|
|
711
|
+
logger.warn({ count: runningJobs.size }, 'Forced shutdown with running jobs');
|
|
712
|
+
}
|
|
713
|
+
}
|
|
714
|
+
}, 100);
|
|
715
|
+
},
|
|
716
|
+
async triggerJob(jobId) {
|
|
717
|
+
const job = db.prepare('SELECT * FROM jobs WHERE id = ?').get(jobId);
|
|
718
|
+
if (!job)
|
|
719
|
+
throw new Error(`Job not found: ${jobId}`);
|
|
720
|
+
return runJob(job, 'manual');
|
|
721
|
+
},
|
|
722
|
+
getRunningJobs() {
|
|
723
|
+
return Array.from(runningJobs);
|
|
724
|
+
},
|
|
725
|
+
};
|
|
726
|
+
}
|
|
727
|
+
|
|
728
|
+
/**
|
|
729
|
+
* Main runner orchestrator. Wires up database, scheduler, API server, and handles graceful shutdown on SIGTERM/SIGINT.
|
|
730
|
+
*/
|
|
731
|
+
/**
|
|
732
|
+
* Create the runner. Initializes database, scheduler, API server, and sets up graceful shutdown.
|
|
733
|
+
*/
|
|
734
|
+
function createRunner(config) {
|
|
735
|
+
let db = null;
|
|
736
|
+
let scheduler = null;
|
|
737
|
+
let server = null;
|
|
738
|
+
let maintenance = null;
|
|
739
|
+
const logger = pino({
|
|
740
|
+
level: config.log.level,
|
|
741
|
+
...(config.log.file
|
|
742
|
+
? {
|
|
743
|
+
transport: {
|
|
744
|
+
target: 'pino/file',
|
|
745
|
+
options: { destination: config.log.file },
|
|
746
|
+
},
|
|
747
|
+
}
|
|
748
|
+
: {}),
|
|
749
|
+
});
|
|
750
|
+
return {
|
|
751
|
+
async start() {
|
|
752
|
+
logger.info('Starting runner');
|
|
753
|
+
// Database
|
|
754
|
+
db = createConnection(config.dbPath);
|
|
755
|
+
runMigrations(db);
|
|
756
|
+
logger.info({ dbPath: config.dbPath }, 'Database ready');
|
|
757
|
+
// Notifier
|
|
758
|
+
const slackToken = config.notifications.slackTokenPath
|
|
759
|
+
? readFileSync(config.notifications.slackTokenPath, 'utf-8').trim()
|
|
760
|
+
: null;
|
|
761
|
+
const notifier = createNotifier({ slackToken });
|
|
762
|
+
// Maintenance (run retention pruning + cursor cleanup)
|
|
763
|
+
maintenance = createMaintenance(db, {
|
|
764
|
+
runRetentionDays: config.runRetentionDays,
|
|
765
|
+
cursorCleanupIntervalMs: config.cursorCleanupIntervalMs,
|
|
766
|
+
}, logger);
|
|
767
|
+
maintenance.start();
|
|
768
|
+
logger.info('Maintenance tasks started');
|
|
769
|
+
// Scheduler
|
|
770
|
+
scheduler = createScheduler({
|
|
771
|
+
db,
|
|
772
|
+
executor: executeJob,
|
|
773
|
+
notifier,
|
|
774
|
+
config,
|
|
775
|
+
logger,
|
|
776
|
+
});
|
|
777
|
+
scheduler.start();
|
|
778
|
+
logger.info('Scheduler started');
|
|
779
|
+
// API server
|
|
780
|
+
server = createServer(config, { db, scheduler });
|
|
781
|
+
await server.listen({ port: config.port, host: '127.0.0.1' });
|
|
782
|
+
logger.info({ port: config.port }, 'API server listening');
|
|
783
|
+
// Graceful shutdown
|
|
784
|
+
const shutdown = async (signal) => {
|
|
785
|
+
logger.info({ signal }, 'Received shutdown signal');
|
|
786
|
+
await this.stop();
|
|
787
|
+
process.exit(0);
|
|
788
|
+
};
|
|
789
|
+
process.on('SIGTERM', () => {
|
|
790
|
+
void shutdown('SIGTERM');
|
|
791
|
+
});
|
|
792
|
+
process.on('SIGINT', () => {
|
|
793
|
+
void shutdown('SIGINT');
|
|
794
|
+
});
|
|
795
|
+
},
|
|
796
|
+
async stop() {
|
|
797
|
+
logger.info('Stopping runner');
|
|
798
|
+
if (maintenance) {
|
|
799
|
+
maintenance.stop();
|
|
800
|
+
logger.info('Maintenance stopped');
|
|
801
|
+
}
|
|
802
|
+
if (scheduler) {
|
|
803
|
+
scheduler.stop();
|
|
804
|
+
logger.info('Scheduler stopped');
|
|
805
|
+
}
|
|
806
|
+
if (server) {
|
|
807
|
+
await server.close();
|
|
808
|
+
logger.info('API server stopped');
|
|
809
|
+
}
|
|
810
|
+
if (db) {
|
|
811
|
+
closeConnection(db);
|
|
812
|
+
logger.info('Database closed');
|
|
813
|
+
}
|
|
814
|
+
},
|
|
815
|
+
};
|
|
816
|
+
}
|
|
817
|
+
|
|
818
|
+
/**
|
|
819
|
+
* Job client library for runner jobs. Provides cursor (state) and queue operations. Opens its own DB connection via JR_DB_PATH env var.
|
|
820
|
+
*/
|
|
821
|
+
/** Parse TTL string (e.g., '30d', '24h', '60m') into ISO datetime offset from now. */
|
|
822
|
+
function parseTtl(ttl) {
|
|
823
|
+
const match = /^(\d+)([dhm])$/.exec(ttl);
|
|
824
|
+
if (!match)
|
|
825
|
+
throw new Error(`Invalid TTL format: ${ttl}`);
|
|
826
|
+
const amount = match[1];
|
|
827
|
+
const unit = match[2];
|
|
828
|
+
if (!amount || !unit)
|
|
829
|
+
throw new Error(`Invalid TTL format: ${ttl}`);
|
|
830
|
+
const num = parseInt(amount, 10);
|
|
831
|
+
let modifier;
|
|
832
|
+
switch (unit) {
|
|
833
|
+
case 'd':
|
|
834
|
+
modifier = `+${String(num)} days`;
|
|
835
|
+
break;
|
|
836
|
+
case 'h':
|
|
837
|
+
modifier = `+${String(num)} hours`;
|
|
838
|
+
break;
|
|
839
|
+
case 'm':
|
|
840
|
+
modifier = `+${String(num)} minutes`;
|
|
841
|
+
break;
|
|
842
|
+
default:
|
|
843
|
+
throw new Error(`Unknown TTL unit: ${unit}`);
|
|
844
|
+
}
|
|
845
|
+
return `datetime('now', '${modifier}')`;
|
|
846
|
+
}
|
|
847
|
+
/**
 * Create a runner client for job scripts. Opens its own DB connection
 * via `createConnection` against `dbPath` or the `JR_DB_PATH` env var.
 *
 * @param {string} [dbPath] - SQLite database path; falls back to the
 *   JR_DB_PATH environment variable when omitted.
 * @returns {object} Client exposing cursor (state) and queue operations
 *   plus `close()`.
 * @throws {Error} When neither `dbPath` nor `JR_DB_PATH` is provided.
 */
function createClient(dbPath) {
    const path = dbPath ?? process.env.JR_DB_PATH;
    if (!path)
        throw new Error('DB path required (provide dbPath or set JR_DB_PATH env var)');
    const db = createConnection(path);
    return {
        /** Read a cursor value; returns null when absent or expired. */
        getCursor(namespace, key) {
            const row = db
                .prepare(`SELECT value FROM cursors
         WHERE namespace = ? AND key = ?
           AND (expires_at IS NULL OR expires_at > datetime('now'))`)
                .get(namespace, key);
            return row?.value ?? null;
        },
        /** Upsert a cursor value, optionally with a TTL (e.g. '30d', '24h', '60m'). */
        setCursor(namespace, key, value, options) {
            // parseTtl validates the TTL and yields a fixed-form SQL
            // expression, so interpolating it into the statement is safe.
            const expiresAt = options?.ttl ? parseTtl(options.ttl) : null;
            const sql = expiresAt
                ? `INSERT INTO cursors (namespace, key, value, expires_at) VALUES (?, ?, ?, ${expiresAt})
           ON CONFLICT(namespace, key) DO UPDATE SET value = excluded.value, expires_at = excluded.expires_at, updated_at = datetime('now')`
                : `INSERT INTO cursors (namespace, key, value) VALUES (?, ?, ?)
           ON CONFLICT(namespace, key) DO UPDATE SET value = excluded.value, updated_at = datetime('now')`;
            db.prepare(sql).run(namespace, key, value);
        },
        /** Delete a cursor row (no-op when absent). */
        deleteCursor(namespace, key) {
            db.prepare('DELETE FROM cursors WHERE namespace = ? AND key = ?').run(namespace, key);
        },
        /**
         * Insert a queue item with a JSON-serialized payload.
         * @returns {number|bigint} The new row's id.
         */
        enqueue(queue, payload, options) {
            const priority = options?.priority ?? 0;
            const maxAttempts = options?.maxAttempts ?? 1;
            const payloadJson = JSON.stringify(payload);
            const result = db
                .prepare('INSERT INTO queues (queue, payload, priority, max_attempts) VALUES (?, ?, ?, ?)')
                .run(queue, payloadJson, priority, maxAttempts);
            return result.lastInsertRowid;
        },
        /**
         * Claim up to `count` pending items (highest priority first, then
         * oldest) and mark them 'processing', bumping their attempt count.
         */
        dequeue(queue, count = 1) {
            // Wrap the SELECT + UPDATE claim in an immediate transaction so
            // another process sharing the DB file cannot claim the same
            // rows between our read and our write.
            db.exec('BEGIN IMMEDIATE');
            try {
                const rows = db
                    .prepare(`SELECT id, payload FROM queues
             WHERE queue = ? AND status = 'pending'
             ORDER BY priority DESC, created_at
             LIMIT ?`)
                    .all(queue, count);
                const claim = db.prepare(`UPDATE queues
           SET status = 'processing', claimed_at = datetime('now'), attempts = attempts + 1
           WHERE id = ?`);
                for (const row of rows) {
                    claim.run(row.id);
                }
                db.exec('COMMIT');
                return rows.map((row) => ({
                    id: row.id,
                    payload: JSON.parse(row.payload),
                }));
            }
            catch (err) {
                db.exec('ROLLBACK');
                throw err;
            }
        },
        /** Mark a claimed item done. */
        done(queueItemId) {
            db.prepare(`UPDATE queues SET status = 'done', finished_at = datetime('now') WHERE id = ?`).run(queueItemId);
        },
        /** Mark a claimed item failed, recording an optional error message. */
        fail(queueItemId, error) {
            db.prepare(`UPDATE queues SET status = 'failed', finished_at = datetime('now'), error = ? WHERE id = ?`).run(error ?? null, queueItemId);
        },
        /** Close the underlying DB connection. */
        close() {
            closeConnection(db);
        },
    };
}
|
|
916
|
+
|
|
917
|
+
export { closeConnection, createClient, createConnection, createMaintenance, createNotifier, createRunner, createScheduler, executeJob, jobSchema, runMigrations, runSchema, runStatusSchema, runTriggerSchema, runnerConfigSchema };
|