@karmaniverous/jeeves-runner 0.1.2 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/jeeves-runner/index.js +600 -134
- package/dist/index.d.ts +225 -11
- package/dist/mjs/index.js +831 -187
- package/package.json +2 -1
package/dist/mjs/index.js
CHANGED
|
@@ -1,12 +1,14 @@
|
|
|
1
1
|
import { z } from 'zod';
|
|
2
|
-
import { mkdirSync, readFileSync } from 'node:fs';
|
|
2
|
+
import { mkdirSync, existsSync, readFileSync } from 'node:fs';
|
|
3
3
|
import { pino } from 'pino';
|
|
4
4
|
import Fastify from 'fastify';
|
|
5
5
|
import { dirname, extname } from 'node:path';
|
|
6
6
|
import { DatabaseSync } from 'node:sqlite';
|
|
7
|
+
import { request as request$1 } from 'node:http';
|
|
7
8
|
import { request } from 'node:https';
|
|
8
9
|
import { spawn } from 'node:child_process';
|
|
9
10
|
import { Cron } from 'croner';
|
|
11
|
+
import { JSONPath } from 'jsonpath-plus';
|
|
10
12
|
|
|
11
13
|
/**
|
|
12
14
|
* Runner configuration schema and types.
|
|
@@ -15,30 +17,54 @@ import { Cron } from 'croner';
|
|
|
15
17
|
*/
|
|
16
18
|
/** Notification configuration sub-schema. */
|
|
17
19
|
const notificationsSchema = z.object({
|
|
20
|
+
/** Path to Slack bot token file. */
|
|
18
21
|
slackTokenPath: z.string().optional(),
|
|
22
|
+
/** Default Slack channel ID for failure notifications. */
|
|
19
23
|
defaultOnFailure: z.string().nullable().default(null),
|
|
24
|
+
/** Default Slack channel ID for success notifications. */
|
|
20
25
|
defaultOnSuccess: z.string().nullable().default(null),
|
|
21
26
|
});
|
|
22
27
|
/** Log configuration sub-schema. */
|
|
23
28
|
const logSchema = z.object({
|
|
29
|
+
/** Log level threshold (trace, debug, info, warn, error, fatal). */
|
|
24
30
|
level: z
|
|
25
31
|
.enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal'])
|
|
26
32
|
.default('info'),
|
|
33
|
+
/** Optional log file path. */
|
|
27
34
|
file: z.string().optional(),
|
|
28
35
|
});
|
|
36
|
+
/** Gateway configuration sub-schema. */
|
|
37
|
+
const gatewaySchema = z.object({
|
|
38
|
+
/** OpenClaw Gateway URL. */
|
|
39
|
+
url: z.string().default('http://127.0.0.1:18789'),
|
|
40
|
+
/** Path to file containing Gateway auth token. */
|
|
41
|
+
tokenPath: z.string().optional(),
|
|
42
|
+
});
|
|
29
43
|
/** Full runner configuration schema. Validates and provides defaults. */
|
|
30
44
|
const runnerConfigSchema = z.object({
|
|
31
|
-
port
|
|
45
|
+
/** HTTP server port for the runner API. */
|
|
46
|
+
port: z.number().default(1937),
|
|
47
|
+
/** Path to SQLite database file. */
|
|
32
48
|
dbPath: z.string().default('./data/runner.sqlite'),
|
|
49
|
+
/** Maximum number of concurrent job executions. */
|
|
33
50
|
maxConcurrency: z.number().default(4),
|
|
51
|
+
/** Number of days to retain completed run records. */
|
|
34
52
|
runRetentionDays: z.number().default(30),
|
|
53
|
+
/** Interval in milliseconds for cursor cleanup task. */
|
|
35
54
|
cursorCleanupIntervalMs: z.number().default(3600000),
|
|
55
|
+
/** Grace period in milliseconds for shutdown completion. */
|
|
36
56
|
shutdownGraceMs: z.number().default(30000),
|
|
57
|
+
/** Interval in milliseconds for job reconciliation checks. */
|
|
58
|
+
reconcileIntervalMs: z.number().default(60000),
|
|
59
|
+
/** Notification configuration for job completion events. */
|
|
37
60
|
notifications: notificationsSchema.default({
|
|
38
61
|
defaultOnFailure: null,
|
|
39
62
|
defaultOnSuccess: null,
|
|
40
63
|
}),
|
|
64
|
+
/** Logging configuration. */
|
|
41
65
|
log: logSchema.default({ level: 'info' }),
|
|
66
|
+
/** Gateway configuration for session-type jobs. */
|
|
67
|
+
gateway: gatewaySchema.default({ url: 'http://127.0.0.1:18789' }),
|
|
42
68
|
});
|
|
43
69
|
|
|
44
70
|
/**
|
|
@@ -46,25 +72,63 @@ const runnerConfigSchema = z.object({
|
|
|
46
72
|
*
|
|
47
73
|
* @module
|
|
48
74
|
*/
|
|
75
|
+
/** Job definition schema for scheduled tasks. */
|
|
49
76
|
const jobSchema = z.object({
|
|
77
|
+
/** Unique job identifier. */
|
|
50
78
|
id: z.string(),
|
|
79
|
+
/** Human-readable job name. */
|
|
51
80
|
name: z.string(),
|
|
81
|
+
/** Cron expression defining the job schedule. */
|
|
52
82
|
schedule: z.string(),
|
|
83
|
+
/** Script path or command to execute. */
|
|
53
84
|
script: z.string(),
|
|
85
|
+
/** Job execution type (script or session). */
|
|
54
86
|
type: z.enum(['script', 'session']).default('script'),
|
|
87
|
+
/** Optional job description. */
|
|
55
88
|
description: z.string().optional(),
|
|
89
|
+
/** Whether the job is enabled for scheduling. */
|
|
56
90
|
enabled: z.boolean().default(true),
|
|
91
|
+
/** Optional execution timeout in milliseconds. */
|
|
57
92
|
timeoutMs: z.number().optional(),
|
|
93
|
+
/** Policy for handling overlapping job executions (skip, queue, or allow). */
|
|
58
94
|
overlapPolicy: z.enum(['skip', 'queue', 'allow']).default('skip'),
|
|
95
|
+
/** Slack channel ID for failure notifications. */
|
|
59
96
|
onFailure: z.string().nullable().default(null),
|
|
97
|
+
/** Slack channel ID for success notifications. */
|
|
60
98
|
onSuccess: z.string().nullable().default(null),
|
|
61
99
|
});
|
|
62
100
|
|
|
101
|
+
/**
|
|
102
|
+
* Queue definition schema and types.
|
|
103
|
+
*
|
|
104
|
+
* @module
|
|
105
|
+
*/
|
|
106
|
+
/** Queue definition schema for managing queue behavior and retention. */
|
|
107
|
+
const queueSchema = z.object({
|
|
108
|
+
/** Unique queue identifier. */
|
|
109
|
+
id: z.string(),
|
|
110
|
+
/** Human-readable queue name. */
|
|
111
|
+
name: z.string(),
|
|
112
|
+
/** Optional queue description. */
|
|
113
|
+
description: z.string().nullable().optional(),
|
|
114
|
+
/** JSONPath expression for deduplication key extraction. */
|
|
115
|
+
dedupExpr: z.string().nullable().optional(),
|
|
116
|
+
/** Deduplication scope: 'pending' checks pending/processing items, 'all' checks all non-failed items. */
|
|
117
|
+
dedupScope: z.enum(['pending', 'all']).default('pending'),
|
|
118
|
+
/** Maximum retry attempts before dead-lettering. */
|
|
119
|
+
maxAttempts: z.number().default(1),
|
|
120
|
+
/** Retention period in days for completed/failed items. */
|
|
121
|
+
retentionDays: z.number().default(7),
|
|
122
|
+
/** Queue creation timestamp. */
|
|
123
|
+
createdAt: z.string(),
|
|
124
|
+
});
|
|
125
|
+
|
|
63
126
|
/**
|
|
64
127
|
* Run record schema and types.
|
|
65
128
|
*
|
|
66
129
|
* @module
|
|
67
130
|
*/
|
|
131
|
+
/** Run status enumeration schema (pending, running, ok, error, timeout, skipped). */
|
|
68
132
|
const runStatusSchema = z.enum([
|
|
69
133
|
'pending',
|
|
70
134
|
'running',
|
|
@@ -73,20 +137,35 @@ const runStatusSchema = z.enum([
|
|
|
73
137
|
'timeout',
|
|
74
138
|
'skipped',
|
|
75
139
|
]);
|
|
140
|
+
/** Run trigger type enumeration schema (schedule, manual, retry). */
|
|
76
141
|
const runTriggerSchema = z.enum(['schedule', 'manual', 'retry']);
|
|
142
|
+
/** Run record schema representing a job execution instance. */
|
|
77
143
|
const runSchema = z.object({
|
|
144
|
+
/** Unique run identifier. */
|
|
78
145
|
id: z.number(),
|
|
146
|
+
/** Reference to the parent job ID. */
|
|
79
147
|
jobId: z.string(),
|
|
148
|
+
/** Current execution status. */
|
|
80
149
|
status: runStatusSchema,
|
|
150
|
+
/** ISO timestamp when execution started. */
|
|
81
151
|
startedAt: z.string().optional(),
|
|
152
|
+
/** ISO timestamp when execution finished. */
|
|
82
153
|
finishedAt: z.string().optional(),
|
|
154
|
+
/** Execution duration in milliseconds. */
|
|
83
155
|
durationMs: z.number().optional(),
|
|
156
|
+
/** Process exit code. */
|
|
84
157
|
exitCode: z.number().optional(),
|
|
158
|
+
/** Token count for session-type jobs. */
|
|
85
159
|
tokens: z.number().optional(),
|
|
160
|
+
/** Additional result metadata (JSON string). */
|
|
86
161
|
resultMeta: z.string().optional(),
|
|
162
|
+
/** Error message if execution failed. */
|
|
87
163
|
error: z.string().optional(),
|
|
164
|
+
/** Last N characters of stdout. */
|
|
88
165
|
stdoutTail: z.string().optional(),
|
|
166
|
+
/** Last N characters of stderr. */
|
|
89
167
|
stderrTail: z.string().optional(),
|
|
168
|
+
/** What triggered this run (schedule, manual, or retry). */
|
|
90
169
|
trigger: runTriggerSchema.default('schedule'),
|
|
91
170
|
});
|
|
92
171
|
|
|
@@ -100,7 +179,11 @@ function registerRoutes(app, deps) {
|
|
|
100
179
|
const { db, scheduler } = deps;
|
|
101
180
|
/** GET /health — Health check. */
|
|
102
181
|
app.get('/health', () => {
|
|
103
|
-
return {
|
|
182
|
+
return {
|
|
183
|
+
ok: true,
|
|
184
|
+
uptime: process.uptime(),
|
|
185
|
+
failedRegistrations: scheduler.getFailedRegistrations().length,
|
|
186
|
+
};
|
|
104
187
|
});
|
|
105
188
|
/** GET /jobs — List all jobs with last run status. */
|
|
106
189
|
app.get('/jobs', () => {
|
|
@@ -151,6 +234,7 @@ function registerRoutes(app, deps) {
|
|
|
151
234
|
reply.code(404);
|
|
152
235
|
return { error: 'Job not found' };
|
|
153
236
|
}
|
|
237
|
+
scheduler.reconcileNow();
|
|
154
238
|
return { ok: true };
|
|
155
239
|
});
|
|
156
240
|
/** POST /jobs/:id/disable — Disable a job. */
|
|
@@ -162,6 +246,7 @@ function registerRoutes(app, deps) {
|
|
|
162
246
|
reply.code(404);
|
|
163
247
|
return { error: 'Job not found' };
|
|
164
248
|
}
|
|
249
|
+
scheduler.reconcileNow();
|
|
165
250
|
return { ok: true };
|
|
166
251
|
});
|
|
167
252
|
/** GET /stats — Aggregate job statistics. */
|
|
@@ -170,6 +255,7 @@ function registerRoutes(app, deps) {
|
|
|
170
255
|
.prepare('SELECT COUNT(*) as count FROM jobs')
|
|
171
256
|
.get();
|
|
172
257
|
const runningCount = scheduler.getRunningJobs().length;
|
|
258
|
+
const failedCount = scheduler.getFailedRegistrations().length;
|
|
173
259
|
const okLastHour = db
|
|
174
260
|
.prepare(`SELECT COUNT(*) as count FROM runs
|
|
175
261
|
WHERE status = 'ok' AND started_at > datetime('now', '-1 hour')`)
|
|
@@ -181,6 +267,7 @@ function registerRoutes(app, deps) {
|
|
|
181
267
|
return {
|
|
182
268
|
totalJobs: totalJobs.count,
|
|
183
269
|
running: runningCount,
|
|
270
|
+
failedRegistrations: failedCount,
|
|
184
271
|
okLastHour: okLastHour.count,
|
|
185
272
|
errorsLastHour: errorsLastHour.count,
|
|
186
273
|
};
|
|
@@ -193,19 +280,21 @@ function registerRoutes(app, deps) {
|
|
|
193
280
|
/**
|
|
194
281
|
* Create and configure the Fastify server. Routes are registered but server is not started.
|
|
195
282
|
*/
|
|
196
|
-
function createServer(
|
|
283
|
+
function createServer(deps) {
|
|
197
284
|
const app = Fastify({
|
|
198
|
-
logger:
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
285
|
+
logger: deps.loggerConfig
|
|
286
|
+
? {
|
|
287
|
+
level: deps.loggerConfig.level,
|
|
288
|
+
...(deps.loggerConfig.file
|
|
289
|
+
? {
|
|
290
|
+
transport: {
|
|
291
|
+
target: 'pino/file',
|
|
292
|
+
options: { destination: deps.loggerConfig.file },
|
|
293
|
+
},
|
|
294
|
+
}
|
|
295
|
+
: {}),
|
|
296
|
+
}
|
|
297
|
+
: false,
|
|
209
298
|
});
|
|
210
299
|
registerRoutes(app, deps);
|
|
211
300
|
return app;
|
|
@@ -241,20 +330,36 @@ function closeConnection(db) {
|
|
|
241
330
|
*/
|
|
242
331
|
/** Delete runs older than the configured retention period. */
|
|
243
332
|
function pruneOldRuns(db, days, logger) {
|
|
333
|
+
const cutoffDate = new Date(Date.now() - days * 24 * 60 * 60 * 1000).toISOString();
|
|
244
334
|
const result = db
|
|
245
|
-
.prepare(`DELETE FROM runs WHERE started_at <
|
|
246
|
-
.run();
|
|
335
|
+
.prepare(`DELETE FROM runs WHERE started_at < ?`)
|
|
336
|
+
.run(cutoffDate);
|
|
247
337
|
if (result.changes > 0) {
|
|
248
338
|
logger.info({ deleted: result.changes }, 'Pruned old runs');
|
|
249
339
|
}
|
|
250
340
|
}
|
|
251
|
-
/** Delete expired
|
|
341
|
+
/** Delete expired state entries. */
|
|
252
342
|
function cleanExpiredCursors(db, logger) {
|
|
253
343
|
const result = db
|
|
254
|
-
.prepare(`DELETE FROM
|
|
344
|
+
.prepare(`DELETE FROM state WHERE expires_at IS NOT NULL AND expires_at < datetime('now')`)
|
|
255
345
|
.run();
|
|
256
346
|
if (result.changes > 0) {
|
|
257
|
-
logger.info({ deleted: result.changes }, 'Cleaned expired
|
|
347
|
+
logger.info({ deleted: result.changes }, 'Cleaned expired state entries');
|
|
348
|
+
}
|
|
349
|
+
}
|
|
350
|
+
/** Prune old queue items based on per-queue retention settings. */
|
|
351
|
+
function pruneOldQueueItems(db, logger) {
|
|
352
|
+
const result = db
|
|
353
|
+
.prepare(`DELETE FROM queue_items
|
|
354
|
+
WHERE status IN ('done', 'failed')
|
|
355
|
+
AND finished_at < datetime('now', '-' ||
|
|
356
|
+
COALESCE(
|
|
357
|
+
(SELECT retention_days FROM queues WHERE queues.id = queue_items.queue_id),
|
|
358
|
+
7
|
|
359
|
+
) || ' days')`)
|
|
360
|
+
.run();
|
|
361
|
+
if (result.changes > 0) {
|
|
362
|
+
logger.info({ deleted: result.changes }, 'Pruned old queue items');
|
|
258
363
|
}
|
|
259
364
|
}
|
|
260
365
|
/**
|
|
@@ -265,6 +370,7 @@ function createMaintenance(db, config, logger) {
|
|
|
265
370
|
function runAll() {
|
|
266
371
|
pruneOldRuns(db, config.runRetentionDays, logger);
|
|
267
372
|
cleanExpiredCursors(db, logger);
|
|
373
|
+
pruneOldQueueItems(db, logger);
|
|
268
374
|
}
|
|
269
375
|
return {
|
|
270
376
|
start() {
|
|
@@ -352,9 +458,76 @@ CREATE TABLE IF NOT EXISTS queues (
|
|
|
352
458
|
|
|
353
459
|
CREATE INDEX IF NOT EXISTS idx_queues_poll ON queues(queue, status, priority DESC, created_at);
|
|
354
460
|
`;
|
|
461
|
+
/** Migration 002: Rename queues → queue_items, create queues definition table, add dedup support. */
|
|
462
|
+
const MIGRATION_002 = `
|
|
463
|
+
-- Drop old index first (references 'queue' column)
|
|
464
|
+
DROP INDEX IF EXISTS idx_queues_poll;
|
|
465
|
+
|
|
466
|
+
-- Rename existing queues table to queue_items
|
|
467
|
+
ALTER TABLE queues RENAME TO queue_items;
|
|
468
|
+
|
|
469
|
+
-- Create new queues definition table
|
|
470
|
+
CREATE TABLE queues (
|
|
471
|
+
id TEXT PRIMARY KEY,
|
|
472
|
+
name TEXT NOT NULL,
|
|
473
|
+
description TEXT,
|
|
474
|
+
dedup_expr TEXT,
|
|
475
|
+
dedup_scope TEXT DEFAULT 'pending',
|
|
476
|
+
max_attempts INTEGER DEFAULT 1,
|
|
477
|
+
retention_days INTEGER DEFAULT 7,
|
|
478
|
+
created_at TEXT DEFAULT (datetime('now'))
|
|
479
|
+
);
|
|
480
|
+
|
|
481
|
+
-- Add new columns to queue_items
|
|
482
|
+
ALTER TABLE queue_items ADD COLUMN queue_id TEXT;
|
|
483
|
+
ALTER TABLE queue_items ADD COLUMN dedup_key TEXT;
|
|
484
|
+
|
|
485
|
+
-- Migrate existing queue column to queue_id
|
|
486
|
+
UPDATE queue_items SET queue_id = queue;
|
|
487
|
+
|
|
488
|
+
-- Drop old queue column
|
|
489
|
+
ALTER TABLE queue_items DROP COLUMN queue;
|
|
490
|
+
|
|
491
|
+
-- Create dedup lookup index
|
|
492
|
+
CREATE INDEX idx_queue_items_dedup ON queue_items(queue_id, dedup_key, status);
|
|
493
|
+
|
|
494
|
+
-- Create new poll index
|
|
495
|
+
CREATE INDEX idx_queue_items_poll ON queue_items(queue_id, status, priority DESC, created_at);
|
|
496
|
+
|
|
497
|
+
-- Seed queue definitions
|
|
498
|
+
INSERT INTO queues (id, name, description, dedup_expr, dedup_scope, max_attempts, retention_days) VALUES
|
|
499
|
+
('email-updates', 'Email Update Queue', NULL, NULL, NULL, 1, 7),
|
|
500
|
+
('email-pending', 'Email Pending', NULL, '$.threadId', 'pending', 1, 7),
|
|
501
|
+
('x-posts', 'X Post Queue', NULL, '$.id', 'pending', 1, 7),
|
|
502
|
+
('gh-collabs', 'GH Collab Queue', NULL, '$.full_name', 'pending', 1, 7);
|
|
503
|
+
`;
|
|
504
|
+
/** Migration 003: Rename cursors → state, add state_items table for collection state. */
|
|
505
|
+
const MIGRATION_003 = `
|
|
506
|
+
-- Rename cursors → state
|
|
507
|
+
ALTER TABLE cursors RENAME TO state;
|
|
508
|
+
|
|
509
|
+
-- Rename index
|
|
510
|
+
DROP INDEX IF EXISTS idx_cursors_expires;
|
|
511
|
+
CREATE INDEX idx_state_expires ON state(expires_at) WHERE expires_at IS NOT NULL;
|
|
512
|
+
|
|
513
|
+
-- Create state_items table
|
|
514
|
+
CREATE TABLE state_items (
|
|
515
|
+
namespace TEXT NOT NULL,
|
|
516
|
+
key TEXT NOT NULL,
|
|
517
|
+
item_key TEXT NOT NULL,
|
|
518
|
+
value TEXT,
|
|
519
|
+
created_at TEXT DEFAULT (datetime('now')),
|
|
520
|
+
updated_at TEXT DEFAULT (datetime('now')),
|
|
521
|
+
PRIMARY KEY (namespace, key, item_key),
|
|
522
|
+
FOREIGN KEY (namespace, key) REFERENCES state(namespace, key)
|
|
523
|
+
);
|
|
524
|
+
CREATE INDEX idx_state_items_ns_key ON state_items(namespace, key);
|
|
525
|
+
`;
|
|
355
526
|
/** Registry of all migrations keyed by version number. */
|
|
356
527
|
const MIGRATIONS = {
|
|
357
528
|
1: MIGRATION_001,
|
|
529
|
+
2: MIGRATION_002,
|
|
530
|
+
3: MIGRATION_003,
|
|
358
531
|
};
|
|
359
532
|
/**
|
|
360
533
|
* Run all pending migrations. Creates schema_version table if needed, applies migrations in order.
|
|
@@ -384,38 +557,128 @@ function runMigrations(db) {
|
|
|
384
557
|
}
|
|
385
558
|
|
|
386
559
|
/**
|
|
387
|
-
*
|
|
560
|
+
* Shared HTTP utility for making POST requests.
|
|
388
561
|
*/
|
|
389
|
-
/**
|
|
390
|
-
function
|
|
562
|
+
/** Make an HTTP/HTTPS POST request. Returns the parsed JSON response body. */
|
|
563
|
+
function httpPost(url, headers, body, timeoutMs = 30000) {
|
|
391
564
|
return new Promise((resolve, reject) => {
|
|
392
|
-
const
|
|
393
|
-
const
|
|
565
|
+
const parsedUrl = new URL(url);
|
|
566
|
+
const isHttps = parsedUrl.protocol === 'https:';
|
|
567
|
+
const requestFn = isHttps ? request : request$1;
|
|
568
|
+
const req = requestFn({
|
|
569
|
+
hostname: parsedUrl.hostname,
|
|
570
|
+
port: parsedUrl.port,
|
|
571
|
+
path: parsedUrl.pathname + parsedUrl.search,
|
|
394
572
|
method: 'POST',
|
|
395
573
|
headers: {
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
'Content-Length': Buffer.byteLength(payload),
|
|
574
|
+
...headers,
|
|
575
|
+
'Content-Length': Buffer.byteLength(body),
|
|
399
576
|
},
|
|
577
|
+
timeout: timeoutMs,
|
|
400
578
|
}, (res) => {
|
|
401
|
-
let
|
|
579
|
+
let responseBody = '';
|
|
402
580
|
res.on('data', (chunk) => {
|
|
403
|
-
|
|
581
|
+
responseBody += chunk.toString();
|
|
404
582
|
});
|
|
405
583
|
res.on('end', () => {
|
|
406
|
-
if (res.statusCode
|
|
407
|
-
|
|
584
|
+
if (res.statusCode !== 200) {
|
|
585
|
+
reject(new Error(`HTTP ${String(res.statusCode)}: ${responseBody}`));
|
|
586
|
+
return;
|
|
408
587
|
}
|
|
409
|
-
|
|
410
|
-
|
|
588
|
+
try {
|
|
589
|
+
resolve(JSON.parse(responseBody));
|
|
590
|
+
}
|
|
591
|
+
catch {
|
|
592
|
+
reject(new Error(`Failed to parse JSON response: ${responseBody}`));
|
|
411
593
|
}
|
|
412
594
|
});
|
|
413
595
|
});
|
|
414
596
|
req.on('error', reject);
|
|
415
|
-
req.
|
|
597
|
+
req.on('timeout', () => {
|
|
598
|
+
req.destroy();
|
|
599
|
+
reject(new Error('Request timed out'));
|
|
600
|
+
});
|
|
601
|
+
req.write(body);
|
|
416
602
|
req.end();
|
|
417
603
|
});
|
|
418
604
|
}
|
|
605
|
+
|
|
606
|
+
/**
|
|
607
|
+
* OpenClaw Gateway HTTP client for spawning and monitoring sessions.
|
|
608
|
+
*/
|
|
609
|
+
/** Make an HTTP POST request to the Gateway /tools/invoke endpoint. */
|
|
610
|
+
function invokeGateway(url, token, tool, args, timeoutMs = 30000) {
|
|
611
|
+
const payload = JSON.stringify({ tool, args });
|
|
612
|
+
return httpPost(`${url}/tools/invoke`, {
|
|
613
|
+
'Content-Type': 'application/json',
|
|
614
|
+
Authorization: `Bearer ${token}`,
|
|
615
|
+
}, payload, timeoutMs);
|
|
616
|
+
}
|
|
617
|
+
/** Create a Gateway client. */
|
|
618
|
+
function createGatewayClient(options) {
|
|
619
|
+
const { url, token, timeoutMs = 30000 } = options;
|
|
620
|
+
return {
|
|
621
|
+
async spawnSession(task, opts) {
|
|
622
|
+
const response = (await invokeGateway(url, token, 'sessions_spawn', {
|
|
623
|
+
task,
|
|
624
|
+
label: opts?.label,
|
|
625
|
+
thinking: opts?.thinking,
|
|
626
|
+
runTimeoutSeconds: opts?.runTimeoutSeconds,
|
|
627
|
+
}, timeoutMs));
|
|
628
|
+
if (!response.ok) {
|
|
629
|
+
throw new Error('Failed to spawn session');
|
|
630
|
+
}
|
|
631
|
+
return {
|
|
632
|
+
sessionKey: response.result.details.childSessionKey,
|
|
633
|
+
runId: response.result.details.runId,
|
|
634
|
+
};
|
|
635
|
+
},
|
|
636
|
+
async getSessionHistory(sessionKey, limit = 3) {
|
|
637
|
+
const response = (await invokeGateway(url, token, 'sessions_history', { sessionKey, limit, includeTools: false }, timeoutMs));
|
|
638
|
+
if (!response.ok) {
|
|
639
|
+
throw new Error('Failed to get session history');
|
|
640
|
+
}
|
|
641
|
+
return response.result;
|
|
642
|
+
},
|
|
643
|
+
async getSessionInfo(sessionKey) {
|
|
644
|
+
// Note: sessions_list doesn't support filtering by key, so we fetch recent sessions
|
|
645
|
+
// and search client-side. Consider using sessions_history with limit 1 as alternative,
|
|
646
|
+
// or request a sessions_get tool from Gateway for more efficient single-session lookup.
|
|
647
|
+
const response = (await invokeGateway(url, token, 'sessions_list', { activeMinutes: 120, limit: 500 }, // Increased from 100 to reduce false negatives
|
|
648
|
+
timeoutMs));
|
|
649
|
+
if (!response.ok) {
|
|
650
|
+
throw new Error('Failed to list sessions');
|
|
651
|
+
}
|
|
652
|
+
const session = response.result.find((s) => s.sessionKey === sessionKey);
|
|
653
|
+
if (!session)
|
|
654
|
+
return null;
|
|
655
|
+
return {
|
|
656
|
+
totalTokens: session.totalTokens,
|
|
657
|
+
model: session.model,
|
|
658
|
+
transcriptPath: session.transcriptPath,
|
|
659
|
+
};
|
|
660
|
+
},
|
|
661
|
+
async isSessionComplete(sessionKey) {
|
|
662
|
+
const history = await this.getSessionHistory(sessionKey, 3);
|
|
663
|
+
if (history.length === 0)
|
|
664
|
+
return false;
|
|
665
|
+
const lastMessage = history[history.length - 1];
|
|
666
|
+
return (lastMessage.role === 'assistant' && lastMessage.stopReason !== undefined);
|
|
667
|
+
},
|
|
668
|
+
};
|
|
669
|
+
}
|
|
670
|
+
|
|
671
|
+
/**
|
|
672
|
+
* Slack notification module. Sends job completion/failure messages via Slack Web API (chat.postMessage). Falls back gracefully if no token.
|
|
673
|
+
*/
|
|
674
|
+
/** Post a message to Slack via chat.postMessage API. */
|
|
675
|
+
async function postToSlack(token, channel, text) {
|
|
676
|
+
const payload = JSON.stringify({ channel, text });
|
|
677
|
+
await httpPost('https://slack.com/api/chat.postMessage', {
|
|
678
|
+
'Content-Type': 'application/json',
|
|
679
|
+
Authorization: `Bearer ${token}`,
|
|
680
|
+
}, payload);
|
|
681
|
+
}
|
|
419
682
|
/**
|
|
420
683
|
* Create a notifier that sends Slack messages for job events. If no token, logs warning and returns silently.
|
|
421
684
|
*/
|
|
@@ -507,12 +770,14 @@ function resolveCommand(script) {
|
|
|
507
770
|
* Execute a job script as a child process. Captures output, parses metadata, enforces timeout.
|
|
508
771
|
*/
|
|
509
772
|
function executeJob(options) {
|
|
510
|
-
const { script, dbPath, jobId, runId, timeoutMs } = options;
|
|
773
|
+
const { script, dbPath, jobId, runId, timeoutMs, commandResolver } = options;
|
|
511
774
|
const startTime = Date.now();
|
|
512
775
|
return new Promise((resolve) => {
|
|
513
776
|
const stdoutBuffer = new RingBuffer(100);
|
|
514
777
|
const stderrBuffer = new RingBuffer(100);
|
|
515
|
-
const { command, args } =
|
|
778
|
+
const { command, args } = commandResolver
|
|
779
|
+
? commandResolver(script)
|
|
780
|
+
: resolveCommand(script);
|
|
516
781
|
const child = spawn(command, args, {
|
|
517
782
|
env: {
|
|
518
783
|
...process.env,
|
|
@@ -607,66 +872,289 @@ function executeJob(options) {
|
|
|
607
872
|
});
|
|
608
873
|
}
|
|
609
874
|
|
|
875
|
+
/**
|
|
876
|
+
* Cron registration and reconciliation utilities.
|
|
877
|
+
*/
|
|
878
|
+
function createCronRegistry(deps) {
|
|
879
|
+
const { db, logger, onScheduledRun } = deps;
|
|
880
|
+
const crons = new Map();
|
|
881
|
+
const cronSchedules = new Map();
|
|
882
|
+
const failedRegistrations = new Set();
|
|
883
|
+
function registerCron(job) {
|
|
884
|
+
try {
|
|
885
|
+
const jobId = job.id;
|
|
886
|
+
const cron = new Cron(job.schedule, () => {
|
|
887
|
+
// Re-read job from DB to get current configuration
|
|
888
|
+
const currentJob = db
|
|
889
|
+
.prepare('SELECT * FROM jobs WHERE id = ? AND enabled = 1')
|
|
890
|
+
.get(jobId);
|
|
891
|
+
if (!currentJob) {
|
|
892
|
+
logger.warn({ jobId }, 'Job no longer exists or disabled, skipping');
|
|
893
|
+
return;
|
|
894
|
+
}
|
|
895
|
+
onScheduledRun(currentJob);
|
|
896
|
+
});
|
|
897
|
+
crons.set(job.id, cron);
|
|
898
|
+
cronSchedules.set(job.id, job.schedule);
|
|
899
|
+
failedRegistrations.delete(job.id);
|
|
900
|
+
logger.info({ jobId: job.id, schedule: job.schedule }, 'Scheduled job');
|
|
901
|
+
return true;
|
|
902
|
+
}
|
|
903
|
+
catch (err) {
|
|
904
|
+
logger.error({ jobId: job.id, err }, 'Failed to schedule job');
|
|
905
|
+
failedRegistrations.add(job.id);
|
|
906
|
+
return false;
|
|
907
|
+
}
|
|
908
|
+
}
|
|
909
|
+
function reconcile() {
|
|
910
|
+
const enabledJobs = db
|
|
911
|
+
.prepare('SELECT * FROM jobs WHERE enabled = 1')
|
|
912
|
+
.all();
|
|
913
|
+
const enabledById = new Map(enabledJobs.map((j) => [j.id, j]));
|
|
914
|
+
// Remove disabled/deleted jobs
|
|
915
|
+
for (const [jobId, cron] of crons.entries()) {
|
|
916
|
+
if (!enabledById.has(jobId)) {
|
|
917
|
+
cron.stop();
|
|
918
|
+
crons.delete(jobId);
|
|
919
|
+
cronSchedules.delete(jobId);
|
|
920
|
+
}
|
|
921
|
+
}
|
|
922
|
+
const failedIds = [];
|
|
923
|
+
// Add or update enabled jobs
|
|
924
|
+
for (const job of enabledJobs) {
|
|
925
|
+
const existingCron = crons.get(job.id);
|
|
926
|
+
const existingSchedule = cronSchedules.get(job.id);
|
|
927
|
+
if (!existingCron) {
|
|
928
|
+
if (!registerCron(job))
|
|
929
|
+
failedIds.push(job.id);
|
|
930
|
+
continue;
|
|
931
|
+
}
|
|
932
|
+
if (existingSchedule !== job.schedule) {
|
|
933
|
+
existingCron.stop();
|
|
934
|
+
crons.delete(job.id);
|
|
935
|
+
cronSchedules.delete(job.id);
|
|
936
|
+
if (!registerCron(job))
|
|
937
|
+
failedIds.push(job.id);
|
|
938
|
+
}
|
|
939
|
+
}
|
|
940
|
+
return { totalEnabled: enabledJobs.length, failedIds };
|
|
941
|
+
}
|
|
942
|
+
function stopAll() {
|
|
943
|
+
for (const cron of crons.values()) {
|
|
944
|
+
cron.stop();
|
|
945
|
+
}
|
|
946
|
+
crons.clear();
|
|
947
|
+
cronSchedules.clear();
|
|
948
|
+
}
|
|
949
|
+
return {
|
|
950
|
+
reconcile,
|
|
951
|
+
stopAll,
|
|
952
|
+
getFailedRegistrations() {
|
|
953
|
+
return Array.from(failedRegistrations);
|
|
954
|
+
},
|
|
955
|
+
};
|
|
956
|
+
}
|
|
957
|
+
|
|
958
|
+
/**
|
|
959
|
+
* Notification dispatch helper for job completion events.
|
|
960
|
+
*/
|
|
961
|
+
/** Dispatch notification based on execution result and job configuration. */
|
|
962
|
+
async function dispatchNotification(result, jobName, onSuccess, onFailure, notifier, logger) {
|
|
963
|
+
if (result.status === 'ok' && onSuccess) {
|
|
964
|
+
await notifier
|
|
965
|
+
.notifySuccess(jobName, result.durationMs, onSuccess)
|
|
966
|
+
.catch((err) => {
|
|
967
|
+
logger.error({ jobName, err }, 'Success notification failed');
|
|
968
|
+
});
|
|
969
|
+
}
|
|
970
|
+
else if (result.status !== 'ok' && onFailure) {
|
|
971
|
+
await notifier
|
|
972
|
+
.notifyFailure(jobName, result.durationMs, result.error, onFailure)
|
|
973
|
+
.catch((err) => {
|
|
974
|
+
logger.error({ jobName, err }, 'Failure notification failed');
|
|
975
|
+
});
|
|
976
|
+
}
|
|
977
|
+
}
|
|
978
|
+
|
|
979
|
+
/**
|
|
980
|
+
* Run record repository for managing job execution records.
|
|
981
|
+
*/
|
|
982
|
+
/** Create a run repository for the given database connection. */
|
|
983
|
+
function createRunRepository(db) {
|
|
984
|
+
return {
|
|
985
|
+
createRun(jobId, trigger) {
|
|
986
|
+
const result = db
|
|
987
|
+
.prepare(`INSERT INTO runs (job_id, status, started_at, trigger)
|
|
988
|
+
VALUES (?, 'running', datetime('now'), ?)`)
|
|
989
|
+
.run(jobId, trigger);
|
|
990
|
+
return result.lastInsertRowid;
|
|
991
|
+
},
|
|
992
|
+
finishRun(runId, execResult) {
|
|
993
|
+
db.prepare(`UPDATE runs SET status = ?, finished_at = datetime('now'), duration_ms = ?,
|
|
994
|
+
exit_code = ?, tokens = ?, result_meta = ?, error = ?, stdout_tail = ?, stderr_tail = ?
|
|
995
|
+
WHERE id = ?`).run(execResult.status, execResult.durationMs, execResult.exitCode, execResult.tokens, execResult.resultMeta, execResult.error, execResult.stdoutTail, execResult.stderrTail, runId);
|
|
996
|
+
},
|
|
997
|
+
};
|
|
998
|
+
}
|
|
999
|
+
|
|
1000
|
+
/**
|
|
1001
|
+
* Session executor for job type='session'. Spawns OpenClaw Gateway sessions and polls for completion.
|
|
1002
|
+
*/
|
|
1003
|
+
/** File extensions that indicate a script rather than a prompt. */
|
|
1004
|
+
const SCRIPT_EXTENSIONS = ['.js', '.mjs', '.cjs', '.ps1', '.cmd', '.bat'];
|
|
1005
|
+
/** Resolve task prompt from script field: read file if .md/.txt, return raw text otherwise. */
|
|
1006
|
+
function resolveTaskPrompt(script) {
|
|
1007
|
+
const ext = extname(script).toLowerCase();
|
|
1008
|
+
// If script extension, caller should fall back to script executor
|
|
1009
|
+
if (SCRIPT_EXTENSIONS.includes(ext)) {
|
|
1010
|
+
return null;
|
|
1011
|
+
}
|
|
1012
|
+
// If .md or .txt, read file contents
|
|
1013
|
+
if (ext === '.md' || ext === '.txt') {
|
|
1014
|
+
if (!existsSync(script)) {
|
|
1015
|
+
throw new Error(`Prompt file not found: ${script}`);
|
|
1016
|
+
}
|
|
1017
|
+
return readFileSync(script, 'utf-8');
|
|
1018
|
+
}
|
|
1019
|
+
// Otherwise, treat script as raw prompt text
|
|
1020
|
+
return script;
|
|
1021
|
+
}
|
|
1022
|
+
/** Poll for session completion with exponential backoff (capped). */
|
|
1023
|
+
async function pollCompletion(gatewayClient, sessionKey, timeoutMs, initialIntervalMs = 5000) {
|
|
1024
|
+
const startTime = Date.now();
|
|
1025
|
+
let interval = initialIntervalMs;
|
|
1026
|
+
const maxInterval = 15000;
|
|
1027
|
+
while (Date.now() - startTime < timeoutMs) {
|
|
1028
|
+
const isComplete = await gatewayClient.isSessionComplete(sessionKey);
|
|
1029
|
+
if (isComplete)
|
|
1030
|
+
return;
|
|
1031
|
+
await new Promise((resolve) => setTimeout(resolve, interval));
|
|
1032
|
+
interval = Math.min(interval * 1.2, maxInterval); // Exponential backoff capped
|
|
1033
|
+
}
|
|
1034
|
+
throw new Error(`Session timed out after ${String(timeoutMs)}ms`);
|
|
1035
|
+
}
|
|
1036
|
+
/**
 * Execute a session job: spawn a Gateway session, poll for completion, fetch token usage.
 *
 * Returns a run result object; never rejects — failures are reported via the
 * `status`/`error` fields ('timeout' when the poll deadline elapsed, 'error' otherwise).
 */
async function executeSession(options) {
    const { script, jobId, timeoutMs = 300000, gatewayClient, pollIntervalMs, } = options;
    const startTime = Date.now();
    try {
        // Resolve the prompt text; session jobs must not point at executable scripts.
        const taskPrompt = resolveTaskPrompt(script);
        if (taskPrompt === null) {
            throw new Error('Session job script has script extension; expected prompt text or .md/.txt file');
        }
        // Spawn the Gateway session, labeled with the job id for traceability.
        const spawned = await gatewayClient.spawnSession(taskPrompt, {
            label: jobId,
            thinking: 'low',
            runTimeoutSeconds: Math.floor(timeoutMs / 1000),
        });
        const { sessionKey } = spawned;
        // Block until the session finishes (or pollCompletion throws on timeout).
        await pollCompletion(gatewayClient, sessionKey, timeoutMs, pollIntervalMs);
        // Token usage comes from the post-completion session info, when available.
        const sessionInfo = await gatewayClient.getSessionInfo(sessionKey);
        return {
            status: 'ok',
            exitCode: null,
            durationMs: Date.now() - startTime,
            tokens: sessionInfo?.totalTokens ?? null,
            resultMeta: sessionKey,
            stdoutTail: `Session completed: ${sessionKey}`,
            stderrTail: '',
            error: null,
        };
    }
    catch (err) {
        const durationMs = Date.now() - startTime;
        const errorMessage = err instanceof Error ? err.message : 'Unknown session error';
        // pollCompletion signals deadline expiry via a "timed out" message.
        const status = errorMessage.includes('timed out') ? 'timeout' : 'error';
        return {
            status,
            exitCode: null,
            durationMs,
            tokens: null,
            resultMeta: null,
            stdoutTail: '',
            stderrTail: errorMessage,
            error: errorMessage,
        };
    }
}
|
|
1099
|
+
|
|
610
1100
|
/**
|
|
611
1101
|
* Croner-based job scheduler. Loads enabled jobs, creates cron instances, manages execution, respects overlap policies and concurrency limits.
|
|
612
1102
|
*/
|
|
1103
|
+
// JobRow is imported from cron-registry
|
|
613
1104
|
/**
|
|
614
1105
|
* Create the job scheduler. Manages cron schedules, job execution, overlap policies, and notifications.
|
|
615
1106
|
*/
|
|
616
1107
|
function createScheduler(deps) {
|
|
617
|
-
const { db, executor, notifier, config, logger } = deps;
|
|
618
|
-
const crons = new Map();
|
|
1108
|
+
const { db, executor, notifier, config, logger, gatewayClient } = deps;
|
|
619
1109
|
const runningJobs = new Set();
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
function finishRun(runId, execResult) {
|
|
630
|
-
db.prepare(`UPDATE runs SET status = ?, finished_at = datetime('now'), duration_ms = ?,
|
|
631
|
-
exit_code = ?, tokens = ?, result_meta = ?, error = ?, stdout_tail = ?, stderr_tail = ?
|
|
632
|
-
WHERE id = ?`).run(execResult.status, execResult.durationMs, execResult.exitCode, execResult.tokens, execResult.resultMeta, execResult.error, execResult.stdoutTail, execResult.stderrTail, runId);
|
|
633
|
-
}
|
|
1110
|
+
const cronRegistry = createCronRegistry({
|
|
1111
|
+
db,
|
|
1112
|
+
logger,
|
|
1113
|
+
onScheduledRun: (job) => {
|
|
1114
|
+
void onScheduledRun(job);
|
|
1115
|
+
},
|
|
1116
|
+
});
|
|
1117
|
+
const runRepository = createRunRepository(db);
|
|
1118
|
+
let reconcileInterval = null;
|
|
634
1119
|
/** Execute a job: create run record, run script, update record, send notifications. */
|
|
635
1120
|
async function runJob(job, trigger) {
|
|
636
|
-
const { id, name, script, timeout_ms, on_success, on_failure } = job;
|
|
1121
|
+
const { id, name, script, type, timeout_ms, on_success, on_failure } = job;
|
|
637
1122
|
// Check concurrency limit
|
|
638
1123
|
if (runningJobs.size >= config.maxConcurrency) {
|
|
639
1124
|
logger.warn({ jobId: id }, 'Max concurrency reached, skipping job');
|
|
640
1125
|
throw new Error('Max concurrency reached');
|
|
641
1126
|
}
|
|
642
1127
|
runningJobs.add(id);
|
|
643
|
-
const runId = createRun(id, trigger);
|
|
644
|
-
logger.info({ jobId: id, runId, trigger }, 'Starting job');
|
|
1128
|
+
const runId = runRepository.createRun(id, trigger);
|
|
1129
|
+
logger.info({ jobId: id, runId, trigger, type }, 'Starting job');
|
|
645
1130
|
try {
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
await notifier
|
|
658
|
-
.notifySuccess(name, result.durationMs, on_success)
|
|
659
|
-
.catch((err) => {
|
|
660
|
-
logger.error({ jobId: id, err }, 'Notification failed');
|
|
1131
|
+
let result;
|
|
1132
|
+
// Route based on job type
|
|
1133
|
+
if (type === 'session') {
|
|
1134
|
+
if (!gatewayClient) {
|
|
1135
|
+
throw new Error('Session job requires Gateway client (gateway.tokenPath not configured)');
|
|
1136
|
+
}
|
|
1137
|
+
result = await executeSession({
|
|
1138
|
+
script,
|
|
1139
|
+
jobId: id,
|
|
1140
|
+
timeoutMs: timeout_ms ?? undefined,
|
|
1141
|
+
gatewayClient,
|
|
661
1142
|
});
|
|
662
1143
|
}
|
|
663
|
-
else
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
1144
|
+
else {
|
|
1145
|
+
// Default to script executor
|
|
1146
|
+
result = await executor({
|
|
1147
|
+
script,
|
|
1148
|
+
dbPath: config.dbPath,
|
|
1149
|
+
jobId: id,
|
|
1150
|
+
runId,
|
|
1151
|
+
timeoutMs: timeout_ms ?? undefined,
|
|
668
1152
|
});
|
|
669
1153
|
}
|
|
1154
|
+
runRepository.finishRun(runId, result);
|
|
1155
|
+
logger.info({ jobId: id, runId, status: result.status }, 'Job finished');
|
|
1156
|
+
// Send notifications
|
|
1157
|
+
await dispatchNotification(result, name, on_success, on_failure, notifier, logger);
|
|
670
1158
|
return result;
|
|
671
1159
|
}
|
|
672
1160
|
finally {
|
|
@@ -682,63 +1170,53 @@ function createScheduler(deps) {
|
|
|
682
1170
|
logger.info({ jobId: id }, 'Job already running, skipping (overlap_policy=skip)');
|
|
683
1171
|
return;
|
|
684
1172
|
}
|
|
685
|
-
else if (overlap_policy === 'queue') {
|
|
686
|
-
logger.info({ jobId: id }, 'Job already running, queueing (overlap_policy=queue)');
|
|
687
|
-
// In a real implementation, we'd queue this. For now, just skip.
|
|
688
|
-
return;
|
|
689
|
-
}
|
|
690
1173
|
// 'allow' policy: proceed
|
|
691
1174
|
}
|
|
692
1175
|
await runJob(job, 'schedule').catch((err) => {
|
|
693
1176
|
logger.error({ jobId: id, err }, 'Job execution failed');
|
|
694
1177
|
});
|
|
695
1178
|
}
|
|
1179
|
+
// Cron registration and reconciliation are handled by cronRegistry.
|
|
696
1180
|
return {
|
|
697
1181
|
start() {
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
.
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
// Re-read job from DB to get current configuration
|
|
708
|
-
const currentJob = db
|
|
709
|
-
.prepare('SELECT * FROM jobs WHERE id = ? AND enabled = 1')
|
|
710
|
-
.get(jobId);
|
|
711
|
-
if (!currentJob) {
|
|
712
|
-
logger.warn({ jobId }, 'Job no longer exists or disabled, skipping');
|
|
713
|
-
return;
|
|
714
|
-
}
|
|
715
|
-
void onScheduledRun(currentJob);
|
|
716
|
-
});
|
|
717
|
-
crons.set(job.id, cron);
|
|
718
|
-
logger.info({ jobId: job.id, schedule: job.schedule }, 'Scheduled job');
|
|
719
|
-
}
|
|
720
|
-
catch (err) {
|
|
721
|
-
logger.error({ jobId: job.id, err }, 'Failed to schedule job');
|
|
1182
|
+
const { totalEnabled, failedIds } = cronRegistry.reconcile();
|
|
1183
|
+
logger.info({ count: totalEnabled }, 'Loading jobs');
|
|
1184
|
+
if (failedIds.length > 0) {
|
|
1185
|
+
const ok = totalEnabled - failedIds.length;
|
|
1186
|
+
logger.warn({ failed: failedIds.length, total: totalEnabled }, `${String(failedIds.length)} of ${String(totalEnabled)} jobs failed to register`);
|
|
1187
|
+
const message = `⚠️ jeeves-runner started: ${String(ok)}/${String(totalEnabled)} jobs scheduled, ${String(failedIds.length)} failed: ${failedIds.join(', ')}`;
|
|
1188
|
+
const channel = config.notifications.defaultOnFailure;
|
|
1189
|
+
if (channel) {
|
|
1190
|
+
void notifier.notifyFailure('jeeves-runner', 0, message, channel);
|
|
722
1191
|
}
|
|
723
1192
|
}
|
|
1193
|
+
if (reconcileInterval === null && config.reconcileIntervalMs > 0) {
|
|
1194
|
+
reconcileInterval = setInterval(() => {
|
|
1195
|
+
try {
|
|
1196
|
+
cronRegistry.reconcile();
|
|
1197
|
+
}
|
|
1198
|
+
catch (err) {
|
|
1199
|
+
logger.error({ err }, 'Reconciliation failed');
|
|
1200
|
+
}
|
|
1201
|
+
}, config.reconcileIntervalMs);
|
|
1202
|
+
}
|
|
724
1203
|
},
|
|
725
|
-
stop() {
|
|
1204
|
+
async stop() {
|
|
726
1205
|
logger.info('Stopping scheduler');
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
1206
|
+
if (reconcileInterval) {
|
|
1207
|
+
clearInterval(reconcileInterval);
|
|
1208
|
+
reconcileInterval = null;
|
|
730
1209
|
}
|
|
731
|
-
crons
|
|
732
|
-
|
|
1210
|
+
// Stop all crons
|
|
1211
|
+
cronRegistry.stopAll();
|
|
1212
|
+
// Wait for running jobs (with timeout)
|
|
733
1213
|
const deadline = Date.now() + config.shutdownGraceMs;
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
}
|
|
741
|
-
}, 100);
|
|
1214
|
+
while (runningJobs.size > 0 && Date.now() < deadline) {
|
|
1215
|
+
await new Promise((resolve) => setTimeout(resolve, 100));
|
|
1216
|
+
}
|
|
1217
|
+
if (runningJobs.size > 0) {
|
|
1218
|
+
logger.warn({ count: runningJobs.size }, 'Forced shutdown with running jobs');
|
|
1219
|
+
}
|
|
742
1220
|
},
|
|
743
1221
|
async triggerJob(jobId) {
|
|
744
1222
|
const job = db.prepare('SELECT * FROM jobs WHERE id = ?').get(jobId);
|
|
@@ -746,9 +1224,15 @@ function createScheduler(deps) {
|
|
|
746
1224
|
throw new Error(`Job not found: ${jobId}`);
|
|
747
1225
|
return runJob(job, 'manual');
|
|
748
1226
|
},
|
|
1227
|
+
reconcileNow() {
|
|
1228
|
+
cronRegistry.reconcile();
|
|
1229
|
+
},
|
|
749
1230
|
getRunningJobs() {
|
|
750
1231
|
return Array.from(runningJobs);
|
|
751
1232
|
},
|
|
1233
|
+
getFailedRegistrations() {
|
|
1234
|
+
return cronRegistry.getFailedRegistrations();
|
|
1235
|
+
},
|
|
752
1236
|
};
|
|
753
1237
|
}
|
|
754
1238
|
|
|
@@ -758,22 +1242,23 @@ function createScheduler(deps) {
|
|
|
758
1242
|
/**
|
|
759
1243
|
* Create the runner. Initializes database, scheduler, API server, and sets up graceful shutdown.
|
|
760
1244
|
*/
|
|
761
|
-
function createRunner(config) {
|
|
1245
|
+
function createRunner(config, deps) {
|
|
762
1246
|
let db = null;
|
|
763
1247
|
let scheduler = null;
|
|
764
1248
|
let server = null;
|
|
765
1249
|
let maintenance = null;
|
|
766
|
-
const logger =
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
1250
|
+
const logger = deps?.logger ??
|
|
1251
|
+
pino({
|
|
1252
|
+
level: config.log.level,
|
|
1253
|
+
...(config.log.file
|
|
1254
|
+
? {
|
|
1255
|
+
transport: {
|
|
1256
|
+
target: 'pino/file',
|
|
1257
|
+
options: { destination: config.log.file },
|
|
1258
|
+
},
|
|
1259
|
+
}
|
|
1260
|
+
: {}),
|
|
1261
|
+
});
|
|
777
1262
|
return {
|
|
778
1263
|
async start() {
|
|
779
1264
|
logger.info('Starting runner');
|
|
@@ -786,6 +1271,19 @@ function createRunner(config) {
|
|
|
786
1271
|
? readFileSync(config.notifications.slackTokenPath, 'utf-8').trim()
|
|
787
1272
|
: null;
|
|
788
1273
|
const notifier = createNotifier({ slackToken });
|
|
1274
|
+
// Gateway client (optional, for session-type jobs)
|
|
1275
|
+
const gatewayToken = config.gateway.tokenPath
|
|
1276
|
+
? readFileSync(config.gateway.tokenPath, 'utf-8').trim()
|
|
1277
|
+
: (process.env.OPENCLAW_GATEWAY_TOKEN ?? null);
|
|
1278
|
+
const gatewayClient = gatewayToken && config.gateway.url
|
|
1279
|
+
? createGatewayClient({
|
|
1280
|
+
url: config.gateway.url,
|
|
1281
|
+
token: gatewayToken,
|
|
1282
|
+
})
|
|
1283
|
+
: undefined;
|
|
1284
|
+
if (gatewayClient) {
|
|
1285
|
+
logger.info('Gateway client initialized');
|
|
1286
|
+
}
|
|
789
1287
|
// Maintenance (run retention pruning + cursor cleanup)
|
|
790
1288
|
maintenance = createMaintenance(db, {
|
|
791
1289
|
runRetentionDays: config.runRetentionDays,
|
|
@@ -800,11 +1298,16 @@ function createRunner(config) {
|
|
|
800
1298
|
notifier,
|
|
801
1299
|
config,
|
|
802
1300
|
logger,
|
|
1301
|
+
gatewayClient,
|
|
803
1302
|
});
|
|
804
1303
|
scheduler.start();
|
|
805
1304
|
logger.info('Scheduler started');
|
|
806
1305
|
// API server
|
|
807
|
-
server = createServer(
|
|
1306
|
+
server = createServer({
|
|
1307
|
+
db,
|
|
1308
|
+
scheduler,
|
|
1309
|
+
loggerConfig: { level: config.log.level, file: config.log.file },
|
|
1310
|
+
});
|
|
808
1311
|
await server.listen({ port: config.port, host: '127.0.0.1' });
|
|
809
1312
|
logger.info({ port: config.port }, 'API server listening');
|
|
810
1313
|
// Graceful shutdown
|
|
@@ -827,7 +1330,7 @@ function createRunner(config) {
|
|
|
827
1330
|
logger.info('Maintenance stopped');
|
|
828
1331
|
}
|
|
829
1332
|
if (scheduler) {
|
|
830
|
-
scheduler.stop();
|
|
1333
|
+
await scheduler.stop();
|
|
831
1334
|
logger.info('Scheduler stopped');
|
|
832
1335
|
}
|
|
833
1336
|
if (server) {
|
|
@@ -843,7 +1346,109 @@ function createRunner(config) {
|
|
|
843
1346
|
}
|
|
844
1347
|
|
|
845
1348
|
/**
|
|
846
|
-
*
|
|
1349
|
+
* Queue operations module for runner client. Provides enqueue, dequeue, done, and fail operations.
|
|
1350
|
+
*/
|
|
1351
|
+
/** Create queue operations (enqueue/dequeue/done/fail) for the given database connection. */
function createQueueOps(db) {
    /** Evaluate a queue's JSONPath dedup expression against a payload; null disables dedup. */
    const computeDedupKey = (expr, payload) => {
        try {
            const matches = JSONPath({ path: expr, json: payload });
            return Array.isArray(matches) && matches.length > 0 ? String(matches[0]) : null;
        }
        catch {
            // A malformed expression disables dedup rather than failing the enqueue.
            return null;
        }
    };
    return {
        enqueue(queue, payload, options) {
            const payloadJson = JSON.stringify(payload);
            const priority = options?.priority ?? 0;
            // The queue definition supplies dedup rules and the default attempt budget.
            const queueDef = db
                .prepare('SELECT dedup_expr, dedup_scope, max_attempts FROM queues WHERE id = ?')
                .get(queue);
            const maxAttempts = options?.maxAttempts ?? queueDef?.max_attempts ?? 1;
            let dedupKey = null;
            if (queueDef?.dedup_expr) {
                dedupKey = computeDedupKey(queueDef.dedup_expr, payload);
                if (dedupKey) {
                    // 'pending' scope dedups against live items only; anything else also counts done items.
                    const scope = queueDef.dedup_scope || 'pending';
                    const statusFilter = scope === 'pending'
                        ? "status IN ('pending', 'processing')"
                        : "status IN ('pending', 'processing', 'done')";
                    const duplicate = db
                        .prepare(`SELECT id FROM queue_items
              WHERE queue_id = ? AND dedup_key = ? AND ${statusFilter}
              LIMIT 1`)
                        .get(queue, dedupKey);
                    if (duplicate) {
                        return -1; // Duplicate found, skip enqueue
                    }
                }
            }
            const inserted = db
                .prepare('INSERT INTO queue_items (queue_id, payload, priority, max_attempts, dedup_key) VALUES (?, ?, ?, ?, ?)')
                .run(queue, payloadJson, priority, maxAttempts, dedupKey);
            return inserted.lastInsertRowid;
        },
        dequeue(queue, count = 1) {
            // SELECT + UPDATE are wrapped in one transaction so concurrent workers
            // cannot claim the same items.
            db.exec('BEGIN');
            try {
                const claimed = db
                    .prepare(`SELECT id, payload FROM queue_items
            WHERE queue_id = ? AND status = 'pending'
            ORDER BY priority DESC, created_at
            LIMIT ?`)
                    .all(queue, count);
                const claimStmt = db.prepare(`UPDATE queue_items
            SET status = 'processing', claimed_at = datetime('now'), attempts = attempts + 1
            WHERE id = ?`);
                for (const { id } of claimed) {
                    claimStmt.run(id);
                }
                db.exec('COMMIT');
                return claimed.map(({ id, payload }) => ({
                    id,
                    payload: JSON.parse(payload),
                }));
            }
            catch (err) {
                db.exec('ROLLBACK');
                throw err;
            }
        },
        done(queueItemId) {
            db.prepare(`UPDATE queue_items SET status = 'done', finished_at = datetime('now') WHERE id = ?`).run(queueItemId);
        },
        fail(queueItemId, error) {
            const item = db
                .prepare('SELECT attempts, max_attempts FROM queue_items WHERE id = ?')
                .get(queueItemId);
            if (!item)
                return;
            if (item.attempts < item.max_attempts) {
                // Attempts remain: reset to pending so the item is retried.
                db.prepare(`UPDATE queue_items SET status = 'pending', error = ? WHERE id = ?`).run(error ?? null, queueItemId);
            }
            else {
                // Attempt budget exhausted: dead-letter the item.
                db.prepare(`UPDATE queue_items SET status = 'failed', finished_at = datetime('now'), error = ? WHERE id = ?`).run(error ?? null, queueItemId);
            }
        },
    };
}
|
|
1449
|
+
|
|
1450
|
+
/**
|
|
1451
|
+
* State operations module for runner client. Provides scalar state (key-value) and collection state (grouped items).
|
|
847
1452
|
*/
|
|
848
1453
|
/** Parse TTL string (e.g., '30d', '24h', '60m') into ISO datetime offset from now. */
|
|
849
1454
|
function parseTtl(ttl) {
|
|
@@ -855,34 +1460,28 @@ function parseTtl(ttl) {
|
|
|
855
1460
|
if (!amount || !unit)
|
|
856
1461
|
throw new Error(`Invalid TTL format: ${ttl}`);
|
|
857
1462
|
const num = parseInt(amount, 10);
|
|
858
|
-
let
|
|
1463
|
+
let ms;
|
|
859
1464
|
switch (unit) {
|
|
860
1465
|
case 'd':
|
|
861
|
-
|
|
1466
|
+
ms = num * 24 * 60 * 60 * 1000;
|
|
862
1467
|
break;
|
|
863
1468
|
case 'h':
|
|
864
|
-
|
|
1469
|
+
ms = num * 60 * 60 * 1000;
|
|
865
1470
|
break;
|
|
866
1471
|
case 'm':
|
|
867
|
-
|
|
1472
|
+
ms = num * 60 * 1000;
|
|
868
1473
|
break;
|
|
869
1474
|
default:
|
|
870
1475
|
throw new Error(`Unknown TTL unit: ${unit}`);
|
|
871
1476
|
}
|
|
872
|
-
return
|
|
1477
|
+
return new Date(Date.now() + ms).toISOString();
|
|
873
1478
|
}
|
|
874
|
-
/**
|
|
875
|
-
|
|
876
|
-
*/
|
|
877
|
-
function createClient(dbPath) {
|
|
878
|
-
const path = dbPath ?? process.env.JR_DB_PATH;
|
|
879
|
-
if (!path)
|
|
880
|
-
throw new Error('DB path required (provide dbPath or set JR_DB_PATH env var)');
|
|
881
|
-
const db = createConnection(path);
|
|
1479
|
+
/** Create state operations for the given database connection. */
|
|
1480
|
+
function createStateOps(db) {
|
|
882
1481
|
return {
|
|
883
1482
|
getCursor(namespace, key) {
|
|
884
1483
|
const row = db
|
|
885
|
-
.prepare(`SELECT value FROM
|
|
1484
|
+
.prepare(`SELECT value FROM state
|
|
886
1485
|
WHERE namespace = ? AND key = ?
|
|
887
1486
|
AND (expires_at IS NULL OR expires_at > datetime('now'))`)
|
|
888
1487
|
.get(namespace, key);
|
|
@@ -890,55 +1489,100 @@ function createClient(dbPath) {
|
|
|
890
1489
|
},
|
|
891
1490
|
setCursor(namespace, key, value, options) {
|
|
892
1491
|
const expiresAt = options?.ttl ? parseTtl(options.ttl) : null;
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
ON CONFLICT(namespace, key) DO UPDATE SET value = excluded.value, expires_at = excluded.expires_at, updated_at = datetime('now')`
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
1492
|
+
if (expiresAt) {
|
|
1493
|
+
db.prepare(`INSERT INTO state (namespace, key, value, expires_at) VALUES (?, ?, ?, ?)
|
|
1494
|
+
ON CONFLICT(namespace, key) DO UPDATE SET value = excluded.value, expires_at = excluded.expires_at, updated_at = datetime('now')`).run(namespace, key, value, expiresAt);
|
|
1495
|
+
}
|
|
1496
|
+
else {
|
|
1497
|
+
db.prepare(`INSERT INTO state (namespace, key, value) VALUES (?, ?, ?)
|
|
1498
|
+
ON CONFLICT(namespace, key) DO UPDATE SET value = excluded.value, updated_at = datetime('now')`).run(namespace, key, value);
|
|
1499
|
+
}
|
|
899
1500
|
},
|
|
900
1501
|
deleteCursor(namespace, key) {
|
|
901
|
-
db.prepare('DELETE FROM
|
|
1502
|
+
db.prepare('DELETE FROM state WHERE namespace = ? AND key = ?').run(namespace, key);
|
|
902
1503
|
},
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
const maxAttempts = options?.maxAttempts ?? 1;
|
|
906
|
-
const payloadJson = JSON.stringify(payload);
|
|
907
|
-
const result = db
|
|
908
|
-
.prepare('INSERT INTO queues (queue, payload, priority, max_attempts) VALUES (?, ?, ?, ?)')
|
|
909
|
-
.run(queue, payloadJson, priority, maxAttempts);
|
|
910
|
-
return result.lastInsertRowid;
|
|
1504
|
+
getState(namespace, key) {
|
|
1505
|
+
return this.getCursor(namespace, key);
|
|
911
1506
|
},
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
const rows = db
|
|
915
|
-
.prepare(`SELECT id, payload FROM queues
|
|
916
|
-
WHERE queue = ? AND status = 'pending'
|
|
917
|
-
ORDER BY priority DESC, created_at
|
|
918
|
-
LIMIT ?`)
|
|
919
|
-
.all(queue, count);
|
|
920
|
-
// Then UPDATE each one to claim it
|
|
921
|
-
const updateStmt = db.prepare(`UPDATE queues
|
|
922
|
-
SET status = 'processing', claimed_at = datetime('now'), attempts = attempts + 1
|
|
923
|
-
WHERE id = ?`);
|
|
924
|
-
for (const row of rows) {
|
|
925
|
-
updateStmt.run(row.id);
|
|
926
|
-
}
|
|
927
|
-
return rows.map((row) => ({
|
|
928
|
-
id: row.id,
|
|
929
|
-
payload: JSON.parse(row.payload),
|
|
930
|
-
}));
|
|
1507
|
+
setState(namespace, key, value, options) {
|
|
1508
|
+
this.setCursor(namespace, key, value, options);
|
|
931
1509
|
},
|
|
932
|
-
|
|
933
|
-
|
|
1510
|
+
deleteState(namespace, key) {
|
|
1511
|
+
this.deleteCursor(namespace, key);
|
|
934
1512
|
},
|
|
935
|
-
|
|
936
|
-
|
|
1513
|
+
};
|
|
1514
|
+
}
|
|
1515
|
+
/**
 * Create collection state operations (grouped items under a namespace/key) for
 * the given database connection.
 *
 * Items live in `state_items`; a parent row in `state` is auto-created on the
 * first `setItem` so foreign-key-style grouping stays consistent.
 */
function createCollectionOps(db) {
    return {
        /** True when an item with this item_key exists under namespace/key. */
        hasItem(namespace, key, itemKey) {
            const row = db
                .prepare('SELECT 1 FROM state_items WHERE namespace = ? AND key = ? AND item_key = ?')
                .get(namespace, key, itemKey);
            return row !== undefined;
        },
        /** Fetch an item's value; null when absent (or when the stored value is NULL). */
        getItem(namespace, key, itemKey) {
            const row = db
                .prepare('SELECT value FROM state_items WHERE namespace = ? AND key = ? AND item_key = ?')
                .get(namespace, key, itemKey);
            return row?.value ?? null;
        },
        /** Upsert an item's value; refreshes updated_at on conflict. */
        setItem(namespace, key, itemKey, value) {
            // Auto-create parent state row if it doesn't exist
            db.prepare(`INSERT OR IGNORE INTO state (namespace, key, value) VALUES (?, ?, NULL)`).run(namespace, key);
            db.prepare(`INSERT INTO state_items (namespace, key, item_key, value) VALUES (?, ?, ?, ?)
        ON CONFLICT(namespace, key, item_key) DO UPDATE SET value = excluded.value, updated_at = datetime('now')`).run(namespace, key, itemKey, value ?? null);
        },
        /** Remove a single item; no-op when absent. */
        deleteItem(namespace, key, itemKey) {
            db.prepare('DELETE FROM state_items WHERE namespace = ? AND key = ? AND item_key = ?').run(namespace, key, itemKey);
        },
        /** Count items under namespace/key. */
        countItems(namespace, key) {
            const row = db
                .prepare('SELECT COUNT(*) as count FROM state_items WHERE namespace = ? AND key = ?')
                .get(namespace, key);
            return row?.count ?? 0;
        },
        /** Delete all but the keepCount most-recently-updated items; returns rows deleted. */
        pruneItems(namespace, key, keepCount) {
            const result = db
                .prepare(`DELETE FROM state_items WHERE namespace = ? AND key = ? AND rowid NOT IN (SELECT rowid FROM state_items WHERE namespace = ? AND key = ? ORDER BY updated_at DESC LIMIT ?)`)
                .run(namespace, key, namespace, key, keepCount);
            return result.changes;
        },
        /**
         * List item keys ordered by updated_at (default DESC; pass order: 'asc' for ASC).
         * An options.limit of 0/undefined returns all keys (matches prior truthy-check behavior).
         */
        listItemKeys(namespace, key, options) {
            const order = options?.order === 'asc' ? 'ASC' : 'DESC';
            // Bind LIMIT as a parameter instead of interpolating it into the SQL text.
            if (options?.limit) {
                const rows = db
                    .prepare(`SELECT item_key FROM state_items WHERE namespace = ? AND key = ? ORDER BY updated_at ${order} LIMIT ?`)
                    .all(namespace, key, options.limit);
                return rows.map((r) => r.item_key);
            }
            const rows = db
                .prepare(`SELECT item_key FROM state_items WHERE namespace = ? AND key = ? ORDER BY updated_at ${order}`)
                .all(namespace, key);
            return rows.map((r) => r.item_key);
        },
    };
}
|
|
1563
|
+
|
|
1564
|
+
/**
|
|
1565
|
+
* Job client library for runner jobs. Provides cursor (state) and queue operations. Opens its own DB connection via JR_DB_PATH env var.
|
|
1566
|
+
*/
|
|
1567
|
+
/**
 * Create a runner client for job scripts. Opens its own DB connection.
 *
 * The path is taken from the explicit argument, falling back to the
 * JR_DB_PATH environment variable; neither present is a hard error.
 */
function createClient(dbPath) {
    const resolvedPath = dbPath ?? process.env.JR_DB_PATH;
    if (!resolvedPath)
        throw new Error('DB path required (provide dbPath or set JR_DB_PATH env var)');
    const db = createConnection(resolvedPath);
    // The client surface is the union of state, collection, and queue ops
    // over one shared connection, plus a close() to release it.
    return {
        ...createStateOps(db),
        ...createCollectionOps(db),
        ...createQueueOps(db),
        close() {
            closeConnection(db);
        },
    };
}
|
|
943
1587
|
|
|
944
|
-
export { closeConnection, createClient, createConnection, createMaintenance, createNotifier, createRunner, createScheduler, executeJob, jobSchema, runMigrations, runSchema, runStatusSchema, runTriggerSchema, runnerConfigSchema };
|
|
1588
|
+
export { closeConnection, createClient, createConnection, createGatewayClient, createMaintenance, createNotifier, createRunner, createScheduler, executeJob, executeSession, jobSchema, queueSchema, runMigrations, runSchema, runStatusSchema, runTriggerSchema, runnerConfigSchema };
|