@nicnocquee/dataqueue 1.25.0 → 1.26.0-beta.20260223202259
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ai/build-docs-content.ts +96 -0
- package/ai/build-llms-full.ts +42 -0
- package/ai/docs-content.json +284 -0
- package/ai/rules/advanced.md +150 -0
- package/ai/rules/basic.md +159 -0
- package/ai/rules/react-dashboard.md +83 -0
- package/ai/skills/dataqueue-advanced/SKILL.md +370 -0
- package/ai/skills/dataqueue-core/SKILL.md +234 -0
- package/ai/skills/dataqueue-react/SKILL.md +189 -0
- package/dist/cli.cjs +1149 -14
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.d.cts +66 -1
- package/dist/cli.d.ts +66 -1
- package/dist/cli.js +1146 -13
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +3236 -1237
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +697 -23
- package/dist/index.d.ts +697 -23
- package/dist/index.js +3235 -1238
- package/dist/index.js.map +1 -1
- package/dist/mcp-server.cjs +186 -0
- package/dist/mcp-server.cjs.map +1 -0
- package/dist/mcp-server.d.cts +32 -0
- package/dist/mcp-server.d.ts +32 -0
- package/dist/mcp-server.js +175 -0
- package/dist/mcp-server.js.map +1 -0
- package/migrations/1781200000004_create_cron_schedules_table.sql +33 -0
- package/migrations/1781200000005_add_retry_config_to_job_queue.sql +17 -0
- package/package.json +24 -21
- package/src/backend.ts +170 -5
- package/src/backends/postgres.ts +992 -63
- package/src/backends/redis-scripts.ts +358 -26
- package/src/backends/redis.test.ts +1532 -0
- package/src/backends/redis.ts +993 -35
- package/src/cli.test.ts +82 -6
- package/src/cli.ts +73 -10
- package/src/cron.test.ts +126 -0
- package/src/cron.ts +40 -0
- package/src/db-util.ts +1 -1
- package/src/index.test.ts +1034 -11
- package/src/index.ts +267 -39
- package/src/init-command.test.ts +449 -0
- package/src/init-command.ts +709 -0
- package/src/install-mcp-command.test.ts +216 -0
- package/src/install-mcp-command.ts +185 -0
- package/src/install-rules-command.test.ts +218 -0
- package/src/install-rules-command.ts +233 -0
- package/src/install-skills-command.test.ts +176 -0
- package/src/install-skills-command.ts +124 -0
- package/src/mcp-server.test.ts +162 -0
- package/src/mcp-server.ts +231 -0
- package/src/processor.ts +104 -113
- package/src/queue.test.ts +465 -0
- package/src/queue.ts +34 -252
- package/src/supervisor.test.ts +340 -0
- package/src/supervisor.ts +177 -0
- package/src/types.ts +476 -12
- package/LICENSE +0 -21
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* DataQueue MCP Server — exposes documentation search over stdio.
|
|
5
|
+
* Run via: dataqueue-cli mcp
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
|
|
9
|
+
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
|
|
10
|
+
import { z } from 'zod';
|
|
11
|
+
import fs from 'fs';
|
|
12
|
+
import path from 'path';
|
|
13
|
+
import { fileURLToPath } from 'url';
|
|
14
|
+
|
|
15
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
16
|
+
const __dirname = path.dirname(__filename);
|
|
17
|
+
|
|
18
|
+
interface DocPage {
|
|
19
|
+
slug: string;
|
|
20
|
+
title: string;
|
|
21
|
+
description: string;
|
|
22
|
+
content: string;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
/** @internal Loads docs-content.json from the ai/ directory bundled with the package. */
|
|
26
|
+
export function loadDocsContent(
|
|
27
|
+
docsPath: string = path.join(__dirname, '../ai/docs-content.json'),
|
|
28
|
+
): DocPage[] {
|
|
29
|
+
const raw = fs.readFileSync(docsPath, 'utf-8');
|
|
30
|
+
return JSON.parse(raw) as DocPage[];
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
/** @internal Scores a doc page against a search query using simple term matching. */
|
|
34
|
+
export function scorePageForQuery(page: DocPage, queryTerms: string[]): number {
|
|
35
|
+
const titleLower = page.title.toLowerCase();
|
|
36
|
+
const descLower = page.description.toLowerCase();
|
|
37
|
+
const contentLower = page.content.toLowerCase();
|
|
38
|
+
|
|
39
|
+
let score = 0;
|
|
40
|
+
for (const term of queryTerms) {
|
|
41
|
+
if (titleLower.includes(term)) score += 10;
|
|
42
|
+
if (descLower.includes(term)) score += 5;
|
|
43
|
+
|
|
44
|
+
const contentMatches = contentLower.split(term).length - 1;
|
|
45
|
+
score += Math.min(contentMatches, 10);
|
|
46
|
+
}
|
|
47
|
+
return score;
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
/** @internal Extracts a relevant excerpt around the first match of any query term. */
|
|
51
|
+
export function extractExcerpt(
|
|
52
|
+
content: string,
|
|
53
|
+
queryTerms: string[],
|
|
54
|
+
maxLength = 500,
|
|
55
|
+
): string {
|
|
56
|
+
const lower = content.toLowerCase();
|
|
57
|
+
let earliestIndex = -1;
|
|
58
|
+
|
|
59
|
+
for (const term of queryTerms) {
|
|
60
|
+
const idx = lower.indexOf(term);
|
|
61
|
+
if (idx !== -1 && (earliestIndex === -1 || idx < earliestIndex)) {
|
|
62
|
+
earliestIndex = idx;
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
if (earliestIndex === -1) {
|
|
67
|
+
return content.slice(0, maxLength);
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
const start = Math.max(0, earliestIndex - 100);
|
|
71
|
+
const end = Math.min(content.length, start + maxLength);
|
|
72
|
+
let excerpt = content.slice(start, end);
|
|
73
|
+
|
|
74
|
+
if (start > 0) excerpt = '...' + excerpt;
|
|
75
|
+
if (end < content.length) excerpt = excerpt + '...';
|
|
76
|
+
|
|
77
|
+
return excerpt;
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
/**
 * Creates and starts the DataQueue MCP server over stdio.
 *
 * Registers one resource (`dataqueue://llms.txt`) and three tools
 * (`list-doc-pages`, `get-doc-page`, `search-docs`), all backed by the doc
 * pages loaded at startup, then connects the server to a stdio transport.
 *
 * @param deps - Injectable dependencies for testing.
 * @param deps.docsPath - Optional override for the docs-content.json location.
 * @param deps.transport - Optional transport; defaults to a new stdio transport.
 * @returns The connected McpServer instance.
 */
export async function startMcpServer(
  deps: {
    docsPath?: string;
    transport?: InstanceType<typeof StdioServerTransport>;
  } = {},
): Promise<McpServer> {
  // Docs are loaded once at startup; every handler below closes over this
  // array, so changes on disk are not picked up until the server restarts.
  const pages = loadDocsContent(deps.docsPath);

  // NOTE(review): version is hard-coded to '1.0.0' rather than derived from
  // package.json — presumably intentional as the MCP server's own version,
  // but worth confirming.
  const server = new McpServer({
    name: 'dataqueue-docs',
    version: '1.0.0',
  });

  // llms.txt-style overview resource. Prefers the bundled core SKILL.md; if
  // that file is missing from the installed package, falls back to a
  // generated index of titles, slugs, and descriptions.
  server.resource('llms-txt', 'dataqueue://llms.txt', async () => {
    const llmsPath = path.join(
      __dirname,
      '../ai/skills/dataqueue-core/SKILL.md',
    );
    let content: string;
    try {
      content = fs.readFileSync(llmsPath, 'utf-8');
    } catch {
      content = pages
        .map((p) => `## ${p.title}\n\nSlug: ${p.slug}\n\n${p.description}`)
        .join('\n\n');
    }
    return { contents: [{ uri: 'dataqueue://llms.txt', text: content }] };
  });

  // Tool: enumerate all pages (slug/title/description only, no bodies).
  server.tool(
    'list-doc-pages',
    'List all available DataQueue documentation pages with titles and descriptions.',
    {},
    async () => {
      const listing = pages.map((p) => ({
        slug: p.slug,
        title: p.title,
        description: p.description,
      }));
      return {
        content: [
          { type: 'text' as const, text: JSON.stringify(listing, null, 2) },
        ],
      };
    },
  );

  // Tool: fetch one page by exact slug; unknown slugs return an isError
  // response pointing the caller at list-doc-pages.
  server.tool(
    'get-doc-page',
    'Fetch a specific DataQueue doc page by slug. Returns full page content as markdown.',
    {
      slug: z
        .string()
        .describe('The doc page slug, e.g. "usage/add-job" or "api/job-queue"'),
    },
    async ({ slug }) => {
      const page = pages.find((p) => p.slug === slug);
      if (!page) {
        return {
          content: [
            {
              type: 'text' as const,
              text: `Page not found: "${slug}". Use list-doc-pages to see available slugs.`,
            },
          ],
          isError: true,
        };
      }
      // Render a markdown header; the description becomes a blockquote when
      // present.
      const header = page.description
        ? `# ${page.title}\n\n> ${page.description}\n\n`
        : `# ${page.title}\n\n`;
      return {
        content: [{ type: 'text' as const, text: header + page.content }],
      };
    },
  );

  // Tool: ranked full-text search over all pages using scorePageForQuery,
  // returning at most 5 results with context excerpts.
  server.tool(
    'search-docs',
    'Full-text search across all DataQueue documentation pages. Returns matching sections with page titles and content excerpts.',
    {
      query: z
        .string()
        .describe('Search query, e.g. "cron scheduling" or "waitForToken"'),
    },
    async ({ query }) => {
      // Tokenize on whitespace; single-character tokens are dropped to avoid
      // noise matches.
      const queryTerms = query
        .toLowerCase()
        .split(/\s+/)
        .filter((t) => t.length > 1);

      if (queryTerms.length === 0) {
        return {
          content: [
            { type: 'text' as const, text: 'Please provide a search query.' },
          ],
          isError: true,
        };
      }

      // Score every page, keep only positive scores, and take the top 5.
      // sort() mutates, but only the freshly mapped array — `pages` is safe.
      const scored = pages
        .map((page) => ({
          page,
          score: scorePageForQuery(page, queryTerms),
        }))
        .filter((r) => r.score > 0)
        .sort((a, b) => b.score - a.score)
        .slice(0, 5);

      if (scored.length === 0) {
        return {
          content: [
            {
              type: 'text' as const,
              text: `No results for "${query}". Try different keywords or use list-doc-pages to browse.`,
            },
          ],
        };
      }

      const results = scored.map((r) => {
        const excerpt = extractExcerpt(r.page.content, queryTerms);
        return `## ${r.page.title} (${r.page.slug})\n\n${r.page.description}\n\n${excerpt}`;
      });

      return {
        content: [{ type: 'text' as const, text: results.join('\n\n---\n\n') }],
      };
    },
  );

  // Connect over stdio (or the injected transport in tests) and hand the
  // connected server back to the caller.
  const transport = deps.transport ?? new StdioServerTransport();
  await server.connect(transport);
  return server;
}
|
|
220
|
+
|
|
221
|
+
const isDirectRun =
|
|
222
|
+
process.argv[1] &&
|
|
223
|
+
(process.argv[1].endsWith('/mcp-server.js') ||
|
|
224
|
+
process.argv[1].endsWith('/mcp-server.cjs'));
|
|
225
|
+
|
|
226
|
+
if (isDirectRun) {
|
|
227
|
+
startMcpServer().catch((err) => {
|
|
228
|
+
console.error('Failed to start MCP server:', err);
|
|
229
|
+
process.exit(1);
|
|
230
|
+
});
|
|
231
|
+
}
|
package/src/processor.ts
CHANGED
|
@@ -1,5 +1,4 @@
|
|
|
1
1
|
import { Worker } from 'worker_threads';
|
|
2
|
-
import { Pool } from 'pg';
|
|
3
2
|
import {
|
|
4
3
|
JobRecord,
|
|
5
4
|
ProcessorOptions,
|
|
@@ -13,71 +12,11 @@ import {
|
|
|
13
12
|
WaitSignal,
|
|
14
13
|
WaitDuration,
|
|
15
14
|
WaitTokenResult,
|
|
15
|
+
QueueEmitFn,
|
|
16
16
|
} from './types.js';
|
|
17
17
|
import { QueueBackend } from './backend.js';
|
|
18
|
-
import { PostgresBackend } from './backends/postgres.js';
|
|
19
|
-
import {
|
|
20
|
-
waitJob,
|
|
21
|
-
updateStepData,
|
|
22
|
-
createWaitpoint,
|
|
23
|
-
getWaitpoint,
|
|
24
|
-
} from './queue.js';
|
|
25
18
|
import { log, setLogContext } from './log-context.js';
|
|
26
19
|
|
|
27
|
-
/**
|
|
28
|
-
* Try to extract the underlying pg Pool from a QueueBackend.
|
|
29
|
-
* Returns null for non-PostgreSQL backends.
|
|
30
|
-
*/
|
|
31
|
-
function tryExtractPool(backend: QueueBackend): Pool | null {
|
|
32
|
-
if (backend instanceof PostgresBackend) {
|
|
33
|
-
return backend.getPool();
|
|
34
|
-
}
|
|
35
|
-
return null;
|
|
36
|
-
}
|
|
37
|
-
|
|
38
|
-
/**
|
|
39
|
-
* Build a JobContext without wait support (for non-PostgreSQL backends).
|
|
40
|
-
* prolong/onTimeout work normally; wait-related methods throw helpful errors.
|
|
41
|
-
*/
|
|
42
|
-
function buildBasicContext(
|
|
43
|
-
backend: QueueBackend,
|
|
44
|
-
jobId: number,
|
|
45
|
-
baseCtx: {
|
|
46
|
-
prolong: JobContext['prolong'];
|
|
47
|
-
onTimeout: JobContext['onTimeout'];
|
|
48
|
-
},
|
|
49
|
-
): JobContext {
|
|
50
|
-
const waitError = () =>
|
|
51
|
-
new Error(
|
|
52
|
-
'Wait features (waitFor, waitUntil, createToken, waitForToken, ctx.run) are currently only supported with the PostgreSQL backend.',
|
|
53
|
-
);
|
|
54
|
-
return {
|
|
55
|
-
prolong: baseCtx.prolong,
|
|
56
|
-
onTimeout: baseCtx.onTimeout,
|
|
57
|
-
run: async <T>(_stepName: string, fn: () => Promise<T>): Promise<T> => {
|
|
58
|
-
// Without PostgreSQL, just execute the function directly (no persistence)
|
|
59
|
-
return fn();
|
|
60
|
-
},
|
|
61
|
-
waitFor: async () => {
|
|
62
|
-
throw waitError();
|
|
63
|
-
},
|
|
64
|
-
waitUntil: async () => {
|
|
65
|
-
throw waitError();
|
|
66
|
-
},
|
|
67
|
-
createToken: async () => {
|
|
68
|
-
throw waitError();
|
|
69
|
-
},
|
|
70
|
-
waitForToken: async () => {
|
|
71
|
-
throw waitError();
|
|
72
|
-
},
|
|
73
|
-
setProgress: async (percent: number) => {
|
|
74
|
-
if (percent < 0 || percent > 100)
|
|
75
|
-
throw new Error('Progress must be between 0 and 100');
|
|
76
|
-
await backend.updateProgress(jobId, Math.round(percent));
|
|
77
|
-
},
|
|
78
|
-
};
|
|
79
|
-
}
|
|
80
|
-
|
|
81
20
|
/**
|
|
82
21
|
* Validates that a handler can be serialized for worker thread execution.
|
|
83
22
|
* Throws an error with helpful message if serialization fails.
|
|
@@ -388,7 +327,7 @@ function createNoOpContext(
|
|
|
388
327
|
* Marks pending waits as completed and fetches token outputs.
|
|
389
328
|
*/
|
|
390
329
|
async function resolveCompletedWaits(
|
|
391
|
-
|
|
330
|
+
backend: QueueBackend,
|
|
392
331
|
stepData: Record<string, any>,
|
|
393
332
|
): Promise<void> {
|
|
394
333
|
for (const key of Object.keys(stepData)) {
|
|
@@ -401,7 +340,7 @@ async function resolveCompletedWaits(
|
|
|
401
340
|
stepData[key] = { ...entry, completed: true };
|
|
402
341
|
} else if (entry.type === 'token' && entry.tokenId) {
|
|
403
342
|
// Token-based wait -- fetch the waitpoint result
|
|
404
|
-
const wp = await getWaitpoint(
|
|
343
|
+
const wp = await backend.getWaitpoint(entry.tokenId);
|
|
405
344
|
if (wp && wp.status === 'completed') {
|
|
406
345
|
stepData[key] = {
|
|
407
346
|
...entry,
|
|
@@ -422,10 +361,10 @@ async function resolveCompletedWaits(
|
|
|
422
361
|
|
|
423
362
|
/**
|
|
424
363
|
* Build the extended JobContext with step tracking and wait support.
|
|
364
|
+
* Works with any QueueBackend (Postgres or Redis).
|
|
425
365
|
*/
|
|
426
366
|
function buildWaitContext(
|
|
427
367
|
backend: QueueBackend,
|
|
428
|
-
pool: Pool,
|
|
429
368
|
jobId: number,
|
|
430
369
|
stepData: Record<string, any>,
|
|
431
370
|
baseCtx: {
|
|
@@ -455,7 +394,7 @@ function buildWaitContext(
|
|
|
455
394
|
|
|
456
395
|
// Persist step result
|
|
457
396
|
stepData[stepName] = { __completed: true, result };
|
|
458
|
-
await updateStepData(
|
|
397
|
+
await backend.updateStepData(jobId, stepData);
|
|
459
398
|
|
|
460
399
|
return result;
|
|
461
400
|
},
|
|
@@ -498,7 +437,7 @@ function buildWaitContext(
|
|
|
498
437
|
},
|
|
499
438
|
|
|
500
439
|
createToken: async (options?) => {
|
|
501
|
-
const token = await createWaitpoint(
|
|
440
|
+
const token = await backend.createWaitpoint(jobId, options);
|
|
502
441
|
return token;
|
|
503
442
|
},
|
|
504
443
|
|
|
@@ -517,7 +456,7 @@ function buildWaitContext(
|
|
|
517
456
|
}
|
|
518
457
|
|
|
519
458
|
// Check if the token is already completed (e.g., completed while job was still processing)
|
|
520
|
-
const wp = await getWaitpoint(
|
|
459
|
+
const wp = await backend.getWaitpoint(tokenId);
|
|
521
460
|
if (wp && wp.status === 'completed') {
|
|
522
461
|
const result: WaitTokenResult<T> = {
|
|
523
462
|
ok: true,
|
|
@@ -529,7 +468,7 @@ function buildWaitContext(
|
|
|
529
468
|
completed: true,
|
|
530
469
|
result,
|
|
531
470
|
};
|
|
532
|
-
await updateStepData(
|
|
471
|
+
await backend.updateStepData(jobId, stepData);
|
|
533
472
|
return result;
|
|
534
473
|
}
|
|
535
474
|
if (wp && wp.status === 'timed_out') {
|
|
@@ -543,7 +482,7 @@ function buildWaitContext(
|
|
|
543
482
|
completed: true,
|
|
544
483
|
result,
|
|
545
484
|
};
|
|
546
|
-
await updateStepData(
|
|
485
|
+
await backend.updateStepData(jobId, stepData);
|
|
547
486
|
return result;
|
|
548
487
|
}
|
|
549
488
|
|
|
@@ -563,7 +502,12 @@ function buildWaitContext(
|
|
|
563
502
|
}
|
|
564
503
|
|
|
565
504
|
/**
|
|
566
|
-
* Process a single job using the provided handler map
|
|
505
|
+
* Process a single job using the provided handler map.
|
|
506
|
+
*
|
|
507
|
+
* @param backend - The queue backend.
|
|
508
|
+
* @param job - The job record to process.
|
|
509
|
+
* @param jobHandlers - Map of job type to handler function.
|
|
510
|
+
* @param emit - Optional callback to emit lifecycle events to the queue's EventEmitter.
|
|
567
511
|
*/
|
|
568
512
|
export async function processJobWithHandlers<
|
|
569
513
|
PayloadMap,
|
|
@@ -572,6 +516,7 @@ export async function processJobWithHandlers<
|
|
|
572
516
|
backend: QueueBackend,
|
|
573
517
|
job: JobRecord<PayloadMap, T>,
|
|
574
518
|
jobHandlers: JobHandlers<PayloadMap>,
|
|
519
|
+
emit?: QueueEmitFn,
|
|
575
520
|
): Promise<void> {
|
|
576
521
|
const handler = jobHandlers[job.jobType];
|
|
577
522
|
|
|
@@ -580,28 +525,30 @@ export async function processJobWithHandlers<
|
|
|
580
525
|
`No handler registered for job type: ${job.jobType}`,
|
|
581
526
|
job.jobType,
|
|
582
527
|
);
|
|
583
|
-
|
|
584
|
-
job.
|
|
585
|
-
new Error(`No handler registered for job type: ${job.jobType}`),
|
|
586
|
-
FailureReason.NoHandler,
|
|
528
|
+
const noHandlerError = new Error(
|
|
529
|
+
`No handler registered for job type: ${job.jobType}`,
|
|
587
530
|
);
|
|
531
|
+
await backend.failJob(job.id, noHandlerError, FailureReason.NoHandler);
|
|
532
|
+
emit?.('job:failed', {
|
|
533
|
+
jobId: job.id,
|
|
534
|
+
jobType: job.jobType,
|
|
535
|
+
error: noHandlerError,
|
|
536
|
+
willRetry: false,
|
|
537
|
+
});
|
|
588
538
|
return;
|
|
589
539
|
}
|
|
590
540
|
|
|
591
541
|
// Load step data (may contain completed steps from previous invocations)
|
|
592
542
|
const stepData: Record<string, any> = { ...(job.stepData || {}) };
|
|
593
543
|
|
|
594
|
-
// Try to get pool for wait features (PostgreSQL-only)
|
|
595
|
-
const pool = tryExtractPool(backend);
|
|
596
|
-
|
|
597
544
|
// If resuming from a wait, resolve any pending wait entries
|
|
598
545
|
const hasStepHistory = Object.keys(stepData).some((k) =>
|
|
599
546
|
k.startsWith('__wait_'),
|
|
600
547
|
);
|
|
601
|
-
if (hasStepHistory
|
|
602
|
-
await resolveCompletedWaits(
|
|
548
|
+
if (hasStepHistory) {
|
|
549
|
+
await resolveCompletedWaits(backend, stepData);
|
|
603
550
|
// Persist the resolved step data
|
|
604
|
-
await updateStepData(
|
|
551
|
+
await backend.updateStepData(job.id, stepData);
|
|
605
552
|
}
|
|
606
553
|
|
|
607
554
|
// Per-job timeout logic
|
|
@@ -685,10 +632,20 @@ export async function processJobWithHandlers<
|
|
|
685
632
|
},
|
|
686
633
|
};
|
|
687
634
|
|
|
688
|
-
// Build context: full wait support for
|
|
689
|
-
const ctx =
|
|
690
|
-
|
|
691
|
-
|
|
635
|
+
// Build context: full wait support for all backends
|
|
636
|
+
const ctx = buildWaitContext(backend, job.id, stepData, baseCtx);
|
|
637
|
+
|
|
638
|
+
// Wrap setProgress to also emit the event
|
|
639
|
+
if (emit) {
|
|
640
|
+
const originalSetProgress = ctx.setProgress;
|
|
641
|
+
ctx.setProgress = async (percent: number) => {
|
|
642
|
+
await originalSetProgress(percent);
|
|
643
|
+
emit('job:progress', {
|
|
644
|
+
jobId: job.id,
|
|
645
|
+
progress: Math.round(percent),
|
|
646
|
+
});
|
|
647
|
+
};
|
|
648
|
+
}
|
|
692
649
|
|
|
693
650
|
// If forceKillOnTimeout was set but timeoutMs was missing, warn
|
|
694
651
|
if (forceKillOnTimeout && !hasTimeout) {
|
|
@@ -715,31 +672,21 @@ export async function processJobWithHandlers<
|
|
|
715
672
|
|
|
716
673
|
// Job completed successfully -- complete via backend
|
|
717
674
|
await backend.completeJob(job.id);
|
|
675
|
+
emit?.('job:completed', { jobId: job.id, jobType: job.jobType });
|
|
718
676
|
} catch (error) {
|
|
719
677
|
if (timeoutId) clearTimeout(timeoutId);
|
|
720
678
|
|
|
721
679
|
// Check if this is a WaitSignal (not a real error)
|
|
722
680
|
if (error instanceof WaitSignal) {
|
|
723
|
-
if (!pool) {
|
|
724
|
-
// Wait signals should never happen with non-PostgreSQL backends
|
|
725
|
-
// since the context methods throw, but guard just in case
|
|
726
|
-
await backend.failJob(
|
|
727
|
-
job.id,
|
|
728
|
-
new Error(
|
|
729
|
-
'WaitSignal received but wait features require the PostgreSQL backend.',
|
|
730
|
-
),
|
|
731
|
-
FailureReason.HandlerError,
|
|
732
|
-
);
|
|
733
|
-
return;
|
|
734
|
-
}
|
|
735
681
|
log(
|
|
736
682
|
`Job ${job.id} entering wait: type=${error.type}, waitUntil=${error.waitUntil?.toISOString() ?? 'none'}, tokenId=${error.tokenId ?? 'none'}`,
|
|
737
683
|
);
|
|
738
|
-
await waitJob(
|
|
684
|
+
await backend.waitJob(job.id, {
|
|
739
685
|
waitUntil: error.waitUntil,
|
|
740
686
|
waitTokenId: error.tokenId,
|
|
741
687
|
stepData: error.stepData,
|
|
742
688
|
});
|
|
689
|
+
emit?.('job:waiting', { jobId: job.id, jobType: job.jobType });
|
|
743
690
|
return;
|
|
744
691
|
}
|
|
745
692
|
|
|
@@ -755,16 +702,28 @@ export async function processJobWithHandlers<
|
|
|
755
702
|
) {
|
|
756
703
|
failureReason = FailureReason.Timeout;
|
|
757
704
|
}
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
705
|
+
const failError = error instanceof Error ? error : new Error(String(error));
|
|
706
|
+
await backend.failJob(job.id, failError, failureReason);
|
|
707
|
+
emit?.('job:failed', {
|
|
708
|
+
jobId: job.id,
|
|
709
|
+
jobType: job.jobType,
|
|
710
|
+
error: failError,
|
|
711
|
+
willRetry: job.attempts + 1 < job.maxAttempts,
|
|
712
|
+
});
|
|
763
713
|
}
|
|
764
714
|
}
|
|
765
715
|
|
|
766
716
|
/**
|
|
767
|
-
* Process a batch of jobs using the provided handler map and concurrency limit
|
|
717
|
+
* Process a batch of jobs using the provided handler map and concurrency limit.
|
|
718
|
+
*
|
|
719
|
+
* @param backend - The queue backend.
|
|
720
|
+
* @param workerId - Identifier for the worker claiming jobs.
|
|
721
|
+
* @param batchSize - Maximum jobs to claim per batch.
|
|
722
|
+
* @param jobType - Optional job type filter.
|
|
723
|
+
* @param jobHandlers - Map of job type to handler function.
|
|
724
|
+
* @param concurrency - Max parallel jobs within the batch.
|
|
725
|
+
* @param onError - Legacy error callback.
|
|
726
|
+
* @param emit - Optional callback to emit lifecycle events.
|
|
768
727
|
*/
|
|
769
728
|
export async function processBatchWithHandlers<PayloadMap>(
|
|
770
729
|
backend: QueueBackend,
|
|
@@ -774,16 +733,26 @@ export async function processBatchWithHandlers<PayloadMap>(
|
|
|
774
733
|
jobHandlers: JobHandlers<PayloadMap>,
|
|
775
734
|
concurrency?: number,
|
|
776
735
|
onError?: (error: Error) => void,
|
|
736
|
+
emit?: QueueEmitFn,
|
|
777
737
|
): Promise<number> {
|
|
778
738
|
const jobs = await backend.getNextBatch<PayloadMap, JobType<PayloadMap>>(
|
|
779
739
|
workerId,
|
|
780
740
|
batchSize,
|
|
781
741
|
jobType,
|
|
782
742
|
);
|
|
743
|
+
|
|
744
|
+
// Emit job:processing for each claimed job
|
|
745
|
+
if (emit) {
|
|
746
|
+
for (const job of jobs) {
|
|
747
|
+
emit('job:processing', { jobId: job.id, jobType: job.jobType });
|
|
748
|
+
}
|
|
749
|
+
}
|
|
750
|
+
|
|
783
751
|
if (!concurrency || concurrency >= jobs.length) {
|
|
784
|
-
// Default: all in parallel
|
|
785
752
|
await Promise.all(
|
|
786
|
-
jobs.map((job) =>
|
|
753
|
+
jobs.map((job) =>
|
|
754
|
+
processJobWithHandlers(backend, job, jobHandlers, emit),
|
|
755
|
+
),
|
|
787
756
|
);
|
|
788
757
|
return jobs.length;
|
|
789
758
|
}
|
|
@@ -797,7 +766,7 @@ export async function processBatchWithHandlers<PayloadMap>(
|
|
|
797
766
|
while (running < concurrency && idx < jobs.length) {
|
|
798
767
|
const job = jobs[idx++];
|
|
799
768
|
running++;
|
|
800
|
-
processJobWithHandlers(backend, job, jobHandlers)
|
|
769
|
+
processJobWithHandlers(backend, job, jobHandlers, emit)
|
|
801
770
|
.then(() => {
|
|
802
771
|
running--;
|
|
803
772
|
finished++;
|
|
@@ -818,16 +787,21 @@ export async function processBatchWithHandlers<PayloadMap>(
|
|
|
818
787
|
}
|
|
819
788
|
|
|
820
789
|
/**
|
|
821
|
-
* Start a job processor that continuously processes jobs
|
|
822
|
-
*
|
|
823
|
-
* @param
|
|
790
|
+
* Start a job processor that continuously processes jobs.
|
|
791
|
+
*
|
|
792
|
+
* @param backend - The queue backend.
|
|
793
|
+
* @param handlers - The job handlers for this processor instance.
|
|
824
794
|
* @param options - The processor options. Leave pollInterval empty to run only once. Use jobType to filter jobs by type.
|
|
825
|
-
* @
|
|
795
|
+
* @param onBeforeBatch - Optional callback invoked before each batch. Used internally to enqueue due cron jobs.
|
|
796
|
+
* @param emit - Optional callback to emit lifecycle events to the queue's EventEmitter.
|
|
797
|
+
* @returns The processor instance.
|
|
826
798
|
*/
|
|
827
799
|
export const createProcessor = <PayloadMap = any>(
|
|
828
800
|
backend: QueueBackend,
|
|
829
801
|
handlers: JobHandlers<PayloadMap>,
|
|
830
802
|
options: ProcessorOptions = {},
|
|
803
|
+
onBeforeBatch?: () => Promise<void>,
|
|
804
|
+
emit?: QueueEmitFn,
|
|
831
805
|
): Processor => {
|
|
832
806
|
const {
|
|
833
807
|
workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
|
|
@@ -847,6 +821,21 @@ export const createProcessor = <PayloadMap = any>(
|
|
|
847
821
|
const processJobs = async (): Promise<number> => {
|
|
848
822
|
if (!running) return 0;
|
|
849
823
|
|
|
824
|
+
// Run pre-batch hook (e.g. enqueue due cron jobs) before processing
|
|
825
|
+
if (onBeforeBatch) {
|
|
826
|
+
try {
|
|
827
|
+
await onBeforeBatch();
|
|
828
|
+
} catch (hookError) {
|
|
829
|
+
log(`onBeforeBatch hook error: ${hookError}`);
|
|
830
|
+
const err =
|
|
831
|
+
hookError instanceof Error ? hookError : new Error(String(hookError));
|
|
832
|
+
if (onError) {
|
|
833
|
+
onError(err);
|
|
834
|
+
}
|
|
835
|
+
emit?.('error', err);
|
|
836
|
+
}
|
|
837
|
+
}
|
|
838
|
+
|
|
850
839
|
log(
|
|
851
840
|
`Processing jobs with workerId: ${workerId}${jobType ? ` and jobType: ${Array.isArray(jobType) ? jobType.join(',') : jobType}` : ''}`,
|
|
852
841
|
);
|
|
@@ -860,11 +849,13 @@ export const createProcessor = <PayloadMap = any>(
|
|
|
860
849
|
handlers,
|
|
861
850
|
concurrency,
|
|
862
851
|
onError,
|
|
852
|
+
emit,
|
|
863
853
|
);
|
|
864
|
-
// Only process one batch in start; do not schedule next batch here
|
|
865
854
|
return processed;
|
|
866
855
|
} catch (error) {
|
|
867
|
-
|
|
856
|
+
const err = error instanceof Error ? error : new Error(String(error));
|
|
857
|
+
onError(err);
|
|
858
|
+
emit?.('error', err);
|
|
868
859
|
}
|
|
869
860
|
return 0;
|
|
870
861
|
};
|