@nicnocquee/dataqueue 1.33.0 → 1.35.0-beta.20260224075710

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/ai/build-docs-content.ts +96 -0
  2. package/ai/build-llms-full.ts +42 -0
  3. package/ai/docs-content.json +290 -0
  4. package/ai/rules/advanced.md +170 -0
  5. package/ai/rules/basic.md +159 -0
  6. package/ai/rules/react-dashboard.md +87 -0
  7. package/ai/skills/dataqueue-advanced/SKILL.md +370 -0
  8. package/ai/skills/dataqueue-core/SKILL.md +235 -0
  9. package/ai/skills/dataqueue-react/SKILL.md +201 -0
  10. package/dist/cli.cjs +577 -32
  11. package/dist/cli.cjs.map +1 -1
  12. package/dist/cli.d.cts +52 -2
  13. package/dist/cli.d.ts +52 -2
  14. package/dist/cli.js +575 -32
  15. package/dist/cli.js.map +1 -1
  16. package/dist/index.cjs +937 -108
  17. package/dist/index.cjs.map +1 -1
  18. package/dist/index.d.cts +358 -11
  19. package/dist/index.d.ts +358 -11
  20. package/dist/index.js +937 -108
  21. package/dist/index.js.map +1 -1
  22. package/dist/mcp-server.cjs +186 -0
  23. package/dist/mcp-server.cjs.map +1 -0
  24. package/dist/mcp-server.d.cts +32 -0
  25. package/dist/mcp-server.d.ts +32 -0
  26. package/dist/mcp-server.js +175 -0
  27. package/dist/mcp-server.js.map +1 -0
  28. package/migrations/1781200000005_add_retry_config_to_job_queue.sql +17 -0
  29. package/migrations/1781200000006_add_output_to_job_queue.sql +3 -0
  30. package/package.json +10 -4
  31. package/src/backend.ts +36 -3
  32. package/src/backends/postgres.ts +344 -42
  33. package/src/backends/redis-scripts.ts +173 -8
  34. package/src/backends/redis.test.ts +668 -0
  35. package/src/backends/redis.ts +244 -15
  36. package/src/cli.test.ts +65 -0
  37. package/src/cli.ts +56 -19
  38. package/src/db-util.ts +1 -1
  39. package/src/index.test.ts +811 -12
  40. package/src/index.ts +106 -14
  41. package/src/install-mcp-command.test.ts +216 -0
  42. package/src/install-mcp-command.ts +185 -0
  43. package/src/install-rules-command.test.ts +218 -0
  44. package/src/install-rules-command.ts +233 -0
  45. package/src/install-skills-command.test.ts +176 -0
  46. package/src/install-skills-command.ts +124 -0
  47. package/src/mcp-server.test.ts +162 -0
  48. package/src/mcp-server.ts +231 -0
  49. package/src/processor.ts +133 -49
  50. package/src/queue.test.ts +477 -0
  51. package/src/queue.ts +20 -3
  52. package/src/supervisor.test.ts +340 -0
  53. package/src/supervisor.ts +177 -0
  54. package/src/types.ts +318 -3
package/src/processor.ts CHANGED
@@ -12,6 +12,7 @@ import {
12
12
  WaitSignal,
13
13
  WaitDuration,
14
14
  WaitTokenResult,
15
+ QueueEmitFn,
15
16
  } from './types.js';
16
17
  import { QueueBackend } from './backend.js';
17
18
  import { log, setLogContext } from './log-context.js';
@@ -90,7 +91,7 @@ async function runHandlerInWorker<
90
91
  payload: PayloadMap[T],
91
92
  timeoutMs: number,
92
93
  jobType: string,
93
- ): Promise<void> {
94
+ ): Promise<unknown> {
94
95
  // Validate handler can be serialized before attempting to run in worker
95
96
  validateHandlerSerializable(handler, jobType);
96
97
 
@@ -155,9 +156,9 @@ async function runHandlerInWorker<
155
156
  }
156
157
 
157
158
  handlerFn(payload, signal)
158
- .then(() => {
159
+ .then((result) => {
159
160
  clearTimeout(timeoutId);
160
- parentPort.postMessage({ type: 'success' });
161
+ parentPort.postMessage({ type: 'success', output: result });
161
162
  })
162
163
  .catch((error) => {
163
164
  clearTimeout(timeoutId);
@@ -195,26 +196,29 @@ async function runHandlerInWorker<
195
196
 
196
197
  let resolved = false;
197
198
 
198
- worker.on('message', (message: { type: string; error?: any }) => {
199
- if (resolved) return;
200
- resolved = true;
199
+ worker.on(
200
+ 'message',
201
+ (message: { type: string; error?: any; output?: unknown }) => {
202
+ if (resolved) return;
203
+ resolved = true;
201
204
 
202
- if (message.type === 'success') {
203
- resolve();
204
- } else if (message.type === 'timeout') {
205
- const timeoutError = new Error(
206
- `Job timed out after ${timeoutMs} ms and was forcefully terminated`,
207
- );
208
- // @ts-ignore
209
- timeoutError.failureReason = FailureReason.Timeout;
210
- reject(timeoutError);
211
- } else if (message.type === 'error') {
212
- const error = new Error(message.error.message);
213
- error.stack = message.error.stack;
214
- error.name = message.error.name;
215
- reject(error);
216
- }
217
- });
205
+ if (message.type === 'success') {
206
+ resolve(message.output);
207
+ } else if (message.type === 'timeout') {
208
+ const timeoutError = new Error(
209
+ `Job timed out after ${timeoutMs} ms and was forcefully terminated`,
210
+ );
211
+ // @ts-ignore
212
+ timeoutError.failureReason = FailureReason.Timeout;
213
+ reject(timeoutError);
214
+ } else if (message.type === 'error') {
215
+ const error = new Error(message.error.message);
216
+ error.stack = message.error.stack;
217
+ error.name = message.error.name;
218
+ reject(error);
219
+ }
220
+ },
221
+ );
218
222
 
219
223
  worker.on('error', (error) => {
220
224
  if (resolved) return;
@@ -318,6 +322,9 @@ function createNoOpContext(
318
322
  throw new Error('Progress must be between 0 and 100');
319
323
  await backend.updateProgress(jobId, Math.round(percent));
320
324
  },
325
+ setOutput: async (data: unknown) => {
326
+ await backend.updateOutput(jobId, data);
327
+ },
321
328
  };
322
329
  }
323
330
 
@@ -495,13 +502,21 @@ function buildWaitContext(
495
502
  throw new Error('Progress must be between 0 and 100');
496
503
  await backend.updateProgress(jobId, Math.round(percent));
497
504
  },
505
+ setOutput: async (data: unknown) => {
506
+ await backend.updateOutput(jobId, data);
507
+ },
498
508
  };
499
509
 
500
510
  return ctx;
501
511
  }
502
512
 
503
513
  /**
504
- * Process a single job using the provided handler map
514
+ * Process a single job using the provided handler map.
515
+ *
516
+ * @param backend - The queue backend.
517
+ * @param job - The job record to process.
518
+ * @param jobHandlers - Map of job type to handler function.
519
+ * @param emit - Optional callback to emit lifecycle events to the queue's EventEmitter.
505
520
  */
506
521
  export async function processJobWithHandlers<
507
522
  PayloadMap,
@@ -510,6 +525,7 @@ export async function processJobWithHandlers<
510
525
  backend: QueueBackend,
511
526
  job: JobRecord<PayloadMap, T>,
512
527
  jobHandlers: JobHandlers<PayloadMap>,
528
+ emit?: QueueEmitFn,
513
529
  ): Promise<void> {
514
530
  const handler = jobHandlers[job.jobType];
515
531
 
@@ -518,11 +534,16 @@ export async function processJobWithHandlers<
518
534
  `No handler registered for job type: ${job.jobType}`,
519
535
  job.jobType,
520
536
  );
521
- await backend.failJob(
522
- job.id,
523
- new Error(`No handler registered for job type: ${job.jobType}`),
524
- FailureReason.NoHandler,
537
+ const noHandlerError = new Error(
538
+ `No handler registered for job type: ${job.jobType}`,
525
539
  );
540
+ await backend.failJob(job.id, noHandlerError, FailureReason.NoHandler);
541
+ emit?.('job:failed', {
542
+ jobId: job.id,
543
+ jobType: job.jobType,
544
+ error: noHandlerError,
545
+ willRetry: false,
546
+ });
526
547
  return;
527
548
  }
528
549
 
@@ -544,11 +565,18 @@ export async function processJobWithHandlers<
544
565
  const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
545
566
  let timeoutId: NodeJS.Timeout | undefined;
546
567
  const controller = new AbortController();
568
+ let setOutputCalled = false;
569
+ let handlerReturnValue: unknown;
547
570
  try {
548
571
  // If forceKillOnTimeout is true, run handler in a worker thread
549
- // Note: wait features are not available in forceKillOnTimeout mode
572
+ // Note: wait features and setOutput are not available in forceKillOnTimeout mode
550
573
  if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
551
- await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
574
+ handlerReturnValue = await runHandlerInWorker(
575
+ handler,
576
+ job.payload,
577
+ timeoutMs,
578
+ job.jobType,
579
+ );
552
580
  } else {
553
581
  // Build the JobContext for prolong/onTimeout support
554
582
  let onTimeoutCallback: OnTimeoutCallback | undefined;
@@ -623,6 +651,26 @@ export async function processJobWithHandlers<
623
651
  // Build context: full wait support for all backends
624
652
  const ctx = buildWaitContext(backend, job.id, stepData, baseCtx);
625
653
 
654
+ // Wrap setProgress to also emit the event
655
+ if (emit) {
656
+ const originalSetProgress = ctx.setProgress;
657
+ ctx.setProgress = async (percent: number) => {
658
+ await originalSetProgress(percent);
659
+ emit('job:progress', {
660
+ jobId: job.id,
661
+ progress: Math.round(percent),
662
+ });
663
+ };
664
+ }
665
+
666
+ // Wrap setOutput to track calls and emit the event
667
+ const originalSetOutput = ctx.setOutput;
668
+ ctx.setOutput = async (data: unknown) => {
669
+ setOutputCalled = true;
670
+ await originalSetOutput(data);
671
+ emit?.('job:output', { jobId: job.id, output: data });
672
+ };
673
+
626
674
  // If forceKillOnTimeout was set but timeoutMs was missing, warn
627
675
  if (forceKillOnTimeout && !hasTimeout) {
628
676
  log(
@@ -633,7 +681,7 @@ export async function processJobWithHandlers<
633
681
  const jobPromise = handler(job.payload, controller.signal, ctx);
634
682
 
635
683
  if (hasTimeout) {
636
- await Promise.race([
684
+ handlerReturnValue = await Promise.race([
637
685
  jobPromise,
638
686
  new Promise<never>((_, reject) => {
639
687
  timeoutReject = reject;
@@ -641,13 +689,22 @@ export async function processJobWithHandlers<
641
689
  }),
642
690
  ]);
643
691
  } else {
644
- await jobPromise;
692
+ handlerReturnValue = await jobPromise;
645
693
  }
646
694
  }
647
695
  if (timeoutId) clearTimeout(timeoutId);
648
696
 
697
+ // Determine the output to persist on completion.
698
+ // If setOutput() was called, the value is already in the DB -- pass undefined
699
+ // so completeJob preserves it. Otherwise, use the handler's return value.
700
+ const completionOutput =
701
+ setOutputCalled || handlerReturnValue === undefined
702
+ ? undefined
703
+ : handlerReturnValue;
704
+
649
705
  // Job completed successfully -- complete via backend
650
- await backend.completeJob(job.id);
706
+ await backend.completeJob(job.id, completionOutput);
707
+ emit?.('job:completed', { jobId: job.id, jobType: job.jobType });
651
708
  } catch (error) {
652
709
  if (timeoutId) clearTimeout(timeoutId);
653
710
 
@@ -661,6 +718,7 @@ export async function processJobWithHandlers<
661
718
  waitTokenId: error.tokenId,
662
719
  stepData: error.stepData,
663
720
  });
721
+ emit?.('job:waiting', { jobId: job.id, jobType: job.jobType });
664
722
  return;
665
723
  }
666
724
 
@@ -676,16 +734,28 @@ export async function processJobWithHandlers<
676
734
  ) {
677
735
  failureReason = FailureReason.Timeout;
678
736
  }
679
- await backend.failJob(
680
- job.id,
681
- error instanceof Error ? error : new Error(String(error)),
682
- failureReason,
683
- );
737
+ const failError = error instanceof Error ? error : new Error(String(error));
738
+ await backend.failJob(job.id, failError, failureReason);
739
+ emit?.('job:failed', {
740
+ jobId: job.id,
741
+ jobType: job.jobType,
742
+ error: failError,
743
+ willRetry: job.attempts + 1 < job.maxAttempts,
744
+ });
684
745
  }
685
746
  }
686
747
 
687
748
  /**
688
- * Process a batch of jobs using the provided handler map and concurrency limit
749
+ * Process a batch of jobs using the provided handler map and concurrency limit.
750
+ *
751
+ * @param backend - The queue backend.
752
+ * @param workerId - Identifier for the worker claiming jobs.
753
+ * @param batchSize - Maximum jobs to claim per batch.
754
+ * @param jobType - Optional job type filter.
755
+ * @param jobHandlers - Map of job type to handler function.
756
+ * @param concurrency - Max parallel jobs within the batch.
757
+ * @param onError - Legacy error callback.
758
+ * @param emit - Optional callback to emit lifecycle events.
689
759
  */
690
760
  export async function processBatchWithHandlers<PayloadMap>(
691
761
  backend: QueueBackend,
@@ -695,16 +765,26 @@ export async function processBatchWithHandlers<PayloadMap>(
695
765
  jobHandlers: JobHandlers<PayloadMap>,
696
766
  concurrency?: number,
697
767
  onError?: (error: Error) => void,
768
+ emit?: QueueEmitFn,
698
769
  ): Promise<number> {
699
770
  const jobs = await backend.getNextBatch<PayloadMap, JobType<PayloadMap>>(
700
771
  workerId,
701
772
  batchSize,
702
773
  jobType,
703
774
  );
775
+
776
+ // Emit job:processing for each claimed job
777
+ if (emit) {
778
+ for (const job of jobs) {
779
+ emit('job:processing', { jobId: job.id, jobType: job.jobType });
780
+ }
781
+ }
782
+
704
783
  if (!concurrency || concurrency >= jobs.length) {
705
- // Default: all in parallel
706
784
  await Promise.all(
707
- jobs.map((job) => processJobWithHandlers(backend, job, jobHandlers)),
785
+ jobs.map((job) =>
786
+ processJobWithHandlers(backend, job, jobHandlers, emit),
787
+ ),
708
788
  );
709
789
  return jobs.length;
710
790
  }
@@ -718,7 +798,7 @@ export async function processBatchWithHandlers<PayloadMap>(
718
798
  while (running < concurrency && idx < jobs.length) {
719
799
  const job = jobs[idx++];
720
800
  running++;
721
- processJobWithHandlers(backend, job, jobHandlers)
801
+ processJobWithHandlers(backend, job, jobHandlers, emit)
722
802
  .then(() => {
723
803
  running--;
724
804
  finished++;
@@ -740,17 +820,20 @@ export async function processBatchWithHandlers<PayloadMap>(
740
820
 
741
821
  /**
742
822
  * Start a job processor that continuously processes jobs.
823
+ *
743
824
  * @param backend - The queue backend.
744
825
  * @param handlers - The job handlers for this processor instance.
745
826
  * @param options - The processor options. Leave pollInterval empty to run only once. Use jobType to filter jobs by type.
746
827
  * @param onBeforeBatch - Optional callback invoked before each batch. Used internally to enqueue due cron jobs.
747
- * @returns {Processor} The processor instance.
828
+ * @param emit - Optional callback to emit lifecycle events to the queue's EventEmitter.
829
+ * @returns The processor instance.
748
830
  */
749
831
  export const createProcessor = <PayloadMap = any>(
750
832
  backend: QueueBackend,
751
833
  handlers: JobHandlers<PayloadMap>,
752
834
  options: ProcessorOptions = {},
753
835
  onBeforeBatch?: () => Promise<void>,
836
+ emit?: QueueEmitFn,
754
837
  ): Processor => {
755
838
  const {
756
839
  workerId = `worker-${Math.random().toString(36).substring(2, 9)}`,
@@ -776,13 +859,12 @@ export const createProcessor = <PayloadMap = any>(
776
859
  await onBeforeBatch();
777
860
  } catch (hookError) {
778
861
  log(`onBeforeBatch hook error: ${hookError}`);
862
+ const err =
863
+ hookError instanceof Error ? hookError : new Error(String(hookError));
779
864
  if (onError) {
780
- onError(
781
- hookError instanceof Error
782
- ? hookError
783
- : new Error(String(hookError)),
784
- );
865
+ onError(err);
785
866
  }
867
+ emit?.('error', err);
786
868
  }
787
869
  }
788
870
 
@@ -799,11 +881,13 @@ export const createProcessor = <PayloadMap = any>(
799
881
  handlers,
800
882
  concurrency,
801
883
  onError,
884
+ emit,
802
885
  );
803
- // Only process one batch in start; do not schedule next batch here
804
886
  return processed;
805
887
  } catch (error) {
806
- onError(error instanceof Error ? error : new Error(String(error)));
888
+ const err = error instanceof Error ? error : new Error(String(error));
889
+ onError(err);
890
+ emit?.('error', err);
807
891
  }
808
892
  return 0;
809
893
  };