@nicnocquee/dataqueue 1.26.0-beta.20260223202259 → 1.26.0-beta.20260223204425

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -45,7 +45,7 @@
45
45
  "slug": "api/job-record",
46
46
  "title": "JobRecord",
47
47
  "description": "",
48
- "content": "The `JobRecord` interface represents a job stored in the queue, including its status, attempts, and metadata.\n\n## Fields\n\n- `id`: _number_ — Unique job ID.\n- `jobType`: _string_ — The type of the job.\n- `payload`: _any_ — The job payload.\n- `status`:\n _'pending' | 'processing' | 'completed' | 'failed' | 'cancelled'_ —\n Current job status.\n- `createdAt`: _Date_ — When the job was created.\n- `updated_at`: _Date_ — When the job was last updated.\n- `locked_at`: _Date | null_ — When the job was locked for\n processing.\n- `locked_by`: _string | null_ — Worker that locked the job.\n- `attempts`: _number_ — Number of attempts so far.\n- `maxAttempts`: _number_ — Maximum allowed attempts.\n- `nextAttemptAt`: _Date | null_ — When the next attempt is\n scheduled.\n- `priority`: _number_ — Job priority.\n- `runAt`: _Date_ — When the job is scheduled to run.\n- `pendingReason?`: _string | null_ — Reason for pending\n status.\n- `errorHistory?`: _\\{ message: string; timestamp: string \\}[]_ — Error history for the job.\n- `timeoutMs?`: _number | null_ — Timeout for this job in\n milliseconds.\n- `failureReason?`: _FailureReason | null_ — Reason for last\n failure, if any.\n- `completedAt`: _Date | null_ — When the job was completed.\n- `startedAt`: _Date | null_ — When the job was first picked up\n for processing.\n- `lastRetriedAt`: _Date | null_ — When the job was last\n retried.\n- `lastFailedAt`: _Date | null_ — When the job last failed.\n- `lastCancelledAt`: _Date | null_ — When the job was last\n cancelled.\n- `tags?`: _string[]_ — Tags for this job. Used for grouping, searching, or batch operations.\n- `idempotencyKey?`: _string | null_ — The idempotency key for this job, if one was provided when the job was created.\n- `progress?`: _number | null_ — Progress percentage (0–100) reported by the handler via `ctx.setProgress()`. `null` if no progress has been reported. 
See [Progress Tracking](/usage/progress-tracking).\n\n## Example\n\n```json\n{\n \"id\": 1,\n \"jobType\": \"email\",\n \"payload\": { \"to\": \"user@example.com\", \"subject\": \"Hello\" },\n \"status\": \"pending\",\n \"createdAt\": \"2024-06-01T12:00:00Z\",\n \"tags\": [\"welcome\", \"user\"],\n \"idempotencyKey\": \"welcome-email-user-123\",\n \"progress\": null\n}\n```"
48
+ "content": "The `JobRecord` interface represents a job stored in the queue, including its status, attempts, and metadata.\n\n## Fields\n\n- `id`: _number_ — Unique job ID.\n- `jobType`: _string_ — The type of the job.\n- `payload`: _any_ — The job payload.\n- `status`:\n _'pending' | 'processing' | 'completed' | 'failed' | 'cancelled'_ —\n Current job status.\n- `createdAt`: _Date_ — When the job was created.\n- `updatedAt`: _Date_ — When the job was last updated.\n- `lockedAt`: _Date | null_ — When the job was locked for\n processing.\n- `lockedBy`: _string | null_ — Worker that locked the job.\n- `attempts`: _number_ — Number of attempts so far.\n- `maxAttempts`: _number_ — Maximum allowed attempts.\n- `nextAttemptAt`: _Date | null_ — When the next attempt is\n scheduled.\n- `priority`: _number_ — Job priority.\n- `runAt`: _Date_ — When the job is scheduled to run.\n- `pendingReason?`: _string | null_ — Reason for pending\n status.\n- `errorHistory?`: _\{ message: string; timestamp: string \}[]_ — Error history for the job.\n- `timeoutMs?`: _number | null_ — Timeout for this job in\n milliseconds.\n- `failureReason?`: _FailureReason | null_ — Reason for last\n failure, if any.\n- `completedAt`: _Date | null_ — When the job was completed.\n- `startedAt`: _Date | null_ — When the job was first picked up\n for processing.\n- `lastRetriedAt`: _Date | null_ — When the job was last\n retried.\n- `lastFailedAt`: _Date | null_ — When the job last failed.\n- `lastCancelledAt`: _Date | null_ — When the job was last\n cancelled.\n- `tags?`: _string[]_ — Tags for this job. Used for grouping, searching, or batch operations.\n- `idempotencyKey?`: _string | null_ — The idempotency key for this job, if one was provided when the job was created.\n- `progress?`: _number | null_ — Progress percentage (0–100) reported by the handler via `ctx.setProgress()`. `null` if no progress has been reported. 
See [Progress Tracking](/usage/progress-tracking).\n- `output?`: _unknown_ — Handler output stored via `ctx.setOutput(data)` or by returning a value from the handler. `null` if no output has been stored. See [Job Output](/usage/job-output).\n\n## Example\n\n```json\n{\n \"id\": 1,\n \"jobType\": \"email\",\n \"payload\": { \"to\": \"user@example.com\", \"subject\": \"Hello\" },\n \"status\": \"completed\",\n \"createdAt\": \"2024-06-01T12:00:00Z\",\n \"tags\": [\"welcome\", \"user\"],\n \"idempotencyKey\": \"welcome-email-user-123\",\n \"progress\": 100,\n \"output\": { \"messageId\": \"abc-123\", \"sentAt\": \"2024-06-01T12:00:05Z\" }\n}\n```"
49
49
  },
50
50
  {
51
51
  "slug": "api/processor",
@@ -227,6 +227,12 @@
227
227
  "description": "",
228
228
  "content": "The first thing you need to do is define your job types and their corresponding payload types. A payload is the data passed to the job handler. A job handler is a function that runs when a job is processed.\n\n### Define Job Types and Payloads\n\nJob types and their payloads are specific to your app. You can define them in any file. The important thing is that they are an object type, where the keys are the job types and the values are the payload types. In this example, `send_email`, `generate_report`, and `generate_image` are the job types, and their values are the payload types.\n\n```typescript title=\"@lib/types/job-payload-map.ts\"\n// Define the job payload map for this app.\n// This ensures that the job payload is typed correctly when adding jobs.\n// The keys are the job types, and the values are the payload types.\nexport type JobPayloadMap = {\n send_email: {\n to: string;\n subject: string;\n body: string;\n };\n generate_report: {\n reportId: string;\n userId: string;\n };\n generate_image: {\n prompt: string;\n };\n};\n```\n\n### Define Job Handlers\n\nNext, define the job handlers by exporting a `JobHandlers` object that maps job types to handler functions. 
If you forget to add a handler for a job type, TypeScript will show an error.\n\n```typescript title=\"@lib/job-handlers.ts\"\nimport { sendEmail } from './services/email'; // Function to send the email\nimport { generateReport } from './services/generate-report'; // Function to generate the report\nimport { JobHandlers } from '@nicnocquee/dataqueue';\n\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n send_email: async (payload) => {\n const { to, subject, body } = payload;\n await sendEmail(to, subject, body);\n },\n generate_report: async (payload) => {\n const { reportId, userId } = payload;\n await generateReport(reportId, userId);\n },\n generate_image: async (payload, signal) => {\n const { prompt } = payload;\n await generateImageAi(prompt, signal);\n },\n};\n```\n\nIn the example above, we define three job handlers: `send_email`, `generate_report`, and `generate_image`. Each handler is a function that takes a payload, an `AbortSignal`, and a `JobContext` as arguments. The `AbortSignal` is used to abort the job if it takes too long to complete. The `JobContext` provides methods to extend the job's timeout while it's running.\n\n### Job Handler Signature\n\nA job handler receives three arguments: the job payload, an `AbortSignal`, and a `JobContext`.\n\n```typescript\n(payload: Payload, signal: AbortSignal, ctx: JobContext) => Promise<void>;\n```\n\nYou can omit arguments you don't need. For example, if you only need the payload:\n\n```typescript\nconst handler = async (payload) => {\n // ...\n};\n```\n\n### JobContext\n\nThe third argument provides methods for timeout management and progress reporting:\n\n- `ctx.prolong(ms?)` — Proactively reset the timeout. If `ms` is provided, sets the deadline to `ms` milliseconds from now. If omitted, resets to the original `timeoutMs`.\n- `ctx.onTimeout(callback)` — Register a callback that fires when the timeout is about to hit, before the `AbortSignal` is triggered. 
Return a number (ms) to extend, or return nothing to let the timeout proceed.\n- `ctx.setProgress(percent)` — Report progress as a percentage (0–100). The value is persisted to the database and can be read by clients via `getJob()` or the React SDK's `useJob()` hook.\n\nSee [Job Timeout](/usage/job-timeout) for timeout examples and [Progress Tracking](/usage/progress-tracking) for progress reporting."
229
229
  },
230
+ {
231
+ "slug": "usage/job-output",
232
+ "title": "Job Output",
233
+ "description": "Store and retrieve results from job handlers",
234
+ "content": "Jobs can store an output value when they complete. This is useful when you need to retrieve the result of a background task — for example, a generated report URL, a processed image path, or computation results.\n\n## Storing Output\n\nThere are two ways to store output from a handler:\n\n### 1. Return a value from the handler\n\nThe simplest approach — return any JSON-serializable value from your handler function:\n\n```typescript title=\"@lib/job-handlers.ts\"\nimport { JobHandlers } from '@nicnocquee/dataqueue';\n\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n generate_report: async (payload, signal, ctx) => {\n const url = await generateReport(payload.reportId);\n return { url, generatedAt: new Date().toISOString() };\n },\n};\n```\n\n### 2. Use `ctx.setOutput(data)`\n\nFor more control, call `ctx.setOutput()` explicitly. This is useful when you want to store intermediate results during execution:\n\n```typescript title=\"@lib/job-handlers.ts\"\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n process_images: async (payload, signal, ctx) => {\n const results: string[] = [];\n\n for (const image of payload.images) {\n const url = await processImage(image);\n results.push(url);\n\n await ctx.setProgress(\n Math.round((results.length / payload.images.length) * 100),\n );\n await ctx.setOutput({ processedUrls: results });\n }\n },\n};\n```\n\n### Precedence\n\nIf `ctx.setOutput()` is called **and** the handler also returns a value, the `ctx.setOutput()` value takes precedence. 
The handler's return value is ignored in that case.\n\n### Rules\n\n- **JSON-serializable**: The output value must be JSON-serializable (objects, arrays, strings, numbers, booleans, null).\n- **Last write wins**: Calling `ctx.setOutput()` multiple times overwrites the previous value.\n- **Best-effort persistence**: Like `setProgress`, output writes to the database are best-effort — errors do not kill the handler.\n\n## Reading Output\n\nOutput is stored in the `output` field of the [JobRecord](/api/job-record):\n\n```typescript\nconst job = await jobQueue.getJob(jobId);\nconsole.log(job?.output); // null | any JSON value\n```\n\n- Before the handler stores output, the value is `null`.\n- After the job completes, the output is preserved and can be read at any time.\n- Handlers that return `undefined` (or `void`) do not store output — the field remains `null`.\n\n## Tracking Output in React\n\nIf you're using the [React SDK](/usage/react-sdk), the `useJob` hook exposes `output` directly:\n\n```tsx\nimport { useJob } from '@nicnocquee/dataqueue-react';\n\nfunction JobResult({ jobId }: { jobId: number }) {\n const { status, output, progress } = useJob(jobId, {\n fetcher: (id) =>\n fetch(`/api/jobs/${id}`)\n .then((r) => r.json())\n .then((d) => d.job),\n });\n\n if (status === 'completed' && output) {\n return <a href={(output as any).url}>Download Report</a>;\n }\n\n return (\n <div>\n <p>Status: {status}</p>\n <progress value={progress ?? 0} max={100} />\n </div>\n );\n}\n```\n\n## Listening for Output Events\n\nYou can subscribe to the `job:output` event to be notified whenever a handler calls `ctx.setOutput()`:\n\n```typescript\njobQueue.on('job:output', ({ jobId, output }) => {\n console.log(`Job ${jobId} stored output:`, output);\n});\n```\n\n## Database Migration\n\n> **Note:** If you're using the **PostgreSQL** backend, make sure to run the latest\n migrations to add the `output` column. 
See [Database\n Migration](/usage/database-migration).\n\nThe Redis backend requires no migration — the `output` field is stored automatically as part of the job hash."
235
+ },
230
236
  {
231
237
  "slug": "usage/job-timeout",
232
238
  "title": "Job Timeout",
@@ -249,7 +255,7 @@
249
255
  "slug": "usage/progress-tracking",
250
256
  "title": "Progress Tracking",
251
257
  "description": "Report and track job progress from handlers",
252
- "content": "Jobs can report their progress as a percentage (0–100) while they run. This is useful for long-running tasks like file processing, data imports, or image generation where you want to show a progress bar or percentage to the user.\n\n## Reporting Progress from a Handler\n\nUse `ctx.setProgress(percent)` inside your job handler to report progress:\n\n```typescript title=\"@lib/job-handlers.ts\"\nimport { JobHandlers } from '@nicnocquee/dataqueue';\n\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n generate_report: async (payload, signal, ctx) => {\n const chunks = await loadData(payload.reportId);\n\n for (let i = 0; i < chunks.length; i++) {\n if (signal.aborted) return;\n\n await processChunk(chunks[i]);\n\n // Report progress (0-100)\n await ctx.setProgress(Math.round(((i + 1) / chunks.length) * 100));\n }\n },\n};\n```\n\n### setProgress Rules\n\n- **Range**: The value must be between 0 and 100 (inclusive). Values outside this range throw an error.\n- **Rounding**: Fractional values are rounded to the nearest integer (`33.7` becomes `34`).\n- **Best-effort persistence**: Progress is written to the database but errors during the write do not kill the handler — processing continues.\n\n## Reading Progress\n\nProgress is stored in the `progress` field of the [JobRecord](/api/job-record):\n\n```typescript\nconst job = await jobQueue.getJob(jobId);\nconsole.log(job?.progress); // null | 0–100\n```\n\n- Before the handler calls `setProgress`, the value is `null`.\n- After the job completes, the last progress value is preserved (typically `100`).\n\n## Tracking Progress in React\n\nIf you're using the [React SDK](/usage/react-sdk), the `useJob` hook exposes `progress` directly:\n\n```tsx\nimport { useJob } from '@nicnocquee/dataqueue-react';\n\nfunction JobProgress({ jobId }: { jobId: number }) {\n const { status, progress } = useJob(jobId, {\n fetcher: (id) =>\n fetch(`/api/jobs/${id}`)\n .then((r) => r.json())\n .then((d) => d.job),\n });\n\n 
return (\n <div>\n <p>Status: {status}</p>\n <progress value={progress ?? 0} max={100} />\n <span>{progress ?? 0}%</span>\n </div>\n );\n}\n```\n\n## Database Migration\n\n> **Note:** If you're using the **PostgreSQL** backend, make sure to run the latest\n migrations to add the `progress` column. See [Database\n Migration](/usage/database-migration).\n\nThe Redis backend requires no migration — the `progress` field is stored automatically as part of the job hash."
258
+ "content": "Jobs can report their progress as a percentage (0–100) while they run. This is useful for long-running tasks like file processing, data imports, or image generation where you want to show a progress bar or percentage to the user.\n\n## Reporting Progress from a Handler\n\nUse `ctx.setProgress(percent)` inside your job handler to report progress:\n\n```typescript title=\"@lib/job-handlers.ts\"\nimport { JobHandlers } from '@nicnocquee/dataqueue';\n\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n generate_report: async (payload, signal, ctx) => {\n const chunks = await loadData(payload.reportId);\n\n for (let i = 0; i < chunks.length; i++) {\n if (signal.aborted) return;\n\n await processChunk(chunks[i]);\n\n // Report progress (0-100)\n await ctx.setProgress(Math.round(((i + 1) / chunks.length) * 100));\n }\n },\n};\n```\n\n### setProgress Rules\n\n- **Range**: The value must be between 0 and 100 (inclusive). Values outside this range throw an error.\n- **Rounding**: Fractional values are rounded to the nearest integer (`33.7` becomes `34`).\n- **Best-effort persistence**: Progress is written to the database but errors during the write do not kill the handler — processing continues.\n\n## Reading Progress\n\nProgress is stored in the `progress` field of the [JobRecord](/api/job-record):\n\n```typescript\nconst job = await jobQueue.getJob(jobId);\nconsole.log(job?.progress); // null | 0–100\n```\n\n- Before the handler calls `setProgress`, the value is `null`.\n- After the job completes, the last progress value is preserved (typically `100`).\n\n## Tracking Progress in React\n\nIf you're using the [React SDK](/usage/react-sdk), the `useJob` hook exposes `progress` directly:\n\n```tsx\nimport { useJob } from '@nicnocquee/dataqueue-react';\n\nfunction JobProgress({ jobId }: { jobId: number }) {\n const { status, progress } = useJob(jobId, {\n fetcher: (id) =>\n fetch(`/api/jobs/${id}`)\n .then((r) => r.json())\n .then((d) => d.job),\n });\n\n 
return (\n <div>\n <p>Status: {status}</p>\n <progress value={progress ?? 0} max={100} />\n <span>{progress ?? 0}%</span>\n </div>\n );\n}\n```\n\n## Database Migration\n\n> **Note:** If you're using the **PostgreSQL** backend, make sure to run the latest\n migrations to add the `progress` column. See [Database\n Migration](/usage/database-migration).\n\nThe Redis backend requires no migration — the `progress` field is stored automatically as part of the job hash.\n\n## Related\n\n- [Job Output](/usage/job-output) — Store and retrieve results from job handlers using `ctx.setOutput()` or handler return values."
253
259
  },
254
260
  {
255
261
  "slug": "usage/quick-start",
@@ -130,7 +130,7 @@ queue.on('job:failed', ({ jobId, jobType, error, willRetry }) => {
130
130
  queue.on('error', (error) => Sentry.captureException(error));
131
131
  ```
132
132
 
133
- Events: `job:added`, `job:processing`, `job:completed`, `job:failed` (with `willRetry`), `job:cancelled`, `job:retried`, `job:waiting`, `job:progress`, `error`.
133
+ Events: `job:added`, `job:processing`, `job:completed`, `job:failed` (with `willRetry`), `job:cancelled`, `job:retried`, `job:waiting`, `job:progress`, `job:output`, `error`.
134
134
 
135
135
  `error` events fire alongside `onError` callbacks in `ProcessorOptions` / `SupervisorOptions` — both mechanisms work independently.
136
136
 
@@ -148,3 +148,23 @@ await ctx.setProgress(50); // 0–100, persisted to DB
148
148
  ```
149
149
 
150
150
  Read via `queue.getJob(id)` (`progress` field) or React SDK's `useJob` hook.
151
+
152
+ ## Job Output
153
+
154
+ Store results via `ctx.setOutput(data)` or by returning a value from the handler:
155
+
156
+ ```typescript
157
+ // Option 1: return a value
158
+ const handler = async (payload, signal, ctx) => {
159
+ const result = await doWork(payload);
160
+ return { url: result.downloadUrl };
161
+ };
162
+
163
+ // Option 2: ctx.setOutput (takes precedence over return value)
164
+ const handler = async (payload, signal, ctx) => {
165
+ const result = await doWork(payload);
166
+ await ctx.setOutput({ url: result.downloadUrl });
167
+ };
168
+ ```
169
+
170
+ Read via `queue.getJob(id)` (`output` field) or React SDK's `useJob` hook (`output` property).
@@ -10,7 +10,7 @@ Install: `npm install @nicnocquee/dataqueue-react` (requires React 18+).
10
10
  'use client';
11
11
  import { useJob } from '@nicnocquee/dataqueue-react';
12
12
 
13
- const { status, progress, data, isLoading, error } = useJob(jobId, {
13
+ const { status, progress, output, data, isLoading, error } = useJob(jobId, {
14
14
  fetcher: (id) =>
15
15
  fetch(`/api/jobs/${id}`)
16
16
  .then((r) => r.json())
@@ -81,3 +81,7 @@ Wrap handlers with your auth middleware before exporting GET/POST.
81
81
  ## Progress Tracking
82
82
 
83
83
  Use `ctx.setProgress(percent)` in handlers (0–100). The value appears in `useJob`'s `progress` field and the dashboard detail view.
84
+
85
+ ## Job Output
86
+
87
+ Store results via `ctx.setOutput(data)` or by returning a value from the handler. The value appears in `useJob`'s `output` field and the dashboard detail view. If both are used, `ctx.setOutput()` takes precedence.
@@ -38,7 +38,8 @@ export const jobHandlers: JobHandlers<JobPayloadMap> = {
38
38
  },
39
39
  generate_report: async (payload, signal) => {
40
40
  if (signal.aborted) return;
41
- await generateReport(payload.reportId, payload.userId);
41
+ const url = await generateReport(payload.reportId, payload.userId);
42
+ return { url }; // stored as job output, readable via getJob()
42
43
  },
43
44
  };
44
45
  ```
@@ -102,13 +102,14 @@ export async function GET(
102
102
 
103
103
  ### useJob Return Value
104
104
 
105
- | Field | Type | Description |
106
- | ----------- | ------------------- | ------------------------------- |
107
- | `data` | `JobData \| null` | Latest job data from fetcher |
108
- | `status` | `JobStatus \| null` | Current job status |
109
- | `progress` | `number \| null` | Progress percentage (0–100) |
110
- | `isLoading` | `boolean` | True until first fetch resolves |
111
- | `error` | `Error \| null` | Last fetch error |
105
+ | Field | Type | Description |
106
+ | ----------- | ------------------- | ----------------------------------------------------- |
107
+ | `data` | `JobData \| null` | Latest job data from fetcher |
108
+ | `status` | `JobStatus \| null` | Current job status |
109
+ | `progress` | `number \| null` | Progress percentage (0–100) |
110
+ | `output` | `unknown \| null` | Handler output from `ctx.setOutput()` or return value |
111
+ | `isLoading` | `boolean` | True until first fetch resolves |
112
+ | `error` | `Error \| null` | Last fetch error |
112
113
 
113
114
  ## Dashboard — @nicnocquee/dataqueue-dashboard
114
115
 
@@ -187,3 +188,14 @@ const handler = async (payload, signal, ctx) => {
187
188
  }
188
189
  };
189
190
  ```
191
+
192
+ ### Job Output from Handlers
193
+
194
+ Store results via `ctx.setOutput(data)` or by returning a value from the handler. Exposed via `getJob()` (`output` field) and the `useJob` hook's `output` property. If both are used, `ctx.setOutput()` takes precedence.
195
+
196
+ ```typescript
197
+ const handler = async (payload, signal, ctx) => {
198
+ const result = await doWork(payload);
199
+ return { url: result.downloadUrl }; // stored as output
200
+ };
201
+ ```
package/dist/index.cjs CHANGED
@@ -151,9 +151,9 @@ async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
151
151
  }
152
152
 
153
153
  handlerFn(payload, signal)
154
- .then(() => {
154
+ .then((result) => {
155
155
  clearTimeout(timeoutId);
156
- parentPort.postMessage({ type: 'success' });
156
+ parentPort.postMessage({ type: 'success', output: result });
157
157
  })
158
158
  .catch((error) => {
159
159
  clearTimeout(timeoutId);
@@ -188,24 +188,27 @@ async function runHandlerInWorker(handler, payload, timeoutMs, jobType) {
188
188
  }
189
189
  });
190
190
  let resolved = false;
191
- worker.on("message", (message) => {
192
- if (resolved) return;
193
- resolved = true;
194
- if (message.type === "success") {
195
- resolve();
196
- } else if (message.type === "timeout") {
197
- const timeoutError = new Error(
198
- `Job timed out after ${timeoutMs} ms and was forcefully terminated`
199
- );
200
- timeoutError.failureReason = "timeout" /* Timeout */;
201
- reject(timeoutError);
202
- } else if (message.type === "error") {
203
- const error = new Error(message.error.message);
204
- error.stack = message.error.stack;
205
- error.name = message.error.name;
206
- reject(error);
191
+ worker.on(
192
+ "message",
193
+ (message) => {
194
+ if (resolved) return;
195
+ resolved = true;
196
+ if (message.type === "success") {
197
+ resolve(message.output);
198
+ } else if (message.type === "timeout") {
199
+ const timeoutError = new Error(
200
+ `Job timed out after ${timeoutMs} ms and was forcefully terminated`
201
+ );
202
+ timeoutError.failureReason = "timeout" /* Timeout */;
203
+ reject(timeoutError);
204
+ } else if (message.type === "error") {
205
+ const error = new Error(message.error.message);
206
+ error.stack = message.error.stack;
207
+ error.name = message.error.name;
208
+ reject(error);
209
+ }
207
210
  }
208
- });
211
+ );
209
212
  worker.on("error", (error) => {
210
213
  if (resolved) return;
211
214
  resolved = true;
@@ -362,6 +365,9 @@ function buildWaitContext(backend, jobId, stepData, baseCtx) {
362
365
  if (percent < 0 || percent > 100)
363
366
  throw new Error("Progress must be between 0 and 100");
364
367
  await backend.updateProgress(jobId, Math.round(percent));
368
+ },
369
+ setOutput: async (data) => {
370
+ await backend.updateOutput(jobId, data);
365
371
  }
366
372
  };
367
373
  return ctx;
@@ -397,9 +403,16 @@ async function processJobWithHandlers(backend, job, jobHandlers, emit) {
397
403
  const forceKillOnTimeout = job.forceKillOnTimeout ?? false;
398
404
  let timeoutId;
399
405
  const controller = new AbortController();
406
+ let setOutputCalled = false;
407
+ let handlerReturnValue;
400
408
  try {
401
409
  if (forceKillOnTimeout && timeoutMs && timeoutMs > 0) {
402
- await runHandlerInWorker(handler, job.payload, timeoutMs, job.jobType);
410
+ handlerReturnValue = await runHandlerInWorker(
411
+ handler,
412
+ job.payload,
413
+ timeoutMs,
414
+ job.jobType
415
+ );
403
416
  } else {
404
417
  let onTimeoutCallback;
405
418
  let timeoutReject;
@@ -461,6 +474,12 @@ async function processJobWithHandlers(backend, job, jobHandlers, emit) {
461
474
  });
462
475
  };
463
476
  }
477
+ const originalSetOutput = ctx.setOutput;
478
+ ctx.setOutput = async (data) => {
479
+ setOutputCalled = true;
480
+ await originalSetOutput(data);
481
+ emit?.("job:output", { jobId: job.id, output: data });
482
+ };
464
483
  if (forceKillOnTimeout && !hasTimeout) {
465
484
  log(
466
485
  `forceKillOnTimeout is set but no timeoutMs for job ${job.id}, running without force kill`
@@ -468,7 +487,7 @@ async function processJobWithHandlers(backend, job, jobHandlers, emit) {
468
487
  }
469
488
  const jobPromise = handler(job.payload, controller.signal, ctx);
470
489
  if (hasTimeout) {
471
- await Promise.race([
490
+ handlerReturnValue = await Promise.race([
472
491
  jobPromise,
473
492
  new Promise((_, reject) => {
474
493
  timeoutReject = reject;
@@ -476,11 +495,12 @@ async function processJobWithHandlers(backend, job, jobHandlers, emit) {
476
495
  })
477
496
  ]);
478
497
  } else {
479
- await jobPromise;
498
+ handlerReturnValue = await jobPromise;
480
499
  }
481
500
  }
482
501
  if (timeoutId) clearTimeout(timeoutId);
483
- await backend.completeJob(job.id);
502
+ const completionOutput = setOutputCalled || handlerReturnValue === void 0 ? void 0 : handlerReturnValue;
503
+ await backend.completeJob(job.id, completionOutput);
484
504
  emit?.("job:completed", { jobId: job.id, jobType: job.jobType });
485
505
  } catch (error) {
486
506
  if (timeoutId) clearTimeout(timeoutId);
@@ -1234,7 +1254,7 @@ var PostgresBackend = class {
1234
1254
  const client = await this.pool.connect();
1235
1255
  try {
1236
1256
  const result = await client.query(
1237
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax" FROM job_queue WHERE id = $1`,
1257
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output FROM job_queue WHERE id = $1`,
1238
1258
  [id]
1239
1259
  );
1240
1260
  if (result.rows.length === 0) {
@@ -1261,7 +1281,7 @@ var PostgresBackend = class {
1261
1281
  const client = await this.pool.connect();
1262
1282
  try {
1263
1283
  const result = await client.query(
1264
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax" FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
1284
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output FROM job_queue WHERE status = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3`,
1265
1285
  [status, limit, offset]
1266
1286
  );
1267
1287
  log(`Found ${result.rows.length} jobs by status ${status}`);
@@ -1283,7 +1303,7 @@ var PostgresBackend = class {
1283
1303
  const client = await this.pool.connect();
1284
1304
  try {
1285
1305
  const result = await client.query(
1286
- `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax" FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
1306
+ `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output FROM job_queue ORDER BY created_at DESC LIMIT $1 OFFSET $2`,
1287
1307
  [limit, offset]
1288
1308
  );
1289
1309
  log(`Found ${result.rows.length} jobs (all)`);
@@ -1303,7 +1323,7 @@ var PostgresBackend = class {
1303
1323
  async getJobs(filters, limit = 100, offset = 0) {
1304
1324
  const client = await this.pool.connect();
1305
1325
  try {
1306
- let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax" FROM job_queue`;
1326
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output FROM job_queue`;
1307
1327
  const params = [];
1308
1328
  const where = [];
1309
1329
  let paramIdx = 1;
@@ -1404,7 +1424,7 @@ var PostgresBackend = class {
1404
1424
  async getJobsByTags(tags, mode = "all", limit = 100, offset = 0) {
1405
1425
  const client = await this.pool.connect();
1406
1426
  try {
1407
- let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax"
1427
+ let query = `SELECT id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_failed_at AS "lastFailedAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", tags, idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output
1408
1428
  FROM job_queue`;
1409
1429
  let params = [];
1410
1430
  switch (mode) {
@@ -1498,7 +1518,7 @@ var PostgresBackend = class {
1498
1518
  LIMIT $2
1499
1519
  FOR UPDATE SKIP LOCKED
1500
1520
  )
1501
- RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax"
1521
+ RETURNING id, job_type AS "jobType", payload, status, max_attempts AS "maxAttempts", attempts, priority, run_at AS "runAt", timeout_ms AS "timeoutMs", force_kill_on_timeout AS "forceKillOnTimeout", created_at AS "createdAt", updated_at AS "updatedAt", started_at AS "startedAt", completed_at AS "completedAt", last_failed_at AS "lastFailedAt", locked_at AS "lockedAt", locked_by AS "lockedBy", error_history AS "errorHistory", failure_reason AS "failureReason", next_attempt_at AS "nextAttemptAt", last_retried_at AS "lastRetriedAt", last_cancelled_at AS "lastCancelledAt", pending_reason AS "pendingReason", idempotency_key AS "idempotencyKey", wait_until AS "waitUntil", wait_token_id AS "waitTokenId", step_data AS "stepData", progress, retry_delay AS "retryDelay", retry_backoff AS "retryBackoff", retry_delay_max AS "retryDelayMax", output
1502
1522
  `,
1503
1523
  params
1504
1524
  );
@@ -1526,17 +1546,19 @@ var PostgresBackend = class {
1526
1546
  client.release();
1527
1547
  }
1528
1548
  }
1529
- async completeJob(jobId) {
1549
+ async completeJob(jobId, output) {
1530
1550
  const client = await this.pool.connect();
1531
1551
  try {
1552
+ const outputJson = output !== void 0 ? JSON.stringify(output) : null;
1532
1553
  const result = await client.query(
1533
1554
  `
1534
1555
  UPDATE job_queue
1535
1556
  SET status = 'completed', updated_at = NOW(), completed_at = NOW(),
1536
- step_data = NULL, wait_until = NULL, wait_token_id = NULL
1557
+ step_data = NULL, wait_until = NULL, wait_token_id = NULL,
1558
+ output = COALESCE($2::jsonb, output)
1537
1559
  WHERE id = $1 AND status = 'processing'
1538
1560
  `,
1539
- [jobId]
1561
+ [jobId, outputJson]
1540
1562
  );
1541
1563
  if (result.rowCount === 0) {
1542
1564
  log(
@@ -1639,6 +1661,21 @@ var PostgresBackend = class {
1639
1661
  client.release();
1640
1662
  }
1641
1663
  }
1664
+ // ── Output ────────────────────────────────────────────────────────────
1665
+ async updateOutput(jobId, output) {
1666
+ const client = await this.pool.connect();
1667
+ try {
1668
+ await client.query(
1669
+ `UPDATE job_queue SET output = $2::jsonb, updated_at = NOW() WHERE id = $1`,
1670
+ [jobId, JSON.stringify(output)]
1671
+ );
1672
+ log(`Updated output for job ${jobId}`);
1673
+ } catch (error) {
1674
+ log(`Error updating output for job ${jobId}: ${error}`);
1675
+ } finally {
1676
+ client.release();
1677
+ }
1678
+ }
1642
1679
  // ── Job management ────────────────────────────────────────────────────
1643
1680
  async retryJob(jobId) {
1644
1681
  const client = await this.pool.connect();
@@ -3038,16 +3075,24 @@ var COMPLETE_JOB_SCRIPT = `
3038
3075
  local prefix = KEYS[1]
3039
3076
  local jobId = ARGV[1]
3040
3077
  local nowMs = ARGV[2]
3078
+ local outputJson = ARGV[3]
3041
3079
  local jk = prefix .. 'job:' .. jobId
3042
3080
 
3043
- redis.call('HMSET', jk,
3081
+ local fields = {
3044
3082
  'status', 'completed',
3045
3083
  'updatedAt', nowMs,
3046
3084
  'completedAt', nowMs,
3047
3085
  'stepData', 'null',
3048
3086
  'waitUntil', 'null',
3049
3087
  'waitTokenId', 'null'
3050
- )
3088
+ }
3089
+
3090
+ if outputJson ~= '__NONE__' then
3091
+ fields[#fields + 1] = 'output'
3092
+ fields[#fields + 1] = outputJson
3093
+ end
3094
+
3095
+ redis.call('HMSET', jk, unpack(fields))
3051
3096
  redis.call('SREM', prefix .. 'status:processing', jobId)
3052
3097
  redis.call('SADD', prefix .. 'status:completed', jobId)
3053
3098
 
@@ -3510,9 +3555,18 @@ function deserializeJob(h) {
3510
3555
  stepData: parseStepData(h.stepData),
3511
3556
  retryDelay: numOrNull(h.retryDelay),
3512
3557
  retryBackoff: h.retryBackoff === "true" ? true : h.retryBackoff === "false" ? false : null,
3513
- retryDelayMax: numOrNull(h.retryDelayMax)
3558
+ retryDelayMax: numOrNull(h.retryDelayMax),
3559
+ output: parseJsonField(h.output)
3514
3560
  };
3515
3561
  }
3562
+ function parseJsonField(raw) {
3563
+ if (!raw || raw === "null") return null;
3564
+ try {
3565
+ return JSON.parse(raw);
3566
+ } catch {
3567
+ return null;
3568
+ }
3569
+ }
3516
3570
  function parseStepData(raw) {
3517
3571
  if (!raw || raw === "null") return void 0;
3518
3572
  try {
@@ -3810,9 +3864,17 @@ var RedisBackend = class {
3810
3864
  }
3811
3865
  return jobs;
3812
3866
  }
3813
- async completeJob(jobId) {
3867
+ async completeJob(jobId, output) {
3814
3868
  const now = this.nowMs();
3815
- await this.client.eval(COMPLETE_JOB_SCRIPT, 1, this.prefix, jobId, now);
3869
+ const outputArg = output !== void 0 ? JSON.stringify(output) : "__NONE__";
3870
+ await this.client.eval(
3871
+ COMPLETE_JOB_SCRIPT,
3872
+ 1,
3873
+ this.prefix,
3874
+ jobId,
3875
+ now,
3876
+ outputArg
3877
+ );
3816
3878
  await this.recordJobEvent(jobId, "completed" /* Completed */);
3817
3879
  log(`Completed job ${jobId}`);
3818
3880
  }
@@ -3865,6 +3927,22 @@ var RedisBackend = class {
3865
3927
  log(`Error updating progress for job ${jobId}: ${error}`);
3866
3928
  }
3867
3929
  }
3930
+ // ── Output ────────────────────────────────────────────────────────────
3931
+ async updateOutput(jobId, output) {
3932
+ try {
3933
+ const now = this.nowMs();
3934
+ await this.client.hset(
3935
+ `${this.prefix}job:${jobId}`,
3936
+ "output",
3937
+ JSON.stringify(output),
3938
+ "updatedAt",
3939
+ now.toString()
3940
+ );
3941
+ log(`Updated output for job ${jobId}`);
3942
+ } catch (error) {
3943
+ log(`Error updating output for job ${jobId}: ${error}`);
3944
+ }
3945
+ }
3868
3946
  // ── Job management ────────────────────────────────────────────────────
3869
3947
  async retryJob(jobId) {
3870
3948
  const now = this.nowMs();