@nicnocquee/dataqueue 1.26.0-beta.20260223195940 → 1.26.0-beta.20260223204425
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ai/docs-content.json +15 -3
- package/ai/rules/advanced.md +38 -0
- package/ai/rules/react-dashboard.md +5 -1
- package/ai/skills/dataqueue-advanced/SKILL.md +50 -0
- package/ai/skills/dataqueue-core/SKILL.md +2 -1
- package/ai/skills/dataqueue-react/SKILL.md +19 -7
- package/dist/index.cjs +230 -73
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +112 -5
- package/dist/index.d.ts +112 -5
- package/dist/index.js +230 -73
- package/dist/index.js.map +1 -1
- package/migrations/1781200000006_add_output_to_job_queue.sql +3 -0
- package/package.json +1 -1
- package/src/backend.ts +5 -2
- package/src/backends/postgres.ts +28 -9
- package/src/backends/redis-scripts.ts +11 -3
- package/src/backends/redis.test.ts +276 -0
- package/src/backends/redis.ts +40 -2
- package/src/index.test.ts +489 -11
- package/src/index.ts +66 -13
- package/src/processor.ts +133 -49
- package/src/queue.test.ts +41 -0
- package/src/queue.ts +5 -2
- package/src/supervisor.ts +20 -5
- package/src/types.ts +107 -1
package/ai/docs-content.json
CHANGED
|
@@ -39,13 +39,13 @@
|
|
|
39
39
|
"slug": "api/job-queue",
|
|
40
40
|
"title": "JobQueue",
|
|
41
41
|
"description": "",
|
|
42
|
-
"content": "## Initialization\n\n### initJobQueue\n\n```ts\ninitJobQueue(config: JobQueueConfig): JobQueue\n```\n\nInitializes the job queue system with the provided configuration. The `JobQueueConfig` is a discriminated union -- you provide either a PostgreSQL or Redis configuration.\n\n#### PostgresJobQueueConfig\n\nProvide either `databaseConfig` (the library creates a pool) or `pool` (bring your own `pg.Pool`). At least one must be set.\n\n```ts\ninterface PostgresJobQueueConfig {\n backend?: 'postgres'; // Optional, defaults to 'postgres'\n databaseConfig?: {\n connectionString?: string;\n host?: string;\n port?: number;\n database?: string;\n user?: string;\n password?: string;\n ssl?: DatabaseSSLConfig;\n };\n pool?: import('pg').Pool; // Bring your own pool\n verbose?: boolean;\n}\n```\n\n#### RedisJobQueueConfig\n\nProvide either `redisConfig` (the library creates an ioredis client) or `client` (bring your own). At least one must be set.\n\n```ts\ninterface RedisJobQueueConfig {\n backend: 'redis'; // Required\n redisConfig?: {\n url?: string;\n host?: string;\n port?: number;\n password?: string;\n db?: number;\n tls?: RedisTLSConfig;\n keyPrefix?: string; // Default: 'dq:'\n };\n client?: unknown; // Bring your own ioredis client\n keyPrefix?: string; // Key prefix when using external client (default: 'dq:')\n verbose?: boolean;\n}\n```\n\n#### JobQueueConfig\n\n```ts\ntype JobQueueConfig = PostgresJobQueueConfig | RedisJobQueueConfig;\n```\n\n#### DatabaseSSLConfig\n\n```ts\ninterface DatabaseSSLConfig {\n ca?: string;\n cert?: string;\n key?: string;\n rejectUnauthorized?: boolean;\n}\n```\n\n- `ca` - Client certificate authority (CA) as PEM string or file path. If the value starts with 'file://', it will be loaded from file, otherwise treated as PEM string.\n- `cert` - Client certificate as PEM string or file path. 
If the value starts with 'file://', it will be loaded from file, otherwise treated as PEM string.\n- `key` - Client private key as PEM string or file path. If the value starts with 'file://', it will be loaded from file, otherwise treated as PEM string.\n- `rejectUnauthorized` - Whether to reject unauthorized certificates (default: true)\n\n#### RedisTLSConfig\n\n```ts\ninterface RedisTLSConfig {\n ca?: string;\n cert?: string;\n key?: string;\n rejectUnauthorized?: boolean;\n}\n```\n\n---\n\n## Adding Jobs\n\n### addJob\n\n```ts\naddJob(job: JobOptions, options?: AddJobOptions): Promise<number>\n```\n\nAdds a job to the queue. Returns the job ID.\n\n#### JobOptions\n\n```ts\ninterface JobOptions {\n jobType: string;\n payload: any;\n maxAttempts?: number;\n priority?: number;\n runAt?: Date | null;\n timeoutMs?: number;\n tags?: string[];\n idempotencyKey?: string;\n retryDelay?: number; // Base delay between retries in seconds (default: 60)\n retryBackoff?: boolean; // Use exponential backoff (default: true)\n retryDelayMax?: number; // Max delay cap in seconds (default: none)\n}\n```\n\n- `retryDelay` - Base delay between retries in seconds. When `retryBackoff` is true, this is the base for exponential backoff (`retryDelay * 2^attempts`). When false, retries use this fixed delay. Default: `60`.\n- `retryBackoff` - Whether to use exponential backoff. When true, delay doubles with each attempt and includes jitter. Default: `true`.\n- `retryDelayMax` - Maximum delay cap in seconds. Only meaningful when `retryBackoff` is true. No limit when omitted.\n\n#### AddJobOptions\n\n```ts\ninterface AddJobOptions {\n db?: DatabaseClient;\n}\n```\n\n- `db` — An external database client (e.g., a `pg.PoolClient` inside a transaction). When provided, the INSERT runs on this client instead of the internal pool. 
**PostgreSQL only.** Throws if used with the Redis backend.\n\n### addJobs\n\n```ts\naddJobs(jobs: JobOptions[], options?: AddJobOptions): Promise<number[]>\n```\n\nAdds multiple jobs to the queue in a single operation. More efficient than calling `addJob` in a loop because it batches the INSERT into a single database round-trip (PostgreSQL) or a single atomic Lua script (Redis).\n\nReturns an array of job IDs in the same order as the input array.\n\nEach job can independently have its own `priority`, `runAt`, `tags`, `idempotencyKey`, and other options. Idempotency keys are handled per-job — duplicates resolve to the existing job's ID without creating a new row.\n\nPassing an empty array returns `[]` immediately without touching the database.\n\n```ts\nconst jobIds = await jobQueue.addJobs([\n {\n jobType: 'email',\n payload: { to: 'a@example.com', subject: 'Hi', body: '...' },\n },\n {\n jobType: 'email',\n payload: { to: 'b@example.com', subject: 'Hi', body: '...' },\n priority: 10,\n },\n {\n jobType: 'report',\n payload: { reportId: '123', userId: '456' },\n tags: ['monthly'],\n },\n]);\n// jobIds = [1, 2, 3]\n```\n\nThe `{ db }` option works the same as `addJob` — pass a transactional client to batch-insert within an existing transaction (PostgreSQL only).\n\n#### DatabaseClient\n\n```ts\ninterface DatabaseClient {\n query(\n text: string,\n values?: any[],\n ): Promise<{ rows: any[]; rowCount: number | null }>;\n}\n```\n\nAny object matching this interface works — `pg.Pool`, `pg.PoolClient`, `pg.Client`, or ORM query runners that expose a raw `query()` method.\n\n---\n\n## Retrieving Jobs\n\n### getJob\n\n```ts\ngetJob(id: number): Promise<JobRecord | null>\n```\n\nRetrieves a job by its ID.\n\n### getJobs\n\n```ts\ngetJobs(\n filters?: {\n jobType?: string;\n priority?: number;\n runAt?: Date | { gt?: Date; gte?: Date; lt?: Date; lte?: Date; eq?: Date };\n tags?: { values: string[]; mode?: 'all' | 'any' | 'none' | 'exact' };\n },\n limit?: number,\n 
offset?: number\n): Promise<JobRecord[]>\n```\n\nRetrieves jobs matching the provided filters, with optional pagination.\n\n### getJobsByStatus\n\n```ts\ngetJobsByStatus(status: string, limit?: number, offset?: number): Promise<JobRecord[]>\n```\n\nRetrieves jobs by their status, with pagination.\n\n### getAllJobs\n\n```ts\ngetAllJobs(limit?: number, offset?: number): Promise<JobRecord[]>\n```\n\nRetrieves all jobs, with optional pagination.\n\n### getJobsByTags\n\n```ts\ngetJobsByTags(tags: string[], mode?: TagQueryMode, limit?: number, offset?: number): Promise<JobRecord[]>\n```\n\nRetrieves jobs by tag(s).\n\n---\n\n## Managing Jobs\n\n### retryJob\n\n```ts\nretryJob(jobId: number): Promise<void>\n```\n\nRetries a job given its ID.\n\n### cancelJob\n\n```ts\ncancelJob(jobId: number): Promise<void>\n```\n\nCancels a job given its ID.\n\n### editJob\n\n```ts\neditJob(jobId: number, updates: EditJobOptions): Promise<void>\n```\n\nEdits a pending job given its ID. Only works for jobs with status 'pending'. Silently fails for other statuses (processing, completed, failed, cancelled).\n\n#### EditJobOptions\n\n```ts\ninterface EditJobOptions {\n payload?: any;\n maxAttempts?: number;\n priority?: number;\n runAt?: Date | null;\n timeoutMs?: number;\n tags?: string[];\n retryDelay?: number | null;\n retryBackoff?: boolean | null;\n retryDelayMax?: number | null;\n}\n```\n\nAll fields are optional - only provided fields will be updated. Note that `jobType` cannot be changed. 
Set retry fields to `null` to revert to legacy default behavior.\n\n#### Example\n\n```ts\n// Edit a pending job's payload and priority\nawait jobQueue.editJob(jobId, {\n payload: { to: 'newemail@example.com', subject: 'Updated' },\n priority: 10,\n});\n\n// Edit only the scheduled run time\nawait jobQueue.editJob(jobId, {\n runAt: new Date(Date.now() + 60000), // Run in 1 minute\n});\n\n// Edit multiple fields at once\nawait jobQueue.editJob(jobId, {\n payload: { to: 'updated@example.com' },\n priority: 5,\n maxAttempts: 10,\n timeoutMs: 30000,\n tags: ['urgent', 'priority'],\n});\n```\n\n### editAllPendingJobs\n\n```ts\neditAllPendingJobs(\n filters?: {\n jobType?: string;\n priority?: number;\n runAt?: Date | { gt?: Date; gte?: Date; lt?: Date; lte?: Date; eq?: Date };\n tags?: { values: string[]; mode?: 'all' | 'any' | 'none' | 'exact' };\n },\n updates: EditJobOptions\n): Promise<number>\n```\n\nEdits all pending jobs that match the filters. Only works for jobs with status 'pending'. Non-pending jobs are not affected. Returns the number of jobs that were edited.\n\n#### Parameters\n\n- `filters` (optional): Filters to select which jobs to edit. If not provided, all pending jobs are edited.\n - `jobType`: Filter by job type\n - `priority`: Filter by priority\n - `runAt`: Filter by scheduled run time (supports `gt`, `gte`, `lt`, `lte`, `eq` operators or exact Date match)\n - `tags`: Filter by tags with mode ('all', 'any', 'none', 'exact')\n- `updates`: The fields to update (same as `EditJobOptions`). 
All fields are optional - only provided fields will be updated.\n\n#### Returns\n\nThe number of jobs that were successfully edited.\n\n#### Examples\n\n```ts\n// Edit all pending jobs\nconst editedCount = await jobQueue.editAllPendingJobs(undefined, {\n priority: 10,\n});\n\n// Edit all pending email jobs\nconst editedCount = await jobQueue.editAllPendingJobs(\n { jobType: 'email' },\n {\n priority: 5,\n },\n);\n\n// Edit all pending jobs with 'urgent' tag\nconst editedCount = await jobQueue.editAllPendingJobs(\n { tags: { values: ['urgent'], mode: 'any' } },\n {\n priority: 10,\n maxAttempts: 5,\n },\n);\n\n// Edit all pending jobs scheduled in the future\nconst editedCount = await jobQueue.editAllPendingJobs(\n { runAt: { gte: new Date() } },\n {\n priority: 10,\n },\n);\n\n// Edit with combined filters\nconst editedCount = await jobQueue.editAllPendingJobs(\n {\n jobType: 'email',\n tags: { values: ['urgent'], mode: 'any' },\n },\n {\n priority: 10,\n maxAttempts: 5,\n },\n);\n```\n\n**Note:** Only pending jobs are edited. Jobs with other statuses (processing, completed, failed, cancelled) are not affected. Edit events are recorded for each affected job, just like single job edits.\n\n### cancelAllUpcomingJobs\n\n```ts\ncancelAllUpcomingJobs(filters?: {\n jobType?: string;\n priority?: number;\n runAt?: Date | { gt?: Date; gte?: Date; lt?: Date; lte?: Date; eq?: Date };\n tags?: { values: string[]; mode?: 'all' | 'any' | 'none' | 'exact' };\n}): Promise<number>\n```\n\nCancels all upcoming jobs that match the filters. Returns the number of jobs cancelled.\n\n### cleanupOldJobs\n\n```ts\ncleanupOldJobs(daysToKeep?: number): Promise<number>\n```\n\nCleans up jobs older than the specified number of days. Returns the number of jobs removed.\n\n### reclaimStuckJobs\n\n```ts\nreclaimStuckJobs(maxProcessingTimeMinutes?: number): Promise<number>\n```\n\nReclaims jobs stuck in 'processing' for too long. Returns the number of jobs reclaimed. 
If a job has a `timeoutMs` that is longer than the `maxProcessingTimeMinutes` threshold, the job's own timeout is used instead, preventing premature reclamation of long-running jobs.\n\n---\n\n## Job Events\n\n### getJobEvents\n\n```ts\ngetJobEvents(jobId: number): Promise<JobEvent[]>\n```\n\nRetrieves the job events for a job.\n\n#### JobEvent\n\n```ts\ninterface JobEvent {\n id: number;\n jobId: number;\n eventType: JobEventType;\n createdAt: Date;\n metadata: any;\n}\n```\n\n#### JobEventType\n\n```ts\nenum JobEventType {\n Added = 'added',\n Processing = 'processing',\n Completed = 'completed',\n Failed = 'failed',\n Cancelled = 'cancelled',\n Retried = 'retried',\n Edited = 'edited',\n}\n```\n\n---\n\n## Processing Jobs\n\n### createProcessor\n\n```ts\ncreateProcessor(\n handlers: JobHandlers,\n options?: ProcessorOptions\n): Processor\n```\n\nCreates a job processor with the provided handlers and options.\n\n#### ProcessorOptions\n\n```ts\ninterface ProcessorOptions {\n workerId?: string;\n batchSize?: number;\n concurrency?: number;\n pollInterval?: number;\n onError?: (error: Error) => void;\n verbose?: boolean;\n jobType?: string | string[];\n}\n```\n\n---\n\n## Background Supervisor\n\n### createSupervisor\n\n```ts\ncreateSupervisor(options?: SupervisorOptions): Supervisor\n```\n\nCreates a background supervisor that automatically runs maintenance tasks on a configurable interval: reclaiming stuck jobs, cleaning up old completed jobs/events, and expiring timed-out waitpoint tokens.\n\n#### SupervisorOptions\n\n```ts\ninterface SupervisorOptions {\n intervalMs?: number; // default: 60000\n stuckJobsTimeoutMinutes?: number; // default: 10\n cleanupJobsDaysToKeep?: number; // default: 30 (0 to disable)\n cleanupEventsDaysToKeep?: number; // default: 30 (0 to disable)\n cleanupBatchSize?: number; // default: 1000\n reclaimStuckJobs?: boolean; // default: true\n expireTimedOutTokens?: boolean; // default: true\n onError?: (error: Error) => void; // default: 
console.error\n verbose?: boolean;\n}\n```\n\n#### Supervisor\n\n```ts\ninterface Supervisor {\n start(): Promise<SupervisorRunResult>;\n startInBackground(): void;\n stop(): void;\n stopAndDrain(timeoutMs?: number): Promise<void>;\n isRunning(): boolean;\n}\n```\n\n- `start()` runs all tasks once and returns the results (serverless-friendly).\n- `startInBackground()` starts a background loop that runs every `intervalMs`.\n- `stopAndDrain()` stops the loop and waits for the current run to finish.\n\n#### SupervisorRunResult\n\n```ts\ninterface SupervisorRunResult {\n reclaimedJobs: number;\n cleanedUpJobs: number;\n cleanedUpEvents: number;\n expiredTokens: number;\n}\n```\n\nSee [Long-Running Server](/usage/long-running-server#background-supervisor) for usage examples.\n\n---\n\n## Accessing the Underlying Client\n\n### getPool\n\n```ts\ngetPool(): Pool\n```\n\nReturns the PostgreSQL connection pool instance. Only available when using the PostgreSQL backend.\n\n> **Note:** Throws an error if called when using the Redis backend.\n\n### getRedisClient\n\n```ts\ngetRedisClient(): Redis\n```\n\nReturns the `ioredis` client instance. Only available when using the Redis backend.\n\n> **Note:** Throws an error if called when using the PostgreSQL backend."
|
|
42
|
+
"content": "## Initialization\n\n### initJobQueue\n\n```ts\ninitJobQueue(config: JobQueueConfig): JobQueue\n```\n\nInitializes the job queue system with the provided configuration. The `JobQueueConfig` is a discriminated union -- you provide either a PostgreSQL or Redis configuration.\n\n#### PostgresJobQueueConfig\n\nProvide either `databaseConfig` (the library creates a pool) or `pool` (bring your own `pg.Pool`). At least one must be set.\n\n```ts\ninterface PostgresJobQueueConfig {\n backend?: 'postgres'; // Optional, defaults to 'postgres'\n databaseConfig?: {\n connectionString?: string;\n host?: string;\n port?: number;\n database?: string;\n user?: string;\n password?: string;\n ssl?: DatabaseSSLConfig;\n };\n pool?: import('pg').Pool; // Bring your own pool\n verbose?: boolean;\n}\n```\n\n#### RedisJobQueueConfig\n\nProvide either `redisConfig` (the library creates an ioredis client) or `client` (bring your own). At least one must be set.\n\n```ts\ninterface RedisJobQueueConfig {\n backend: 'redis'; // Required\n redisConfig?: {\n url?: string;\n host?: string;\n port?: number;\n password?: string;\n db?: number;\n tls?: RedisTLSConfig;\n keyPrefix?: string; // Default: 'dq:'\n };\n client?: unknown; // Bring your own ioredis client\n keyPrefix?: string; // Key prefix when using external client (default: 'dq:')\n verbose?: boolean;\n}\n```\n\n#### JobQueueConfig\n\n```ts\ntype JobQueueConfig = PostgresJobQueueConfig | RedisJobQueueConfig;\n```\n\n#### DatabaseSSLConfig\n\n```ts\ninterface DatabaseSSLConfig {\n ca?: string;\n cert?: string;\n key?: string;\n rejectUnauthorized?: boolean;\n}\n```\n\n- `ca` - Client certificate authority (CA) as PEM string or file path. If the value starts with 'file://', it will be loaded from file, otherwise treated as PEM string.\n- `cert` - Client certificate as PEM string or file path. 
If the value starts with 'file://', it will be loaded from file, otherwise treated as PEM string.\n- `key` - Client private key as PEM string or file path. If the value starts with 'file://', it will be loaded from file, otherwise treated as PEM string.\n- `rejectUnauthorized` - Whether to reject unauthorized certificates (default: true)\n\n#### RedisTLSConfig\n\n```ts\ninterface RedisTLSConfig {\n ca?: string;\n cert?: string;\n key?: string;\n rejectUnauthorized?: boolean;\n}\n```\n\n---\n\n## Adding Jobs\n\n### addJob\n\n```ts\naddJob(job: JobOptions, options?: AddJobOptions): Promise<number>\n```\n\nAdds a job to the queue. Returns the job ID.\n\n#### JobOptions\n\n```ts\ninterface JobOptions {\n jobType: string;\n payload: any;\n maxAttempts?: number;\n priority?: number;\n runAt?: Date | null;\n timeoutMs?: number;\n tags?: string[];\n idempotencyKey?: string;\n retryDelay?: number; // Base delay between retries in seconds (default: 60)\n retryBackoff?: boolean; // Use exponential backoff (default: true)\n retryDelayMax?: number; // Max delay cap in seconds (default: none)\n}\n```\n\n- `retryDelay` - Base delay between retries in seconds. When `retryBackoff` is true, this is the base for exponential backoff (`retryDelay * 2^attempts`). When false, retries use this fixed delay. Default: `60`.\n- `retryBackoff` - Whether to use exponential backoff. When true, delay doubles with each attempt and includes jitter. Default: `true`.\n- `retryDelayMax` - Maximum delay cap in seconds. Only meaningful when `retryBackoff` is true. No limit when omitted.\n\n#### AddJobOptions\n\n```ts\ninterface AddJobOptions {\n db?: DatabaseClient;\n}\n```\n\n- `db` — An external database client (e.g., a `pg.PoolClient` inside a transaction). When provided, the INSERT runs on this client instead of the internal pool. 
**PostgreSQL only.** Throws if used with the Redis backend.\n\n### addJobs\n\n```ts\naddJobs(jobs: JobOptions[], options?: AddJobOptions): Promise<number[]>\n```\n\nAdds multiple jobs to the queue in a single operation. More efficient than calling `addJob` in a loop because it batches the INSERT into a single database round-trip (PostgreSQL) or a single atomic Lua script (Redis).\n\nReturns an array of job IDs in the same order as the input array.\n\nEach job can independently have its own `priority`, `runAt`, `tags`, `idempotencyKey`, and other options. Idempotency keys are handled per-job — duplicates resolve to the existing job's ID without creating a new row.\n\nPassing an empty array returns `[]` immediately without touching the database.\n\n```ts\nconst jobIds = await jobQueue.addJobs([\n {\n jobType: 'email',\n payload: { to: 'a@example.com', subject: 'Hi', body: '...' },\n },\n {\n jobType: 'email',\n payload: { to: 'b@example.com', subject: 'Hi', body: '...' },\n priority: 10,\n },\n {\n jobType: 'report',\n payload: { reportId: '123', userId: '456' },\n tags: ['monthly'],\n },\n]);\n// jobIds = [1, 2, 3]\n```\n\nThe `{ db }` option works the same as `addJob` — pass a transactional client to batch-insert within an existing transaction (PostgreSQL only).\n\n#### DatabaseClient\n\n```ts\ninterface DatabaseClient {\n query(\n text: string,\n values?: any[],\n ): Promise<{ rows: any[]; rowCount: number | null }>;\n}\n```\n\nAny object matching this interface works — `pg.Pool`, `pg.PoolClient`, `pg.Client`, or ORM query runners that expose a raw `query()` method.\n\n---\n\n## Retrieving Jobs\n\n### getJob\n\n```ts\ngetJob(id: number): Promise<JobRecord | null>\n```\n\nRetrieves a job by its ID.\n\n### getJobs\n\n```ts\ngetJobs(\n filters?: {\n jobType?: string;\n priority?: number;\n runAt?: Date | { gt?: Date; gte?: Date; lt?: Date; lte?: Date; eq?: Date };\n tags?: { values: string[]; mode?: 'all' | 'any' | 'none' | 'exact' };\n },\n limit?: number,\n 
offset?: number\n): Promise<JobRecord[]>\n```\n\nRetrieves jobs matching the provided filters, with optional pagination.\n\n### getJobsByStatus\n\n```ts\ngetJobsByStatus(status: string, limit?: number, offset?: number): Promise<JobRecord[]>\n```\n\nRetrieves jobs by their status, with pagination.\n\n### getAllJobs\n\n```ts\ngetAllJobs(limit?: number, offset?: number): Promise<JobRecord[]>\n```\n\nRetrieves all jobs, with optional pagination.\n\n### getJobsByTags\n\n```ts\ngetJobsByTags(tags: string[], mode?: TagQueryMode, limit?: number, offset?: number): Promise<JobRecord[]>\n```\n\nRetrieves jobs by tag(s).\n\n---\n\n## Managing Jobs\n\n### retryJob\n\n```ts\nretryJob(jobId: number): Promise<void>\n```\n\nRetries a job given its ID.\n\n### cancelJob\n\n```ts\ncancelJob(jobId: number): Promise<void>\n```\n\nCancels a job given its ID.\n\n### editJob\n\n```ts\neditJob(jobId: number, updates: EditJobOptions): Promise<void>\n```\n\nEdits a pending job given its ID. Only works for jobs with status 'pending'. Silently fails for other statuses (processing, completed, failed, cancelled).\n\n#### EditJobOptions\n\n```ts\ninterface EditJobOptions {\n payload?: any;\n maxAttempts?: number;\n priority?: number;\n runAt?: Date | null;\n timeoutMs?: number;\n tags?: string[];\n retryDelay?: number | null;\n retryBackoff?: boolean | null;\n retryDelayMax?: number | null;\n}\n```\n\nAll fields are optional - only provided fields will be updated. Note that `jobType` cannot be changed. 
Set retry fields to `null` to revert to legacy default behavior.\n\n#### Example\n\n```ts\n// Edit a pending job's payload and priority\nawait jobQueue.editJob(jobId, {\n payload: { to: 'newemail@example.com', subject: 'Updated' },\n priority: 10,\n});\n\n// Edit only the scheduled run time\nawait jobQueue.editJob(jobId, {\n runAt: new Date(Date.now() + 60000), // Run in 1 minute\n});\n\n// Edit multiple fields at once\nawait jobQueue.editJob(jobId, {\n payload: { to: 'updated@example.com' },\n priority: 5,\n maxAttempts: 10,\n timeoutMs: 30000,\n tags: ['urgent', 'priority'],\n});\n```\n\n### editAllPendingJobs\n\n```ts\neditAllPendingJobs(\n filters?: {\n jobType?: string;\n priority?: number;\n runAt?: Date | { gt?: Date; gte?: Date; lt?: Date; lte?: Date; eq?: Date };\n tags?: { values: string[]; mode?: 'all' | 'any' | 'none' | 'exact' };\n },\n updates: EditJobOptions\n): Promise<number>\n```\n\nEdits all pending jobs that match the filters. Only works for jobs with status 'pending'. Non-pending jobs are not affected. Returns the number of jobs that were edited.\n\n#### Parameters\n\n- `filters` (optional): Filters to select which jobs to edit. If not provided, all pending jobs are edited.\n - `jobType`: Filter by job type\n - `priority`: Filter by priority\n - `runAt`: Filter by scheduled run time (supports `gt`, `gte`, `lt`, `lte`, `eq` operators or exact Date match)\n - `tags`: Filter by tags with mode ('all', 'any', 'none', 'exact')\n- `updates`: The fields to update (same as `EditJobOptions`). 
All fields are optional - only provided fields will be updated.\n\n#### Returns\n\nThe number of jobs that were successfully edited.\n\n#### Examples\n\n```ts\n// Edit all pending jobs\nconst editedCount = await jobQueue.editAllPendingJobs(undefined, {\n priority: 10,\n});\n\n// Edit all pending email jobs\nconst editedCount = await jobQueue.editAllPendingJobs(\n { jobType: 'email' },\n {\n priority: 5,\n },\n);\n\n// Edit all pending jobs with 'urgent' tag\nconst editedCount = await jobQueue.editAllPendingJobs(\n { tags: { values: ['urgent'], mode: 'any' } },\n {\n priority: 10,\n maxAttempts: 5,\n },\n);\n\n// Edit all pending jobs scheduled in the future\nconst editedCount = await jobQueue.editAllPendingJobs(\n { runAt: { gte: new Date() } },\n {\n priority: 10,\n },\n);\n\n// Edit with combined filters\nconst editedCount = await jobQueue.editAllPendingJobs(\n {\n jobType: 'email',\n tags: { values: ['urgent'], mode: 'any' },\n },\n {\n priority: 10,\n maxAttempts: 5,\n },\n);\n```\n\n**Note:** Only pending jobs are edited. Jobs with other statuses (processing, completed, failed, cancelled) are not affected. Edit events are recorded for each affected job, just like single job edits.\n\n### cancelAllUpcomingJobs\n\n```ts\ncancelAllUpcomingJobs(filters?: {\n jobType?: string;\n priority?: number;\n runAt?: Date | { gt?: Date; gte?: Date; lt?: Date; lte?: Date; eq?: Date };\n tags?: { values: string[]; mode?: 'all' | 'any' | 'none' | 'exact' };\n}): Promise<number>\n```\n\nCancels all upcoming jobs that match the filters. Returns the number of jobs cancelled.\n\n### cleanupOldJobs\n\n```ts\ncleanupOldJobs(daysToKeep?: number): Promise<number>\n```\n\nCleans up jobs older than the specified number of days. Returns the number of jobs removed.\n\n### reclaimStuckJobs\n\n```ts\nreclaimStuckJobs(maxProcessingTimeMinutes?: number): Promise<number>\n```\n\nReclaims jobs stuck in 'processing' for too long. Returns the number of jobs reclaimed. 
If a job has a `timeoutMs` that is longer than the `maxProcessingTimeMinutes` threshold, the job's own timeout is used instead, preventing premature reclamation of long-running jobs.\n\n---\n\n## Job Events\n\n### getJobEvents\n\n```ts\ngetJobEvents(jobId: number): Promise<JobEvent[]>\n```\n\nRetrieves the job events for a job.\n\n#### JobEvent\n\n```ts\ninterface JobEvent {\n id: number;\n jobId: number;\n eventType: JobEventType;\n createdAt: Date;\n metadata: any;\n}\n```\n\n#### JobEventType\n\n```ts\nenum JobEventType {\n Added = 'added',\n Processing = 'processing',\n Completed = 'completed',\n Failed = 'failed',\n Cancelled = 'cancelled',\n Retried = 'retried',\n Edited = 'edited',\n}\n```\n\n---\n\n## Event Hooks\n\nDataQueue emits real-time events for job lifecycle transitions. Register listeners using `on`, `once`, `off`, and `removeAllListeners`. Works identically with both PostgreSQL and Redis backends.\n\n### QueueEventMap\n\n```ts\ninterface QueueEventMap {\n 'job:added': { jobId: number; jobType: string };\n 'job:processing': { jobId: number; jobType: string };\n 'job:completed': { jobId: number; jobType: string };\n 'job:failed': {\n jobId: number;\n jobType: string;\n error: Error;\n willRetry: boolean;\n };\n 'job:cancelled': { jobId: number };\n 'job:retried': { jobId: number };\n 'job:waiting': { jobId: number; jobType: string };\n 'job:progress': { jobId: number; progress: number };\n error: Error;\n}\n```\n\n### on\n\n```ts\non(event: QueueEventName, listener: (data) => void): void\n```\n\nRegister a listener that fires every time the event is emitted.\n\n### once\n\n```ts\nonce(event: QueueEventName, listener: (data) => void): void\n```\n\nRegister a one-time listener that auto-removes after the first invocation.\n\n### off\n\n```ts\noff(event: QueueEventName, listener: (data) => void): void\n```\n\nRemove a previously registered listener. 
Pass the exact function reference used with `on` or `once`.\n\n### removeAllListeners\n\n```ts\nremoveAllListeners(event?: QueueEventName): void\n```\n\nRemove all listeners for a specific event, or all listeners for all events when called without arguments.\n\nSee [Event Hooks](/usage/event-hooks) for detailed usage examples.\n\n---\n\n## Processing Jobs\n\n### createProcessor\n\n```ts\ncreateProcessor(\n handlers: JobHandlers,\n options?: ProcessorOptions\n): Processor\n```\n\nCreates a job processor with the provided handlers and options.\n\n#### ProcessorOptions\n\n```ts\ninterface ProcessorOptions {\n workerId?: string;\n batchSize?: number;\n concurrency?: number;\n pollInterval?: number;\n onError?: (error: Error) => void;\n verbose?: boolean;\n jobType?: string | string[];\n}\n```\n\n---\n\n## Background Supervisor\n\n### createSupervisor\n\n```ts\ncreateSupervisor(options?: SupervisorOptions): Supervisor\n```\n\nCreates a background supervisor that automatically runs maintenance tasks on a configurable interval: reclaiming stuck jobs, cleaning up old completed jobs/events, and expiring timed-out waitpoint tokens.\n\n#### SupervisorOptions\n\n```ts\ninterface SupervisorOptions {\n intervalMs?: number; // default: 60000\n stuckJobsTimeoutMinutes?: number; // default: 10\n cleanupJobsDaysToKeep?: number; // default: 30 (0 to disable)\n cleanupEventsDaysToKeep?: number; // default: 30 (0 to disable)\n cleanupBatchSize?: number; // default: 1000\n reclaimStuckJobs?: boolean; // default: true\n expireTimedOutTokens?: boolean; // default: true\n onError?: (error: Error) => void; // default: console.error\n verbose?: boolean;\n}\n```\n\n#### Supervisor\n\n```ts\ninterface Supervisor {\n start(): Promise<SupervisorRunResult>;\n startInBackground(): void;\n stop(): void;\n stopAndDrain(timeoutMs?: number): Promise<void>;\n isRunning(): boolean;\n}\n```\n\n- `start()` runs all tasks once and returns the results (serverless-friendly).\n- `startInBackground()` starts a 
background loop that runs every `intervalMs`.\n- `stopAndDrain()` stops the loop and waits for the current run to finish.\n\n#### SupervisorRunResult\n\n```ts\ninterface SupervisorRunResult {\n reclaimedJobs: number;\n cleanedUpJobs: number;\n cleanedUpEvents: number;\n expiredTokens: number;\n}\n```\n\nSee [Long-Running Server](/usage/long-running-server#background-supervisor) for usage examples.\n\n---\n\n## Accessing the Underlying Client\n\n### getPool\n\n```ts\ngetPool(): Pool\n```\n\nReturns the PostgreSQL connection pool instance. Only available when using the PostgreSQL backend.\n\n> **Note:** Throws an error if called when using the Redis backend.\n\n### getRedisClient\n\n```ts\ngetRedisClient(): Redis\n```\n\nReturns the `ioredis` client instance. Only available when using the Redis backend.\n\n> **Note:** Throws an error if called when using the PostgreSQL backend."
|
|
43
43
|
},
|
|
44
44
|
{
|
|
45
45
|
"slug": "api/job-record",
|
|
46
46
|
"title": "JobRecord",
|
|
47
47
|
"description": "",
|
|
48
|
-
"content": "The `JobRecord` interface represents a job stored in the queue, including its status, attempts, and metadata.\n\n## Fields\n\n- `id`: _number_ — Unique job ID.\n- `jobType`: _string_ — The type of the job.\n- `payload`: _any_ — The job payload.\n- `status`:\n _'pending' | 'processing' | 'completed' | 'failed' | 'cancelled'_ —\n Current job status.\n- `createdAt`: _Date_ — When the job was created.\n- `updated_at`: _Date_ — When the job was last updated.\n- `locked_at`: _Date | null_ — When the job was locked for\n processing.\n- `locked_by`: _string | null_ — Worker that locked the job.\n- `attempts`: _number_ — Number of attempts so far.\n- `maxAttempts`: _number_ — Maximum allowed attempts.\n- `nextAttemptAt`: _Date | null_ — When the next attempt is\n scheduled.\n- `priority`: _number_ — Job priority.\n- `runAt`: _Date_ — When the job is scheduled to run.\n- `pendingReason?`: _string | null_ — Reason for pending\n status.\n- `errorHistory?`: _\\{ message: string; timestamp: string \\}[]_ — Error history for the job.\n- `timeoutMs?`: _number | null_ — Timeout for this job in\n milliseconds.\n- `failureReason?`: _FailureReason | null_ — Reason for last\n failure, if any.\n- `completedAt`: _Date | null_ — When the job was completed.\n- `startedAt`: _Date | null_ — When the job was first picked up\n for processing.\n- `lastRetriedAt`: _Date | null_ — When the job was last\n retried.\n- `lastFailedAt`: _Date | null_ — When the job last failed.\n- `lastCancelledAt`: _Date | null_ — When the job was last\n cancelled.\n- `tags?`: _string[]_ — Tags for this job. Used for grouping, searching, or batch operations.\n- `idempotencyKey?`: _string | null_ — The idempotency key for this job, if one was provided when the job was created.\n- `progress?`: _number | null_ — Progress percentage (0–100) reported by the handler via `ctx.setProgress()`. `null` if no progress has been reported. 
See [Progress Tracking](/usage/progress-tracking).\n\n## Example\n\n```json\n{\n \"id\": 1,\n \"jobType\": \"email\",\n \"payload\": { \"to\": \"user@example.com\", \"subject\": \"Hello\" },\n \"status\": \"
|
|
48
|
+
"content": "The `JobRecord` interface represents a job stored in the queue, including its status, attempts, and metadata.\n\n## Fields\n\n- `id`: _number_ — Unique job ID.\n- `jobType`: _string_ — The type of the job.\n- `payload`: _any_ — The job payload.\n- `status`:\n _'pending' | 'processing' | 'completed' | 'failed' | 'cancelled'_ —\n Current job status.\n- `createdAt`: _Date_ — When the job was created.\n- `updated_at`: _Date_ — When the job was last updated.\n- `locked_at`: _Date | null_ — When the job was locked for\n processing.\n- `locked_by`: _string | null_ — Worker that locked the job.\n- `attempts`: _number_ — Number of attempts so far.\n- `maxAttempts`: _number_ — Maximum allowed attempts.\n- `nextAttemptAt`: _Date | null_ — When the next attempt is\n scheduled.\n- `priority`: _number_ — Job priority.\n- `runAt`: _Date_ — When the job is scheduled to run.\n- `pendingReason?`: _string | null_ — Reason for pending\n status.\n- `errorHistory?`: _\\{ message: string; timestamp: string \\}[]_ — Error history for the job.\n- `timeoutMs?`: _number | null_ — Timeout for this job in\n milliseconds.\n- `failureReason?`: _FailureReason | null_ — Reason for last\n failure, if any.\n- `completedAt`: _Date | null_ — When the job was completed.\n- `startedAt`: _Date | null_ — When the job was first picked up\n for processing.\n- `lastRetriedAt`: _Date | null_ — When the job was last\n retried.\n- `lastFailedAt`: _Date | null_ — When the job last failed.\n- `lastCancelledAt`: _Date | null_ — When the job was last\n cancelled.\n- `tags?`: _string[]_ — Tags for this job. Used for grouping, searching, or batch operations.\n- `idempotencyKey?`: _string | null_ — The idempotency key for this job, if one was provided when the job was created.\n- `progress?`: _number | null_ — Progress percentage (0–100) reported by the handler via `ctx.setProgress()`. `null` if no progress has been reported. 
See [Progress Tracking](/usage/progress-tracking).\n- `output?`: _unknown_ — Handler output stored via `ctx.setOutput(data)` or by returning a value from the handler. `null` if no output has been stored. See [Job Output](/usage/job-output).\n\n## Example\n\n```json\n{\n \"id\": 1,\n \"jobType\": \"email\",\n \"payload\": { \"to\": \"user@example.com\", \"subject\": \"Hello\" },\n \"status\": \"completed\",\n \"createdAt\": \"2024-06-01T12:00:00Z\",\n \"tags\": [\"welcome\", \"user\"],\n \"idempotencyKey\": \"welcome-email-user-123\",\n \"progress\": 100,\n \"output\": { \"messageId\": \"abc-123\", \"sentAt\": \"2024-06-01T12:00:05Z\" }\n}\n```"
|
|
49
49
|
},
|
|
50
50
|
{
|
|
51
51
|
"slug": "api/processor",
|
|
@@ -185,6 +185,12 @@
|
|
|
185
185
|
"description": "",
|
|
186
186
|
"content": "You can edit a pending job by its ID to update its properties before it is processed. Only jobs with status 'pending' can be edited. Attempting to edit a job with any other status (processing, completed, failed, cancelled) will silently fail.\n\n## Basic Usage\n\n```typescript title=\"@/app/api/edit-job/route.ts\"\nimport { NextRequest, NextResponse } from 'next/server';\nimport { getJobQueue } from '@/lib/queue';\n\nexport async function POST(request: NextRequest) {\n try {\n const { jobId, updates } = await request.json();const jobQueue = getJobQueue();\n await jobQueue.editJob(jobId, updates);\n return NextResponse.json({ message: 'Job updated' });\n } catch (error) {\n console.error('Error editing job:', error);\n return NextResponse.json(\n { message: 'Failed to edit job' },\n { status: 500 },\n );\n }\n}\n```\n\n## Editable Fields\n\nAll fields in `EditJobOptions` are optional - only the fields you provide will be updated. The following fields can be edited:\n\n- `payload` - The job payload data\n- `priority` - Job priority (higher runs first)\n- `maxAttempts` - Maximum number of attempts\n- `runAt` - When to run the job (Date or null)\n- `timeoutMs` - Timeout for the job in milliseconds\n- `tags` - Tags for grouping, searching, or batch operations\n\n**Note:** `jobType` cannot be changed. 
If you need to change the job type, you should cancel the job and create a new one.\n\n## Examples\n\n### Edit Payload\n\n```typescript\n// Update the payload of a pending job\nawait jobQueue.editJob(jobId, {\n payload: { to: 'newemail@example.com', subject: 'Updated Subject' },\n});\n```\n\n### Edit Priority\n\n```typescript\n// Increase the priority of a job\nawait jobQueue.editJob(jobId, {\n priority: 10,\n});\n```\n\n### Edit Scheduled Time\n\n```typescript\n// Reschedule a job to run in 1 hour\nawait jobQueue.editJob(jobId, {\n runAt: new Date(Date.now() + 60 * 60 * 1000),\n});\n\n// Schedule a job to run immediately (or as soon as possible)\nawait jobQueue.editJob(jobId, {\n runAt: null,\n});\n```\n\n### Edit Multiple Fields\n\n```typescript\n// Update multiple fields at once\nawait jobQueue.editJob(jobId, {\n payload: { to: 'updated@example.com', subject: 'New Subject' },\n priority: 5,\n maxAttempts: 10,\n timeoutMs: 30000,\n tags: ['urgent', 'priority'],\n});\n```\n\n### Partial Updates\n\n```typescript\n// Only update what you need - other fields remain unchanged\nawait jobQueue.editJob(jobId, {\n priority: 10,\n // payload, maxAttempts, runAt, timeoutMs, and tags remain unchanged\n});\n```\n\n### Clear Tags or Timeout\n\n```typescript\n// Remove tags by setting to undefined\nawait jobQueue.editJob(jobId, {\n tags: undefined,\n});\n\n// Remove timeout by setting to undefined\nawait jobQueue.editJob(jobId, {\n timeoutMs: undefined,\n});\n```\n\n## Batch Editing\n\nYou can edit multiple pending jobs at once using `editAllPendingJobs`. This is useful when you need to update many jobs that match certain criteria. 
The function returns the number of jobs that were edited.\n\n### Basic Batch Edit\n\n```typescript\n// Edit all pending jobs\nconst editedCount = await jobQueue.editAllPendingJobs(undefined, {\n priority: 10,\n});\nconsole.log(`Edited ${editedCount} jobs`);\n```\n\n### Filter by Job Type\n\n```typescript\n// Edit all pending email jobs\nconst editedCount = await jobQueue.editAllPendingJobs(\n { jobType: 'email' },\n {\n priority: 5,\n },\n);\n```\n\n### Filter by Priority\n\n```typescript\n// Edit all pending jobs with priority 1\nconst editedCount = await jobQueue.editAllPendingJobs(\n { priority: 1 },\n {\n priority: 5,\n },\n);\n```\n\n### Filter by Tags\n\n```typescript\n// Edit all pending jobs with 'urgent' tag\nconst editedCount = await jobQueue.editAllPendingJobs(\n { tags: { values: ['urgent'], mode: 'any' } },\n {\n priority: 10,\n },\n);\n```\n\n### Filter by Scheduled Time\n\n```typescript\n// Edit all pending jobs scheduled in the future\nconst editedCount = await jobQueue.editAllPendingJobs(\n { runAt: { gte: new Date() } },\n {\n priority: 10,\n },\n);\n\n// Edit all pending jobs scheduled before a specific date\nconst editedCount = await jobQueue.editAllPendingJobs(\n { runAt: { lt: new Date('2024-12-31') } },\n {\n priority: 5,\n },\n);\n```\n\n### Combined Filters\n\n```typescript\n// Edit all pending email jobs with 'urgent' tag\nconst editedCount = await jobQueue.editAllPendingJobs(\n {\n jobType: 'email',\n tags: { values: ['urgent'], mode: 'any' },\n },\n {\n priority: 10,\n maxAttempts: 5,\n },\n);\n```\n\n### Batch Edit Notes\n\n- Only pending jobs are edited. 
Jobs with other statuses (processing, completed, failed, cancelled) are not affected.\n- The function returns the number of jobs that were successfully edited.\n- Edit events are recorded for each affected job, just like single job edits.\n- If no fields are provided in the updates object, the function returns 0 and no jobs are modified.\n\n## When to Use Edit vs Cancel vs Retry\n\n- **Edit**: Use when you want to modify a pending job's properties before it runs\n- **Cancel**: Use when you want to completely remove a pending job from the queue\n- **Retry**: Use when you want to retry a failed job (sets status back to pending)\n\n## Error Handling\n\nThe `editJob` function silently fails if you try to edit a non-pending job. This means:\n\n- No error is thrown\n- The job remains unchanged\n- The operation completes successfully (but does nothing)\n\nTo check if an edit was successful, you can:\n\n```typescript\nconst job = await jobQueue.getJob(jobId);\nif (job?.status === 'pending') {\n // Job is still pending, edit might have succeeded\n // Check if the fields you wanted to update actually changed\n if (job.priority === newPriority) {\n console.log('Edit successful');\n }\n} else {\n console.log('Job is not pending, edit was ignored');\n}\n```\n\n## Event Tracking\n\nWhen a job is edited, an 'edited' event is recorded in the job's event history. The event metadata contains the fields that were updated:\n\n```typescript\nconst events = await jobQueue.getJobEvents(jobId);\nconst editEvent = events.find((e) => e.eventType === 'edited');\nif (editEvent) {\n console.log('Updated fields:', editEvent.metadata);\n // { payload: {...}, priority: 10, ... }\n}\n```\n\n## Best Practices\n\n1. 
**Check job status before editing**: If you're unsure whether a job is pending, check its status first:\n\n```typescript\nconst job = await jobQueue.getJob(jobId);\nif (job?.status === 'pending') {\n await jobQueue.editJob(jobId, updates);\n} else {\n console.log('Job is not pending, cannot edit');\n}\n```\n\n2. **Use partial updates**: Only update the fields you need to change. This is more efficient and reduces the chance of accidentally overwriting other fields.\n\n3. **Validate updates**: Ensure the updated values are valid for your job handlers. For example, if your handler expects a specific payload structure, make sure the updated payload matches.\n\n4. **Consider race conditions**: If a job might be picked up for processing while you're editing it, be aware that the edit might not take effect if the job transitions to 'processing' status between your check and the edit operation.\n\n5. **Monitor events**: Use job events to track when and what was edited for audit purposes."
|
|
187
187
|
},
|
|
188
|
+
{
|
|
189
|
+
"slug": "usage/event-hooks",
|
|
190
|
+
"title": "Event Hooks",
|
|
191
|
+
"description": "",
|
|
192
|
+
"content": "DataQueue emits real-time events for job lifecycle transitions, progress updates, and internal errors. Use event hooks to integrate with logging, metrics, alerting, or any custom logic without polling.\n\nEvent hooks work identically with both the PostgreSQL and Redis backends.\n\n## Listening for Events\n\nRegister listeners with `on()`, `once()`, or remove them with `off()` and `removeAllListeners()`.\n\n```typescript\nconst queue = initJobQueue<MyPayloadMap>(config);\n\nqueue.on('job:completed', (event) => {\n console.log(`Job ${event.jobId} (${event.jobType}) completed`);\n});\n\nqueue.on('job:failed', (event) => {\n console.error(`Job ${event.jobId} failed: ${event.error.message}`);\n if (!event.willRetry) {\n alertOps(`Permanent failure for job ${event.jobId}`);\n }\n});\n\nqueue.on('error', (error) => {\n logger.error('Queue internal error:', error);\n});\n```\n\n## Available Events\n\n| Event | Payload | When |\n| ---------------- | -------------------------------------- | --------------------------------------------------------------------------------- |\n| `job:added` | `{ jobId, jobType }` | After `addJob()` or `addJobs()` |\n| `job:processing` | `{ jobId, jobType }` | When a processor claims and starts a job |\n| `job:completed` | `{ jobId, jobType }` | When a handler completes successfully |\n| `job:failed` | `{ jobId, jobType, error, willRetry }` | When a handler throws or times out |\n| `job:cancelled` | `{ jobId }` | After `cancelJob()` |\n| `job:retried` | `{ jobId }` | After `retryJob()` |\n| `job:waiting` | `{ jobId, jobType }` | When a handler enters a wait (`ctx.waitFor`, `ctx.waitUntil`, `ctx.waitForToken`) |\n| `job:progress` | `{ jobId, progress }` | When a handler calls `ctx.setProgress()` |\n| `error` | `Error` | Internal errors from the processor or supervisor |\n\n## One-Time Listeners\n\nUse `once()` when you only need to react to the first occurrence of an event.\n\n```typescript\nqueue.once('job:added', (event) => {\n 
console.log('First job added:', event.jobId);\n});\n```\n\n## Removing Listeners\n\n```typescript\nconst listener = (event) => console.log(event);\n\nqueue.on('job:completed', listener);\n\n// Remove a specific listener\nqueue.off('job:completed', listener);\n\n// Remove all listeners for one event\nqueue.removeAllListeners('job:completed');\n\n// Remove all listeners for all events\nqueue.removeAllListeners();\n```\n\n## Error Monitoring\n\nThe `error` event fires for internal errors in the processor and supervisor. It works alongside the existing `onError` callback in `ProcessorOptions` and `SupervisorOptions` -- both fire independently.\n\n```typescript\nqueue.on('error', (error) => {\n Sentry.captureException(error);\n});\n\n// onError still works as before\nconst processor = queue.createProcessor(handlers, {\n onError: (error) => console.error('Processor error:', error),\n});\n```\n\n## Failure Retry Detection\n\nThe `job:failed` event includes a `willRetry` boolean that tells you whether the job will be retried automatically.\n\n```typescript\nqueue.on('job:failed', (event) => {\n if (event.willRetry) {\n metrics.increment('job.retry', { jobType: event.jobType });\n } else {\n metrics.increment('job.permanent_failure', { jobType: event.jobType });\n pagerDuty.alert(`Job ${event.jobId} permanently failed`);\n }\n});\n```\n\n## Progress Tracking\n\nThe `job:progress` event fires whenever a handler calls `ctx.setProgress()`, giving you real-time progress updates.\n\n```typescript\nqueue.on('job:progress', (event) => {\n websocket.broadcast(`job:${event.jobId}`, { progress: event.progress });\n});\n```\n\n> **Note:** Events are emitted synchronously after the corresponding database operation\n completes. Slow event listeners will delay the return of methods like\n `addJob()` or the processing of the next job. Use async patterns in listeners\n if they perform I/O."
|
|
193
|
+
},
|
|
188
194
|
{
|
|
189
195
|
"slug": "usage/failed-jobs",
|
|
190
196
|
"title": "Failed Jobs",
|
|
@@ -221,6 +227,12 @@
|
|
|
221
227
|
"description": "",
|
|
222
228
|
"content": "The first thing you need to do is define your job types and their corresponding payload types. A payload is the data passed to the job handler. A job handler is a function that runs when a job is processed.\n\n### Define Job Types and Payloads\n\nJob types and their payloads are specific to your app. You can define them in any file. The important thing is that they are an object type, where the keys are the job types and the values are the payload types. In this example, `send_email`, `generate_report`, and `generate_image` are the job types, and their values are the payload types.\n\n```typescript title=\"@lib/types/job-payload-map.ts\"\n// Define the job payload map for this app.\n// This ensures that the job payload is typed correctly when adding jobs.\n// The keys are the job types, and the values are the payload types.\nexport type JobPayloadMap = {\n send_email: {\n to: string;\n subject: string;\n body: string;\n };\n generate_report: {\n reportId: string;\n userId: string;\n };\n generate_image: {\n prompt: string;\n };\n};\n```\n\n### Define Job Handlers\n\nNext, define the job handlers by exporting a `JobHandlers` object that maps job types to handler functions. 
If you forget to add a handler for a job type, TypeScript will show an error.\n\n```typescript title=\"@lib/job-handlers.ts\"\nimport { sendEmail } from './services/email'; // Function to send the email\nimport { generateReport } from './services/generate-report'; // Function to generate the report\nimport { JobHandlers } from '@nicnocquee/dataqueue';\n\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n send_email: async (payload) => {\n const { to, subject, body } = payload;\n await sendEmail(to, subject, body);\n },\n generate_report: async (payload) => {\n const { reportId, userId } = payload;\n await generateReport(reportId, userId);\n },\n generate_image: async (payload, signal) => {\n const { prompt } = payload;\n await generateImageAi(prompt, signal);\n },\n};\n```\n\nIn the example above, we define three job handlers: `send_email`, `generate_report`, and `generate_image`. Each handler is a function that takes a payload, an `AbortSignal`, and a `JobContext` as arguments. The `AbortSignal` is used to abort the job if it takes too long to complete. The `JobContext` provides methods to extend the job's timeout while it's running.\n\n### Job Handler Signature\n\nA job handler receives three arguments: the job payload, an `AbortSignal`, and a `JobContext`.\n\n```typescript\n(payload: Payload, signal: AbortSignal, ctx: JobContext) => Promise<void>;\n```\n\nYou can omit arguments you don't need. For example, if you only need the payload:\n\n```typescript\nconst handler = async (payload) => {\n // ...\n};\n```\n\n### JobContext\n\nThe third argument provides methods for timeout management and progress reporting:\n\n- `ctx.prolong(ms?)` — Proactively reset the timeout. If `ms` is provided, sets the deadline to `ms` milliseconds from now. If omitted, resets to the original `timeoutMs`.\n- `ctx.onTimeout(callback)` — Register a callback that fires when the timeout is about to hit, before the `AbortSignal` is triggered. 
Return a number (ms) to extend, or return nothing to let the timeout proceed.\n- `ctx.setProgress(percent)` — Report progress as a percentage (0–100). The value is persisted to the database and can be read by clients via `getJob()` or the React SDK's `useJob()` hook.\n\nSee [Job Timeout](/usage/job-timeout) for timeout examples and [Progress Tracking](/usage/progress-tracking) for progress reporting."
|
|
223
229
|
},
|
|
230
|
+
{
|
|
231
|
+
"slug": "usage/job-output",
|
|
232
|
+
"title": "Job Output",
|
|
233
|
+
"description": "Store and retrieve results from job handlers",
|
|
234
|
+
"content": "Jobs can store an output value when they complete. This is useful when you need to retrieve the result of a background task — for example, a generated report URL, a processed image path, or computation results.\n\n## Storing Output\n\nThere are two ways to store output from a handler:\n\n### 1. Return a value from the handler\n\nThe simplest approach — return any JSON-serializable value from your handler function:\n\n```typescript title=\"@lib/job-handlers.ts\"\nimport { JobHandlers } from '@nicnocquee/dataqueue';\n\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n generate_report: async (payload, signal, ctx) => {\n const url = await generateReport(payload.reportId);return { url, generatedAt: new Date().toISOString() };},\n};\n```\n\n### 2. Use `ctx.setOutput(data)`\n\nFor more control, call `ctx.setOutput()` explicitly. This is useful when you want to store intermediate results during execution:\n\n```typescript title=\"@lib/job-handlers.ts\"\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n process_images: async (payload, signal, ctx) => {\n const results: string[] = [];\n\n for (const image of payload.images) {\n const url = await processImage(image);\n results.push(url);\n\n await ctx.setProgress(\n Math.round((results.length / payload.images.length) * 100),\n );\n await ctx.setOutput({ processedUrls: results });}\n },\n};\n```\n\n### Precedence\n\nIf both `ctx.setOutput()` is called **and** the handler returns a value, the `ctx.setOutput()` value takes precedence. 
The handler's return value is ignored in that case.\n\n### Rules\n\n- **JSON-serializable**: The output value must be JSON-serializable (objects, arrays, strings, numbers, booleans, null).\n- **Last write wins**: Calling `ctx.setOutput()` multiple times overwrites the previous value.\n- **Best-effort persistence**: Like `setProgress`, output writes to the database are best-effort — errors do not kill the handler.\n\n## Reading Output\n\nOutput is stored in the `output` field of the [JobRecord](/api/job-record):\n\n```typescript\nconst job = await jobQueue.getJob(jobId);\nconsole.log(job?.output); // null | any JSON value\n```\n\n- Before the handler stores output, the value is `null`.\n- After the job completes, the output is preserved and can be read at any time.\n- Handlers that return `undefined` (or `void`) do not store output — the field remains `null`.\n\n## Tracking Output in React\n\nIf you're using the [React SDK](/usage/react-sdk), the `useJob` hook exposes `output` directly:\n\n```tsx\nimport { useJob } from '@nicnocquee/dataqueue-react';\n\nfunction JobResult({ jobId }: { jobId: number }) {\n const { status, output, progress } = useJob(jobId, {\n fetcher: (id) =>\n fetch(`/api/jobs/${id}`)\n .then((r) => r.json())\n .then((d) => d.job),\n });\n\n if (status === 'completed' && output) {\n return <a href={(output as any).url}>Download Report</a>;\n }\n\n return (\n <div>\n <p>Status: {status}</p>\n <progress value={progress ?? 0} max={100} />\n </div>\n );\n}\n```\n\n## Listening for Output Events\n\nYou can subscribe to the `job:output` event to be notified whenever a handler calls `ctx.setOutput()`:\n\n```typescript\njobQueue.on('job:output', ({ jobId, output }) => {\n console.log(`Job ${jobId} stored output:`, output);\n});\n```\n\n## Database Migration\n\n> **Note:** If you're using the **PostgreSQL** backend, make sure to run the latest\n migrations to add the `output` column. 
See [Database\n Migration](/usage/database-migration).\n\nThe Redis backend requires no migration — the `output` field is stored automatically as part of the job hash."
|
|
235
|
+
},
|
|
224
236
|
{
|
|
225
237
|
"slug": "usage/job-timeout",
|
|
226
238
|
"title": "Job Timeout",
|
|
@@ -243,7 +255,7 @@
|
|
|
243
255
|
"slug": "usage/progress-tracking",
|
|
244
256
|
"title": "Progress Tracking",
|
|
245
257
|
"description": "Report and track job progress from handlers",
|
|
246
|
-
"content": "Jobs can report their progress as a percentage (0–100) while they run. This is useful for long-running tasks like file processing, data imports, or image generation where you want to show a progress bar or percentage to the user.\n\n## Reporting Progress from a Handler\n\nUse `ctx.setProgress(percent)` inside your job handler to report progress:\n\n```typescript title=\"@lib/job-handlers.ts\"\nimport { JobHandlers } from '@nicnocquee/dataqueue';\n\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n generate_report: async (payload, signal, ctx) => {\n const chunks = await loadData(payload.reportId);\n\n for (let i = 0; i < chunks.length; i++) {\n if (signal.aborted) return;\n\n await processChunk(chunks[i]);\n\n // Report progress (0-100)\n await ctx.setProgress(Math.round(((i + 1) / chunks.length) * 100));\n }\n },\n};\n```\n\n### setProgress Rules\n\n- **Range**: The value must be between 0 and 100 (inclusive). Values outside this range throw an error.\n- **Rounding**: Fractional values are rounded to the nearest integer (`33.7` becomes `34`).\n- **Best-effort persistence**: Progress is written to the database but errors during the write do not kill the handler — processing continues.\n\n## Reading Progress\n\nProgress is stored in the `progress` field of the [JobRecord](/api/job-record):\n\n```typescript\nconst job = await jobQueue.getJob(jobId);\nconsole.log(job?.progress); // null | 0–100\n```\n\n- Before the handler calls `setProgress`, the value is `null`.\n- After the job completes, the last progress value is preserved (typically `100`).\n\n## Tracking Progress in React\n\nIf you're using the [React SDK](/usage/react-sdk), the `useJob` hook exposes `progress` directly:\n\n```tsx\nimport { useJob } from '@nicnocquee/dataqueue-react';\n\nfunction JobProgress({ jobId }: { jobId: number }) {\n const { status, progress } = useJob(jobId, {\n fetcher: (id) =>\n fetch(`/api/jobs/${id}`)\n .then((r) => r.json())\n .then((d) => d.job),\n });\n\n 
return (\n <div>\n <p>Status: {status}</p>\n <progress value={progress ?? 0} max={100} />\n <span>{progress ?? 0}%</span>\n </div>\n );\n}\n```\n\n## Database Migration\n\n> **Note:** If you're using the **PostgreSQL** backend, make sure to run the latest\n migrations to add the `progress` column. See [Database\n Migration](/usage/database-migration).\n\nThe Redis backend requires no migration — the `progress` field is stored automatically as part of the job hash."
|
|
258
|
+
"content": "Jobs can report their progress as a percentage (0–100) while they run. This is useful for long-running tasks like file processing, data imports, or image generation where you want to show a progress bar or percentage to the user.\n\n## Reporting Progress from a Handler\n\nUse `ctx.setProgress(percent)` inside your job handler to report progress:\n\n```typescript title=\"@lib/job-handlers.ts\"\nimport { JobHandlers } from '@nicnocquee/dataqueue';\n\nexport const jobHandlers: JobHandlers<JobPayloadMap> = {\n generate_report: async (payload, signal, ctx) => {\n const chunks = await loadData(payload.reportId);\n\n for (let i = 0; i < chunks.length; i++) {\n if (signal.aborted) return;\n\n await processChunk(chunks[i]);\n\n // Report progress (0-100)\n await ctx.setProgress(Math.round(((i + 1) / chunks.length) * 100));\n }\n },\n};\n```\n\n### setProgress Rules\n\n- **Range**: The value must be between 0 and 100 (inclusive). Values outside this range throw an error.\n- **Rounding**: Fractional values are rounded to the nearest integer (`33.7` becomes `34`).\n- **Best-effort persistence**: Progress is written to the database but errors during the write do not kill the handler — processing continues.\n\n## Reading Progress\n\nProgress is stored in the `progress` field of the [JobRecord](/api/job-record):\n\n```typescript\nconst job = await jobQueue.getJob(jobId);\nconsole.log(job?.progress); // null | 0–100\n```\n\n- Before the handler calls `setProgress`, the value is `null`.\n- After the job completes, the last progress value is preserved (typically `100`).\n\n## Tracking Progress in React\n\nIf you're using the [React SDK](/usage/react-sdk), the `useJob` hook exposes `progress` directly:\n\n```tsx\nimport { useJob } from '@nicnocquee/dataqueue-react';\n\nfunction JobProgress({ jobId }: { jobId: number }) {\n const { status, progress } = useJob(jobId, {\n fetcher: (id) =>\n fetch(`/api/jobs/${id}`)\n .then((r) => r.json())\n .then((d) => d.job),\n });\n\n 
return (\n <div>\n <p>Status: {status}</p>\n <progress value={progress ?? 0} max={100} />\n <span>{progress ?? 0}%</span>\n </div>\n );\n}\n```\n\n## Database Migration\n\n> **Note:** If you're using the **PostgreSQL** backend, make sure to run the latest\n migrations to add the `progress` column. See [Database\n Migration](/usage/database-migration).\n\nThe Redis backend requires no migration — the `progress` field is stored automatically as part of the job hash.\n\n## Related\n\n- [Job Output](/usage/job-output) — Store and retrieve results from job handlers using `ctx.setOutput()` or handler return values."
|
|
247
259
|
},
|
|
248
260
|
{
|
|
249
261
|
"slug": "usage/quick-start",
|
package/ai/rules/advanced.md
CHANGED
|
@@ -116,6 +116,24 @@ await queue.addJob({
|
|
|
116
116
|
- No config — legacy `2^attempts * 60s` formula (backward compatible).
|
|
117
117
|
- Cron schedules propagate retry config to enqueued jobs.
|
|
118
118
|
|
|
119
|
+
## Event Hooks
|
|
120
|
+
|
|
121
|
+
Subscribe to real-time lifecycle events via `on`, `once`, `off`, `removeAllListeners`. Works with both Postgres and Redis.
|
|
122
|
+
|
|
123
|
+
```typescript
|
|
124
|
+
queue.on('job:completed', ({ jobId, jobType }) => {
|
|
125
|
+
metrics.increment('job.completed', { jobType });
|
|
126
|
+
});
|
|
127
|
+
queue.on('job:failed', ({ jobId, jobType, error, willRetry }) => {
|
|
128
|
+
if (!willRetry) alertOps(`Permanent failure: ${jobId}`);
|
|
129
|
+
});
|
|
130
|
+
queue.on('error', (error) => Sentry.captureException(error));
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
Events: `job:added`, `job:processing`, `job:completed`, `job:failed` (with `willRetry`), `job:cancelled`, `job:retried`, `job:waiting`, `job:progress`, `job:output`, `error`.
|
|
134
|
+
|
|
135
|
+
`error` events fire alongside `onError` callbacks in `ProcessorOptions` / `SupervisorOptions` — both mechanisms work independently.
|
|
136
|
+
|
|
119
137
|
## Scaling
|
|
120
138
|
|
|
121
139
|
- Increase `batchSize` and `concurrency` for higher throughput.
|
|
@@ -130,3 +148,23 @@ await ctx.setProgress(50); // 0–100, persisted to DB
|
|
|
130
148
|
```
|
|
131
149
|
|
|
132
150
|
Read via `queue.getJob(id)` (`progress` field) or React SDK's `useJob` hook.
|
|
151
|
+
|
|
152
|
+
## Job Output
|
|
153
|
+
|
|
154
|
+
Store results via `ctx.setOutput(data)` or by returning a value from the handler:
|
|
155
|
+
|
|
156
|
+
```typescript
|
|
157
|
+
// Option 1: return a value
|
|
158
|
+
const handler = async (payload, signal, ctx) => {
|
|
159
|
+
const result = await doWork(payload);
|
|
160
|
+
return { url: result.downloadUrl };
|
|
161
|
+
};
|
|
162
|
+
|
|
163
|
+
// Option 2: ctx.setOutput (takes precedence over return value)
|
|
164
|
+
const handler = async (payload, signal, ctx) => {
|
|
165
|
+
const result = await doWork(payload);
|
|
166
|
+
await ctx.setOutput({ url: result.downloadUrl });
|
|
167
|
+
};
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
Read via `queue.getJob(id)` (`output` field) or React SDK's `useJob` hook (`output` property).
|
|
@@ -10,7 +10,7 @@ Install: `npm install @nicnocquee/dataqueue-react` (requires React 18+).
|
|
|
10
10
|
'use client';
|
|
11
11
|
import { useJob } from '@nicnocquee/dataqueue-react';
|
|
12
12
|
|
|
13
|
-
const { status, progress, data, isLoading, error } = useJob(jobId, {
|
|
13
|
+
const { status, progress, output, data, isLoading, error } = useJob(jobId, {
|
|
14
14
|
fetcher: (id) =>
|
|
15
15
|
fetch(`/api/jobs/${id}`)
|
|
16
16
|
.then((r) => r.json())
|
|
@@ -81,3 +81,7 @@ Wrap handlers with your auth middleware before exporting GET/POST.
|
|
|
81
81
|
## Progress Tracking
|
|
82
82
|
|
|
83
83
|
Use `ctx.setProgress(percent)` in handlers (0–100). The value appears in `useJob`'s `progress` field and the dashboard detail view.
|
|
84
|
+
|
|
85
|
+
## Job Output
|
|
86
|
+
|
|
87
|
+
Store results via `ctx.setOutput(data)` or by returning a value from the handler. The value appears in `useJob`'s `output` field and the dashboard detail view. If both are used, `ctx.setOutput()` takes precedence.
|
|
@@ -170,6 +170,56 @@ await queue.addJob({
|
|
|
170
170
|
- Handler must be serializable (no closures over external variables).
|
|
171
171
|
- `prolong`, `onTimeout`, `ctx.run`, waits are NOT available.
|
|
172
172
|
|
|
173
|
+
## Event Hooks
|
|
174
|
+
|
|
175
|
+
Subscribe to real-time job lifecycle events. Works identically with PostgreSQL and Redis.
|
|
176
|
+
|
|
177
|
+
```typescript
|
|
178
|
+
const queue = initJobQueue<MyPayloadMap>(config);
|
|
179
|
+
|
|
180
|
+
queue.on('job:completed', ({ jobId, jobType }) => {
|
|
181
|
+
console.log(`Job ${jobId} (${jobType}) completed`);
|
|
182
|
+
});
|
|
183
|
+
|
|
184
|
+
queue.on('job:failed', ({ jobId, jobType, error, willRetry }) => {
|
|
185
|
+
console.error(`Job ${jobId} failed: ${error.message}`);
|
|
186
|
+
if (!willRetry) {
|
|
187
|
+
alertOps(`Permanent failure for job ${jobId}`);
|
|
188
|
+
}
|
|
189
|
+
});
|
|
190
|
+
|
|
191
|
+
queue.on('error', (error) => {
|
|
192
|
+
Sentry.captureException(error);
|
|
193
|
+
});
|
|
194
|
+
```
|
|
195
|
+
|
|
196
|
+
### Available events
|
|
197
|
+
|
|
198
|
+
| Event | Payload |
|
|
199
|
+
| ---------------- | -------------------------------------- |
|
|
200
|
+
| `job:added` | `{ jobId, jobType }` |
|
|
201
|
+
| `job:processing` | `{ jobId, jobType }` |
|
|
202
|
+
| `job:completed` | `{ jobId, jobType }` |
|
|
203
|
+
| `job:failed` | `{ jobId, jobType, error, willRetry }` |
|
|
204
|
+
| `job:cancelled` | `{ jobId }` |
|
|
205
|
+
| `job:retried` | `{ jobId }` |
|
|
206
|
+
| `job:waiting` | `{ jobId, jobType }` |
|
|
207
|
+
| `job:progress`   | `{ jobId, progress }`                  |
| `job:output`     | `{ jobId, output }`                    |
|
|
208
|
+
| `error` | `Error` |
|
|
209
|
+
|
|
210
|
+
### Listener management
|
|
211
|
+
|
|
212
|
+
```typescript
|
|
213
|
+
const listener = ({ jobId }) => console.log(jobId);
|
|
214
|
+
queue.on('job:completed', listener);
|
|
215
|
+
queue.off('job:completed', listener);
|
|
216
|
+
queue.once('job:added', ({ jobId }) => console.log('First job:', jobId));
|
|
217
|
+
queue.removeAllListeners('job:completed');
|
|
218
|
+
queue.removeAllListeners(); // all events
|
|
219
|
+
```
|
|
220
|
+
|
|
221
|
+
The `error` event fires alongside `onError` callbacks in `ProcessorOptions` and `SupervisorOptions` -- both mechanisms work independently.
|
|
222
|
+
|
|
173
223
|
## Tags
|
|
174
224
|
|
|
175
225
|
```typescript
|
|
@@ -38,7 +38,8 @@ export const jobHandlers: JobHandlers<JobPayloadMap> = {
|
|
|
38
38
|
},
|
|
39
39
|
generate_report: async (payload, signal) => {
|
|
40
40
|
if (signal.aborted) return;
|
|
41
|
-
await generateReport(payload.reportId, payload.userId);
|
|
41
|
+
const url = await generateReport(payload.reportId, payload.userId);
|
|
42
|
+
return { url }; // stored as job output, readable via getJob()
|
|
42
43
|
},
|
|
43
44
|
};
|
|
44
45
|
```
|
|
@@ -102,13 +102,14 @@ export async function GET(
|
|
|
102
102
|
|
|
103
103
|
### useJob Return Value
|
|
104
104
|
|
|
105
|
-
| Field | Type | Description
|
|
106
|
-
| ----------- | ------------------- |
|
|
107
|
-
| `data` | `JobData \| null` | Latest job data from fetcher
|
|
108
|
-
| `status` | `JobStatus \| null` | Current job status
|
|
109
|
-
| `progress` | `number \| null` | Progress percentage (0–100)
|
|
110
|
-
| `
|
|
111
|
-
| `
|
|
105
|
+
| Field | Type | Description |
|
|
106
|
+
| ----------- | ------------------- | ----------------------------------------------------- |
|
|
107
|
+
| `data` | `JobData \| null` | Latest job data from fetcher |
|
|
108
|
+
| `status` | `JobStatus \| null` | Current job status |
|
|
109
|
+
| `progress` | `number \| null` | Progress percentage (0–100) |
|
|
110
|
+
| `output` | `unknown \| null` | Handler output from `ctx.setOutput()` or return value |
|
|
111
|
+
| `isLoading` | `boolean` | True until first fetch resolves |
|
|
112
|
+
| `error` | `Error \| null` | Last fetch error |
|
|
112
113
|
|
|
113
114
|
## Dashboard — @nicnocquee/dataqueue-dashboard
|
|
114
115
|
|
|
@@ -187,3 +188,14 @@ const handler = async (payload, signal, ctx) => {
|
|
|
187
188
|
}
|
|
188
189
|
};
|
|
189
190
|
```
|
|
191
|
+
|
|
192
|
+
### Job Output from Handlers
|
|
193
|
+
|
|
194
|
+
Store results via `ctx.setOutput(data)` or by returning a value from the handler. Exposed via `getJob()` (`output` field) and the `useJob` hook's `output` property. If both are used, `ctx.setOutput()` takes precedence.
|
|
195
|
+
|
|
196
|
+
```typescript
|
|
197
|
+
const handler = async (payload, signal, ctx) => {
|
|
198
|
+
const result = await doWork(payload);
|
|
199
|
+
return { url: result.downloadUrl }; // stored as output
|
|
200
|
+
};
|
|
201
|
+
```
|