@open-mercato/queue 0.4.2-canary-c02407ff85

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,303 @@
+ import fs from 'node:fs'
+ import path from 'node:path'
+ import crypto from 'node:crypto'
+ import type { Queue, QueuedJob, JobHandler, LocalQueueOptions, ProcessOptions, ProcessResult } from '../types'
+
+ type LocalState = {
+   lastProcessedId?: string
+   completedCount?: number
+   failedCount?: number
+ }
+
+ type StoredJob<T> = QueuedJob<T>
+
+ /** Default polling interval in milliseconds */
+ const DEFAULT_POLL_INTERVAL = 1000
+
+ /**
+  * Creates a file-based local queue.
+  *
+  * Jobs are stored in JSON files within a directory structure:
+  * - `.queue/<name>/queue.json` - Array of queued jobs
+  * - `.queue/<name>/state.json` - Processing state (last processed ID)
+  *
+  * **Limitations:**
+  * - Jobs are processed sequentially (concurrency option is for logging/compatibility only)
+  * - Not suitable for production or multi-process environments
+  * - No retry mechanism for failed jobs
+  *
+  * @template T - The payload type for jobs
+  * @param name - Queue name (used for directory naming)
+  * @param options - Local queue options
+  */
+ export function createLocalQueue<T = unknown>(
+   name: string,
+   options?: LocalQueueOptions
+ ): Queue<T> {
+   const baseDir = options?.baseDir ?? path.resolve('.queue')
+   const queueDir = path.join(baseDir, name)
+   const queueFile = path.join(queueDir, 'queue.json')
+   const stateFile = path.join(queueDir, 'state.json')
+   // Note: concurrency is stored for logging/compatibility but jobs are processed sequentially
+   const concurrency = options?.concurrency ?? 1
+   const pollInterval = options?.pollInterval ?? DEFAULT_POLL_INTERVAL
+
+   // Worker state for continuous polling
+   let pollingTimer: ReturnType<typeof setInterval> | null = null
+   let isProcessing = false
+   let activeHandler: JobHandler<T> | null = null
+
+   // -------------------------------------------------------------------------
+   // File Operations
+   // -------------------------------------------------------------------------
+
+   function ensureDir(): void {
+     // Use atomic operations to handle race conditions
+     try {
+       fs.mkdirSync(queueDir, { recursive: true })
+     } catch (e: unknown) {
+       const error = e as NodeJS.ErrnoException
+       if (error.code !== 'EEXIST') throw error
+     }
+
+     // Initialize queue file with exclusive create flag
+     try {
+       fs.writeFileSync(queueFile, '[]', { encoding: 'utf8', flag: 'wx' })
+     } catch (e: unknown) {
+       const error = e as NodeJS.ErrnoException
+       if (error.code !== 'EEXIST') throw error
+     }
+
+     // Initialize state file with exclusive create flag
+     try {
+       fs.writeFileSync(stateFile, '{}', { encoding: 'utf8', flag: 'wx' })
+     } catch (e: unknown) {
+       const error = e as NodeJS.ErrnoException
+       if (error.code !== 'EEXIST') throw error
+     }
+   }
+
+   function readQueue(): StoredJob<T>[] {
+     ensureDir()
+     try {
+       const content = fs.readFileSync(queueFile, 'utf8')
+       return JSON.parse(content) as StoredJob<T>[]
+     } catch (error: unknown) {
+       const e = error as NodeJS.ErrnoException
+       if (e.code === 'ENOENT') {
+         return []
+       }
+       console.error(`[queue:${name}] Failed to read queue file:`, e.message)
+       throw new Error(`Queue file corrupted or unreadable: ${e.message}`)
+     }
+   }
+
+   function writeQueue(jobs: StoredJob<T>[]): void {
+     ensureDir()
+     fs.writeFileSync(queueFile, JSON.stringify(jobs, null, 2), 'utf8')
+   }
+
+   function readState(): LocalState {
+     ensureDir()
+     try {
+       const content = fs.readFileSync(stateFile, 'utf8')
+       return JSON.parse(content) as LocalState
+     } catch {
+       return {}
+     }
+   }
+
+   function writeState(state: LocalState): void {
+     ensureDir()
+     fs.writeFileSync(stateFile, JSON.stringify(state, null, 2), 'utf8')
+   }
+
+   function generateId(): string {
+     return crypto.randomUUID()
+   }
+
+   // -------------------------------------------------------------------------
+   // Queue Implementation
+   // -------------------------------------------------------------------------
+
+   async function enqueue(data: T): Promise<string> {
+     const jobs = readQueue()
+     const job: StoredJob<T> = {
+       id: generateId(),
+       payload: data,
+       createdAt: new Date().toISOString(),
+     }
+     jobs.push(job)
+     writeQueue(jobs)
+     return job.id
+   }
+
+   /**
+    * Process pending jobs in a single batch (internal helper).
+    */
+   async function processBatch(
+     handler: JobHandler<T>,
+     options?: ProcessOptions
+   ): Promise<ProcessResult> {
+     const state = readState()
+     const jobs = readQueue()
+
+     // Find jobs that haven't been processed yet
+     const lastProcessedIndex = state.lastProcessedId
+       ? jobs.findIndex((j) => j.id === state.lastProcessedId)
+       : -1
+
+     const pendingJobs = jobs.slice(lastProcessedIndex + 1)
+     const jobsToProcess = options?.limit
+       ? pendingJobs.slice(0, options.limit)
+       : pendingJobs
+
+     let processed = 0
+     let failed = 0
+     let lastJobId: string | undefined
+     const jobIdsToRemove = new Set<string>()
+
+     for (const job of jobsToProcess) {
+       try {
+         await Promise.resolve(
+           handler(job, {
+             jobId: job.id,
+             attemptNumber: 1,
+             queueName: name,
+           })
+         )
+         processed++
+         lastJobId = job.id
+         jobIdsToRemove.add(job.id)
+         console.log(`[queue:${name}] Job ${job.id} completed`)
+       } catch (error) {
+         console.error(`[queue:${name}] Job ${job.id} failed:`, error)
+         failed++
+         lastJobId = job.id
+         jobIdsToRemove.add(job.id) // Remove failed jobs too (matching async strategy)
+       }
+     }
+
+     // Remove processed jobs from queue (matching async removeOnComplete behavior)
+     if (jobIdsToRemove.size > 0) {
+       const updatedJobs = jobs.filter((j) => !jobIdsToRemove.has(j.id))
+       writeQueue(updatedJobs)
+
+       // Update state with running counts
+       const newState: LocalState = {
+         lastProcessedId: lastJobId,
+         completedCount: (state.completedCount ?? 0) + processed,
+         failedCount: (state.failedCount ?? 0) + failed,
+       }
+       writeState(newState)
+     }
+
+     return { processed, failed, lastJobId }
+   }
+
+   /**
+    * Poll for and process new jobs.
+    */
+   async function pollAndProcess(): Promise<void> {
+     // Skip if already processing to avoid concurrent file access
+     if (isProcessing || !activeHandler) return
+
+     isProcessing = true
+     try {
+       await processBatch(activeHandler)
+     } catch (error) {
+       console.error(`[queue:${name}] Polling error:`, error)
+     } finally {
+       isProcessing = false
+     }
+   }
+
+   async function process(
+     handler: JobHandler<T>,
+     options?: ProcessOptions
+   ): Promise<ProcessResult> {
+     // If limit is specified, do a single batch (backward compatibility)
+     if (options?.limit) {
+       return processBatch(handler, options)
+     }
+
+     // Start continuous polling mode (like BullMQ Worker)
+     activeHandler = handler
+
+     // Process any pending jobs immediately
+     await processBatch(handler)
+
+     // Start polling interval for new jobs
+     pollingTimer = setInterval(() => {
+       pollAndProcess().catch((err) => {
+         console.error(`[queue:${name}] Poll cycle error:`, err)
+       })
+     }, pollInterval)
+
+     console.log(`[queue:${name}] Worker started with concurrency ${concurrency}`)
+
+     // Return sentinel value indicating continuous worker mode (like async strategy)
+     return { processed: -1, failed: -1, lastJobId: undefined }
+   }
+
+   async function clear(): Promise<{ removed: number }> {
+     const jobs = readQueue()
+     const removed = jobs.length
+     writeQueue([])
+     // Reset state but preserve counts for historical tracking
+     const state = readState()
+     writeState({
+       completedCount: state.completedCount,
+       failedCount: state.failedCount,
+     })
+     return { removed }
+   }
+
+   async function close(): Promise<void> {
+     // Stop polling timer
+     if (pollingTimer) {
+       clearInterval(pollingTimer)
+       pollingTimer = null
+     }
+     activeHandler = null
+
+     // Wait for any in-progress processing to complete (with timeout)
+     const SHUTDOWN_TIMEOUT = 5000
+     const startTime = Date.now()
+
+     while (isProcessing) {
+       if (Date.now() - startTime > SHUTDOWN_TIMEOUT) {
+         console.warn(`[queue:${name}] Force closing after ${SHUTDOWN_TIMEOUT}ms timeout`)
+         break
+       }
+       await new Promise((resolve) => setTimeout(resolve, 50))
+     }
+   }
+
+   async function getJobCounts(): Promise<{
+     waiting: number
+     active: number
+     completed: number
+     failed: number
+   }> {
+     const state = readState()
+     const jobs = readQueue()
+
+     return {
+       waiting: jobs.length, // All jobs in queue are waiting (processed ones are removed)
+       active: 0, // Local strategy doesn't track active jobs
+       completed: state.completedCount ?? 0,
+       failed: state.failedCount ?? 0,
+     }
+   }
+
+   return {
+     name,
+     strategy: 'local',
+     enqueue,
+     process,
+     clear,
+     close,
+     getJobCounts,
+   }
+ }
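
For orientation, here is a minimal usage sketch of the file-backed strategy above. The queue name, payload type, and `baseDir` value are illustrative assumptions, not part of the package; the import path is also assumed since the file's path is not named in this hunk.

```typescript
// Hypothetical usage of createLocalQueue from the file above.
import { createLocalQueue } from './local' // path assumed for illustration

type EmailJob = { to: string; subject: string }

async function main() {
  const queue = createLocalQueue<EmailJob>('emails', { baseDir: '.tmp-queue' })

  // enqueue() appends the job to .tmp-queue/emails/queue.json
  const id = await queue.enqueue({ to: 'a@example.com', subject: 'Hi' })

  // Passing a limit runs a single batch and resolves with real counts;
  // omitting it starts the polling worker and returns the -1 sentinel.
  const result = await queue.process(async (job) => {
    console.log('sending', job.payload.subject)
  }, { limit: 10 })

  console.log(result) // e.g. { processed: 1, failed: 0, lastJobId: id }
  await queue.close()
}

main().catch(console.error)
```
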
package/src/types.ts ADDED
@@ -0,0 +1,238 @@
+ /**
+  * Queue Package Type Definitions
+  *
+  * Provides type-safe abstractions for multi-strategy job queues.
+  */
+
+ // ============================================================================
+ // Core Job Types
+ // ============================================================================
+
+ /**
+  * Represents a job stored in the queue.
+  * @template T - The payload type for this job
+  */
+ export type QueuedJob<T = unknown> = {
+   /** Unique identifier for the job */
+   id: string
+   /** The job payload data */
+   payload: T
+   /** ISO timestamp when the job was created */
+   createdAt: string
+   /** Optional metadata for the job */
+   metadata?: Record<string, unknown>
+ }
+
+ /**
+  * Context provided to job handlers during processing.
+  */
+ export type JobContext = {
+   /** Unique identifier of the current job */
+   jobId: string
+   /** Current attempt number (1-based) */
+   attemptNumber: number
+   /** Name of the queue being processed */
+   queueName: string
+ }
+
+ /**
+  * Handler function that processes jobs from the queue.
+  * @template T - The payload type this handler expects
+  */
+ export type JobHandler<T = unknown> = (
+   job: QueuedJob<T>,
+   ctx: JobContext
+ ) => Promise<void> | void
+
+ // ============================================================================
+ // Strategy Types
+ // ============================================================================
+
+ /** Available queue strategy types */
+ export type QueueStrategyType = 'local' | 'async'
+
+ /**
+  * Options for local (file-based) queue strategy.
+  */
+ export type LocalQueueOptions = {
+   /** Base directory for queue files. Defaults to '.queue' */
+   baseDir?: string
+   /** Number of concurrent job processors. Defaults to 1 */
+   concurrency?: number
+   /** Polling interval in milliseconds for continuous processing. Defaults to 1000 */
+   pollInterval?: number
+ }
+
+ /**
+  * Redis connection options for async strategy.
+  */
+ export type RedisConnectionOptions = {
+   /** Redis connection URL (e.g., redis://localhost:6379) */
+   url?: string
+   /** Redis host */
+   host?: string
+   /** Redis port */
+   port?: number
+   /** Redis password */
+   password?: string
+ }
+
+ /**
+  * Options for async (BullMQ) queue strategy.
+  */
+ export type AsyncQueueOptions = {
+   /** Redis connection configuration */
+   connection?: RedisConnectionOptions
+   /** Number of concurrent job processors. Defaults to 1 */
+   concurrency?: number
+ }
+
+ /**
+  * Conditional options type based on strategy.
+  * Local strategy gets file options, async gets Redis options.
+  */
+ export type QueueOptions<S extends QueueStrategyType> = S extends 'async'
+   ? AsyncQueueOptions
+   : LocalQueueOptions
+
+ // ============================================================================
+ // Process Types
+ // ============================================================================
+
+ /**
+  * Options for the process operation.
+  */
+ export type ProcessOptions = {
+   /** Maximum number of jobs to process (local strategy only) */
+   limit?: number
+ }
+
+ /**
+  * Result returned after processing jobs.
+  */
+ export type ProcessResult = {
+   /** Number of jobs successfully processed */
+   processed: number
+   /** Number of jobs that failed */
+   failed: number
+   /** ID of the last processed job */
+   lastJobId?: string
+ }
+
+ // ============================================================================
+ // Queue Interface
+ // ============================================================================
+
+ /**
+  * Main queue interface that all strategies must implement.
+  * @template T - The payload type for jobs in this queue
+  */
+ export interface Queue<T = unknown> {
+   /** Name of this queue */
+   readonly name: string
+   /** Strategy type used by this queue */
+   readonly strategy: QueueStrategyType
+
+   /**
+    * Add a job to the queue.
+    * @param data - The job payload
+    * @returns Promise resolving to the job ID
+    */
+   enqueue(data: T): Promise<string>
+
+   /**
+    * Process jobs from the queue.
+    *
+    * For local strategy: processes jobs synchronously and returns result with counts.
+    * For async strategy: starts a worker and returns sentinel result (processed=-1).
+    *
+    * @param handler - Function to handle each job
+    * @param options - Processing options
+    * @returns ProcessResult with counts (or sentinel for async worker mode)
+    */
+   process(handler: JobHandler<T>, options?: ProcessOptions): Promise<ProcessResult>
+
+   /**
+    * Remove all jobs from the queue.
+    * @returns Promise with count of removed jobs
+    */
+   clear(): Promise<{ removed: number }>
+
+   /**
+    * Close the queue and release resources.
+    */
+   close(): Promise<void>
+
+   /**
+    * Get current job counts by status.
+    * For async strategy: returns counts from BullMQ.
+    * For local strategy: waiting/completed based on last processed ID.
+    */
+   getJobCounts(): Promise<{
+     waiting: number
+     active: number
+     completed: number
+     failed: number
+   }>
+ }
+
+ // ============================================================================
+ // Factory Types
+ // ============================================================================
+
+ /**
+  * Discriminated union for queue creation options.
+  */
+ export type CreateQueueConfig<S extends QueueStrategyType = QueueStrategyType> =
+   S extends 'async'
+     ? { strategy: 'async' } & AsyncQueueOptions
+     : { strategy: 'local' } & LocalQueueOptions
+
+ /**
+  * Factory function signature for creating queues.
+  */
+ export type CreateQueueFn = <T = unknown>(
+   name: string,
+   strategy: QueueStrategyType,
+   options?: QueueOptions<QueueStrategyType>
+ ) => Queue<T>
+
+ // ============================================================================
+ // Worker Discovery Types
+ // ============================================================================
+
+ /**
+  * Metadata exported by worker files for auto-discovery.
+  *
+  * @example
+  * ```typescript
+  * // src/modules/example/workers/my-queue.ts
+  * export const metadata: WorkerMeta = {
+  *   queue: 'my-queue',
+  *   concurrency: 5,
+  * }
+  * ```
+  */
+ export type WorkerMeta = {
+   /** Queue name this worker processes */
+   queue: string
+   /** Optional unique identifier (defaults to <module>:workers:<filename>) */
+   id?: string
+   /** Worker concurrency (default: 1) */
+   concurrency?: number
+ }
+
+ /**
+  * Descriptor for a discovered and registered worker.
+  * @template T - The job payload type this worker handles
+  */
+ export type WorkerDescriptor<T = unknown> = {
+   /** Unique identifier for this worker */
+   id: string
+   /** Queue name to process */
+   queue: string
+   /** Handler function */
+   handler: JobHandler<T>
+   /** Concurrency level */
+   concurrency: number
+ }
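
The `WorkerMeta` JSDoc above shows the metadata side of a discoverable worker file; a sketch pairing it with a typed handler might look like the following. The module path, queue name, payload shape, and the assumption that the handler is a named export (the discovery contract itself is not part of this diff) are all illustrative.

```typescript
// Hypothetical worker module shaped after the WorkerMeta and JobHandler types above.
// Import path and the `handler` export name are assumptions for illustration.
import type { JobHandler, WorkerMeta } from '@open-mercato/queue'

type IndexPayload = { entityId: string }

export const metadata: WorkerMeta = {
  queue: 'fulltext-indexing',
  concurrency: 2,
}

// Receives the stored job plus the per-attempt context described by JobContext.
export const handler: JobHandler<IndexPayload> = async (job, ctx) => {
  console.log(`[${ctx.queueName}] attempt ${ctx.attemptNumber} for ${job.payload.entityId}`)
}
```
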
@@ -0,0 +1,176 @@
+ import type { WorkerDescriptor } from '../../types'
+ import {
+   registerWorker,
+   registerModuleWorkers,
+   getWorkers,
+   getWorkersByQueue,
+   getWorker,
+   getRegisteredQueues,
+   clearWorkers,
+ } from '../registry'
+
+ describe('Worker Registry', () => {
+   beforeEach(() => {
+     clearWorkers()
+   })
+
+   afterEach(() => {
+     clearWorkers()
+   })
+
+   describe('registerWorker', () => {
+     it('should register a single worker', () => {
+       const worker: WorkerDescriptor = {
+         id: 'test:worker',
+         queue: 'test-queue',
+         concurrency: 1,
+         handler: async () => {},
+       }
+
+       registerWorker(worker)
+
+       const result = getWorker('test:worker')
+       expect(result).toBeDefined()
+       expect(result?.id).toBe('test:worker')
+       expect(result?.queue).toBe('test-queue')
+       expect(result?.concurrency).toBe(1)
+     })
+
+     it('should overwrite existing worker with same id', () => {
+       const worker1: WorkerDescriptor = {
+         id: 'test:worker',
+         queue: 'queue-1',
+         concurrency: 1,
+         handler: async () => {},
+       }
+
+       const worker2: WorkerDescriptor = {
+         id: 'test:worker',
+         queue: 'queue-2',
+         concurrency: 5,
+         handler: async () => {},
+       }
+
+       registerWorker(worker1)
+       registerWorker(worker2)
+
+       const workers = getWorkers()
+       expect(workers.length).toBe(1)
+       expect(workers[0].queue).toBe('queue-2')
+       expect(workers[0].concurrency).toBe(5)
+     })
+   })
+
+   describe('registerModuleWorkers', () => {
+     it('should register multiple workers at once', () => {
+       const workers: WorkerDescriptor[] = [
+         { id: 'module:worker1', queue: 'queue-a', concurrency: 1, handler: async () => {} },
+         { id: 'module:worker2', queue: 'queue-b', concurrency: 2, handler: async () => {} },
+         { id: 'module:worker3', queue: 'queue-a', concurrency: 3, handler: async () => {} },
+       ]
+
+       registerModuleWorkers(workers)
+
+       expect(getWorkers().length).toBe(3)
+       expect(getWorker('module:worker1')).toBeDefined()
+       expect(getWorker('module:worker2')).toBeDefined()
+       expect(getWorker('module:worker3')).toBeDefined()
+     })
+
+     it('should handle empty array', () => {
+       registerModuleWorkers([])
+       expect(getWorkers().length).toBe(0)
+     })
+   })
+
+   describe('getWorkers', () => {
+     it('should return empty array when no workers registered', () => {
+       expect(getWorkers()).toEqual([])
+     })
+
+     it('should return all registered workers', () => {
+       registerWorker({ id: 'w1', queue: 'q1', concurrency: 1, handler: async () => {} })
+       registerWorker({ id: 'w2', queue: 'q2', concurrency: 2, handler: async () => {} })
+
+       const workers = getWorkers()
+       expect(workers.length).toBe(2)
+       expect(workers.map(w => w.id).sort()).toEqual(['w1', 'w2'])
+     })
+   })
+
+   describe('getWorkersByQueue', () => {
+     it('should return workers for a specific queue', () => {
+       registerModuleWorkers([
+         { id: 'events:w1', queue: 'events', concurrency: 1, handler: async () => {} },
+         { id: 'search:w1', queue: 'fulltext-indexing', concurrency: 2, handler: async () => {} },
+         { id: 'events:w2', queue: 'events', concurrency: 1, handler: async () => {} },
+       ])
+
+       const eventsWorkers = getWorkersByQueue('events')
+       expect(eventsWorkers.length).toBe(2)
+       expect(eventsWorkers.every(w => w.queue === 'events')).toBe(true)
+
+       const searchWorkers = getWorkersByQueue('fulltext-indexing')
+       expect(searchWorkers.length).toBe(1)
+       expect(searchWorkers[0].id).toBe('search:w1')
+     })
+
+     it('should return empty array for unknown queue', () => {
+       registerWorker({ id: 'w1', queue: 'known-queue', concurrency: 1, handler: async () => {} })
+
+       const workers = getWorkersByQueue('unknown-queue')
+       expect(workers).toEqual([])
+     })
+   })
+
+   describe('getWorker', () => {
+     it('should return undefined for unknown worker id', () => {
+       expect(getWorker('unknown:worker')).toBeUndefined()
+     })
+
+     it('should return the worker for known id', () => {
+       const handler = async () => {}
+       registerWorker({ id: 'known:worker', queue: 'my-queue', concurrency: 5, handler })
+
+       const worker = getWorker('known:worker')
+       expect(worker).toBeDefined()
+       expect(worker?.queue).toBe('my-queue')
+       expect(worker?.concurrency).toBe(5)
+       expect(worker?.handler).toBe(handler)
+     })
+   })
+
+   describe('getRegisteredQueues', () => {
+     it('should return empty array when no workers registered', () => {
+       expect(getRegisteredQueues()).toEqual([])
+     })
+
+     it('should return unique queue names', () => {
+       registerModuleWorkers([
+         { id: 'w1', queue: 'events', concurrency: 1, handler: async () => {} },
+         { id: 'w2', queue: 'fulltext-indexing', concurrency: 1, handler: async () => {} },
+         { id: 'w3', queue: 'events', concurrency: 1, handler: async () => {} },
+         { id: 'w4', queue: 'vector-indexing', concurrency: 1, handler: async () => {} },
+       ])
+
+       const queues = getRegisteredQueues().sort()
+       expect(queues).toEqual(['events', 'fulltext-indexing', 'vector-indexing'])
+     })
+   })
+
+   describe('clearWorkers', () => {
+     it('should remove all registered workers', () => {
+       registerModuleWorkers([
+         { id: 'w1', queue: 'q1', concurrency: 1, handler: async () => {} },
+         { id: 'w2', queue: 'q2', concurrency: 1, handler: async () => {} },
+       ])
+
+       expect(getWorkers().length).toBe(2)
+
+       clearWorkers()
+
+       expect(getWorkers()).toEqual([])
+       expect(getRegisteredQueues()).toEqual([])
+     })
+   })
+ })
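
The registry module exercised by these tests is not included in this diff. A minimal Map-backed sketch that would satisfy the behavior asserted above might look like the following; everything beyond the function names and semantics implied by the tests is an assumption, including the file location and import path.

```typescript
// Hypothetical registry sketch consistent with the tests above (not the package's actual source).
import type { WorkerDescriptor } from '../types' // path assumed

const workers = new Map<string, WorkerDescriptor>()

export function registerWorker(worker: WorkerDescriptor): void {
  workers.set(worker.id, worker) // registering the same id again overwrites, as the tests expect
}

export function registerModuleWorkers(list: WorkerDescriptor[]): void {
  for (const worker of list) registerWorker(worker)
}

export function getWorkers(): WorkerDescriptor[] {
  return Array.from(workers.values())
}

export function getWorkersByQueue(queue: string): WorkerDescriptor[] {
  return getWorkers().filter((w) => w.queue === queue)
}

export function getWorker(id: string): WorkerDescriptor | undefined {
  return workers.get(id)
}

export function getRegisteredQueues(): string[] {
  // Unique queue names across all registered workers
  return Array.from(new Set(getWorkers().map((w) => w.queue)))
}

export function clearWorkers(): void {
  workers.clear()
}
```
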