@mantiq/queue 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +19 -0
- package/package.json +72 -0
- package/src/Job.ts +122 -0
- package/src/JobBatch.ts +188 -0
- package/src/JobChain.ts +119 -0
- package/src/JobRegistry.ts +37 -0
- package/src/PendingDispatch.ts +80 -0
- package/src/QueueManager.ts +76 -0
- package/src/QueueServiceProvider.ts +88 -0
- package/src/Worker.ts +282 -0
- package/src/commands/MakeJobCommand.ts +29 -0
- package/src/commands/QueueFailedCommand.ts +50 -0
- package/src/commands/QueueFlushCommand.ts +19 -0
- package/src/commands/QueueRetryCommand.ts +52 -0
- package/src/commands/QueueWorkCommand.ts +49 -0
- package/src/commands/ScheduleRunCommand.ts +58 -0
- package/src/contracts/JobContract.ts +64 -0
- package/src/contracts/QueueDriver.ts +70 -0
- package/src/drivers/KafkaDriver.ts +334 -0
- package/src/drivers/RedisDriver.ts +292 -0
- package/src/drivers/SQLiteDriver.ts +280 -0
- package/src/drivers/SqsDriver.ts +280 -0
- package/src/drivers/SyncDriver.ts +142 -0
- package/src/errors/QueueError.ts +22 -0
- package/src/events/QueueEvents.ts +36 -0
- package/src/helpers/queue.ts +76 -0
- package/src/index.ts +85 -0
- package/src/schedule/Schedule.ts +252 -0
- package/src/testing/QueueFake.ts +209 -0
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import { Command } from '@mantiq/cli'
|
|
2
|
+
import type { ParsedArgs } from '@mantiq/cli'
|
|
3
|
+
import { Schedule } from '../schedule/Schedule.ts'
|
|
4
|
+
import { dispatch } from '../helpers/queue.ts'
|
|
5
|
+
import { Job } from '../Job.ts'
|
|
6
|
+
import type { Constructor } from '../contracts/JobContract.ts'
|
|
7
|
+
|
|
8
|
+
export class ScheduleRunCommand extends Command {
|
|
9
|
+
override name = 'schedule:run'
|
|
10
|
+
override description = 'Run due scheduled entries'
|
|
11
|
+
|
|
12
|
+
constructor(private readonly schedule: Schedule) {
|
|
13
|
+
super()
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
override async handle(_args: ParsedArgs): Promise<number> {
|
|
17
|
+
const due = this.schedule.dueEntries()
|
|
18
|
+
|
|
19
|
+
if (due.length === 0) {
|
|
20
|
+
this.io.info('No scheduled entries are due.')
|
|
21
|
+
return 0
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
for (const entry of due) {
|
|
25
|
+
const desc = entry.description || entry.expression
|
|
26
|
+
try {
|
|
27
|
+
switch (entry.type) {
|
|
28
|
+
case 'command':
|
|
29
|
+
this.io.info(`Running command: ${entry.value as string}`)
|
|
30
|
+
// Commands are executed by the CLI kernel — we just log here.
|
|
31
|
+
// In a full implementation, this would call the CLI kernel's dispatch.
|
|
32
|
+
break
|
|
33
|
+
|
|
34
|
+
case 'job': {
|
|
35
|
+
const JobClass = entry.value as Constructor<Job>
|
|
36
|
+
const job = Object.assign(new (JobClass as any)(), entry.jobData ?? {}) as Job
|
|
37
|
+
this.io.info(`Dispatching job: ${JobClass.name}`)
|
|
38
|
+
await dispatch(job)
|
|
39
|
+
break
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
case 'callback': {
|
|
43
|
+
this.io.info(`Running callback: ${desc}`)
|
|
44
|
+
const callback = entry.value as () => any
|
|
45
|
+
await callback()
|
|
46
|
+
break
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
} catch (error) {
|
|
50
|
+
const msg = error instanceof Error ? error.message : String(error)
|
|
51
|
+
this.io.error(`Failed to run scheduled entry [${desc}]: ${msg}`)
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
this.io.success(`Ran ${due.length} scheduled entry/entries.`)
|
|
56
|
+
return 0
|
|
57
|
+
}
|
|
58
|
+
}
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
import type { Job } from '../Job.ts'
|
|
2
|
+
|
|
3
|
+
/** Serialized form of a Job, stored in the queue backend */
export interface SerializedPayload {
  /** Registered class name used to resolve the Job constructor when processing */
  jobName: string
  /** Job instance properties captured at dispatch time */
  data: Record<string, any>
  /** Name of the queue this payload targets */
  queue: string
  /** Connection name, or null for the default connection */
  connection: string | null
  /** Maximum number of attempts before the job is considered failed */
  tries: number
  /** Backoff strategy, string-encoded — exact format defined by Job; TODO confirm */
  backoff: string
  /** Per-attempt timeout — presumably seconds; verify against Worker */
  timeout: number
  /** Initial dispatch delay in seconds (drivers compute availableAt = now + delay) */
  delay: number
  /** For job chains: remaining jobs to dispatch after this one */
  chainedJobs?: SerializedPayload[]
  /** For job chains: job to dispatch if a chained job fails */
  chainCatchJob?: SerializedPayload
  /** For batches: the batch this job belongs to */
  batchId?: string
}

/** A job as stored/retrieved by the queue driver */
export interface QueuedJob {
  /** Driver-assigned identifier (UUID string or numeric row id, depending on driver) */
  id: string | number
  /** Queue the job was pushed onto */
  queue: string
  /** The serialized job payload */
  payload: SerializedPayload
  /** Number of times the job has been claimed (incremented on pop) */
  attempts: number
  /** Unix timestamp (seconds) when the job was claimed, or null if unclaimed */
  reservedAt: number | null
  /** Unix timestamp (seconds) when the job becomes eligible for processing */
  availableAt: number
  /** Unix timestamp (seconds) when the job was pushed */
  createdAt: number
}

/** A permanently failed job */
export interface FailedJob {
  /** Driver-assigned identifier of the failed-job record */
  id: string | number
  /** Queue the job failed on */
  queue: string
  /** The serialized job payload, kept for retry */
  payload: SerializedPayload
  /** Error name, message and stack trace captured at failure time */
  exception: string
  /** Unix timestamp (seconds) when the job was marked failed */
  failedAt: number
}

/** Batch record stored by the driver */
export interface BatchRecord {
  id: string
  name: string
  /** Total number of jobs added to the batch */
  totalJobs: number
  /** Jobs that completed successfully */
  processedJobs: number
  /** Jobs that failed */
  failedJobs: number
  /** IDs of the jobs that failed */
  failedJobIds: string[]
  options: BatchOptions
  /** Unix timestamp (seconds) of cancellation, or null if not cancelled */
  cancelledAt: number | null
  /** Unix timestamp (seconds) when the batch was created */
  createdAt: number
  /** Unix timestamp (seconds) when the batch finished, or null while running */
  finishedAt: number | null
}

/** Lifecycle callbacks and routing options for a batch */
export interface BatchOptions {
  /** Dispatched after all jobs complete successfully — presumably; confirm in JobBatch */
  thenJob?: SerializedPayload | undefined
  /** Dispatched when a batch job fails — presumably; confirm in JobBatch */
  catchJob?: SerializedPayload | undefined
  /** Dispatched when the batch finishes regardless of outcome — presumably; confirm in JobBatch */
  finallyJob?: SerializedPayload | undefined
  /** When true, individual job failures do not cancel the batch */
  allowFailures: boolean
  /** Queue the batch jobs are dispatched on */
  queue: string
  /** Connection name, or null for the default connection */
  connection: string | null
}

/** A newable class producing T — used to reference Job classes by constructor */
export type Constructor<T> = new (...args: any[]) => T
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
QueuedJob,
|
|
3
|
+
FailedJob,
|
|
4
|
+
SerializedPayload,
|
|
5
|
+
BatchRecord,
|
|
6
|
+
} from './JobContract.ts'
|
|
7
|
+
|
|
8
|
+
/**
|
|
9
|
+
* Contract for queue storage backends.
|
|
10
|
+
* Each driver implements push/pop/delete/release for job lifecycle,
|
|
11
|
+
* plus failed-job management and batch support.
|
|
12
|
+
*/
|
|
13
|
+
export interface QueueDriver {
  // ── Core job operations ──────────────────────────────────────────

  /**
   * Push a serialized job payload onto a queue.
   * `delay` is in seconds (drivers compute availableAt = now + delay).
   * Returns the driver-assigned job id.
   */
  push(payload: SerializedPayload, queue: string, delay?: number): Promise<string | number>

  /**
   * Pop the next available job from a queue (atomic claim).
   * Returns null when no job is currently available.
   */
  pop(queue: string): Promise<QueuedJob | null>

  /** Delete a successfully processed job */
  delete(job: QueuedJob): Promise<void>

  /** Release a job back to the queue for retry after `delay` seconds */
  release(job: QueuedJob, delay: number): Promise<void>

  /** Get the number of pending jobs on a queue */
  size(queue: string): Promise<number>

  /** Remove all jobs from a queue */
  clear(queue: string): Promise<void>

  // ── Failed jobs ──────────────────────────────────────────────────

  /** Move a job to the failed jobs table, recording the error */
  fail(job: QueuedJob, error: Error): Promise<void>

  /** List all failed jobs */
  getFailedJobs(): Promise<FailedJob[]>

  /** Find a single failed job by ID; null if not found */
  findFailedJob(id: string | number): Promise<FailedJob | null>

  /** Delete a failed job, returns true if found */
  forgetFailedJob(id: string | number): Promise<boolean>

  /** Delete all failed jobs */
  flushFailedJobs(): Promise<void>

  // ── Batch support ────────────────────────────────────────────────

  /** Create a new batch record; returns the batch id */
  createBatch(batch: BatchRecord): Promise<string>

  /** Find a batch by ID; null if not found */
  findBatch(id: string): Promise<BatchRecord | null>

  /**
   * Atomically increment processed/failed counters by the given deltas.
   * Returns the updated record, or null if the batch does not exist.
   */
  updateBatchProgress(id: string, processed: number, failed: number): Promise<BatchRecord | null>

  /** Mark a batch as finished (sets finishedAt) */
  markBatchFinished(id: string): Promise<void>

  /** Cancel a batch (sets cancelledAt) */
  cancelBatch(id: string): Promise<void>

  /** Delete batches created more than `olderThanSeconds` seconds ago */
  pruneBatches(olderThanSeconds: number): Promise<void>
}
|
|
@@ -0,0 +1,334 @@
|
|
|
1
|
+
import type { QueueDriver } from '../contracts/QueueDriver.ts'
|
|
2
|
+
import type {
|
|
3
|
+
QueuedJob,
|
|
4
|
+
FailedJob,
|
|
5
|
+
SerializedPayload,
|
|
6
|
+
BatchRecord,
|
|
7
|
+
} from '../contracts/JobContract.ts'
|
|
8
|
+
|
|
9
|
+
/** Configuration for the Kafka queue driver (see KafkaDriver below). */
export interface KafkaQueueConfig {
  driver: 'kafka'
  /** Kafka broker addresses. Default: ['localhost:9092'] */
  brokers?: string[] | undefined
  /** Client ID. Default: 'mantiq-queue' */
  clientId?: string | undefined
  /** Consumer group ID. Default: 'mantiq-workers' */
  groupId?: string | undefined
  /** Topic prefix for queue names. Default: 'mantiq.' */
  topicPrefix?: string | undefined
  /** SASL authentication (passed through to kafkajs) */
  sasl?: {
    mechanism: 'plain' | 'scram-sha-256' | 'scram-sha-512'
    username: string
    password: string
  } | undefined
  /** Enable SSL */
  ssl?: boolean | undefined
}
|
|
28
|
+
|
|
29
|
+
/**
|
|
30
|
+
* Kafka queue driver using kafkajs.
|
|
31
|
+
*
|
|
32
|
+
* Maps queue names to Kafka topics (`{prefix}{queue}`).
|
|
33
|
+
* Uses consumer groups for atomic job claiming.
|
|
34
|
+
*
|
|
35
|
+
* Design:
|
|
36
|
+
* - push → producer.send() to topic
|
|
37
|
+
* - pop → consumer.run() with eachMessage, buffered for single-message retrieval
|
|
38
|
+
* - delete → commit offset (auto on successful processing)
|
|
39
|
+
* - release → seek back / re-produce to topic
|
|
40
|
+
*
|
|
41
|
+
* Requires `kafkajs`:
|
|
42
|
+
* bun add kafkajs
|
|
43
|
+
*/
|
|
44
|
+
export class KafkaDriver implements QueueDriver {
|
|
45
|
+
private kafka: any
|
|
46
|
+
private producer: any | null = null
|
|
47
|
+
private consumer: any | null = null
|
|
48
|
+
private readonly topicPrefix: string
|
|
49
|
+
private readonly groupId: string
|
|
50
|
+
|
|
51
|
+
/** Buffer for consumed messages awaiting pop() */
|
|
52
|
+
private messageBuffer: Array<{
|
|
53
|
+
queuedJob: QueuedJob
|
|
54
|
+
topic: string
|
|
55
|
+
partition: number
|
|
56
|
+
offset: string
|
|
57
|
+
}> = []
|
|
58
|
+
|
|
59
|
+
/** Pending resolve for when pop() is waiting */
|
|
60
|
+
private popResolver: ((value: QueuedJob | null) => void) | null = null
|
|
61
|
+
private consumerRunning = false
|
|
62
|
+
private subscribedTopics = new Set<string>()
|
|
63
|
+
|
|
64
|
+
/** In-memory tracking for failed jobs and batches */
|
|
65
|
+
private failedJobs: FailedJob[] = []
|
|
66
|
+
private nextFailedId = 1
|
|
67
|
+
private batches = new Map<string, BatchRecord>()
|
|
68
|
+
|
|
69
|
+
constructor(config: KafkaQueueConfig) {
|
|
70
|
+
this.topicPrefix = config.topicPrefix ?? 'mantiq.'
|
|
71
|
+
this.groupId = config.groupId ?? 'mantiq-workers'
|
|
72
|
+
|
|
73
|
+
try {
|
|
74
|
+
const { Kafka } = require('kafkajs')
|
|
75
|
+
const kafkaConfig: any = {
|
|
76
|
+
clientId: config.clientId ?? 'mantiq-queue',
|
|
77
|
+
brokers: config.brokers ?? ['localhost:9092'],
|
|
78
|
+
}
|
|
79
|
+
if (config.sasl) kafkaConfig.sasl = config.sasl
|
|
80
|
+
if (config.ssl) kafkaConfig.ssl = config.ssl
|
|
81
|
+
|
|
82
|
+
this.kafka = new Kafka(kafkaConfig)
|
|
83
|
+
} catch {
|
|
84
|
+
throw new Error(
|
|
85
|
+
'kafkajs is required for the Kafka queue driver. Install it with: bun add kafkajs',
|
|
86
|
+
)
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
// ── Core job operations ──────────────────────────────────────────
|
|
91
|
+
|
|
92
|
+
async push(payload: SerializedPayload, queue: string, delay = 0): Promise<string | number> {
|
|
93
|
+
const producer = await this.getProducer()
|
|
94
|
+
const id = crypto.randomUUID()
|
|
95
|
+
const now = Math.floor(Date.now() / 1000)
|
|
96
|
+
|
|
97
|
+
const job: QueuedJob = {
|
|
98
|
+
id,
|
|
99
|
+
queue,
|
|
100
|
+
payload,
|
|
101
|
+
attempts: 0,
|
|
102
|
+
reservedAt: null,
|
|
103
|
+
availableAt: now + delay,
|
|
104
|
+
createdAt: now,
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
const topic = this.topicName(queue)
|
|
108
|
+
|
|
109
|
+
await producer.send({
|
|
110
|
+
topic,
|
|
111
|
+
messages: [{
|
|
112
|
+
key: id,
|
|
113
|
+
value: JSON.stringify(job),
|
|
114
|
+
headers: {
|
|
115
|
+
'mantiq-delay': String(delay),
|
|
116
|
+
'mantiq-available-at': String(now + delay),
|
|
117
|
+
},
|
|
118
|
+
}],
|
|
119
|
+
})
|
|
120
|
+
|
|
121
|
+
return id
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
async pop(queue: string): Promise<QueuedJob | null> {
|
|
125
|
+
const topic = this.topicName(queue)
|
|
126
|
+
|
|
127
|
+
// Check buffer first
|
|
128
|
+
const now = Math.floor(Date.now() / 1000)
|
|
129
|
+
const buffIdx = this.messageBuffer.findIndex(
|
|
130
|
+
(m) => m.topic === topic && m.queuedJob.availableAt <= now,
|
|
131
|
+
)
|
|
132
|
+
if (buffIdx !== -1) {
|
|
133
|
+
const entry = this.messageBuffer.splice(buffIdx, 1)[0]!
|
|
134
|
+
entry.queuedJob.reservedAt = now
|
|
135
|
+
entry.queuedJob.attempts++
|
|
136
|
+
return entry.queuedJob
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
// Ensure consumer is subscribed and running
|
|
140
|
+
await this.ensureConsumer(topic)
|
|
141
|
+
|
|
142
|
+
// Wait briefly for a message
|
|
143
|
+
return new Promise<QueuedJob | null>((resolve) => {
|
|
144
|
+
this.popResolver = resolve
|
|
145
|
+
|
|
146
|
+
// Timeout after 5 seconds if no message
|
|
147
|
+
setTimeout(() => {
|
|
148
|
+
if (this.popResolver === resolve) {
|
|
149
|
+
this.popResolver = null
|
|
150
|
+
resolve(null)
|
|
151
|
+
}
|
|
152
|
+
}, 5000)
|
|
153
|
+
})
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
async delete(_job: QueuedJob): Promise<void> {
|
|
157
|
+
// In Kafka, messages are "deleted" by committing the offset.
|
|
158
|
+
// This happens automatically when the consumer processes messages.
|
|
159
|
+
// No explicit delete needed — Kafka retains messages based on retention policy.
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
async release(job: QueuedJob, delay: number): Promise<void> {
|
|
163
|
+
// Re-produce the message to the topic
|
|
164
|
+
job.reservedAt = null
|
|
165
|
+
job.availableAt = Math.floor(Date.now() / 1000) + delay
|
|
166
|
+
|
|
167
|
+
const producer = await this.getProducer()
|
|
168
|
+
const topic = this.topicName(job.queue)
|
|
169
|
+
|
|
170
|
+
await producer.send({
|
|
171
|
+
topic,
|
|
172
|
+
messages: [{
|
|
173
|
+
key: String(job.id),
|
|
174
|
+
value: JSON.stringify(job),
|
|
175
|
+
headers: {
|
|
176
|
+
'mantiq-delay': String(delay),
|
|
177
|
+
'mantiq-available-at': String(job.availableAt),
|
|
178
|
+
'mantiq-retry': 'true',
|
|
179
|
+
},
|
|
180
|
+
}],
|
|
181
|
+
})
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
async size(_queue: string): Promise<number> {
|
|
185
|
+
// Kafka doesn't have a simple "queue size" concept.
|
|
186
|
+
// Would need to compute lag = latest offset - committed offset per partition.
|
|
187
|
+
// For now, return the buffer size as a rough approximation.
|
|
188
|
+
return this.messageBuffer.length
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
async clear(_queue: string): Promise<void> {
|
|
192
|
+
// Kafka topics can't be "cleared" like a traditional queue.
|
|
193
|
+
// You'd need to delete and recreate the topic, or wait for retention.
|
|
194
|
+
this.messageBuffer = []
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
// ── Failed jobs (in-memory) ──────────────────────────────────────
|
|
198
|
+
|
|
199
|
+
async fail(job: QueuedJob, error: Error): Promise<void> {
|
|
200
|
+
this.failedJobs.push({
|
|
201
|
+
id: this.nextFailedId++,
|
|
202
|
+
queue: job.queue,
|
|
203
|
+
payload: job.payload,
|
|
204
|
+
exception: `${error.name}: ${error.message}\n${error.stack ?? ''}`,
|
|
205
|
+
failedAt: Math.floor(Date.now() / 1000),
|
|
206
|
+
})
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
async getFailedJobs(): Promise<FailedJob[]> {
|
|
210
|
+
return [...this.failedJobs]
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
async findFailedJob(id: string | number): Promise<FailedJob | null> {
|
|
214
|
+
return this.failedJobs.find((j) => j.id === id) ?? null
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
async forgetFailedJob(id: string | number): Promise<boolean> {
|
|
218
|
+
const idx = this.failedJobs.findIndex((j) => j.id === id)
|
|
219
|
+
if (idx === -1) return false
|
|
220
|
+
this.failedJobs.splice(idx, 1)
|
|
221
|
+
return true
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
async flushFailedJobs(): Promise<void> {
|
|
225
|
+
this.failedJobs = []
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
// ── Batch support (in-memory) ────────────────────────────────────
|
|
229
|
+
|
|
230
|
+
async createBatch(batch: BatchRecord): Promise<string> {
|
|
231
|
+
this.batches.set(batch.id, { ...batch })
|
|
232
|
+
return batch.id
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
async findBatch(id: string): Promise<BatchRecord | null> {
|
|
236
|
+
const b = this.batches.get(id)
|
|
237
|
+
return b ? { ...b, failedJobIds: [...b.failedJobIds], options: { ...b.options } } : null
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
async updateBatchProgress(id: string, processed: number, failed: number): Promise<BatchRecord | null> {
|
|
241
|
+
const b = this.batches.get(id)
|
|
242
|
+
if (!b) return null
|
|
243
|
+
b.processedJobs += processed
|
|
244
|
+
b.failedJobs += failed
|
|
245
|
+
return { ...b, failedJobIds: [...b.failedJobIds], options: { ...b.options } }
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
async markBatchFinished(id: string): Promise<void> {
|
|
249
|
+
const b = this.batches.get(id)
|
|
250
|
+
if (b) b.finishedAt = Math.floor(Date.now() / 1000)
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
async cancelBatch(id: string): Promise<void> {
|
|
254
|
+
const b = this.batches.get(id)
|
|
255
|
+
if (b) b.cancelledAt = Math.floor(Date.now() / 1000)
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
async pruneBatches(olderThanSeconds: number): Promise<void> {
|
|
259
|
+
const cutoff = Math.floor(Date.now() / 1000) - olderThanSeconds
|
|
260
|
+
for (const [id, b] of this.batches) {
|
|
261
|
+
if (b.createdAt < cutoff) this.batches.delete(id)
|
|
262
|
+
}
|
|
263
|
+
}
|
|
264
|
+
|
|
265
|
+
// ── Helpers ──────────────────────────────────────────────────────
|
|
266
|
+
|
|
267
|
+
private async getProducer(): Promise<any> {
|
|
268
|
+
if (!this.producer) {
|
|
269
|
+
this.producer = this.kafka.producer()
|
|
270
|
+
await this.producer.connect()
|
|
271
|
+
}
|
|
272
|
+
return this.producer
|
|
273
|
+
}
|
|
274
|
+
|
|
275
|
+
private async ensureConsumer(topic: string): Promise<void> {
|
|
276
|
+
if (!this.consumer) {
|
|
277
|
+
this.consumer = this.kafka.consumer({ groupId: this.groupId })
|
|
278
|
+
await this.consumer.connect()
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
if (!this.subscribedTopics.has(topic)) {
|
|
282
|
+
await this.consumer.subscribe({ topic, fromBeginning: false })
|
|
283
|
+
this.subscribedTopics.add(topic)
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
if (!this.consumerRunning) {
|
|
287
|
+
this.consumerRunning = true
|
|
288
|
+
await this.consumer.run({
|
|
289
|
+
autoCommit: true,
|
|
290
|
+
eachMessage: async ({ topic: msgTopic, partition, message }: any) => {
|
|
291
|
+
const job: QueuedJob = JSON.parse(message.value.toString())
|
|
292
|
+
const now = Math.floor(Date.now() / 1000)
|
|
293
|
+
|
|
294
|
+
const entry = {
|
|
295
|
+
queuedJob: job,
|
|
296
|
+
topic: msgTopic,
|
|
297
|
+
partition,
|
|
298
|
+
offset: message.offset,
|
|
299
|
+
}
|
|
300
|
+
|
|
301
|
+
// If there's a waiting pop(), resolve it directly
|
|
302
|
+
if (this.popResolver && job.availableAt <= now) {
|
|
303
|
+
job.reservedAt = now
|
|
304
|
+
job.attempts++
|
|
305
|
+
const resolver = this.popResolver
|
|
306
|
+
this.popResolver = null
|
|
307
|
+
resolver(job)
|
|
308
|
+
} else {
|
|
309
|
+
// Buffer for later pop()
|
|
310
|
+
this.messageBuffer.push(entry)
|
|
311
|
+
}
|
|
312
|
+
},
|
|
313
|
+
})
|
|
314
|
+
}
|
|
315
|
+
}
|
|
316
|
+
|
|
317
|
+
private topicName(queue: string): string {
|
|
318
|
+
return `${this.topicPrefix}${queue}`
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
/** Disconnect producer and consumer */
|
|
322
|
+
async disconnect(): Promise<void> {
|
|
323
|
+
if (this.producer) {
|
|
324
|
+
await this.producer.disconnect()
|
|
325
|
+
this.producer = null
|
|
326
|
+
}
|
|
327
|
+
if (this.consumer) {
|
|
328
|
+
await this.consumer.disconnect()
|
|
329
|
+
this.consumer = null
|
|
330
|
+
this.consumerRunning = false
|
|
331
|
+
this.subscribedTopics.clear()
|
|
332
|
+
}
|
|
333
|
+
}
|
|
334
|
+
}
|