@amqp-contract/worker 0.21.0 → 0.22.0
- package/dist/index.cjs +371 -274
- package/dist/index.d.cts +99 -134
- package/dist/index.d.cts.map +1 -1
- package/dist/index.d.mts +99 -134
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +371 -274
- package/dist/index.mjs.map +1 -1
- package/docs/index.md +50 -86
- package/package.json +5 -5
package/dist/index.mjs
CHANGED
````diff
@@ -196,6 +196,231 @@ function nonRetryable(message, cause) {
   return new NonRetryableError(message, cause);
 }
 //#endregion
+//#region src/retry.ts
+/**
+ * Handle error in message processing with retry logic.
+ *
+ * Flow depends on retry mode:
+ *
+ * **immediate-requeue mode:**
+ * 1. If NonRetryableError -> send directly to DLQ (no retry)
+ * 2. If max retries exceeded -> send to DLQ
+ * 3. Otherwise -> requeue immediately for retry
+ *
+ * **ttl-backoff mode:**
+ * 1. If NonRetryableError -> send directly to DLQ (no retry)
+ * 2. If max retries exceeded -> send to DLQ
+ * 3. Otherwise -> publish to wait queue with TTL for retry
+ *
+ * **none mode (no retry config):**
+ * 1. send directly to DLQ (no retry)
+ */
+function handleError(ctx, error, msg, consumerName, consumer) {
+  if (error instanceof NonRetryableError) {
+    ctx.logger?.error("Non-retryable error, sending to DLQ immediately", {
+      consumerName,
+      errorType: error.name,
+      error: error.message
+    });
+    sendToDLQ(ctx, msg, consumer);
+    return Future.value(Result.Ok(void 0));
+  }
+  const config = extractQueue(consumer.queue).retry;
+  if (config.mode === "immediate-requeue") return handleErrorImmediateRequeue(ctx, error, msg, consumerName, consumer, config);
+  if (config.mode === "ttl-backoff") return handleErrorTtlBackoff(ctx, error, msg, consumerName, consumer, config);
+  ctx.logger?.warn("Retry disabled (none mode), sending to DLQ", {
+    consumerName,
+    error: error.message
+  });
+  sendToDLQ(ctx, msg, consumer);
+  return Future.value(Result.Ok(void 0));
+}
+/**
+ * Handle error by requeuing immediately.
+ *
+ * For quorum queues, messages are requeued with `nack(requeue=true)`, and the worker tracks delivery count via the native RabbitMQ `x-delivery-count` header.
+ * For classic queues, messages are re-published on the same queue, and the worker tracks delivery count via a custom `x-retry-count` header.
+ * When the count exceeds `maxRetries`, the message is automatically dead-lettered (if DLX is configured) or dropped.
+ *
+ * This is simpler than TTL-based retry but provides immediate retries only.
+ */
+function handleErrorImmediateRequeue(ctx, error, msg, consumerName, consumer, config) {
+  const queue = extractQueue(consumer.queue);
+  const queueName = queue.name;
+  const retryCount = queue.type === "quorum" ? msg.properties.headers?.["x-delivery-count"] ?? 0 : msg.properties.headers?.["x-retry-count"] ?? 0;
+  if (retryCount >= config.maxRetries) {
+    ctx.logger?.error("Max retries exceeded, sending to DLQ (immediate-requeue mode)", {
+      consumerName,
+      queueName,
+      retryCount,
+      maxRetries: config.maxRetries,
+      error: error.message
+    });
+    sendToDLQ(ctx, msg, consumer);
+    return Future.value(Result.Ok(void 0));
+  }
+  ctx.logger?.warn("Retrying message (immediate-requeue mode)", {
+    consumerName,
+    queueName,
+    retryCount,
+    maxRetries: config.maxRetries,
+    error: error.message
+  });
+  if (queue.type === "quorum") {
+    ctx.amqpClient.nack(msg, false, true);
+    return Future.value(Result.Ok(void 0));
+  } else return publishForRetry(ctx, {
+    msg,
+    exchange: msg.fields.exchange,
+    routingKey: msg.fields.routingKey,
+    queueName,
+    error
+  });
+}
+/**
+ * Handle error using TTL + wait queue pattern for exponential backoff.
+ *
+ * ┌─────────────────────────────────────────────────────────────────┐
+ * │ Retry Flow (Native RabbitMQ TTL + Wait queue pattern)           │
+ * ├─────────────────────────────────────────────────────────────────┤
+ * │                                                                 │
+ * │ 1. Handler throws any Error                                     │
+ * │    ↓                                                            │
+ * │ 2. Worker publishes to wait exchange                            │
+ * │    (with header `x-wait-queue` set to the wait queue name)      │
+ * │    ↓                                                            │
+ * │ 3. Wait exchange routes to wait queue                           │
+ * │    (with expiration: calculated backoff delay)                  │
+ * │    ↓                                                            │
+ * │ 4. Message waits in queue until TTL expires                     │
+ * │    ↓                                                            │
+ * │ 5. Expired message dead-lettered to retry exchange              │
+ * │    (with header `x-retry-queue` set to the main queue name)     │
+ * │    ↓                                                            │
+ * │ 6. Retry exchange routes back to main queue → RETRY             │
+ * │    ↓                                                            │
+ * │ 7. If retries exhausted: nack without requeue → DLQ             │
+ * │                                                                 │
+ * └─────────────────────────────────────────────────────────────────┘
+ */
+function handleErrorTtlBackoff(ctx, error, msg, consumerName, consumer, config) {
+  if (!isQueueWithTtlBackoffInfrastructure(consumer.queue)) {
+    ctx.logger?.error("Queue does not have TTL-backoff infrastructure", {
+      consumerName,
+      queueName: consumer.queue.name
+    });
+    return Future.value(Result.Error(new TechnicalError("Queue does not have TTL-backoff infrastructure")));
+  }
+  const queueEntry = consumer.queue;
+  const queueName = extractQueue(queueEntry).name;
+  const retryCount = msg.properties.headers?.["x-retry-count"] ?? 0;
+  if (retryCount >= config.maxRetries) {
+    ctx.logger?.error("Max retries exceeded, sending to DLQ (ttl-backoff mode)", {
+      consumerName,
+      queueName,
+      retryCount,
+      maxRetries: config.maxRetries,
+      error: error.message
+    });
+    sendToDLQ(ctx, msg, consumer);
+    return Future.value(Result.Ok(void 0));
+  }
+  const delayMs = calculateRetryDelay(retryCount, config);
+  ctx.logger?.warn("Retrying message (ttl-backoff mode)", {
+    consumerName,
+    queueName,
+    retryCount: retryCount + 1,
+    maxRetries: config.maxRetries,
+    delayMs,
+    error: error.message
+  });
+  return publishForRetry(ctx, {
+    msg,
+    exchange: queueEntry.waitExchange.name,
+    routingKey: msg.fields.routingKey,
+    waitQueueName: queueEntry.waitQueue.name,
+    queueName,
+    delayMs,
+    error
+  });
+}
+/**
+ * Calculate retry delay with exponential backoff and optional jitter.
+ */
+function calculateRetryDelay(retryCount, config) {
+  const { initialDelayMs, maxDelayMs, backoffMultiplier, jitter } = config;
+  let delay = Math.min(initialDelayMs * Math.pow(backoffMultiplier, retryCount), maxDelayMs);
+  if (jitter) delay = delay * (.5 + Math.random() * .5);
+  return Math.floor(delay);
+}
+/**
+ * Parse message content for republishing.
+ * Prevents double JSON serialization by converting Buffer to object when possible.
+ */
+function parseMessageContentForRetry(ctx, msg, queueName) {
+  let content = msg.content;
+  if (!msg.properties.contentEncoding) try {
+    content = JSON.parse(msg.content.toString());
+  } catch (err) {
+    ctx.logger?.warn("Failed to parse message for retry, using original buffer", {
+      queueName,
+      error: err
+    });
+  }
+  return content;
+}
+/**
+ * Publish message with an incremented x-retry-count header and optional TTL.
+ */
+function publishForRetry(ctx, { msg, exchange, routingKey, queueName, waitQueueName, delayMs, error }) {
+  const newRetryCount = (msg.properties.headers?.["x-retry-count"] ?? 0) + 1;
+  ctx.amqpClient.ack(msg);
+  const content = parseMessageContentForRetry(ctx, msg, queueName);
+  return ctx.amqpClient.publish(exchange, routingKey, content, {
+    ...msg.properties,
+    ...delayMs !== void 0 ? { expiration: delayMs.toString() } : {},
+    headers: {
+      ...msg.properties.headers,
+      "x-retry-count": newRetryCount,
+      "x-last-error": error.message,
+      "x-first-failure-timestamp": msg.properties.headers?.["x-first-failure-timestamp"] ?? Date.now(),
+      ...waitQueueName !== void 0 ? {
+        "x-wait-queue": waitQueueName,
+        "x-retry-queue": queueName
+      } : {}
+    }
+  }).mapOkToResult((published) => {
+    if (!published) {
+      ctx.logger?.error("Failed to publish message for retry (write buffer full)", {
+        queueName,
+        retryCount: newRetryCount,
+        ...delayMs !== void 0 ? { delayMs } : {}
+      });
+      return Result.Error(new TechnicalError("Failed to publish message for retry (write buffer full)"));
+    }
+    ctx.logger?.info("Message published for retry", {
+      queueName,
+      retryCount: newRetryCount,
+      ...delayMs !== void 0 ? { delayMs } : {}
+    });
+    return Result.Ok(void 0);
+  });
+}
+/**
+ * Send message to dead letter queue.
+ * Nacks the message without requeue, relying on DLX configuration.
+ */
+function sendToDLQ(ctx, msg, consumer) {
+  const queue = extractQueue(consumer.queue);
+  const queueName = queue.name;
+  if (!(queue.deadLetter !== void 0)) ctx.logger?.warn("Queue does not have DLX configured - message will be lost on nack", { queueName });
+  ctx.logger?.info("Sending message to DLQ", {
+    queueName,
+    deliveryTag: msg.fields.deliveryTag
+  });
+  ctx.amqpClient.nack(msg, false, false);
+}
+//#endregion
 //#region src/worker.ts
 /**
  * Type guard to check if a handler entry is a tuple format [handler, options].
````
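The new `src/retry.ts` region above derives each delay from the attempt count (the `x-retry-count` header, or RabbitMQ's native `x-delivery-count` on quorum queues) via `calculateRetryDelay`. The standalone TypeScript sketch below reproduces that exact formula so the schedule is easy to inspect; the config values are illustrative, not defaults shipped by the package.

```ts
// Same math as calculateRetryDelay in the diff above, retyped standalone.
// The config values below are made up for illustration.
interface BackoffConfig {
  initialDelayMs: number;
  maxDelayMs: number;
  backoffMultiplier: number;
  jitter: boolean;
}

function calculateRetryDelay(retryCount: number, config: BackoffConfig): number {
  const { initialDelayMs, maxDelayMs, backoffMultiplier, jitter } = config;
  // Exponential growth, capped at maxDelayMs.
  let delay = Math.min(initialDelayMs * Math.pow(backoffMultiplier, retryCount), maxDelayMs);
  // Jitter scales by a random factor in [0.5, 1), keeping at least half
  // of the deterministic delay while de-synchronizing competing consumers.
  if (jitter) delay = delay * (0.5 + Math.random() * 0.5);
  return Math.floor(delay);
}

const config: BackoffConfig = { initialDelayMs: 1000, maxDelayMs: 30000, backoffMultiplier: 2, jitter: false };
// -> [1000, 2000, 4000, 8000, 16000, 30000]; the sixth attempt is capped.
console.log([0, 1, 2, 3, 4, 5].map((attempt) => calculateRetryDelay(attempt, config)));
```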
````diff
@@ -246,7 +471,10 @@ function isHandlerTuple(entry) {
  */
 var TypedAmqpWorker = class TypedAmqpWorker {
   /**
-   * Internal handler storage
+   * Internal handler storage. Keyed by handler name (consumer or RPC); the
+   * stored function signature is widened so the dispatch loop can call it
+   * uniformly. The actual handler is type-checked at the worker's public API
+   * boundary via `WorkerInferHandlers<TContract>`.
    */
   actualHandlers;
   consumerOptions;
@@ -261,23 +489,49 @@ var TypedAmqpWorker = class TypedAmqpWorker {
     this.actualHandlers = {};
     this.consumerOptions = {};
     const handlersRecord = handlers;
-    for (const
-    const handlerEntry = handlersRecord[
-    const
+    for (const handlerName of Object.keys(handlersRecord)) {
+      const handlerEntry = handlersRecord[handlerName];
+      const typedName = handlerName;
       if (isHandlerTuple(handlerEntry)) {
         const [handler, options] = handlerEntry;
-        this.actualHandlers[
-        this.consumerOptions[
+        this.actualHandlers[typedName] = handler;
+        this.consumerOptions[typedName] = {
           ...this.defaultConsumerOptions,
           ...options
         };
       } else {
-        this.actualHandlers[
-        this.consumerOptions[
+        this.actualHandlers[typedName] = handlerEntry;
+        this.consumerOptions[typedName] = this.defaultConsumerOptions;
       }
     }
   }
   /**
+   * Build a `ConsumerDefinition`-shaped view for a handler name, regardless
+   * of whether it came from `contract.consumers` or `contract.rpcs`. The
+   * dispatch path treats both uniformly; the returned `isRpc` flag (and the
+   * accompanying `responseSchema`) tells `processMessage` whether to validate
+   * the handler return value and publish a reply.
+   */
+  resolveConsumerView(name) {
+    const rpcs = this.contract.rpcs;
+    if (rpcs && Object.hasOwn(rpcs, name)) {
+      const rpc = rpcs[name];
+      return {
+        consumer: {
+          queue: rpc.queue,
+          message: rpc.request
+        },
+        isRpc: true,
+        responseSchema: rpc.response.payload
+      };
+    }
+    const consumerEntry = this.contract.consumers[name];
+    return {
+      consumer: extractConsumer(consumerEntry),
+      isRpc: false
+    };
+  }
+  /**
    * Create a type-safe AMQP worker from a contract.
    *
    * Connection management (including automatic reconnection) is handled internally
````
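The constructor loop above (together with the `isHandlerTuple` guard in its context lines) accepts each handler entry either as a bare function or as a `[handler, options]` tuple whose options are spread over `defaultConsumerOptions`. A minimal sketch of the two shapes; the handler names, payload type, and `prefetch` option are hypothetical, and the package's real handlers return a `Future<Result>` rather than the plain `Promise` used here to keep the sketch self-contained.

```ts
// Hypothetical handler entries in the two accepted shapes.
type Handler = (message: unknown) => Promise<void>;
type HandlerEntry = Handler | [Handler, { prefetch?: number }];

declare function handleUserCreated(message: unknown): Promise<void>;
declare function buildReport(message: unknown): Promise<void>;

const handlers: Record<string, HandlerEntry> = {
  // Bare form: consumerOptions[name] = defaultConsumerOptions.
  userCreated: handleUserCreated,
  // Tuple form: per-consumer options win over the defaults,
  // mirroring { ...this.defaultConsumerOptions, ...options }.
  reportRequested: [buildReport, { prefetch: 1 }],
};
```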
````diff
@@ -302,12 +556,18 @@ var TypedAmqpWorker = class TypedAmqpWorker {
    * }).resultToPromise();
    * ```
    */
-  static create({ contract, handlers, urls, connectionOptions, defaultConsumerOptions, logger, telemetry }) {
+  static create({ contract, handlers, urls, connectionOptions, defaultConsumerOptions, logger, telemetry, connectTimeoutMs }) {
     const worker = new TypedAmqpWorker(contract, new AmqpClient(contract, {
       urls,
-      connectionOptions
+      connectionOptions,
+      connectTimeoutMs
     }), handlers, defaultConsumerOptions ?? {}, logger, telemetry);
-    return worker.waitForConnectionReady().flatMapOk(() => worker.consumeAll()).
+    return worker.waitForConnectionReady().flatMapOk(() => worker.consumeAll()).flatMap((result) => result.match({
+      Ok: () => Future.value(Result.Ok(worker)),
+      Error: (error) => worker.close().tapError((closeError) => {
+        logger?.warn("Failed to close worker after setup failure", { error: closeError });
+      }).map(() => Result.Error(error))
+    }));
   }
   /**
    * Close the AMQP channel and connection.
````
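`create` gains a `connectTimeoutMs` option, forwarded to the `AmqpClient`, and now closes the worker when `consumeAll()` fails rather than reporting the error while the connection stays open. A hedged usage sketch: the option names and `.resultToPromise()` come straight from the diff, while the import form, `contract`, `handlers`, and the `console` logger are placeholders.

```ts
// Assumes the class is exported under its dist name.
import { TypedAmqpWorker } from "@amqp-contract/worker";

declare const contract: any; // a real contract definition
declare const handlers: any; // matching handlers for its consumers/rpcs

const worker = await TypedAmqpWorker.create({
  contract,
  handlers,
  urls: ["amqp://localhost:5672"],
  connectTimeoutMs: 5_000, // new in 0.22.0: bounds connection setup
  logger: console,
}).resultToPromise();
```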
````diff
@@ -337,33 +597,25 @@ var TypedAmqpWorker = class TypedAmqpWorker {
     }).flatMapOk(() => this.amqpClient.close()).mapOk(() => void 0);
   }
   /**
-   *
-   * Defaults are applied in the contract's defineQueue, so we just return the config.
-   */
-  getRetryConfigForConsumer(consumer) {
-    return extractQueue(consumer.queue).retry;
-  }
-  /**
-   * Start consuming messages for all consumers.
-   * TypeScript guarantees consumers exist (handlers require matching consumers).
+   * Start consuming for every entry in `contract.consumers` and `contract.rpcs`.
    */
   consumeAll() {
-    const
-    const
-
+    const consumerNames = Object.keys(this.contract.consumers ?? {});
+    const rpcNames = Object.keys(this.contract.rpcs ?? {});
+    const allNames = [...consumerNames, ...rpcNames];
+    return Future.all(allNames.map((name) => this.consume(name))).map(Result.all).mapOk(() => void 0);
   }
   waitForConnectionReady() {
     return this.amqpClient.waitForConnect();
   }
   /**
-   * Start consuming messages for a specific
-   *
+   * Start consuming messages for a specific handler — either a `consumers`
+   * entry (regular event/command consumer) or an `rpcs` entry (RPC server).
    */
-  consume(
-    const
-    const
-
-    return this.consumeSingle(consumerName, consumer, handler);
+  consume(name) {
+    const view = this.resolveConsumerView(name);
+    const handler = this.actualHandlers[name];
+    return this.consumeSingle(name, view, handler);
   }
   /**
    * Validate data against a Standard Schema and handle errors.
````
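`consumeAll` now starts a consumer for every `contract.rpcs` entry as well as every `contract.consumers` entry, and collapses the startup results with `Future.all(...).map(Result.all)`. Judging by the API names (`Future.value`, `Result.Ok`, `flatMapOk`), these types appear to come from `@swan-io/boxed`; under that assumption, the sketch below shows why the idiom fails the whole batch when any single consumer fails to start.

```ts
import { Future, Result } from "@swan-io/boxed";

// Stand-ins for per-consumer startup results.
const starts: Array<Future<Result<string, Error>>> = [
  Future.value(Result.Ok("consumer-tag-1")),
  Future.value(Result.Error(new Error("queue missing"))),
];

// Future.all yields Future<Result<...>[]>; Result.all makes the first
// Error win, so one failed consumer start fails the whole batch.
Future.all(starts)
  .map(Result.all)
  .onResolve((result) => {
    console.log(result.isError()); // true
  });
```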
````diff
@@ -421,272 +673,117 @@ var TypedAmqpWorker = class TypedAmqpWorker {
     }).map(Result.allFromDict);
   }
   /**
-   *
-
-
-
-    return this.amqpClient.consume(queueName, async (msg) => {
-      if (msg === null) {
-        this.logger?.warn("Consumer cancelled by server", {
-          consumerName: String(consumerName),
-          queueName
-        });
-        return;
-      }
-      const startTime = Date.now();
-      const span = startConsumeSpan(this.telemetry, queueName, String(consumerName), { "messaging.rabbitmq.message.delivery_tag": msg.fields.deliveryTag });
-      await this.parseAndValidateMessage(msg, consumer, consumerName).flatMapOk((validatedMessage) => handler(validatedMessage, msg).flatMapOk(() => {
-        this.logger?.info("Message consumed successfully", {
-          consumerName: String(consumerName),
-          queueName
-        });
-        this.amqpClient.ack(msg);
-        const durationMs = Date.now() - startTime;
-        endSpanSuccess(span);
-        recordConsumeMetric(this.telemetry, queueName, String(consumerName), true, durationMs);
-        return Future.value(Result.Ok(void 0));
-      }).flatMapError((handlerError) => {
-        this.logger?.error("Error processing message", {
-          consumerName: String(consumerName),
-          queueName,
-          errorType: handlerError.name,
-          error: handlerError.message
-        });
-        const durationMs = Date.now() - startTime;
-        endSpanError(span, handlerError);
-        recordConsumeMetric(this.telemetry, queueName, String(consumerName), false, durationMs);
-        return this.handleError(handlerError, msg, String(consumerName), consumer);
-      })).tapError(() => {
-        const durationMs = Date.now() - startTime;
-        endSpanError(span, /* @__PURE__ */ new Error("Message validation failed"));
-        recordConsumeMetric(this.telemetry, queueName, String(consumerName), false, durationMs);
-      }).toPromise();
-    }, this.consumerOptions[consumerName]).tapOk((consumerTag) => {
-      this.consumerTags.add(consumerTag);
-    }).mapError((error) => new TechnicalError(`Failed to start consuming for "${String(consumerName)}"`, error)).mapOk(() => void 0);
-  }
-  /**
-   * Handle error in message processing with retry logic.
-   *
-   * Flow depends on retry mode:
+   * Validate an RPC handler's response and publish it back to the caller's reply
+   * queue with the same `correlationId`. Published via the AMQP default exchange
+   * with `routingKey = msg.properties.replyTo`, which works for both
+   * `amq.rabbitmq.reply-to` and any anonymous queue declared by the caller.
    *
-   *
-   *
-   *
-   * 3. Otherwise -> requeue immediately for retry
-   *
-   * **ttl-backoff mode:**
-   * 1. If NonRetryableError -> send directly to DLQ (no retry)
-   * 2. If max retries exceeded -> send to DLQ
-   * 3. Otherwise -> publish to wait queue with TTL for retry
-   *
-   * **none mode (no retry config):**
-   * 1. send directly to DLQ (no retry)
+   * Validation errors are surfaced as NonRetryableError (handler returned the
+   * wrong shape — retrying the same input will not fix it). Publish errors are
+   * surfaced as RetryableError so the worker's existing retry logic applies.
    */
-
-
-
-
-
-
+  publishRpcResponse(msg, queueName, rpcName, responseSchema, response) {
+    const replyTo = msg.properties.replyTo;
+    const correlationId = msg.properties.correlationId;
+    if (typeof replyTo !== "string" || replyTo.length === 0) {
+      this.logger?.warn("RPC handler returned a response but the incoming message has no replyTo; dropping response", {
+        rpcName: String(rpcName),
+        queueName
       });
-      this.sendToDLQ(msg, consumer);
       return Future.value(Result.Ok(void 0));
     }
-
-
-
-    this.logger?.warn("Retry disabled (none mode), sending to DLQ", {
-      consumerName,
-      error: error.message
-    });
-    this.sendToDLQ(msg, consumer);
-    return Future.value(Result.Ok(void 0));
-  }
-  /**
-   * Handle error by requeuing immediately.
-   *
-   * For quorum queues, messages are requeued with `nack(requeue=true)`, and the worker tracks delivery count via the native RabbitMQ `x-delivery-count` header.
-   * For classic queues, messages are re-published on the same queue, and the worker tracks delivery count via a custom `x-retry-count` header.
-   * When the count exceeds `maxRetries`, the message is automatically dead-lettered (if DLX is configured) or dropped.
-   *
-   * This is simpler than TTL-based retry but provides immediate retries only.
-   */
-  handleErrorImmediateRequeue(error, msg, consumerName, consumer, config) {
-    const queue = extractQueue(consumer.queue);
-    const queueName = queue.name;
-    const retryCount = queue.type === "quorum" ? msg.properties.headers?.["x-delivery-count"] ?? 0 : msg.properties.headers?.["x-retry-count"] ?? 0;
-    if (retryCount >= config.maxRetries) {
-      this.logger?.error("Max retries exceeded, sending to DLQ (immediate-requeue mode)", {
-        consumerName,
+    if (typeof correlationId !== "string" || correlationId.length === 0) {
+      this.logger?.warn("RPC handler returned a response but the incoming message has no correlationId; dropping response", {
+        rpcName: String(rpcName),
         queueName,
-
-        maxRetries: config.maxRetries,
-        error: error.message
+        replyTo
       });
-      this.sendToDLQ(msg, consumer);
       return Future.value(Result.Ok(void 0));
     }
-
-
-
-
-
-
-
-
-
-      return
-    }
-
-
-
-      queueName,
-      error
-    });
+    let rawValidation;
+    try {
+      rawValidation = responseSchema["~standard"].validate(response);
+    } catch (error) {
+      return Future.value(Result.Error(new NonRetryableError("RPC response schema validation threw", error)));
+    }
+    const validationPromise = rawValidation instanceof Promise ? rawValidation : Promise.resolve(rawValidation);
+    return Future.fromPromise(validationPromise).mapError((error) => new NonRetryableError("RPC response schema validation threw", error)).mapOkToResult((validation) => {
+      if (validation.issues) return Result.Error(new NonRetryableError(`RPC response for "${String(rpcName)}" failed schema validation`, new MessageValidationError(String(rpcName), validation.issues)));
+      return Result.Ok(validation.value);
+    }).flatMapOk((validatedResponse) => this.amqpClient.publish("", replyTo, validatedResponse, {
+      correlationId,
+      contentType: "application/json"
+    }).mapErrorToResult((error) => Result.Error(new RetryableError("Failed to publish RPC response", error))).mapOkToResult((published) => published ? Result.Ok(void 0) : Result.Error(new RetryableError("Failed to publish RPC response: channel buffer full"))));
   }
   /**
-   *
-   *
-   * ┌─────────────────────────────────────────────────────────────────┐
-   * │ Retry Flow (Native RabbitMQ TTL + Wait queue pattern)           │
-   * ├─────────────────────────────────────────────────────────────────┤
-   * │                                                                 │
-   * │ 1. Handler throws any Error                                     │
-   * │    ↓                                                            │
-   * │ 2. Worker publishes to wait exchange                            │
-   * │    (with header `x-wait-queue` set to the wait queue name)      │
-   * │    ↓                                                            │
-   * │ 3. Wait exchange routes to wait queue                           │
-   * │    (with expiration: calculated backoff delay)                  │
-   * │    ↓                                                            │
-   * │ 4. Message waits in queue until TTL expires                     │
-   * │    ↓                                                            │
-   * │ 5. Expired message dead-lettered to retry exchange              │
-   * │    (with header `x-retry-queue` set to the main queue name)     │
-   * │    ↓                                                            │
-   * │ 6. Retry exchange routes back to main queue → RETRY             │
-   * │    ↓                                                            │
-   * │ 7. If retries exhausted: nack without requeue → DLQ             │
-   * │                                                                 │
-   * └─────────────────────────────────────────────────────────────────┘
+   * Process a single consumed message: validate, invoke handler, optionally
+   * publish the RPC response, record telemetry, and handle errors.
    */
-
-
-
-
-
+  processMessage(msg, view, name, handler) {
+    const { consumer, isRpc, responseSchema } = view;
+    const queueName = extractQueue(consumer.queue).name;
+    const startTime = Date.now();
+    const span = startConsumeSpan(this.telemetry, queueName, String(name), { "messaging.rabbitmq.message.delivery_tag": msg.fields.deliveryTag });
+    let messageHandled = false;
+    let firstError;
+    return this.parseAndValidateMessage(msg, consumer, name).flatMapOk((validatedMessage) => handler(validatedMessage, msg).flatMapOk((handlerResponse) => {
+      if (isRpc && responseSchema) return this.publishRpcResponse(msg, queueName, name, responseSchema, handlerResponse).flatMapOk(() => {
+        this.logger?.info("Message consumed successfully", {
+          consumerName: String(name),
+          queueName
+        });
+        this.amqpClient.ack(msg);
+        messageHandled = true;
+        return Future.value(Result.Ok(void 0));
       });
-
-
-
-    const queueName = extractQueue(queueEntry).name;
-    const retryCount = msg.properties.headers?.["x-retry-count"] ?? 0;
-    if (retryCount >= config.maxRetries) {
-      this.logger?.error("Max retries exceeded, sending to DLQ (ttl-backoff mode)", {
-        consumerName,
-        queueName,
-        retryCount,
-        maxRetries: config.maxRetries,
-        error: error.message
+      this.logger?.info("Message consumed successfully", {
+        consumerName: String(name),
+        queueName
       });
-      this.
+      this.amqpClient.ack(msg);
+      messageHandled = true;
       return Future.value(Result.Ok(void 0));
-    }
-
-
-      consumerName,
-      queueName,
-      retryCount: retryCount + 1,
-      maxRetries: config.maxRetries,
-      delayMs,
-      error: error.message
-    });
-    return this.publishForRetry({
-      msg,
-      exchange: queueEntry.waitExchange.name,
-      routingKey: msg.fields.routingKey,
-      waitQueueName: queueEntry.waitQueue.name,
-      queueName,
-      delayMs,
-      error
-    });
-  }
-  /**
-   * Calculate retry delay with exponential backoff and optional jitter.
-   */
-  calculateRetryDelay(retryCount, config) {
-    const { initialDelayMs, maxDelayMs, backoffMultiplier, jitter } = config;
-    let delay = Math.min(initialDelayMs * Math.pow(backoffMultiplier, retryCount), maxDelayMs);
-    if (jitter) delay = delay * (.5 + Math.random() * .5);
-    return Math.floor(delay);
-  }
-  /**
-   * Parse message content for republishing.
-   * Prevents double JSON serialization by converting Buffer to object when possible.
-   */
-  parseMessageContentForRetry(msg, queueName) {
-    let content = msg.content;
-    if (!msg.properties.contentEncoding) try {
-      content = JSON.parse(msg.content.toString());
-    } catch (err) {
-      this.logger?.warn("Failed to parse message for retry, using original buffer", {
+    }).flatMapError((handlerError) => {
+      this.logger?.error("Error processing message", {
+        consumerName: String(name),
         queueName,
-
+        errorType: handlerError.name,
+        error: handlerError.message
       });
-
-
-
-
-
-
-
-
-
-
-
-
-
-      headers: {
-        ...msg.properties.headers,
-        "x-retry-count": newRetryCount,
-        "x-last-error": error.message,
-        "x-first-failure-timestamp": msg.properties.headers?.["x-first-failure-timestamp"] ?? Date.now(),
-        ...waitQueueName !== void 0 ? {
-          "x-wait-queue": waitQueueName,
-          "x-retry-queue": queueName
-        } : {}
-      }
-    }).mapOkToResult((published) => {
-      if (!published) {
-        this.logger?.error("Failed to publish message for retry (write buffer full)", {
-          queueName,
-          retryCount: newRetryCount,
-          ...delayMs !== void 0 ? { delayMs } : {}
-        });
-        return Result.Error(new TechnicalError("Failed to publish message for retry (write buffer full)"));
+      firstError = handlerError;
+      return handleError({
+        amqpClient: this.amqpClient,
+        logger: this.logger
+      }, handlerError, msg, String(name), consumer);
+    })).map((result) => {
+      const durationMs = Date.now() - startTime;
+      if (messageHandled) {
+        endSpanSuccess(span);
+        recordConsumeMetric(this.telemetry, queueName, String(name), true, durationMs);
+      } else {
+        endSpanError(span, result.isError() ? result.error : firstError ?? /* @__PURE__ */ new Error("Unknown error"));
+        recordConsumeMetric(this.telemetry, queueName, String(name), false, durationMs);
       }
-
-        queueName,
-        retryCount: newRetryCount,
-        ...delayMs !== void 0 ? { delayMs } : {}
-      });
-      return Result.Ok(void 0);
+      return result;
     });
   }
   /**
-   *
-   * Nacks the message without requeue, relying on DLX configuration.
+   * Consume messages one at a time.
    */
-
-    const
-
-
-
-
-
-
-
+  consumeSingle(name, view, handler) {
+    const queueName = extractQueue(view.consumer.queue).name;
+    return this.amqpClient.consume(queueName, async (msg) => {
+      if (msg === null) {
+        this.logger?.warn("Consumer cancelled by server", {
+          consumerName: String(name),
+          queueName
+        });
+        return;
+      }
+      await this.processMessage(msg, view, name, handler).toPromise();
+    }, this.consumerOptions[name]).tapOk((consumerTag) => {
+      this.consumerTags.add(consumerTag);
+    }).mapError((error) => new TechnicalError(`Failed to start consuming for "${String(name)}"`, error)).mapOk(() => void 0);
   }
 };
 //#endregion
````
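Finally, the reply side of the new RPC support: `publishRpcResponse` validates the handler's return value against `rpc.response.payload`, then publishes it on the AMQP default exchange with `routingKey = replyTo` and the mirrored `correlationId`. The caller-side sketch below exercises that wire contract with plain `amqplib` and RabbitMQ's direct reply-to pseudo-queue; it illustrates the protocol only and is not the package's own client API.

```ts
import { connect } from "amqplib";
import { randomUUID } from "node:crypto";

// Hypothetical RPC caller against a worker built from this package.
async function rpcCall(queue: string, payload: unknown): Promise<unknown> {
  const connection = await connect("amqp://localhost");
  const channel = await connection.createChannel();
  const correlationId = randomUUID();

  let resolveResponse!: (value: unknown) => void;
  const response = new Promise<unknown>((resolve) => {
    resolveResponse = resolve;
  });

  // Direct reply-to must be consumed in no-ack mode before publishing.
  await channel.consume(
    "amq.rabbitmq.reply-to",
    (msg) => {
      // The worker mirrors our correlationId on the response.
      if (msg?.properties.correlationId === correlationId) {
        resolveResponse(JSON.parse(msg.content.toString()));
      }
    },
    { noAck: true },
  );

  channel.sendToQueue(queue, Buffer.from(JSON.stringify(payload)), {
    replyTo: "amq.rabbitmq.reply-to",
    correlationId,
    contentType: "application/json",
  });

  return response; // (connection cleanup omitted for brevity)
}
```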