@amqp-contract/worker 0.21.0 → 0.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +422 -307
- package/dist/index.d.cts +115 -137
- package/dist/index.d.cts.map +1 -1
- package/dist/index.d.mts +115 -137
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +422 -307
- package/dist/index.mjs.map +1 -1
- package/docs/index.md +50 -86
- package/package.json +5 -5
package/dist/index.mjs
CHANGED
@@ -196,6 +196,240 @@ function nonRetryable(message, cause) {
 	return new NonRetryableError(message, cause);
 }
 //#endregion
+//#region src/retry.ts
+/**
+ * Handle error in message processing with retry logic.
+ *
+ * Flow depends on retry mode:
+ *
+ * **immediate-requeue mode:**
+ * 1. If NonRetryableError -> send directly to DLQ (no retry)
+ * 2. If max retries exceeded -> send to DLQ
+ * 3. Otherwise -> requeue immediately for retry
+ *
+ * **ttl-backoff mode:**
+ * 1. If NonRetryableError -> send directly to DLQ (no retry)
+ * 2. If max retries exceeded -> send to DLQ
+ * 3. Otherwise -> publish to wait queue with TTL for retry
+ *
+ * **none mode (no retry config):**
+ * 1. send directly to DLQ (no retry)
+ */
+function handleError(ctx, error, msg, consumerName, consumer) {
+  if (error instanceof NonRetryableError) {
+    ctx.logger?.error("Non-retryable error, sending to DLQ immediately", {
+      consumerName,
+      errorType: error.name,
+      error: error.message
+    });
+    sendToDLQ(ctx, msg, consumer);
+    return Future.value(Result.Ok(void 0));
+  }
+  const config = extractQueue(consumer.queue).retry;
+  if (config.mode === "immediate-requeue") return handleErrorImmediateRequeue(ctx, error, msg, consumerName, consumer, config);
+  if (config.mode === "ttl-backoff") return handleErrorTtlBackoff(ctx, error, msg, consumerName, consumer, config);
+  ctx.logger?.warn("Retry disabled (none mode), sending to DLQ", {
+    consumerName,
+    error: error.message
+  });
+  sendToDLQ(ctx, msg, consumer);
+  return Future.value(Result.Ok(void 0));
+}
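A note on how this is driven from handler code: `NonRetryableError` short-circuits every mode. A minimal sketch, assuming a handler that returns a `Future<Result>` (the `Future`/`Result` combinators in this bundle match the @swan-io/boxed API) and using the `nonRetryable(message, cause)` helper visible in the hunk header above; `orderExists` and `processOrder` are hypothetical:

```ts
// Hypothetical handler: schema-valid but semantically impossible messages
// will never succeed on retry, so fail with NonRetryableError and let
// handleError route them straight to the DLQ in every retry mode.
const handlers = {
  onOrderCreated: (message: { orderId: string }) =>
    orderExists(message.orderId) // assumed helper returning Future<Result>
      ? processOrder(message)    // ordinary errors enter the retry pipeline
      : Future.value(Result.Error(nonRetryable("unknown orderId"))),
};
```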
+/**
+ * Handle error by requeuing immediately.
+ *
+ * For quorum queues, messages are requeued with `nack(requeue=true)`, and the worker tracks delivery count via the native RabbitMQ `x-delivery-count` header.
+ * For classic queues, messages are re-published on the same queue, and the worker tracks delivery count via a custom `x-retry-count` header.
+ * When the count exceeds `maxRetries`, the message is automatically dead-lettered (if DLX is configured) or dropped.
+ *
+ * This is simpler than TTL-based retry but provides immediate retries only.
+ */
+function handleErrorImmediateRequeue(ctx, error, msg, consumerName, consumer, config) {
+  const queue = extractQueue(consumer.queue);
+  const queueName = queue.name;
+  const retryCount = queue.type === "quorum" ? msg.properties.headers?.["x-delivery-count"] ?? 0 : msg.properties.headers?.["x-retry-count"] ?? 0;
+  if (retryCount >= config.maxRetries) {
+    ctx.logger?.error("Max retries exceeded, sending to DLQ (immediate-requeue mode)", {
+      consumerName,
+      queueName,
+      retryCount,
+      maxRetries: config.maxRetries,
+      error: error.message
+    });
+    sendToDLQ(ctx, msg, consumer);
+    return Future.value(Result.Ok(void 0));
+  }
+  ctx.logger?.warn("Retrying message (immediate-requeue mode)", {
+    consumerName,
+    queueName,
+    retryCount,
+    maxRetries: config.maxRetries,
+    error: error.message
+  });
+  if (queue.type === "quorum") {
+    ctx.amqpClient.nack(msg, false, true);
+    return Future.value(Result.Ok(void 0));
+  } else return publishForRetry(ctx, {
+    msg,
+    exchange: msg.fields.exchange,
+    routingKey: msg.fields.routingKey,
+    queueName,
+    error
+  });
+}
+/**
+ * Handle error using TTL + wait queue pattern for exponential backoff.
+ *
+ * ┌─────────────────────────────────────────────────────────────────┐
+ * │ Retry Flow (Native RabbitMQ TTL + Wait queue pattern)           │
+ * ├─────────────────────────────────────────────────────────────────┤
+ * │                                                                 │
+ * │ 1. Handler throws any Error                                     │
+ * │        ↓                                                        │
+ * │ 2. Worker publishes to wait exchange                            │
+ * │    (with header `x-wait-queue` set to the wait queue name)      │
+ * │        ↓                                                        │
+ * │ 3. Wait exchange routes to wait queue                           │
+ * │    (with expiration: calculated backoff delay)                  │
+ * │        ↓                                                        │
+ * │ 4. Message waits in queue until TTL expires                     │
+ * │        ↓                                                        │
+ * │ 5. Expired message dead-lettered to retry exchange              │
+ * │    (with header `x-retry-queue` set to the main queue name)     │
+ * │        ↓                                                        │
+ * │ 6. Retry exchange routes back to main queue → RETRY             │
+ * │        ↓                                                        │
+ * │ 7. If retries exhausted: nack without requeue → DLQ             │
+ * │                                                                 │
+ * └─────────────────────────────────────────────────────────────────┘
+ */
+function handleErrorTtlBackoff(ctx, error, msg, consumerName, consumer, config) {
+  if (!isQueueWithTtlBackoffInfrastructure(consumer.queue)) {
+    ctx.logger?.error("Queue does not have TTL-backoff infrastructure", {
+      consumerName,
+      queueName: consumer.queue.name
+    });
+    return Future.value(Result.Error(new TechnicalError("Queue does not have TTL-backoff infrastructure")));
+  }
+  const queueEntry = consumer.queue;
+  const queueName = extractQueue(queueEntry).name;
+  const retryCount = msg.properties.headers?.["x-retry-count"] ?? 0;
+  if (retryCount >= config.maxRetries) {
+    ctx.logger?.error("Max retries exceeded, sending to DLQ (ttl-backoff mode)", {
+      consumerName,
+      queueName,
+      retryCount,
+      maxRetries: config.maxRetries,
+      error: error.message
+    });
+    sendToDLQ(ctx, msg, consumer);
+    return Future.value(Result.Ok(void 0));
+  }
+  const delayMs = calculateRetryDelay(retryCount, config);
+  ctx.logger?.warn("Retrying message (ttl-backoff mode)", {
+    consumerName,
+    queueName,
+    retryCount: retryCount + 1,
+    maxRetries: config.maxRetries,
+    delayMs,
+    error: error.message
+  });
+  return publishForRetry(ctx, {
+    msg,
+    exchange: queueEntry.waitExchange.name,
+    routingKey: msg.fields.routingKey,
+    waitQueueName: queueEntry.waitQueue.name,
+    queueName,
+    delayMs,
+    error
+  });
+}
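Steps 2 through 6 use only stock RabbitMQ features: a consumer-less wait queue whose dead-letter exchange points back toward the main queue, plus per-message `expiration` for the delay. A minimal sketch of an equivalent topology in plain amqplib (all names hypothetical; the package declares its own infrastructure from the contract):

```ts
import amqp from "amqplib";

const conn = await amqp.connect("amqp://localhost");
const ch = await conn.createChannel();

// Main queue, dead-lettering to the DLQ on a final nack (step 7).
await ch.assertExchange("orders.dlx", "fanout", { durable: true });
await ch.assertQueue("orders.dlq", { durable: true });
await ch.bindQueue("orders.dlq", "orders.dlx", "");
await ch.assertQueue("orders", { durable: true, deadLetterExchange: "orders.dlx" });

// Wait queue with no consumer: messages sit until their per-message
// `expiration` elapses, then dead-letter to the retry exchange (step 5),
// which routes back into the main queue (step 6).
await ch.assertExchange("orders.wait", "fanout", { durable: true });
await ch.assertExchange("orders.retry", "fanout", { durable: true });
await ch.assertQueue("orders.wait.q", { durable: true, deadLetterExchange: "orders.retry" });
await ch.bindQueue("orders.wait.q", "orders.wait", "");
await ch.bindQueue("orders", "orders.retry", "");
```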
+/**
+ * Calculate retry delay with exponential backoff and optional jitter.
+ */
+function calculateRetryDelay(retryCount, config) {
+  const { initialDelayMs, maxDelayMs, backoffMultiplier, jitter } = config;
+  let delay = Math.min(initialDelayMs * Math.pow(backoffMultiplier, retryCount), maxDelayMs);
+  if (jitter) delay = delay * (.5 + Math.random() * .5);
+  return Math.floor(delay);
+}
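Concretely: with `initialDelayMs: 1000`, `backoffMultiplier: 2`, and `maxDelayMs: 30000` (illustrative values), the delays grow 1 s, 2 s, 4 s, 8 s, 16 s, then stay capped at 30 s; `jitter` scales each delay by a random factor in [0.5, 1) to de-synchronize retry storms. The same math, standalone:

```ts
// Mirrors calculateRetryDelay above (minus jitter) for a sample config.
const delayFor = (retryCount: number): number =>
  Math.min(1_000 * Math.pow(2, retryCount), 30_000);

console.log([0, 1, 2, 3, 4, 5].map(delayFor));
// => [1000, 2000, 4000, 8000, 16000, 30000]
```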
+/**
+ * Parse message content for republishing.
+ *
+ * The channel is configured with `json: true`, so values published as plain
+ * objects are encoded once at publish time. Re-publishing the raw `Buffer`
+ * would then trigger a *second* JSON.stringify (turning the bytes into a
+ * stringified base64 blob), so for JSON payloads we must round-trip back to
+ * the parsed value. For any other content type — or when the message is
+ * compressed — we pass the bytes through untouched, since re-parsing would
+ * either fail or silently corrupt binary data.
+ */
+function parseMessageContentForRetry(ctx, msg, queueName) {
+  if (msg.properties.contentEncoding) return msg.content;
+  const contentType = msg.properties.contentType;
+  if (!(contentType === void 0 || contentType === "application/json" || contentType.startsWith("application/json;") || contentType.endsWith("+json"))) return msg.content;
+  try {
+    return JSON.parse(msg.content.toString());
+  } catch (err) {
+    ctx.logger?.warn("Failed to parse JSON message for retry, using original buffer", {
+      queueName,
+      error: err
+    });
+    return msg.content;
+  }
+}
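The hazard described in that comment is easy to reproduce with plain Node. The exact wire encoding depends on the channel wrapper's JSON mode, so treat this as the shape of the bug rather than the package's exact bytes:

```ts
const original = { orderId: 42 };
const wire = Buffer.from(JSON.stringify(original)); // bytes as consumed

// Republishing the raw Buffer through a JSON-encoding channel
// stringifies it a second time (Buffer#toJSON kicks in):
JSON.stringify(wire);
// => '{"type":"Buffer","data":[123,34,111,114,...]}'  (payload corrupted)

// Round-tripping back to the parsed value first keeps it intact:
JSON.stringify(JSON.parse(wire.toString()));
// => '{"orderId":42}'
```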
+/**
+ * Publish message with an incremented x-retry-count header and optional TTL.
+ */
+function publishForRetry(ctx, { msg, exchange, routingKey, queueName, waitQueueName, delayMs, error }) {
+  const newRetryCount = (msg.properties.headers?.["x-retry-count"] ?? 0) + 1;
+  ctx.amqpClient.ack(msg);
+  const content = parseMessageContentForRetry(ctx, msg, queueName);
+  return ctx.amqpClient.publish(exchange, routingKey, content, {
+    ...msg.properties,
+    ...delayMs !== void 0 ? { expiration: delayMs.toString() } : {},
+    headers: {
+      ...msg.properties.headers,
+      "x-retry-count": newRetryCount,
+      "x-last-error": error.message,
+      "x-first-failure-timestamp": msg.properties.headers?.["x-first-failure-timestamp"] ?? Date.now(),
+      ...waitQueueName !== void 0 ? {
+        "x-wait-queue": waitQueueName,
+        "x-retry-queue": queueName
+      } : {}
+    }
+  }).mapOkToResult((published) => {
+    if (!published) {
+      ctx.logger?.error("Failed to publish message for retry (write buffer full)", {
+        queueName,
+        retryCount: newRetryCount,
+        ...delayMs !== void 0 ? { delayMs } : {}
+      });
+      return Result.Error(new TechnicalError("Failed to publish message for retry (write buffer full)"));
+    }
+    ctx.logger?.info("Message published for retry", {
+      queueName,
+      retryCount: newRetryCount,
+      ...delayMs !== void 0 ? { delayMs } : {}
+    });
+    return Result.Ok(void 0);
+  });
+}
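After the first failed attempt, a republished message therefore carries bookkeeping headers like these (values illustrative; the last two appear only in ttl-backoff mode, alongside the per-message `expiration`):

```ts
// Headers written by publishForRetry, with example values:
const retryHeaders = {
  "x-retry-count": 1,                          // incremented on every republish
  "x-last-error": "connect ECONNREFUSED",      // message of the latest failure
  "x-first-failure-timestamp": 1719820800000,  // Date.now() at the first failure
  "x-wait-queue": "orders.wait.q",             // ttl-backoff only: parking queue
  "x-retry-queue": "orders",                   // ttl-backoff only: route back here
};
```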
+/**
+ * Send message to dead letter queue.
+ * Nacks the message without requeue, relying on DLX configuration.
+ */
+function sendToDLQ(ctx, msg, consumer) {
+  const queue = extractQueue(consumer.queue);
+  const queueName = queue.name;
+  if (!(queue.deadLetter !== void 0)) ctx.logger?.warn("Queue does not have DLX configured - message will be lost on nack", { queueName });
+  ctx.logger?.info("Sending message to DLQ", {
+    queueName,
+    deliveryTag: msg.fields.deliveryTag
+  });
+  ctx.amqpClient.nack(msg, false, false);
+}
+//#endregion
 //#region src/worker.ts
 /**
  * Type guard to check if a handler entry is a tuple format [handler, options].
@@ -246,7 +480,10 @@ function isHandlerTuple(entry) {
  */
 var TypedAmqpWorker = class TypedAmqpWorker {
   /**
-   * Internal handler storage
+   * Internal handler storage. Keyed by handler name (consumer or RPC); the
+   * stored function signature is widened so the dispatch loop can call it
+   * uniformly. The actual handler is type-checked at the worker's public API
+   * boundary via `WorkerInferHandlers<TContract>`.
    */
   actualHandlers;
   consumerOptions;
@@ -261,23 +498,49 @@ var TypedAmqpWorker = class TypedAmqpWorker {
     this.actualHandlers = {};
     this.consumerOptions = {};
     const handlersRecord = handlers;
-    for (const …
-    const handlerEntry = handlersRecord[…
-    const …
+    for (const handlerName of Object.keys(handlersRecord)) {
+      const handlerEntry = handlersRecord[handlerName];
+      const typedName = handlerName;
       if (isHandlerTuple(handlerEntry)) {
         const [handler, options] = handlerEntry;
-        this.actualHandlers[…
-        this.consumerOptions[…
+        this.actualHandlers[typedName] = handler;
+        this.consumerOptions[typedName] = {
           ...this.defaultConsumerOptions,
           ...options
         };
       } else {
-        this.actualHandlers[…
-        this.consumerOptions[…
+        this.actualHandlers[typedName] = handlerEntry;
+        this.consumerOptions[typedName] = this.defaultConsumerOptions;
       }
     }
   }
   /**
+   * Build a `ConsumerDefinition`-shaped view for a handler name, regardless
+   * of whether it came from `contract.consumers` or `contract.rpcs`. The
+   * dispatch path treats both uniformly; the returned `isRpc` flag (and the
+   * accompanying `responseSchema`) tells `processMessage` whether to validate
+   * the handler return value and publish a reply.
+   */
+  resolveConsumerView(name) {
+    const rpcs = this.contract.rpcs;
+    if (rpcs && Object.hasOwn(rpcs, name)) {
+      const rpc = rpcs[name];
+      return {
+        consumer: {
+          queue: rpc.queue,
+          message: rpc.request
+        },
+        isRpc: true,
+        responseSchema: rpc.response.payload
+      };
+    }
+    const consumerEntry = this.contract.consumers[name];
+    return {
+      consumer: extractConsumer(consumerEntry),
+      isRpc: false
+    };
+  }
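`resolveConsumerView` reads only a few properties from either branch, which pins down the contract shape it expects. A hypothetical fragment consistent with those reads (the real builder API lives in the contract package, outside this diff):

```ts
// Illustrative contract fragment, inferred from the property accesses above.
const contract = {
  consumers: {
    onOrderCreated: { queue: ordersQueue, message: orderCreatedMessage },
  },
  rpcs: {
    getOrderStatus: {
      queue: statusQueue,       // consumed like any other queue
      request: statusRequest,   // validated as the incoming message
      response: statusResponse, // response.payload: Standard Schema for the reply
    },
  },
};
```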
+  /**
   * Create a type-safe AMQP worker from a contract.
   *
   * Connection management (including automatic reconnection) is handled internally
@@ -302,12 +565,18 @@ var TypedAmqpWorker = class TypedAmqpWorker {
   * }).resultToPromise();
   * ```
   */
-  static create({ contract, handlers, urls, connectionOptions, defaultConsumerOptions, logger, telemetry }) {
+  static create({ contract, handlers, urls, connectionOptions, defaultConsumerOptions, logger, telemetry, connectTimeoutMs }) {
     const worker = new TypedAmqpWorker(contract, new AmqpClient(contract, {
       urls,
-      connectionOptions
+      connectionOptions,
+      connectTimeoutMs
     }), handlers, defaultConsumerOptions ?? {}, logger, telemetry);
-    return worker.waitForConnectionReady().flatMapOk(() => worker.consumeAll()).…
+    return worker.waitForConnectionReady().flatMapOk(() => worker.consumeAll()).flatMap((result) => result.match({
+      Ok: () => Future.value(Result.Ok(worker)),
+      Error: (error) => worker.close().tapError((closeError) => {
+        logger?.warn("Failed to close worker after setup failure", { error: closeError });
+      }).map(() => Result.Error(error))
+    }));
   }
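Two behavioral changes land in this hunk: the new `connectTimeoutMs` option is threaded through to `AmqpClient`, and a failure during setup now closes the half-initialized worker instead of leaking its connection. A usage sketch in the style of the (partially truncated) JSDoc example above:

```ts
// Assumed call shape; option names match the destructuring in create().
const worker = await TypedAmqpWorker.create({
  contract,
  handlers: { onOrderCreated: handleOrderCreated },
  urls: ["amqp://localhost:5672"],
  connectTimeoutMs: 5_000, // new in this release: bounds the initial connect
}).resultToPromise(); // rejects on setup failure, after the worker is closed
```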
   /**
   * Close the AMQP channel and connection.
@@ -337,356 +606,202 @@ var TypedAmqpWorker = class TypedAmqpWorker {
     }).flatMapOk(() => this.amqpClient.close()).mapOk(() => void 0);
   }
   /**
-   *
-   * Defaults are applied in the contract's defineQueue, so we just return the config.
-   */
-  getRetryConfigForConsumer(consumer) {
-    return extractQueue(consumer.queue).retry;
-  }
-  /**
-   * Start consuming messages for all consumers.
-   * TypeScript guarantees consumers exist (handlers require matching consumers).
+   * Start consuming for every entry in `contract.consumers` and `contract.rpcs`.
   */
   consumeAll() {
-    const …
-    const …
-    … (line truncated in the upstream diff)
+    const consumerNames = Object.keys(this.contract.consumers ?? {});
+    const rpcNames = Object.keys(this.contract.rpcs ?? {});
+    const allNames = [...consumerNames, ...rpcNames];
+    return Future.all(allNames.map((name) => this.consume(name))).map(Result.all).mapOk(() => void 0);
   }
   waitForConnectionReady() {
     return this.amqpClient.waitForConnect();
   }
   /**
-   * Start consuming messages for a specific …
-   *
+   * Start consuming messages for a specific handler — either a `consumers`
+   * entry (regular event/command consumer) or an `rpcs` entry (RPC server).
   */
-  consume(…
-  const …
-  const …
-  … (line truncated in the upstream diff)
-    return this.consumeSingle(consumerName, consumer, handler);
+  consume(name) {
+    const view = this.resolveConsumerView(name);
+    const handler = this.actualHandlers[name];
+    return this.consumeSingle(name, view, handler);
   }
   /**
-   * Validate data against a Standard Schema …
+   * Validate data against a Standard Schema. No side effects; the caller is
+   * responsible for ack/nack based on the Result.
   */
-  validateSchema(schema, data, context…
+  validateSchema(schema, data, context) {
     const rawValidation = schema["~standard"].validate(data);
     const validationPromise = rawValidation instanceof Promise ? rawValidation : Promise.resolve(rawValidation);
     return Future.fromPromise(validationPromise).mapError((error) => new TechnicalError(`Error validating ${context.field}`, error)).mapOkToResult((result) => {
       if (result.issues) return Result.Error(new TechnicalError(`${context.field} validation failed`, new MessageValidationError(context.consumerName, result.issues)));
       return Result.Ok(result.value);
-    }).tapError((error) => {
-      this.logger?.error(`${context.field} validation failed`, {
-        consumerName: context.consumerName,
-        queueName: context.queueName,
-        error
-      });
-      this.amqpClient.nack(msg, false, false);
     });
   }
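`validateSchema` accepts anything implementing the Standard Schema interface: a `~standard` object whose `validate` returns `{ value }` on success or `{ issues }` on failure, possibly wrapped in a Promise. A minimal hand-rolled schema satisfying that contract:

```ts
// Minimal Standard-Schema-compatible validator (illustrative).
const nonEmptyString = {
  "~standard": {
    version: 1,
    vendor: "example",
    validate: (value: unknown) =>
      typeof value === "string" && value.length > 0
        ? { value }
        : { issues: [{ message: "expected a non-empty string" }] },
  },
};
```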
   /**
-   * Parse and validate a message from AMQP.
-   *
+   * Parse and validate a message from AMQP. Pure: returns the validated payload
+   * and headers, or an error. The dispatch path in {@link processMessage} routes
+   * validation/parse errors directly to the DLQ (single nack) — they never enter
+   * the retry pipeline because retrying an unparseable or schema-violating
+   * payload cannot succeed.
   */
   parseAndValidateMessage(msg, consumer, consumerName) {
-    const …
-    const …
-      consumerName: String(consumerName),
-      queueName: queue.name
-    };
-    const nackAndError = (message, error) => {
-      this.logger?.error(message, {
-        ...context,
-        error
-      });
-      this.amqpClient.nack(msg, false, false);
-      return new TechnicalError(message, error);
-    };
-    const parsePayload = decompressBuffer(msg.content, msg.properties.contentEncoding).tapError((error) => {
-      this.logger?.error("Failed to decompress message", {
-        ...context,
-        error
-      });
-      this.amqpClient.nack(msg, false, false);
-    }).mapOkToResult((buffer) => Result.fromExecution(() => JSON.parse(buffer.toString())).mapError((error) => nackAndError("Failed to parse JSON", error))).flatMapOk((parsed) => this.validateSchema(consumer.message.payload, parsed, {
+    const context = { consumerName: String(consumerName) };
+    const parsePayload = decompressBuffer(msg.content, msg.properties.contentEncoding).mapErrorToResult((error) => Result.Error(new TechnicalError("Failed to decompress message", error))).mapOkToResult((buffer) => Result.fromExecution(() => JSON.parse(buffer.toString())).mapError((error) => new TechnicalError("Failed to parse JSON", error))).flatMapOk((parsed) => this.validateSchema(consumer.message.payload, parsed, {
       ...context,
       field: "payload"
-    }…
+    }));
     const parseHeaders = consumer.message.headers ? this.validateSchema(consumer.message.headers, msg.properties.headers ?? {}, {
       ...context,
       field: "headers"
-    }…
+    }) : Future.value(Result.Ok(void 0));
     return Future.allFromDict({
       payload: parsePayload,
       headers: parseHeaders
     }).map(Result.allFromDict);
   }
   /**
-   *
+   * Validate an RPC handler's response and publish it back to the caller's reply
+   * queue with the same `correlationId`. Published via the AMQP default exchange
+   * with `routingKey = msg.properties.replyTo`, which works for both
+   * `amq.rabbitmq.reply-to` and any anonymous queue declared by the caller.
+   *
+   * Failure semantics:
+   * - **Missing replyTo / correlationId**: NonRetryableError. The caller is
+   *   already lost; retrying the original message cannot recover the reply
+   *   path. The poison message lands in DLQ for inspection rather than being
+   *   silently ack'd (which would mask a contract violation).
+   * - **Schema validation failure**: NonRetryableError — the handler returned
+   *   the wrong shape; retrying the same input will not fix it.
+   * - **Publish failure**: NonRetryableError. The caller has already timed out
+   *   (or will shortly), so retrying the message wastes the queue's retry
+   *   budget on a reply that no one is waiting for. The message is logged and
+   *   DLQ'd; the original work is treated as completed for the purpose of the
+   *   inbox.
+   */
+  publishRpcResponse(msg, queueName, rpcName, responseSchema, response) {
+    const replyTo = msg.properties.replyTo;
+    const correlationId = msg.properties.correlationId;
+    if (typeof replyTo !== "string" || replyTo.length === 0) {
+      this.logger?.error("RPC handler returned a response but the incoming message has no replyTo", {
+        rpcName: String(rpcName),
+        queueName
+      });
+      return Future.value(Result.Error(new NonRetryableError(`RPC "${String(rpcName)}" received a message without replyTo; cannot deliver response`)));
+    }
+    if (typeof correlationId !== "string" || correlationId.length === 0) {
+      this.logger?.error("RPC handler returned a response but the incoming message has no correlationId", {
+        rpcName: String(rpcName),
+        queueName,
+        replyTo
+      });
+      return Future.value(Result.Error(new NonRetryableError(`RPC "${String(rpcName)}" received a message without correlationId; cannot deliver response`)));
+    }
+    let rawValidation;
+    try {
+      rawValidation = responseSchema["~standard"].validate(response);
+    } catch (error) {
+      return Future.value(Result.Error(new NonRetryableError("RPC response schema validation threw", error)));
+    }
+    const validationPromise = rawValidation instanceof Promise ? rawValidation : Promise.resolve(rawValidation);
+    return Future.fromPromise(validationPromise).mapError((error) => new NonRetryableError("RPC response schema validation threw", error)).mapOkToResult((validation) => {
+      if (validation.issues) return Result.Error(new NonRetryableError(`RPC response for "${String(rpcName)}" failed schema validation`, new MessageValidationError(String(rpcName), validation.issues)));
+      return Result.Ok(validation.value);
+    }).flatMapOk((validatedResponse) => this.amqpClient.publish("", replyTo, validatedResponse, {
+      correlationId,
+      contentType: "application/json"
+    }).mapErrorToResult((error) => Result.Error(new NonRetryableError("Failed to publish RPC response", error))).mapOkToResult((published) => published ? Result.Ok(void 0) : Result.Error(new NonRetryableError("Failed to publish RPC response: channel buffer full"))));
+  }
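The method above is the server half of RabbitMQ's request/reply pattern. For orientation, a caller using plain amqplib and direct reply-to would look roughly like this (the package presumably ships its own typed client; this shows only the wire protocol):

```ts
import amqp from "amqplib";
import { randomUUID } from "node:crypto";

const conn = await amqp.connect("amqp://localhost");
const ch = await conn.createChannel();
const correlationId = randomUUID();

// Direct reply-to: consume the pseudo-queue (noAck) before publishing.
await ch.consume("amq.rabbitmq.reply-to", (reply) => {
  if (reply?.properties.correlationId === correlationId) {
    console.log("response:", JSON.parse(reply.content.toString()));
  }
}, { noAck: true });

ch.sendToQueue("order-status", Buffer.from(JSON.stringify({ orderId: "42" })), {
  replyTo: "amq.rabbitmq.reply-to",
  correlationId,
  contentType: "application/json",
});
```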
+  /**
+   * Process a single consumed message: validate, invoke handler, optionally
+   * publish the RPC response, record telemetry, and handle errors.
   */
-  … (line truncated in the upstream diff)
+  processMessage(msg, view, name, handler) {
+    const { consumer, isRpc, responseSchema } = view;
     const queueName = extractQueue(consumer.queue).name;
-    … (several truncated lines in the upstream diff omitted)
+    const startTime = Date.now();
+    const span = startConsumeSpan(this.telemetry, queueName, String(name), { "messaging.rabbitmq.message.delivery_tag": msg.fields.deliveryTag });
+    let messageHandled = false;
+    let firstError;
+    return this.parseAndValidateMessage(msg, consumer, name).flatMap((parseResult) => parseResult.match({
+      Ok: (validatedMessage) => handler(validatedMessage, msg).flatMapOk((handlerResponse) => {
+        if (isRpc && responseSchema) return this.publishRpcResponse(msg, queueName, name, responseSchema, handlerResponse).flatMapOk(() => {
+          this.logger?.info("Message consumed successfully", {
+            consumerName: String(name),
+            queueName
+          });
+          this.amqpClient.ack(msg);
+          messageHandled = true;
+          return Future.value(Result.Ok(void 0));
         });
-        return;
-      }
-      const startTime = Date.now();
-      const span = startConsumeSpan(this.telemetry, queueName, String(consumerName), { "messaging.rabbitmq.message.delivery_tag": msg.fields.deliveryTag });
-      await this.parseAndValidateMessage(msg, consumer, consumerName).flatMapOk((validatedMessage) => handler(validatedMessage, msg).flatMapOk(() => {
         this.logger?.info("Message consumed successfully", {
-          consumerName: String(…
+          consumerName: String(name),
           queueName
         });
         this.amqpClient.ack(msg);
-        … (line truncated in the upstream diff)
-        endSpanSuccess(span);
-        recordConsumeMetric(this.telemetry, queueName, String(consumerName), true, durationMs);
+        messageHandled = true;
         return Future.value(Result.Ok(void 0));
       }).flatMapError((handlerError) => {
         this.logger?.error("Error processing message", {
-          consumerName: String(…
+          consumerName: String(name),
           queueName,
           errorType: handlerError.name,
           error: handlerError.message
         });
-        … (old lines 455-480: span/metric teardown and the start of the old handleError JSDoc, truncated in the upstream diff)
-   * 3. Otherwise -> publish to wait queue with TTL for retry
-   *
-   * **none mode (no retry config):**
-   * 1. send directly to DLQ (no retry)
-   */
-  handleError(error, msg, consumerName, consumer) {
-    if (error instanceof NonRetryableError) {
-      this.logger?.error("Non-retryable error, sending to DLQ immediately", {
-        consumerName,
-        errorType: error.name,
-        error: error.message
-      });
-      this.sendToDLQ(msg, consumer);
-      return Future.value(Result.Ok(void 0));
-    }
-    const config = this.getRetryConfigForConsumer(consumer);
-    if (config.mode === "immediate-requeue") return this.handleErrorImmediateRequeue(error, msg, consumerName, consumer, config);
-    if (config.mode === "ttl-backoff") return this.handleErrorTtlBackoff(error, msg, consumerName, consumer, config);
-    this.logger?.warn("Retry disabled (none mode), sending to DLQ", {
-      consumerName,
-      error: error.message
-    });
-    this.sendToDLQ(msg, consumer);
-    return Future.value(Result.Ok(void 0));
-  }
-  /**
-   * Handle error by requeuing immediately.
-   *
-   * For quorum queues, messages are requeued with `nack(requeue=true)`, and the worker tracks delivery count via the native RabbitMQ `x-delivery-count` header.
-   * For classic queues, messages are re-published on the same queue, and the worker tracks delivery count via a custom `x-retry-count` header.
-   * When the count exceeds `maxRetries`, the message is automatically dead-lettered (if DLX is configured) or dropped.
-   *
-   * This is simpler than TTL-based retry but provides immediate retries only.
-   */
-  handleErrorImmediateRequeue(error, msg, consumerName, consumer, config) {
-    const queue = extractQueue(consumer.queue);
-    const queueName = queue.name;
-    const retryCount = queue.type === "quorum" ? msg.properties.headers?.["x-delivery-count"] ?? 0 : msg.properties.headers?.["x-retry-count"] ?? 0;
-    if (retryCount >= config.maxRetries) {
-      this.logger?.error("Max retries exceeded, sending to DLQ (immediate-requeue mode)", {
-        consumerName,
-        queueName,
-        retryCount,
-        maxRetries: config.maxRetries,
-        error: error.message
-      });
-      this.sendToDLQ(msg, consumer);
-      return Future.value(Result.Ok(void 0));
-    }
-    this.logger?.warn("Retrying message (immediate-requeue mode)", {
-      consumerName,
-      queueName,
-      retryCount,
-      maxRetries: config.maxRetries,
-      error: error.message
-    });
-    if (queue.type === "quorum") {
-      this.amqpClient.nack(msg, false, true);
-      return Future.value(Result.Ok(void 0));
-    } else return this.publishForRetry({
-      msg,
-      exchange: msg.fields.exchange,
-      routingKey: msg.fields.routingKey,
-      queueName,
-      error
-    });
-  }
-  /**
-   * Handle error using TTL + wait queue pattern for exponential backoff.
-   *
-   * [box diagram identical to the one added in src/retry.ts above]
-   */
-  handleErrorTtlBackoff(error, msg, consumerName, consumer, config) {
-    if (!isQueueWithTtlBackoffInfrastructure(consumer.queue)) {
-      this.logger?.error("Queue does not have TTL-backoff infrastructure", {
-        consumerName,
-        queueName: consumer.queue.name
-      });
-      return Future.value(Result.Error(new TechnicalError("Queue does not have TTL-backoff infrastructure")));
-    }
-    const queueEntry = consumer.queue;
-    const queueName = extractQueue(queueEntry).name;
-    const retryCount = msg.properties.headers?.["x-retry-count"] ?? 0;
-    if (retryCount >= config.maxRetries) {
-      this.logger?.error("Max retries exceeded, sending to DLQ (ttl-backoff mode)", {
-        consumerName,
-        queueName,
-        retryCount,
-        maxRetries: config.maxRetries,
-        error: error.message
-      });
-      this.sendToDLQ(msg, consumer);
-      return Future.value(Result.Ok(void 0));
-    }
-    const delayMs = this.calculateRetryDelay(retryCount, config);
-    this.logger?.warn("Retrying message (ttl-backoff mode)", {
-      consumerName,
-      queueName,
-      retryCount: retryCount + 1,
-      maxRetries: config.maxRetries,
-      delayMs,
-      error: error.message
-    });
-    return this.publishForRetry({
-      msg,
-      exchange: queueEntry.waitExchange.name,
-      routingKey: msg.fields.routingKey,
-      waitQueueName: queueEntry.waitQueue.name,
-      queueName,
-      delayMs,
-      error
+        firstError = handlerError;
+        return handleError({
+          amqpClient: this.amqpClient,
+          logger: this.logger
+        }, handlerError, msg, String(name), consumer);
+      }),
+      Error: (parseError) => {
+        firstError = parseError;
+        this.logger?.error("Failed to parse/validate message; sending to DLQ", {
+          consumerName: String(name),
+          queueName,
+          error: parseError
+        });
+        this.amqpClient.nack(msg, false, false);
+        return Future.value(Result.Error(parseError));
+      }
+    })).map((result) => {
+      const durationMs = Date.now() - startTime;
+      if (messageHandled) {
+        endSpanSuccess(span);
+        recordConsumeMetric(this.telemetry, queueName, String(name), true, durationMs);
+      } else {
+        endSpanError(span, result.isError() ? result.error : firstError ?? /* @__PURE__ */ new Error("Unknown error"));
+        recordConsumeMetric(this.telemetry, queueName, String(name), false, durationMs);
+      }
+      return result;
     });
   }
   /**
-   *
-   */
-  calculateRetryDelay(retryCount, config) {
-    const { initialDelayMs, maxDelayMs, backoffMultiplier, jitter } = config;
-    let delay = Math.min(initialDelayMs * Math.pow(backoffMultiplier, retryCount), maxDelayMs);
-    if (jitter) delay = delay * (.5 + Math.random() * .5);
-    return Math.floor(delay);
-  }
-  /**
-   * Parse message content for republishing.
-   * Prevents double JSON serialization by converting Buffer to object when possible.
-   */
-  parseMessageContentForRetry(msg, queueName) {
-    let content = msg.content;
-    if (!msg.properties.contentEncoding) try {
-      content = JSON.parse(msg.content.toString());
-    } catch (err) {
-      this.logger?.warn("Failed to parse message for retry, using original buffer", {
-        queueName,
-        error: err
-      });
-    }
-    return content;
-  }
-  /**
-   * Publish message with an incremented x-retry-count header and optional TTL.
+   * Consume messages one at a time.
   */
-  … (line truncated in the upstream diff)
-  const …
-  this.amqpClient.…
-  … (several truncated lines of the old publishForRetry omitted)
-    "x-retry-count": newRetryCount,
-    "x-last-error": error.message,
-    "x-first-failure-timestamp": msg.properties.headers?.["x-first-failure-timestamp"] ?? Date.now(),
-    ...waitQueueName !== void 0 ? {
-      "x-wait-queue": waitQueueName,
-      "x-retry-queue": queueName
-    } : {}
+  consumeSingle(name, view, handler) {
+    const queueName = extractQueue(view.consumer.queue).name;
+    return this.amqpClient.consume(queueName, async (msg) => {
+      if (msg === null) {
+        this.logger?.warn("Consumer cancelled by server", {
+          consumerName: String(name),
+          queueName
+        });
+        return;
       }
-      … (truncated lines omitted)
+      try {
+        await this.processMessage(msg, view, name, handler).toPromise();
+      } catch (error) {
+        this.logger?.error("Uncaught error in consume callback; nacking message", {
+          consumerName: String(name),
           queueName,
-          … (line truncated in the upstream diff)
-          ...delayMs !== void 0 ? { delayMs } : {}
+          error
         });
-        … (line truncated in the upstream diff)
+        this.amqpClient.nack(msg, false, false);
       }
-      … (truncated lines omitted)
-      ...delayMs !== void 0 ? { delayMs } : {}
-    });
-    return Result.Ok(void 0);
-  });
-  }
-  /**
-   * Send message to dead letter queue.
-   * Nacks the message without requeue, relying on DLX configuration.
-   */
-  sendToDLQ(msg, consumer) {
-    const queue = extractQueue(consumer.queue);
-    const queueName = queue.name;
-    if (!(queue.deadLetter !== void 0)) this.logger?.warn("Queue does not have DLX configured - message will be lost on nack", { queueName });
-    this.logger?.info("Sending message to DLQ", {
-      queueName,
-      deliveryTag: msg.fields.deliveryTag
-    });
-    this.amqpClient.nack(msg, false, false);
+    }, this.consumerOptions[name]).tapOk((consumerTag) => {
+      this.consumerTags.add(consumerTag);
+    }).mapError((error) => new TechnicalError(`Failed to start consuming for "${String(name)}"`, error)).mapOk(() => void 0);
   }
 };
 //#endregion