@amqp-contract/worker 0.21.0 → 0.22.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +371 -274
- package/dist/index.d.cts +99 -134
- package/dist/index.d.cts.map +1 -1
- package/dist/index.d.mts +99 -134
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +371 -274
- package/dist/index.mjs.map +1 -1
- package/docs/index.md +50 -86
- package/package.json +5 -5
package/dist/index.cjs
CHANGED
@@ -197,6 +197,231 @@ function nonRetryable(message, cause) {
   return new NonRetryableError(message, cause);
 }
 //#endregion
+//#region src/retry.ts
+/**
+ * Handle error in message processing with retry logic.
+ *
+ * Flow depends on retry mode:
+ *
+ * **immediate-requeue mode:**
+ * 1. If NonRetryableError -> send directly to DLQ (no retry)
+ * 2. If max retries exceeded -> send to DLQ
+ * 3. Otherwise -> requeue immediately for retry
+ *
+ * **ttl-backoff mode:**
+ * 1. If NonRetryableError -> send directly to DLQ (no retry)
+ * 2. If max retries exceeded -> send to DLQ
+ * 3. Otherwise -> publish to wait queue with TTL for retry
+ *
+ * **none mode (no retry config):**
+ * 1. send directly to DLQ (no retry)
+ */
+function handleError(ctx, error, msg, consumerName, consumer) {
+  if (error instanceof NonRetryableError) {
+    ctx.logger?.error("Non-retryable error, sending to DLQ immediately", {
+      consumerName,
+      errorType: error.name,
+      error: error.message
+    });
+    sendToDLQ(ctx, msg, consumer);
+    return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
+  }
+  const config = (0, _amqp_contract_contract.extractQueue)(consumer.queue).retry;
+  if (config.mode === "immediate-requeue") return handleErrorImmediateRequeue(ctx, error, msg, consumerName, consumer, config);
+  if (config.mode === "ttl-backoff") return handleErrorTtlBackoff(ctx, error, msg, consumerName, consumer, config);
+  ctx.logger?.warn("Retry disabled (none mode), sending to DLQ", {
+    consumerName,
+    error: error.message
+  });
+  sendToDLQ(ctx, msg, consumer);
+  return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
+}
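A handler opts into the first branch above simply by throwing (or returning) a `NonRetryableError`. A minimal sketch from the application side, assuming the `nonRetryable` helper defined earlier in this file is part of the package's public exports; the lookup function and payload shape are hypothetical:

```ts
import { nonRetryable } from "@amqp-contract/worker";

// Hypothetical dependency; stands in for any permanently-failing lookup.
declare function findUser(id: string): Promise<{ email: string } | undefined>;

async function onUserCreated(message: { userId: string }): Promise<void> {
  const user = await findUser(message.userId);
  if (user === undefined) {
    // The same input can never succeed, so skip retries: handleError sees
    // a NonRetryableError instance and dead-letters immediately.
    throw nonRetryable(`Unknown user ${message.userId}`);
  }
  // Any other error leaving this handler is retried per the queue's mode.
}
```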
+/**
+ * Handle error by requeuing immediately.
+ *
+ * For quorum queues, messages are requeued with `nack(requeue=true)`, and the worker tracks delivery count via the native RabbitMQ `x-delivery-count` header.
+ * For classic queues, messages are re-published on the same queue, and the worker tracks delivery count via a custom `x-retry-count` header.
+ * When the count exceeds `maxRetries`, the message is automatically dead-lettered (if DLX is configured) or dropped.
+ *
+ * This is simpler than TTL-based retry but provides immediate retries only.
+ */
+function handleErrorImmediateRequeue(ctx, error, msg, consumerName, consumer, config) {
+  const queue = (0, _amqp_contract_contract.extractQueue)(consumer.queue);
+  const queueName = queue.name;
+  const retryCount = queue.type === "quorum" ? msg.properties.headers?.["x-delivery-count"] ?? 0 : msg.properties.headers?.["x-retry-count"] ?? 0;
+  if (retryCount >= config.maxRetries) {
+    ctx.logger?.error("Max retries exceeded, sending to DLQ (immediate-requeue mode)", {
+      consumerName,
+      queueName,
+      retryCount,
+      maxRetries: config.maxRetries,
+      error: error.message
+    });
+    sendToDLQ(ctx, msg, consumer);
+    return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
+  }
+  ctx.logger?.warn("Retrying message (immediate-requeue mode)", {
+    consumerName,
+    queueName,
+    retryCount,
+    maxRetries: config.maxRetries,
+    error: error.message
+  });
+  if (queue.type === "quorum") {
+    ctx.amqpClient.nack(msg, false, true);
+    return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
+  } else return publishForRetry(ctx, {
+    msg,
+    exchange: msg.fields.exchange,
+    routingKey: msg.fields.routingKey,
+    queueName,
+    error
+  });
+}
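The quorum/classic split in the retry count above, isolated as a sketch; `AmqpMessage` is a local stand-in for the amqplib message type, not a type this package exports:

```ts
type AmqpMessage = { properties: { headers?: Record<string, unknown> } };

function readRetryCount(msg: AmqpMessage, queueType: "quorum" | "classic"): number {
  // Quorum queues bump x-delivery-count natively on every requeue; classic
  // queues rely on the custom x-retry-count stamped by publishForRetry.
  const key = queueType === "quorum" ? "x-delivery-count" : "x-retry-count";
  return Number(msg.properties.headers?.[key] ?? 0);
}
```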
+/**
+ * Handle error using TTL + wait queue pattern for exponential backoff.
+ *
+ * ┌─────────────────────────────────────────────────────────────────┐
+ * │ Retry Flow (Native RabbitMQ TTL + Wait queue pattern)           │
+ * ├─────────────────────────────────────────────────────────────────┤
+ * │                                                                 │
+ * │ 1. Handler throws any Error                                     │
+ * │    ↓                                                            │
+ * │ 2. Worker publishes to wait exchange                            |
+ * |    (with header `x-wait-queue` set to the wait queue name)      │
+ * │    ↓                                                            │
+ * │ 3. Wait exchange routes to wait queue                           │
+ * │    (with expiration: calculated backoff delay)                  │
+ * │    ↓                                                            │
+ * │ 4. Message waits in queue until TTL expires                     │
+ * │    ↓                                                            │
+ * │ 5. Expired message dead-lettered to retry exchange              |
+ * |    (with header `x-retry-queue` set to the main queue name)     │
+ * │    ↓                                                            │
+ * │ 6. Retry exchange routes back to main queue → RETRY             │
+ * │    ↓                                                            │
+ * │ 7. If retries exhausted: nack without requeue → DLQ             │
+ * │                                                                 │
+ * └─────────────────────────────────────────────────────────────────┘
+ */
+function handleErrorTtlBackoff(ctx, error, msg, consumerName, consumer, config) {
+  if (!(0, _amqp_contract_contract.isQueueWithTtlBackoffInfrastructure)(consumer.queue)) {
+    ctx.logger?.error("Queue does not have TTL-backoff infrastructure", {
+      consumerName,
+      queueName: consumer.queue.name
+    });
+    return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Error(new _amqp_contract_core.TechnicalError("Queue does not have TTL-backoff infrastructure")));
+  }
+  const queueEntry = consumer.queue;
+  const queueName = (0, _amqp_contract_contract.extractQueue)(queueEntry).name;
+  const retryCount = msg.properties.headers?.["x-retry-count"] ?? 0;
+  if (retryCount >= config.maxRetries) {
+    ctx.logger?.error("Max retries exceeded, sending to DLQ (ttl-backoff mode)", {
+      consumerName,
+      queueName,
+      retryCount,
+      maxRetries: config.maxRetries,
+      error: error.message
+    });
+    sendToDLQ(ctx, msg, consumer);
+    return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
+  }
+  const delayMs = calculateRetryDelay(retryCount, config);
+  ctx.logger?.warn("Retrying message (ttl-backoff mode)", {
+    consumerName,
+    queueName,
+    retryCount: retryCount + 1,
+    maxRetries: config.maxRetries,
+    delayMs,
+    error: error.message
+  });
+  return publishForRetry(ctx, {
+    msg,
+    exchange: queueEntry.waitExchange.name,
+    routingKey: msg.fields.routingKey,
+    waitQueueName: queueEntry.waitQueue.name,
+    queueName,
+    delayMs,
+    error
+  });
+}
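For reference, a retry config carrying the fields this mode reads (`maxRetries` plus the backoff parameters consumed by `calculateRetryDelay` below); the values are illustrative, and wiring the config onto a queue happens in the contract package, outside this diff:

```ts
const retry = {
  mode: "ttl-backoff" as const,
  maxRetries: 5,          // afterwards the message is dead-lettered
  initialDelayMs: 1_000,
  maxDelayMs: 60_000,     // cap for the exponential curve
  backoffMultiplier: 2,
  jitter: true,           // scale each delay by a random factor in [0.5, 1.0)
};
```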
+/**
+ * Calculate retry delay with exponential backoff and optional jitter.
+ */
+function calculateRetryDelay(retryCount, config) {
+  const { initialDelayMs, maxDelayMs, backoffMultiplier, jitter } = config;
+  let delay = Math.min(initialDelayMs * Math.pow(backoffMultiplier, retryCount), maxDelayMs);
+  if (jitter) delay = delay * (.5 + Math.random() * .5);
+  return Math.floor(delay);
+}
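Worked through for the illustrative config above with jitter off, `delay(n) = min(initialDelayMs * backoffMultiplier^n, maxDelayMs)` gives:

```ts
const schedule = Array.from({ length: 7 }, (_, n) => Math.min(1_000 * 2 ** n, 60_000));
console.log(schedule); // [1000, 2000, 4000, 8000, 16000, 32000, 60000]
```

With `jitter` enabled, each entry is additionally scaled by `0.5 + Math.random() * 0.5`, which spreads apart retries from messages that failed together.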
+/**
+ * Parse message content for republishing.
+ * Prevents double JSON serialization by converting Buffer to object when possible.
+ */
+function parseMessageContentForRetry(ctx, msg, queueName) {
+  let content = msg.content;
+  if (!msg.properties.contentEncoding) try {
+    content = JSON.parse(msg.content.toString());
+  } catch (err) {
+    ctx.logger?.warn("Failed to parse message for retry, using original buffer", {
+      queueName,
+      error: err
+    });
+  }
+  return content;
+}
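Why the re-parse matters, assuming the client JSON-serializes object payloads on publish (which the fall-back-to-Buffer behaviour here implies): republishing the raw Buffer would encode the already-encoded body a second time.

```ts
const body = Buffer.from(JSON.stringify({ orderId: 42 }));

JSON.stringify(body.toString());
// '"{\\"orderId\\":42}"'  <- double-encoded: a JSON string containing JSON

JSON.stringify(JSON.parse(body.toString()));
// '{"orderId":42}'        <- round-trips to the original body
```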
+/**
+ * Publish message with an incremented x-retry-count header and optional TTL.
+ */
+function publishForRetry(ctx, { msg, exchange, routingKey, queueName, waitQueueName, delayMs, error }) {
+  const newRetryCount = (msg.properties.headers?.["x-retry-count"] ?? 0) + 1;
+  ctx.amqpClient.ack(msg);
+  const content = parseMessageContentForRetry(ctx, msg, queueName);
+  return ctx.amqpClient.publish(exchange, routingKey, content, {
+    ...msg.properties,
+    ...delayMs !== void 0 ? { expiration: delayMs.toString() } : {},
+    headers: {
+      ...msg.properties.headers,
+      "x-retry-count": newRetryCount,
+      "x-last-error": error.message,
+      "x-first-failure-timestamp": msg.properties.headers?.["x-first-failure-timestamp"] ?? Date.now(),
+      ...waitQueueName !== void 0 ? {
+        "x-wait-queue": waitQueueName,
+        "x-retry-queue": queueName
+      } : {}
+    }
+  }).mapOkToResult((published) => {
+    if (!published) {
+      ctx.logger?.error("Failed to publish message for retry (write buffer full)", {
+        queueName,
+        retryCount: newRetryCount,
+        ...delayMs !== void 0 ? { delayMs } : {}
+      });
+      return _swan_io_boxed.Result.Error(new _amqp_contract_core.TechnicalError("Failed to publish message for retry (write buffer full)"));
+    }
+    ctx.logger?.info("Message published for retry", {
+      queueName,
+      retryCount: newRetryCount,
+      ...delayMs !== void 0 ? { delayMs } : {}
+    });
+    return _swan_io_boxed.Result.Ok(void 0);
+  });
+}
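The headers a message carries after, say, its third retry (values and queue names illustrative); the `x-wait-queue`/`x-retry-queue` pair only appears in ttl-backoff mode, where `waitQueueName` is passed:

```ts
const headersAfterThirdRetry = {
  "x-retry-count": 3,
  "x-last-error": "connect ECONNREFUSED",      // most recent failure message
  "x-first-failure-timestamp": 1700000000000,  // preserved from the first attempt
  "x-wait-queue": "orders.wait",               // ttl-backoff only: wait-queue routing
  "x-retry-queue": "orders",                   // ttl-backoff only: route back home
};
```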
+/**
+ * Send message to dead letter queue.
+ * Nacks the message without requeue, relying on DLX configuration.
+ */
+function sendToDLQ(ctx, msg, consumer) {
+  const queue = (0, _amqp_contract_contract.extractQueue)(consumer.queue);
+  const queueName = queue.name;
+  if (!(queue.deadLetter !== void 0)) ctx.logger?.warn("Queue does not have DLX configured - message will be lost on nack", { queueName });
+  ctx.logger?.info("Sending message to DLQ", {
+    queueName,
+    deliveryTag: msg.fields.deliveryTag
+  });
+  ctx.amqpClient.nack(msg, false, false);
+}
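What "relying on DLX configuration" means at the broker level, sketched with raw amqplib (queue and exchange names hypothetical; the contract package normally declares this topology itself):

```ts
import amqplib from "amqplib";

const connection = await amqplib.connect("amqp://localhost");
const channel = await connection.createChannel();
// With this argument, a nack(requeue=false) on "orders" is routed by the
// broker to "orders.dlx"; without it, the nacked message is simply dropped.
await channel.assertQueue("orders", {
  arguments: { "x-dead-letter-exchange": "orders.dlx" },
});
```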
+//#endregion
 //#region src/worker.ts
 /**
  * Type guard to check if a handler entry is a tuple format [handler, options].
@@ -247,7 +472,10 @@ function isHandlerTuple(entry) {
  */
 var TypedAmqpWorker = class TypedAmqpWorker {
   /**
-   * Internal handler storage
+   * Internal handler storage. Keyed by handler name (consumer or RPC); the
+   * stored function signature is widened so the dispatch loop can call it
+   * uniformly. The actual handler is type-checked at the worker's public API
+   * boundary via `WorkerInferHandlers<TContract>`.
    */
   actualHandlers;
   consumerOptions;
@@ -262,23 +490,49 @@ var TypedAmqpWorker = class TypedAmqpWorker {
     this.actualHandlers = {};
     this.consumerOptions = {};
     const handlersRecord = handlers;
-    for (const
-    const handlerEntry = handlersRecord[
-    const
+    for (const handlerName of Object.keys(handlersRecord)) {
+      const handlerEntry = handlersRecord[handlerName];
+      const typedName = handlerName;
       if (isHandlerTuple(handlerEntry)) {
         const [handler, options] = handlerEntry;
-        this.actualHandlers[
-        this.consumerOptions[
+        this.actualHandlers[typedName] = handler;
+        this.consumerOptions[typedName] = {
          ...this.defaultConsumerOptions,
          ...options
        };
      } else {
-        this.actualHandlers[
-        this.consumerOptions[
+        this.actualHandlers[typedName] = handlerEntry;
+        this.consumerOptions[typedName] = this.defaultConsumerOptions;
      }
    }
  }
  /**
+   * Build a `ConsumerDefinition`-shaped view for a handler name, regardless
+   * of whether it came from `contract.consumers` or `contract.rpcs`. The
+   * dispatch path treats both uniformly; the returned `isRpc` flag (and the
+   * accompanying `responseSchema`) tells `processMessage` whether to validate
+   * the handler return value and publish a reply.
+   */
+  resolveConsumerView(name) {
+    const rpcs = this.contract.rpcs;
+    if (rpcs && Object.hasOwn(rpcs, name)) {
+      const rpc = rpcs[name];
+      return {
+        consumer: {
+          queue: rpc.queue,
+          message: rpc.request
+        },
+        isRpc: true,
+        responseSchema: rpc.response.payload
+      };
+    }
+    const consumerEntry = this.contract.consumers[name];
+    return {
+      consumer: (0, _amqp_contract_contract.extractConsumer)(consumerEntry),
+      isRpc: false
+    };
+  }
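The two shapes `resolveConsumerView` can produce, written out as a type for orientation; `ConsumerView` is a name invented here, since the compiled dist carries no type information:

```ts
type ConsumerView =
  | { consumer: { queue: unknown; message: unknown }; isRpc: true; responseSchema: unknown }
  | { consumer: unknown; isRpc: false; responseSchema?: undefined };
```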
+  /**
   * Create a type-safe AMQP worker from a contract.
   *
   * Connection management (including automatic reconnection) is handled internally
@@ -303,12 +557,18 @@ var TypedAmqpWorker = class TypedAmqpWorker {
   * }).resultToPromise();
   * ```
   */
-  static create({ contract, handlers, urls, connectionOptions, defaultConsumerOptions, logger, telemetry }) {
+  static create({ contract, handlers, urls, connectionOptions, defaultConsumerOptions, logger, telemetry, connectTimeoutMs }) {
     const worker = new TypedAmqpWorker(contract, new _amqp_contract_core.AmqpClient(contract, {
       urls,
-      connectionOptions
+      connectionOptions,
+      connectTimeoutMs
     }), handlers, defaultConsumerOptions ?? {}, logger, telemetry);
-    return worker.waitForConnectionReady().flatMapOk(() => worker.consumeAll()).
+    return worker.waitForConnectionReady().flatMapOk(() => worker.consumeAll()).flatMap((result) => result.match({
+      Ok: () => _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(worker)),
+      Error: (error) => worker.close().tapError((closeError) => {
+        logger?.warn("Failed to close worker after setup failure", { error: closeError });
+      }).map(() => _swan_io_boxed.Result.Error(error))
+    }));
   }
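Usage following the JSDoc example above, with the new option (contract and handlers assumed to be defined per the package docs). Note the other behavioural change in this hunk: if `consumeAll` fails during setup, `create` now closes the half-started worker instead of leaking its connection.

```ts
const worker = await TypedAmqpWorker.create({
  contract,
  handlers,
  urls: ["amqp://localhost:5672"],
  connectTimeoutMs: 10_000, // new in 0.22.0: bounds the wait for the initial connection
}).resultToPromise();
```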
   /**
   * Close the AMQP channel and connection.
@@ -338,33 +598,25 @@ var TypedAmqpWorker = class TypedAmqpWorker {
     }).flatMapOk(() => this.amqpClient.close()).mapOk(() => void 0);
   }
   /**
-   *
-   * Defaults are applied in the contract's defineQueue, so we just return the config.
-   */
-  getRetryConfigForConsumer(consumer) {
-    return (0, _amqp_contract_contract.extractQueue)(consumer.queue).retry;
-  }
-  /**
-   * Start consuming messages for all consumers.
-   * TypeScript guarantees consumers exist (handlers require matching consumers).
+   * Start consuming for every entry in `contract.consumers` and `contract.rpcs`.
   */
   consumeAll() {
-    const
-    const
-
+    const consumerNames = Object.keys(this.contract.consumers ?? {});
+    const rpcNames = Object.keys(this.contract.rpcs ?? {});
+    const allNames = [...consumerNames, ...rpcNames];
+    return _swan_io_boxed.Future.all(allNames.map((name) => this.consume(name))).map(_swan_io_boxed.Result.all).mapOk(() => void 0);
   }
   waitForConnectionReady() {
     return this.amqpClient.waitForConnect();
   }
   /**
-   * Start consuming messages for a specific
-   *
+   * Start consuming messages for a specific handler — either a `consumers`
+   * entry (regular event/command consumer) or an `rpcs` entry (RPC server).
   */
-  consume(
-    const
-    const
-
-    return this.consumeSingle(consumerName, consumer, handler);
+  consume(name) {
+    const view = this.resolveConsumerView(name);
+    const handler = this.actualHandlers[name];
+    return this.consumeSingle(name, view, handler);
   }
   /**
   * Validate data against a Standard Schema and handle errors.
@@ -422,272 +674,117 @@ var TypedAmqpWorker = class TypedAmqpWorker {
     }).map(_swan_io_boxed.Result.allFromDict);
   }
   /**
-   *
-
-
-
-    return this.amqpClient.consume(queueName, async (msg) => {
-      if (msg === null) {
-        this.logger?.warn("Consumer cancelled by server", {
-          consumerName: String(consumerName),
-          queueName
-        });
-        return;
-      }
-      const startTime = Date.now();
-      const span = (0, _amqp_contract_core.startConsumeSpan)(this.telemetry, queueName, String(consumerName), { "messaging.rabbitmq.message.delivery_tag": msg.fields.deliveryTag });
-      await this.parseAndValidateMessage(msg, consumer, consumerName).flatMapOk((validatedMessage) => handler(validatedMessage, msg).flatMapOk(() => {
-        this.logger?.info("Message consumed successfully", {
-          consumerName: String(consumerName),
-          queueName
-        });
-        this.amqpClient.ack(msg);
-        const durationMs = Date.now() - startTime;
-        (0, _amqp_contract_core.endSpanSuccess)(span);
-        (0, _amqp_contract_core.recordConsumeMetric)(this.telemetry, queueName, String(consumerName), true, durationMs);
-        return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
-      }).flatMapError((handlerError) => {
-        this.logger?.error("Error processing message", {
-          consumerName: String(consumerName),
-          queueName,
-          errorType: handlerError.name,
-          error: handlerError.message
-        });
-        const durationMs = Date.now() - startTime;
-        (0, _amqp_contract_core.endSpanError)(span, handlerError);
-        (0, _amqp_contract_core.recordConsumeMetric)(this.telemetry, queueName, String(consumerName), false, durationMs);
-        return this.handleError(handlerError, msg, String(consumerName), consumer);
-      })).tapError(() => {
-        const durationMs = Date.now() - startTime;
-        (0, _amqp_contract_core.endSpanError)(span, /* @__PURE__ */ new Error("Message validation failed"));
-        (0, _amqp_contract_core.recordConsumeMetric)(this.telemetry, queueName, String(consumerName), false, durationMs);
-      }).toPromise();
-    }, this.consumerOptions[consumerName]).tapOk((consumerTag) => {
-      this.consumerTags.add(consumerTag);
-    }).mapError((error) => new _amqp_contract_core.TechnicalError(`Failed to start consuming for "${String(consumerName)}"`, error)).mapOk(() => void 0);
-  }
-  /**
-   * Handle error in message processing with retry logic.
-   *
-   * Flow depends on retry mode:
+   * Validate an RPC handler's response and publish it back to the caller's reply
+   * queue with the same `correlationId`. Published via the AMQP default exchange
+   * with `routingKey = msg.properties.replyTo`, which works for both
+   * `amq.rabbitmq.reply-to` and any anonymous queue declared by the caller.
   *
-   *
-   *
-   *
-   * 3. Otherwise -> requeue immediately for retry
-   *
-   * **ttl-backoff mode:**
-   * 1. If NonRetryableError -> send directly to DLQ (no retry)
-   * 2. If max retries exceeded -> send to DLQ
-   * 3. Otherwise -> publish to wait queue with TTL for retry
-   *
-   * **none mode (no retry config):**
-   * 1. send directly to DLQ (no retry)
+   * Validation errors are surfaced as NonRetryableError (handler returned the
+   * wrong shape — retrying the same input will not fix it). Publish errors are
+   * surfaced as RetryableError so the worker's existing retry logic applies.
   */
-
-
-
-
-
-
+  publishRpcResponse(msg, queueName, rpcName, responseSchema, response) {
+    const replyTo = msg.properties.replyTo;
+    const correlationId = msg.properties.correlationId;
+    if (typeof replyTo !== "string" || replyTo.length === 0) {
+      this.logger?.warn("RPC handler returned a response but the incoming message has no replyTo; dropping response", {
+        rpcName: String(rpcName),
+        queueName
      });
-      this.sendToDLQ(msg, consumer);
      return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
    }
-
-
-
-    this.logger?.warn("Retry disabled (none mode), sending to DLQ", {
-      consumerName,
-      error: error.message
-    });
-    this.sendToDLQ(msg, consumer);
-    return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
-  }
-  /**
-   * Handle error by requeuing immediately.
-   *
-   * For quorum queues, messages are requeued with `nack(requeue=true)`, and the worker tracks delivery count via the native RabbitMQ `x-delivery-count` header.
-   * For classic queues, messages are re-published on the same queue, and the worker tracks delivery count via a custom `x-retry-count` header.
-   * When the count exceeds `maxRetries`, the message is automatically dead-lettered (if DLX is configured) or dropped.
-   *
-   * This is simpler than TTL-based retry but provides immediate retries only.
-   */
-  handleErrorImmediateRequeue(error, msg, consumerName, consumer, config) {
-    const queue = (0, _amqp_contract_contract.extractQueue)(consumer.queue);
-    const queueName = queue.name;
-    const retryCount = queue.type === "quorum" ? msg.properties.headers?.["x-delivery-count"] ?? 0 : msg.properties.headers?.["x-retry-count"] ?? 0;
-    if (retryCount >= config.maxRetries) {
-      this.logger?.error("Max retries exceeded, sending to DLQ (immediate-requeue mode)", {
-        consumerName,
+    if (typeof correlationId !== "string" || correlationId.length === 0) {
+      this.logger?.warn("RPC handler returned a response but the incoming message has no correlationId; dropping response", {
+        rpcName: String(rpcName),
        queueName,
-
-        maxRetries: config.maxRetries,
-        error: error.message
+        replyTo
      });
-      this.sendToDLQ(msg, consumer);
      return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
    }
-
-
-
-
-
-
-
-
-
-    return _swan_io_boxed.
-  }
-
-
-
-      queueName,
-      error
-    });
+    let rawValidation;
+    try {
+      rawValidation = responseSchema["~standard"].validate(response);
+    } catch (error) {
+      return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Error(new NonRetryableError("RPC response schema validation threw", error)));
+    }
+    const validationPromise = rawValidation instanceof Promise ? rawValidation : Promise.resolve(rawValidation);
+    return _swan_io_boxed.Future.fromPromise(validationPromise).mapError((error) => new NonRetryableError("RPC response schema validation threw", error)).mapOkToResult((validation) => {
+      if (validation.issues) return _swan_io_boxed.Result.Error(new NonRetryableError(`RPC response for "${String(rpcName)}" failed schema validation`, new _amqp_contract_core.MessageValidationError(String(rpcName), validation.issues)));
+      return _swan_io_boxed.Result.Ok(validation.value);
+    }).flatMapOk((validatedResponse) => this.amqpClient.publish("", replyTo, validatedResponse, {
+      correlationId,
+      contentType: "application/json"
+    }).mapErrorToResult((error) => _swan_io_boxed.Result.Error(new RetryableError("Failed to publish RPC response", error))).mapOkToResult((published) => published ? _swan_io_boxed.Result.Ok(void 0) : _swan_io_boxed.Result.Error(new RetryableError("Failed to publish RPC response: channel buffer full"))));
  }
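The reply convention `publishRpcResponse` targets, shown from the caller's side with raw amqplib (illustrative; queue name hypothetical, and the companion client package does this under the hood): RabbitMQ's direct reply-to requires consuming from `amq.rabbitmq.reply-to` in no-ack mode before publishing, with responses matched by `correlationId`.

```ts
import amqplib from "amqplib";

const connection = await amqplib.connect("amqp://localhost");
const channel = await connection.createChannel();

await channel.consume(
  "amq.rabbitmq.reply-to",
  (msg) => {
    if (msg?.properties.correlationId === "req-1") {
      console.log(JSON.parse(msg.content.toString())); // the validated response
    }
  },
  { noAck: true }, // mandatory for direct reply-to
);

channel.publish("", "rpc.queue", Buffer.from(JSON.stringify({ n: 1 })), {
  replyTo: "amq.rabbitmq.reply-to",
  correlationId: "req-1",
});
```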
   /**
-   *
-   *
-   * ┌─────────────────────────────────────────────────────────────────┐
-   * │ Retry Flow (Native RabbitMQ TTL + Wait queue pattern)           │
-   * ├─────────────────────────────────────────────────────────────────┤
-   * │                                                                 │
-   * │ 1. Handler throws any Error                                     │
-   * │    ↓                                                            │
-   * │ 2. Worker publishes to wait exchange                            |
-   * |    (with header `x-wait-queue` set to the wait queue name)      │
-   * │    ↓                                                            │
-   * │ 3. Wait exchange routes to wait queue                           │
-   * │    (with expiration: calculated backoff delay)                  │
-   * │    ↓                                                            │
-   * │ 4. Message waits in queue until TTL expires                     │
-   * │    ↓                                                            │
-   * │ 5. Expired message dead-lettered to retry exchange              |
-   * |    (with header `x-retry-queue` set to the main queue name)     │
-   * │    ↓                                                            │
-   * │ 6. Retry exchange routes back to main queue → RETRY             │
-   * │    ↓                                                            │
-   * │ 7. If retries exhausted: nack without requeue → DLQ             │
-   * │                                                                 │
-   * └─────────────────────────────────────────────────────────────────┘
+   * Process a single consumed message: validate, invoke handler, optionally
+   * publish the RPC response, record telemetry, and handle errors.
   */
-
-
-
-
-
+  processMessage(msg, view, name, handler) {
+    const { consumer, isRpc, responseSchema } = view;
+    const queueName = (0, _amqp_contract_contract.extractQueue)(consumer.queue).name;
+    const startTime = Date.now();
+    const span = (0, _amqp_contract_core.startConsumeSpan)(this.telemetry, queueName, String(name), { "messaging.rabbitmq.message.delivery_tag": msg.fields.deliveryTag });
+    let messageHandled = false;
+    let firstError;
+    return this.parseAndValidateMessage(msg, consumer, name).flatMapOk((validatedMessage) => handler(validatedMessage, msg).flatMapOk((handlerResponse) => {
+      if (isRpc && responseSchema) return this.publishRpcResponse(msg, queueName, name, responseSchema, handlerResponse).flatMapOk(() => {
+        this.logger?.info("Message consumed successfully", {
+          consumerName: String(name),
+          queueName
+        });
+        this.amqpClient.ack(msg);
+        messageHandled = true;
+        return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
       });
-
-
-
-    const queueName = (0, _amqp_contract_contract.extractQueue)(queueEntry).name;
-    const retryCount = msg.properties.headers?.["x-retry-count"] ?? 0;
-    if (retryCount >= config.maxRetries) {
-      this.logger?.error("Max retries exceeded, sending to DLQ (ttl-backoff mode)", {
-        consumerName,
-        queueName,
-        retryCount,
-        maxRetries: config.maxRetries,
-        error: error.message
+      this.logger?.info("Message consumed successfully", {
+        consumerName: String(name),
+        queueName
      });
-      this.
+      this.amqpClient.ack(msg);
+      messageHandled = true;
      return _swan_io_boxed.Future.value(_swan_io_boxed.Result.Ok(void 0));
-    }
-
-
-      consumerName,
-      queueName,
-      retryCount: retryCount + 1,
-      maxRetries: config.maxRetries,
-      delayMs,
-      error: error.message
-    });
-    return this.publishForRetry({
-      msg,
-      exchange: queueEntry.waitExchange.name,
-      routingKey: msg.fields.routingKey,
-      waitQueueName: queueEntry.waitQueue.name,
-      queueName,
-      delayMs,
-      error
-    });
-  }
-  /**
-   * Calculate retry delay with exponential backoff and optional jitter.
-   */
-  calculateRetryDelay(retryCount, config) {
-    const { initialDelayMs, maxDelayMs, backoffMultiplier, jitter } = config;
-    let delay = Math.min(initialDelayMs * Math.pow(backoffMultiplier, retryCount), maxDelayMs);
-    if (jitter) delay = delay * (.5 + Math.random() * .5);
-    return Math.floor(delay);
-  }
-  /**
-   * Parse message content for republishing.
-   * Prevents double JSON serialization by converting Buffer to object when possible.
-   */
-  parseMessageContentForRetry(msg, queueName) {
-    let content = msg.content;
-    if (!msg.properties.contentEncoding) try {
-      content = JSON.parse(msg.content.toString());
-    } catch (err) {
-      this.logger?.warn("Failed to parse message for retry, using original buffer", {
+    }).flatMapError((handlerError) => {
+      this.logger?.error("Error processing message", {
+        consumerName: String(name),
        queueName,
-
+        errorType: handlerError.name,
+        error: handlerError.message
      });
-
-
-
-
-
-
-
-
-
-
-
-
-
-    headers: {
-      ...msg.properties.headers,
-      "x-retry-count": newRetryCount,
-      "x-last-error": error.message,
-      "x-first-failure-timestamp": msg.properties.headers?.["x-first-failure-timestamp"] ?? Date.now(),
-      ...waitQueueName !== void 0 ? {
-        "x-wait-queue": waitQueueName,
-        "x-retry-queue": queueName
-      } : {}
-    }
-  }).mapOkToResult((published) => {
-    if (!published) {
-      this.logger?.error("Failed to publish message for retry (write buffer full)", {
-        queueName,
-        retryCount: newRetryCount,
-        ...delayMs !== void 0 ? { delayMs } : {}
-      });
-      return _swan_io_boxed.Result.Error(new _amqp_contract_core.TechnicalError("Failed to publish message for retry (write buffer full)"));
+      firstError = handlerError;
+      return handleError({
+        amqpClient: this.amqpClient,
+        logger: this.logger
+      }, handlerError, msg, String(name), consumer);
+    })).map((result) => {
+      const durationMs = Date.now() - startTime;
+      if (messageHandled) {
+        (0, _amqp_contract_core.endSpanSuccess)(span);
+        (0, _amqp_contract_core.recordConsumeMetric)(this.telemetry, queueName, String(name), true, durationMs);
+      } else {
+        (0, _amqp_contract_core.endSpanError)(span, result.isError() ? result.error : firstError ?? /* @__PURE__ */ new Error("Unknown error"));
+        (0, _amqp_contract_core.recordConsumeMetric)(this.telemetry, queueName, String(name), false, durationMs);
      }
-
-      queueName,
-      retryCount: newRetryCount,
-      ...delayMs !== void 0 ? { delayMs } : {}
-    });
-    return _swan_io_boxed.Result.Ok(void 0);
+      return result;
    });
  }
  /**
-   *
-   * Nacks the message without requeue, relying on DLX configuration.
+   * Consume messages one at a time.
   */
-
-    const
-
-
-
-
-
-
-
+  consumeSingle(name, view, handler) {
+    const queueName = (0, _amqp_contract_contract.extractQueue)(view.consumer.queue).name;
+    return this.amqpClient.consume(queueName, async (msg) => {
+      if (msg === null) {
+        this.logger?.warn("Consumer cancelled by server", {
+          consumerName: String(name),
+          queueName
+        });
+        return;
+      }
+      await this.processMessage(msg, view, name, handler).toPromise();
+    }, this.consumerOptions[name]).tapOk((consumerTag) => {
+      this.consumerTags.add(consumerTag);
+    }).mapError((error) => new _amqp_contract_core.TechnicalError(`Failed to start consuming for "${String(name)}"`, error)).mapOk(() => void 0);
  }
 };
 //#endregion