@amqp-contract/worker 0.20.0 → 0.22.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -7
- package/dist/index.cjs +415 -291
- package/dist/index.d.cts +123 -149
- package/dist/index.d.cts.map +1 -1
- package/dist/index.d.mts +122 -148
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +409 -284
- package/dist/index.mjs.map +1 -1
- package/docs/index.md +90 -115
- package/package.json +29 -29
package/dist/index.mjs
CHANGED
@@ -1,9 +1,40 @@
+import { extractConsumer, extractQueue, isQueueWithTtlBackoffInfrastructure } from "@amqp-contract/contract";
 import { AmqpClient, MessageValidationError, TechnicalError, defaultTelemetryProvider, endSpanError, endSpanSuccess, recordConsumeMetric, startConsumeSpan } from "@amqp-contract/core";
-import { extractConsumer } from "@amqp-contract/contract";
 import { Future, Result } from "@swan-io/boxed";
 import { gunzip, inflate } from "node:zlib";
 import { promisify } from "node:util";
-
+//#region src/decompression.ts
+const gunzipAsync = promisify(gunzip);
+const inflateAsync = promisify(inflate);
+/**
+ * Supported content encodings for message decompression.
+ */
+const SUPPORTED_ENCODINGS = ["gzip", "deflate"];
+/**
+ * Type guard to check if a string is a supported encoding.
+ */
+function isSupportedEncoding(encoding) {
+	return SUPPORTED_ENCODINGS.includes(encoding.toLowerCase());
+}
+/**
+ * Decompress a buffer based on the content-encoding header.
+ *
+ * @param buffer - The buffer to decompress
+ * @param contentEncoding - The content-encoding header value (e.g., 'gzip', 'deflate')
+ * @returns A Future with the decompressed buffer or a TechnicalError
+ *
+ * @internal
+ */
+function decompressBuffer(buffer, contentEncoding) {
+	if (!contentEncoding) return Future.value(Result.Ok(buffer));
+	const normalizedEncoding = contentEncoding.toLowerCase();
+	if (!isSupportedEncoding(normalizedEncoding)) return Future.value(Result.Error(new TechnicalError(`Unsupported content-encoding: "${contentEncoding}". Supported encodings are: ${SUPPORTED_ENCODINGS.join(", ")}. Please check your publisher configuration.`)));
+	switch (normalizedEncoding) {
+		case "gzip": return Future.fromPromise(gunzipAsync(buffer)).mapError((error) => new TechnicalError("Failed to decompress gzip", error));
+		case "deflate": return Future.fromPromise(inflateAsync(buffer)).mapError((error) => new TechnicalError("Failed to decompress deflate", error));
+	}
+}
+//#endregion
 //#region src/errors.ts
 /**
  * Retryable errors - transient failures that may succeed on retry
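
The new `decompressBuffer` helper is marked `@internal`, but its contract is easy to read off the hunk above: a missing `contentEncoding` passes the buffer through unchanged, an unsupported encoding becomes a `TechnicalError`, and `gzip`/`deflate` resolve asynchronously. A minimal sketch of consuming the resulting `Future<Result<Buffer, TechnicalError>>`, assuming direct access to the function (it is not exported, so this is illustration only):

```ts
import { gzipSync } from "node:zlib";

// Illustration only: decompressBuffer is @internal and not exported; this
// sketch just shows the Future<Result<Buffer, TechnicalError>> shape above.
const compressed = gzipSync(Buffer.from(JSON.stringify({ orderId: "42" })));

decompressBuffer(compressed, "gzip").map((result) =>
	result.match({
		Ok: (buffer) => console.log(JSON.parse(buffer.toString())), // { orderId: "42" }
		Error: (error) => console.error(error.message), // e.g. unsupported content-encoding
	}),
);
```
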
@@ -164,40 +195,231 @@ function retryable(message, cause) {
 function nonRetryable(message, cause) {
 	return new NonRetryableError(message, cause);
 }
-
 //#endregion
-//#region src/
-const gunzipAsync = promisify(gunzip);
-const inflateAsync = promisify(inflate);
+//#region src/retry.ts
 /**
- *
-
-
-
- *
+ * Handle error in message processing with retry logic.
+ *
+ * Flow depends on retry mode:
+ *
+ * **immediate-requeue mode:**
+ * 1. If NonRetryableError -> send directly to DLQ (no retry)
+ * 2. If max retries exceeded -> send to DLQ
+ * 3. Otherwise -> requeue immediately for retry
+ *
+ * **ttl-backoff mode:**
+ * 1. If NonRetryableError -> send directly to DLQ (no retry)
+ * 2. If max retries exceeded -> send to DLQ
+ * 3. Otherwise -> publish to wait queue with TTL for retry
+ *
+ * **none mode (no retry config):**
+ * 1. send directly to DLQ (no retry)
  */
-function
-
+function handleError(ctx, error, msg, consumerName, consumer) {
+	if (error instanceof NonRetryableError) {
+		ctx.logger?.error("Non-retryable error, sending to DLQ immediately", {
+			consumerName,
+			errorType: error.name,
+			error: error.message
+		});
+		sendToDLQ(ctx, msg, consumer);
+		return Future.value(Result.Ok(void 0));
+	}
+	const config = extractQueue(consumer.queue).retry;
+	if (config.mode === "immediate-requeue") return handleErrorImmediateRequeue(ctx, error, msg, consumerName, consumer, config);
+	if (config.mode === "ttl-backoff") return handleErrorTtlBackoff(ctx, error, msg, consumerName, consumer, config);
+	ctx.logger?.warn("Retry disabled (none mode), sending to DLQ", {
+		consumerName,
+		error: error.message
+	});
+	sendToDLQ(ctx, msg, consumer);
+	return Future.value(Result.Ok(void 0));
 }
 /**
- *
+ * Handle error by requeuing immediately.
  *
- *
- *
- *
+ * For quorum queues, messages are requeued with `nack(requeue=true)`, and the worker tracks delivery count via the native RabbitMQ `x-delivery-count` header.
+ * For classic queues, messages are re-published on the same queue, and the worker tracks delivery count via a custom `x-retry-count` header.
+ * When the count exceeds `maxRetries`, the message is automatically dead-lettered (if DLX is configured) or dropped.
  *
- *
+ * This is simpler than TTL-based retry but provides immediate retries only.
  */
-function
-
-const
-
-
-
-
+function handleErrorImmediateRequeue(ctx, error, msg, consumerName, consumer, config) {
+	const queue = extractQueue(consumer.queue);
+	const queueName = queue.name;
+	const retryCount = queue.type === "quorum" ? msg.properties.headers?.["x-delivery-count"] ?? 0 : msg.properties.headers?.["x-retry-count"] ?? 0;
+	if (retryCount >= config.maxRetries) {
+		ctx.logger?.error("Max retries exceeded, sending to DLQ (immediate-requeue mode)", {
+			consumerName,
+			queueName,
+			retryCount,
+			maxRetries: config.maxRetries,
+			error: error.message
+		});
+		sendToDLQ(ctx, msg, consumer);
+		return Future.value(Result.Ok(void 0));
 	}
+	ctx.logger?.warn("Retrying message (immediate-requeue mode)", {
+		consumerName,
+		queueName,
+		retryCount,
+		maxRetries: config.maxRetries,
+		error: error.message
+	});
+	if (queue.type === "quorum") {
+		ctx.amqpClient.nack(msg, false, true);
+		return Future.value(Result.Ok(void 0));
+	} else return publishForRetry(ctx, {
+		msg,
+		exchange: msg.fields.exchange,
+		routingKey: msg.fields.routingKey,
+		queueName,
+		error
+	});
+}
+/**
+ * Handle error using TTL + wait queue pattern for exponential backoff.
+ *
+ * ┌─────────────────────────────────────────────────────────────────┐
+ * │ Retry Flow (Native RabbitMQ TTL + Wait queue pattern)           │
+ * ├─────────────────────────────────────────────────────────────────┤
+ * │                                                                 │
+ * │ 1. Handler throws any Error                                     │
+ * │    ↓                                                            │
+ * │ 2. Worker publishes to wait exchange                            │
+ * │    (with header `x-wait-queue` set to the wait queue name)      │
+ * │    ↓                                                            │
+ * │ 3. Wait exchange routes to wait queue                           │
+ * │    (with expiration: calculated backoff delay)                  │
+ * │    ↓                                                            │
+ * │ 4. Message waits in queue until TTL expires                     │
+ * │    ↓                                                            │
+ * │ 5. Expired message dead-lettered to retry exchange              │
+ * │    (with header `x-retry-queue` set to the main queue name)     │
+ * │    ↓                                                            │
+ * │ 6. Retry exchange routes back to main queue → RETRY             │
+ * │    ↓                                                            │
+ * │ 7. If retries exhausted: nack without requeue → DLQ             │
+ * │                                                                 │
+ * └─────────────────────────────────────────────────────────────────┘
+ */
+function handleErrorTtlBackoff(ctx, error, msg, consumerName, consumer, config) {
+	if (!isQueueWithTtlBackoffInfrastructure(consumer.queue)) {
+		ctx.logger?.error("Queue does not have TTL-backoff infrastructure", {
+			consumerName,
+			queueName: consumer.queue.name
+		});
+		return Future.value(Result.Error(new TechnicalError("Queue does not have TTL-backoff infrastructure")));
+	}
+	const queueEntry = consumer.queue;
+	const queueName = extractQueue(queueEntry).name;
+	const retryCount = msg.properties.headers?.["x-retry-count"] ?? 0;
+	if (retryCount >= config.maxRetries) {
+		ctx.logger?.error("Max retries exceeded, sending to DLQ (ttl-backoff mode)", {
+			consumerName,
+			queueName,
+			retryCount,
+			maxRetries: config.maxRetries,
+			error: error.message
+		});
+		sendToDLQ(ctx, msg, consumer);
+		return Future.value(Result.Ok(void 0));
+	}
+	const delayMs = calculateRetryDelay(retryCount, config);
+	ctx.logger?.warn("Retrying message (ttl-backoff mode)", {
+		consumerName,
+		queueName,
+		retryCount: retryCount + 1,
+		maxRetries: config.maxRetries,
+		delayMs,
+		error: error.message
+	});
+	return publishForRetry(ctx, {
+		msg,
+		exchange: queueEntry.waitExchange.name,
+		routingKey: msg.fields.routingKey,
+		waitQueueName: queueEntry.waitQueue.name,
+		queueName,
+		delayMs,
+		error
+	});
+}
+/**
+ * Calculate retry delay with exponential backoff and optional jitter.
+ */
+function calculateRetryDelay(retryCount, config) {
+	const { initialDelayMs, maxDelayMs, backoffMultiplier, jitter } = config;
+	let delay = Math.min(initialDelayMs * Math.pow(backoffMultiplier, retryCount), maxDelayMs);
+	if (jitter) delay = delay * (.5 + Math.random() * .5);
+	return Math.floor(delay);
+}
+/**
+ * Parse message content for republishing.
+ * Prevents double JSON serialization by converting Buffer to object when possible.
+ */
+function parseMessageContentForRetry(ctx, msg, queueName) {
+	let content = msg.content;
+	if (!msg.properties.contentEncoding) try {
+		content = JSON.parse(msg.content.toString());
+	} catch (err) {
+		ctx.logger?.warn("Failed to parse message for retry, using original buffer", {
+			queueName,
+			error: err
+		});
+	}
+	return content;
+}
+/**
+ * Publish message with an incremented x-retry-count header and optional TTL.
+ */
+function publishForRetry(ctx, { msg, exchange, routingKey, queueName, waitQueueName, delayMs, error }) {
+	const newRetryCount = (msg.properties.headers?.["x-retry-count"] ?? 0) + 1;
+	ctx.amqpClient.ack(msg);
+	const content = parseMessageContentForRetry(ctx, msg, queueName);
+	return ctx.amqpClient.publish(exchange, routingKey, content, {
+		...msg.properties,
+		...delayMs !== void 0 ? { expiration: delayMs.toString() } : {},
+		headers: {
+			...msg.properties.headers,
+			"x-retry-count": newRetryCount,
+			"x-last-error": error.message,
+			"x-first-failure-timestamp": msg.properties.headers?.["x-first-failure-timestamp"] ?? Date.now(),
+			...waitQueueName !== void 0 ? {
+				"x-wait-queue": waitQueueName,
+				"x-retry-queue": queueName
+			} : {}
+		}
+	}).mapOkToResult((published) => {
+		if (!published) {
+			ctx.logger?.error("Failed to publish message for retry (write buffer full)", {
+				queueName,
+				retryCount: newRetryCount,
+				...delayMs !== void 0 ? { delayMs } : {}
+			});
+			return Result.Error(new TechnicalError("Failed to publish message for retry (write buffer full)"));
+		}
+		ctx.logger?.info("Message published for retry", {
+			queueName,
+			retryCount: newRetryCount,
+			...delayMs !== void 0 ? { delayMs } : {}
+		});
+		return Result.Ok(void 0);
+	});
+}
+/**
+ * Send message to dead letter queue.
+ * Nacks the message without requeue, relying on DLX configuration.
+ */
+function sendToDLQ(ctx, msg, consumer) {
+	const queue = extractQueue(consumer.queue);
+	const queueName = queue.name;
+	if (!(queue.deadLetter !== void 0)) ctx.logger?.warn("Queue does not have DLX configured - message will be lost on nack", { queueName });
+	ctx.logger?.info("Sending message to DLQ", {
+		queueName,
+		deliveryTag: msg.fields.deliveryTag
+	});
+	ctx.amqpClient.nack(msg, false, false);
 }
-
 //#endregion
 //#region src/worker.ts
 /**
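
A worked example of the `calculateRetryDelay` backoff above, using one hypothetical config (the real defaults come from `defineQueue` in `@amqp-contract/contract`; these numbers are purely illustrative):

```ts
// Hypothetical retry config; not the package defaults.
const config = { initialDelayMs: 1_000, maxDelayMs: 30_000, backoffMultiplier: 2, jitter: false };

// delay(n) = min(initialDelayMs * backoffMultiplier^n, maxDelayMs)
for (let retryCount = 0; retryCount <= 5; retryCount += 1) {
	const delay = Math.min(
		config.initialDelayMs * Math.pow(config.backoffMultiplier, retryCount),
		config.maxDelayMs,
	);
	console.log(retryCount, Math.floor(delay)); // 0→1000, 1→2000, 2→4000, 3→8000, 4→16000, 5→30000 (capped)
}
// With jitter: true, each delay is additionally scaled by a random factor in [0.5, 1.0).
```
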
@@ -220,7 +442,7 @@ function isHandlerTuple(entry) {
  * import { defineQueue, defineMessage, defineContract, defineConsumer } from '@amqp-contract/contract';
  * import { z } from 'zod';
  *
- * const orderQueue = defineQueue('order-processing'
+ * const orderQueue = defineQueue('order-processing');
  * const orderMessage = defineMessage(z.object({
  *   orderId: z.string(),
  *   amount: z.number()
@@ -249,31 +471,67 @@
  */
 var TypedAmqpWorker = class TypedAmqpWorker {
 	/**
-	 * Internal handler storage
+	 * Internal handler storage. Keyed by handler name (consumer or RPC); the
+	 * stored function signature is widened so the dispatch loop can call it
+	 * uniformly. The actual handler is type-checked at the worker's public API
+	 * boundary via `WorkerInferHandlers<TContract>`.
 	 */
 	actualHandlers;
 	consumerOptions;
 	consumerTags = /* @__PURE__ */ new Set();
 	telemetry;
-	constructor(contract, amqpClient, handlers, logger, telemetry) {
+	constructor(contract, amqpClient, handlers, defaultConsumerOptions, logger, telemetry) {
 		this.contract = contract;
 		this.amqpClient = amqpClient;
+		this.defaultConsumerOptions = defaultConsumerOptions;
 		this.logger = logger;
 		this.telemetry = telemetry ?? defaultTelemetryProvider;
 		this.actualHandlers = {};
 		this.consumerOptions = {};
 		const handlersRecord = handlers;
-		for (const
-		const handlerEntry = handlersRecord[
-		const
+		for (const handlerName of Object.keys(handlersRecord)) {
+			const handlerEntry = handlersRecord[handlerName];
+			const typedName = handlerName;
 			if (isHandlerTuple(handlerEntry)) {
 				const [handler, options] = handlerEntry;
-				this.actualHandlers[
-				this.consumerOptions[
-
+				this.actualHandlers[typedName] = handler;
+				this.consumerOptions[typedName] = {
+					...this.defaultConsumerOptions,
+					...options
+				};
+			} else {
+				this.actualHandlers[typedName] = handlerEntry;
+				this.consumerOptions[typedName] = this.defaultConsumerOptions;
+			}
 		}
 	}
 	/**
+	 * Build a `ConsumerDefinition`-shaped view for a handler name, regardless
+	 * of whether it came from `contract.consumers` or `contract.rpcs`. The
+	 * dispatch path treats both uniformly; the returned `isRpc` flag (and the
+	 * accompanying `responseSchema`) tells `processMessage` whether to validate
+	 * the handler return value and publish a reply.
+	 */
+	resolveConsumerView(name) {
+		const rpcs = this.contract.rpcs;
+		if (rpcs && Object.hasOwn(rpcs, name)) {
+			const rpc = rpcs[name];
+			return {
+				consumer: {
+					queue: rpc.queue,
+					message: rpc.request
+				},
+				isRpc: true,
+				responseSchema: rpc.response.payload
+			};
+		}
+		const consumerEntry = this.contract.consumers[name];
+		return {
+			consumer: extractConsumer(consumerEntry),
+			isRpc: false
+		};
+	}
+	/**
 	 * Create a type-safe AMQP worker from a contract.
 	 *
 	 * Connection management (including automatic reconnection) is handled internally
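
The constructor now spreads `defaultConsumerOptions` first and any per-handler tuple options second, so per-handler keys win. A small illustration of that merge (the option keys here are placeholders, not a documented shape):

```ts
// Spread order from the constructor above: defaults first, tuple options second.
const defaultConsumerOptions = { noAck: false, exclusive: false }; // placeholder keys
const tupleOptions = { exclusive: true };

const effective = { ...defaultConsumerOptions, ...tupleOptions };
// effective = { noAck: false, exclusive: true }: per-handler keys override defaults key-by-key
```
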
@@ -298,12 +556,18 @@ var TypedAmqpWorker = class TypedAmqpWorker {
 	 * }).resultToPromise();
 	 * ```
 	 */
-	static create({ contract, handlers, urls, connectionOptions, logger, telemetry }) {
+	static create({ contract, handlers, urls, connectionOptions, defaultConsumerOptions, logger, telemetry, connectTimeoutMs }) {
 		const worker = new TypedAmqpWorker(contract, new AmqpClient(contract, {
 			urls,
-			connectionOptions
-
-
+			connectionOptions,
+			connectTimeoutMs
+		}), handlers, defaultConsumerOptions ?? {}, logger, telemetry);
+		return worker.waitForConnectionReady().flatMapOk(() => worker.consumeAll()).flatMap((result) => result.match({
+			Ok: () => Future.value(Result.Ok(worker)),
+			Error: (error) => worker.close().tapError((closeError) => {
+				logger?.warn("Failed to close worker after setup failure", { error: closeError });
+			}).map(() => Result.Error(error))
+		}));
 	}
 	/**
 	 * Close the AMQP channel and connection.
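
A hedged usage sketch of the widened `create` signature above. `contract` and `handlers` stand in for values built with the `defineContract`/`defineHandlers` APIs referenced elsewhere in this diff, the URL is a placeholder, and the `defaultConsumerOptions` shape is an assumption (it is forwarded to `amqpClient.consume`):

```ts
import { TypedAmqpWorker } from "@amqp-contract/worker";

// contract and handlers are placeholders built with defineContract / defineHandlers.
const worker = await TypedAmqpWorker.create({
	contract,
	handlers,
	urls: ["amqp://localhost"], // placeholder broker URL
	defaultConsumerOptions: { noAck: false }, // assumed shape, forwarded to amqpClient.consume
	connectTimeoutMs: 5_000,
}).resultToPromise();
```

Note that per the new `create` body, a failure during setup now closes the half-started worker before the error is surfaced, instead of leaking the connection.
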
@@ -333,40 +597,25 @@ var TypedAmqpWorker = class TypedAmqpWorker {
 		}).flatMapOk(() => this.amqpClient.close()).mapOk(() => void 0);
 	}
 	/**
-	 *
-	 * Defaults are applied in the contract's defineQueue, so we just return the config.
-	 */
-	getRetryConfigForConsumer(consumer) {
-		return consumer.queue.retry;
-	}
-	/**
-	 * Start consuming messages for all consumers.
-	 * TypeScript guarantees consumers exist (handlers require matching consumers).
+	 * Start consuming for every entry in `contract.consumers` and `contract.rpcs`.
 	 */
 	consumeAll() {
-		const
-		const
-		const
-
-			return prefetch ? Math.max(max, prefetch) : max;
-		}, 0);
-		if (maxPrefetch > 0) this.amqpClient.addSetup(async (channel) => {
-			await channel.prefetch(maxPrefetch);
-		});
-		return Future.all(consumerNames.map((name) => this.consume(name))).map(Result.all).mapOk(() => void 0);
+		const consumerNames = Object.keys(this.contract.consumers ?? {});
+		const rpcNames = Object.keys(this.contract.rpcs ?? {});
+		const allNames = [...consumerNames, ...rpcNames];
+		return Future.all(allNames.map((name) => this.consume(name))).map(Result.all).mapOk(() => void 0);
 	}
 	waitForConnectionReady() {
 		return this.amqpClient.waitForConnect();
 	}
 	/**
-	 * Start consuming messages for a specific
-	 *
+	 * Start consuming messages for a specific handler — either a `consumers`
+	 * entry (regular event/command consumer) or an `rpcs` entry (RPC server).
 	 */
-	consume(
-	const
-	const
-
-		return this.consumeSingle(consumerName, consumer, handler);
+	consume(name) {
+		const view = this.resolveConsumerView(name);
+		const handler = this.actualHandlers[name];
+		return this.consumeSingle(name, view, handler);
 	}
 	/**
 	 * Validate data against a Standard Schema and handle errors.
@@ -391,9 +640,10 @@
 	 * @returns Ok with validated message (payload + headers), or Error (message already nacked)
 	 */
 	parseAndValidateMessage(msg, consumer, consumerName) {
+		const queue = extractQueue(consumer.queue);
 		const context = {
 			consumerName: String(consumerName),
-			queueName:
+			queueName: queue.name
 		};
 		const nackAndError = (message, error) => {
 			this.logger?.error(message, {
@@ -423,244 +673,119 @@
 		}).map(Result.allFromDict);
 	}
 	/**
-	 *
-
-
-
-		return this.amqpClient.consume(queueName, async (msg) => {
-			if (msg === null) {
-				this.logger?.warn("Consumer cancelled by server", {
-					consumerName: String(consumerName),
-					queueName
-				});
-				return;
-			}
-			const startTime = Date.now();
-			const span = startConsumeSpan(this.telemetry, queueName, String(consumerName), { "messaging.rabbitmq.message.delivery_tag": msg.fields.deliveryTag });
-			await this.parseAndValidateMessage(msg, consumer, consumerName).flatMapOk((validatedMessage) => handler(validatedMessage, msg).flatMapOk(() => {
-				this.logger?.info("Message consumed successfully", {
-					consumerName: String(consumerName),
-					queueName
-				});
-				this.amqpClient.ack(msg);
-				const durationMs = Date.now() - startTime;
-				endSpanSuccess(span);
-				recordConsumeMetric(this.telemetry, queueName, String(consumerName), true, durationMs);
-				return Future.value(Result.Ok(void 0));
-			}).flatMapError((handlerError) => {
-				this.logger?.error("Error processing message", {
-					consumerName: String(consumerName),
-					queueName,
-					errorType: handlerError.name,
-					error: handlerError.message
-				});
-				const durationMs = Date.now() - startTime;
-				endSpanError(span, handlerError);
-				recordConsumeMetric(this.telemetry, queueName, String(consumerName), false, durationMs);
-				return this.handleError(handlerError, msg, String(consumerName), consumer);
-			})).tapError(() => {
-				const durationMs = Date.now() - startTime;
-				endSpanError(span, /* @__PURE__ */ new Error("Message validation failed"));
-				recordConsumeMetric(this.telemetry, queueName, String(consumerName), false, durationMs);
-			}).toPromise();
-		}).tapOk((consumerTag) => {
-			this.consumerTags.add(consumerTag);
-		}).mapError((error) => new TechnicalError(`Failed to start consuming for "${String(consumerName)}"`, error)).mapOk(() => void 0);
-	}
-	/**
-	 * Handle error in message processing with retry logic.
-	 *
-	 * Flow depends on retry mode:
+	 * Validate an RPC handler's response and publish it back to the caller's reply
+	 * queue with the same `correlationId`. Published via the AMQP default exchange
+	 * with `routingKey = msg.properties.replyTo`, which works for both
+	 * `amq.rabbitmq.reply-to` and any anonymous queue declared by the caller.
 	 *
-	 *
-	 *
-	 *
-	 *
-	 * **ttl-backoff mode:**
-	 * 1. If NonRetryableError -> send directly to DLQ (no retry)
-	 * 2. If max retries exceeded -> send to DLQ
-	 * 3. Otherwise -> publish to wait queue with TTL for retry
-	 *
-	 * **Legacy mode (no retry config):**
-	 * 1. nack with requeue=true (immediate requeue)
+	 * Validation errors are surfaced as NonRetryableError (handler returned the
+	 * wrong shape — retrying the same input will not fix it). Publish errors are
+	 * surfaced as RetryableError so the worker's existing retry logic applies.
 	 */
-
-
-
-
-
-
+	publishRpcResponse(msg, queueName, rpcName, responseSchema, response) {
+		const replyTo = msg.properties.replyTo;
+		const correlationId = msg.properties.correlationId;
+		if (typeof replyTo !== "string" || replyTo.length === 0) {
+			this.logger?.warn("RPC handler returned a response but the incoming message has no replyTo; dropping response", {
+				rpcName: String(rpcName),
+				queueName
 			});
-			this.sendToDLQ(msg, consumer);
 			return Future.value(Result.Ok(void 0));
 		}
-
-
-
-
-
-	 * Handle error using quorum queue's native delivery limit feature.
-	 *
-	 * Simply requeues the message with nack(requeue=true). RabbitMQ automatically:
-	 * - Increments x-delivery-count header
-	 * - Dead-letters the message when count exceeds x-delivery-limit
-	 *
-	 * This is simpler than TTL-based retry but provides immediate retries only.
-	 */
-	handleErrorQuorumNative(error, msg, consumerName, consumer) {
-		const queue = consumer.queue;
-		const queueName = queue.name;
-		const deliveryCount = msg.properties.headers?.["x-delivery-count"] ?? 0;
-		const deliveryLimit = queue.type === "quorum" ? queue.deliveryLimit : void 0;
-		const attemptsBeforeDeadLetter = deliveryLimit !== void 0 ? Math.max(0, deliveryLimit - deliveryCount - 1) : "unknown";
-		if (deliveryLimit !== void 0 && deliveryCount >= deliveryLimit - 1) this.logger?.warn("Message at final delivery attempt (quorum-native mode)", {
-			consumerName,
-			queueName,
-			deliveryCount,
-			deliveryLimit,
-			willDeadLetterOnNextFailure: deliveryCount === deliveryLimit - 1,
-			alreadyExceededLimit: deliveryCount >= deliveryLimit,
-			error: error.message
-		});
-		else this.logger?.warn("Retrying message (quorum-native mode)", {
-			consumerName,
-			queueName,
-			deliveryCount,
-			deliveryLimit,
-			attemptsBeforeDeadLetter,
-			error: error.message
-		});
-		this.amqpClient.nack(msg, false, true);
-		return Future.value(Result.Ok(void 0));
-	}
-	/**
-	 * Handle error using TTL + wait queue pattern for exponential backoff.
-	 */
-	handleErrorTtlBackoff(error, msg, consumerName, consumer, config) {
-		const retryCount = msg.properties.headers?.["x-retry-count"] ?? 0;
-		if (retryCount >= config.maxRetries) {
-			this.logger?.error("Max retries exceeded, sending to DLQ", {
-				consumerName,
-				retryCount,
-				maxRetries: config.maxRetries,
-				error: error.message
+		if (typeof correlationId !== "string" || correlationId.length === 0) {
+			this.logger?.warn("RPC handler returned a response but the incoming message has no correlationId; dropping response", {
+				rpcName: String(rpcName),
+				queueName,
+				replyTo
 			});
-			this.sendToDLQ(msg, consumer);
 			return Future.value(Result.Ok(void 0));
 		}
-
-
-
-
-
-			error: error.message
-		});
-		return this.publishForRetry(msg, consumer, retryCount + 1, delayMs, error);
-	}
-	/**
-	 * Calculate retry delay with exponential backoff and optional jitter.
-	 */
-	calculateRetryDelay(retryCount, config) {
-		const { initialDelayMs, maxDelayMs, backoffMultiplier, jitter } = config;
-		let delay = Math.min(initialDelayMs * Math.pow(backoffMultiplier, retryCount), maxDelayMs);
-		if (jitter) delay = delay * (.5 + Math.random() * .5);
-		return Math.floor(delay);
-	}
-	/**
-	 * Parse message content for republishing.
-	 * Prevents double JSON serialization by converting Buffer to object when possible.
-	 */
-	parseMessageContentForRetry(msg, queueName) {
-		let content = msg.content;
-		if (!msg.properties.contentEncoding) try {
-			content = JSON.parse(msg.content.toString());
-		} catch (err) {
-			this.logger?.warn("Failed to parse message for retry, using original buffer", {
-				queueName,
-				error: err
-			});
+		let rawValidation;
+		try {
+			rawValidation = responseSchema["~standard"].validate(response);
+		} catch (error) {
+			return Future.value(Result.Error(new NonRetryableError("RPC response schema validation threw", error)));
 		}
-
+		const validationPromise = rawValidation instanceof Promise ? rawValidation : Promise.resolve(rawValidation);
+		return Future.fromPromise(validationPromise).mapError((error) => new NonRetryableError("RPC response schema validation threw", error)).mapOkToResult((validation) => {
+			if (validation.issues) return Result.Error(new NonRetryableError(`RPC response for "${String(rpcName)}" failed schema validation`, new MessageValidationError(String(rpcName), validation.issues)));
+			return Result.Ok(validation.value);
+		}).flatMapOk((validatedResponse) => this.amqpClient.publish("", replyTo, validatedResponse, {
+			correlationId,
+			contentType: "application/json"
+		}).mapErrorToResult((error) => Result.Error(new RetryableError("Failed to publish RPC response", error))).mapOkToResult((published) => published ? Result.Ok(void 0) : Result.Error(new RetryableError("Failed to publish RPC response: channel buffer full"))));
 	}
 	/**
-	 *
-	 *
-	 * ┌─────────────────────────────────────────────────────────────────┐
-	 * │ Retry Flow (Native RabbitMQ TTL + DLX Pattern)                   │
-	 * ├─────────────────────────────────────────────────────────────────┤
-	 * │                                                                 │
-	 * │ 1. Handler throws any Error                                     │
-	 * │    ↓                                                            │
-	 * │ 2. Worker publishes to DLX with routing key: {queue}-wait       │
-	 * │    ↓                                                            │
-	 * │ 3. DLX routes to wait queue: {queue}-wait                       │
-	 * │    (with expiration: calculated backoff delay)                  │
-	 * │    ↓                                                            │
-	 * │ 4. Message waits in queue until TTL expires                     │
-	 * │    ↓                                                            │
-	 * │ 5. Expired message dead-lettered to DLX                         │
-	 * │    (with routing key: {queue})                                  │
-	 * │    ↓                                                            │
-	 * │ 6. DLX routes back to main queue → RETRY                        │
-	 * │    ↓                                                            │
-	 * │ 7. If retries exhausted: nack without requeue → DLQ             │
-	 * │                                                                 │
-	 * └─────────────────────────────────────────────────────────────────┘
+	 * Process a single consumed message: validate, invoke handler, optionally
+	 * publish the RPC response, record telemetry, and handle errors.
 	 */
-
-		const
-		const
-
-
-
-
-
-
-
-
-
-		return this.amqpClient.publish(dlxName, waitRoutingKey, content, {
-			...msg.properties,
-			expiration: delayMs.toString(),
-			headers: {
-				...msg.properties.headers,
-				"x-retry-count": newRetryCount,
-				"x-last-error": error.message,
-				"x-first-failure-timestamp": msg.properties.headers?.["x-first-failure-timestamp"] ?? Date.now()
-			}
-		}).mapOkToResult((published) => {
-			if (!published) {
-				this.logger?.error("Failed to publish message for retry (write buffer full)", {
-					queueName,
-					waitRoutingKey,
-					retryCount: newRetryCount
+	processMessage(msg, view, name, handler) {
+		const { consumer, isRpc, responseSchema } = view;
+		const queueName = extractQueue(consumer.queue).name;
+		const startTime = Date.now();
+		const span = startConsumeSpan(this.telemetry, queueName, String(name), { "messaging.rabbitmq.message.delivery_tag": msg.fields.deliveryTag });
+		let messageHandled = false;
+		let firstError;
+		return this.parseAndValidateMessage(msg, consumer, name).flatMapOk((validatedMessage) => handler(validatedMessage, msg).flatMapOk((handlerResponse) => {
+			if (isRpc && responseSchema) return this.publishRpcResponse(msg, queueName, name, responseSchema, handlerResponse).flatMapOk(() => {
+				this.logger?.info("Message consumed successfully", {
+					consumerName: String(name),
+					queueName
 				});
-
-
-
+				this.amqpClient.ack(msg);
+				messageHandled = true;
+				return Future.value(Result.Ok(void 0));
+			});
+			this.logger?.info("Message consumed successfully", {
+				consumerName: String(name),
+				queueName
+			});
+			this.amqpClient.ack(msg);
+			messageHandled = true;
+			return Future.value(Result.Ok(void 0));
+		}).flatMapError((handlerError) => {
+			this.logger?.error("Error processing message", {
+				consumerName: String(name),
 				queueName,
-
-
-				delayMs
+				errorType: handlerError.name,
+				error: handlerError.message
 			});
-
+			firstError = handlerError;
+			return handleError({
+				amqpClient: this.amqpClient,
+				logger: this.logger
+			}, handlerError, msg, String(name), consumer);
+		})).map((result) => {
+			const durationMs = Date.now() - startTime;
+			if (messageHandled) {
+				endSpanSuccess(span);
+				recordConsumeMetric(this.telemetry, queueName, String(name), true, durationMs);
+			} else {
+				endSpanError(span, result.isError() ? result.error : firstError ?? /* @__PURE__ */ new Error("Unknown error"));
+				recordConsumeMetric(this.telemetry, queueName, String(name), false, durationMs);
+			}
+			return result;
 		});
 	}
 	/**
-	 *
-	 * Nacks the message without requeue, relying on DLX configuration.
+	 * Consume messages one at a time.
 	 */
-
-		const queueName = consumer.queue.name;
-
-
-
-
-
-
+	consumeSingle(name, view, handler) {
+		const queueName = extractQueue(view.consumer.queue).name;
+		return this.amqpClient.consume(queueName, async (msg) => {
+			if (msg === null) {
+				this.logger?.warn("Consumer cancelled by server", {
+					consumerName: String(name),
+					queueName
+				});
+				return;
+			}
+			await this.processMessage(msg, view, name, handler).toPromise();
+		}, this.consumerOptions[name]).tapOk((consumerTag) => {
+			this.consumerTags.add(consumerTag);
+		}).mapError((error) => new TechnicalError(`Failed to start consuming for "${String(name)}"`, error)).mapOk(() => void 0);
 	}
 };
-
 //#endregion
 //#region src/handlers.ts
 /**
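
Which branch of `handleError` a failure takes hinges on the error type the handler surfaces, so the exported `retryable`/`nonRetryable` helpers (see the export list at the end of this file) are the user-facing lever. A hedged sketch of a handler body; the `saveOrder` stub and payload shape are hypothetical:

```ts
import { Future, Result } from "@swan-io/boxed";
import { nonRetryable, retryable } from "@amqp-contract/worker";

// Hypothetical persistence stub, standing in for any transiently failing I/O.
const saveOrder = (order: { orderId: string }) =>
	Future.value(Result.Ok<undefined, Error>(undefined));

// Validation-style failures will fail again on the same input, so they go to
// the DLQ immediately via nonRetryable; transient failures go through the
// queue's retry mode (immediate-requeue or ttl-backoff) via retryable.
const handleOrder = ({ payload }: { payload: { orderId: string } }) =>
	payload.orderId.length === 0
		? Future.value(Result.Error(nonRetryable("orderId is required")))
		: saveOrder(payload).mapError((cause) => retryable("database unavailable", cause));
```
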
@@ -721,7 +846,7 @@ function defineHandlers(contract, handlers) {
 	validateHandlers(contract, handlers);
 	return handlers;
 }
-
 //#endregion
 export { MessageValidationError, NonRetryableError, RetryableError, TypedAmqpWorker, defineHandler, defineHandlers, isHandlerError, isNonRetryableError, isRetryableError, nonRetryable, retryable };
+
 //# sourceMappingURL=index.mjs.map