@koala42/redis-highway 0.2.12 → 0.2.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -9,6 +9,7 @@ Designed for high throughput and massive concurrency with low overhead.
9
9
  - **Granular Retries**: Consumer group isolation - if one group fails, only that group retries.
10
10
  - **Reliability**: Auto-claiming of stuck messages (crashed consumers) and Dead Letter Queue (DLQ) support.
11
11
  - **Metrics**: Built-in tracking for throughput, queue depth, DLQ size, and retries. Prometheus export ready.
12
+ - **ZSTD Compression**: Optional payload compression using Node.js built-in ZSTD. Workers auto-detect compressed messages.
12
13
 
13
14
  ## Installation
14
15
 
@@ -25,7 +26,10 @@ import { Redis } from 'ioredis';
25
26
  import { Producer } from '@koala42/redis-highway';
26
27
 
27
28
  const redis = new Redis();
28
- const producer = new Producer<{hello: string}>(redis, 'my-stream');
29
+ const producer = new Producer<{hello: string}>(redis, {
30
+ streamName: 'my-stream',
31
+ compression: false // Set to true to enable ZSTD compression
32
+ });
29
33
 
30
34
  // Send job
31
35
  await producer.push(
@@ -92,6 +96,57 @@ const batchWorker = new MyBatchWorker(
92
96
  await batchWorker.start();
93
97
  ```
94
98
 
99
+ ### DLQ Worker
100
+ Process messages from the Dead Letter Queue. Use this to handle jobs that have exhausted all retries.
101
+
102
+ **Important:** DLQ Worker has no built-in error handling or retry policy. If `process()` throws an error, the message is lost. This is by design - DLQ processing is meant for manual intervention, logging, or forwarding to external systems.
103
+
104
+ ```typescript
105
+ import { Redis } from 'ioredis';
106
+ import { DlqWorker, DlqMessageEntity } from '@koala42/redis-highway';
107
+
108
+ class MyDlqWorker extends DlqWorker<{hello: string}> {
109
+ async process(message: DlqMessageEntity<{hello: string}>) {
110
+ console.log('Failed job data:', message.data);
111
+ console.log('Original error:', message.errorMessage);
112
+ console.log('Failed at:', new Date(message.failedAt));
113
+ console.log('Original consumer group:', message.group);
114
+
115
+ // Example: Log to external system, send alert, or store for manual review
116
+ await externalLogger.log(message);
117
+ }
118
+ }
119
+
120
+ const redis = new Redis();
121
+ const dlqWorker = new MyDlqWorker(redis, {
122
+ streamName: 'my-stream' // Must match your main worker's stream
123
+ });
124
+
125
+ await dlqWorker.start();
126
+
127
+ // To stop gracefully
128
+ // await dlqWorker.stop();
129
+ ```
130
+
131
+ #### DLQ Worker Options
132
+
133
+ | Option | Type | Default | Description |
134
+ |--------|------|---------|-------------|
135
+ | `streamName` | string | - | **Required**. The Redis stream key (same as your main workers). |
136
+ | `blockTimeoutMs` | number | 5000 | Redis XREADGROUP block duration in milliseconds. |
137
+ | `waitTimeoutMs` | number | 5000 | Wait time between processing cycles when no messages are available. |
138
+
139
+ #### DlqMessageEntity Properties
140
+
141
+ | Property | Type | Description |
142
+ |----------|------|-------------|
143
+ | `data` | T | The original job payload. |
144
+ | `errorMessage` | string | The error message from the last failed attempt. |
145
+ | `failedAt` | number | Unix timestamp when the job was moved to DLQ. |
146
+ | `group` | string | The consumer group that failed to process this job. |
147
+ | `messageUuid` | string | The original job's unique identifier. |
148
+ | `streamMessageId` | string | The Redis stream message ID. |
149
+
95
150
  ### Metrics
96
151
 
97
152
  ```typescript
@@ -107,6 +162,26 @@ console.log(stats.throughput);
107
162
  const promMetrics = await metrics.getPrometheusMetrics(['group-A'], 'my_app_queue');
108
163
  ```
109
164
 
165
+ ### Compression
166
+
167
+ Enable ZSTD compression to reduce Redis memory usage and network bandwidth for large payloads.
168
+
169
+ ```typescript
170
+ const producer = new Producer<{hello: string}>(redis, {
171
+ streamName: 'my-stream',
172
+ compression: true // Enable ZSTD compression
173
+ });
174
+
175
+ // Messages are automatically compressed before being sent to Redis
176
+ await producer.push({ hello: 'world' }, ['group-A']);
177
+ ```
178
+
179
+ **Key points:**
180
+ - Compression uses Node.js built-in ZSTD (no external dependencies required, Node.js 22+)
181
+ - Workers automatically detect and decompress compressed messages
182
+ - No configuration changes needed on workers - they handle both compressed and uncompressed messages
183
+ - Recommended for payloads larger than 1KB where compression benefits outweigh CPU overhead
184
+
110
185
  ## Configuration
111
186
 
112
187
  ### Worker Options
@@ -150,7 +225,10 @@ export class EntryService {
150
225
  private readonly producer: Producer<MyPayload>;
151
226
 
152
227
  constructor(@InjectRedis() private readonly redis: Redis) {
153
- this.producer = new Producer(this.redis, 'my-stream');
228
+ this.producer = new Producer(this.redis, {
229
+ streamName: 'my-stream',
230
+ compression: false
231
+ });
154
232
  }
155
233
 
156
234
  async addToQueue(data: MyPayload) {
@@ -6,6 +6,7 @@ const keys_1 = require("./keys");
6
6
  const uuid_1 = require("uuid");
7
7
  const stream_message_entity_1 = require("./stream-message-entity");
8
8
  const lua_1 = require("./lua");
9
+ const dlq_message_entity_1 = require("./dlq-message-entity");
9
10
  class BaseWorker {
10
11
  constructor(redis, options, controlOptions, metricsOptions) {
11
12
  this.redis = redis;
@@ -44,8 +45,8 @@ class BaseWorker {
44
45
  throw e;
45
46
  }
46
47
  }
47
- this._fetchLoop();
48
- this._autoClaimLoop();
48
+ this._fetchLoop().catch((e) => console.error("Fetch loop crashed", e));
49
+ this._autoClaimLoop().catch((e) => console.error('Auto claim loop crashed', e));
49
50
  }
50
51
  /**
51
52
  * Gracefully stops the worker
@@ -60,6 +61,7 @@ class BaseWorker {
60
61
  while (this._activeCount > 0) {
61
62
  await new Promise((resolve) => setTimeout(resolve, 50));
62
63
  }
64
+ await this.redis.xgroup('DELCONSUMER', this._streamName, this._groupName, this._consumerName).catch();
63
65
  }
64
66
  /**
65
67
  * Auto claim loop
@@ -116,7 +118,7 @@ class BaseWorker {
116
118
  if (message.retryCount < this._maxRetries) {
117
119
  retryCountIncr++;
118
120
  const newJobId = (0, uuid_1.v7)();
119
- pipeline.xadd(this._streamName, '*', 'id', newJobId, 'target', this._groupName, 'retryCount', message.retryCount + 1, 'data', message.serializedData);
121
+ pipeline.xadd(this._streamName, '*', ...stream_message_entity_1.StreamMessageEntity.getStreamFields(newJobId, this._groupName, message.serializedData, message.compressed, message.retryCount + 1));
120
122
  const newStatusKey = this._keys.getJobStatusKey(newJobId);
121
123
  pipeline.hset(newStatusKey, '__target', 1);
122
124
  const statusKey = this._keys.getJobStatusKey(message.messageUuid);
@@ -126,7 +128,7 @@ class BaseWorker {
126
128
  console.error(`[${this._groupName}] Job ${message.messageUuid} run out of retries. Moving to DLQ`);
127
129
  messagesToDLQ.push(message);
128
130
  // Add message to DLQ stream
129
- pipeline.xadd(this._keys.getDlqStreamKey(), '*', 'id', message.messageUuid, 'group', this._groupName, 'error', errorMessage, 'payload', message.serializedData, 'failedAt', timestamp);
131
+ pipeline.xadd(this._keys.getDlqStreamKey(), '*', ...dlq_message_entity_1.DlqMessageEntity.getStreamFields(message.messageUuid, this._groupName, errorMessage, message.serializedData, timestamp));
130
132
  const statusKey = this._keys.getJobStatusKey(message.messageUuid);
131
133
  pipeline.eval(lua_1.LUA_FINALIZE, 2, statusKey, this._streamName, this._groupName, timestamp, message.streamMessageId);
132
134
  }
@@ -0,0 +1,19 @@
1
import { StreamMessage } from "./interfaces";
/**
 * Typed wrapper around a raw Redis stream entry read from the DLQ stream.
 * Parses the flat [key, value, ...] field list into named, typed accessors.
 *
 * @typeParam T - shape of the original job payload (parsed from the
 *                `payload` field as JSON).
 */
export declare class DlqMessageEntity<T extends Record<string, unknown>> {
    private readonly _streamMessageId;
    private readonly _rawFields;
    private readonly _fields;
    private readonly _group;
    private readonly _errorMessage;
    private readonly _failedAt;
    private readonly _messageUuid;
    private readonly _data;
    /** @param message - raw `[streamMessageId, fields[]]` tuple from XREADGROUP */
    constructor(message: StreamMessage);
    /** The original job payload, JSON-parsed from the `payload` field. */
    get data(): T;
    /** The Redis stream entry ID of this DLQ record. */
    get streamMessageId(): string;
    /** The original job's unique identifier (`id` field). */
    get messageUuid(): string;
    /** The consumer group that failed to process this job. */
    get group(): string;
    /** Error message from the last failed attempt. */
    get errorMessage(): string;
    /** Timestamp (numeric, presumably Unix time — see README) when the job entered the DLQ. */
    get failedAt(): number;
    /** Builds the flat field list passed to XADD when writing a DLQ entry. */
    static getStreamFields(id: string, group: string, error: string, payload: string, failedAt: number): (string | number)[];
}
@@ -0,0 +1,47 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.DlqMessageEntity = void 0;
4
+ class DlqMessageEntity {
5
+ constructor(message) {
6
+ this._rawFields = [];
7
+ this._fields = {};
8
+ this._streamMessageId = message[0];
9
+ this._rawFields = message[1];
10
+ for (let i = 0; i < this._rawFields.length; i += 2) {
11
+ this._fields[this._rawFields[i]] = this._rawFields[i + 1];
12
+ }
13
+ this._messageUuid = this._fields['id'];
14
+ this._group = this._fields['group'];
15
+ this._errorMessage = this._fields['error'];
16
+ this._failedAt = Number(this._fields['failedAt']);
17
+ this._data = JSON.parse(this._fields['payload']);
18
+ }
19
+ get data() {
20
+ return this._data;
21
+ }
22
+ get streamMessageId() {
23
+ return this._streamMessageId;
24
+ }
25
+ get messageUuid() {
26
+ return this._messageUuid;
27
+ }
28
+ get group() {
29
+ return this._group;
30
+ }
31
+ get errorMessage() {
32
+ return this._errorMessage;
33
+ }
34
+ get failedAt() {
35
+ return this._failedAt;
36
+ }
37
+ static getStreamFields(id, group, error, payload, failedAt) {
38
+ return [
39
+ 'id', id,
40
+ 'group', group,
41
+ 'error', error,
42
+ 'payload', payload,
43
+ 'failedAt', failedAt
44
+ ];
45
+ }
46
+ }
47
+ exports.DlqMessageEntity = DlqMessageEntity;
@@ -0,0 +1,35 @@
1
+ import Redis from "ioredis";
2
+ import { KeyManager } from "./keys";
3
+ import { DlqWorkerOptions, XReadGroupResponse } from "./interfaces";
4
+ import { DlqMessageEntity } from "./dlq-message-entity";
5
+ export declare abstract class DlqWorker<T extends Record<string, unknown>> {
6
+ protected readonly _redis: Redis;
7
+ protected _isRunning: boolean;
8
+ protected readonly _keys: KeyManager;
9
+ protected readonly _consumerId: string;
10
+ protected readonly _dlqStreamName: string;
11
+ protected readonly _groupName = "dlq-worker";
12
+ protected readonly _consumerName: string;
13
+ protected readonly _blockTimeoutMs: number;
14
+ protected readonly _waitTimeoutMs: number;
15
+ constructor(_redis: Redis, options: DlqWorkerOptions);
16
+ /**
17
+ * Start DLQ worker
18
+ * @returns
19
+ */
20
+ protected start(): Promise<void>;
21
+ /**
22
+ * Stop DLQ loop
23
+ */
24
+ protected stop(): Promise<void>;
25
+ /**
26
+ * Background* DLQ loopw
27
+ */
28
+ protected dlqLoop(): Promise<void>;
29
+ /**
30
+ * Reads job from DLQ
31
+ * @returns
32
+ */
33
+ protected _readGroup(): Promise<XReadGroupResponse | null>;
34
+ abstract process(data: DlqMessageEntity<T>): Promise<void>;
35
+ }
@@ -0,0 +1,83 @@
1
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.DlqWorker = void 0;
const keys_1 = require("./keys");
const uuid_1 = require("uuid");
const dlq_message_entity_1 = require("./dlq-message-entity");
/**
 * Abstract worker that consumes entries from the Dead Letter Queue stream.
 * Messages are acked and deleted BEFORE `process()` is invoked, so a throwing
 * `process()` loses the message — this is by design (see README).
 */
class DlqWorker {
    /**
     * @param _redis  - ioredis connection
     * @param options - streamName (required), blockTimeoutMs / waitTimeoutMs (default 5000)
     */
    constructor(_redis, options) {
        this._redis = _redis;
        this._isRunning = false;
        this._consumerId = (0, uuid_1.v7)();
        this._groupName = 'dlq-worker';
        this._consumerName = `${this._groupName}-${this._consumerId}`;
        this._keys = new keys_1.KeyManager(options.streamName);
        this._dlqStreamName = this._keys.getDlqStreamKey();
        this._blockTimeoutMs = options.blockTimeoutMs ?? 5000;
        this._waitTimeoutMs = options.waitTimeoutMs ?? 5000;
    }
    /**
     * Start the DLQ worker: ensure the consumer group exists, then launch the
     * background loop. Idempotent — calling start() twice is a no-op.
     */
    async start() {
        if (this._isRunning) {
            return;
        }
        this._isRunning = true;
        try {
            await this._redis.xgroup('CREATE', this._dlqStreamName, this._groupName, '0', 'MKSTREAM');
        }
        catch (e) {
            // BUSYGROUP means the group already exists — safe to ignore.
            if (!e.message.includes('BUSYGROUP')) {
                throw e;
            }
        }
        // Fire-and-forget; surface unexpected loop crashes in the log.
        this.dlqLoop().catch((e) => console.error('DLQ loop crashed', e));
    }
    /**
     * Stop the DLQ loop and deregister this consumer from the group.
     */
    async stop() {
        this._isRunning = false;
        // FIX: `.catch()` with no handler does not suppress a rejection — pass
        // a no-op so a failed DELCONSUMER cannot make stop() throw.
        await this._redis.xgroup('DELCONSUMER', this._dlqStreamName, this._groupName, this._consumerName).catch(() => { });
    }
    /**
     * Background DLQ loop: read one message at a time, ack+delete it, then
     * hand it to `process()`. Waits `_waitTimeoutMs` between empty reads.
     */
    async dlqLoop() {
        while (this._isRunning) {
            try {
                const results = await this._readGroup();
                if (!results) {
                    await new Promise((resolve) => setTimeout(resolve, this._waitTimeoutMs));
                    continue;
                }
                const message = results[0][1][0];
                if (!message) {
                    await new Promise((resolve) => setTimeout(resolve, this._waitTimeoutMs));
                    continue;
                }
                const dlqMessage = new dlq_message_entity_1.DlqMessageEntity(message);
                // Ack and delete the message. XACKDEL is supported from 8.2
                await this._redis.multi()
                    .xack(this._dlqStreamName, this._groupName, dlqMessage.streamMessageId)
                    .xdel(this._dlqStreamName, dlqMessage.streamMessageId)
                    .exec();
                await this.process(dlqMessage);
            }
            catch (e) {
                console.error(`[${this._groupName}] Failed processing DLQ job`, e);
                await new Promise((resolve) => setTimeout(resolve, this._waitTimeoutMs));
            }
        }
    }
    /**
     * Reads a single job from the DLQ (blocking up to `_blockTimeoutMs`).
     * @returns the XREADGROUP response, or null when nothing is pending
     */
    async _readGroup() {
        return this._redis.xreadgroup('GROUP', this._groupName, this._consumerName, 'COUNT', 1, 'BLOCK', this._blockTimeoutMs, 'STREAMS', this._dlqStreamName, '>');
    }
}
exports.DlqWorker = DlqWorker;
package/dist/index.d.ts CHANGED
@@ -6,3 +6,6 @@ export * from './interfaces';
6
6
  export * from './batch-worker';
7
7
  export * from './stream-message-entity';
8
8
  export * from './base-worker';
9
+ export * from './dlq-message-entity';
10
+ export * from './dlq-worker';
11
+ export * from './serializer';
package/dist/index.js CHANGED
@@ -22,3 +22,6 @@ __exportStar(require("./interfaces"), exports);
22
22
  __exportStar(require("./batch-worker"), exports);
23
23
  __exportStar(require("./stream-message-entity"), exports);
24
24
  __exportStar(require("./base-worker"), exports);
25
+ __exportStar(require("./dlq-message-entity"), exports);
26
+ __exportStar(require("./dlq-worker"), exports);
27
+ __exportStar(require("./serializer"), exports);
@@ -5,6 +5,11 @@ export interface BaseWorkerOptions {
5
5
  streamName: string;
6
6
  concurrency: number;
7
7
  }
8
+ export interface DlqWorkerOptions {
9
+ streamName: string;
10
+ blockTimeoutMs?: number;
11
+ waitTimeoutMs?: number;
12
+ }
8
13
  export interface BaseWorkerControlOptions {
9
14
  maxRetries: number;
10
15
  blockTimeMs: number;
@@ -21,3 +26,7 @@ export interface BatchWorkerOptions extends BaseWorkerOptions {
21
26
  batchSize: number;
22
27
  maxFetchCount: number;
23
28
  }
29
+ export interface ProducerOptions {
30
+ streamName: string;
31
+ compression: boolean;
32
+ }
@@ -1,13 +1,14 @@
1
1
  import { Redis } from 'ioredis';
2
+ import { ProducerOptions } from './interfaces';
2
3
  export interface JobOptions {
3
4
  ttl?: number | null;
4
- streamName?: string;
5
5
  }
6
6
  export declare class Producer<T extends Record<string, unknown>> {
7
- private readonly redis;
8
- private readonly streamName;
9
- private keys;
10
- constructor(redis: Redis, streamName: string);
7
+ private readonly _keys;
8
+ private readonly _redis;
9
+ private readonly _streamName;
10
+ private readonly _compression;
11
+ constructor(redis: Redis, options: ProducerOptions);
11
12
  /**
12
13
  * Push message to queue
13
14
  * @param payload
package/dist/producer.js CHANGED
@@ -3,11 +3,14 @@ Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.Producer = void 0;
4
4
  const uuid_1 = require("uuid");
5
5
  const keys_1 = require("./keys");
6
+ const stream_message_entity_1 = require("./stream-message-entity");
7
+ const serializer_1 = require("./serializer");
6
8
  class Producer {
7
- constructor(redis, streamName) {
8
- this.redis = redis;
9
- this.streamName = streamName;
10
- this.keys = new keys_1.KeyManager(streamName);
9
+ constructor(redis, options) {
10
+ this._redis = redis;
11
+ this._streamName = options.streamName;
12
+ this._keys = new keys_1.KeyManager(this._streamName);
13
+ this._compression = options.compression;
11
14
  }
12
15
  /**
13
16
  * Push message to queue
@@ -17,19 +20,18 @@ class Producer {
17
20
  * @returns Created job ID (uuidv7)
18
21
  */
19
22
  async push(payload, targetGroups, opts) {
20
- const serializedPayload = JSON.stringify(payload);
21
23
  const id = (0, uuid_1.v7)();
22
- const ttl = opts?.ttl || null; // 24 hours in seconds
23
- const pipeline = this.redis.pipeline();
24
- const statusKey = this.keys.getJobStatusKey(id);
24
+ const ttl = opts?.ttl || null; // Defaults to null, no expiry
25
+ const pipeline = this._redis.pipeline();
26
+ const statusKey = this._keys.getJobStatusKey(id);
25
27
  // Initialize job metadata - status
26
- // TODO: improve target groups use groups join by "," instead of groups length
27
28
  pipeline.hset(statusKey, '__target', targetGroups.length);
28
29
  if (ttl) {
29
30
  pipeline.expire(statusKey, ttl);
30
31
  }
32
+ const serializedPayload = this._compression ? await serializer_1.Serializer.compressPayload(payload) : JSON.stringify(payload);
31
33
  // Push message to stream
32
- pipeline.xadd(this.streamName, '*', 'id', id, 'target', targetGroups.join(','), 'data', serializedPayload);
34
+ pipeline.xadd(this._streamName, '*', ...stream_message_entity_1.StreamMessageEntity.getStreamFields(id, targetGroups, serializedPayload, this._compression));
33
35
  await pipeline.exec();
34
36
  return id;
35
37
  }
@@ -0,0 +1,20 @@
1
+ export declare class Serializer {
2
+ /**
3
+ * Compress payload using zstd
4
+ * @param payload - JSON like payload
5
+ * @returns base64 encoded payload
6
+ */
7
+ static compressPayload(payload: Record<string, unknown>): Promise<string>;
8
+ /**
9
+ * Decompress payload using zstd
10
+ * @param compressedPayload - base64 encoded compressed payload
11
+ * @returns Parsed JSON payload
12
+ */
13
+ static decompressPayload(compressedPayload: string): Promise<Record<string, unknown>>;
14
+ /**
15
+ * Decompress payload sync
16
+ * @param compressedPayload - base64 encoded payload
17
+ * @returns Parsed JSON payload
18
+ */
19
+ static decompressPayloadSync<T extends Record<string, unknown>>(compressedPayload: string): Promise<T>;
20
+ }
@@ -0,0 +1,41 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.Serializer = void 0;
4
+ const zlib_1 = require("zlib");
5
+ class Serializer {
6
+ /**
7
+ * Compress payload using zstd
8
+ * @param payload - JSON like payload
9
+ * @returns base64 encoded payload
10
+ */
11
+ static async compressPayload(payload) {
12
+ const minifiedPayload = JSON.stringify(payload);
13
+ const compressedPayload = await new Promise((resolve) => {
14
+ (0, zlib_1.zstdCompress)(Buffer.from(minifiedPayload), (_, result) => resolve(result));
15
+ });
16
+ return Buffer.from(compressedPayload).toString("base64");
17
+ }
18
+ /**
19
+ * Decompress payload using zstd
20
+ * @param compressedPayload - base64 encoded compressed payload
21
+ * @returns Parsed JSON payload
22
+ */
23
+ static async decompressPayload(compressedPayload) {
24
+ const decoded = Buffer.from(compressedPayload, "base64");
25
+ const decompressed = await new Promise((resolve) => {
26
+ (0, zlib_1.zstdDecompress)(decoded, (_, result) => resolve(result));
27
+ });
28
+ return JSON.parse(decompressed.toString());
29
+ }
30
+ /**
31
+ * Decompress payload sync
32
+ * @param compressedPayload - base64 encoded payload
33
+ * @returns Parsed JSON payload
34
+ */
35
+ static decompressPayloadSync(compressedPayload) {
36
+ const decoded = Buffer.from(compressedPayload, "base64");
37
+ const decompressed = (0, zlib_1.zstdDecompressSync)(decoded);
38
+ return JSON.parse(decompressed.toString());
39
+ }
40
+ }
41
+ exports.Serializer = Serializer;
@@ -7,12 +7,15 @@ export declare class StreamMessageEntity<T extends Record<string, unknown>> {
7
7
  private readonly _messageUuid;
8
8
  private readonly _retryCount;
9
9
  private readonly _data;
10
+ private readonly _compressed;
10
11
  private readonly _rawData;
11
12
  constructor(message: StreamMessage);
12
13
  get data(): T;
13
14
  get serializedData(): string;
15
+ get compressed(): boolean;
14
16
  get streamMessageId(): string;
15
17
  get messageUuid(): string;
16
18
  get routes(): string[];
17
19
  get retryCount(): number;
20
+ static getStreamFields(id: string, target: string | string[], serializedPayload: string, compression: boolean, retryCount?: number): (string | number)[];
18
21
  }
@@ -1,6 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.StreamMessageEntity = void 0;
4
+ const serializer_1 = require("./serializer");
4
5
  class StreamMessageEntity {
5
6
  constructor(message) {
6
7
  this._rawFields = [];
@@ -14,7 +15,8 @@ class StreamMessageEntity {
14
15
  this._messageUuid = this._fields['id'];
15
16
  this._routes = this._fields['target'].split(',');
16
17
  this._retryCount = parseInt(this._fields['retryCount'] || '0', 10);
17
- this._data = JSON.parse(this._fields['data']);
18
+ this._compressed = this._fields['zstd'] === 'true';
19
+ this._data = this._compressed ? serializer_1.Serializer.decompressPayloadSync(this._fields['data']) : JSON.parse(this._fields['data']);
18
20
  this._rawData = this._fields['data'];
19
21
  }
20
22
  get data() {
@@ -23,6 +25,9 @@ class StreamMessageEntity {
23
25
  get serializedData() {
24
26
  return this._rawData;
25
27
  }
28
+ get compressed() {
29
+ return this._compressed;
30
+ }
26
31
  get streamMessageId() {
27
32
  return this._streamMessageId;
28
33
  }
@@ -35,5 +40,15 @@ class StreamMessageEntity {
35
40
  get retryCount() {
36
41
  return this._retryCount;
37
42
  }
43
+ static getStreamFields(id, target, serializedPayload, compression, retryCount) {
44
+ const fields = ['id', id, 'target', Array.isArray(target) ? target.join(',') : target, 'data', serializedPayload];
45
+ if (retryCount !== undefined) {
46
+ fields.push('retryCount', retryCount);
47
+ }
48
+ if (compression) {
49
+ fields.push('zstd', 'true');
50
+ }
51
+ return fields;
52
+ }
38
53
  }
39
54
  exports.StreamMessageEntity = StreamMessageEntity;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@koala42/redis-highway",
3
- "version": "0.2.12",
3
+ "version": "0.2.14",
4
4
  "description": "High performance redis queue",
5
5
  "license": "MIT",
6
6
  "author": {