@koala42/redis-highway 0.2.8 → 0.2.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +192 -0
- package/package.json +1 -1
package/README.md
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
# @koala42/redis-highway
|
|
2
|
+
|
|
3
|
+
High performance Redis stream-based queue for Node.js. Supports Redis single instances and Valkey single instances.
|
|
4
|
+
Designed for high throughput and massive concurrency with low overhead.
|
|
5
|
+
|
|
6
|
+
## Features
|
|
7
|
+
- **Lightweight**: Uses optimized Lua scripts and pipelines for maximum performance and reduced I/O.
|
|
8
|
+
- **Micro-Batching**: Supports batch processing for high-volume message consumption.
|
|
9
|
+
- **Granular Retries**: Consumer group isolation - if one group fails, only that group retries.
|
|
10
|
+
- **Reliability**: Auto-claiming of stuck messages (crashed consumers) and Dead Letter Queue (DLQ) support.
|
|
11
|
+
- **Metrics**: Built-in tracking for throughput, queue depth, DLQ size, and retries. Prometheus export ready.
|
|
12
|
+
|
|
13
|
+
## Installation
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
npm install @koala42/redis-highway
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## Usage
|
|
20
|
+
|
|
21
|
+
### Producer
|
|
22
|
+
|
|
23
|
+
```typescript
|
|
24
|
+
import { Redis } from 'ioredis';
|
|
25
|
+
import { Producer } from '@koala42/redis-highway';
|
|
26
|
+
|
|
27
|
+
const redis = new Redis();
|
|
28
|
+
const producer = new Producer<{hello: string}>(redis, 'my-stream');
|
|
29
|
+
|
|
30
|
+
// Send job
|
|
31
|
+
await producer.push(
|
|
32
|
+
{ hello: 'world' }, // Type-safe payload
|
|
33
|
+
['group-A', 'group-B'], // Target specific consumer groups
|
|
34
|
+
{ ttl: 3600 } // Optional: expiration time in seconds
|
|
35
|
+
);
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
### Worker
|
|
39
|
+
|
|
40
|
+
```typescript
|
|
41
|
+
import { Redis } from 'ioredis';
|
|
42
|
+
import { Worker } from '@koala42/redis-highway';
|
|
43
|
+
|
|
44
|
+
class MyWorker extends Worker<{hello: string}> {
|
|
45
|
+
async process(data: {hello: string}) {
|
|
46
|
+
console.log('Processing:', data.hello);
|
|
47
|
+
// throw new Error('fail'); // Triggers automatic retry logic
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
const redis = new Redis();
|
|
52
|
+
const worker = new MyWorker(
|
|
53
|
+
redis,
|
|
54
|
+
{
|
|
55
|
+
groupName: 'group-A',
|
|
56
|
+
streamName: 'my-stream',
|
|
57
|
+
concurrency: 10 // Number of concurrent jobs to process
|
|
58
|
+
}
|
|
59
|
+
);
|
|
60
|
+
|
|
61
|
+
await worker.start();
|
|
62
|
+
|
|
63
|
+
// To stop gracefully
|
|
64
|
+
// await worker.stop();
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
### Batch Worker
|
|
68
|
+
Process messages in batches for higher throughput.
|
|
69
|
+
|
|
70
|
+
```typescript
|
|
71
|
+
import { Redis } from 'ioredis';
|
|
72
|
+
import { BatchWorker } from '@koala42/redis-highway';
|
|
73
|
+
|
|
74
|
+
class MyBatchWorker extends BatchWorker<{hello: string}> {
|
|
75
|
+
async process(batchedData: {hello: string}[]) {
|
|
76
|
+
console.log(`Processing batch of ${batchedData.length} items`);
|
|
77
|
+
// Example: Bulk insert into database
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
const batchWorker = new MyBatchWorker(
|
|
82
|
+
redis,
|
|
83
|
+
{
|
|
84
|
+
groupName: 'group-B',
|
|
85
|
+
streamName: 'my-stream',
|
|
86
|
+
concurrency: 50, // Total items processing limit
|
|
87
|
+
batchSize: 10, // Items per batch
|
|
88
|
+
maxFetchCount: 50
|
|
89
|
+
}
|
|
90
|
+
);
|
|
91
|
+
|
|
92
|
+
await batchWorker.start();
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
### Metrics
|
|
96
|
+
|
|
97
|
+
```typescript
|
|
98
|
+
import { Metrics } from '@koala42/redis-highway';
|
|
99
|
+
|
|
100
|
+
const metrics = new Metrics(redis, 'my-stream');
|
|
101
|
+
|
|
102
|
+
// Get raw metrics object
|
|
103
|
+
const stats = await metrics.getMetrics(['group-A', 'group-B']);
|
|
104
|
+
console.log(stats.throughput);
|
|
105
|
+
|
|
106
|
+
// Get Prometheus formatted string
|
|
107
|
+
const promMetrics = await metrics.getPrometheusMetrics(['group-A'], 'my_app_queue');
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
## Configuration
|
|
111
|
+
|
|
112
|
+
### Worker Options
|
|
113
|
+
The second argument to `Worker` and `BatchWorker` constructors is the primary configuration object.
|
|
114
|
+
|
|
115
|
+
| Option | Type | Description |
|
|
116
|
+
|--------|------|-------------|
|
|
117
|
+
| `groupName` | string | **Required**. The consumer group name (e.g., 'email-service'). |
|
|
118
|
+
| `streamName` | string | **Required**. The Redis stream key. |
|
|
119
|
+
| `concurrency` | number | **Required**. Maximum number of messages processed in parallel by this worker instance. |
|
|
120
|
+
| `batchSize` | number | **Required (BatchWorker only)**. Number of messages to process in a single call. |
|
|
121
|
+
| `maxFetchCount` | number | **Required (BatchWorker only)**. Limit for the XREADGROUP COUNT option. |
|
|
122
|
+
|
|
123
|
+
### Control Options
|
|
124
|
+
The third argument is for fine-tuning retry and recovery behavior.
|
|
125
|
+
|
|
126
|
+
```typescript
|
|
127
|
+
const worker = new MyWorker(redis, { ... }, {
|
|
128
|
+
maxRetries: 3, // Default: 3
|
|
129
|
+
blockTimeMs: 2000, // Default: 2000. XREADGROUP block time.
|
|
130
|
+
minIdleTimeMs: 120000, // Default: 2 minutes. Time before a message is considered stuck.
|
|
131
|
+
  claimIntervalMs: 120000, // Default: 2 minutes. How often to check for stuck messages.
|
|
132
|
+
collectMetrics: true // Default: true. Enable throughput tracking.
|
|
133
|
+
});
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
| Option | Default | Description |
|
|
137
|
+
|--------|---------|-------------|
|
|
138
|
+
| `maxRetries` | 3 | Number of times to retry a failed message before moving it to DLQ. |
|
|
139
|
+
| `blockTimeMs` | 2000 | Redis blocking timeout for fetching new messages (in ms). |
|
|
140
|
+
| `minIdleTimeMs` | 120000 | Messages pending longer than this are candidates for auto-claim (recovery). |
|
|
141
|
+
| `claimIntervalMs` | 120000 | Interval for checking and claiming stuck messages. |
|
|
142
|
+
| `collectMetrics` | true | If true, increments throughput counters in Redis. |
|
|
143
|
+
|
|
144
|
+
## Usage with NestJS
|
|
145
|
+
|
|
146
|
+
```typescript
|
|
147
|
+
// Producer Service
|
|
148
|
+
@Injectable()
|
|
149
|
+
export class EntryService {
|
|
150
|
+
private readonly producer: Producer<MyPayload>;
|
|
151
|
+
|
|
152
|
+
constructor(@InjectRedis() private readonly redis: Redis) {
|
|
153
|
+
this.producer = new Producer(this.redis, 'my-stream');
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
async addToQueue(data: MyPayload) {
|
|
157
|
+
await this.producer.push(data, ['group-A']);
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
// Worker Service
|
|
162
|
+
@Injectable()
|
|
163
|
+
export class ProcessorService extends Worker<MyPayload> implements OnModuleInit, OnModuleDestroy {
|
|
164
|
+
constructor(@InjectRedis() redis: Redis) {
|
|
165
|
+
super(redis, {
|
|
166
|
+
groupName: 'group-A',
|
|
167
|
+
streamName: 'my-stream',
|
|
168
|
+
concurrency: 50
|
|
169
|
+
});
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
async onModuleInit() {
|
|
173
|
+
await this.start();
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
async onModuleDestroy() {
|
|
177
|
+
await this.stop();
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
async process(data: MyPayload) {
|
|
181
|
+
// Process your job here
|
|
182
|
+
}
|
|
183
|
+
}
|
|
184
|
+
```
|
|
185
|
+
|
|
186
|
+
## Roadmap & Missing Features
|
|
187
|
+
Tracked in [GitHub Issues](https://github.com/Koala42/redis-highway/issues).
|
|
188
|
+
|
|
189
|
+
## AI Usage Disclosure
|
|
190
|
+
- AI will never be used for development
|
|
191
|
+
- AI may be used to do code reviews
|
|
192
|
+
- AI may be used to write unit tests
|