alepha 0.9.3 → 0.9.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +93 -47
- package/batch.d.ts +492 -19
- package/bucket.d.ts +449 -19
- package/cache/redis.d.ts +7 -7
- package/cache.d.ts +134 -7
- package/command.d.ts +11 -9
- package/core.d.ts +184 -291
- package/datetime.d.ts +3 -7
- package/email.cjs +8 -0
- package/email.d.ts +246 -0
- package/email.js +1 -0
- package/file.d.ts +0 -3
- package/lock/redis.d.ts +2 -5
- package/lock.d.ts +420 -38
- package/logger.cjs +8 -0
- package/logger.d.ts +227 -0
- package/logger.js +1 -0
- package/package.json +57 -42
- package/postgres.d.ts +1827 -344
- package/queue/redis.d.ts +0 -2
- package/queue.d.ts +662 -21
- package/react/auth.d.ts +214 -134
- package/react/form.d.ts +119 -71
- package/react/head.d.ts +8 -15
- package/react/i18n.d.ts +27 -29
- package/react.d.ts +412 -349
- package/redis.d.ts +12 -12
- package/retry.d.ts +0 -4
- package/router.d.ts +1 -1
- package/scheduler.d.ts +9 -13
- package/security.d.ts +68 -86
- package/server/cache.d.ts +3 -5
- package/server/compress.d.ts +0 -3
- package/server/cookies.d.ts +4 -7
- package/server/cors.d.ts +1 -5
- package/server/health.d.ts +0 -3
- package/server/helmet.d.ts +28 -28
- package/server/links.d.ts +143 -30
- package/server/metrics.d.ts +1 -5
- package/server/multipart.d.ts +0 -2
- package/server/proxy.d.ts +195 -7
- package/server/security.d.ts +6 -7
- package/server/static.d.ts +4 -8
- package/server/swagger.d.ts +2 -6
- package/server.d.ts +656 -108
- package/topic/redis.d.ts +3 -6
- package/topic.d.ts +808 -24
- package/vite.d.ts +7 -5
package/queue.d.ts
CHANGED
@@ -1,8 +1,6 @@
-import * as _alepha_core2 from "alepha";
-import * as _alepha_core3 from "alepha";
 import * as _alepha_core1 from "alepha";
-import * as _alepha_core0 from "alepha";
 import { Alepha, Descriptor, KIND, Service, Static, TSchema } from "alepha";
+import * as _alepha_logger0 from "alepha/logger";
 import { DateTimeProvider } from "alepha/datetime";
 
 //#region src/providers/QueueProvider.d.ts
@@ -28,38 +26,36 @@ declare abstract class QueueProvider {
 */
 abstract pop(queue: string): Promise<string | undefined>;
 }
-//# sourceMappingURL=QueueProvider.d.ts.map
 //#endregion
 //#region src/providers/MemoryQueueProvider.d.ts
 declare class MemoryQueueProvider implements QueueProvider {
-protected readonly log:
+protected readonly log: _alepha_logger0.Logger;
 protected queueList: Record<string, string[]>;
 push(queue: string, ...messages: string[]): Promise<void>;
 pop(queue: string): Promise<string | undefined>;
 }
-//# sourceMappingURL=MemoryQueueProvider.d.ts.map
 //#endregion
 //#region src/providers/WorkerProvider.d.ts
-declare const envSchema:
+declare const envSchema: _alepha_core1.TObject<{
 /**
 * The interval in milliseconds to wait before checking for new messages.
 */
-QUEUE_WORKER_INTERVAL:
+QUEUE_WORKER_INTERVAL: _alepha_core1.TNumber;
 /**
 * The maximum interval in milliseconds to wait before checking for new messages.
 */
-QUEUE_WORKER_MAX_INTERVAL:
+QUEUE_WORKER_MAX_INTERVAL: _alepha_core1.TNumber;
 /**
 * The number of workers to run concurrently. Defaults to 1.
 * Useful only if you are doing a lot of I/O.
 */
-QUEUE_WORKER_CONCURRENCY:
+QUEUE_WORKER_CONCURRENCY: _alepha_core1.TNumber;
 }>;
 declare module "alepha" {
 interface Env extends Partial<Static<typeof envSchema>> {}
 }
 declare class WorkerProvider {
-protected readonly log:
+protected readonly log: _alepha_logger0.Logger;
 protected readonly env: {
 QUEUE_WORKER_INTERVAL: number;
 QUEUE_WORKER_MAX_INTERVAL: number;
@@ -73,13 +69,13 @@ declare class WorkerProvider {
 protected abortController: AbortController;
 protected workerIntervals: Record<number, number>;
 protected consumers: Array<Consumer>;
-protected readonly start:
+protected readonly start: _alepha_core1.HookDescriptor<"start">;
 /**
 * Start the workers.
 * This method will create an endless loop that will check for new messages!
 */
 protected startWorkers(): void;
-protected readonly stop:
+protected readonly stop: _alepha_core1.HookDescriptor<"stop">;
 /**
 * Wait for the next message, where `n` is the worker number.
 *
@@ -119,21 +115,309 @@ interface NextMessage {
 //#endregion
 //#region src/descriptors/$queue.d.ts
 /**
-*
+* Creates a queue descriptor for asynchronous message processing with background workers.
+*
+* This descriptor provides a powerful message queue system that enables decoupled, asynchronous
+* communication between different parts of your application. It supports multiple storage backends,
+* type-safe message handling, and automatic worker processing with intelligent retry mechanisms.
+*
+* **Key Features**
+*
+* - **Type-Safe Messages**: Full TypeScript support with schema validation using TypeBox
+* - **Multiple Storage Backends**: Support for in-memory, Redis, and custom queue providers
+* - **Background Processing**: Automatic worker threads for message processing
+* - **Reliable Delivery**: Built-in retry mechanisms and error handling
+* - **Scalable Architecture**: Horizontal scaling support with distributed queue backends
+* - **Dead Letter Queues**: Failed message handling with configurable retry policies
+*
+* **Use Cases**
+*
+* Perfect for decoupling application components and handling asynchronous tasks:
+* - Background job processing
+* - Email and notification sending
+* - Image/file processing pipelines
+* - Event-driven architectures
+* - Microservice communication
+* - Long-running data operations
+*
+* @example
+* **Basic queue with automatic processing:**
+* ```ts
+* import { $queue } from "alepha/queue";
+* import { t } from "alepha";
+*
+* class NotificationService {
+* emailQueue = $queue({
+* name: "email-notifications",
+* schema: t.object({
+* to: t.string(),
+* subject: t.string(),
+* body: t.string(),
+* priority: t.optional(t.enum(["high", "normal"]))
+* }),
+* handler: async (message) => {
+* // This runs in a background worker
+* await this.sendEmail(message.payload);
+* console.log(`Email sent to ${message.payload.to}`);
+* }
+* });
+*
+* async sendWelcomeEmail(userEmail: string) {
+* // Push message to queue for background processing
+* await this.emailQueue.push({
+* to: userEmail,
+* subject: "Welcome to our platform!",
+* body: "Thank you for joining us...",
+* priority: "high"
+* });
+* }
+* }
+* ```
+*
+* @example
+* **Batch processing with multiple messages:**
+* ```ts
+* class ImageProcessor {
+* imageQueue = $queue({
+* name: "image-processing",
+* description: "Process uploaded images for optimization and thumbnails",
+* schema: t.object({
+* imageId: t.string(),
+* originalUrl: t.string(),
+* userId: t.string(),
+* operations: t.array(t.union([
+* t.literal("resize"),
+* t.literal("compress"),
+* t.literal("thumbnail")
+* ]))
+* }),
+* handler: async (message) => {
+* const { imageId, originalUrl, operations } = message.payload;
+*
+* for (const operation of operations) {
+* await this.processImage(imageId, originalUrl, operation);
+* }
+*
+* console.log(`Processed image ${imageId} with operations: ${operations.join(", ")}`);
+* }
+* });
+*
+* async processUploadedImages(images: Array<{id: string; url: string; userId: string}>) {
+* // Process multiple images in parallel
+* const messages = images.map(img => ({
+* imageId: img.id,
+* originalUrl: img.url,
+* userId: img.userId,
+* operations: ["resize", "compress", "thumbnail"] as const
+* }));
+*
+* // Push all messages at once for efficient batch processing
+* await this.imageQueue.push(...messages);
+* }
+* }
+* ```
+*
+* @example
+* **Redis-backed queue for production scalability:**
+* ```ts
+* class OrderProcessor {
+* orderQueue = $queue({
+* name: "order-processing",
+* provider: RedisQueueProvider, // Use Redis for distributed processing
+* schema: t.object({
+* orderId: t.string(),
+* customerId: t.string(),
+* items: t.array(t.object({
+* productId: t.string(),
+* quantity: t.number(),
+* price: t.number()
+* })),
+* paymentMethod: t.string(),
+* shippingAddress: t.object({
+* street: t.string(),
+* city: t.string(),
+* zipCode: t.string(),
+* country: t.string()
+* })
+* }),
+* handler: async (message) => {
+* const { orderId, customerId, items } = message.payload;
+*
+* // Process payment
+* await this.processPayment(orderId, items);
+*
+* // Update inventory
+* await this.updateInventory(items);
+*
+* // Send confirmation email
+* await this.sendOrderConfirmation(customerId, orderId);
+*
+* // Schedule shipping
+* await this.scheduleShipping(orderId, message.payload.shippingAddress);
+*
+* console.log(`Order ${orderId} processed successfully`);
+* }
+* });
+* }
+* ```
+*
+* @example
+* **Memory-only queue for development and testing:**
+* ```ts
+* class DevTaskProcessor {
+* taskQueue = $queue({
+* name: "dev-tasks",
+* provider: "memory", // Use in-memory queue for development
+* schema: t.object({
+* taskType: t.enum(["cleanup", "backup", "report"]),
+* data: t.record(t.string(), t.any()),
+* scheduledAt: t.optional(t.string())
+* }),
+* handler: async (message) => {
+* const { taskType, data } = message.payload;
+*
+* switch (taskType) {
+* case "cleanup":
+* await this.performCleanup(data);
+* break;
+* case "backup":
+* await this.createBackup(data);
+* break;
+* case "report":
+* await this.generateReport(data);
+* break;
+* }
+* }
+* });
+* }
+* ```
 */
 declare const $queue: {
 <T extends TSchema>(options: QueueDescriptorOptions<T>): QueueDescriptor<T>;
 [KIND]: typeof QueueDescriptor;
 };
 interface QueueDescriptorOptions<T extends TSchema> {
+/**
+* Unique name for the queue.
+*
+* This name is used for:
+* - Queue identification across the system
+* - Storage backend key generation
+* - Logging and monitoring
+* - Worker assignment and routing
+*
+* If not provided, defaults to the property key where the queue is declared.
+*
+* @example "email-notifications"
+* @example "image-processing"
+* @example "order-fulfillment"
+*/
 name?: string;
+/**
+* Human-readable description of the queue's purpose.
+*
+* Used for:
+* - Documentation generation
+* - Monitoring dashboards
+* - Development team communication
+* - Queue management interfaces
+*
+* @example "Process user registration emails and welcome sequences"
+* @example "Handle image uploads, resizing, and thumbnail generation"
+* @example "Manage order processing, payment, and shipping workflows"
+*/
 description?: string;
+/**
+* Queue storage provider configuration.
+*
+* Options:
+* - **"memory"**: In-memory queue (default for development, lost on restart)
+* - **Service<QueueProvider>**: Custom provider class (e.g., RedisQueueProvider)
+* - **undefined**: Uses the default queue provider from dependency injection
+*
+* **Provider Selection Guidelines**:
+* - Development: Use "memory" for fast, simple testing
+* - Production: Use Redis or database-backed providers for persistence
+* - High-throughput: Use specialized providers with connection pooling
+* - Distributed systems: Use Redis or message brokers for scalability
+*
+* @default Uses injected QueueProvider
+* @example "memory"
+* @example RedisQueueProvider
+* @example DatabaseQueueProvider
+*/
 provider?: "memory" | Service<QueueProvider>;
+/**
+* TypeBox schema defining the structure of messages in this queue.
+*
+* This schema:
+* - Validates all messages pushed to the queue
+* - Provides full TypeScript type inference
+* - Ensures type safety between producers and consumers
+* - Enables automatic serialization/deserialization
+*
+* **Schema Design Best Practices**:
+* - Keep schemas simple and focused on the specific task
+* - Use optional fields for data that might not always be available
+* - Include version fields for schema evolution
+* - Use union types for different message types in the same queue
+*
+* @example
+* ```ts
+* t.object({
+* userId: t.string(),
+* action: t.enum(["create", "update"]),
+* data: t.record(t.string(), t.any()),
+* timestamp: t.optional(t.number())
+* })
+* ```
+*/
 schema: T;
+/**
+* Message handler function that processes queue messages.
+*
+* This function:
+* - Runs in background worker threads for non-blocking processing
+* - Receives type-safe message payloads based on the schema
+* - Should be idempotent to handle potential retries
+* - Can throw errors to trigger retry mechanisms
+* - Has access to the full Alepha dependency injection container
+*
+* **Handler Best Practices**:
+* - Keep handlers focused on a single responsibility
+* - Use proper error handling and logging
+* - Make operations idempotent when possible
+* - Validate critical business logic within handlers
+* - Consider using transactions for data consistency
+*
+* @param message - The queue message with validated payload
+* @returns Promise that resolves when processing is complete
+*
+* @example
+* ```ts
+* handler: async (message) => {
+* const { userId, email, template } = message.payload;
+*
+* try {
+* await this.emailService.send({
+* to: email,
+* template,
+* data: { userId }
+* });
+*
+* await this.userService.markEmailSent(userId, template);
+* } catch (error) {
+* // Log error and let the queue system handle retries
+* this.logger.error(`Failed to send email to ${email}`, error);
+* throw error;
+* }
+* }
+* ```
+*/
 handler?: (message: QueueMessage<T>) => Promise<void>;
 }
 declare class QueueDescriptor<T extends TSchema> extends Descriptor<QueueDescriptorOptions<T>> {
-protected readonly log:
+protected readonly log: _alepha_logger0.Logger;
 protected readonly workerProvider: WorkerProvider;
 readonly provider: QueueProvider | MemoryQueueProvider;
 push(...payloads: Array<Static<T>>): Promise<void>;
@@ -146,24 +430,383 @@ interface QueueMessageSchema {
 interface QueueMessage<T extends TSchema> {
 payload: Static<T>;
 }
-//# sourceMappingURL=$queue.d.ts.map
 //#endregion
 //#region src/descriptors/$consumer.d.ts
 /**
-*
+* Creates a consumer descriptor to process messages from a specific queue.
+*
+* This descriptor creates a dedicated message consumer that connects to a queue and processes
+* its messages using a custom handler function. Consumers provide a clean way to separate
+* message production from consumption, enabling scalable architectures where multiple
+* consumers can process messages from the same queue.
+*
+* **Key Features**
+*
+* - **Queue Integration**: Seamlessly connects to any $queue descriptor
+* - **Type Safety**: Full TypeScript support inherited from the connected queue's schema
+* - **Dedicated Processing**: Isolated message processing logic separate from the queue
+* - **Worker Management**: Automatic integration with the worker system for background processing
+* - **Error Handling**: Built-in error handling and retry mechanisms from the queue system
+* - **Scalability**: Multiple consumers can process the same queue for horizontal scaling
+*
+* **Use Cases**
+*
+* Perfect for creating specialized message processors:
+* - Dedicated email sending services
+* - Image processing workers
+* - Data synchronization tasks
+* - Event handlers for specific domains
+* - Microservice message consumers
+* - Background job processors
+*
+* @example
+* **Basic consumer setup:**
+* ```ts
+* import { $queue, $consumer } from "alepha/queue";
+* import { t } from "alepha";
+*
+* class EmailService {
+* // Define the queue
+* emailQueue = $queue({
+* name: "emails",
+* schema: t.object({
+* to: t.string(),
+* subject: t.string(),
+* body: t.string(),
+* template: t.optional(t.string())
+* })
+* });
+*
+* // Create a dedicated consumer for this queue
+* emailConsumer = $consumer({
+* queue: this.emailQueue,
+* handler: async (message) => {
+* const { to, subject, body, template } = message.payload;
+*
+* if (template) {
+* await this.sendTemplatedEmail(to, template, { subject, body });
+* } else {
+* await this.sendPlainEmail(to, subject, body);
+* }
+*
+* console.log(`Email sent to ${to}: ${subject}`);
+* }
+* });
+*
+* async sendWelcomeEmail(userEmail: string) {
+* // Push to queue - consumer will automatically process it
+* await this.emailQueue.push({
+* to: userEmail,
+* subject: "Welcome!",
+* body: "Thanks for joining our platform.",
+* template: "welcome"
+* });
+* }
+* }
+* ```
+*
+* @example
+* **Multiple specialized consumers for different message types:**
+* ```ts
+* class NotificationService {
+* notificationQueue = $queue({
+* name: "notifications",
+* schema: t.object({
+* type: t.enum(["email", "sms", "push"]),
+* recipient: t.string(),
+* message: t.string(),
+* metadata: t.optional(t.record(t.string(), t.any()))
+* })
+* });
+*
+* // Email-specific consumer
+* emailConsumer = $consumer({
+* queue: this.notificationQueue,
+* handler: async (message) => {
+* if (message.payload.type === "email") {
+* await this.emailProvider.send({
+* to: message.payload.recipient,
+* subject: message.payload.metadata?.subject || "Notification",
+* body: message.payload.message
+* });
+* }
+* }
+* });
+*
+* // SMS-specific consumer
+* smsConsumer = $consumer({
+* queue: this.notificationQueue,
+* handler: async (message) => {
+* if (message.payload.type === "sms") {
+* await this.smsProvider.send({
+* to: message.payload.recipient,
+* message: message.payload.message
+* });
+* }
+* }
+* });
+*
+* // Push notification consumer
+* pushConsumer = $consumer({
+* queue: this.notificationQueue,
+* handler: async (message) => {
+* if (message.payload.type === "push") {
+* await this.pushProvider.send({
+* deviceToken: message.payload.recipient,
+* title: message.payload.metadata?.title || "Notification",
+* body: message.payload.message
+* });
+* }
+* }
+* });
+* }
+* ```
+*
+* @example
+* **Consumer with advanced error handling and logging:**
+* ```ts
+* class OrderProcessor {
+* orderQueue = $queue({
+* name: "order-processing",
+* schema: t.object({
+* orderId: t.string(),
+* customerId: t.string(),
+* items: t.array(t.object({
+* productId: t.string(),
+* quantity: t.number(),
+* price: t.number()
+* }))
+* })
+* });
+*
+* orderConsumer = $consumer({
+* queue: this.orderQueue,
+* handler: async (message) => {
+* const { orderId, customerId, items } = message.payload;
+*
+* try {
+* // Log processing start
+* this.logger.info(`Processing order ${orderId} for customer ${customerId}`);
+*
+* // Validate inventory
+* await this.validateInventory(items);
+*
+* // Process payment
+* const paymentResult = await this.processPayment(orderId, items);
+* if (!paymentResult.success) {
+* throw new Error(`Payment failed: ${paymentResult.error}`);
+* }
+*
+* // Update inventory
+* await this.updateInventory(items);
+*
+* // Create shipment
+* await this.createShipment(orderId, customerId);
+*
+* // Send confirmation
+* await this.sendOrderConfirmation(customerId, orderId);
+*
+* this.logger.info(`Order ${orderId} processed successfully`);
+*
+* } catch (error) {
+* // Log detailed error information
+* this.logger.error(`Failed to process order ${orderId}`, {
+* error: error.message,
+* orderId,
+* customerId,
+* itemCount: items.length
+* });
+*
+* // Re-throw to trigger queue retry mechanism
+* throw error;
+* }
+* }
+* });
+* }
+* ```
+*
+* @example
+* **Consumer for batch processing with performance optimization:**
+* ```ts
+* class DataProcessor {
+* dataQueue = $queue({
+* name: "data-processing",
+* schema: t.object({
+* batchId: t.string(),
+* records: t.array(t.object({
+* id: t.string(),
+* data: t.record(t.string(), t.any())
+* })),
+* processingOptions: t.object({
+* validateData: t.boolean(),
+* generateReport: t.boolean(),
+* notifyCompletion: t.boolean()
+* })
+* })
+* });
+*
+* dataConsumer = $consumer({
+* queue: this.dataQueue,
+* handler: async (message) => {
+* const { batchId, records, processingOptions } = message.payload;
+* const startTime = Date.now();
+*
+* this.logger.info(`Starting batch processing for ${batchId} with ${records.length} records`);
+*
+* try {
+* // Process records in chunks for better performance
+* const chunkSize = 100;
+* const chunks = this.chunkArray(records, chunkSize);
+*
+* for (let i = 0; i < chunks.length; i++) {
+* const chunk = chunks[i];
+*
+* if (processingOptions.validateData) {
+* await this.validateChunk(chunk);
+* }
+*
+* await this.processChunk(chunk);
+*
+* // Log progress
+* const progress = ((i + 1) / chunks.length) * 100;
+* this.logger.debug(`Batch ${batchId} progress: ${progress.toFixed(1)}%`);
+* }
+*
+* if (processingOptions.generateReport) {
+* await this.generateProcessingReport(batchId, records.length);
+* }
+*
+* if (processingOptions.notifyCompletion) {
+* await this.notifyBatchCompletion(batchId);
+* }
+*
+* const duration = Date.now() - startTime;
+* this.logger.info(`Batch ${batchId} completed in ${duration}ms`);
+*
+* } catch (error) {
+* const duration = Date.now() - startTime;
+* this.logger.error(`Batch ${batchId} failed after ${duration}ms`, error);
+* throw error;
+* }
+* }
+* });
+* }
+* ```
 */
 declare const $consumer: {
 <T extends TSchema>(options: ConsumerDescriptorOptions<T>): ConsumerDescriptor<T>;
 [KIND]: typeof ConsumerDescriptor;
 };
 interface ConsumerDescriptorOptions<T extends TSchema> {
+/**
+* The queue descriptor that this consumer will process messages from.
+*
+* This establishes the connection between the consumer and its source queue:
+* - The consumer inherits the queue's message schema for type safety
+* - Messages pushed to the queue will be automatically routed to this consumer
+* - Multiple consumers can be attached to the same queue for parallel processing
+* - The consumer will use the queue's provider and configuration settings
+*
+* **Queue Integration Benefits**:
+* - Type safety: Consumer handler gets fully typed message payloads
+* - Schema validation: Messages are validated before reaching the consumer
+* - Error handling: Failed messages can be retried or moved to dead letter queues
+* - Monitoring: Queue metrics include consumer processing statistics
+*
+* @example
+* ```ts
+* // First, define a queue
+* emailQueue = $queue({
+* name: "emails",
+* schema: t.object({ to: t.string(), subject: t.string() })
+* });
+*
+* // Then, create a consumer for that queue
+* emailConsumer = $consumer({
+* queue: this.emailQueue, // Reference the queue descriptor
+* handler: async (message) => { } // process email
+* });
+* ```
+*/
 queue: QueueDescriptor<T>;
+/**
+* Message handler function that processes individual messages from the queue.
+*
+* This function:
+* - Receives fully typed and validated message payloads from the connected queue
+* - Runs in the background worker system for non-blocking operation
+* - Should implement the core business logic for processing this message type
+* - Can throw errors to trigger the queue's retry mechanisms
+* - Has access to the full Alepha dependency injection container
+* - Should be idempotent to handle potential duplicate deliveries
+*
+* **Handler Design Guidelines**:
+* - Keep handlers focused on a single responsibility
+* - Use proper error handling and meaningful error messages
+* - Log important processing steps for debugging and monitoring
+* - Consider transaction boundaries for data consistency
+* - Make operations idempotent when possible
+* - Validate business rules within the handler logic
+*
+* **Error Handling Strategy**:
+* - Throw errors for temporary failures that should be retried
+* - Log and handle permanent failures gracefully
+* - Use specific error types to control retry behavior
+* - Consider implementing circuit breakers for external service calls
+*
+* @param message - The queue message containing the validated payload
+* @param message.payload - The typed message data based on the queue's schema
+* @returns Promise that resolves when processing is complete
+*
+* @example
+* ```ts
+* handler: async (message) => {
+* const { userId, action, data } = message.payload;
+*
+* try {
+* // Log processing start
+* this.logger.info(`Processing ${action} for user ${userId}`);
+*
+* // Validate business rules
+* if (!await this.userService.exists(userId)) {
+* throw new Error(`User ${userId} not found`);
+* }
+*
+* // Perform the main processing logic
+* switch (action) {
+* case "create":
+* await this.processCreation(userId, data);
+* break;
+* case "update":
+* await this.processUpdate(userId, data);
+* break;
+* default:
+* throw new Error(`Unknown action: ${action}`);
+* }
+*
+* // Log successful completion
+* this.logger.info(`Successfully processed ${action} for user ${userId}`);
+*
+* } catch (error) {
+* // Log error with context
+* this.logger.error(`Failed to process ${action} for user ${userId}`, {
+* error: error.message,
+* userId,
+* action,
+* data
+* });
+*
+* // Re-throw to trigger queue retry mechanism
+* throw error;
+* }
+* }
+* ```
+*/
 handler: (message: {
 payload: Static<T["payload"]>;
 }) => Promise<void>;
 }
 declare class ConsumerDescriptor<T extends TSchema> extends Descriptor<ConsumerDescriptorOptions<T>> {}
-//# sourceMappingURL=$consumer.d.ts.map
 //#endregion
 //#region src/index.d.ts
 /**
@@ -177,9 +820,7 @@ declare class ConsumerDescriptor<T extends TSchema> extends Descriptor<ConsumerD
 * @see {@link $consumer}
 * @module alepha.queue
 */
-declare const AlephaQueue:
-//# sourceMappingURL=index.d.ts.map
-
+declare const AlephaQueue: _alepha_core1.Service<_alepha_core1.Module>;
 //#endregion
 export { $consumer, $queue, AlephaQueue, ConsumerDescriptor, ConsumerDescriptorOptions, MemoryQueueProvider, QueueDescriptor, QueueDescriptorOptions, QueueMessage, QueueMessageSchema, QueueProvider };
 //# sourceMappingURL=index.d.ts.map
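
For orientation, a minimal sketch of the `$queue`/`$consumer` pairing exposed by the declarations above. Only `$queue`, `$consumer`, `t`, `push`, and the handler shape come from the typings in this diff; the `ReportService` class and its `buildReport` helper are hypothetical, and the example is illustrative rather than the library's documented usage.

```ts
import { t } from "alepha";
import { $queue, $consumer } from "alepha/queue";

class ReportService {
  // Queue definition: the TypeBox schema types and validates every pushed payload.
  reportQueue = $queue({
    name: "reports",
    schema: t.object({
      reportId: t.string(),
      format: t.enum(["csv", "pdf"]),
    }),
  });

  // Dedicated consumer: receives validated payloads via the background worker system.
  reportConsumer = $consumer({
    queue: this.reportQueue,
    handler: async (message) => {
      // buildReport is a hypothetical helper, not part of alepha.
      await this.buildReport(message.payload.reportId, message.payload.format);
    },
  });

  async requestReport(reportId: string) {
    // push() enqueues a schema-checked message for asynchronous processing.
    await this.reportQueue.push({ reportId, format: "pdf" });
  }

  private async buildReport(reportId: string, format: "csv" | "pdf") {
    // ...generate and store the report...
  }
}
```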