alepha 0.11.2 → 0.11.4
This diff shows the changes between publicly available package versions as released to their respective public registries, and is provided for informational purposes only.
- package/api/files.d.ts +1 -439
- package/api/jobs.d.ts +1 -218
- package/api/notifications.d.ts +1 -264
- package/api/users.d.ts +1 -924
- package/batch.d.ts +1 -585
- package/bucket.d.ts +1 -507
- package/cache/redis.d.ts +1 -40
- package/cache.d.ts +1 -288
- package/command.d.ts +1 -238
- package/core.d.ts +1 -1563
- package/datetime.d.ts +5 -3
- package/devtools.d.ts +1 -368
- package/email.d.ts +1 -144
- package/fake.cjs +8 -0
- package/fake.d.ts +1 -0
- package/fake.js +1 -0
- package/file.d.ts +1 -53
- package/lock/redis.d.ts +1 -24
- package/lock.d.ts +1 -552
- package/logger.d.ts +1 -284
- package/package.json +57 -50
- package/postgres.d.ts +1 -1931
- package/queue/redis.d.ts +1 -29
- package/queue.d.ts +1 -760
- package/react/auth.d.ts +1 -499
- package/react/form.d.ts +1 -188
- package/react/head.d.ts +1 -120
- package/react/i18n.d.ts +1 -118
- package/react.d.ts +1 -929
- package/redis.d.ts +1 -82
- package/scheduler.d.ts +1 -145
- package/security.d.ts +1 -586
- package/server/cache.d.ts +1 -163
- package/server/compress.d.ts +1 -32
- package/server/cookies.d.ts +1 -144
- package/server/cors.d.ts +1 -27
- package/server/health.d.ts +1 -59
- package/server/helmet.d.ts +1 -69
- package/server/links.d.ts +1 -316
- package/server/metrics.d.ts +1 -35
- package/server/multipart.d.ts +1 -42
- package/server/proxy.d.ts +1 -234
- package/server/security.d.ts +1 -87
- package/server/static.d.ts +1 -119
- package/server/swagger.d.ts +1 -148
- package/server.d.ts +1 -849
- package/topic/redis.d.ts +1 -42
- package/topic.d.ts +1 -819
- package/ui.d.ts +1 -619
- package/vite.d.ts +1 -197
package/batch.d.ts
CHANGED
@@ -1,585 +1 @@
-
-import { Descriptor, KIND, Static, TSchema } from "alepha";
-import { DateTimeProvider, DurationLike } from "alepha/datetime";
-import * as _alepha_logger0 from "alepha/logger";
-import * as _alepha_retry0 from "alepha/retry";
-import { RetryDescriptorOptions } from "alepha/retry";
-import * as typebox0 from "typebox";
-
-//#region src/descriptors/$batch.d.ts
-
-/**
- * Creates a batch processing descriptor for efficient grouping and processing of multiple operations.
- *
- * This descriptor provides a powerful batching mechanism that collects multiple individual items
- * and processes them together in groups, significantly improving performance by reducing overhead
- * and enabling bulk operations. It supports partitioning, concurrent processing, automatic flushing,
- * and intelligent retry mechanisms for robust batch processing workflows.
- *
- * **Key Features**
- *
- * - **Intelligent Batching**: Groups items based on size and time thresholds
- * - **Partitioning Support**: Process different types of items in separate batches
- * - **Concurrent Processing**: Handle multiple batches simultaneously with configurable limits
- * - **Automatic Flushing**: Time-based and size-based automatic batch execution
- * - **Type Safety**: Full TypeScript support with schema validation using TypeBox
- * - **Retry Logic**: Built-in retry mechanisms for failed batch operations
- * - **Resource Management**: Automatic cleanup and graceful shutdown handling
- *
- * **Use Cases**
- *
- * Perfect for optimizing high-throughput operations:
- * - Database bulk inserts and updates
- * - API call batching and rate limit optimization
- * - Log aggregation and bulk shipping
- * - File processing and bulk uploads
- * - Event processing and analytics ingestion
- * - Notification delivery optimization
- * - Cache invalidation batching
- *
- * @example
- * **Basic database batch operations:**
- * ```ts
- * import { $batch } from "alepha/batch";
- * import { t } from "alepha";
- *
- * class UserService {
- *   userBatch = $batch({
- *     schema: t.object({
- *       id: t.text(),
- *       name: t.text(),
- *       email: t.text(),
- *       createdAt: t.optional(t.text())
- *     }),
- *     maxSize: 50, // Process up to 50 users at once
- *     maxDuration: [5, "seconds"], // Or flush every 5 seconds
- *     handler: async (users) => {
- *       // Bulk insert users - much faster than individual inserts
- *       console.log(`Processing batch of ${users.length} users`);
- *
- *       const result = await this.database.users.insertMany(users.map(user => ({
- *         ...user,
- *         createdAt: user.createdAt || new Date().toISOString()
- *       })));
- *
- *       console.log(`Successfully inserted ${result.length} users`);
- *       return { inserted: result.length, userIds: result.map(r => r.id) };
- *     }
- *   });
- *
- *   async createUser(userData: { name: string; email: string }) {
- *     // Individual calls are automatically batched
- *     const result = await this.userBatch.push({
- *       id: generateId(),
- *       name: userData.name,
- *       email: userData.email
- *     });
- *
- *     return result; // Returns the batch result once batch is processed
- *   }
- * }
- * ```
- *
- * @example
- * **API call batching with partitioning:**
- * ```ts
- * class NotificationService {
- *   notificationBatch = $batch({
- *     schema: t.object({
- *       userId: t.text(),
- *       type: t.enum(["email", "sms", "push"]),
- *       message: t.text(),
- *       priority: t.enum(["high", "normal", "low"])
- *     }),
- *     maxSize: 100,
- *     maxDuration: [10, "seconds"],
- *     // Partition by notification type for different processing
- *     partitionBy: (notification) => notification.type,
- *     concurrency: 3, // Process up to 3 different types simultaneously
- *     handler: async (notifications) => {
- *       const type = notifications[0].type; // All items in batch have same type
- *       console.log(`Processing ${notifications.length} ${type} notifications`);
- *
- *       switch (type) {
- *         case 'email':
- *           return await this.emailProvider.sendBulk(notifications.map(n => ({
- *             to: n.userId,
- *             subject: 'Notification',
- *             body: n.message,
- *             priority: n.priority
- *           })));
- *
- *         case 'sms':
- *           return await this.smsProvider.sendBulk(notifications.map(n => ({
- *             to: n.userId,
- *             message: n.message
- *           })));
- *
- *         case 'push':
- *           return await this.pushProvider.sendBulk(notifications.map(n => ({
- *             userId: n.userId,
- *             title: 'Notification',
- *             body: n.message,
- *             priority: n.priority
- *           })));
- *       }
- *     }
- *   });
- *
- *   async sendNotification(userId: string, type: 'email' | 'sms' | 'push', message: string, priority: 'high' | 'normal' | 'low' = 'normal') {
- *     // Notifications are automatically batched by type
- *     return await this.notificationBatch.push({
- *       userId,
- *       type,
- *       message,
- *       priority
- *     });
- *   }
- * }
- * ```
- *
- * @example
- * **Log aggregation with retry logic:**
- * ```ts
- * class LoggingService {
- *   logBatch = $batch({
- *     schema: t.object({
- *       timestamp: t.number(),
- *       level: t.enum(["info", "warn", "error"]),
- *       message: t.text(),
- *       metadata: t.optional(t.record(t.text(), t.any())),
- *       source: t.text()
- *     }),
- *     maxSize: 1000, // Large batches for log efficiency
- *     maxDuration: [30, "seconds"], // Longer duration for log aggregation
- *     concurrency: 2, // Limit concurrent log shipments
- *     retry: {
- *       maxAttempts: 5,
- *       delay: [2, "seconds"],
- *       backoff: "exponential"
- *     },
- *     handler: async (logEntries) => {
- *       console.log(`Shipping ${logEntries.length} log entries`);
- *
- *       try {
- *         // Ship logs to external service (e.g., Elasticsearch, Splunk)
- *         const response = await this.logShipper.bulkIndex({
- *           index: 'application-logs',
- *           body: logEntries.map(entry => ([
- *             { index: { _index: 'application-logs' } },
- *             {
- *               ...entry,
- *               '@timestamp': new Date(entry.timestamp).toISOString()
- *             }
- *           ])).flat()
- *         });
- *
- *         if (response.errors) {
- *           console.error(`Some log entries failed to index`, response.errors);
- *           // Retry will be triggered by throwing
- *           throw new Error(`Failed to index ${response.errors.length} log entries`);
- *         }
- *
- *         console.log(`Successfully shipped ${logEntries.length} log entries`);
- *         return { shipped: logEntries.length, indexedAt: Date.now() };
- *
- *       } catch (error) {
- *         console.error(`Failed to ship logs batch`, error);
- *         throw error; // Trigger retry mechanism
- *       }
- *     }
- *   });
- *
- *   async log(level: 'info' | 'warn' | 'error', message: string, metadata?: Record<string, any>, source: string = 'application') {
- *     // Individual log calls are batched and shipped efficiently
- *     return await this.logBatch.push({
- *       timestamp: Date.now(),
- *       level,
- *       message,
- *       metadata,
- *       source
- *     });
- *   }
- * }
- * ```
- *
- * @example
- * **File processing with dynamic partitioning:**
- * ```ts
- * class FileProcessingService {
- *   fileProcessingBatch = $batch({
- *     schema: t.object({
- *       filePath: t.text(),
- *       fileType: t.enum(["image", "video", "document"]),
- *       processingOptions: t.object({
- *         quality: t.optional(t.enum(["low", "medium", "high"])),
- *         format: t.optional(t.text()),
- *         compress: t.optional(t.boolean())
- *       }),
- *       priority: t.enum(["urgent", "normal", "background"])
- *     }),
- *     maxSize: 20, // Smaller batches for file processing
- *     maxDuration: [2, "minutes"], // Reasonable time for file accumulation
- *     // Partition by file type and priority for optimal resource usage
- *     partitionBy: (file) => `${file.fileType}-${file.priority}`,
- *     concurrency: 4, // Multiple concurrent processing pipelines
- *     retry: {
- *       maxAttempts: 3,
- *       delay: [5, "seconds"]
- *     },
- *     handler: async (files) => {
- *       const fileType = files[0].fileType;
- *       const priority = files[0].priority;
- *
- *       console.log(`Processing ${files.length} ${fileType} files with ${priority} priority`);
- *
- *       try {
- *         const results = [];
- *
- *         for (const file of files) {
- *           const result = await this.processFile(file.filePath, file.fileType, file.processingOptions);
- *           results.push({
- *             originalPath: file.filePath,
- *             processedPath: result.outputPath,
- *             size: result.size,
- *             duration: result.processingTime
- *           });
- *         }
- *
- *         // Update database with batch results
- *         await this.updateProcessingStatus(results);
- *
- *         console.log(`Successfully processed ${files.length} ${fileType} files`);
- *         return {
- *           processed: files.length,
- *           fileType,
- *           priority,
- *           totalSize: results.reduce((sum, r) => sum + r.size, 0),
- *           results
- *         };
- *
- *       } catch (error) {
- *         console.error(`Batch file processing failed for ${fileType} files`, error);
- *         throw error;
- *       }
- *     }
- *   });
- *
- *   async processFile(filePath: string, fileType: 'image' | 'video' | 'document', options: any, priority: 'urgent' | 'normal' | 'background' = 'normal') {
- *     // Files are automatically batched by type and priority
- *     return await this.fileProcessingBatch.push({
- *       filePath,
- *       fileType,
- *       processingOptions: options,
- *       priority
- *     });
- *   }
- * }
- * ```
- */
-declare const $batch: {
-  <TItem extends TSchema, TResponse>(options: BatchDescriptorOptions<TItem, TResponse>): BatchDescriptor<TItem, TResponse>;
-  [KIND]: typeof BatchDescriptor;
-};
-interface BatchDescriptorOptions<TItem extends TSchema, TResponse = any> {
-  /**
-   * TypeBox schema for validating each item added to the batch.
-   *
-   * This schema:
-   * - Validates every item pushed to the batch for data integrity
-   * - Provides full TypeScript type inference for batch items
-   * - Ensures type safety between item producers and batch handlers
-   * - Enables automatic serialization/deserialization if needed
-   *
-   * **Schema Design Guidelines**:
-   * - Keep schemas focused on the data needed for batch processing
-   * - Use optional fields for data that might not always be present
-   * - Include identifiers that might be needed for partitioning
-   * - Consider versioning for schema evolution in long-running systems
-   *
-   * @example
-   * ```ts
-   * t.object({
-   *   id: t.text(),
-   *   operation: t.enum(["create", "update"]),
-   *   data: t.record(t.text(), t.any()),
-   *   timestamp: t.optional(t.number()),
-   *   priority: t.optional(t.enum(["high", "normal"]))
-   * })
-   * ```
-   */
-  schema: TItem;
-  /**
-   * The batch processing handler function that processes arrays of validated items.
-   *
-   * This handler:
-   * - Receives an array of validated items based on the schema
-   * - Should implement bulk operations for maximum efficiency
-   * - Can be async and perform any operations (database, API calls, etc.)
-   * - Should handle errors appropriately (retry logic is provided separately)
-   * - Has access to the full Alepha dependency injection container
-   * - Returns results that will be provided to all items in the batch
-   *
-   * **Handler Design Guidelines**:
-   * - Implement true bulk operations rather than loops of individual operations
-   * - Use transactions when processing related data for consistency
-   * - Log batch processing progress and results for monitoring
-   * - Handle partial failures gracefully when possible
-   * - Consider memory usage for large batch sizes
-   *
-   * **Performance Considerations**:
-   * - Batch operations should be significantly faster than individual operations
-   * - Use database bulk operations (INSERT, UPDATE, etc.) when available
-   * - Optimize for the expected batch size and data characteristics
-   * - Consider connection pooling and resource limits
-   *
-   * @param items - Array of validated items to process in this batch
-   * @returns Result that will be returned to all callers who contributed items to this batch
-   *
-   * @example
-   * ```ts
-   * handler: async (items) => {
-   *   console.log(`Processing batch of ${items.length} items`);
-   *
-   *   try {
-   *     // Use bulk operations for maximum efficiency
-   *     const results = await this.database.transaction(async (tx) => {
-   *       const insertResults = await tx.items.insertMany(items);
-   *
-   *       // Update related data in bulk
-   *       const updates = items.map(item => ({ id: item.id, processed: true }));
-   *       await tx.items.updateMany(updates);
-   *
-   *       return insertResults;
-   *     });
-   *
-   *     // Log successful processing
-   *     console.log(`Successfully processed ${items.length} items`);
-   *
-   *     return {
-   *       processed: items.length,
-   *       results: results.map(r => ({ id: r.id, status: 'success' })),
-   *       timestamp: Date.now()
-   *     };
-   *
-   *   } catch (error) {
-   *     console.error(`Batch processing failed for ${items.length} items`, error);
-   *     throw error; // Will trigger retry logic if configured
-   *   }
-   * }
-   * ```
-   */
-  handler: (items: Static<TItem>[]) => TResponse;
-  /**
-   * Maximum number of items to collect before automatically flushing the batch.
-   *
-   * When this threshold is reached, the batch will be processed immediately
-   * regardless of the time duration. This provides an upper bound on batch size
-   * and ensures processing doesn't wait indefinitely for more items.
-   *
-   * **Size Selection Guidelines**:
-   * - Database operations: 100-1000 items depending on record size
-   * - API calls: 10-100 items depending on rate limits and payload size
-   * - File operations: 10-50 items depending on processing complexity
-   * - Memory operations: 1000+ items for simple transformations
-   *
-   * **Trade-offs**:
-   * - Larger batches: Better efficiency, higher memory usage, longer latency
-   * - Smaller batches: Lower latency, less efficiency, more frequent processing
-   *
-   * @default 10
-   *
-   * @example 50 // Good for database bulk operations
-   * @example 100 // Good for API batching with rate limits
-   * @example 1000 // Good for high-throughput log processing
-   */
-  maxSize?: number;
-  /**
-   * Maximum time to wait before flushing a batch, even if it hasn't reached maxSize.
-   *
-   * This timer starts when the first item is added to a partition and ensures
-   * that items don't wait indefinitely for a batch to fill up. It provides
-   * a maximum latency guarantee for batch processing.
-   *
-   * **Duration Selection Guidelines**:
-   * - Real-time systems: 100ms - 1 second for low latency
-   * - Background processing: 5 - 30 seconds for higher throughput
-   * - Bulk operations: 1 - 5 minutes for maximum efficiency
-   * - Log shipping: 30 seconds - 2 minutes for log aggregation
-   *
-   * **Latency Impact**:
-   * - Shorter durations: Lower latency, potentially smaller batches
-   * - Longer durations: Higher throughput, potentially better efficiency
-   *
-   * @default [1, "second"]
-   *
-   * @example [500, "milliseconds"] // Low latency for real-time processing
-   * @example [10, "seconds"] // Balanced latency and throughput
-   * @example [2, "minutes"] // High throughput for bulk operations
-   */
-  maxDuration?: DurationLike;
-  /**
-   * Function to determine partition keys for grouping items into separate batches.
-   *
-   * Items with the same partition key are batched together, while items with
-   * different keys are processed in separate batches. This enables:
-   * - Processing different types of items with different logic
-   * - Parallel processing of independent item groups
-   * - Resource optimization based on item characteristics
-   *
-   * **Partitioning Strategies**:
-   * - By type: Group similar operations together
-   * - By destination: Group items going to the same endpoint
-   * - By priority: Process high-priority items separately
-   * - By size/complexity: Group items with similar processing requirements
-   * - By tenant/user: Process items per customer or tenant
-   *
-   * **Partition Key Guidelines**:
-   * - Use descriptive, consistent naming
-   * - Keep key cardinality reasonable (avoid too many unique keys)
-   * - Consider memory impact of multiple active partitions
-   * - Balance between parallelism and resource usage
-   *
-   * If not provided, all items are placed in a single default partition.
-   *
-   * @param item - The validated item to determine partition for
-   * @returns String key identifying the partition this item belongs to
-   *
-   * @example
-   * ```ts
-   * // Partition by operation type
-   * partitionBy: (item) => item.operation,
-   *
-   * // Partition by priority and type
-   * partitionBy: (item) => `${item.priority}-${item.type}`,
-   *
-   * // Partition by destination service
-   * partitionBy: (item) => item.targetService,
-   *
-   * // Dynamic partitioning based on size
-   * partitionBy: (item) => {
-   *   const size = JSON.stringify(item).length;
-   *   return size > 1000 ? 'large' : 'small';
-   * }
-   * ```
-   */
-  partitionBy?: (item: Static<TItem>) => string;
-  /**
-   * Maximum number of batch handlers that can execute simultaneously.
-   *
-   * This controls the level of parallelism for batch processing across
-   * all partitions. Higher concurrency can improve throughput but may
-   * increase resource usage and contention.
-   *
-   * **Concurrency Considerations**:
-   * - Database operations: Limit based on connection pool size
-   * - API calls: Consider rate limits and server capacity
-   * - CPU-intensive operations: Set to number of CPU cores
-   * - Memory-intensive operations: Consider available RAM
-   * - I/O operations: Can be higher than CPU count
-   *
-   * **Resource Planning**:
-   * - Each concurrent handler may use significant memory/connections
-   * - Monitor resource usage and adjust based on system capacity
-   * - Consider downstream system limits and capabilities
-   *
-   * @default 1
-   *
-   * @example 1 // Sequential processing, lowest resource usage
-   * @example 4 // Moderate parallelism for balanced systems
-   * @example 10 // High concurrency for I/O-bound operations
-   */
-  concurrency?: number;
-  /**
-   * Retry configuration for failed batch processing operations.
-   *
-   * When batch handlers fail, this configuration determines how and when
-   * to retry the operation. Uses the `@alepha/retry` module for robust
-   * retry logic with exponential backoff, jitter, and other strategies.
-   *
-   * **Retry Strategies**:
-   * - Exponential backoff: Increasingly longer delays between attempts
-   * - Fixed delays: Consistent intervals between retries
-   * - Jitter: Random variation to avoid thundering herd problems
-   *
-   * **Failure Scenarios to Consider**:
-   * - Temporary network issues
-   * - Database connection problems
-   * - Rate limiting from external services
-   * - Resource exhaustion (memory, disk space)
-   * - Downstream service temporary unavailability
-   *
-   * **Retry Guidelines**:
-   * - Use exponential backoff for network-related failures
-   * - Set reasonable max attempts to avoid infinite loops
-   * - Consider the impact of retries on overall system performance
-   * - Monitor retry patterns to identify systemic issues
-   *
-   * @example
-   * ```ts
-   * retry: {
-   *   maxAttempts: 3,
-   *   delay: [1, "second"],
-   *   backoff: "exponential",
-   *   maxDelay: [30, "seconds"],
-   *   jitter: true
-   * }
-   * ```
-   */
-  retry?: Omit<RetryDescriptorOptions<() => Array<Static<TItem>>>, "handler">;
-}
-declare class BatchDescriptor<TItem extends TSchema, TResponse = any> extends Descriptor<BatchDescriptorOptions<TItem, TResponse>> {
-  protected readonly log: _alepha_logger0.Logger;
-  protected readonly dateTime: DateTimeProvider;
-  protected readonly partitions: Map<any, any>;
-  protected activeHandlers: PromiseWithResolvers<void>[];
-  protected retry: _alepha_retry0.RetryDescriptorFn<(items: typebox0.StaticType<[], "Decode", {}, {}, TItem>[]) => TResponse>;
-  /**
-   * Pushes an item into the batch. The item will be processed
-   * asynchronously with other items when the batch is flushed.
-   */
-  push(item: Static<TItem>): Promise<TResponse>;
-  flush(partitionKey?: string): Promise<void>;
-  protected flushPartition(partitionKey: string): Promise<void>;
-  protected readonly dispose: _alepha_core1.HookDescriptor<"stop">;
-}
-//#endregion
-//#region src/index.d.ts
-/**
- * This module allows you to group multiple asynchronous operations into a single "batch," which is then processed together.
- * This is an essential pattern for improving performance, reducing I/O, and interacting efficiently with rate-limited APIs or databases.
- *
- * ```ts
- * import { Alepha, $hook, run, t } from "alepha";
- * import { $batch } from "alepha/batch";
- *
- * class LoggingService {
- *   // define the batch processor
- *   logBatch = $batch({
- *     schema: t.text(),
- *     maxSize: 10,
- *     maxDuration: [5, "seconds"],
- *     handler: async (items) => {
- *       console.log(`[BATCH LOG] Processing ${items.length} events:`, items);
- *     },
- *   });
- *
- *   // example of how to use it
- *   onReady = $hook({
- *     on: "ready",
- *     handler: async () => {
- *       this.logBatch.push("Application started.");
- *       this.logBatch.push("User authenticated.");
- *       // ... more events pushed from elsewhere in the app
- *     },
- *   });
- * }
- * ```
- *
- * @see {@link $batch}
- * @module alepha.batch
- */
-declare const AlephaBatch: _alepha_core1.Service<_alepha_core1.Module<{}>>;
-//#endregion
-export { $batch, AlephaBatch, BatchDescriptor, BatchDescriptorOptions };
-//# sourceMappingURL=index.d.ts.map
+export * from '@alepha/batch';