alepha 0.11.5 → 0.11.7

This diff shows the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (51)
  1. package/LICENSE +1 -1
  2. package/README.md +15 -100
  3. package/api/files.d.ts +168 -169
  4. package/api/jobs.d.ts +263 -154
  5. package/api/notifications.d.ts +28 -29
  6. package/api/users.d.ts +476 -477
  7. package/batch.d.ts +59 -493
  8. package/bucket.d.ts +20 -11
  9. package/cache/redis.d.ts +1 -1
  10. package/cache.d.ts +1 -1
  11. package/command.d.ts +24 -7
  12. package/core.d.ts +299 -222
  13. package/datetime.d.ts +4 -29
  14. package/devtools.d.ts +166 -284
  15. package/email.d.ts +45 -2
  16. package/fake.d.ts +1 -1
  17. package/file.d.ts +1 -1
  18. package/lock/redis.d.ts +1 -1
  19. package/lock.d.ts +1 -1
  20. package/logger.d.ts +11 -9
  21. package/package.json +51 -51
  22. package/postgres.d.ts +480 -198
  23. package/queue/redis.d.ts +1 -1
  24. package/queue.d.ts +1 -1
  25. package/react/auth.d.ts +7 -2
  26. package/react/form.d.ts +39 -16
  27. package/react/head.d.ts +1 -1
  28. package/react/i18n.d.ts +2 -2
  29. package/react.d.ts +66 -41
  30. package/redis.d.ts +1 -1
  31. package/retry.d.ts +84 -21
  32. package/scheduler.d.ts +1 -1
  33. package/security.d.ts +29 -29
  34. package/server/cache.d.ts +1 -1
  35. package/server/compress.d.ts +9 -3
  36. package/server/cookies.d.ts +1 -1
  37. package/server/cors.d.ts +28 -10
  38. package/server/health.d.ts +18 -19
  39. package/server/helmet.d.ts +44 -15
  40. package/server/links.d.ts +37 -31
  41. package/server/metrics.d.ts +1 -1
  42. package/server/multipart.d.ts +1 -1
  43. package/server/proxy.d.ts +1 -1
  44. package/server/security.d.ts +8 -3
  45. package/server/static.d.ts +1 -1
  46. package/server/swagger.d.ts +19 -6
  47. package/server.d.ts +1 -1
  48. package/topic/redis.d.ts +1 -1
  49. package/topic.d.ts +1 -1
  50. package/ui.d.ts +142 -12
  51. package/vite.d.ts +2 -2
package/batch.d.ts CHANGED
@@ -7,275 +7,8 @@ import { RetryDescriptorOptions } from "alepha/retry";
  import * as typebox0 from "typebox";
 
  //#region src/descriptors/$batch.d.ts
-
  /**
  * Creates a batch processing descriptor for efficient grouping and processing of multiple operations.
- *
- * This descriptor provides a powerful batching mechanism that collects multiple individual items
- * and processes them together in groups, significantly improving performance by reducing overhead
- * and enabling bulk operations. It supports partitioning, concurrent processing, automatic flushing,
- * and intelligent retry mechanisms for robust batch processing workflows.
- *
- * **Key Features**
- *
- * - **Intelligent Batching**: Groups items based on size and time thresholds
- * - **Partitioning Support**: Process different types of items in separate batches
- * - **Concurrent Processing**: Handle multiple batches simultaneously with configurable limits
- * - **Automatic Flushing**: Time-based and size-based automatic batch execution
- * - **Type Safety**: Full TypeScript support with schema validation using TypeBox
- * - **Retry Logic**: Built-in retry mechanisms for failed batch operations
- * - **Resource Management**: Automatic cleanup and graceful shutdown handling
- *
- * **Use Cases**
- *
- * Perfect for optimizing high-throughput operations:
- * - Database bulk inserts and updates
- * - API call batching and rate limit optimization
- * - Log aggregation and bulk shipping
- * - File processing and bulk uploads
- * - Event processing and analytics ingestion
- * - Notification delivery optimization
- * - Cache invalidation batching
- *
- * @example
- * **Basic database batch operations:**
- * ```ts
- * import { $batch } from "alepha/batch";
- * import { t } from "alepha";
- *
- * class UserService {
- * userBatch = $batch({
- * schema: t.object({
- * id: t.text(),
- * name: t.text(),
- * email: t.text(),
- * createdAt: t.optional(t.text())
- * }),
- * maxSize: 50, // Process up to 50 users at once
- * maxDuration: [5, "seconds"], // Or flush every 5 seconds
- * handler: async (users) => {
- * // Bulk insert users - much faster than individual inserts
- * console.log(`Processing batch of ${users.length} users`);
- *
- * const result = await this.database.users.insertMany(users.map(user => ({
- * ...user,
- * createdAt: user.createdAt || new Date().toISOString()
- * })));
- *
- * console.log(`Successfully inserted ${result.length} users`);
- * return { inserted: result.length, userIds: result.map(r => r.id) };
- * }
- * });
- *
- * async createUser(userData: { name: string; email: string }) {
- * // Individual calls are automatically batched
- * const result = await this.userBatch.push({
- * id: generateId(),
- * name: userData.name,
- * email: userData.email
- * });
- *
- * return result; // Returns the batch result once batch is processed
- * }
- * }
- * ```
- *
- * @example
- * **API call batching with partitioning:**
- * ```ts
- * class NotificationService {
- * notificationBatch = $batch({
- * schema: t.object({
- * userId: t.text(),
- * type: t.enum(["email", "sms", "push"]),
- * message: t.text(),
- * priority: t.enum(["high", "normal", "low"])
- * }),
- * maxSize: 100,
- * maxDuration: [10, "seconds"],
- * // Partition by notification type for different processing
- * partitionBy: (notification) => notification.type,
- * concurrency: 3, // Process up to 3 different types simultaneously
- * handler: async (notifications) => {
- * const type = notifications[0].type; // All items in batch have same type
- * console.log(`Processing ${notifications.length} ${type} notifications`);
- *
- * switch (type) {
- * case 'email':
- * return await this.emailProvider.sendBulk(notifications.map(n => ({
- * to: n.userId,
- * subject: 'Notification',
- * body: n.message,
- * priority: n.priority
- * })));
- *
- * case 'sms':
- * return await this.smsProvider.sendBulk(notifications.map(n => ({
- * to: n.userId,
- * message: n.message
- * })));
- *
- * case 'push':
- * return await this.pushProvider.sendBulk(notifications.map(n => ({
- * userId: n.userId,
- * title: 'Notification',
- * body: n.message,
- * priority: n.priority
- * })));
- * }
- * }
- * });
- *
- * async sendNotification(userId: string, type: 'email' | 'sms' | 'push', message: string, priority: 'high' | 'normal' | 'low' = 'normal') {
- * // Notifications are automatically batched by type
- * return await this.notificationBatch.push({
- * userId,
- * type,
- * message,
- * priority
- * });
- * }
- * }
- * ```
- *
- * @example
- * **Log aggregation with retry logic:**
- * ```ts
- * class LoggingService {
- * logBatch = $batch({
- * schema: t.object({
- * timestamp: t.number(),
- * level: t.enum(["info", "warn", "error"]),
- * message: t.text(),
- * metadata: t.optional(t.record(t.text(), t.any())),
- * source: t.text()
- * }),
- * maxSize: 1000, // Large batches for log efficiency
- * maxDuration: [30, "seconds"], // Longer duration for log aggregation
- * concurrency: 2, // Limit concurrent log shipments
- * retry: {
- * maxAttempts: 5,
- * delay: [2, "seconds"],
- * backoff: "exponential"
- * },
- * handler: async (logEntries) => {
- * console.log(`Shipping ${logEntries.length} log entries`);
- *
- * try {
- * // Ship logs to external service (e.g., Elasticsearch, Splunk)
- * const response = await this.logShipper.bulkIndex({
- * index: 'application-logs',
- * body: logEntries.map(entry => ([
- * { index: { _index: 'application-logs' } },
- * {
- * ...entry,
- * '@timestamp': new Date(entry.timestamp).toISOString()
- * }
- * ])).flat()
- * });
- *
- * if (response.errors) {
- * console.error(`Some log entries failed to index`, response.errors);
- * // Retry will be triggered by throwing
- * throw new Error(`Failed to index ${response.errors.length} log entries`);
- * }
- *
- * console.log(`Successfully shipped ${logEntries.length} log entries`);
- * return { shipped: logEntries.length, indexedAt: Date.now() };
- *
- * } catch (error) {
- * console.error(`Failed to ship logs batch`, error);
- * throw error; // Trigger retry mechanism
- * }
- * }
- * });
- *
- * async log(level: 'info' | 'warn' | 'error', message: string, metadata?: Record<string, any>, source: string = 'application') {
- * // Individual log calls are batched and shipped efficiently
- * return await this.logBatch.push({
- * timestamp: Date.now(),
- * level,
- * message,
- * metadata,
- * source
- * });
- * }
- * }
- * ```
- *
- * @example
- * **File processing with dynamic partitioning:**
- * ```ts
- * class FileProcessingService {
- * fileProcessingBatch = $batch({
- * schema: t.object({
- * filePath: t.text(),
- * fileType: t.enum(["image", "video", "document"]),
- * processingOptions: t.object({
- * quality: t.optional(t.enum(["low", "medium", "high"])),
- * format: t.optional(t.text()),
- * compress: t.optional(t.boolean())
- * }),
- * priority: t.enum(["urgent", "normal", "background"])
- * }),
- * maxSize: 20, // Smaller batches for file processing
- * maxDuration: [2, "minutes"], // Reasonable time for file accumulation
- * // Partition by file type and priority for optimal resource usage
- * partitionBy: (file) => `${file.fileType}-${file.priority}`,
- * concurrency: 4, // Multiple concurrent processing pipelines
- * retry: {
- * maxAttempts: 3,
- * delay: [5, "seconds"]
- * },
- * handler: async (files) => {
- * const fileType = files[0].fileType;
- * const priority = files[0].priority;
- *
- * console.log(`Processing ${files.length} ${fileType} files with ${priority} priority`);
- *
- * try {
- * const results = [];
- *
- * for (const file of files) {
- * const result = await this.processFile(file.filePath, file.fileType, file.processingOptions);
- * results.push({
- * originalPath: file.filePath,
- * processedPath: result.outputPath,
- * size: result.size,
- * duration: result.processingTime
- * });
- * }
- *
- * // Update database with batch results
- * await this.updateProcessingStatus(results);
- *
- * console.log(`Successfully processed ${files.length} ${fileType} files`);
- * return {
- * processed: files.length,
- * fileType,
- * priority,
- * totalSize: results.reduce((sum, r) => sum + r.size, 0),
- * results
- * };
- *
- * } catch (error) {
- * console.error(`Batch file processing failed for ${fileType} files`, error);
- * throw error;
- * }
- * }
- * });
- *
- * async processFile(filePath: string, fileType: 'image' | 'video' | 'document', options: any, priority: 'urgent' | 'normal' | 'background' = 'normal') {
- * // Files are automatically batched by type and priority
- * return await this.fileProcessingBatch.push({
- * filePath,
- * fileType,
- * processingOptions: options,
- * priority
- * });
- * }
- * }
- * ```
  */
  declare const $batch: {
  <TItem extends TSchema, TResponse>(options: BatchDescriptorOptions<TItem, TResponse>): BatchDescriptor<TItem, TResponse>;
@@ -284,264 +17,90 @@ declare const $batch: {
  interface BatchDescriptorOptions<TItem extends TSchema, TResponse = any> {
  /**
  * TypeBox schema for validating each item added to the batch.
- *
- * This schema:
- * - Validates every item pushed to the batch for data integrity
- * - Provides full TypeScript type inference for batch items
- * - Ensures type safety between item producers and batch handlers
- * - Enables automatic serialization/deserialization if needed
- *
- * **Schema Design Guidelines**:
- * - Keep schemas focused on the data needed for batch processing
- * - Use optional fields for data that might not always be present
- * - Include identifiers that might be needed for partitioning
- * - Consider versioning for schema evolution in long-running systems
- *
- * @example
- * ```ts
- * t.object({
- * id: t.text(),
- * operation: t.enum(["create", "update"]),
- * data: t.record(t.text(), t.any()),
- * timestamp: t.optional(t.number()),
- * priority: t.optional(t.enum(["high", "normal"]))
- * })
- * ```
  */
  schema: TItem;
  /**
  * The batch processing handler function that processes arrays of validated items.
- *
- * This handler:
- * - Receives an array of validated items based on the schema
- * - Should implement bulk operations for maximum efficiency
- * - Can be async and perform any operations (database, API calls, etc.)
- * - Should handle errors appropriately (retry logic is provided separately)
- * - Has access to the full Alepha dependency injection container
- * - Returns results that will be provided to all items in the batch
- *
- * **Handler Design Guidelines**:
- * - Implement true bulk operations rather than loops of individual operations
- * - Use transactions when processing related data for consistency
- * - Log batch processing progress and results for monitoring
- * - Handle partial failures gracefully when possible
- * - Consider memory usage for large batch sizes
- *
- * **Performance Considerations**:
- * - Batch operations should be significantly faster than individual operations
- * - Use database bulk operations (INSERT, UPDATE, etc.) when available
- * - Optimize for the expected batch size and data characteristics
- * - Consider connection pooling and resource limits
- *
- * @param items - Array of validated items to process in this batch
- * @returns Result that will be returned to all callers who contributed items to this batch
- *
- * @example
- * ```ts
- * handler: async (items) => {
- * console.log(`Processing batch of ${items.length} items`);
- *
- * try {
- * // Use bulk operations for maximum efficiency
- * const results = await this.database.transaction(async (tx) => {
- * const insertResults = await tx.items.insertMany(items);
- *
- * // Update related data in bulk
- * const updates = items.map(item => ({ id: item.id, processed: true }));
- * await tx.items.updateMany(updates);
- *
- * return insertResults;
- * });
- *
- * // Log successful processing
- * console.log(`Successfully processed ${items.length} items`);
- *
- * return {
- * processed: items.length,
- * results: results.map(r => ({ id: r.id, status: 'success' })),
- * timestamp: Date.now()
- * };
- *
- * } catch (error) {
- * console.error(`Batch processing failed for ${items.length} items`, error);
- * throw error; // Will trigger retry logic if configured
- * }
- * }
- * ```
  */
  handler: (items: Static<TItem>[]) => TResponse;
  /**
  * Maximum number of items to collect before automatically flushing the batch.
- *
- * When this threshold is reached, the batch will be processed immediately
- * regardless of the time duration. This provides an upper bound on batch size
- * and ensures processing doesn't wait indefinitely for more items.
- *
- * **Size Selection Guidelines**:
- * - Database operations: 100-1000 items depending on record size
- * - API calls: 10-100 items depending on rate limits and payload size
- * - File operations: 10-50 items depending on processing complexity
- * - Memory operations: 1000+ items for simple transformations
- *
- * **Trade-offs**:
- * - Larger batches: Better efficiency, higher memory usage, longer latency
- * - Smaller batches: Lower latency, less efficiency, more frequent processing
- *
- * @default 10
- *
- * @example 50 // Good for database bulk operations
- * @example 100 // Good for API batching with rate limits
- * @example 1000 // Good for high-throughput log processing
  */
  maxSize?: number;
  /**
  * Maximum time to wait before flushing a batch, even if it hasn't reached maxSize.
- *
- * This timer starts when the first item is added to a partition and ensures
- * that items don't wait indefinitely for a batch to fill up. It provides
- * a maximum latency guarantee for batch processing.
- *
- * **Duration Selection Guidelines**:
- * - Real-time systems: 100ms - 1 second for low latency
- * - Background processing: 5 - 30 seconds for higher throughput
- * - Bulk operations: 1 - 5 minutes for maximum efficiency
- * - Log shipping: 30 seconds - 2 minutes for log aggregation
- *
- * **Latency Impact**:
- * - Shorter durations: Lower latency, potentially smaller batches
- * - Longer durations: Higher throughput, potentially better efficiency
- *
- * @default [1, "second"]
- *
- * @example [500, "milliseconds"] // Low latency for real-time processing
- * @example [10, "seconds"] // Balanced latency and throughput
- * @example [2, "minutes"] // High throughput for bulk operations
  */
  maxDuration?: DurationLike;
  /**
  * Function to determine partition keys for grouping items into separate batches.
- *
- * Items with the same partition key are batched together, while items with
- * different keys are processed in separate batches. This enables:
- * - Processing different types of items with different logic
- * - Parallel processing of independent item groups
- * - Resource optimization based on item characteristics
- *
- * **Partitioning Strategies**:
- * - By type: Group similar operations together
- * - By destination: Group items going to the same endpoint
- * - By priority: Process high-priority items separately
- * - By size/complexity: Group items with similar processing requirements
- * - By tenant/user: Process items per customer or tenant
- *
- * **Partition Key Guidelines**:
- * - Use descriptive, consistent naming
- * - Keep key cardinality reasonable (avoid too many unique keys)
- * - Consider memory impact of multiple active partitions
- * - Balance between parallelism and resource usage
- *
- * If not provided, all items are placed in a single default partition.
- *
- * @param item - The validated item to determine partition for
- * @returns String key identifying the partition this item belongs to
- *
- * @example
- * ```ts
- * // Partition by operation type
- * partitionBy: (item) => item.operation,
- *
- * // Partition by priority and type
- * partitionBy: (item) => `${item.priority}-${item.type}`,
- *
- * // Partition by destination service
- * partitionBy: (item) => item.targetService,
- *
- * // Dynamic partitioning based on size
- * partitionBy: (item) => {
- * const size = JSON.stringify(item).length;
- * return size > 1000 ? 'large' : 'small';
- * }
- * ```
  */
  partitionBy?: (item: Static<TItem>) => string;
  /**
  * Maximum number of batch handlers that can execute simultaneously.
- *
- * This controls the level of parallelism for batch processing across
- * all partitions. Higher concurrency can improve throughput but may
- * increase resource usage and contention.
- *
- * **Concurrency Considerations**:
- * - Database operations: Limit based on connection pool size
- * - API calls: Consider rate limits and server capacity
- * - CPU-intensive operations: Set to number of CPU cores
- * - Memory-intensive operations: Consider available RAM
- * - I/O operations: Can be higher than CPU count
- *
- * **Resource Planning**:
- * - Each concurrent handler may use significant memory/connections
- * - Monitor resource usage and adjust based on system capacity
- * - Consider downstream system limits and capabilities
- *
- * @default 1
- *
- * @example 1 // Sequential processing, lowest resource usage
- * @example 4 // Moderate parallelism for balanced systems
- * @example 10 // High concurrency for I/O-bound operations
  */
  concurrency?: number;
  /**
  * Retry configuration for failed batch processing operations.
- *
- * When batch handlers fail, this configuration determines how and when
- * to retry the operation. Uses the `@alepha/retry` module for robust
- * retry logic with exponential backoff, jitter, and other strategies.
- *
- * **Retry Strategies**:
- * - Exponential backoff: Increasingly longer delays between attempts
- * - Fixed delays: Consistent intervals between retries
- * - Jitter: Random variation to avoid thundering herd problems
- *
- * **Failure Scenarios to Consider**:
- * - Temporary network issues
- * - Database connection problems
- * - Rate limiting from external services
- * - Resource exhaustion (memory, disk space)
- * - Downstream service temporary unavailability
- *
- * **Retry Guidelines**:
- * - Use exponential backoff for network-related failures
- * - Set reasonable max attempts to avoid infinite loops
- * - Consider the impact of retries on overall system performance
- * - Monitor retry patterns to identify systemic issues
- *
- * @example
- * ```ts
- * retry: {
- * maxAttempts: 3,
- * delay: [1, "second"],
- * backoff: "exponential",
- * maxDelay: [30, "seconds"],
- * jitter: true
- * }
- * ```
  */
  retry?: Omit<RetryDescriptorOptions<() => Array<Static<TItem>>>, "handler">;
  }
+ type BatchItemStatus = "pending" | "processing" | "completed" | "failed";
+ interface BatchItemState<TItem, TResponse> {
+ id: string;
+ item: TItem;
+ partitionKey: string;
+ status: BatchItemStatus;
+ result?: TResponse;
+ error?: Error;
+ promise?: Promise<TResponse>;
+ resolve?: (value: TResponse) => void;
+ reject?: (error: Error) => void;
+ }
+ interface PartitionState {
+ itemIds: string[];
+ timeout?: {
+ clear: () => void;
+ };
+ flushing: boolean;
+ }
  declare class BatchDescriptor<TItem extends TSchema, TResponse = any> extends Descriptor<BatchDescriptorOptions<TItem, TResponse>> {
  protected readonly log: _alepha_logger0.Logger;
  protected readonly dateTime: DateTimeProvider;
- protected readonly partitions: Map<any, any>;
+ protected readonly itemStates: Map<string, BatchItemState<typebox0.StaticType<[], "Decode", {}, {}, TItem>, TResponse>>;
+ protected readonly partitions: Map<string, PartitionState>;
  protected activeHandlers: PromiseWithResolvers<void>[];
+ protected isShuttingDown: boolean;
  protected get maxSize(): number;
  protected get concurrency(): number;
  protected get maxDuration(): DurationLike;
  protected retry: _alepha_retry0.RetryDescriptorFn<(items: typebox0.StaticType<[], "Decode", {}, {}, TItem>[]) => TResponse>;
  /**
- * Pushes an item into the batch. The item will be processed
- * asynchronously with other items when the batch is flushed.
+ * Pushes an item into the batch and returns immediately with a unique ID.
+ * The item will be processed asynchronously with other items when the batch is flushed.
+ * Use wait(id) to get the processing result.
+ */
+ push(item: Static<TItem>): Promise<string>;
+ /**
+ * Wait for a specific item to be processed and get its result.
+ * @param id The item ID returned from push()
+ * @returns The processing result
+ * @throws If the item doesn't exist or processing failed
  */
- push(item: Static<TItem>): Promise<TResponse>;
+ wait(id: string): Promise<TResponse>;
+ /**
+ * Get the current status of an item.
+ * @param id The item ID returned from push()
+ * @returns Status information or undefined if item doesn't exist
+ */
+ status(id: string): {
+ status: "pending" | "processing";
+ } | {
+ status: "completed";
+ result: TResponse;
+ } | {
+ status: "failed";
+ error: Error;
+ } | undefined;
  flush(partitionKey?: string): Promise<void>;
  protected flushPartition(partitionKey: string): Promise<void>;
  protected readonly dispose: _alepha_core1.HookDescriptor<"stop">;
@@ -571,9 +130,16 @@ declare class BatchDescriptor<TItem extends TSchema, TResponse = any> extends De
  * onReady = $hook({
  * on: "ready",
  * handler: async () => {
- * this.logBatch.push("Application started.");
- * this.logBatch.push("User authenticated.");
- * // ... more events pushed from elsewhere in the app
+ * // push() returns an ID immediately
+ * const id1 = await this.logBatch.push("Application started.");
+ * const id2 = await this.logBatch.push("User authenticated.");
+ *
+ * // optionally wait for processing to complete
+ * await this.logBatch.wait(id1);
+ *
+ * // or check the status
+ * const status = this.logBatch.status(id2);
+ * console.log(status?.status); // "pending" | "processing" | "completed" | "failed"
  * },
  * });
  * }
@@ -582,7 +148,7 @@ declare class BatchDescriptor<TItem extends TSchema, TResponse = any> extends De
  * @see {@link $batch}
  * @module alepha.batch
  */
- declare const AlephaBatch: _alepha_core1.Service<_alepha_core1.Module<{}>>;
+ declare const AlephaBatch: _alepha_core1.Service<_alepha_core1.Module>;
  //#endregion
- export { $batch, AlephaBatch, BatchDescriptor, BatchDescriptorOptions };
+ export { $batch, AlephaBatch, BatchDescriptor, BatchDescriptorOptions, BatchItemState, BatchItemStatus };
  //# sourceMappingURL=index.d.ts.map
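
The batch.d.ts changes above split the old single-call contract into a two-step flow: `push()` now resolves with a string item ID, and the handler's result is obtained separately with `wait(id)` or inspected with `status(id)`. Below is a minimal sketch of that 0.11.7 flow; the `AuditService` class, its schema fields, and the handler body are illustrative only, while `$batch`, `push`, `wait`, and `status` come from the declarations in this diff.

```ts
import { t } from "alepha";
import { $batch } from "alepha/batch";

class AuditService {
  auditBatch = $batch({
    schema: t.object({
      action: t.text(),
      userId: t.text(),
    }),
    maxSize: 100,
    maxDuration: [5, "seconds"],
    handler: async (events) => {
      // Bulk-persist the whole batch; this return value is what
      // wait(id) resolves with for every item in the batch.
      return { stored: events.length };
    },
  });

  async record(action: string, userId: string) {
    // 0.11.5: push() resolved with the handler result.
    // 0.11.7: push() resolves immediately with the item's ID.
    const id = await this.auditBatch.push({ action, userId });

    // Block until the batch containing this item has been flushed.
    const result = await this.auditBatch.wait(id); // { stored: number }

    // Or poll without blocking.
    const state = this.auditBatch.status(id); // e.g. { status: "completed", result }

    return { id, result, state };
  }
}
```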
package/bucket.d.ts CHANGED
@@ -1,5 +1,5 @@
  import * as _alepha_core1 from "alepha";
- import { Alepha, AlephaError, Descriptor, FileLike, KIND, Service } from "alepha";
+ import { Alepha, AlephaError, Descriptor, FileLike, KIND, Service, Static } from "alepha";
  import { FileSystem } from "alepha/file";
  import * as fs from "node:fs";
  import * as _alepha_logger0 from "alepha/logger";
@@ -448,15 +448,29 @@ declare class FileMetadataService {
  }
  //#endregion
  //#region src/providers/LocalFileStorageProvider.d.ts
+ /**
+ * Local file storage configuration atom
+ */
+ declare const localFileStorageOptions: _alepha_core1.Atom<typebox0.TObject<{
+ storagePath: typebox0.TString;
+ }>, "alepha.bucket.local.options">;
+ type LocalFileStorageProviderOptions = Static<typeof localFileStorageOptions.schema>;
+ declare module "alepha" {
+ interface State {
+ [localFileStorageOptions.key]: LocalFileStorageProviderOptions;
+ }
+ }
  declare class LocalFileStorageProvider implements FileStorageProvider {
  protected readonly alepha: Alepha;
  protected readonly log: _alepha_logger0.Logger;
  protected readonly metadataService: FileMetadataService;
  protected readonly fileSystem: FileSystem;
- options: {
+ protected readonly options: Readonly<{
  storagePath: string;
- };
- protected readonly configure: _alepha_core1.HookDescriptor<"start">;
+ }>;
+ protected get storagePath(): string;
+ protected readonly onConfigure: _alepha_core1.HookDescriptor<"configure">;
+ protected readonly onStart: _alepha_core1.HookDescriptor<"start">;
  upload(bucketName: string, file: FileLike, fileId?: string): Promise<string>;
  download(bucketName: string, fileId: string): Promise<FileLike>;
  exists(bucketName: string, fileId: string): Promise<boolean>;
@@ -466,11 +480,6 @@ declare class LocalFileStorageProvider implements FileStorageProvider {
  protected path(bucket: string, fileId?: string): string;
  protected isErrorNoEntry(error: unknown): boolean;
  }
- declare const fileMetadataSchema: typebox0.TObject<{
- name: typebox0.TString;
- type: typebox0.TString;
- size: typebox0.TNumber;
- }>;
  //#endregion
  //#region src/index.d.ts
  declare module "alepha" {
@@ -505,7 +514,7 @@ declare module "alepha" {
  * @see {@link FileStorageProvider}
  * @module alepha.bucket
  */
- declare const AlephaBucket: _alepha_core1.Service<_alepha_core1.Module<{}>>;
+ declare const AlephaBucket: _alepha_core1.Service<_alepha_core1.Module>;
  //#endregion
- export { $bucket, AlephaBucket, BucketDescriptor, BucketDescriptorOptions, BucketFileOptions, FileMetadata, FileMetadataService, FileNotFoundError, FileStorageProvider, LocalFileStorageProvider, MemoryFileStorageProvider, fileMetadataSchema };
+ export { $bucket, AlephaBucket, BucketDescriptor, BucketDescriptorOptions, BucketFileOptions, FileMetadata, FileMetadataService, FileNotFoundError, FileStorageProvider, LocalFileStorageProvider, LocalFileStorageProviderOptions, MemoryFileStorageProvider, localFileStorageOptions };
  //# sourceMappingURL=index.d.ts.map
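
bucket.d.ts now exposes the local provider's configuration as a typed atom (`localFileStorageOptions`, keyed `"alepha.bucket.local.options"`) registered into Alepha's `State`, replacing the old start-time `configure` hook and the removed `fileMetadataSchema` export. A rough sketch of consuming the new types follows; the `seedState` helper is hypothetical, since this diff does not show how `State` values are actually supplied (environment, bootstrap options, etc.).

```ts
import type { LocalFileStorageProviderOptions } from "alepha/bucket";

// Shape taken from the localFileStorageOptions atom added above.
const localStorageConfig: LocalFileStorageProviderOptions = {
  storagePath: "./var/buckets",
};

// Hypothetical helper standing in for however Alepha State values are seeded;
// it is not part of the diffed API.
declare function seedState(
  key: "alepha.bucket.local.options",
  value: LocalFileStorageProviderOptions,
): void;

seedState("alepha.bucket.local.options", localStorageConfig);
```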
package/cache/redis.d.ts CHANGED
@@ -34,7 +34,7 @@ declare class RedisCacheProvider implements CacheProvider {
  * @see {@link RedisCacheProvider}
  * @module alepha.cache.redis
  */
- declare const AlephaCacheRedis: _alepha_core1.Service<_alepha_core1.Module<{}>>;
+ declare const AlephaCacheRedis: _alepha_core1.Service<_alepha_core1.Module>;
  //#endregion
  export { AlephaCacheRedis, RedisCacheProvider };
  //# sourceMappingURL=index.d.ts.map