alepha 0.11.4 → 0.11.6
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/api/files.d.ts +439 -1
- package/api/jobs.d.ts +290 -1
- package/api/notifications.d.ts +264 -1
- package/api/users.d.ts +924 -1
- package/batch.d.ts +154 -1
- package/bucket.d.ts +520 -1
- package/cache/redis.d.ts +40 -1
- package/cache.d.ts +288 -1
- package/command.d.ts +269 -1
- package/core.d.ts +1877 -1
- package/datetime.d.ts +1 -1
- package/devtools.d.ts +408 -1
- package/email.d.ts +187 -1
- package/fake.d.ts +73 -1
- package/file.d.ts +528 -1
- package/lock/redis.d.ts +24 -1
- package/lock.d.ts +552 -1
- package/logger.d.ts +288 -1
- package/package.json +51 -51
- package/postgres.d.ts +2146 -1
- package/queue/redis.d.ts +29 -1
- package/queue.d.ts +760 -1
- package/react/auth.d.ts +504 -1
- package/react/form.d.ts +208 -1
- package/react/head.d.ts +120 -1
- package/react/i18n.d.ts +168 -1
- package/react.d.ts +1261 -1
- package/redis.d.ts +82 -1
- package/retry.d.ts +84 -21
- package/scheduler.d.ts +145 -1
- package/security.d.ts +586 -1
- package/server/cache.d.ts +163 -1
- package/server/compress.d.ts +38 -1
- package/server/cookies.d.ts +144 -1
- package/server/cors.d.ts +45 -1
- package/server/health.d.ts +59 -1
- package/server/helmet.d.ts +98 -1
- package/server/links.d.ts +322 -1
- package/server/metrics.d.ts +35 -1
- package/server/multipart.d.ts +42 -1
- package/server/proxy.d.ts +234 -1
- package/server/security.d.ts +92 -1
- package/server/static.d.ts +119 -1
- package/server/swagger.d.ts +161 -1
- package/server.d.ts +849 -1
- package/topic/redis.d.ts +42 -1
- package/topic.d.ts +819 -1
- package/ui.d.ts +786 -1
- package/vite.d.ts +186 -1
package/batch.d.ts
CHANGED
@@ -1 +1,154 @@
-
+import * as _alepha_core1 from "alepha";
+import { Descriptor, KIND, Static, TSchema } from "alepha";
+import { DateTimeProvider, DurationLike } from "alepha/datetime";
+import * as _alepha_logger0 from "alepha/logger";
+import * as _alepha_retry0 from "alepha/retry";
+import { RetryDescriptorOptions } from "alepha/retry";
+import * as typebox0 from "typebox";
+
+//#region src/descriptors/$batch.d.ts
+/**
+ * Creates a batch processing descriptor for efficient grouping and processing of multiple operations.
+ */
+declare const $batch: {
+  <TItem extends TSchema, TResponse>(options: BatchDescriptorOptions<TItem, TResponse>): BatchDescriptor<TItem, TResponse>;
+  [KIND]: typeof BatchDescriptor;
+};
+interface BatchDescriptorOptions<TItem extends TSchema, TResponse = any> {
+  /**
+   * TypeBox schema for validating each item added to the batch.
+   */
+  schema: TItem;
+  /**
+   * The batch processing handler function that processes arrays of validated items.
+   */
+  handler: (items: Static<TItem>[]) => TResponse;
+  /**
+   * Maximum number of items to collect before automatically flushing the batch.
+   */
+  maxSize?: number;
+  /**
+   * Maximum time to wait before flushing a batch, even if it hasn't reached maxSize.
+   */
+  maxDuration?: DurationLike;
+  /**
+   * Function to determine partition keys for grouping items into separate batches.
+   */
+  partitionBy?: (item: Static<TItem>) => string;
+  /**
+   * Maximum number of batch handlers that can execute simultaneously.
+   */
+  concurrency?: number;
+  /**
+   * Retry configuration for failed batch processing operations.
+   */
+  retry?: Omit<RetryDescriptorOptions<() => Array<Static<TItem>>>, "handler">;
+}
+type BatchItemStatus = "pending" | "processing" | "completed" | "failed";
+interface BatchItemState<TItem, TResponse> {
+  id: string;
+  item: TItem;
+  partitionKey: string;
+  status: BatchItemStatus;
+  result?: TResponse;
+  error?: Error;
+  promise?: Promise<TResponse>;
+  resolve?: (value: TResponse) => void;
+  reject?: (error: Error) => void;
+}
+interface PartitionState {
+  itemIds: string[];
+  timeout?: {
+    clear: () => void;
+  };
+  flushing: boolean;
+}
+declare class BatchDescriptor<TItem extends TSchema, TResponse = any> extends Descriptor<BatchDescriptorOptions<TItem, TResponse>> {
+  protected readonly log: _alepha_logger0.Logger;
+  protected readonly dateTime: DateTimeProvider;
+  protected readonly itemStates: Map<string, BatchItemState<typebox0.StaticType<[], "Decode", {}, {}, TItem>, TResponse>>;
+  protected readonly partitions: Map<string, PartitionState>;
+  protected activeHandlers: PromiseWithResolvers<void>[];
+  protected isShuttingDown: boolean;
+  protected get maxSize(): number;
+  protected get concurrency(): number;
+  protected get maxDuration(): DurationLike;
+  protected retry: _alepha_retry0.RetryDescriptorFn<(items: typebox0.StaticType<[], "Decode", {}, {}, TItem>[]) => TResponse>;
+  /**
+   * Pushes an item into the batch and returns immediately with a unique ID.
+   * The item will be processed asynchronously with other items when the batch is flushed.
+   * Use wait(id) to get the processing result.
+   */
+  push(item: Static<TItem>): Promise<string>;
+  /**
+   * Wait for a specific item to be processed and get its result.
+   * @param id The item ID returned from push()
+   * @returns The processing result
+   * @throws If the item doesn't exist or processing failed
+   */
+  wait(id: string): Promise<TResponse>;
+  /**
+   * Get the current status of an item.
+   * @param id The item ID returned from push()
+   * @returns Status information or undefined if item doesn't exist
+   */
+  status(id: string): {
+    status: "pending" | "processing";
+  } | {
+    status: "completed";
+    result: TResponse;
+  } | {
+    status: "failed";
+    error: Error;
+  } | undefined;
+  flush(partitionKey?: string): Promise<void>;
+  protected flushPartition(partitionKey: string): Promise<void>;
+  protected readonly dispose: _alepha_core1.HookDescriptor<"stop">;
+}
+//#endregion
+//#region src/index.d.ts
+/**
+ * This module allows you to group multiple asynchronous operations into a single "batch," which is then processed together.
+ * This is an essential pattern for improving performance, reducing I/O, and interacting efficiently with rate-limited APIs or databases.
+ *
+ * ```ts
+ * import { Alepha, $hook, run, t } from "alepha";
+ * import { $batch } from "alepha/batch";
+ *
+ * class LoggingService {
+ *   // define the batch processor
+ *   logBatch = $batch({
+ *     schema: t.text(),
+ *     maxSize: 10,
+ *     maxDuration: [5, "seconds"],
+ *     handler: async (items) => {
+ *       console.log(`[BATCH LOG] Processing ${items.length} events:`, items);
+ *     },
+ *   });
+ *
+ *   // example of how to use it
+ *   onReady = $hook({
+ *     on: "ready",
+ *     handler: async () => {
+ *       // push() returns an ID immediately
+ *       const id1 = await this.logBatch.push("Application started.");
+ *       const id2 = await this.logBatch.push("User authenticated.");
+ *
+ *       // optionally wait for processing to complete
+ *       await this.logBatch.wait(id1);
+ *
+ *       // or check the status
+ *       const status = this.logBatch.status(id2);
+ *       console.log(status?.status); // "pending" | "processing" | "completed" | "failed"
+ *     },
+ *   });
+ * }
+ * ```
+ *
+ * @see {@link $batch}
+ * @module alepha.batch
+ */
+declare const AlephaBatch: _alepha_core1.Service<_alepha_core1.Module>;
+//#endregion
+export { $batch, AlephaBatch, BatchDescriptor, BatchDescriptorOptions, BatchItemState, BatchItemStatus };
+//# sourceMappingURL=index.d.ts.map
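
Beyond the push/wait/status flow shown in the module's own example, the declarations above also expose `partitionBy`, `concurrency`, and `flush()`. The following is a minimal sketch of per-key batching built only from those signatures; the `[n, "seconds"]` duration format is taken from the package's own example, while the "level:" prefix convention and the `LogSink` class are illustrative, not part of the package:

```ts
import { t } from "alepha";
import { $batch } from "alepha/batch";

class LogSink {
  lines = $batch({
    schema: t.text(),
    maxSize: 100,
    maxDuration: [2, "seconds"],
    // Lines with different partition keys are collected into separate
    // batches; here the key is an assumed "level:" prefix, e.g. "error:disk full".
    partitionBy: (line) => line.split(":")[0] ?? "info",
    // At most two batch handlers may run at the same time.
    concurrency: 2,
    handler: async (lines) => {
      // Every line in this array shares the same partition key.
      console.log(`writing ${lines.length} lines`);
    },
  });

  async drain() {
    // Per the flush(partitionKey?) signature above, calling flush() with no
    // argument should flush all pending partitions; flush("error") only one.
    await this.lines.flush();
  }
}
```
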
package/bucket.d.ts
CHANGED
@@ -1 +1,520 @@
-
+import * as _alepha_core1 from "alepha";
+import { Alepha, AlephaError, Descriptor, FileLike, KIND, Service, Static } from "alepha";
+import { FileSystem } from "alepha/file";
+import * as fs from "node:fs";
+import * as _alepha_logger0 from "alepha/logger";
+import * as typebox0 from "typebox";
+
+//#region src/providers/FileStorageProvider.d.ts
+declare abstract class FileStorageProvider {
+  /**
+   * Uploads a file to the storage.
+   *
+   * @param bucketName - Container name
+   * @param file - File to upload
+   * @param fileId - Optional file identifier. If not provided, a unique ID will be generated.
+   * @return The identifier of the uploaded file.
+   */
+  abstract upload(bucketName: string, file: FileLike, fileId?: string): Promise<string>;
+  /**
+   * Downloads a file from the storage.
+   *
+   * @param bucketName - Container name
+   * @param fileId - Identifier of the file to download
+   * @return The downloaded file as a FileLike object.
+   */
+  abstract download(bucketName: string, fileId: string): Promise<FileLike>;
+  /**
+   * Checks if fileId exists in the storage bucket.
+   *
+   * @param bucketName - Container name
+   * @param fileId - Identifier of the file to check
+   * @return True if the file exists, false otherwise.
+   */
+  abstract exists(bucketName: string, fileId: string): Promise<boolean>;
+  /**
+   * Permanently deletes a file from the storage.
+   *
+   * @param bucketName - Container name
+   * @param fileId - Identifier of the file to delete
+   */
+  abstract delete(bucketName: string, fileId: string): Promise<void>;
+}
+//#endregion
+//#region src/providers/MemoryFileStorageProvider.d.ts
+declare class MemoryFileStorageProvider implements FileStorageProvider {
+  readonly files: Record<string, FileLike>;
+  private readonly fileSystem;
+  upload(bucketName: string, file: FileLike, fileId?: string): Promise<string>;
+  download(bucketName: string, fileId: string): Promise<FileLike>;
+  exists(bucketName: string, fileId: string): Promise<boolean>;
+  delete(bucketName: string, fileId: string): Promise<void>;
+  protected createId(): string;
+}
+//#endregion
+//#region src/descriptors/$bucket.d.ts
+/**
+ * Creates a bucket descriptor for file storage and management with configurable validation.
+ *
+ * This descriptor provides a comprehensive file storage system that handles file uploads,
+ * downloads, validation, and management across multiple storage backends. It supports
+ * MIME type validation, size limits, and integrates seamlessly with various storage
+ * providers for scalable file management in applications.
+ *
+ * **Key Features**
+ *
+ * - **Multi-Provider Support**: Works with filesystem, cloud storage (S3, Azure), and in-memory providers
+ * - **File Validation**: Automatic MIME type checking and file size validation
+ * - **Type Safety**: Full TypeScript support with FileLike interface compatibility
+ * - **Event Integration**: Emits events for file operations (upload, delete) for monitoring
+ * - **Flexible Configuration**: Per-bucket and per-operation configuration options
+ * - **Automatic Detection**: Smart file type and size detection with fallback mechanisms
+ * - **Error Handling**: Comprehensive error handling with descriptive error messages
+ *
+ * **Use Cases**
+ *
+ * Perfect for handling file storage requirements across applications:
+ * - User profile picture and document uploads
+ * - Product image and media management
+ * - Document storage and retrieval systems
+ * - Temporary file handling and processing
+ * - Content delivery and asset management
+ * - Backup and archival storage
+ * - File-based data import/export workflows
+ *
+ * @example
+ * **Basic file upload bucket:**
+ * ```ts
+ * import { $bucket } from "alepha/bucket";
+ *
+ * class MediaService {
+ *   images = $bucket({
+ *     name: "user-images",
+ *     description: "User uploaded profile images and photos",
+ *     mimeTypes: ["image/jpeg", "image/png", "image/gif", "image/webp"],
+ *     maxSize: 5 // 5MB limit
+ *   });
+ *
+ *   async uploadProfileImage(file: FileLike, userId: string): Promise<string> {
+ *     // File is automatically validated against MIME types and size
+ *     const fileId = await this.images.upload(file);
+ *
+ *     // Update user profile with new image
+ *     await this.userService.updateProfileImage(userId, fileId);
+ *
+ *     return fileId;
+ *   }
+ *
+ *   async getUserProfileImage(userId: string): Promise<FileLike> {
+ *     const user = await this.userService.getUser(userId);
+ *     if (!user.profileImageId) {
+ *       throw new Error('User has no profile image');
+ *     }
+ *
+ *     return await this.images.download(user.profileImageId);
+ *   }
+ * }
+ * ```
+ *
+ * @example
+ * **Document storage with multiple file types:**
+ * ```ts
+ * class DocumentManager {
+ *   documents = $bucket({
+ *     name: "company-documents",
+ *     description: "Legal documents, contracts, and reports",
+ *     mimeTypes: [
+ *       "application/pdf",
+ *       "application/msword",
+ *       "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ *       "text/plain",
+ *       "text/csv"
+ *     ],
+ *     maxSize: 50 // 50MB for large documents
+ *   });
+ *
+ *   async uploadDocument(file: FileLike, metadata: { title: string; category: string; userId: string }): Promise<string> {
+ *     try {
+ *       const fileId = await this.documents.upload(file);
+ *
+ *       // Store document metadata in database
+ *       await this.database.documents.create({
+ *         id: fileId,
+ *         title: metadata.title,
+ *         category: metadata.category,
+ *         uploadedBy: metadata.userId,
+ *         fileName: file.name,
+ *         fileSize: file.size,
+ *         mimeType: file.type,
+ *         uploadedAt: new Date()
+ *       });
+ *
+ *       console.log(`Document uploaded successfully: ${metadata.title} (${fileId})`);
+ *       return fileId;
+ *
+ *     } catch (error) {
+ *       console.error(`Failed to upload document: ${metadata.title}`, error);
+ *       throw error;
+ *     }
+ *   }
+ *
+ *   async downloadDocument(documentId: string, userId: string): Promise<FileLike> {
+ *     // Check permissions
+ *     const document = await this.database.documents.findById(documentId);
+ *     if (!document) {
+ *       throw new Error('Document not found');
+ *     }
+ *
+ *     const hasAccess = await this.permissionService.canAccessDocument(userId, documentId);
+ *     if (!hasAccess) {
+ *       throw new Error('Insufficient permissions to access document');
+ *     }
+ *
+ *     // Download and return file
+ *     return await this.documents.download(documentId);
+ *   }
+ *
+ *   async deleteDocument(documentId: string, userId: string): Promise<void> {
+ *     // Verify ownership or admin privileges
+ *     const document = await this.database.documents.findById(documentId);
+ *     if (document.uploadedBy !== userId && !await this.userService.isAdmin(userId)) {
+ *       throw new Error('Cannot delete document: insufficient permissions');
+ *     }
+ *
+ *     // Delete from storage and database
+ *     await this.documents.delete(documentId);
+ *     await this.database.documents.delete(documentId);
+ *
+ *     console.log(`Document deleted: ${document.title} (${documentId})`);
+ *   }
+ * }
+ * ```
+ */
+declare const $bucket: {
+  (options: BucketDescriptorOptions): BucketDescriptor;
+  [KIND]: typeof BucketDescriptor;
+};
+interface BucketDescriptorOptions extends BucketFileOptions {
+  /**
+   * File storage provider configuration for the bucket.
+   *
+   * Options:
+   * - **"memory"**: In-memory storage (default for development, lost on restart)
+   * - **Service<FileStorageProvider>**: Custom provider class (e.g., S3FileStorageProvider, AzureBlobProvider)
+   * - **undefined**: Uses the default file storage provider from dependency injection
+   *
+   * **Provider Selection Guidelines**:
+   * - **Development**: Use "memory" for fast, simple testing without external dependencies
+   * - **Production**: Use cloud providers (S3, Azure Blob, Google Cloud Storage) for scalability
+   * - **Local deployment**: Use filesystem providers for on-premise installations
+   * - **Hybrid**: Use different providers for different bucket types (temp files vs permanent storage)
+   *
+   * **Provider Capabilities**:
+   * - File persistence and durability guarantees
+   * - Scalability and performance characteristics
+   * - Geographic distribution and CDN integration
+   * - Cost implications for storage and bandwidth
+   * - Backup and disaster recovery features
+   *
+   * @default Uses injected FileStorageProvider
+   * @example "memory"
+   * @example S3FileStorageProvider
+   * @example AzureBlobStorageProvider
+   */
+  provider?: Service<FileStorageProvider> | "memory";
+  /**
+   * Unique name identifier for the bucket.
+   *
+   * This name is used for:
+   * - Storage backend organization and partitioning
+   * - File path generation and URL construction
+   * - Logging, monitoring, and debugging
+   * - Access control and permissions management
+   * - Backup and replication configuration
+   *
+   * **Naming Conventions**:
+   * - Use lowercase with hyphens for consistency
+   * - Include purpose or content type in the name
+   * - Avoid spaces and special characters
+   * - Consider environment prefixes for deployment isolation
+   *
+   * If not provided, defaults to the property key where the bucket is declared.
+   *
+   * @example "user-avatars"
+   * @example "product-images"
+   * @example "legal-documents"
+   * @example "temp-processing-files"
+   */
+  name?: string;
+}
+interface BucketFileOptions {
+  /**
+   * Human-readable description of the bucket's purpose and contents.
+   *
+   * Used for:
+   * - Documentation generation and API references
+   * - Developer onboarding and system understanding
+   * - Monitoring dashboards and admin interfaces
+   * - Compliance and audit documentation
+   *
+   * **Description Best Practices**:
+   * - Explain what types of files this bucket stores
+   * - Mention any special handling or processing requirements
+   * - Include information about retention policies if applicable
+   * - Note any compliance or security considerations
+   *
+   * @example "User profile pictures and avatar images"
+   * @example "Product catalog images with automated thumbnail generation"
+   * @example "Legal documents requiring long-term retention"
+   * @example "Temporary files for data processing workflows"
+   */
+  description?: string;
+  /**
+   * Array of allowed MIME types for files uploaded to this bucket.
+   *
+   * When specified, only files with these exact MIME types will be accepted.
+   * Files with disallowed MIME types will be rejected with an InvalidFileError.
+   *
+   * **MIME Type Categories**:
+   * - Images: "image/jpeg", "image/png", "image/gif", "image/webp", "image/svg+xml"
+   * - Documents: "application/pdf", "text/plain", "text/csv"
+   * - Office: "application/msword", "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+   * - Archives: "application/zip", "application/x-tar", "application/gzip"
+   * - Media: "video/mp4", "audio/mpeg", "audio/wav"
+   *
+   * **Security Considerations**:
+   * - Always validate MIME types for user uploads
+   * - Be cautious with executable file types
+   * - Consider using allow-lists rather than deny-lists
+   * - Remember that MIME types can be spoofed by malicious users
+   *
+   * If not specified, all MIME types are allowed (not recommended for user uploads).
+   *
+   * @example ["image/jpeg", "image/png"] // Only JPEG and PNG images
+   * @example ["application/pdf", "text/plain"] // Documents only
+   * @example ["video/mp4", "video/webm"] // Video files
+   */
+  mimeTypes?: string[];
+  /**
+   * Maximum file size allowed in megabytes (MB).
+   *
+   * Files larger than this limit will be rejected with an InvalidFileError.
+   * This helps prevent:
+   * - Storage quota exhaustion
+   * - Memory issues during file processing
+   * - Long upload times and timeouts
+   * - Abuse of storage resources
+   *
+   * **Size Guidelines by File Type**:
+   * - Profile images: 1-5 MB
+   * - Product photos: 5-10 MB
+   * - Documents: 10-50 MB
+   * - Video files: 50-500 MB
+   * - Data files: 100-1000 MB
+   *
+   * **Considerations**:
+   * - Consider your storage costs and limits
+   * - Factor in network upload speeds for users
+   * - Account for processing requirements (thumbnails, compression)
+   * - Set reasonable limits based on actual use cases
+   *
+   * @default 10 MB
+   *
+   * @example 1 // 1MB for small images
+   * @example 25 // 25MB for documents
+   * @example 100 // 100MB for media files
+   */
+  maxSize?: number;
+}
+declare class BucketDescriptor extends Descriptor<BucketDescriptorOptions> {
+  readonly provider: FileStorageProvider | MemoryFileStorageProvider;
+  private readonly fileSystem;
+  get name(): string;
+  /**
+   * Uploads a file to the bucket.
+   */
+  upload(file: FileLike, options?: BucketFileOptions): Promise<string>;
+  /**
+   * Permanently deletes a file from the bucket.
+   */
+  delete(fileId: string, skipHook?: boolean): Promise<void>;
+  /**
+   * Checks if a file exists in the bucket.
+   */
+  exists(fileId: string): Promise<boolean>;
+  /**
+   * Downloads a file from the bucket.
+   */
+  download(fileId: string): Promise<FileLike>;
+  protected $provider(): FileStorageProvider | MemoryFileStorageProvider;
+}
+interface BucketFileOptions {
+  /**
+   * Optional description of the bucket.
+   */
+  description?: string;
+  /**
+   * Allowed MIME types.
+   */
+  mimeTypes?: string[];
+  /**
+   * Maximum size of the files in the bucket.
+   *
+   * @default 10
+   */
+  maxSize?: number;
+}
+//#endregion
+//#region src/errors/FileNotFoundError.d.ts
+declare class FileNotFoundError extends AlephaError {
+  readonly status = 404;
+}
+//#endregion
+//#region src/services/FileMetadataService.d.ts
+interface FileMetadata {
+  name: string;
+  type: string;
+}
+/**
+ * Service for encoding/decoding file metadata in storage streams.
+ *
+ * The metadata is stored at the beginning of the file with the following structure:
+ * - 4-byte header: UInt32BE containing the metadata length
+ * - N-byte metadata: JSON object containing file metadata (name, type)
+ * - Remaining bytes: Actual file content
+ *
+ * @example
+ * ```typescript
+ * const service = new FileMetadataService();
+ *
+ * // Encode metadata and content for storage
+ * const { header, metadata } = service.encodeMetadata({
+ *   name: "document.pdf",
+ *   type: "application/pdf"
+ * });
+ *
+ * // Decode metadata from stored file
+ * const fileHandle = await open(filePath, 'r');
+ * const { metadata, contentStart } = await service.decodeMetadata(fileHandle);
+ * ```
+ */
+declare class FileMetadataService {
+  /**
+   * Length of the header containing metadata size (4 bytes for UInt32BE)
+   */
+  static readonly METADATA_HEADER_LENGTH = 4;
+  /**
+   * Encodes file metadata into header and metadata buffers.
+   *
+   * @param file - The file or metadata to encode
+   * @returns Object containing the header buffer and metadata buffer
+   */
+  encodeMetadata(file: FileLike | FileMetadata): {
+    header: Buffer;
+    metadata: Buffer;
+  };
+  /**
+   * Decodes file metadata from a file handle.
+   *
+   * @param fileHandle - File handle opened for reading
+   * @returns Object containing the decoded metadata and content start position
+   */
+  decodeMetadata(fileHandle: {
+    read: (buffer: Buffer, offset: number, length: number, position: number) => Promise<{
+      bytesRead: number;
+    }>;
+  }): Promise<{
+    metadata: FileMetadata;
+    contentStart: number;
+  }>;
+  /**
+   * Decodes file metadata from a buffer.
+   *
+   * @param buffer - Buffer containing the file with metadata
+   * @returns Object containing the decoded metadata and content start position
+   */
+  decodeMetadataFromBuffer(buffer: Buffer): {
+    metadata: FileMetadata;
+    contentStart: number;
+  };
+  /**
+   * Creates a complete buffer with metadata header, metadata, and content.
+   *
+   * @param file - The file to encode
+   * @param content - The file content as a buffer
+   * @returns Complete buffer ready for storage
+   */
+  createFileBuffer(file: FileLike | FileMetadata, content: Buffer): Buffer;
+}
+//#endregion
+//#region src/providers/LocalFileStorageProvider.d.ts
+/**
+ * Local file storage configuration atom
+ */
+declare const localFileStorageOptions: _alepha_core1.Atom<typebox0.TObject<{
+  storagePath: typebox0.TString;
+}>, "alepha.bucket.local.options">;
+type LocalFileStorageProviderOptions = Static<typeof localFileStorageOptions.schema>;
+declare module "alepha" {
+  interface State {
+    [localFileStorageOptions.key]: LocalFileStorageProviderOptions;
+  }
+}
+declare class LocalFileStorageProvider implements FileStorageProvider {
+  protected readonly alepha: Alepha;
+  protected readonly log: _alepha_logger0.Logger;
+  protected readonly metadataService: FileMetadataService;
+  protected readonly fileSystem: FileSystem;
+  protected readonly options: Readonly<{
+    storagePath: string;
+  }>;
+  protected get storagePath(): string;
+  protected readonly onConfigure: _alepha_core1.HookDescriptor<"configure">;
+  protected readonly onStart: _alepha_core1.HookDescriptor<"start">;
+  upload(bucketName: string, file: FileLike, fileId?: string): Promise<string>;
+  download(bucketName: string, fileId: string): Promise<FileLike>;
+  exists(bucketName: string, fileId: string): Promise<boolean>;
+  delete(bucketName: string, fileId: string): Promise<void>;
+  protected stat(bucket: string, fileId: string): Promise<fs.Stats>;
+  protected createId(): string;
+  protected path(bucket: string, fileId?: string): string;
+  protected isErrorNoEntry(error: unknown): boolean;
+}
+//#endregion
+//#region src/index.d.ts
+declare module "alepha" {
+  interface Hooks {
+    /**
+     * Triggered when a file is uploaded to a bucket.
+     * Can be used to perform actions after a file is uploaded, like creating a database record.
+     */
+    "bucket:file:uploaded": {
+      id: string;
+      file: FileLike;
+      bucket: BucketDescriptor;
+      options: BucketFileOptions;
+    };
+    /**
+     * Triggered when a file is deleted from a bucket.
+     */
+    "bucket:file:deleted": {
+      id: string;
+      bucket: BucketDescriptor;
+    };
+  }
+}
+/**
+ * Provides file storage capabilities through declarative bucket descriptors with support for multiple storage backends.
+ *
+ * The bucket module enables unified file operations across different storage systems using the `$bucket` descriptor
+ * on class properties. It abstracts storage provider differences, offering consistent APIs for local filesystem,
+ * cloud storage, or in-memory storage for testing environments.
+ *
+ * @see {@link $bucket}
+ * @see {@link FileStorageProvider}
+ * @module alepha.bucket
+ */
+declare const AlephaBucket: _alepha_core1.Service<_alepha_core1.Module>;
+//#endregion
+export { $bucket, AlephaBucket, BucketDescriptor, BucketDescriptorOptions, BucketFileOptions, FileMetadata, FileMetadataService, FileNotFoundError, FileStorageProvider, LocalFileStorageProvider, LocalFileStorageProviderOptions, MemoryFileStorageProvider, localFileStorageOptions };
+//# sourceMappingURL=index.d.ts.map
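
Two sketches follow, both derived only from the declarations above. First, a custom storage backend: `FileStorageProvider` is abstract with exactly four methods, and `BucketDescriptorOptions.provider` accepts a `Service<FileStorageProvider>`. The sketch assumes the provider class itself can be passed as that service; the `MapFileStorageProvider` name and Map-based storage are illustrative, and since `FileNotFoundError`'s constructor arguments are not shown, a plain `Error` is thrown:

```ts
import { randomUUID } from "node:crypto";
import type { FileLike } from "alepha";
import { $bucket, FileStorageProvider } from "alepha/bucket";

// Illustrative provider: stores files in an in-process Map, keyed by
// "<bucketName>/<fileId>", matching the four abstract method signatures.
class MapFileStorageProvider extends FileStorageProvider {
  private readonly store = new Map<string, FileLike>();

  async upload(bucketName: string, file: FileLike, fileId?: string): Promise<string> {
    const id = fileId ?? randomUUID();
    this.store.set(`${bucketName}/${id}`, file);
    return id;
  }

  async download(bucketName: string, fileId: string): Promise<FileLike> {
    const file = this.store.get(`${bucketName}/${fileId}`);
    if (!file) {
      // FileNotFoundError's constructor is not declared above, so a plain
      // Error stands in for it here.
      throw new Error(`File not found: ${bucketName}/${fileId}`);
    }
    return file;
  }

  async exists(bucketName: string, fileId: string): Promise<boolean> {
    return this.store.has(`${bucketName}/${fileId}`);
  }

  async delete(bucketName: string, fileId: string): Promise<void> {
    this.store.delete(`${bucketName}/${fileId}`);
  }
}

class AvatarService {
  avatars = $bucket({
    name: "avatars",
    mimeTypes: ["image/png", "image/jpeg"],
    maxSize: 2, // MB
    provider: MapFileStorageProvider, // assumed: class passed as Service<FileStorageProvider>
  });
}
```

Second, the byte layout documented on `FileMetadataService` (4-byte UInt32BE length header, JSON metadata, then content) is simple enough to reproduce standalone. This sketch mirrors the documented structure and the `{ metadata, contentStart }` return shape of `decodeMetadataFromBuffer`; it is not the service's actual implementation:

```ts
// Layout: [4-byte UInt32BE metadata length][JSON metadata][file content].
function encodeFile(metadata: { name: string; type: string }, content: Buffer): Buffer {
  const meta = Buffer.from(JSON.stringify(metadata), "utf8");
  const header = Buffer.alloc(4); // METADATA_HEADER_LENGTH
  header.writeUInt32BE(meta.length, 0);
  return Buffer.concat([header, meta, content]);
}

function decodeFile(buffer: Buffer): { metadata: { name: string; type: string }; contentStart: number } {
  const metaLength = buffer.readUInt32BE(0);
  const metadata = JSON.parse(buffer.subarray(4, 4 + metaLength).toString("utf8"));
  // The actual file content begins at contentStart.
  return { metadata, contentStart: 4 + metaLength };
}
```
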