alepha 0.12.1 → 0.13.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api-notifications/index.d.ts +111 -111
- package/dist/api-users/index.d.ts +1240 -1240
- package/dist/api-verifications/index.d.ts +94 -94
- package/dist/cli/{dist-Sz2EXvQX.cjs → dist-Dl9Vl7Ur.js} +17 -13
- package/dist/cli/{dist-BBPjuQ56.js.map → dist-Dl9Vl7Ur.js.map} +1 -1
- package/dist/cli/index.d.ts +3 -11
- package/dist/cli/index.js +106 -74
- package/dist/cli/index.js.map +1 -1
- package/dist/email/index.js +71 -73
- package/dist/email/index.js.map +1 -1
- package/dist/orm/index.d.ts +1 -1
- package/dist/orm/index.js.map +1 -1
- package/dist/queue/index.d.ts +4 -4
- package/dist/redis/index.d.ts +10 -10
- package/dist/retry/index.d.ts +1 -1
- package/dist/retry/index.js +2 -2
- package/dist/retry/index.js.map +1 -1
- package/dist/scheduler/index.d.ts +6 -6
- package/dist/server/index.js +1 -1
- package/dist/server/index.js.map +1 -1
- package/dist/server-auth/index.d.ts +193 -193
- package/dist/server-health/index.d.ts +17 -17
- package/dist/server-links/index.d.ts +34 -34
- package/dist/server-metrics/index.js +170 -174
- package/dist/server-metrics/index.js.map +1 -1
- package/dist/server-security/index.d.ts +9 -9
- package/dist/vite/index.js +4 -5
- package/dist/vite/index.js.map +1 -1
- package/dist/websocket/index.d.ts +7 -7
- package/package.json +52 -103
- package/src/cli/apps/AlephaPackageBuilderCli.ts +7 -2
- package/src/cli/assets/appRouterTs.ts +9 -0
- package/src/cli/assets/indexHtml.ts +2 -1
- package/src/cli/assets/mainBrowserTs.ts +10 -0
- package/src/cli/commands/CoreCommands.ts +6 -5
- package/src/cli/commands/DrizzleCommands.ts +65 -57
- package/src/cli/commands/VerifyCommands.ts +1 -1
- package/src/cli/services/ProjectUtils.ts +44 -38
- package/src/orm/providers/DrizzleKitProvider.ts +1 -1
- package/src/retry/descriptors/$retry.ts +5 -3
- package/src/server/providers/NodeHttpServerProvider.ts +1 -1
- package/src/vite/helpers/boot.ts +3 -3
- package/dist/api-files/index.cjs +0 -1293
- package/dist/api-files/index.cjs.map +0 -1
- package/dist/api-files/index.d.cts +0 -829
- package/dist/api-jobs/index.cjs +0 -274
- package/dist/api-jobs/index.cjs.map +0 -1
- package/dist/api-jobs/index.d.cts +0 -654
- package/dist/api-notifications/index.cjs +0 -380
- package/dist/api-notifications/index.cjs.map +0 -1
- package/dist/api-notifications/index.d.cts +0 -289
- package/dist/api-parameters/index.cjs +0 -66
- package/dist/api-parameters/index.cjs.map +0 -1
- package/dist/api-parameters/index.d.cts +0 -84
- package/dist/api-users/index.cjs +0 -6009
- package/dist/api-users/index.cjs.map +0 -1
- package/dist/api-users/index.d.cts +0 -4740
- package/dist/api-verifications/index.cjs +0 -407
- package/dist/api-verifications/index.cjs.map +0 -1
- package/dist/api-verifications/index.d.cts +0 -207
- package/dist/batch/index.cjs +0 -408
- package/dist/batch/index.cjs.map +0 -1
- package/dist/batch/index.d.cts +0 -330
- package/dist/bin/index.cjs +0 -17
- package/dist/bin/index.cjs.map +0 -1
- package/dist/bin/index.d.cts +0 -1
- package/dist/bucket/index.cjs +0 -303
- package/dist/bucket/index.cjs.map +0 -1
- package/dist/bucket/index.d.cts +0 -355
- package/dist/cache/index.cjs +0 -241
- package/dist/cache/index.cjs.map +0 -1
- package/dist/cache/index.d.cts +0 -202
- package/dist/cache-redis/index.cjs +0 -84
- package/dist/cache-redis/index.cjs.map +0 -1
- package/dist/cache-redis/index.d.cts +0 -40
- package/dist/cli/chunk-DSlc6foC.cjs +0 -43
- package/dist/cli/dist-BBPjuQ56.js +0 -2778
- package/dist/cli/dist-Sz2EXvQX.cjs.map +0 -1
- package/dist/cli/index.cjs +0 -1241
- package/dist/cli/index.cjs.map +0 -1
- package/dist/cli/index.d.cts +0 -422
- package/dist/command/index.cjs +0 -693
- package/dist/command/index.cjs.map +0 -1
- package/dist/command/index.d.cts +0 -340
- package/dist/core/index.cjs +0 -2264
- package/dist/core/index.cjs.map +0 -1
- package/dist/core/index.d.cts +0 -1927
- package/dist/datetime/index.cjs +0 -318
- package/dist/datetime/index.cjs.map +0 -1
- package/dist/datetime/index.d.cts +0 -145
- package/dist/email/index.cjs +0 -10874
- package/dist/email/index.cjs.map +0 -1
- package/dist/email/index.d.cts +0 -186
- package/dist/fake/index.cjs +0 -34641
- package/dist/fake/index.cjs.map +0 -1
- package/dist/fake/index.d.cts +0 -74
- package/dist/file/index.cjs +0 -1212
- package/dist/file/index.cjs.map +0 -1
- package/dist/file/index.d.cts +0 -698
- package/dist/lock/index.cjs +0 -226
- package/dist/lock/index.cjs.map +0 -1
- package/dist/lock/index.d.cts +0 -361
- package/dist/lock-redis/index.cjs +0 -113
- package/dist/lock-redis/index.cjs.map +0 -1
- package/dist/lock-redis/index.d.cts +0 -24
- package/dist/logger/index.cjs +0 -521
- package/dist/logger/index.cjs.map +0 -1
- package/dist/logger/index.d.cts +0 -281
- package/dist/orm/index.cjs +0 -2986
- package/dist/orm/index.cjs.map +0 -1
- package/dist/orm/index.d.cts +0 -2213
- package/dist/queue/index.cjs +0 -1044
- package/dist/queue/index.cjs.map +0 -1
- package/dist/queue/index.d.cts +0 -1265
- package/dist/queue-redis/index.cjs +0 -873
- package/dist/queue-redis/index.cjs.map +0 -1
- package/dist/queue-redis/index.d.cts +0 -82
- package/dist/redis/index.cjs +0 -153
- package/dist/redis/index.cjs.map +0 -1
- package/dist/redis/index.d.cts +0 -82
- package/dist/retry/index.cjs +0 -146
- package/dist/retry/index.cjs.map +0 -1
- package/dist/retry/index.d.cts +0 -172
- package/dist/router/index.cjs +0 -111
- package/dist/router/index.cjs.map +0 -1
- package/dist/router/index.d.cts +0 -46
- package/dist/scheduler/index.cjs +0 -576
- package/dist/scheduler/index.cjs.map +0 -1
- package/dist/scheduler/index.d.cts +0 -145
- package/dist/security/index.cjs +0 -2402
- package/dist/security/index.cjs.map +0 -1
- package/dist/security/index.d.cts +0 -598
- package/dist/server/index.cjs +0 -1680
- package/dist/server/index.cjs.map +0 -1
- package/dist/server/index.d.cts +0 -810
- package/dist/server-auth/index.cjs +0 -3146
- package/dist/server-auth/index.cjs.map +0 -1
- package/dist/server-auth/index.d.cts +0 -1164
- package/dist/server-cache/index.cjs +0 -252
- package/dist/server-cache/index.cjs.map +0 -1
- package/dist/server-cache/index.d.cts +0 -164
- package/dist/server-compress/index.cjs +0 -141
- package/dist/server-compress/index.cjs.map +0 -1
- package/dist/server-compress/index.d.cts +0 -38
- package/dist/server-cookies/index.cjs +0 -234
- package/dist/server-cookies/index.cjs.map +0 -1
- package/dist/server-cookies/index.d.cts +0 -144
- package/dist/server-cors/index.cjs +0 -201
- package/dist/server-cors/index.cjs.map +0 -1
- package/dist/server-cors/index.d.cts +0 -140
- package/dist/server-health/index.cjs +0 -62
- package/dist/server-health/index.cjs.map +0 -1
- package/dist/server-health/index.d.cts +0 -58
- package/dist/server-helmet/index.cjs +0 -131
- package/dist/server-helmet/index.cjs.map +0 -1
- package/dist/server-helmet/index.d.cts +0 -97
- package/dist/server-links/index.cjs +0 -992
- package/dist/server-links/index.cjs.map +0 -1
- package/dist/server-links/index.d.cts +0 -513
- package/dist/server-metrics/index.cjs +0 -4535
- package/dist/server-metrics/index.cjs.map +0 -1
- package/dist/server-metrics/index.d.cts +0 -35
- package/dist/server-multipart/index.cjs +0 -237
- package/dist/server-multipart/index.cjs.map +0 -1
- package/dist/server-multipart/index.d.cts +0 -50
- package/dist/server-proxy/index.cjs +0 -186
- package/dist/server-proxy/index.cjs.map +0 -1
- package/dist/server-proxy/index.d.cts +0 -234
- package/dist/server-rate-limit/index.cjs +0 -241
- package/dist/server-rate-limit/index.cjs.map +0 -1
- package/dist/server-rate-limit/index.d.cts +0 -183
- package/dist/server-security/index.cjs +0 -316
- package/dist/server-security/index.cjs.map +0 -1
- package/dist/server-security/index.d.cts +0 -173
- package/dist/server-static/index.cjs +0 -170
- package/dist/server-static/index.cjs.map +0 -1
- package/dist/server-static/index.d.cts +0 -121
- package/dist/server-swagger/index.cjs +0 -1021
- package/dist/server-swagger/index.cjs.map +0 -1
- package/dist/server-swagger/index.d.cts +0 -382
- package/dist/sms/index.cjs +0 -221
- package/dist/sms/index.cjs.map +0 -1
- package/dist/sms/index.d.cts +0 -130
- package/dist/thread/index.cjs +0 -350
- package/dist/thread/index.cjs.map +0 -1
- package/dist/thread/index.d.cts +0 -260
- package/dist/topic/index.cjs +0 -282
- package/dist/topic/index.cjs.map +0 -1
- package/dist/topic/index.d.cts +0 -523
- package/dist/topic-redis/index.cjs +0 -71
- package/dist/topic-redis/index.cjs.map +0 -1
- package/dist/topic-redis/index.d.cts +0 -42
- package/dist/vite/index.cjs +0 -1077
- package/dist/vite/index.cjs.map +0 -1
- package/dist/vite/index.d.cts +0 -542
- package/dist/websocket/index.cjs +0 -1117
- package/dist/websocket/index.cjs.map +0 -1
- package/dist/websocket/index.d.cts +0 -861
package/dist/batch/index.d.cts
DELETED
|
@@ -1,330 +0,0 @@
|
|
|
1
|
-
import * as alepha1 from "alepha";
|
|
2
|
-
import { Alepha, Descriptor, KIND, Static, TSchema } from "alepha";
|
|
3
|
-
import { DateTimeProvider, DurationLike } from "alepha/datetime";
|
|
4
|
-
import { RetryBackoffOptions, RetryDescriptorOptions, RetryProvider } from "alepha/retry";
|
|
5
|
-
import * as alepha_logger0 from "alepha/logger";
|
|
6
|
-
|
|
7
|
-
//#region src/batch/providers/BatchProvider.d.ts
|
|
8
|
-
interface BatchOptions<TItem, TResponse = any> {
|
|
9
|
-
/**
|
|
10
|
-
* The batch processing handler function that processes arrays of validated items.
|
|
11
|
-
*/
|
|
12
|
-
handler: (items: TItem[]) => TResponse;
|
|
13
|
-
/**
|
|
14
|
-
* Maximum number of items to collect before automatically flushing the batch.
|
|
15
|
-
*
|
|
16
|
-
* @default 10
|
|
17
|
-
*/
|
|
18
|
-
maxSize?: number;
|
|
19
|
-
/**
|
|
20
|
-
* Maximum number of items that can be queued in a single partition.
|
|
21
|
-
* If exceeded, push() will throw an error.
|
|
22
|
-
*/
|
|
23
|
-
maxQueueSize?: number;
|
|
24
|
-
/**
|
|
25
|
-
* Maximum time to wait before flushing a batch, even if it hasn't reached maxSize.
|
|
26
|
-
*
|
|
27
|
-
* @default [1, "second"]
|
|
28
|
-
*/
|
|
29
|
-
maxDuration?: DurationLike;
|
|
30
|
-
/**
|
|
31
|
-
* Function to determine partition keys for grouping items into separate batches.
|
|
32
|
-
*/
|
|
33
|
-
partitionBy?: (item: TItem) => string;
|
|
34
|
-
/**
|
|
35
|
-
* Maximum number of batch handlers that can execute simultaneously.
|
|
36
|
-
*
|
|
37
|
-
* @default 1
|
|
38
|
-
*/
|
|
39
|
-
concurrency?: number;
|
|
40
|
-
/**
|
|
41
|
-
* Retry configuration for failed batch processing operations.
|
|
42
|
-
*/
|
|
43
|
-
retry?: {
|
|
44
|
-
/**
|
|
45
|
-
* The maximum number of attempts.
|
|
46
|
-
*
|
|
47
|
-
* @default 3
|
|
48
|
-
*/
|
|
49
|
-
max?: number;
|
|
50
|
-
/**
|
|
51
|
-
* The backoff strategy for delays between retries.
|
|
52
|
-
* Can be a fixed number (in ms) or a configuration object for exponential backoff.
|
|
53
|
-
*
|
|
54
|
-
* @default { initial: 200, factor: 2, jitter: true }
|
|
55
|
-
*/
|
|
56
|
-
backoff?: number | RetryBackoffOptions;
|
|
57
|
-
/**
|
|
58
|
-
* An overall time limit for all retry attempts combined.
|
|
59
|
-
*
|
|
60
|
-
* e.g., `[5, 'seconds']`
|
|
61
|
-
*/
|
|
62
|
-
maxDuration?: DurationLike;
|
|
63
|
-
/**
|
|
64
|
-
* A function that determines if a retry should be attempted based on the error.
|
|
65
|
-
*
|
|
66
|
-
* @default (error) => true (retries on any error)
|
|
67
|
-
*/
|
|
68
|
-
when?: (error: Error) => boolean;
|
|
69
|
-
/**
|
|
70
|
-
* A custom callback for when a retry attempt fails.
|
|
71
|
-
* This is called before the delay.
|
|
72
|
-
*/
|
|
73
|
-
onError?: (error: Error, attempt: number) => void;
|
|
74
|
-
};
|
|
75
|
-
}
|
|
76
|
-
type BatchItemStatus = "pending" | "processing" | "completed" | "failed";
|
|
77
|
-
interface BatchItemState<TItem, TResponse> {
|
|
78
|
-
id: string;
|
|
79
|
-
item: TItem;
|
|
80
|
-
partitionKey: string;
|
|
81
|
-
status: BatchItemStatus;
|
|
82
|
-
result?: TResponse;
|
|
83
|
-
error?: Error;
|
|
84
|
-
promise?: Promise<TResponse>;
|
|
85
|
-
resolve?: (value: TResponse) => void;
|
|
86
|
-
reject?: (error: Error) => void;
|
|
87
|
-
}
|
|
88
|
-
interface PartitionState {
|
|
89
|
-
itemIds: string[];
|
|
90
|
-
timeout?: {
|
|
91
|
-
clear: () => void;
|
|
92
|
-
};
|
|
93
|
-
flushing: boolean;
|
|
94
|
-
}
|
|
95
|
-
/**
|
|
96
|
-
* Context object that holds all state for a batch processor instance.
|
|
97
|
-
*/
|
|
98
|
-
interface BatchContext<TItem, TResponse> {
|
|
99
|
-
options: BatchOptions<TItem, TResponse>;
|
|
100
|
-
itemStates: Map<string, BatchItemState<TItem, TResponse>>;
|
|
101
|
-
partitions: Map<string, PartitionState>;
|
|
102
|
-
activeHandlers: PromiseWithResolvers<void>[];
|
|
103
|
-
isShuttingDown: boolean;
|
|
104
|
-
isReady: boolean;
|
|
105
|
-
alepha: Alepha;
|
|
106
|
-
}
|
|
107
|
-
/**
|
|
108
|
-
* Service for batch processing operations.
|
|
109
|
-
* Provides methods to manage batches of items with automatic flushing based on size or time.
|
|
110
|
-
*/
|
|
111
|
-
declare class BatchProvider {
|
|
112
|
-
protected readonly log: alepha_logger0.Logger;
|
|
113
|
-
protected readonly dateTime: DateTimeProvider;
|
|
114
|
-
protected readonly retryProvider: RetryProvider;
|
|
115
|
-
/**
|
|
116
|
-
* Creates a new batch context with the given options.
|
|
117
|
-
*/
|
|
118
|
-
createContext<TItem, TResponse>(alepha: Alepha, options: BatchOptions<TItem, TResponse>): BatchContext<TItem, TResponse>;
|
|
119
|
-
/**
|
|
120
|
-
* Get the effective maxSize for a context.
|
|
121
|
-
*/
|
|
122
|
-
protected getMaxSize<TItem, TResponse>(context: BatchContext<TItem, TResponse>): number;
|
|
123
|
-
/**
|
|
124
|
-
* Get the effective concurrency for a context.
|
|
125
|
-
*/
|
|
126
|
-
protected getConcurrency<TItem, TResponse>(context: BatchContext<TItem, TResponse>): number;
|
|
127
|
-
/**
|
|
128
|
-
* Get the effective maxDuration for a context.
|
|
129
|
-
*/
|
|
130
|
-
protected getMaxDuration<TItem, TResponse>(context: BatchContext<TItem, TResponse>): DurationLike;
|
|
131
|
-
/**
|
|
132
|
-
* Pushes an item into the batch and returns immediately with a unique ID.
|
|
133
|
-
* The item will be processed asynchronously with other items when the batch is flushed.
|
|
134
|
-
* Use wait(id) to get the processing result.
|
|
135
|
-
*
|
|
136
|
-
* @throws Error if maxQueueSize is exceeded
|
|
137
|
-
*/
|
|
138
|
-
push<TItem, TResponse>(context: BatchContext<TItem, TResponse>, item: TItem): string;
|
|
139
|
-
/**
|
|
140
|
-
* Wait for a specific item to be processed and get its result.
|
|
141
|
-
* @param id The item ID returned from push()
|
|
142
|
-
* @returns The processing result
|
|
143
|
-
* @throws If the item doesn't exist or processing failed
|
|
144
|
-
*/
|
|
145
|
-
wait<TItem, TResponse>(context: BatchContext<TItem, TResponse>, id: string): Promise<TResponse>;
|
|
146
|
-
/**
|
|
147
|
-
* Get the current status of an item.
|
|
148
|
-
* @param id The item ID returned from push()
|
|
149
|
-
* @returns Status information or undefined if item doesn't exist
|
|
150
|
-
*/
|
|
151
|
-
status<TItem, TResponse>(context: BatchContext<TItem, TResponse>, id: string): {
|
|
152
|
-
status: "pending" | "processing";
|
|
153
|
-
} | {
|
|
154
|
-
status: "completed";
|
|
155
|
-
result: TResponse;
|
|
156
|
-
} | {
|
|
157
|
-
status: "failed";
|
|
158
|
-
error: Error;
|
|
159
|
-
} | undefined;
|
|
160
|
-
/**
|
|
161
|
-
* Clears completed and failed items from the context to free memory.
|
|
162
|
-
* Returns the number of items cleared.
|
|
163
|
-
*
|
|
164
|
-
* @param context The batch context
|
|
165
|
-
* @param status Optional: only clear items with this specific status ('completed' or 'failed')
|
|
166
|
-
* @returns The number of items cleared
|
|
167
|
-
*/
|
|
168
|
-
clearCompleted<TItem, TResponse>(context: BatchContext<TItem, TResponse>, status?: "completed" | "failed"): number;
|
|
169
|
-
/**
|
|
170
|
-
* Flush all partitions or a specific partition.
|
|
171
|
-
*/
|
|
172
|
-
flush<TItem, TResponse>(context: BatchContext<TItem, TResponse>, partitionKey?: string): Promise<void>;
|
|
173
|
-
/**
|
|
174
|
-
* Flush a specific partition.
|
|
175
|
-
*/
|
|
176
|
-
protected flushPartition<TItem, TResponse>(context: BatchContext<TItem, TResponse>, partitionKey: string, limit?: number): Promise<void>;
|
|
177
|
-
/**
|
|
178
|
-
* Mark the context as ready and start processing buffered items.
|
|
179
|
-
* Called after the "ready" hook.
|
|
180
|
-
*/
|
|
181
|
-
markReady<TItem, TResponse>(context: BatchContext<TItem, TResponse>): Promise<void>;
|
|
182
|
-
/**
|
|
183
|
-
* Mark the context as shutting down and flush all remaining items.
|
|
184
|
-
*/
|
|
185
|
-
shutdown<TItem, TResponse>(context: BatchContext<TItem, TResponse>): Promise<void>;
|
|
186
|
-
/**
|
|
187
|
-
* Called after the "ready" hook to start processing buffered items that were
|
|
188
|
-
* pushed during startup. This checks all partitions and starts timeouts/flushes
|
|
189
|
-
* for items that were accumulated before the app was ready.
|
|
190
|
-
*/
|
|
191
|
-
protected startProcessing<TItem, TResponse>(context: BatchContext<TItem, TResponse>): Promise<void>;
|
|
192
|
-
}
|
|
193
|
-
//#endregion
|
|
194
|
-
//#region src/batch/descriptors/$batch.d.ts
|
|
195
|
-
/**
|
|
196
|
-
* Creates a batch processing descriptor for efficient grouping and processing of multiple operations.
|
|
197
|
-
*/
|
|
198
|
-
declare const $batch: {
|
|
199
|
-
<TItem extends TSchema, TResponse>(options: BatchDescriptorOptions<TItem, TResponse>): BatchDescriptor<TItem, TResponse>;
|
|
200
|
-
[KIND]: typeof BatchDescriptor;
|
|
201
|
-
};
|
|
202
|
-
interface BatchDescriptorOptions<TItem extends TSchema, TResponse = any> {
|
|
203
|
-
/**
|
|
204
|
-
* TypeBox schema for validating each item added to the batch.
|
|
205
|
-
*/
|
|
206
|
-
schema: TItem;
|
|
207
|
-
/**
|
|
208
|
-
* The batch processing handler function that processes arrays of validated items.
|
|
209
|
-
*/
|
|
210
|
-
handler: (items: Static<TItem>[]) => TResponse;
|
|
211
|
-
/**
|
|
212
|
-
* Maximum number of items to collect before automatically flushing the batch.
|
|
213
|
-
*/
|
|
214
|
-
maxSize?: number;
|
|
215
|
-
/**
|
|
216
|
-
* Maximum number of items that can be queued in a single partition.
|
|
217
|
-
* If exceeded, push() will throw an error.
|
|
218
|
-
*/
|
|
219
|
-
maxQueueSize?: number;
|
|
220
|
-
/**
|
|
221
|
-
* Maximum time to wait before flushing a batch, even if it hasn't reached maxSize.
|
|
222
|
-
*/
|
|
223
|
-
maxDuration?: DurationLike;
|
|
224
|
-
/**
|
|
225
|
-
* Function to determine partition keys for grouping items into separate batches.
|
|
226
|
-
*/
|
|
227
|
-
partitionBy?: (item: Static<TItem>) => string;
|
|
228
|
-
/**
|
|
229
|
-
* Maximum number of batch handlers that can execute simultaneously.
|
|
230
|
-
*/
|
|
231
|
-
concurrency?: number;
|
|
232
|
-
/**
|
|
233
|
-
* Retry configuration for failed batch processing operations.
|
|
234
|
-
*/
|
|
235
|
-
retry?: Omit<RetryDescriptorOptions<() => Array<Static<TItem>>>, "handler">;
|
|
236
|
-
}
|
|
237
|
-
declare class BatchDescriptor<TItem extends TSchema, TResponse = any> extends Descriptor<BatchDescriptorOptions<TItem, TResponse>> {
|
|
238
|
-
protected readonly batchProvider: BatchProvider;
|
|
239
|
-
protected readonly context: BatchContext<Static<TItem>, TResponse>;
|
|
240
|
-
constructor(...args: ConstructorParameters<typeof Descriptor<BatchDescriptorOptions<TItem, TResponse>>>);
|
|
241
|
-
/**
|
|
242
|
-
* Pushes an item into the batch and returns immediately with a unique ID.
|
|
243
|
-
* The item will be processed asynchronously with other items when the batch is flushed.
|
|
244
|
-
* Use wait(id) to get the processing result.
|
|
245
|
-
*/
|
|
246
|
-
push(item: Static<TItem>): Promise<string>;
|
|
247
|
-
/**
|
|
248
|
-
* Wait for a specific item to be processed and get its result.
|
|
249
|
-
* @param id The item ID returned from push()
|
|
250
|
-
* @returns The processing result
|
|
251
|
-
* @throws If the item doesn't exist or processing failed
|
|
252
|
-
*/
|
|
253
|
-
wait(id: string): Promise<TResponse>;
|
|
254
|
-
/**
|
|
255
|
-
* Get the current status of an item.
|
|
256
|
-
* @param id The item ID returned from push()
|
|
257
|
-
* @returns Status information or undefined if item doesn't exist
|
|
258
|
-
*/
|
|
259
|
-
status(id: string): {
|
|
260
|
-
status: "pending" | "processing";
|
|
261
|
-
} | {
|
|
262
|
-
status: "completed";
|
|
263
|
-
result: TResponse;
|
|
264
|
-
} | {
|
|
265
|
-
status: "failed";
|
|
266
|
-
error: Error;
|
|
267
|
-
} | undefined;
|
|
268
|
-
/**
|
|
269
|
-
* Flush all partitions or a specific partition.
|
|
270
|
-
*/
|
|
271
|
-
flush(partitionKey?: string): Promise<void>;
|
|
272
|
-
/**
|
|
273
|
-
* Clears completed and failed items from memory.
|
|
274
|
-
* Call this periodically in long-running applications to prevent memory leaks.
|
|
275
|
-
*
|
|
276
|
-
* @param status Optional: only clear items with this specific status ('completed' or 'failed')
|
|
277
|
-
* @returns The number of items cleared
|
|
278
|
-
*/
|
|
279
|
-
clearCompleted(status?: "completed" | "failed"): number;
|
|
280
|
-
protected readonly onReady: alepha1.HookDescriptor<"ready">;
|
|
281
|
-
protected readonly dispose: alepha1.HookDescriptor<"stop">;
|
|
282
|
-
}
|
|
283
|
-
//#endregion
|
|
284
|
-
//#region src/batch/index.d.ts
|
|
285
|
-
/**
|
|
286
|
-
* This module allows you to group multiple asynchronous operations into a single "batch," which is then processed together.
|
|
287
|
-
* This is an essential pattern for improving performance, reducing I/O, and interacting efficiently with rate-limited APIs or databases.
|
|
288
|
-
*
|
|
289
|
-
* ```ts
|
|
290
|
-
* import { Alepha, $hook, run, t } from "alepha";
|
|
291
|
-
* import { $batch } from "alepha/batch";
|
|
292
|
-
*
|
|
293
|
-
* class LoggingService {
|
|
294
|
-
* // define the batch processor
|
|
295
|
-
* logBatch = $batch({
|
|
296
|
-
* schema: t.text(),
|
|
297
|
-
* maxSize: 10,
|
|
298
|
-
* maxDuration: [5, "seconds"],
|
|
299
|
-
* handler: async (items) => {
|
|
300
|
-
* console.log(`[BATCH LOG] Processing ${items.length} events:`, items);
|
|
301
|
-
* },
|
|
302
|
-
* });
|
|
303
|
-
*
|
|
304
|
-
* // example of how to use it
|
|
305
|
-
* onReady = $hook({
|
|
306
|
-
* on: "ready",
|
|
307
|
-
* handler: async () => {
|
|
308
|
-
* // push() returns an ID immediately
|
|
309
|
-
* const id1 = await this.logBatch.push("Application started.");
|
|
310
|
-
* const id2 = await this.logBatch.push("User authenticated.");
|
|
311
|
-
*
|
|
312
|
-
* // optionally wait for processing to complete
|
|
313
|
-
* await this.logBatch.wait(id1);
|
|
314
|
-
*
|
|
315
|
-
* // or check the status
|
|
316
|
-
* const status = this.logBatch.status(id2);
|
|
317
|
-
* console.log(status?.status); // "pending" | "processing" | "completed" | "failed"
|
|
318
|
-
* },
|
|
319
|
-
* });
|
|
320
|
-
* }
|
|
321
|
-
* ```
|
|
322
|
-
*
|
|
323
|
-
* @see {@link $batch}
|
|
324
|
-
* @see {@link BatchProvider}
|
|
325
|
-
* @module alepha.batch
|
|
326
|
-
*/
|
|
327
|
-
declare const AlephaBatch: alepha1.Service<alepha1.Module>;
|
|
328
|
-
//#endregion
|
|
329
|
-
export { $batch, AlephaBatch, BatchContext, BatchDescriptor, BatchDescriptorOptions, type BatchItemState, type BatchItemStatus, BatchOptions, BatchProvider, PartitionState };
|
|
330
|
-
//# sourceMappingURL=index.d.cts.map
|
package/dist/bin/index.cjs
DELETED
|
@@ -1,17 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env node
|
|
2
|
-
require("tsx");
|
|
3
|
-
let alepha = require("alepha");
|
|
4
|
-
let alepha_cli = require("alepha/cli");
|
|
5
|
-
|
|
6
|
-
//#region src/bin/index.ts
|
|
7
|
-
const alepha$1 = alepha.Alepha.create({ env: {
|
|
8
|
-
LOG_LEVEL: "alepha.core:warn,info",
|
|
9
|
-
LOG_FORMAT: "raw",
|
|
10
|
-
CLI_NAME: "alepha",
|
|
11
|
-
CLI_DESCRIPTION: `Alepha CLI v${alepha_cli.version} - Create and manage Alepha projects.`
|
|
12
|
-
} });
|
|
13
|
-
alepha$1.with(alepha_cli.AlephaCli);
|
|
14
|
-
(0, alepha.run)(alepha$1);
|
|
15
|
-
|
|
16
|
-
//#endregion
|
|
17
|
-
//# sourceMappingURL=index.cjs.map
|
package/dist/bin/index.cjs.map
DELETED
|
@@ -1 +0,0 @@
|
|
|
1
|
-
{"version":3,"file":"index.cjs","names":["alepha","Alepha","version","AlephaCli"],"sources":["../../src/bin/index.ts"],"sourcesContent":["#!/usr/bin/env node\nimport \"tsx\";\nimport { Alepha, run } from \"alepha\";\nimport { AlephaCli, version } from \"alepha/cli\";\n\nconst alepha = Alepha.create({\n env: {\n LOG_LEVEL: \"alepha.core:warn,info\",\n LOG_FORMAT: \"raw\",\n CLI_NAME: \"alepha\",\n CLI_DESCRIPTION: `Alepha CLI v${version} - Create and manage Alepha projects.`,\n },\n});\n\nalepha.with(AlephaCli);\n\nrun(alepha);\n"],"mappings":";;;;;;AAKA,MAAMA,WAASC,cAAO,OAAO,EAC3B,KAAK;CACH,WAAW;CACX,YAAY;CACZ,UAAU;CACV,iBAAiB,eAAeC,mBAAQ;CACzC,EACF,CAAC;AAEFF,SAAO,KAAKG,qBAAU;gBAElBH,SAAO"}
|
package/dist/bin/index.d.cts
DELETED
|
@@ -1 +0,0 @@
|
|
|
1
|
-
export { };
|
package/dist/bucket/index.cjs
DELETED
|
@@ -1,303 +0,0 @@
|
|
|
1
|
-
let alepha = require("alepha");
|
|
2
|
-
let alepha_file = require("alepha/file");
|
|
3
|
-
let node_crypto = require("node:crypto");
|
|
4
|
-
let node_fs = require("node:fs");
|
|
5
|
-
let node_fs_promises = require("node:fs/promises");
|
|
6
|
-
let node_os = require("node:os");
|
|
7
|
-
let node_path = require("node:path");
|
|
8
|
-
let alepha_logger = require("alepha/logger");
|
|
9
|
-
|
|
10
|
-
//#region src/bucket/errors/InvalidFileError.ts
|
|
11
|
-
var InvalidFileError = class extends Error {
|
|
12
|
-
status = 400;
|
|
13
|
-
};
|
|
14
|
-
|
|
15
|
-
//#endregion
|
|
16
|
-
//#region src/bucket/providers/FileStorageProvider.ts
|
|
17
|
-
var FileStorageProvider = class {};
|
|
18
|
-
|
|
19
|
-
//#endregion
|
|
20
|
-
//#region src/bucket/errors/FileNotFoundError.ts
|
|
21
|
-
var FileNotFoundError = class extends alepha.AlephaError {
|
|
22
|
-
status = 404;
|
|
23
|
-
};
|
|
24
|
-
|
|
25
|
-
//#endregion
|
|
26
|
-
//#region src/bucket/providers/MemoryFileStorageProvider.ts
|
|
27
|
-
var MemoryFileStorageProvider = class {
|
|
28
|
-
files = {};
|
|
29
|
-
fileSystem = (0, alepha.$inject)(alepha_file.FileSystemProvider);
|
|
30
|
-
fileDetector = (0, alepha.$inject)(alepha_file.FileDetector);
|
|
31
|
-
async upload(bucketName, file, fileId) {
|
|
32
|
-
fileId ??= this.createId();
|
|
33
|
-
this.files[`${bucketName}/${fileId}`] = this.fileSystem.createFile({
|
|
34
|
-
stream: file.stream(),
|
|
35
|
-
name: file.name,
|
|
36
|
-
type: file.type,
|
|
37
|
-
size: file.size
|
|
38
|
-
});
|
|
39
|
-
return fileId;
|
|
40
|
-
}
|
|
41
|
-
async download(bucketName, fileId) {
|
|
42
|
-
const fileKey = `${bucketName}/${fileId}`;
|
|
43
|
-
const file = this.files[fileKey];
|
|
44
|
-
if (!file) throw new FileNotFoundError(`File with ID ${fileId} not found.`);
|
|
45
|
-
return file;
|
|
46
|
-
}
|
|
47
|
-
async exists(bucketName, fileId) {
|
|
48
|
-
return `${bucketName}/${fileId}` in this.files;
|
|
49
|
-
}
|
|
50
|
-
async delete(bucketName, fileId) {
|
|
51
|
-
const fileKey = `${bucketName}/${fileId}`;
|
|
52
|
-
if (!(fileKey in this.files)) throw new FileNotFoundError(`File with ID ${fileId} not found.`);
|
|
53
|
-
delete this.files[fileKey];
|
|
54
|
-
}
|
|
55
|
-
createId() {
|
|
56
|
-
return (0, node_crypto.randomUUID)();
|
|
57
|
-
}
|
|
58
|
-
};
|
|
59
|
-
|
|
60
|
-
//#endregion
|
|
61
|
-
//#region src/bucket/descriptors/$bucket.ts
|
|
62
|
-
/**
|
|
63
|
-
* Creates a bucket descriptor for file storage and management with configurable validation.
|
|
64
|
-
*
|
|
65
|
-
* Provides a comprehensive file storage system that handles uploads, downloads, validation,
|
|
66
|
-
* and management across multiple storage backends with MIME type and size limit controls.
|
|
67
|
-
*
|
|
68
|
-
* **Key Features**
|
|
69
|
-
* - Multi-provider support (filesystem, cloud storage, in-memory)
|
|
70
|
-
* - Automatic MIME type and file size validation
|
|
71
|
-
* - Event integration for file operations monitoring
|
|
72
|
-
* - Flexible per-bucket and per-operation configuration
|
|
73
|
-
* - Smart file type and size detection
|
|
74
|
-
*
|
|
75
|
-
* **Common Use Cases**
|
|
76
|
-
* - User profile pictures and document uploads
|
|
77
|
-
* - Product images and media management
|
|
78
|
-
* - Document storage and retrieval systems
|
|
79
|
-
*
|
|
80
|
-
* @example
|
|
81
|
-
* ```ts
|
|
82
|
-
* class MediaService {
|
|
83
|
-
* images = $bucket({
|
|
84
|
-
* name: "user-images",
|
|
85
|
-
* mimeTypes: ["image/jpeg", "image/png", "image/gif"],
|
|
86
|
-
* maxSize: 5 // 5MB limit
|
|
87
|
-
* });
|
|
88
|
-
*
|
|
89
|
-
* documents = $bucket({
|
|
90
|
-
* name: "documents",
|
|
91
|
-
* mimeTypes: ["application/pdf", "text/plain"],
|
|
92
|
-
* maxSize: 50 // 50MB limit
|
|
93
|
-
* });
|
|
94
|
-
*
|
|
95
|
-
* async uploadProfileImage(file: FileLike, userId: string): Promise<string> {
|
|
96
|
-
* const fileId = await this.images.upload(file);
|
|
97
|
-
* await this.userService.updateProfileImage(userId, fileId);
|
|
98
|
-
* return fileId;
|
|
99
|
-
* }
|
|
100
|
-
*
|
|
101
|
-
* async downloadDocument(documentId: string): Promise<FileLike> {
|
|
102
|
-
* return await this.documents.download(documentId);
|
|
103
|
-
* }
|
|
104
|
-
*
|
|
105
|
-
* async deleteDocument(documentId: string): Promise<void> {
|
|
106
|
-
* await this.documents.delete(documentId);
|
|
107
|
-
* }
|
|
108
|
-
* }
|
|
109
|
-
* ```
|
|
110
|
-
*/
|
|
111
|
-
/**
 * Creates a bucket descriptor bound to a class property.
 * The descriptor exposes upload/download/exists/delete over the configured provider.
 */
const $bucket = (options) => (0, alepha.createDescriptor)(BucketDescriptor, options);
var BucketDescriptor = class extends alepha.Descriptor {
	provider = this.$provider();
	fileSystem = (0, alepha.$inject)(alepha_file.FileSystemProvider);
	/** Bucket name: explicit `name` option, or the property key the descriptor is declared on. */
	get name() {
		return this.options.name ?? `${this.config.propertyKey}`;
	}
	/**
	 * Uploads a file to the bucket.
	 *
	 * Validates MIME type (when `mimeTypes` is configured) and size
	 * (default limit: 10 MB), delegates storage to the provider, then
	 * emits "bucket:file:uploaded". Returns the provider-assigned file id.
	 * Throws InvalidFileError when validation fails.
	 */
	async upload(file, options) {
		// Normalize a browser/Node `File` into the framework's file wrapper.
		if (file instanceof File) file = this.fileSystem.createFile({ file });
		// Per-call options override the descriptor-level defaults.
		options = {
			...this.options,
			...options
		};
		const mimeTypes = options.mimeTypes;
		const maxSize = options.maxSize ?? 10;
		if (mimeTypes) {
			const mimeType = file.type || "application/octet-stream";
			if (!mimeTypes.includes(mimeType)) throw new InvalidFileError(`MIME type ${mimeType} is not allowed in bucket ${this.name}`);
		}
		// BUG FIX: the message previously interpolated `this.options.maxSize`, which is
		// `undefined` when the limit comes from per-call options or the 10 MB default;
		// use the effective `maxSize` that the check itself enforces.
		if (file.size > maxSize * 1024 * 1024) throw new InvalidFileError(`File size ${file.size} exceeds the maximum size of ${maxSize} MB in bucket ${this.name}`);
		const id = await this.provider.upload(this.name, file);
		await this.alepha.events.emit("bucket:file:uploaded", {
			id,
			bucket: this,
			file,
			options
		});
		return id;
	}
	/**
	 * Delete permanently a file from the bucket.
	 * Emits "bucket:file:deleted" unless `skipHook` is true.
	 */
	async delete(fileId, skipHook = false) {
		await this.provider.delete(this.name, fileId);
		if (skipHook) return;
		await this.alepha.events.emit("bucket:file:deleted", {
			id: fileId,
			bucket: this
		});
	}
	/**
	 * Checks if a file exists in the bucket.
	 */
	async exists(fileId) {
		return this.provider.exists(this.name, fileId);
	}
	/**
	 * Downloads a file from the bucket.
	 */
	async download(fileId) {
		return this.provider.download(this.name, fileId);
	}
	// Resolves the storage backend: default provider, the in-memory one
	// (when `provider: "memory"`), or a custom provider class.
	$provider() {
		if (!this.options.provider) return this.alepha.inject(FileStorageProvider);
		if (this.options.provider === "memory") return this.alepha.inject(MemoryFileStorageProvider);
		return this.alepha.inject(this.options.provider);
	}
};
$bucket[alepha.KIND] = BucketDescriptor;
|
|
173
|
-
|
|
174
|
-
//#endregion
|
|
175
|
-
//#region src/bucket/providers/LocalFileStorageProvider.ts
|
|
176
|
-
/**
 * Local file storage configuration atom
 *
 * Holds the root directory used by `LocalFileStorageProvider`; defaults
 * to a folder under `node_modules` so local runs need no setup.
 */
const localFileStorageOptions = (0, alepha.$atom)({
	name: "alepha.bucket.local.options",
	schema: alepha.t.object({
		storagePath: alepha.t.string({ description: "Directory path where files will be stored" })
	}),
	default: { storagePath: "node_modules/.alepha/buckets" }
});
|
|
184
|
-
/**
 * File storage provider persisting bucket files on the local filesystem.
 * Each bucket maps to a directory under `storagePath`.
 */
var LocalFileStorageProvider = class {
	alepha = (0, alepha.$inject)(alepha.Alepha);
	log = (0, alepha_logger.$logger)();
	fileDetector = (0, alepha.$inject)(alepha_file.FileDetector);
	fileSystemProvider = (0, alepha.$inject)(alepha_file.FileSystemProvider);
	options = (0, alepha.$use)(localFileStorageOptions);
	/** Root directory under which every bucket directory lives. */
	get storagePath() {
		return this.options.storagePath;
	}
	// Test runs that still point at the default path are redirected to a unique
	// temp directory so they never touch (or collide on) the real workspace folder.
	onConfigure = (0, alepha.$hook)({
		on: "configure",
		handler: async () => {
			const onDefaultPath = this.storagePath === localFileStorageOptions.options.default.storagePath;
			if (this.alepha.isTest() && onDefaultPath) {
				const storagePath = (0, node_path.join)((0, node_os.tmpdir)(), `alepha-test-${Date.now()}`);
				this.alepha.state.set(localFileStorageOptions, { storagePath });
			}
		}
	});
	// Ensure the root and one directory per bucket served by this provider exist.
	onStart = (0, alepha.$hook)({
		on: "start",
		handler: async () => {
			try {
				await (0, node_fs_promises.mkdir)(this.storagePath, { recursive: true });
			} catch {}
			// best-effort: the directory may already exist
			for (const bucket of this.alepha.descriptors($bucket)) {
				if (bucket.provider === this) {
					await (0, node_fs_promises.mkdir)((0, node_path.join)(this.storagePath, bucket.name), { recursive: true });
					this.log.debug(`Bucket '${bucket.name}' at ${this.storagePath} OK`);
				}
			}
		}
	});
	/** Writes the file under the bucket directory; returns its (possibly generated) id. */
	async upload(bucketName, file, fileId) {
		const id = fileId ?? this.createId(file.type);
		this.log.trace(`Uploading file to ${bucketName}`);
		await this.fileSystemProvider.writeFile(this.path(bucketName, id), file);
		return id;
	}
	/**
	 * Opens the stored file as a streamed framework file.
	 * Throws FileNotFoundError when the path does not exist.
	 */
	async download(bucketName, fileId) {
		const target = this.path(bucketName, fileId);
		try {
			const info = await (0, node_fs_promises.stat)(target);
			return this.fileSystemProvider.createFile({
				stream: (0, node_fs.createReadStream)(target),
				name: fileId,
				// content type is derived from the id's extension
				type: this.fileDetector.getContentType(fileId),
				size: info.size
			});
		} catch (err) {
			if (this.isErrorNoEntry(err)) throw new FileNotFoundError(`File with ID ${fileId} not found.`);
			throw new alepha.AlephaError("Invalid file operation", { cause: err });
		}
	}
	/** True when the file is present on disk; rethrows unexpected fs errors. */
	async exists(bucketName, fileId) {
		try {
			await (0, node_fs_promises.stat)(this.path(bucketName, fileId));
		} catch (err) {
			if (this.isErrorNoEntry(err)) return false;
			throw new alepha.AlephaError("Error checking file existence", { cause: err });
		}
		return true;
	}
	/** Removes the file; throws FileNotFoundError when it was never there. */
	async delete(bucketName, fileId) {
		try {
			return await (0, node_fs_promises.unlink)(this.path(bucketName, fileId));
		} catch (err) {
			if (this.isErrorNoEntry(err)) throw new FileNotFoundError(`File with ID ${fileId} not found.`);
			throw new alepha.AlephaError("Error deleting file", { cause: err });
		}
	}
	/** Raw fs stat of a stored file. */
	stat(bucket, fileId) {
		return (0, node_fs_promises.stat)(this.path(bucket, fileId));
	}
	/** New file id: random UUID plus an extension inferred from the MIME type. */
	createId(mimeType) {
		const extension = this.fileDetector.getExtensionFromMimeType(mimeType);
		return `${(0, node_crypto.randomUUID)()}.${extension}`;
	}
	/** Absolute-ish path for a bucket (and optionally a file inside it). */
	path(bucket, fileId = "") {
		return (0, node_path.join)(this.storagePath, bucket, fileId);
	}
	/** Matches Node's ENOENT ("no such file or directory") errors. */
	isErrorNoEntry(err) {
		if (!(err instanceof Error)) return false;
		return "code" in err && err.code === "ENOENT";
	}
};
|
|
265
|
-
|
|
266
|
-
//#endregion
|
|
267
|
-
//#region src/bucket/index.ts
|
|
268
|
-
/**
 * Provides file storage capabilities through declarative bucket descriptors with support for multiple storage backends.
 *
 * Registers the `$bucket` descriptor plus the built-in providers, and wires
 * `FileStorageProvider` to an in-memory backend under test and the local
 * filesystem backend otherwise (as an optional, overridable binding).
 *
 * @see {@link $bucket}
 * @see {@link FileStorageProvider}
 * @module alepha.bucket
 */
const AlephaBucket = (0, alepha.$module)({
	name: "alepha.bucket",
	descriptors: [$bucket],
	services: [
		FileStorageProvider,
		MemoryFileStorageProvider,
		LocalFileStorageProvider
	],
	register: (app) => app.with({
		optional: true,
		provide: FileStorageProvider,
		use: app.isTest() ? MemoryFileStorageProvider : LocalFileStorageProvider
	})
});
|
|
293
|
-
|
|
294
|
-
//#endregion
|
|
295
|
-
// Public surface of the alepha.bucket module.
Object.assign(exports, {
	$bucket,
	AlephaBucket,
	BucketDescriptor,
	FileNotFoundError,
	FileStorageProvider,
	LocalFileStorageProvider,
	MemoryFileStorageProvider,
	localFileStorageOptions
});
//# sourceMappingURL=index.cjs.map
|