@od-oneapp/storage 2026.1.1301
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +854 -0
- package/dist/client-next.d.mts +61 -0
- package/dist/client-next.d.mts.map +1 -0
- package/dist/client-next.mjs +111 -0
- package/dist/client-next.mjs.map +1 -0
- package/dist/client-utils-Dx6W25iz.d.mts +43 -0
- package/dist/client-utils-Dx6W25iz.d.mts.map +1 -0
- package/dist/client.d.mts +28 -0
- package/dist/client.d.mts.map +1 -0
- package/dist/client.mjs +183 -0
- package/dist/client.mjs.map +1 -0
- package/dist/env-BVHLmQdh.mjs +128 -0
- package/dist/env-BVHLmQdh.mjs.map +1 -0
- package/dist/env.mjs +3 -0
- package/dist/health-check-D7LnnDec.mjs +746 -0
- package/dist/health-check-D7LnnDec.mjs.map +1 -0
- package/dist/health-check-im_huJ59.d.mts +116 -0
- package/dist/health-check-im_huJ59.d.mts.map +1 -0
- package/dist/index.d.mts +60 -0
- package/dist/index.d.mts.map +1 -0
- package/dist/index.mjs +3 -0
- package/dist/keys.d.mts +37 -0
- package/dist/keys.d.mts.map +1 -0
- package/dist/keys.mjs +253 -0
- package/dist/keys.mjs.map +1 -0
- package/dist/server-edge.d.mts +28 -0
- package/dist/server-edge.d.mts.map +1 -0
- package/dist/server-edge.mjs +88 -0
- package/dist/server-edge.mjs.map +1 -0
- package/dist/server-next.d.mts +183 -0
- package/dist/server-next.d.mts.map +1 -0
- package/dist/server-next.mjs +1353 -0
- package/dist/server-next.mjs.map +1 -0
- package/dist/server.d.mts +70 -0
- package/dist/server.d.mts.map +1 -0
- package/dist/server.mjs +384 -0
- package/dist/server.mjs.map +1 -0
- package/dist/types.d.mts +321 -0
- package/dist/types.d.mts.map +1 -0
- package/dist/types.mjs +3 -0
- package/dist/validation.d.mts +101 -0
- package/dist/validation.d.mts.map +1 -0
- package/dist/validation.mjs +590 -0
- package/dist/validation.mjs.map +1 -0
- package/dist/vercel-blob-07Sx0Akn.d.mts +31 -0
- package/dist/vercel-blob-07Sx0Akn.d.mts.map +1 -0
- package/dist/vercel-blob-DA8HaYuw.mjs +158 -0
- package/dist/vercel-blob-DA8HaYuw.mjs.map +1 -0
- package/package.json +111 -0
- package/src/actions/blob-upload.ts +171 -0
- package/src/actions/index.ts +23 -0
- package/src/actions/mediaActions.ts +1071 -0
- package/src/actions/productMediaActions.ts +538 -0
- package/src/auth-helpers.ts +386 -0
- package/src/capabilities.ts +225 -0
- package/src/client-next.ts +184 -0
- package/src/client-utils.ts +292 -0
- package/src/client.ts +102 -0
- package/src/constants.ts +88 -0
- package/src/health-check.ts +81 -0
- package/src/multi-storage.ts +230 -0
- package/src/multipart.ts +497 -0
- package/src/retry-utils.test.ts +118 -0
- package/src/retry-utils.ts +59 -0
- package/src/server-edge.ts +129 -0
- package/src/server-next.ts +14 -0
- package/src/server.ts +666 -0
- package/src/validation.test.ts +312 -0
- package/src/validation.ts +827 -0
package/dist/health-check-D7LnnDec.mjs
@@ -0,0 +1,746 @@
import { t as VercelBlobProvider } from "./vercel-blob-DA8HaYuw.mjs";
import { createRequire } from "node:module";
import { logWarn } from "@od-oneapp/shared/logs";

//#region providers/cloudflare-images.ts
/**
 * @fileoverview Cloudflare Images storage provider
 *
 * Lazy shim around the monorepo integration package.
 *
 * Why:
 * - `@integrations/*` packages are monorepo-private and may not be installed in extracted apps.
 * - This avoids hard module resolution failures during bundling/build; the integration is only
 *   required if the provider is instantiated.
 */
const require$1 = createRequire(import.meta.url);
function loadProvider$1() {
  return require$1("@integrations/cloudflare/storage-provider/images").CloudflareImagesProvider;
}
const CloudflareImagesProvider = class CloudflareImagesProviderShim {
  constructor(...args) {
    return new (loadProvider$1())(...args);
  }
};

//#endregion
//#region providers/cloudflare-r2.ts
/**
 * @fileoverview Cloudflare R2 storage provider
 *
 * Lazy shim around the monorepo integration package.
 *
 * Why:
 * - `@integrations/*` packages are monorepo-private and may not be installed in extracted apps.
 * - This avoids hard module resolution failures during bundling/build; the integration is only
 *   required if the provider is instantiated.
 */
const require = createRequire(import.meta.url);
function loadProvider() {
  return require("@integrations/cloudflare/storage-provider/r2").CloudflareR2Provider;
}
const CloudflareR2Provider = class CloudflareR2ProviderShim {
  constructor(...args) {
    return new (loadProvider())(...args);
  }
};

//#endregion
//#region src/constants.ts
/**
 * @fileoverview Storage Package Constants
 *
 * Centralized constants for storage operations to avoid magic numbers
 * and improve maintainability.
 *
 * Includes:
 * - URL expiration times
 * - File size limits
 * - Multipart upload thresholds
 * - Retry configuration
 * - Rate limiting settings
 *
 * @module @od-oneapp/storage/constants
 */
const STORAGE_CONSTANTS = {
  DEFAULT_URL_EXPIRY_SECONDS: 3600,
  PRODUCT_URL_EXPIRY_SECONDS: 3600,
  UPLOAD_URL_EXPIRY_SECONDS: 1800,
  ADMIN_URL_EXPIRY_SECONDS: 7200,
  MULTIPART_THRESHOLD_BYTES: 100 * 1024 * 1024,
  DEFAULT_PART_SIZE_BYTES: 5 * 1024 * 1024,
  DEFAULT_MAX_PART_SIZE_BYTES: 25 * 1024 * 1024,
  DEFAULT_QUEUE_SIZE: 4,
  DEFAULT_BATCH_SIZE: 5,
  MAX_BATCH_SIZE: 50,
  DEFAULT_REQUEST_TIMEOUT_MS: 3e4,
  DEFAULT_UPLOAD_TIMEOUT_MS: 3e5,
  DEFAULT_DOWNLOAD_TIMEOUT_MS: 6e4,
  DEFAULT_MAX_RETRIES: 3,
  RETRY_BASE_DELAY_MS: 1e3,
  MAX_KEY_LENGTH: 1024,
  MAX_FILENAME_LENGTH: 255,
  DEFAULT_RATE_LIMIT_REQUESTS: 100,
  DEFAULT_RATE_LIMIT_WINDOW_MS: 60 * 1e3,
  HEALTH_CHECK_KEY: "__health_check__"
};
/**
 * File size thresholds for multipart upload decisions
 */
const MULTIPART_THRESHOLDS = {
  SMALL_FILE: 100 * 1024 * 1024,
  MEDIUM_FILE: 1024 * 1024 * 1024,
  LARGE_FILE: Infinity
};
/**
 * Part sizes based on file size
 */
const PART_SIZES = {
  SMALL: 5 * 1024 * 1024,
  MEDIUM: 10 * 1024 * 1024,
  LARGE: 25 * 1024 * 1024
};
/**
 * Default storage capabilities for providers that don't implement getCapabilities()
 * Single source of truth for capability defaults
 */
const DEFAULT_STORAGE_CAPABILITIES = {
  multipart: false,
  presignedUrls: false,
  progressTracking: false,
  abortSignal: false,
  metadata: false,
  customDomains: false,
  edgeCompatible: false,
  versioning: false,
  encryption: false,
  directoryListing: false
};

//#endregion
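The two Cloudflare provider shims above rely on the fact that a JavaScript constructor which returns an object hands that object back to the caller: `new CloudflareR2ProviderShim(...)` therefore yields an instance of the real provider, which is only `require`d at construction time. A minimal sketch of the same pattern, with purely illustrative names:

```ts
// Minimal sketch of the lazy-shim pattern used by the Cloudflare provider shims above.
// All names here are illustrative; only the pattern matches the bundled code.
class RealProvider {
  constructor(readonly bucket: string) {}
  describe(): string {
    return `real provider for bucket "${this.bucket}"`;
  }
}

// Stand-in for the deferred `require("@integrations/...")` call: the real module
// is only resolved when this function runs, i.e. when the shim is constructed.
function loadRealProvider(): typeof RealProvider {
  return RealProvider;
}

const LazyProvider = class LazyProviderShim {
  constructor(...args: [bucket: string]) {
    // A constructor that returns an object overrides `this`, so `new LazyProvider(...)`
    // yields an instance of the lazily loaded RealProvider, not of the shim.
    return new (loadRealProvider())(...args);
  }
};

const provider = new LazyProvider("media") as RealProvider;
console.log(provider.describe()); // real provider for bucket "media"
```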
//#region src/multi-storage.ts
/**
 * @fileoverview Multi-storage provider manager
 *
 * Manages multiple storage providers with routing and fallback capabilities.
 * Allows using different providers for different use cases or as backups.
 *
 * Features:
 * - Provider routing based on key patterns
 * - Fallback to default provider
 * - Unified API across multiple providers
 *
 * @module @od-oneapp/storage/multi-storage
 */
var MultiStorageManager = class {
  providers = /* @__PURE__ */ new Map();
  defaultProvider;
  routing;
  constructor(config) {
    for (const [name, providerConfig] of Object.entries(config.providers)) this.providers.set(name, this.createProvider(providerConfig));
    const firstProvider = Object.keys(config.providers)[0];
    this.defaultProvider = config.defaultProvider ?? firstProvider ?? "";
    if (!this.defaultProvider) throw new Error("No storage providers configured");
    this.routing = config.routing;
  }
  createProvider(config) {
    switch (config.provider) {
      case "multi": throw new Error("Multi provider cannot be nested");
      case "cloudflare-r2":
        if (!config.cloudflareR2) throw new Error("Cloudflare R2 configuration is required");
        if (Array.isArray(config.cloudflareR2)) {
          if (config.cloudflareR2.length === 0) throw new Error("No R2 configurations provided");
          const firstR2Config = config.cloudflareR2[0];
          if (!firstR2Config) throw new Error("First R2 configuration is undefined");
          return new CloudflareR2Provider(firstR2Config);
        }
        return new CloudflareR2Provider(config.cloudflareR2);
      case "cloudflare-images":
        if (!config.cloudflareImages) throw new Error("Cloudflare Images configuration is required");
        return new CloudflareImagesProvider(config.cloudflareImages);
      case "vercel-blob":
        if (!config.vercelBlob?.token) throw new Error("Vercel Blob token is required");
        return new VercelBlobProvider(config.vercelBlob.token);
      default: throw new Error(`Unknown storage provider: ${config.provider}`);
    }
  }
  getProviderForKey(key) {
    if (this.routing) {
      const extension = key.split(".").pop()?.toLowerCase();
      if (this.routing.images && this.isImageFile(extension)) {
        const provider = this.providers.get(this.routing.images);
        if (provider) return {
          provider,
          providerName: this.routing.images
        };
      }
      if (this.routing.documents && this.isDocumentFile(extension)) {
        const provider = this.providers.get(this.routing.documents);
        if (provider) return {
          provider,
          providerName: this.routing.documents
        };
      }
      for (const [pattern, providerName] of Object.entries(this.routing)) if (pattern !== "images" && pattern !== "documents" && providerName) {
        if (key.includes(pattern)) {
          const provider = this.providers.get(providerName);
          if (provider) return {
            provider,
            providerName
          };
        }
      }
    }
    const provider = this.providers.get(this.defaultProvider);
    if (!provider) throw new Error(`Default provider '${this.defaultProvider}' not found`);
    return {
      provider,
      providerName: this.defaultProvider
    };
  }
  isImageFile(extension) {
    return extension ? [
      "jpg",
      "jpeg",
      "png",
      "gif",
      "webp",
      "avif",
      "svg",
      "ico"
    ].includes(extension) : false;
  }
  isDocumentFile(extension) {
    return extension ? [
      "pdf",
      "doc",
      "docx",
      "xls",
      "xlsx",
      "ppt",
      "pptx",
      "txt",
      "csv"
    ].includes(extension) : false;
  }
  getProvider(name) {
    return this.providers.get(name);
  }
  async delete(key) {
    const { provider } = this.getProviderForKey(key);
    return provider.delete(key);
  }
  async download(key) {
    const { provider } = this.getProviderForKey(key);
    return provider.download(key);
  }
  async exists(key) {
    const { provider } = this.getProviderForKey(key);
    return provider.exists(key);
  }
  async getMetadata(key) {
    const { provider } = this.getProviderForKey(key);
    return provider.getMetadata(key);
  }
  async getUrl(key, options) {
    const { provider } = this.getProviderForKey(key);
    return provider.getUrl(key, options);
  }
  async list(options) {
    if (options?.provider) {
      const provider = this.providers.get(options.provider);
      if (!provider) throw new Error(`Provider '${options.provider}' not found`);
      return provider.list(options);
    }
    const allResults = [];
    for (const provider of this.providers.values()) {
      const results = await provider.list(options);
      allResults.push(...results);
    }
    return allResults;
  }
  async upload(key, data, options) {
    let provider;
    if (options?.provider) {
      const requestedProvider = this.providers.get(options.provider);
      if (!requestedProvider) throw new Error(`Provider '${options.provider}' not found`);
      provider = requestedProvider;
    } else {
      const { provider: routedProvider } = this.getProviderForKey(key);
      provider = routedProvider;
    }
    return provider.upload(key, data, options);
  }
  async getCloudflareImagesProvider() {
    for (const provider of this.providers.values()) if (provider instanceof CloudflareImagesProvider) return provider;
  }
  getProviderNames() {
    return Array.from(this.providers.keys());
  }
};

//#endregion
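In `getProviderForKey`, a key is matched first against image extensions, then document extensions, then any other routing entry treated as a key substring, before falling back to `defaultProvider`. A hedged usage sketch follows; the `@od-oneapp/storage` import path and the environment variable names are assumptions, while the config field names mirror the `createProvider` switch above:

```ts
// Hedged usage sketch for MultiStorageManager. The import path and env var names
// are assumptions; the config shape follows the createProvider switch above.
import { MultiStorageManager } from "@od-oneapp/storage";

const storage = new MultiStorageManager({
  providers: {
    blob: { provider: "vercel-blob", vercelBlob: { token: process.env.BLOB_READ_WRITE_TOKEN! } },
    media: { provider: "vercel-blob", vercelBlob: { token: process.env.MEDIA_BLOB_TOKEN! } },
  },
  defaultProvider: "blob",
  routing: {
    images: "media",     // *.jpg, *.png, *.webp, ... -> "media"
    documents: "blob",   // *.pdf, *.docx, *.csv, ...  -> "blob"
    "invoices/": "blob", // any key containing "invoices/" -> "blob"
  },
});

console.log(storage.getProviderNames()); // ["blob", "media"]

// Routing resolution order inside getProviderForKey:
//   1. image extension    -> routing.images
//   2. document extension -> routing.documents
//   3. substring pattern  -> matching provider
//   4. otherwise          -> defaultProvider
// The accepted upload body types depend on the underlying provider.
await storage.upload("products/hero.webp", new Blob(["hello"]));   // routed to "media"
await storage.upload("invoices/2026-01.pdf", new Blob(["hello"])); // routed to "blob"
```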
//#region src/multipart.ts
/**
 * @fileoverview Unified Multipart Upload Manager
 *
 * Provides a consistent interface for multipart uploads across all storage providers.
 * Handles provider-specific differences and provides retry logic, progress tracking,
 * and error recovery.
 *
 * Features:
 * - Automatic part size calculation
 * - Concurrent part uploads
 * - Progress tracking
 * - Resume support
 * - Error recovery
 *
 * @module @od-oneapp/storage/multipart
 */
var MultipartUploadManager = class {
  provider;
  state = null;
  abortController = null;
  constructor(provider) {
    this.provider = provider;
  }
  /**
   * Check if provider supports multipart uploads
   */
  supportsMultipart() {
    return (this.provider.getCapabilities?.())?.multipart ?? false;
  }
  /**
   * Create a new multipart upload
   *
   * @param key - Storage key
   * @param totalSize - Total file size in bytes
   * @param options - Upload options
   * @returns Upload state
   */
  async createUpload(key, totalSize, options = {}) {
    if (!this.supportsMultipart()) throw new Error("Provider does not support multipart uploads");
    if (this.state && !this.state.completed && !this.state.aborted) throw new Error("Upload already in progress. Abort current upload first.");
    const partSize = this.calculatePartSize(totalSize, options.partSize);
    const totalParts = Math.ceil(totalSize / partSize);
    const uploadOptions = { onProgress: options.onProgress ? (progress) => {
      options.onProgress?.({
        key: progress.key,
        loaded: progress.loaded,
        total: progress.total,
        part: progress.part ?? 0,
        percentage: progress.percentage ?? 0
      });
    } : void 0 };
    const result = await this.provider.createMultipartUpload?.(key, uploadOptions);
    if (!result) throw new Error("Failed to create multipart upload");
    this.state = {
      uploadId: result.uploadId,
      key: result.key,
      provider: this.getProviderName(),
      parts: Array.from({ length: totalParts }, (_, i) => ({
        partNumber: i + 1,
        size: i === totalParts - 1 ? totalSize - i * partSize : partSize,
        uploaded: false
      })),
      totalSize,
      uploadedSize: 0,
      completed: false,
      aborted: false
    };
    this.abortController = new AbortController();
    return this.state;
  }
  /**
   * Upload a part
   *
   * @param partNumber - Part number (1-based)
   * @param data - Part data
   * @param options - Upload options
   * @returns Upload result
   */
  async uploadPart(partNumber, data, options = {}) {
    if (!this.state) throw new Error("No active upload. Call createUpload first.");
    if (this.state.completed || this.state.aborted) throw new Error("Upload is completed or aborted");
    const part = this.state.parts.find((p) => p.partNumber === partNumber);
    if (!part) throw new Error(`Part ${partNumber} not found`);
    if (part.uploaded) throw new Error(`Part ${partNumber} already uploaded`);
    if (this.abortController?.signal.aborted) throw new Error("Upload aborted");
    try {
      const result = await this.uploadPartWithRetry(partNumber, data, options);
      part.etag = result.etag;
      part.uploaded = true;
      this.state.uploadedSize += part.size;
      if (options.onProgress) options.onProgress({
        key: this.state.key,
        loaded: this.state.uploadedSize,
        total: this.state.totalSize,
        part: partNumber,
        percentage: this.state.uploadedSize / this.state.totalSize * 100
      });
      return result;
    } catch (error) {
      this.state.error = error instanceof Error ? error.message : String(error);
      throw error;
    }
  }
  /**
   * Complete the multipart upload
   *
   * @returns Final upload result
   */
  async completeUpload() {
    if (!this.state) throw new Error("No active upload. Call createUpload first.");
    if (this.state.completed) throw new Error("Upload already completed");
    if (this.state.aborted) throw new Error("Upload was aborted");
    const unuploadedParts = this.state.parts.filter((p) => !p.uploaded);
    if (unuploadedParts.length > 0) throw new Error(`Upload incomplete: ${unuploadedParts.length} parts not uploaded`);
    try {
      const parts = this.state.parts.filter((p) => p.etag).map((p) => ({
        partNumber: p.partNumber,
        etag: p.etag ?? ""
      })).filter((p) => p.etag !== "");
      const result = await this.provider.completeMultipartUpload?.(this.state.uploadId, parts);
      if (!result) throw new Error("Failed to complete multipart upload");
      this.state.completed = true;
      return {
        ...result,
        key: result.key ?? this.state.key,
        uploadId: this.state.uploadId,
        parts: this.state.parts.filter((p) => p.etag).map((p) => ({
          partNumber: p.partNumber,
          etag: p.etag ?? "",
          size: p.size
        })),
        totalParts: this.state.parts.length
      };
    } catch (error) {
      this.state.error = error instanceof Error ? error.message : String(error);
      throw error;
    }
  }
  /**
   * Abort the multipart upload
   */
  async abortUpload() {
    if (!this.state) return;
    if (this.state.completed) return;
    this.abortController?.abort();
    this.state.aborted = true;
    try {
      await this.provider.abortMultipartUpload?.(this.state.uploadId);
    } catch (error) {
      logWarn("Failed to abort upload on provider", {
        error: error instanceof Error ? error.message : String(error),
        uploadId: this.state.uploadId,
        key: this.state.key
      });
    }
    this.state = null;
    this.abortController = null;
  }
  /**
   * Get current upload state
   */
  getState() {
    return this.state;
  }
  /**
   * Resume upload from state (for recovery)
   *
   * @param state - Previous upload state
   */
  async resumeUpload(state) {
    if (this.state && !this.state.completed && !this.state.aborted) throw new Error("Upload already in progress");
    this.state = { ...state };
    this.abortController = new AbortController();
  }
  /**
   * Upload file in chunks automatically
   *
   * @param key - Storage key
   * @param data - File data
   * @param options - Upload options
   * @returns Upload result
   */
  async uploadFile(key, data, options = {}) {
    const totalSize = data instanceof ArrayBuffer ? data.byteLength : data instanceof Buffer ? data.length : data.size;
    await this.createUpload(key, totalSize, options);
    const partSize = this.calculatePartSize(totalSize, options.partSize);
    const totalParts = Math.ceil(totalSize / partSize);
    for (let i = 0; i < totalParts; i++) {
      const start = i * partSize;
      const end = Math.min(start + partSize, totalSize);
      const partData = data.slice(start, end);
      await this.uploadPart(i + 1, partData, { onProgress: options.onProgress ? (progress) => {
        options.onProgress?.({
          key: progress.key,
          loaded: progress.loaded,
          total: progress.total,
          part: progress.part ?? 0,
          percentage: progress.percentage ?? 0
        });
      } : void 0 });
    }
    return await this.completeUpload();
  }
  async uploadPartWithRetry(partNumber, data, options, maxRetries = 3) {
    let lastError = null;
    for (let attempt = 1; attempt <= maxRetries; attempt++) try {
      if (!this.state?.uploadId) throw new Error("Upload ID not initialized");
      const result = await this.provider.uploadPart?.(this.state.uploadId, partNumber, data, {
        ...options,
        abortSignal: this.abortController?.signal
      });
      if (!result) throw new Error("Failed to upload part");
      return {
        etag: result.etag ?? "",
        partNumber: result.partNumber ?? partNumber,
        size: data instanceof ArrayBuffer ? data.byteLength : data instanceof Buffer ? data.length : data.size
      };
    } catch (error) {
      lastError = error instanceof Error ? error : new Error(String(error));
      if (this.abortController?.signal.aborted) throw lastError;
      if (lastError.message.includes("validation") || lastError.message.includes("invalid")) throw lastError;
      if (attempt < maxRetries) await new Promise((resolve) => setTimeout(resolve, Math.pow(2, attempt) * STORAGE_CONSTANTS.RETRY_BASE_DELAY_MS));
    }
    throw lastError ?? /* @__PURE__ */ new Error("Upload failed after retries");
  }
  calculatePartSize(totalSize, userPartSize) {
    if (userPartSize) return Math.min(userPartSize, totalSize);
    if (totalSize < MULTIPART_THRESHOLDS.SMALL_FILE) return PART_SIZES.SMALL;
    else if (totalSize < MULTIPART_THRESHOLDS.MEDIUM_FILE) return PART_SIZES.MEDIUM;
    else {
      const minPartSize = Math.ceil(totalSize / 9999);
      return Math.max(PART_SIZES.LARGE, minPartSize);
    }
  }
  getProviderName() {
    return this.provider.constructor.name;
  }
};
/**
 * Create a multipart upload manager for a provider
 *
 * @param provider - Storage provider
 * @returns Multipart upload manager
 */
function createMultipartUploadManager(provider) {
  if (!hasMultipartSupport(provider)) throw new Error("The provided storage provider does not support multipart uploads");
  return new MultipartUploadManager(provider);
}
/**
 * Determines whether the given storage provider supports multipart uploads.
 *
 * @param provider - The storage provider to check
 * @returns `true` if the provider supports multipart uploads, `false` otherwise.
 */
function hasMultipartSupport(provider) {
  return (provider.getCapabilities?.())?.multipart ?? false;
}
/**
 * Selects an appropriate multipart upload part size for a file.
 *
 * If `userPartSize` is provided, it will be capped at the file size; otherwise the part size
 * is chosen from configured thresholds for small, medium, or large files.
 *
 * @param fileSize - File size in bytes
 * @param userPartSize - Optional user-specified part size in bytes; capped to `fileSize` when provided
 * @returns The selected part size in bytes
 */
function getOptimalPartSize(fileSize, userPartSize) {
  if (userPartSize) return Math.min(userPartSize, fileSize);
  if (fileSize < MULTIPART_THRESHOLDS.SMALL_FILE) return PART_SIZES.SMALL;
  else if (fileSize < MULTIPART_THRESHOLDS.MEDIUM_FILE) return PART_SIZES.MEDIUM;
  else {
    const minPartSize = Math.ceil(fileSize / 9999);
    return Math.max(PART_SIZES.LARGE, minPartSize);
  }
}

//#endregion
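`calculatePartSize` and `getOptimalPartSize` pick a part size by file-size tier, and for very large files they grow the part size so the upload stays under roughly 10,000 parts (the usual S3-style multipart limit, which is presumably why the divisor is 9999). A self-contained sketch with the thresholds inlined and a few worked values:

```ts
// Local sketch mirroring getOptimalPartSize above, with the package constants
// inlined so the tier boundaries and worked results are easy to verify.
const MiB = 1024 * 1024;
const GiB = 1024 * MiB;

function getOptimalPartSize(fileSize: number, userPartSize?: number): number {
  if (userPartSize) return Math.min(userPartSize, fileSize);
  if (fileSize < 100 * MiB) return 5 * MiB;       // SMALL_FILE tier  -> 5 MiB parts
  if (fileSize < 1 * GiB) return 10 * MiB;        // MEDIUM_FILE tier -> 10 MiB parts
  const minPartSize = Math.ceil(fileSize / 9999); // keep the part count under ~10 000
  return Math.max(25 * MiB, minPartSize);         // LARGE tier -> 25 MiB, grown if needed
}

console.log(getOptimalPartSize(50 * MiB));         // 5 MiB  -> 10 parts
console.log(getOptimalPartSize(600 * MiB));        // 10 MiB -> 60 parts
console.log(getOptimalPartSize(2 * GiB));          // 25 MiB -> 82 parts
console.log(getOptimalPartSize(500 * GiB));        // ~51.2 MiB (500 GiB / 9999); 25 MiB parts would need ~20 480
console.log(getOptimalPartSize(2 * GiB, 8 * MiB)); // 8 MiB (user-specified, capped at the file size)
```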
//#region src/capabilities.ts
/**
 * @fileoverview Storage Provider Capabilities Utilities
 *
 * Provides utilities for checking storage provider capabilities and feature support.
 * Helps determine which features are available for each provider.
 *
 * @module @od-oneapp/storage/capabilities
 */
/**
 * Check if a storage provider has a specific capability
 * @param provider - The storage provider to check
 * @param capability - The capability to check for
 * @returns True if the provider supports the capability
 */
function hasCapability(provider, capability) {
  return (provider.getCapabilities?.())?.[capability] ?? false;
}
/**
 * Check if a storage provider supports multiple capabilities
 * @param provider - The storage provider to check
 * @param capabilities - Array of capabilities to check for
 * @returns True if the provider supports all capabilities
 */
function hasAllCapabilities(provider, capabilities) {
  return capabilities.every((capability) => hasCapability(provider, capability));
}
/**
 * Check if a storage provider supports any of the specified capabilities
 * @param provider - The storage provider to check
 * @param capabilities - Array of capabilities to check for
 * @returns True if the provider supports at least one capability
 */
function hasAnyCapability(provider, capabilities) {
  return capabilities.some((capability) => hasCapability(provider, capability));
}
/**
 * Get all capabilities supported by a storage provider
 * @param provider - The storage provider to check
 * @returns Object with all capabilities and their support status
 */
function getProviderCapabilities(provider) {
  return provider.getCapabilities?.() ?? { ...DEFAULT_STORAGE_CAPABILITIES };
}
/**
 * Get a human-readable description of provider capabilities
 * @param provider - The storage provider to describe
 * @returns String describing the provider's capabilities
 */
function describeProviderCapabilities(provider) {
  const capabilities = getProviderCapabilities(provider);
  const supportedFeatures = [];
  const unsupportedFeatures = [];
  if (capabilities.multipart) supportedFeatures.push("multipart uploads");
  else unsupportedFeatures.push("multipart uploads");
  if (capabilities.presignedUrls) supportedFeatures.push("presigned URLs");
  else unsupportedFeatures.push("presigned URLs");
  if (capabilities.progressTracking) supportedFeatures.push("progress tracking");
  else unsupportedFeatures.push("progress tracking");
  if (capabilities.abortSignal) supportedFeatures.push("abort signals");
  else unsupportedFeatures.push("abort signals");
  if (capabilities.metadata) supportedFeatures.push("metadata");
  else unsupportedFeatures.push("metadata");
  if (capabilities.customDomains) supportedFeatures.push("custom domains");
  else unsupportedFeatures.push("custom domains");
  if (capabilities.edgeCompatible) supportedFeatures.push("edge runtime");
  else unsupportedFeatures.push("edge runtime");
  let description = `Provider supports: ${supportedFeatures.join(", ")}`;
  if (unsupportedFeatures.length > 0) description += `\nProvider does not support: ${unsupportedFeatures.join(", ")}`;
  return description;
}
/**
 * Validate that a provider supports the required capabilities for an operation
 * @param provider - The storage provider to validate
 * @param requiredCapabilities - Capabilities required for the operation
 * @throws Error if provider doesn't support required capabilities
 */
function validateProviderCapabilities(provider, requiredCapabilities) {
  const missingCapabilities = requiredCapabilities.filter((capability) => !hasCapability(provider, capability));
  if (missingCapabilities.length > 0) {
    const providerName = provider.constructor.name;
    throw new Error(`Provider ${providerName} does not support required capabilities: ${missingCapabilities.join(", ")}`);
  }
}
/**
 * Get the best provider for a specific use case based on capabilities
 * @param providers - Array of storage providers to choose from
 * @param requiredCapabilities - Capabilities required for the use case
 * @returns The best provider or null if none meet the requirements
 */
function getBestProvider(providers, requiredCapabilities) {
  const suitableProviders = providers.filter((provider) => hasAllCapabilities(provider, requiredCapabilities));
  if (suitableProviders.length === 0) return null;
  const edgeCompatible = suitableProviders.filter((provider) => hasCapability(provider, "edgeCompatible"));
  return edgeCompatible.length > 0 ? edgeCompatible[0] ?? null : suitableProviders[0] ?? null;
}
/**
 * Builds a map from provider names to their reported storage capabilities.
 *
 * @param providers - Array of entries each containing a `name` (used as the map key) and a `provider` instance
 * @returns A mapping from each provider name to its `StorageCapabilities`
 */
function getCapabilityMatrix(providers) {
  const matrix = {};
  for (const { name, provider } of providers) matrix[name] = getProviderCapabilities(provider);
  return matrix;
}
/**
 * Evaluate a storage provider's suitability for a specific file and produce actionable recommendations and warnings.
 *
 * @param provider - The storage provider to evaluate
 * @param fileSize - File size in bytes
 * @param fileType - MIME type of the file
 * @returns An object with `suitable` (`true` if there are no warnings, `false` otherwise), `recommendations` (suggested actions to improve handling), and `warnings` (issues that reduce suitability)
 */
function checkProviderSuitability(provider, fileSize, fileType) {
  const capabilities = getProviderCapabilities(provider);
  const recommendations = [];
  const warnings = [];
  if (fileSize > MULTIPART_THRESHOLDS.SMALL_FILE) if (!capabilities.multipart) warnings.push("Large file detected but provider does not support multipart uploads");
  else recommendations.push("Use multipart upload for this large file");
  if (fileType.startsWith("image/")) {
    if (capabilities.metadata) recommendations.push("Consider storing image metadata for better organization");
  }
  if (fileType.startsWith("video/")) {
    if (!capabilities.multipart) warnings.push("Video files are typically large and benefit from multipart uploads");
  }
  if (fileType.startsWith("text/") && !capabilities.edgeCompatible) recommendations.push("Text files could be processed in edge runtime for better performance");
  return {
    suitable: warnings.length === 0,
    recommendations,
    warnings
  };
}

//#endregion
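The capability helpers above all funnel through `getCapabilities()`, falling back to `DEFAULT_STORAGE_CAPABILITIES` (everything `false`) when a provider does not implement it, and `getBestProvider` prefers edge-compatible providers among those that qualify. A hedged sketch using stub providers; the import path and the stub capability values are assumptions for illustration only:

```ts
// Hedged sketch of the capability helpers. The import path is an assumption, and
// the stubs below only implement getCapabilities(); the real StorageProvider
// interface has more members, hence the casts.
import { hasCapability, getBestProvider, validateProviderCapabilities } from "@od-oneapp/storage";

const r2Like = {
  getCapabilities: () => ({
    multipart: true, presignedUrls: true, progressTracking: true, abortSignal: true,
    metadata: true, customDomains: true, edgeCompatible: false, versioning: false,
    encryption: true, directoryListing: true,
  }),
};

const blobLike = {
  getCapabilities: () => ({
    multipart: false, presignedUrls: false, progressTracking: false, abortSignal: true,
    metadata: true, customDomains: false, edgeCompatible: true, versioning: false,
    encryption: false, directoryListing: false,
  }),
};

console.log(hasCapability(r2Like as any, "multipart")); // true

// Throws if any required capability is missing.
validateProviderCapabilities(r2Like as any, ["multipart", "presignedUrls"]);

// Both stubs report metadata + abortSignal; blobLike wins because it is edge-compatible.
const best = getBestProvider([r2Like, blobLike] as any[], ["metadata", "abortSignal"]);
console.log(best === blobLike); // true
```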
//#region src/health-check.ts
/**
 * Check the health of a storage provider
 *
 * @param provider - The storage provider to check
 * @returns Health check result with status and latency
 */
async function checkProviderHealth(provider) {
  const startTime = Date.now();
  try {
    await provider.list({ limit: 1 });
    const latencyMs = Date.now() - startTime;
    const HEALTHY_THRESHOLD_MS = 1e3;
    const DEGRADED_THRESHOLD_MS = 3e3;
    let status;
    if (latencyMs < HEALTHY_THRESHOLD_MS) status = "healthy";
    else if (latencyMs < DEGRADED_THRESHOLD_MS) status = "degraded";
    else status = "unhealthy";
    return {
      status,
      latencyMs,
      details: {
        provider: provider.constructor.name,
        threshold_healthy_ms: HEALTHY_THRESHOLD_MS,
        threshold_degraded_ms: DEGRADED_THRESHOLD_MS
      }
    };
  } catch (error) {
    return {
      status: "unhealthy",
      latencyMs: Date.now() - startTime,
      error: error instanceof Error ? error.message : String(error),
      details: { provider: provider.constructor.name }
    };
  }
}
/**
 * Perform a comprehensive health check on storage system
 *
 * @param provider - The storage provider to check
 * @returns Detailed health check result
 */
async function storageHealthCheck(provider) {
  return checkProviderHealth(provider);
}

//#endregion
export { DEFAULT_STORAGE_CAPABILITIES as _, getBestProvider as a, CloudflareImagesProvider as b, hasAllCapabilities as c, validateProviderCapabilities as d, MultipartUploadManager as f, MultiStorageManager as g, hasMultipartSupport as h, describeProviderCapabilities as i, hasAnyCapability as l, getOptimalPartSize as m, storageHealthCheck as n, getCapabilityMatrix as o, createMultipartUploadManager as p, checkProviderSuitability as r, getProviderCapabilities as s, checkProviderHealth as t, hasCapability as u, STORAGE_CONSTANTS as v, CloudflareR2Provider as y };
//# sourceMappingURL=health-check-D7LnnDec.mjs.map
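`checkProviderHealth` grades a single `list({ limit: 1 })` probe: under 1,000 ms is `healthy`, under 3,000 ms is `degraded`, and anything slower, or a thrown error, is `unhealthy`. A hedged sketch of surfacing that result from a Next.js route handler; the import path and the `getStorageProvider` factory are assumptions:

```ts
// Hedged sketch: exposing the storage health check from a route handler.
// The "@od-oneapp/storage" import path and getStorageProvider() factory are assumptions.
import { checkProviderHealth } from "@od-oneapp/storage";

// Assumed app-level factory returning whatever storage provider the app configured.
declare function getStorageProvider(): Parameters<typeof checkProviderHealth>[0];

export async function GET(): Promise<Response> {
  const result = await checkProviderHealth(getStorageProvider());

  // result.status: "healthy" (< 1000 ms), "degraded" (< 3000 ms),
  // "unhealthy" (slower, or the list({ limit: 1 }) probe threw).
  return Response.json(result, { status: result.status === "unhealthy" ? 503 : 200 });
}
```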