@chunkflowjs/upload-server 0.0.1-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,20 @@
+ # @chunkflowjs/upload-server
+
+ Server-side SDK for ChunkFlow Upload with storage adapters and upload service.
+
+ ## Installation
+
+ ```bash
+ pnpm add @chunkflowjs/upload-server
+ ```
+
+ ## Features
+
+ - Storage adapters (Local, OSS, S3)
+ - Database adapters
+ - Upload service implementation
+ - Token management
+
+ ## License
+
+ MIT
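The README stops at the license. For orientation, here is a minimal wiring sketch for the three exports this version ships in `dist/index.cjs` (`LocalStorageAdapter`, `MemoryDatabaseAdapter`, `UploadService`); the constructor option names are taken from the bundled code further down, but treat this as an illustration, not documented API:

```ts
import {
  LocalStorageAdapter,
  MemoryDatabaseAdapter,
  UploadService,
} from "@chunkflowjs/upload-server";

// Chunks land under ./uploads/<first two hash chars>/<full hash>.
const storageAdapter = new LocalStorageAdapter({ baseDir: "./uploads" });

// In-memory metadata store: handy for tests, loses state on restart.
const databaseAdapter = new MemoryDatabaseAdapter();

const service = new UploadService({
  storageAdapter,
  databaseAdapter,
  jwtSecret: process.env.JWT_SECRET ?? "dev-secret", // signs upload tokens
  tokenExpiration: 1440 * 60,     // seconds; matches the default
  minChunkSize: 256 * 1024,       // 256 KiB; matches the default
  maxChunkSize: 10 * 1024 * 1024, // 10 MiB; matches the default
});

await service.initialize(); // initializes both adapters
```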
package/dist/database-adapter.d.ts ADDED
@@ -0,0 +1,65 @@
+ export interface FileMetadata {
+   fileId: string;
+   filename: string;
+   size: number;
+   mimeType: string;
+   fileHash: string;
+   uploadToken: string;
+   chunkSize: number;
+   totalChunks: number;
+   uploadedChunks: number;
+   status: "pending" | "uploading" | "completed" | "failed";
+   createdAt: Date;
+   updatedAt: Date;
+   completedAt?: Date;
+   url?: string;
+ }
+ export interface ChunkEntity {
+   chunkHash: string;
+   size: number;
+   refCount: number;
+   createdAt: Date;
+ }
+ export interface FileChunkEntity {
+   fileId: string;
+   chunkHash: string;
+   chunkIndex: number;
+   createdAt: Date;
+ }
+ export interface CreateFileOptions {
+   filename: string;
+   size: number;
+   mimeType: string;
+   fileHash: string;
+   uploadToken: string;
+   chunkSize: number;
+   totalChunks: number;
+ }
+ export interface UpdateFileOptions {
+   uploadedChunks?: number;
+   status?: "pending" | "uploading" | "completed" | "failed";
+   completedAt?: Date;
+   url?: string;
+   fileHash?: string;
+ }
+ export interface DatabaseAdapter {
+   initialize(): Promise<void>;
+   createFile(fileId: string, options: CreateFileOptions): Promise<FileMetadata>;
+   getFile(fileId: string): Promise<FileMetadata | null>;
+   getFileByHash(fileHash: string): Promise<FileMetadata | null>;
+   getFileByToken(uploadToken: string): Promise<FileMetadata | null>;
+   updateFile(fileId: string, options: UpdateFileOptions): Promise<FileMetadata>;
+   deleteFile(fileId: string): Promise<void>;
+   upsertChunk(chunkHash: string, size: number): Promise<ChunkEntity>;
+   getChunk(chunkHash: string): Promise<ChunkEntity | null>;
+   chunkExists(chunkHash: string): Promise<boolean>;
+   chunksExist(chunkHashes: string[]): Promise<boolean[]>;
+   decrementChunkRef(chunkHash: string): Promise<void>;
+   createFileChunk(fileId: string, chunkHash: string, chunkIndex: number): Promise<FileChunkEntity>;
+   getFileChunks(fileId: string): Promise<FileChunkEntity[]>;
+   getFileChunkHashes(fileId: string): Promise<string[]>;
+   deleteFileChunks(fileId: string): Promise<void>;
+   transaction<T>(callback: () => Promise<T>): Promise<T>;
+   cleanup(): Promise<void>;
+ }
+ //# sourceMappingURL=database-adapter.d.ts.map
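The interface above encodes the package's chunk-level deduplication: `upsertChunk` creates a chunk record or increments its reference count, and `decrementChunkRef` removes the record once the last reference is released. A small sketch of that contract, run against the bundled in-memory implementation shown below (the hash and size are placeholder values):

```ts
import { MemoryDatabaseAdapter } from "@chunkflowjs/upload-server";

const db = new MemoryDatabaseAdapter();
await db.initialize();

// Two files referencing the same content: the second upsert only bumps refCount.
await db.upsertChunk("ab34ef...", 1024);
await db.upsertChunk("ab34ef...", 1024);
console.log((await db.getChunk("ab34ef..."))?.refCount); // 2

// Releasing one reference keeps the chunk; releasing the last deletes it.
await db.decrementChunkRef("ab34ef...");
console.log(await db.chunkExists("ab34ef...")); // true
await db.decrementChunkRef("ab34ef...");
console.log(await db.chunkExists("ab34ef...")); // false
```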
package/dist/database-adapter.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"database-adapter.d.ts","sourceRoot":"","sources":["../src/database-adapter.ts"],"names":[],"mappings":"AAGA,MAAM,WAAW,YAAY;IAE3B,MAAM,EAAE,MAAM,CAAC;IAGf,QAAQ,EAAE,MAAM,CAAC;IAGjB,IAAI,EAAE,MAAM,CAAC;IAGb,QAAQ,EAAE,MAAM,CAAC;IAGjB,QAAQ,EAAE,MAAM,CAAC;IAGjB,WAAW,EAAE,MAAM,CAAC;IAGpB,SAAS,EAAE,MAAM,CAAC;IAGlB,WAAW,EAAE,MAAM,CAAC;IAGpB,cAAc,EAAE,MAAM,CAAC;IAGvB,MAAM,EAAE,SAAS,GAAG,WAAW,GAAG,WAAW,GAAG,QAAQ,CAAC;IAGzD,SAAS,EAAE,IAAI,CAAC;IAGhB,SAAS,EAAE,IAAI,CAAC;IAGhB,WAAW,CAAC,EAAE,IAAI,CAAC;IAGnB,GAAG,CAAC,EAAE,MAAM,CAAC;CACd;AAKD,MAAM,WAAW,WAAW;IAE1B,SAAS,EAAE,MAAM,CAAC;IAGlB,IAAI,EAAE,MAAM,CAAC;IAGb,QAAQ,EAAE,MAAM,CAAC;IAGjB,SAAS,EAAE,IAAI,CAAC;CACjB;AAKD,MAAM,WAAW,eAAe;IAE9B,MAAM,EAAE,MAAM,CAAC;IAGf,SAAS,EAAE,MAAM,CAAC;IAGlB,UAAU,EAAE,MAAM,CAAC;IAGnB,SAAS,EAAE,IAAI,CAAC;CACjB;AAKD,MAAM,WAAW,iBAAiB;IAChC,QAAQ,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,SAAS,EAAE,MAAM,CAAC;IAClB,WAAW,EAAE,MAAM,CAAC;CACrB;AAKD,MAAM,WAAW,iBAAiB;IAChC,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,MAAM,CAAC,EAAE,SAAS,GAAG,WAAW,GAAG,WAAW,GAAG,QAAQ,CAAC;IAC1D,WAAW,CAAC,EAAE,IAAI,CAAC;IACnB,GAAG,CAAC,EAAE,MAAM,CAAC;IACb,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB;AAOD,MAAM,WAAW,eAAe;IAM9B,UAAU,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;IAS5B,UAAU,CAAC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,iBAAiB,GAAG,OAAO,CAAC,YAAY,CAAC,CAAC;IAQ9E,OAAO,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC;IAQtD,aAAa,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC;IAQ9D,cAAc,CAAC,WAAW,EAAE,MAAM,GAAG,OAAO,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC;IASlE,UAAU,CAAC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,iBAAiB,GAAG,OAAO,CAAC,YAAY,CAAC,CAAC;IAQ9E,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAU1C,WAAW,CAAC,SAAS,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,WAAW,CAAC,CAAC;IAQnE,QAAQ,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,WAAW,GAAG,IAAI,CAAC,CAAC;IAQzD,WAAW,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC,CAAC;IAQjD,WAAW,CAAC,WAAW,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC,OAAO,EAAE,CAAC,CAAC;IAQvD,iBAAiB,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAUpD,eAAe,CAAC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,GAAG,OAAO,CAAC,eAAe,CAAC,CAAC;IAQjG,aAAa,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,eAAe,EAAE,CAAC,CAAC;IAQ1D,kBAAkB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,EAAE,CAAC,CAAC;IAQtD,gBAAgB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAQhD,WAAW,CAAC,CAAC,EAAE,QAAQ,EAAE,MAAM,OAAO,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,CAAC;IAOvD,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;CAC1B"}
package/dist/index.cjs ADDED
@@ -0,0 +1,537 @@
+ let fs = require("fs");
+ let path = require("path");
+ let crypto = require("crypto");
+ let jsonwebtoken = require("jsonwebtoken");
+
+ //#region src/local-storage-adapter.ts
+ /**
+  * Local filesystem storage adapter
+  *
+  * Stores chunks in the local filesystem using a directory structure
+  * based on the first two characters of the chunk hash for better performance.
+  *
+  * Directory structure: baseDir/ab/abcdef123456...
+  */
+ var LocalStorageAdapter = class {
+   baseDir;
+   initialized = false;
+   constructor(options = {}) {
+     this.baseDir = options.baseDir || "./uploads";
+   }
+   /**
+    * Initialize the storage adapter by creating the base directory
+    */
+   async initialize() {
+     if (this.initialized) return;
+     try {
+       await fs.promises.mkdir(this.baseDir, { recursive: true });
+       this.initialized = true;
+     } catch (error) {
+       throw new Error(`Failed to initialize local storage: ${error.message}`);
+     }
+   }
+   /**
+    * Get the file path for a chunk hash
+    * Uses the first two characters as a subdirectory for better performance
+    */
+   getChunkPath(chunkHash) {
+     if (chunkHash.length < 2) throw new Error("Chunk hash must be at least 2 characters long");
+     const subDir = chunkHash.substring(0, 2);
+     return (0, path.join)(this.baseDir, subDir, chunkHash);
+   }
+   /**
+    * Save a chunk to the local filesystem
+    */
+   async saveChunk(chunkHash, data) {
+     if (!this.initialized) await this.initialize();
+     const chunkPath = this.getChunkPath(chunkHash);
+     const chunkDir = (0, path.dirname)(chunkPath);
+     try {
+       await fs.promises.mkdir(chunkDir, { recursive: true });
+       await fs.promises.writeFile(chunkPath, data);
+     } catch (error) {
+       throw new Error(`Failed to save chunk ${chunkHash}: ${error.message}`);
+     }
+   }
+   /**
+    * Get a chunk from the local filesystem
+    */
+   async getChunk(chunkHash) {
+     if (!this.initialized) await this.initialize();
+     const chunkPath = this.getChunkPath(chunkHash);
+     try {
+       return await fs.promises.readFile(chunkPath);
+     } catch (error) {
+       if (error.code === "ENOENT") return null;
+       throw new Error(`Failed to get chunk ${chunkHash}: ${error.message}`);
+     }
+   }
+   /**
+    * Check if a chunk exists in the local filesystem
+    */
+   async chunkExists(chunkHash) {
+     if (!this.initialized) await this.initialize();
+     const chunkPath = this.getChunkPath(chunkHash);
+     try {
+       await fs.promises.access(chunkPath);
+       return true;
+     } catch {
+       return false;
+     }
+   }
+   /**
+    * Check if multiple chunks exist in the local filesystem
+    */
+   async chunksExist(chunkHashes) {
+     if (!this.initialized) await this.initialize();
+     return await Promise.all(chunkHashes.map((hash) => this.chunkExists(hash)));
+   }
+   /**
+    * Get a readable stream for a chunk
+    */
+   async getChunkStream(chunkHash) {
+     if (!this.initialized) await this.initialize();
+     const chunkPath = this.getChunkPath(chunkHash);
+     if (!(0, fs.existsSync)(chunkPath)) return null;
+     try {
+       return (0, fs.createReadStream)(chunkPath);
+     } catch (error) {
+       throw new Error(`Failed to create stream for chunk ${chunkHash}: ${error.message}`);
+     }
+   }
+   /**
+    * Delete a chunk from the local filesystem
+    */
+   async deleteChunk(chunkHash) {
+     if (!this.initialized) await this.initialize();
+     const chunkPath = this.getChunkPath(chunkHash);
+     try {
+       await fs.promises.unlink(chunkPath);
+     } catch (error) {
+       if (error.code === "ENOENT") return;
+       throw new Error(`Failed to delete chunk ${chunkHash}: ${error.message}`);
+     }
+   }
+   /**
+    * Clean up resources (no-op for local storage)
+    */
+   async cleanup() {
+     this.initialized = false;
+   }
+ };
+
+ //#endregion
+ //#region src/memory-database-adapter.ts
+ /**
+  * In-memory database adapter for testing and development
+  *
+  * This adapter stores all data in memory and is useful for:
+  * - Unit testing without database dependencies
+  * - Development and prototyping
+  * - Simple deployments that don't require persistence
+  */
+ var MemoryDatabaseAdapter = class {
+   files = /* @__PURE__ */ new Map();
+   chunks = /* @__PURE__ */ new Map();
+   fileChunks = /* @__PURE__ */ new Map();
+   async initialize() {}
+   async createFile(fileId, options) {
+     if (this.files.has(fileId)) throw new Error(`File ${fileId} already exists`);
+     const now = /* @__PURE__ */ new Date();
+     const file = {
+       fileId,
+       filename: options.filename,
+       size: options.size,
+       mimeType: options.mimeType,
+       fileHash: options.fileHash,
+       uploadToken: options.uploadToken,
+       chunkSize: options.chunkSize,
+       totalChunks: options.totalChunks,
+       uploadedChunks: 0,
+       status: "pending",
+       createdAt: now,
+       updatedAt: now
+     };
+     this.files.set(fileId, file);
+     return { ...file };
+   }
+   async getFile(fileId) {
+     const file = this.files.get(fileId);
+     return file ? { ...file } : null;
+   }
+   async getFileByHash(fileHash) {
+     for (const file of this.files.values()) if (file.fileHash === fileHash) return { ...file };
+     return null;
+   }
+   async getFileByToken(uploadToken) {
+     for (const file of this.files.values()) if (file.uploadToken === uploadToken) return { ...file };
+     return null;
+   }
+   async updateFile(fileId, options) {
+     const file = this.files.get(fileId);
+     if (!file) throw new Error(`File ${fileId} not found`);
+     const updated = {
+       ...file,
+       ...options,
+       updatedAt: /* @__PURE__ */ new Date()
+     };
+     this.files.set(fileId, updated);
+     return { ...updated };
+   }
+   async deleteFile(fileId) {
+     this.files.delete(fileId);
+   }
+   async upsertChunk(chunkHash, size) {
+     const existing = this.chunks.get(chunkHash);
+     if (existing) {
+       const updated = {
+         ...existing,
+         refCount: existing.refCount + 1
+       };
+       this.chunks.set(chunkHash, updated);
+       return { ...updated };
+     }
+     const chunk = {
+       chunkHash,
+       size,
+       refCount: 1,
+       createdAt: /* @__PURE__ */ new Date()
+     };
+     this.chunks.set(chunkHash, chunk);
+     return { ...chunk };
+   }
+   async getChunk(chunkHash) {
+     const chunk = this.chunks.get(chunkHash);
+     return chunk ? { ...chunk } : null;
+   }
+   async chunkExists(chunkHash) {
+     return this.chunks.has(chunkHash);
+   }
+   async chunksExist(chunkHashes) {
+     return chunkHashes.map((hash) => this.chunks.has(hash));
+   }
+   async decrementChunkRef(chunkHash) {
+     const chunk = this.chunks.get(chunkHash);
+     if (!chunk) return;
+     if (chunk.refCount <= 1) this.chunks.delete(chunkHash);
+     else {
+       const updated = {
+         ...chunk,
+         refCount: chunk.refCount - 1
+       };
+       this.chunks.set(chunkHash, updated);
+     }
+   }
+   async createFileChunk(fileId, chunkHash, chunkIndex) {
+     const fileChunk = {
+       fileId,
+       chunkHash,
+       chunkIndex,
+       createdAt: /* @__PURE__ */ new Date()
+     };
+     const fileChunks = this.fileChunks.get(fileId) || [];
+     fileChunks.push(fileChunk);
+     this.fileChunks.set(fileId, fileChunks);
+     return { ...fileChunk };
+   }
+   async getFileChunks(fileId) {
+     return (this.fileChunks.get(fileId) || []).slice().sort((a, b) => a.chunkIndex - b.chunkIndex).map((fc) => ({ ...fc }));
+   }
+   async getFileChunkHashes(fileId) {
+     return (await this.getFileChunks(fileId)).map((fc) => fc.chunkHash);
+   }
+   async deleteFileChunks(fileId) {
+     this.fileChunks.delete(fileId);
+   }
+   async transaction(callback) {
+     return callback();
+   }
+   async cleanup() {
+     this.files.clear();
+     this.chunks.clear();
+     this.fileChunks.clear();
+   }
+ };
+
+ //#endregion
+ //#region src/upload-service.ts
+ /**
+  * Upload service for handling file uploads with chunking and deduplication
+  */
+ var UploadService = class {
+   storageAdapter;
+   databaseAdapter;
+   jwtSecret;
+   tokenExpiration;
+   minChunkSize;
+   maxChunkSize;
+   constructor(options) {
+     this.storageAdapter = options.storageAdapter;
+     this.databaseAdapter = options.databaseAdapter;
+     this.jwtSecret = options.jwtSecret;
+     this.tokenExpiration = options.tokenExpiration || 1440 * 60;
+     this.minChunkSize = options.minChunkSize || 256 * 1024;
+     this.maxChunkSize = options.maxChunkSize || 10 * 1024 * 1024;
+   }
+   /**
+    * Initialize the upload service
+    */
+   async initialize() {
+     await this.storageAdapter.initialize();
+     await this.databaseAdapter.initialize();
+   }
+   /**
+    * Clean up resources
+    */
+   async cleanup() {
+     await this.storageAdapter.cleanup();
+     await this.databaseAdapter.cleanup();
+   }
+   /**
+    * Generate a unique file ID
+    */
+   generateFileId() {
+     return (0, crypto.randomBytes)(16).toString("hex");
+   }
+   /**
+    * Generate an upload token
+    */
+   generateUploadToken(fileId) {
+     return (0, jsonwebtoken.sign)({
+       fileId,
+       type: "upload"
+     }, this.jwtSecret, { expiresIn: this.tokenExpiration });
+   }
+   /**
+    * Verify an upload token
+    */
+   verifyUploadToken(token) {
+     try {
+       const payload = (0, jsonwebtoken.verify)(token, this.jwtSecret);
+       if (payload.type !== "upload") throw new Error("Invalid token type");
+       return { fileId: payload.fileId };
+     } catch (error) {
+       throw new Error(`Invalid upload token: ${error.message}`);
+     }
+   }
+   /**
+    * Negotiate chunk size based on file size and client preference
+    */
+   negotiateChunkSize(fileSize, clientChunkSize) {
+     if (clientChunkSize) {
+       if (clientChunkSize > this.maxChunkSize) return this.maxChunkSize;
+       return clientChunkSize;
+     }
+     if (fileSize < 10 * 1024 * 1024) return Math.max(256 * 1024, this.minChunkSize);
+     else if (fileSize < 100 * 1024 * 1024) return Math.max(1024 * 1024, this.minChunkSize);
+     else if (fileSize < 1024 * 1024 * 1024) return Math.max(2 * 1024 * 1024, this.minChunkSize);
+     else return Math.max(5 * 1024 * 1024, this.minChunkSize);
+   }
+   /**
+    * Create a new file upload session
+    *
+    * Generates a unique file ID and upload token, negotiates chunk size,
+    * and saves file metadata to the database.
+    */
+   async createFile(request) {
+     const fileId = this.generateFileId();
+     const uploadToken = this.generateUploadToken(fileId);
+     const chunkSize = this.negotiateChunkSize(request.fileSize, request.preferredChunkSize);
+     const totalChunks = Math.ceil(request.fileSize / chunkSize);
+     await this.databaseAdapter.createFile(fileId, {
+       filename: request.fileName,
+       size: request.fileSize,
+       mimeType: request.fileType,
+       fileHash: "",
+       uploadToken,
+       chunkSize,
+       totalChunks
+     });
+     return {
+       uploadToken,
+       negotiatedChunkSize: chunkSize
+     };
+   }
+   /**
+    * Verify file and chunk hashes for instant upload (秒传)
+    *
+    * Checks if the file hash already exists (full instant upload)
+    * or if any chunks already exist (partial instant upload).
+    * For existing chunks, automatically creates file-chunk relationships.
+    */
+   async verifyHash(request) {
+     const { fileId } = this.verifyUploadToken(request.uploadToken);
+     const file = await this.databaseAdapter.getFile(fileId);
+     if (!file) throw new Error("File not found");
+     if (request.fileHash) {
+       const existingFile = await this.databaseAdapter.getFileByHash(request.fileHash);
+       if (existingFile && existingFile.status === "completed") return {
+         fileExists: true,
+         fileUrl: existingFile.url,
+         existingChunks: [],
+         missingChunks: []
+       };
+     }
+     if (request.chunkHashes && request.chunkHashes.length > 0) {
+       const chunkExistence = await this.databaseAdapter.chunksExist(request.chunkHashes);
+       const existingChunks = [];
+       const missingChunks = [];
+       for (let index = 0; index < chunkExistence.length; index++) if (chunkExistence[index]) {
+         existingChunks.push(index);
+         const chunkHash = request.chunkHashes[index];
+         try {
+           if (!(await this.databaseAdapter.getFileChunks(fileId)).some((fc) => fc.chunkIndex === index && fc.chunkHash === chunkHash)) {
+             await this.databaseAdapter.createFileChunk(fileId, chunkHash, index);
+             const chunk = await this.databaseAdapter.getChunk(chunkHash);
+             if (chunk) await this.databaseAdapter.upsertChunk(chunkHash, chunk.size);
+           }
+         } catch (error) {
+           console.error(`Failed to create file-chunk relationship for chunk ${index}:`, error);
+         }
+       } else missingChunks.push(index);
+       const uploadedChunks = (await this.databaseAdapter.getFileChunks(fileId)).length;
+       const status = uploadedChunks === file.totalChunks ? "completed" : uploadedChunks > 0 ? "uploading" : "pending";
+       await this.databaseAdapter.updateFile(fileId, {
+         uploadedChunks,
+         status
+       });
+       return {
+         fileExists: false,
+         existingChunks,
+         missingChunks
+       };
+     }
+     return {
+       fileExists: false,
+       existingChunks: [],
+       missingChunks: []
+     };
+   }
+   /**
+    * Upload a chunk
+    *
+    * Validates the upload token and chunk hash, saves the chunk to storage
+    * with deduplication, and updates file metadata.
+    */
+   async uploadChunk(request) {
+     const { fileId } = this.verifyUploadToken(request.uploadToken);
+     const file = await this.databaseAdapter.getFile(fileId);
+     if (!file) throw new Error("File not found");
+     let chunkBuffer;
+     if (Buffer.isBuffer(request.chunk)) chunkBuffer = request.chunk;
+     else {
+       const arrayBuffer = await request.chunk.arrayBuffer();
+       chunkBuffer = Buffer.from(arrayBuffer);
+     }
+     if ((await import("spark-md5")).default.ArrayBuffer.hash(chunkBuffer) !== request.chunkHash) throw new Error("Chunk hash mismatch");
+     if (!await this.storageAdapter.chunkExists(request.chunkHash)) await this.storageAdapter.saveChunk(request.chunkHash, chunkBuffer);
+     await this.databaseAdapter.upsertChunk(request.chunkHash, chunkBuffer.length);
+     await this.databaseAdapter.createFileChunk(fileId, request.chunkHash, request.chunkIndex);
+     const uploadedChunks = (await this.databaseAdapter.getFileChunks(fileId)).length;
+     const status = uploadedChunks === file.totalChunks ? "completed" : "uploading";
+     await this.databaseAdapter.updateFile(fileId, {
+       uploadedChunks,
+       status
+     });
+     return {
+       success: true,
+       chunkHash: request.chunkHash
+     };
+   }
+   /**
+    * Merge file chunks (logical merge)
+    *
+    * Verifies all chunks are uploaded and updates file status to completed.
+    * Generates a file access URL.
+    */
+   async mergeFile(request) {
+     const { fileId } = this.verifyUploadToken(request.uploadToken);
+     const file = await this.databaseAdapter.getFile(fileId);
+     if (!file) throw new Error("File not found");
+     if (file.uploadedChunks !== file.totalChunks) throw new Error(`Not all chunks uploaded: ${file.uploadedChunks}/${file.totalChunks}`);
+     const url = `/upload/files/${fileId}`;
+     await this.databaseAdapter.updateFile(fileId, {
+       fileHash: request.fileHash,
+       status: "completed",
+       completedAt: /* @__PURE__ */ new Date(),
+       url
+     });
+     return {
+       success: true,
+       fileUrl: url,
+       fileId
+     };
+   }
+   /**
+    * Get file metadata
+    *
+    * Retrieves file metadata from database without creating a stream.
+    */
+   async getFileMetadata(fileId) {
+     return await this.databaseAdapter.getFile(fileId);
+   }
+   /**
+    * Get file stream for download
+    *
+    * Reads chunks in order and creates a stream pipeline for file output.
+    * Supports Range requests for partial content.
+    */
+   async getFileStream(fileId, range) {
+     const file = await this.databaseAdapter.getFile(fileId);
+     if (!file || file.status !== "completed") return null;
+     const chunkHashes = await this.databaseAdapter.getFileChunkHashes(fileId);
+     const { Readable } = await import("stream");
+     const storageAdapter = this.storageAdapter;
+     let currentChunkIndex = 0;
+     let bytesRead = 0;
+     const startByte = range?.start || 0;
+     const endByte = range?.end || file.size - 1;
+     return {
+       stream: new Readable({ async read() {
+         try {
+           while (currentChunkIndex < chunkHashes.length) {
+             const chunkHash = chunkHashes[currentChunkIndex];
+             const chunkData = await storageAdapter.getChunk(chunkHash);
+             if (!chunkData) {
+               this.destroy(/* @__PURE__ */ new Error(`Chunk ${chunkHash} not found`));
+               return;
+             }
+             const chunkStart = currentChunkIndex * file.chunkSize;
+             const chunkEnd = chunkStart + chunkData.length - 1;
+             if (chunkEnd < startByte) {
+               currentChunkIndex++;
+               continue;
+             }
+             if (chunkStart > endByte) {
+               this.push(null);
+               return;
+             }
+             let sliceStart = 0;
+             let sliceEnd = chunkData.length;
+             if (chunkStart < startByte) sliceStart = startByte - chunkStart;
+             if (chunkEnd > endByte) sliceEnd = endByte - chunkStart + 1;
+             const slicedData = chunkData.slice(sliceStart, sliceEnd);
+             this.push(slicedData);
+             bytesRead += slicedData.length;
+             currentChunkIndex++;
+             if (bytesRead >= endByte - startByte + 1) {
+               this.push(null);
+               return;
+             }
+             return;
+           }
+           this.push(null);
+         } catch (error) {
+           this.destroy(error);
+         }
+       } }),
+       size: range ? endByte - startByte + 1 : file.size,
+       mimeType: file.mimeType
+     };
+   }
+ };
+
+ //#endregion
+ exports.LocalStorageAdapter = LocalStorageAdapter;
+ exports.MemoryDatabaseAdapter = MemoryDatabaseAdapter;
+ exports.UploadService = UploadService;
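Read end to end, `UploadService` implies a four-step protocol: create a session, probe hashes for instant upload, send the missing chunks, then merge. Here is a sketch of that flow, assuming the `service` wired up in the snippet after the README; the request field names are read off this version's code, and `SparkMD5.ArrayBuffer.hash` mirrors the check `uploadChunk` itself performs on each chunk:

```ts
import SparkMD5 from "spark-md5";

// spark-md5 hashes raw bytes; uploadChunk passes Node Buffers to this same
// call, so the cast only quiets the ArrayBuffer typings.
const md5 = (buf: Buffer) => SparkMD5.ArrayBuffer.hash(buf as unknown as ArrayBuffer);

const data = Buffer.from("hello chunked world");

// 1. Create a session; chunk size is negotiated server-side
//    (files under 10 MiB get max(256 KiB, minChunkSize)).
const { uploadToken, negotiatedChunkSize } = await service.createFile({
  fileName: "hello.txt",
  fileSize: data.length,
  fileType: "text/plain",
});

// 2. Split the file and hash every chunk plus the whole file.
const chunks: Buffer[] = [];
for (let off = 0; off < data.length; off += negotiatedChunkSize) {
  chunks.push(data.subarray(off, off + negotiatedChunkSize));
}
const chunkHashes = chunks.map((c) => md5(c));
const fileHash = md5(data);

// 3. Probe for instant upload: the server reports which chunks it already has.
const { fileExists, missingChunks } = await service.verifyHash({
  uploadToken,
  fileHash,
  chunkHashes,
});

if (!fileExists) {
  // Upload only the chunks the server is missing.
  for (const i of missingChunks) {
    await service.uploadChunk({
      uploadToken,
      chunk: chunks[i],
      chunkHash: chunkHashes[i],
      chunkIndex: i,
    });
  }

  // 4. Logical merge: flips status to "completed" and assigns the access URL.
  const { fileId } = await service.mergeFile({ uploadToken, fileHash });

  // Read it back, optionally as an HTTP-style byte range.
  const result = await service.getFileStream(fileId, { start: 0, end: 4 });
  result?.stream.pipe(process.stdout); // first five bytes
}
```

Note that `mergeFile` is a logical merge: chunks stay content-addressed in storage and `getFileStream` reassembles them on read, which is what makes the refCount-based deduplication above workable.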