@kadi.build/file-sharing 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,984 @@
+/**
+ * S3Server - Local S3-compatible API server for @kadi.build/file-sharing
+ *
+ * Provides a LOCAL S3-compatible server that emulates AWS S3 API endpoints.
+ * Files are stored locally on disk, NOT on AWS. This allows using AWS SDK tools
+ * (like @aws-sdk/client-s3) to interact with your local file system.
+ *
+ * Supported operations:
+ * - ListBuckets
+ * - ListObjects / ListObjectsV2
+ * - GetObject (with range requests)
+ * - PutObject
+ * - DeleteObject
+ * - HeadObject
+ * - HeadBucket
+ * - CreateBucket
+ * - DeleteBucket
+ * - CreateMultipartUpload / UploadPart / CompleteMultipartUpload
+ *
+ * Migrated from src/s3Server.js
+ */
+
+import { EventEmitter } from 'events';
+import http from 'http';
+import path from 'path';
+import fs from 'fs/promises';
+import { createReadStream, createWriteStream } from 'fs';
+import crypto from 'crypto';
+import { Builder } from 'xml2js';
+
+export class S3Server extends EventEmitter {
+  constructor(config = {}) {
+    super();
+
+    this.config = {
+      port: 9000,
+      host: '0.0.0.0',
+      rootDir: process.cwd(),
+      accessKeyId: 'minioadmin',
+      secretAccessKey: 'minioadmin',
+      bucketName: 'local',
+      region: 'us-east-1',
+      maxFileSize: null,
+      ...config
+    };
+
+    this.server = null;
+    this.isRunning = false;
+    this.multipartUploads = new Map();
+    this.requestCount = 0;
+  }
+
+  /**
+   * Start the S3 server
+   * @returns {Promise<{ port: number, endpoint: string }>}
+   */
+  async start() {
+    if (this.isRunning) {
+      return {
+        port: this.config.port,
+        endpoint: `http://${this.config.host}:${this.config.port}`
+      };
+    }
+
+    // Ensure bucket directory exists
+    const bucketDir = path.join(this.config.rootDir, this.config.bucketName);
+    await fs.mkdir(bucketDir, { recursive: true });
+
+    return new Promise((resolve, reject) => {
+      this.server = http.createServer(async (req, res) => {
+        this.requestCount++;
+        try {
+          // Set CORS headers
+          res.setHeader('Access-Control-Allow-Origin', '*');
+          res.setHeader('Access-Control-Allow-Methods', 'GET, PUT, POST, DELETE, HEAD, OPTIONS');
+          res.setHeader('Access-Control-Allow-Headers', '*');
+          res.setHeader('Access-Control-Expose-Headers', 'ETag, x-amz-request-id, x-amz-id-2, Content-Length, Content-Type');
+
+          if (req.method === 'OPTIONS') {
+            res.writeHead(204);
+            res.end();
+            return;
+          }
+
+          // S3 authentication check
+          if (!this._checkS3Auth(req)) {
+            this._sendError(res, 'AccessDenied',
+              'Access Denied. Provide valid AWS credentials via Authorization header or query parameters.', 403);
+            return;
+          }
+
+          await this._handleRequest(req, res);
+        } catch (error) {
+          this.emit('error', error);
+          if (!res.headersSent) {
+            this._sendError(res, 'InternalError', error.message, 500);
+          }
+        }
+      });
+
+      this.server.listen(this.config.port, this.config.host, () => {
+        const addr = this.server.address();
+        this.config.port = addr.port;
+        this.isRunning = true;
+
+        const info = {
+          port: addr.port,
+          endpoint: `http://${this.config.host}:${addr.port}`
+        };
+
+        this.emit('started', info);
+        resolve(info);
+      });
+
+      this.server.on('error', (err) => {
+        this.emit('error', err);
+        reject(err);
+      });
+    });
+  }
+
+  /**
+   * Stop the S3 server
+   */
+  async stop() {
+    if (!this.isRunning) return;
+
+    return new Promise((resolve) => {
+      this.server.close(() => {
+        this.isRunning = false;
+        this.server = null;
+        this.multipartUploads.clear();
+        this.emit('stopped');
+        resolve();
+      });
+    });
+  }
+
+  /**
+   * Get bucket configuration
+   * @returns {{ name: string, rootDir: string }}
+   */
+  getBucket() {
+    return { name: this.config.bucketName, rootDir: this.config.rootDir };
+  }
+
+  /**
+   * List objects in bucket (programmatic access)
+   * @param {string} prefix - Optional prefix filter
+   * @returns {Promise<Array>}
+   */
+  async listObjects(prefix = '') {
+    const bucketDir = path.join(this.config.rootDir, this.config.bucketName);
+    const searchDir = prefix ? path.join(bucketDir, prefix) : bucketDir;
+
+    try {
+      const entries = await this._walkDirectory(searchDir, bucketDir);
+      return entries;
+    } catch {
+      return [];
+    }
+  }
+
+  // ============================================================================
+  // REQUEST ROUTING
+  // ============================================================================
+
+  /**
+   * Main request router
+   * @private
+   */
+  async _handleRequest(req, res) {
+    const url = new URL(req.url, `http://${req.headers.host || 'localhost'}`);
+    const pathParts = url.pathname.split('/').filter(Boolean);
+
+    // Add S3 response headers
+    res.setHeader('x-amz-request-id', crypto.randomBytes(8).toString('hex').toUpperCase());
+    res.setHeader('x-amz-id-2', crypto.randomBytes(16).toString('base64'));
+    res.setHeader('Server', 'KadiS3');
+
+    // Root: List buckets
+    if (pathParts.length === 0) {
+      if (req.method === 'GET') {
+        return this._listBuckets(req, res);
+      }
+    }
+
+    const bucket = pathParts[0];
+    const key = pathParts.slice(1).join('/');
+
+    // Bucket-level operations (no key)
+    if (!key && !url.search.includes('prefix')) {
+      switch (req.method) {
+        case 'GET':
+          return this._listObjects(req, res, bucket, url.searchParams);
+        case 'HEAD':
+          return this._headBucket(req, res, bucket);
+        case 'PUT':
+          return this._createBucket(req, res, bucket);
+        case 'DELETE':
+          return this._deleteBucket(req, res, bucket);
+      }
+    }
+
+    // Object-level operations
+    if (key || url.searchParams.has('prefix')) {
+      switch (req.method) {
+        case 'GET':
+          // Could be list with prefix if no explicit key
+          if (!key) {
+            return this._listObjects(req, res, bucket, url.searchParams);
+          }
+          return this._getObject(req, res, bucket, key);
+        case 'HEAD':
+          if (!key) {
+            return this._headBucket(req, res, bucket);
+          }
+          return this._headObject(req, res, bucket, key);
+        case 'PUT': {
+          // Check if this is a multipart upload part
+          const uploadId = url.searchParams.get('uploadId');
+          const partNumber = url.searchParams.get('partNumber');
+          if (uploadId && partNumber) {
+            return this._uploadPart(req, res, bucket, key, uploadId, parseInt(partNumber, 10));
+          }
+          return this._putObject(req, res, bucket, key);
+        }
+        case 'DELETE':
+          return this._deleteObject(req, res, bucket, key);
+        case 'POST': {
+          // Multipart upload operations
+          if (url.searchParams.has('uploads')) {
+            return this._createMultipartUpload(req, res, bucket, key);
+          }
+          const uploadIdPost = url.searchParams.get('uploadId');
+          if (uploadIdPost) {
+            return this._completeMultipartUpload(req, res, bucket, key, uploadIdPost);
+          }
+          // Fallback: treat POST like PUT
+          return this._putObject(req, res, bucket, key);
+        }
+      }
+    }
+
+    this._sendError(res, 'MethodNotAllowed', `Method ${req.method} not allowed`, 405);
+  }
+
+  // ============================================================================
+  // S3 API HANDLERS
+  // ============================================================================
+
+  /**
+   * S3 API: ListBuckets
+   * @private
+   */
+  async _listBuckets(req, res) {
+    const buckets = [{
+      Name: this.config.bucketName,
+      CreationDate: new Date().toISOString()
+    }];
+
+    // Also check for additional bucket directories
+    try {
+      const entries = await fs.readdir(this.config.rootDir, { withFileTypes: true });
+      for (const entry of entries) {
+        if (entry.isDirectory() && entry.name !== this.config.bucketName) {
+          buckets.push({
+            Name: entry.name,
+            CreationDate: new Date().toISOString()
+          });
+        }
+      }
+    } catch {
+      // Ignore readdir errors
+    }
+
+    const xml = this._buildXml('ListAllMyBucketsResult', {
+      $: { xmlns: 'http://s3.amazonaws.com/doc/2006-03-01/' },
+      Owner: { ID: 'owner', DisplayName: 'owner' },
+      Buckets: { Bucket: buckets }
+    });
+
+    this._sendXml(res, xml);
+  }
+
+  /**
+   * S3 API: ListObjects / ListObjectsV2
+   * @private
+   */
+  async _listObjects(req, res, bucket, params) {
+    const prefix = params.get('prefix') || '';
+    const delimiter = params.get('delimiter') || '';
+    const maxKeys = parseInt(params.get('max-keys') || '1000', 10);
+    const listType = params.get('list-type');
+
+    const bucketDir = path.join(this.config.rootDir, bucket);
+
+    try {
+      await fs.access(bucketDir);
+    } catch {
+      // Bucket doesn't exist - return empty result
+      const emptyResponse = this._formatListObjectsV2Response(
+        { objects: [], prefixes: [] }, bucket, prefix, delimiter, maxKeys
+      );
+      return this._sendXml(res, emptyResponse);
+    }
+
+    try {
+      const items = await this._listDirectoryForS3(bucketDir, prefix, delimiter, maxKeys);
+
+      const response = listType === '2'
+        ? this._formatListObjectsV2Response(items, bucket, prefix, delimiter, maxKeys)
+        : this._formatListObjectsResponse(items, bucket, prefix, delimiter, maxKeys);
+
+      this._sendXml(res, response);
+    } catch (error) {
+      if (error.code === 'ENOENT') {
+        this._sendXml(res, this._formatListObjectsV2Response(
+          { objects: [], prefixes: [] }, bucket, prefix, delimiter, maxKeys
+        ));
+      } else {
+        throw error;
+      }
+    }
+  }
+
+  /**
+   * S3 API: GetObject
+   * @private
+   */
+  async _getObject(req, res, bucket, key) {
+    const filePath = path.join(this.config.rootDir, bucket, key);
+
+    // Security: prevent directory traversal
+    const resolvedRoot = path.resolve(this.config.rootDir, bucket);
+    const resolvedPath = path.resolve(filePath);
+    if (!resolvedPath.startsWith(resolvedRoot)) {
+      return this._sendError(res, 'AccessDenied', 'Access Denied', 403);
+    }
+
+    try {
+      const stats = await fs.stat(filePath);
+
+      if (stats.isDirectory()) {
+        return this._sendError(res, 'NoSuchKey', 'The specified key does not exist', 404);
+      }
+
+      const etag = `"${crypto.createHash('md5').update(`${stats.size}-${stats.mtime.getTime()}`).digest('hex')}"`;
+
+      // Handle range requests
+      const range = req.headers.range;
+      if (range) {
+        const parts = range.replace(/bytes=/, '').split('-');
+        const start = parseInt(parts[0], 10);
+        const end = parts[1] ? parseInt(parts[1], 10) : stats.size - 1;
+        const chunkSize = end - start + 1;
+
+        res.writeHead(206, {
+          'Content-Range': `bytes ${start}-${end}/${stats.size}`,
+          'Accept-Ranges': 'bytes',
+          'Content-Length': chunkSize,
+          'Content-Type': 'application/octet-stream',
+          'Last-Modified': stats.mtime.toUTCString(),
+          'ETag': etag
+        });
+
+        createReadStream(filePath, { start, end }).pipe(res);
+      } else {
+        res.writeHead(200, {
+          'Content-Length': stats.size,
+          'Content-Type': 'application/octet-stream',
+          'Accept-Ranges': 'bytes',
+          'Last-Modified': stats.mtime.toUTCString(),
+          'ETag': etag
+        });
+
+        createReadStream(filePath).pipe(res);
+      }
+
+      this.emit('object:get', { bucket, key, size: stats.size });
+    } catch (error) {
+      if (error.code === 'ENOENT') {
+        this._sendError(res, 'NoSuchKey', 'The specified key does not exist', 404);
+      } else {
+        throw error;
+      }
+    }
+  }
+
+  /**
+   * S3 API: PutObject
+   * @private
+   */
+  async _putObject(req, res, bucket, key) {
+    const filePath = path.join(this.config.rootDir, bucket, key);
+    const dirPath = path.dirname(filePath);
+
+    // Security: prevent directory traversal
+    const resolvedRoot = path.resolve(this.config.rootDir, bucket);
+    const resolvedPath = path.resolve(filePath);
+    if (!resolvedPath.startsWith(resolvedRoot)) {
+      return this._sendError(res, 'AccessDenied', 'Access Denied', 403);
+    }
+
+    // Ensure bucket and parent directories exist
+    await fs.mkdir(dirPath, { recursive: true });
+
+    return new Promise((resolve, reject) => {
+      const writeStream = createWriteStream(filePath);
+      let size = 0;
+
+      req.on('data', (chunk) => {
+        size += chunk.length;
+        writeStream.write(chunk);
+      });
+
+      req.on('end', async () => {
+        writeStream.end();
+
+        // Wait for write stream to finish
+        writeStream.on('finish', async () => {
+          try {
+            const stats = await fs.stat(filePath);
+            const etag = `"${crypto.createHash('md5').update(`${stats.size}-${stats.mtime.getTime()}`).digest('hex')}"`;
+
+            res.writeHead(200, {
+              'ETag': etag,
+              'Content-Length': 0
+            });
+            res.end();
+
+            this.emit('object:put', { bucket, key, size });
+            resolve();
+          } catch (err) {
+            reject(err);
+          }
+        });
+      });
+
+      req.on('error', (err) => {
+        writeStream.destroy();
+        reject(err);
+      });
+
+      writeStream.on('error', (err) => {
+        if (!res.headersSent) {
+          this._sendError(res, 'InternalError', err.message, 500);
+        }
+        reject(err);
+      });
+    });
+  }
+
+  /**
+   * S3 API: DeleteObject
+   * @private
+   */
+  async _deleteObject(req, res, bucket, key) {
+    const filePath = path.join(this.config.rootDir, bucket, key);
+
+    try {
+      await fs.unlink(filePath);
+      this.emit('object:delete', { bucket, key });
+    } catch (error) {
+      // S3 returns success even if object doesn't exist
+      if (error.code !== 'ENOENT') {
+        throw error;
+      }
+    }
+
+    res.writeHead(204);
+    res.end();
+  }
+
+  /**
+   * S3 API: HeadObject
+   * @private
+   */
+  async _headObject(req, res, bucket, key) {
+    const filePath = path.join(this.config.rootDir, bucket, key);
+
+    try {
+      const stats = await fs.stat(filePath);
+
+      if (stats.isDirectory()) {
+        return this._sendError(res, 'NoSuchKey', 'The specified key does not exist', 404);
+      }
+
+      const etag = `"${crypto.createHash('md5').update(`${stats.size}-${stats.mtime.getTime()}`).digest('hex')}"`;
+
+      res.writeHead(200, {
+        'Content-Length': stats.size,
+        'Content-Type': 'application/octet-stream',
+        'Last-Modified': stats.mtime.toUTCString(),
+        'ETag': etag
+      });
+      res.end();
+    } catch (error) {
+      if (error.code === 'ENOENT') {
+        this._sendError(res, 'NoSuchKey', 'The specified key does not exist', 404);
+      } else {
+        throw error;
+      }
+    }
+  }
+
+  /**
+   * S3 API: HeadBucket
+   * @private
+   */
+  async _headBucket(req, res, bucket) {
+    const bucketDir = path.join(this.config.rootDir, bucket);
+
+    try {
+      await fs.access(bucketDir);
+      res.writeHead(200, {
+        'x-amz-bucket-region': this.config.region
+      });
+      res.end();
+    } catch {
+      this._sendError(res, 'NoSuchBucket', 'The specified bucket does not exist', 404);
+    }
+  }
+
+  /**
+   * S3 API: CreateBucket
+   * @private
+   */
+  async _createBucket(req, res, bucket) {
+    const bucketDir = path.join(this.config.rootDir, bucket);
+
+    try {
+      await fs.mkdir(bucketDir, { recursive: true });
+      res.writeHead(200, {
+        'Location': `/${bucket}`
+      });
+      res.end();
+    } catch (error) {
+      this._sendError(res, 'InternalError', error.message, 500);
+    }
+  }
+
+  /**
+   * S3 API: DeleteBucket
+   * @private
+   */
+  async _deleteBucket(req, res, bucket) {
+    const bucketDir = path.join(this.config.rootDir, bucket);
+
+    try {
+      const entries = await fs.readdir(bucketDir);
+      if (entries.length > 0) {
+        return this._sendError(res, 'BucketNotEmpty', 'The bucket you tried to delete is not empty', 409);
+      }
+      await fs.rmdir(bucketDir);
+      res.writeHead(204);
+      res.end();
+    } catch (error) {
+      if (error.code === 'ENOENT') {
+        this._sendError(res, 'NoSuchBucket', 'The specified bucket does not exist', 404);
+      } else {
+        this._sendError(res, 'InternalError', error.message, 500);
+      }
+    }
+  }
+
+  // ============================================================================
+  // MULTIPART UPLOAD HANDLERS
+  // ============================================================================
+
+  /**
+   * S3 API: CreateMultipartUpload
+   * @private
+   */
+  async _createMultipartUpload(req, res, bucket, key) {
+    const uploadId = crypto.randomBytes(16).toString('hex');
+
+    this.multipartUploads.set(uploadId, {
+      bucket,
+      key,
+      parts: new Map(),
+      createdAt: new Date()
+    });
+
+    const xml = this._buildXml('InitiateMultipartUploadResult', {
+      $: { xmlns: 'http://s3.amazonaws.com/doc/2006-03-01/' },
+      Bucket: bucket,
+      Key: key,
+      UploadId: uploadId
+    });
+
+    this._sendXml(res, xml);
+  }
+
+  /**
+   * S3 API: UploadPart
+   * @private
+   */
+  async _uploadPart(req, res, bucket, key, uploadId, partNumber) {
+    const upload = this.multipartUploads.get(uploadId);
+    if (!upload) {
+      return this._sendError(res, 'NoSuchUpload', 'The specified multipart upload does not exist', 404);
+    }
+
+    const chunks = [];
+    for await (const chunk of req) {
+      chunks.push(chunk);
+    }
+    const body = Buffer.concat(chunks);
+
+    const etag = `"${crypto.createHash('md5').update(body).digest('hex')}"`;
+    upload.parts.set(partNumber, { body, etag });
+
+    res.writeHead(200, {
+      'ETag': etag
+    });
+    res.end();
+  }
+
+  /**
+   * S3 API: CompleteMultipartUpload
+   * @private
+   */
+  async _completeMultipartUpload(req, res, bucket, key, uploadId) {
+    const upload = this.multipartUploads.get(uploadId);
+    if (!upload) {
+      return this._sendError(res, 'NoSuchUpload', 'The specified multipart upload does not exist', 404);
+    }
+
+    // Collect request body (XML with part list)
+    const chunks = [];
+    for await (const chunk of req) {
+      chunks.push(chunk);
+    }
+
+    // Assemble parts in order
+    const sortedParts = Array.from(upload.parts.entries())
+      .sort(([a], [b]) => a - b);
+
+    const bodyParts = sortedParts.map(([, part]) => part.body);
+    const fullBody = Buffer.concat(bodyParts);
+
+    // Write the assembled file
+    const filePath = path.join(this.config.rootDir, bucket, key);
+    const dirPath = path.dirname(filePath);
+    await fs.mkdir(dirPath, { recursive: true });
+    await fs.writeFile(filePath, fullBody);
+
+    const etag = `"${crypto.createHash('md5').update(fullBody).digest('hex')}"`;
+
+    // Cleanup
+    this.multipartUploads.delete(uploadId);
+
+    const xml = this._buildXml('CompleteMultipartUploadResult', {
+      $: { xmlns: 'http://s3.amazonaws.com/doc/2006-03-01/' },
+      Location: `http://${this.config.host}:${this.config.port}/${bucket}/${key}`,
+      Bucket: bucket,
+      Key: key,
+      ETag: etag
+    });
+
+    this._sendXml(res, xml);
+    this.emit('object:put', { bucket, key, size: fullBody.length });
+  }
+
+  // ============================================================================
+  // DIRECTORY LISTING HELPERS
+  // ============================================================================
+
+  /**
+   * List directory contents for S3-style listing
+   * @private
+   */
+  async _listDirectoryForS3(bucketDir, prefix, delimiter, maxKeys) {
+    const objects = [];
+    const prefixes = new Set();
+
+    const searchDir = prefix ? path.join(bucketDir, prefix) : bucketDir;
+
+    try {
+      // If prefix points to a specific directory, list its contents
+      const stats = await fs.stat(searchDir);
+      if (stats.isDirectory()) {
+        const entries = await fs.readdir(searchDir, { withFileTypes: true });
+
+        for (const entry of entries) {
+          if (objects.length >= maxKeys) break;
+
+          const entryKey = prefix
+            ? `${prefix}${prefix.endsWith('/') ? '' : '/'}${entry.name}`
+            : entry.name;
+
+          if (entry.isDirectory()) {
+            if (delimiter) {
+              prefixes.add(entryKey.endsWith('/') ? entryKey : `${entryKey}/`);
+            } else {
+              // Recurse into subdirectory
+              const subDir = path.join(searchDir, entry.name);
+              const subEntries = await this._walkDirectory(subDir, bucketDir);
+              objects.push(...subEntries.slice(0, maxKeys - objects.length));
+            }
+          } else {
+            const filePath = path.join(searchDir, entry.name);
+            const fileStat = await fs.stat(filePath);
+            objects.push({
+              Key: entryKey,
+              LastModified: fileStat.mtime.toISOString(),
+              ETag: `"${crypto.createHash('md5').update(`${fileStat.size}-${fileStat.mtime.getTime()}`).digest('hex')}"`,
+              Size: fileStat.size,
+              StorageClass: 'STANDARD'
+            });
+          }
+        }
+      }
+    } catch (error) {
+      if (error.code === 'ENOENT') {
+        // Try prefix as a file prefix in parent directory
+        const parentDir = path.dirname(searchDir);
+        const filePrefix = path.basename(searchDir);
+
+        try {
+          const entries = await fs.readdir(parentDir, { withFileTypes: true });
+          for (const entry of entries) {
+            if (objects.length >= maxKeys) break;
+            if (entry.name.startsWith(filePrefix)) {
+              const entryPath = path.join(parentDir, entry.name);
+              const entryStat = await fs.stat(entryPath);
+              const relKey = path.relative(bucketDir, entryPath);
+
+              if (entry.isFile()) {
+                objects.push({
+                  Key: relKey,
+                  LastModified: entryStat.mtime.toISOString(),
+                  ETag: `"${crypto.createHash('md5').update(`${entryStat.size}-${entryStat.mtime.getTime()}`).digest('hex')}"`,
+                  Size: entryStat.size,
+                  StorageClass: 'STANDARD'
+                });
+              } else if (delimiter) {
+                prefixes.add(relKey.endsWith('/') ? relKey : `${relKey}/`);
+              }
+            }
+          }
+        } catch {
+          // Parent dir doesn't exist either
+        }
+      }
+    }
+
+    return { objects, prefixes: Array.from(prefixes) };
+  }
+
+  /**
+   * Walk directory recursively, collecting file entries
+   * @private
+   */
+  async _walkDirectory(dir, baseDir) {
+    const results = [];
+
+    try {
+      const entries = await fs.readdir(dir, { withFileTypes: true });
+
+      for (const entry of entries) {
+        const fullPath = path.join(dir, entry.name);
+
+        if (entry.isFile()) {
+          const stats = await fs.stat(fullPath);
+          const relKey = path.relative(baseDir, fullPath);
+          results.push({
+            Key: relKey,
+            LastModified: stats.mtime.toISOString(),
+            ETag: `"${crypto.createHash('md5').update(`${stats.size}-${stats.mtime.getTime()}`).digest('hex')}"`,
+            Size: stats.size,
+            StorageClass: 'STANDARD'
+          });
+        } else if (entry.isDirectory()) {
+          const subResults = await this._walkDirectory(fullPath, baseDir);
+          results.push(...subResults);
+        }
+      }
+    } catch {
+      // Ignore errors during walk
+    }
+
+    return results;
+  }
+
+  // ============================================================================
+  // XML RESPONSE FORMATTING
+  // ============================================================================
+
+  /**
+   * Format ListObjects (v1) response
+   * @private
+   */
+  _formatListObjectsResponse(items, bucket, prefix, delimiter, maxKeys) {
+    const response = {
+      $: { xmlns: 'http://s3.amazonaws.com/doc/2006-03-01/' },
+      Name: bucket,
+      Prefix: prefix || '',
+      MaxKeys: maxKeys,
+      IsTruncated: false
+    };
+
+    if (delimiter) {
+      response.Delimiter = delimiter;
+    }
+
+    if (items.objects.length > 0) {
+      response.Contents = items.objects;
+    }
+
+    if (items.prefixes.length > 0) {
+      response.CommonPrefixes = items.prefixes.map(p => ({ Prefix: p }));
+    }
+
+    return this._buildXml('ListBucketResult', response);
+  }
+
+  /**
+   * Format ListObjectsV2 response
+   * @private
+   */
+  _formatListObjectsV2Response(items, bucket, prefix, delimiter, maxKeys) {
+    const response = {
+      $: { xmlns: 'http://s3.amazonaws.com/doc/2006-03-01/' },
+      Name: bucket,
+      Prefix: prefix || '',
+      MaxKeys: maxKeys,
+      KeyCount: items.objects.length,
+      IsTruncated: false
+    };
+
+    if (delimiter) {
+      response.Delimiter = delimiter;
+    }
+
+    if (items.objects.length > 0) {
+      response.Contents = items.objects;
+    }
+
+    if (items.prefixes.length > 0) {
+      response.CommonPrefixes = items.prefixes.map(p => ({ Prefix: p }));
+    }
+
+    return this._buildXml('ListBucketResult', response);
+  }
+
+  // ============================================================================
+  // XML HELPERS
+  // ============================================================================
+
+  /**
+   * Build XML response
+   * @private
+   */
+  _buildXml(rootName, content) {
+    const builder = new Builder({
+      rootName,
+      headless: false,
+      xmldec: { version: '1.0', encoding: 'UTF-8' }
+    });
+    return builder.buildObject(content);
+  }
+
+  /**
+   * Send XML response
+   * @private
+   */
+  _sendXml(res, xml) {
+    res.writeHead(200, {
+      'Content-Type': 'application/xml',
+      'Content-Length': Buffer.byteLength(xml)
+    });
+    res.end(xml);
+  }
+
+  /**
+   * Send S3-formatted error response
+   * @private
+   */
+  _sendError(res, code, message, statusCode) {
+    const xml = this._buildXml('Error', {
+      Code: code,
+      Message: message,
+      RequestId: crypto.randomBytes(8).toString('hex'),
+      HostId: crypto.randomBytes(16).toString('base64')
+    });
+
+    res.writeHead(statusCode, {
+      'Content-Type': 'application/xml',
+      'Content-Length': Buffer.byteLength(xml)
+    });
+    res.end(xml);
+  }
+
+  // ============================================================================
+  // AUTHENTICATION
+  // ============================================================================
+
+  /**
+   * Validate S3 request authentication.
+   *
+   * When `config.enforceAuth` is `true` (or when non-default credentials are
+   * configured), every request must carry valid credentials via one of:
+   *
+   * 1. `Authorization` header containing the access key id
+   *    - AWS Signature V4: `AWS4-HMAC-SHA256 Credential=<accessKeyId>/…`
+   *    - AWS Signature V2: `AWS <accessKeyId>:…`
+   *    - Simple Bearer: `Bearer <accessKeyId>` (non-standard convenience)
+   *
+   * 2. Query-string pre-signed URL parameters
+   *    - V4: `?X-Amz-Credential=<accessKeyId>/…`
+   *    - V2: `?AWSAccessKeyId=<accessKeyId>`
+   *
+   * Note: This is intentionally *identity-based* (checks access key) rather
+   * than a full HMAC signature check. A full SigV4 implementation would
+   * require reconstructing the canonical request exactly as the SDK built it,
+   * which is fragile and unnecessary for a local dev server. The goal is to
+   * prevent accidental unauthenticated access when credentials are configured,
+   * not to replicate AWS IAM.
+   *
+   * @private
+   * @param {import('http').IncomingMessage} req
+   * @returns {boolean} `true` if the request is authorized
+   */
+  _checkS3Auth(req) {
+    const { accessKeyId, secretAccessKey, enforceAuth } = this.config;
+
+    // If using default credentials and enforceAuth is not explicitly true,
+    // skip auth checks (backward compatible — local dev convenience).
+    const isDefault = accessKeyId === 'minioadmin' && secretAccessKey === 'minioadmin';
+    if (isDefault && !enforceAuth) {
+      return true;
+    }
+
+    // --- Check Authorization header ---
+    const authHeader = req.headers.authorization || '';
+
+    // AWS Signature V4: "AWS4-HMAC-SHA256 Credential=<accessKeyId>/date/region/s3/aws4_request, …"
+    if (authHeader.startsWith('AWS4-HMAC-SHA256')) {
+      const credMatch = authHeader.match(/Credential=([^/,\s]+)/);
+      if (credMatch && credMatch[1] === accessKeyId) {
+        return true;
+      }
+    }
+
+    // AWS Signature V2: "AWS <accessKeyId>:<signature>"
+    if (authHeader.startsWith('AWS ')) {
+      const parts = authHeader.slice(4).split(':');
+      if (parts[0] === accessKeyId) {
+        return true;
+      }
+    }
+
+    // Simple Bearer (non-standard convenience for agents/scripts)
+    if (authHeader.startsWith('Bearer ')) {
+      if (authHeader.slice(7) === accessKeyId) {
+        return true;
+      }
+    }
+
+    // --- Check query parameters (pre-signed URL) ---
+    try {
+      const url = new URL(req.url, `http://${req.headers.host || 'localhost'}`);
+
+      // V4 pre-signed: ?X-Amz-Credential=<accessKeyId>/…
+      const amzCred = url.searchParams.get('X-Amz-Credential');
+      if (amzCred && amzCred.startsWith(accessKeyId + '/')) {
+        return true;
+      }
+
+      // V2 pre-signed: ?AWSAccessKeyId=<accessKeyId>
+      if (url.searchParams.get('AWSAccessKeyId') === accessKeyId) {
+        return true;
+      }
+    } catch {
+      // Malformed URL — deny
+    }
+
+    return false;
+  }
+}
+
+export default S3Server;
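
The header comment of the file describes driving this local server with @aws-sdk/client-s3. Below is a minimal usage sketch, assuming the constructor defaults shown above (port 9000, bucket 'local', minioadmin credentials) and assuming S3Server is reachable from the package's root export; the actual export path may differ. forcePathStyle is set because the server routes on /<bucket>/<key> paths rather than virtual-hosted bucket subdomains, and the client dials 127.0.0.1 because the endpoint returned by start() is built from the bind host (0.0.0.0 by default).

// usage-sketch.mjs (illustrative, not from the package's documentation)
import { S3Client, PutObjectCommand, GetObjectCommand } from '@aws-sdk/client-s3';
import { S3Server } from '@kadi.build/file-sharing'; // assumed root export; adjust to the package's actual entry point

// Start the local S3-compatible server; files land under rootDir/<bucket>/<key>.
const server = new S3Server({ rootDir: '/tmp/kadi-files' });
const { port } = await server.start();
const endpoint = `http://127.0.0.1:${port}`;

// Point the AWS SDK at the local endpoint with the default credentials.
const s3 = new S3Client({
  endpoint,
  region: 'us-east-1',
  credentials: { accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin' },
  forcePathStyle: true
});

// Write and read back an object through the S3 API; on disk this is /tmp/kadi-files/local/notes/hello.txt.
await s3.send(new PutObjectCommand({ Bucket: 'local', Key: 'notes/hello.txt', Body: 'hello' }));
const obj = await s3.send(new GetObjectCommand({ Bucket: 'local', Key: 'notes/hello.txt' }));
console.log(await obj.Body.transformToString()); // "hello"

await server.stop();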
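
The _checkS3Auth docblock above describes an identity-based check rather than full signature verification. A sketch of what that means in practice, assuming Node 18+ for global fetch and the same hypothetical root export: once non-default credentials are configured (or enforceAuth is true), a request passes as soon as the configured access key id appears in the Authorization header or query string, and the secret key is never actually verified.

// auth-sketch.mjs (illustrative; behavior inferred from _checkS3Auth above)
import { S3Server } from '@kadi.build/file-sharing'; // assumed root export

const server = new S3Server({
  rootDir: '/tmp/kadi-files',
  accessKeyId: 'my-key',
  secretAccessKey: 'my-secret',
  enforceAuth: true
});
const { port } = await server.start();

// Without credentials the server answers 403 AccessDenied.
const denied = await fetch(`http://127.0.0.1:${port}/local`);
console.log(denied.status); // 403

// The non-standard Bearer form only needs to carry the access key id.
const allowed = await fetch(`http://127.0.0.1:${port}/local`, {
  headers: { Authorization: 'Bearer my-key' }
});
console.log(allowed.status); // 200, ListObjects XML for the 'local' bucket

await server.stop();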