bun-types 1.2.6 → 1.2.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/s3.d.ts ADDED
@@ -0,0 +1,825 @@
+ declare module "bun" {
+   /**
+    * Fast incremental writer for files and pipes.
+    *
+    * This uses the same interface as {@link ArrayBufferSink}, but writes to a file or pipe.
+    */
+   interface FileSink {
+     /**
+      * Write a chunk of data to the file.
+      *
+      * If the file descriptor is not writable yet, the data is buffered.
+      */
+     write(chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer): number;
+     /**
+      * Flush the internal buffer, committing the data to disk or the pipe.
+      */
+     flush(): number | Promise<number>;
+     /**
+      * Close the file descriptor. This also flushes the internal buffer.
+      */
+     end(error?: Error): number | Promise<number>;
+
+     start(options?: {
+       /**
+        * Preallocate an internal buffer of this size.
+        * This can significantly improve performance when the chunk size is small.
+        */
+       highWaterMark?: number;
+     }): void;
+
+     /**
+      * For FIFOs & pipes, this lets you decide whether Bun's process should
+      * remain alive until the pipe is closed.
+      *
+      * By default, it is automatically managed. While the stream is open, the
+      * process remains alive and once the other end hangs up or the stream
+      * closes, the process exits.
+      *
+      * If you previously called {@link unref}, you can call this again to re-enable automatic management.
+      *
+      * Internally, it will reference count the number of times this is called. By default, that number is 1.
+      *
+      * If the file is not a FIFO or pipe, {@link ref} and {@link unref} do
+      * nothing. If the pipe is already closed, this does nothing.
+      */
+     ref(): void;
+
+     /**
+      * For FIFOs & pipes, this lets you decide whether Bun's process should
+      * remain alive until the pipe is closed.
+      *
+      * If you want to allow Bun's process to terminate while the stream is open,
+      * call this.
+      *
+      * If the file is not a FIFO or pipe, {@link ref} and {@link unref} do
+      * nothing. If the pipe is already closed, this does nothing.
+      */
+     unref(): void;
+   }
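
A minimal usage sketch, assuming `Bun.file(...).writer()` returns this `FileSink` (as it does elsewhere in bun-types):

  const sink = Bun.file("out.log").writer();
  sink.start({ highWaterMark: 1024 * 1024 }); // preallocate a 1 MiB buffer
  sink.write("first chunk\n");  // buffered if the fd is not writable yet
  sink.write("second chunk\n");
  await sink.flush();           // commit buffered data to disk
  await sink.end();             // flush and close the file descriptor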
+
+   interface NetworkSink extends FileSink {
+     /**
+      * Write a chunk of data to the network.
+      *
+      * If the network is not writable yet, the data is buffered.
+      */
+     write(chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer): number;
+     /**
+      * Flush the internal buffer, committing the data to the network.
+      */
+     flush(): number | Promise<number>;
+     /**
+      * Finish the upload. This also flushes the internal buffer.
+      */
+     end(error?: Error): number | Promise<number>;
+
+     /**
+      * Get the stat of the file.
+      */
+     stat(): Promise<import("node:fs").Stats>;
+   }
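
A minimal sketch of the streaming-upload path these members describe, using the `S3File.writer()` method declared later in this file (the key name is a placeholder):

  import { s3 } from "bun";

  const sink = s3.file("logs/app.ndjson").writer({ type: "application/x-ndjson" });
  sink.write('{"event":"start"}\n'); // buffered until enough data accumulates for a part
  sink.write('{"event":"stop"}\n');
  const bytesUploaded = await sink.end(); // finish the multipart upload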
+
+   /**
+    * Configuration options for S3 operations
+    */
+   interface S3Options extends BlobPropertyBag {
+     /**
+      * The Access Control List (ACL) policy for the file.
+      * Controls who can access the file and what permissions they have.
+      *
+      * @example
+      * // Setting public read access
+      * const file = s3.file("public-file.txt", {
+      *   acl: "public-read",
+      *   bucket: "my-bucket"
+      * });
+      *
+      * @example
+      * // Using with presigned URLs
+      * const url = file.presign({
+      *   acl: "public-read",
+      *   expiresIn: 3600
+      * });
+      */
+     acl?:
+       | "private"
+       | "public-read"
+       | "public-read-write"
+       | "aws-exec-read"
+       | "authenticated-read"
+       | "bucket-owner-read"
+       | "bucket-owner-full-control"
+       | "log-delivery-write";
+
+     /**
+      * The S3 bucket name. Can be set via `S3_BUCKET` or `AWS_BUCKET` environment variables.
+      *
+      * @example
+      * // Using an explicit bucket
+      * const file = s3.file("my-file.txt", { bucket: "my-bucket" });
+      *
+      * @example
+      * // Using environment variables
+      * // With S3_BUCKET=my-bucket in .env
+      * const file = s3.file("my-file.txt");
+      */
+     bucket?: string;
+
+     /**
+      * The AWS region. Can be set via `S3_REGION` or `AWS_REGION` environment variables.
+      *
+      * @example
+      * const file = s3.file("my-file.txt", {
+      *   bucket: "my-bucket",
+      *   region: "us-west-2"
+      * });
+      */
+     region?: string;
+
+     /**
+      * The access key ID for authentication.
+      * Can be set via `S3_ACCESS_KEY_ID` or `AWS_ACCESS_KEY_ID` environment variables.
+      */
+     accessKeyId?: string;
+
+     /**
+      * The secret access key for authentication.
+      * Can be set via `S3_SECRET_ACCESS_KEY` or `AWS_SECRET_ACCESS_KEY` environment variables.
+      */
+     secretAccessKey?: string;
+
+     /**
+      * Optional session token for temporary credentials.
+      * Can be set via `S3_SESSION_TOKEN` or `AWS_SESSION_TOKEN` environment variables.
+      *
+      * @example
+      * // Using temporary credentials
+      * const file = s3.file("my-file.txt", {
+      *   accessKeyId: tempAccessKey,
+      *   secretAccessKey: tempSecretKey,
+      *   sessionToken: tempSessionToken
+      * });
+      */
+     sessionToken?: string;
+
+     /**
+      * The S3-compatible service endpoint URL.
+      * Can be set via `S3_ENDPOINT` or `AWS_ENDPOINT` environment variables.
+      *
+      * @example
+      * // AWS S3
+      * const file = s3.file("my-file.txt", {
+      *   endpoint: "https://s3.us-east-1.amazonaws.com"
+      * });
+      *
+      * @example
+      * // Cloudflare R2
+      * const file = s3.file("my-file.txt", {
+      *   endpoint: "https://<account-id>.r2.cloudflarestorage.com"
+      * });
+      *
+      * @example
+      * // DigitalOcean Spaces
+      * const file = s3.file("my-file.txt", {
+      *   endpoint: "https://<region>.digitaloceanspaces.com"
+      * });
+      *
+      * @example
+      * // MinIO (local development)
+      * const file = s3.file("my-file.txt", {
+      *   endpoint: "http://localhost:9000"
+      * });
+      */
+     endpoint?: string;
+
+     /**
+      * Use a virtual hosted style endpoint. Defaults to false. When true and an
+      * `endpoint` is provided, the `bucket` option is ignored.
+      *
+      * @example
+      * // Using virtual hosted style
+      * const file = s3.file("my-file.txt", {
+      *   virtualHostedStyle: true,
+      *   endpoint: "https://my-bucket.s3.us-east-1.amazonaws.com"
+      * });
+      */
+     virtualHostedStyle?: boolean;
+
+     /**
+      * The size of each part in multipart uploads (in bytes).
+      * - Minimum: 5 MiB
+      * - Maximum: 5120 MiB
+      * - Default: 5 MiB
+      *
+      * @example
+      * // Configuring multipart uploads
+      * const file = s3.file("large-file.dat", {
+      *   partSize: 10 * 1024 * 1024, // 10 MiB parts
+      *   queueSize: 4 // Upload 4 parts in parallel
+      * });
+      *
+      * const writer = file.writer();
+      * // ... write large file in chunks
+      */
+     partSize?: number;
+
+     /**
+      * Number of parts to upload in parallel for multipart uploads.
+      * - Default: 5
+      * - Maximum: 255
+      *
+      * Increasing this value can improve upload speeds for large files
+      * but will use more memory.
+      */
+     queueSize?: number;
+
+     /**
+      * Number of retry attempts for failed uploads.
+      * - Default: 3
+      * - Maximum: 255
+      *
+      * @example
+      * // Setting retry attempts
+      * const file = s3.file("my-file.txt", {
+      *   retry: 5 // Retry failed uploads up to 5 times
+      * });
+      */
+     retry?: number;
+
+     /**
+      * The Content-Type of the file.
+      * Automatically set based on the file extension when possible.
+      *
+      * @example
+      * // Setting an explicit content type
+      * const file = s3.file("data.bin", {
+      *   type: "application/octet-stream"
+      * });
+      */
+     type?: string;
+
+     /**
+      * By default, Amazon S3 uses the STANDARD storage class to store newly created objects.
+      *
+      * @example
+      * // Setting an explicit storage class
+      * const file = s3.file("my-file.json", {
+      *   storageClass: "STANDARD_IA"
+      * });
+      */
+     storageClass?:
+       | "STANDARD"
+       | "DEEP_ARCHIVE"
+       | "EXPRESS_ONEZONE"
+       | "GLACIER"
+       | "GLACIER_IR"
+       | "INTELLIGENT_TIERING"
+       | "ONEZONE_IA"
+       | "OUTPOSTS"
+       | "REDUCED_REDUNDANCY"
+       | "SNOW"
+       | "STANDARD_IA";
+
+     /**
+      * @deprecated The size of the internal buffer in bytes. Defaults to 5 MiB. Use `partSize` and `queueSize` instead.
+      */
+     highWaterMark?: number;
+   }
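
Putting these options together, a sketch of an explicitly configured client for an S3-compatible service (endpoint, bucket, and credential values are placeholders; in practice most of them fall back to the environment variables named above):

  import { S3Client, type S3Options } from "bun";

  const options: S3Options = {
    bucket: "my-bucket",
    region: "us-east-1",
    accessKeyId: process.env.S3_ACCESS_KEY_ID,
    secretAccessKey: process.env.S3_SECRET_ACCESS_KEY,
    endpoint: "http://localhost:9000", // e.g. MinIO in local development
    partSize: 8 * 1024 * 1024,         // 8 MiB parts (the minimum is 5 MiB)
    queueSize: 4,                      // upload 4 parts in parallel
    retry: 3,
  };
  const client = new S3Client(options);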
+
+   /**
+    * Options for generating presigned URLs
+    */
+   interface S3FilePresignOptions extends S3Options {
+     /**
+      * Number of seconds until the presigned URL expires.
+      * - Default: 86400 (1 day)
+      *
+      * @example
+      * // Short-lived URL
+      * const url = file.presign({
+      *   expiresIn: 3600 // 1 hour
+      * });
+      *
+      * @example
+      * // Long-lived public URL
+      * const url = file.presign({
+      *   expiresIn: 7 * 24 * 60 * 60, // 7 days
+      *   acl: "public-read"
+      * });
+      */
+     expiresIn?: number;
+
+     /**
+      * The HTTP method allowed for the presigned URL.
+      *
+      * @example
+      * // GET URL for downloads
+      * const downloadUrl = file.presign({
+      *   method: "GET",
+      *   expiresIn: 3600
+      * });
+      *
+      * @example
+      * // PUT URL for uploads
+      * const uploadUrl = file.presign({
+      *   method: "PUT",
+      *   expiresIn: 3600,
+      *   type: "application/json"
+      * });
+      */
+     method?: "GET" | "POST" | "PUT" | "DELETE" | "HEAD";
+   }
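
A sketch combining these options into a short-lived upload URL (the path and expiry are illustrative):

  import { s3 } from "bun";

  const uploadUrl = s3.presign("uploads/avatar.png", {
    method: "PUT",
    expiresIn: 15 * 60, // 15 minutes instead of the 86400-second default
    type: "image/png",
  });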
+
+   interface S3Stats {
+     size: number;
+     lastModified: Date;
+     etag: string;
+     type: string;
+   }
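
These fields are what the `stat()` methods declared below resolve to; a minimal sketch (the path is a placeholder):

  import { s3 } from "bun";

  const stats = await s3.stat("data/report.csv");
  console.log(stats.size);                       // size in bytes
  console.log(stats.lastModified.toISOString()); // a Date
  console.log(stats.etag, stats.type);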
+
+   /**
+    * Represents a file in an S3-compatible storage service.
+    * Extends the Blob interface for compatibility with web APIs.
+    */
+   interface S3File extends Blob {
+     /**
+      * The size of the file in bytes.
+      * This is a Promise because it requires a network request to determine the size.
+      *
+      * @example
+      * // Getting file size
+      * const size = await file.size;
+      * console.log(`File size: ${size} bytes`);
+      *
+      * @example
+      * // Check if file is larger than 1MB
+      * if (await file.size > 1024 * 1024) {
+      *   console.log("Large file detected");
+      * }
+      */
+     /**
+      * TODO: figure out how to get the typescript types to not error for this property.
+      */
+     // size: Promise<number>;
+
+     /**
+      * Creates a new S3File representing a slice of the original file.
+      * Uses HTTP Range headers for efficient partial downloads.
+      *
+      * @param begin - Starting byte offset
+      * @param end - Ending byte offset (exclusive)
+      * @param contentType - Optional MIME type for the slice
+      * @returns A new S3File representing the specified range
+      *
+      * @example
+      * // Reading the file header
+      * const header = file.slice(0, 1024);
+      * const headerText = await header.text();
+      *
+      * @example
+      * // Reading with a content type
+      * const jsonSlice = file.slice(1024, 2048, "application/json");
+      * const data = await jsonSlice.json();
+      *
+      * @example
+      * // Reading from an offset to the end
+      * const remainder = file.slice(1024);
+      * const content = await remainder.text();
+      */
+     slice(begin?: number, end?: number, contentType?: string): S3File;
+     slice(begin?: number, contentType?: string): S3File;
+     slice(contentType?: string): S3File;
+
+     /**
+      * Creates a writable stream for uploading data.
+      * Suitable for large files, as it uses multipart upload.
+      *
+      * @param options - Configuration for the upload
+      * @returns A NetworkSink for writing data
+      *
+      * @example
+      * // Basic streaming write
+      * const writer = file.writer({
+      *   type: "application/json"
+      * });
+      * writer.write('{"hello": ');
+      * writer.write('"world"}');
+      * await writer.end();
+      *
+      * @example
+      * // Optimized large file upload
+      * const writer = file.writer({
+      *   partSize: 10 * 1024 * 1024, // 10 MiB parts
+      *   queueSize: 4, // Upload 4 parts in parallel
+      *   retry: 3 // Retry failed parts
+      * });
+      *
+      * // Write large chunks of data efficiently
+      * for (const chunk of largeDataChunks) {
+      *   writer.write(chunk);
+      * }
+      * await writer.end();
+      *
+      * @example
+      * // Error handling
+      * const writer = file.writer();
+      * try {
+      *   writer.write(data);
+      *   await writer.end();
+      * } catch (err) {
+      *   console.error('Upload failed:', err);
+      *   // Writer will automatically abort the multipart upload on error
+      * }
+      */
+     writer(options?: S3Options): NetworkSink;
+
+     /**
+      * Gets a readable stream of the file's content.
+      * Useful for processing large files without loading them entirely into memory.
+      *
+      * @returns A ReadableStream for the file content
+      *
+      * @example
+      * // Basic streaming read
+      * const stream = file.stream();
+      * for await (const chunk of stream) {
+      *   console.log('Received chunk:', chunk);
+      * }
+      *
+      * @example
+      * // Piping to a response
+      * const stream = file.stream();
+      * return new Response(stream, {
+      *   headers: { 'Content-Type': file.type }
+      * });
+      *
+      * @example
+      * // Processing large files
+      * const stream = file.stream();
+      * const textDecoder = new TextDecoder();
+      * for await (const chunk of stream) {
+      *   const text = textDecoder.decode(chunk);
+      *   // Process text chunk by chunk
+      * }
+      */
+     readonly readable: ReadableStream;
+     stream(): ReadableStream;
+
+     /**
+      * The name or path of the file in the bucket.
+      *
+      * @example
+      * const file = s3.file("folder/image.jpg");
+      * console.log(file.name); // "folder/image.jpg"
+      */
+     readonly name?: string;
+
+     /**
+      * The bucket name containing the file.
+      *
+      * @example
+      * const file = s3.file("s3://my-bucket/file.txt");
+      * console.log(file.bucket); // "my-bucket"
+      */
+     readonly bucket?: string;
+
+     /**
+      * Checks if the file exists in S3.
+      * Uses an HTTP HEAD request to efficiently check existence without downloading.
+      *
+      * @returns Promise resolving to true if the file exists, false otherwise
+      *
+      * @example
+      * // Basic existence check
+      * if (await file.exists()) {
+      *   console.log("File exists in S3");
+      * }
+      *
+      * @example
+      * // With error handling
+      * try {
+      *   const exists = await file.exists();
+      *   if (!exists) {
+      *     console.log("File not found");
+      *   }
+      * } catch (err) {
+      *   console.error("Error checking file:", err);
+      * }
+      */
+     exists(): Promise<boolean>;
+
+     /**
+      * Uploads data to S3.
+      * Supports various input types and automatically handles large files.
+      *
+      * @param data - The data to upload
+      * @param options - Upload configuration options
+      * @returns Promise resolving to the number of bytes written
+      *
+      * @example
+      * // Writing string data
+      * await file.write("Hello World", {
+      *   type: "text/plain"
+      * });
+      *
+      * @example
+      * // Writing JSON
+      * const data = { hello: "world" };
+      * await file.write(JSON.stringify(data), {
+      *   type: "application/json"
+      * });
+      *
+      * @example
+      * // Writing from a Response
+      * const response = await fetch("https://example.com/data");
+      * await file.write(response);
+      *
+      * @example
+      * // Writing with an ACL
+      * await file.write(data, {
+      *   acl: "public-read",
+      *   type: "application/octet-stream"
+      * });
+      */
+     write(
+       data: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer | Request | Response | BunFile | S3File | Blob,
+       options?: S3Options,
+     ): Promise<number>;
+
+     /**
+      * Generates a presigned URL for the file.
+      * Allows temporary access to the file without exposing credentials.
+      *
+      * @param options - Configuration for the presigned URL
+      * @returns Presigned URL string
+      *
+      * @example
+      * // Basic download URL
+      * const url = file.presign({
+      *   expiresIn: 3600 // 1 hour
+      * });
+      *
+      * @example
+      * // Upload URL with a specific content type
+      * const uploadUrl = file.presign({
+      *   method: "PUT",
+      *   expiresIn: 3600,
+      *   type: "image/jpeg",
+      *   acl: "public-read"
+      * });
+      *
+      * @example
+      * // URL with custom permissions
+      * const url = file.presign({
+      *   method: "GET",
+      *   expiresIn: 7 * 24 * 60 * 60, // 7 days
+      *   acl: "public-read"
+      * });
+      */
+     presign(options?: S3FilePresignOptions): string;
+
+     /**
+      * Deletes the file from S3.
+      *
+      * @returns Promise that resolves when deletion is complete
+      *
+      * @example
+      * // Basic deletion
+      * await file.delete();
+      *
+      * @example
+      * // With error handling
+      * try {
+      *   await file.delete();
+      *   console.log("File deleted successfully");
+      * } catch (err) {
+      *   console.error("Failed to delete file:", err);
+      * }
+      */
+     delete(): Promise<void>;
+
+     /**
+      * Alias for the delete() method.
+      * Provided for compatibility with Node.js fs API naming.
+      *
+      * @example
+      * await file.unlink();
+      */
+     unlink: S3File["delete"];
+
+     /**
+      * Get the stat of a file in an S3-compatible storage service.
+      *
+      * @returns Promise resolving to S3Stats
+      */
+     stat(): Promise<S3Stats>;
+   }
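
A sketch tying the S3File members above into one write/read/presign/delete round trip (the key name is a placeholder; `json()` is inherited from Blob):

  import { s3 } from "bun";

  const file = s3.file("examples/hello.json");
  await file.write(JSON.stringify({ hello: "world" }), { type: "application/json" });

  if (await file.exists()) {
    const data = await file.json();                // read back via the Blob API
    const url = file.presign({ expiresIn: 3600 }); // 1-hour download link
    console.log(data, url);
  }
  await file.delete();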
+
+   /**
+    * A configured S3 bucket instance for managing files.
+    * An instance provides methods for creating S3File instances and for
+    * common path-based operations.
+    *
+    * @example
+    * // Basic bucket setup
+    * const bucket = new S3Client({
+    *   bucket: "my-bucket",
+    *   accessKeyId: "key",
+    *   secretAccessKey: "secret"
+    * });
+    *
+    * // Get a file instance
+    * const file = bucket.file("image.jpg");
+    *
+    * // Common operations
+    * await bucket.write("data.json", JSON.stringify({hello: "world"}));
+    * const url = bucket.presign("file.pdf");
+    * await bucket.unlink("old.txt");
+    */
+   class S3Client {
+     prototype: S3Client;
+     /**
+      * Create a new instance of an S3 bucket so that credentials can be managed
+      * from a single instance instead of being passed to every method.
+      *
+      * @param options The default options to use for the S3 client. Can be
+      * overridden by passing options to the methods.
+      *
+      * ## Keep S3 credentials in a single instance
+      *
+      * @example
+      * const bucket = new Bun.S3Client({
+      *   accessKeyId: "your-access-key",
+      *   secretAccessKey: "your-secret-key",
+      *   bucket: "my-bucket",
+      *   endpoint: "https://s3.us-east-1.amazonaws.com",
+      *   sessionToken: "your-session-token",
+      * });
+      *
+      * // Get a reference to a file:
+      * const file = bucket.file("my-file.txt");
+      *
+      * // and read or write it:
+      * await file.write("Hello Bun!");
+      * await file.text();
+      *
+      * // To delete the file:
+      * await bucket.delete("my-file.txt");
+      *
+      * // To write a file without returning the instance:
+      * await bucket.write("my-file.txt", "Hello Bun!");
+      */
+     constructor(options?: S3Options);
+
+     /**
+      * Creates an S3File instance for the given path.
+      *
+      * @example
+      * const file = bucket.file("image.jpg");
+      * await file.write(imageData);
+      * const configFile = bucket.file("config.json", {
+      *   type: "application/json",
+      *   acl: "private"
+      * });
+      */
+     file(path: string, options?: S3Options): S3File;
+
+     /**
+      * Writes data directly to a path in the bucket.
+      * Supports strings, buffers, streams, and web API types.
+      *
+      * @example
+      * // Write a string
+      * await bucket.write("hello.txt", "Hello World");
+      *
+      * // Write JSON with a type
+      * await bucket.write(
+      *   "data.json",
+      *   JSON.stringify({hello: "world"}),
+      *   {type: "application/json"}
+      * );
+      *
+      * // Write from fetch
+      * const res = await fetch("https://example.com/data");
+      * await bucket.write("data.bin", res);
+      *
+      * // Write with an ACL
+      * await bucket.write("public.html", html, {
+      *   acl: "public-read",
+      *   type: "text/html"
+      * });
+      */
+     write(
+       path: string,
+       data:
+         | string
+         | ArrayBufferView
+         | ArrayBuffer
+         | SharedArrayBuffer
+         | Request
+         | Response
+         | BunFile
+         | S3File
+         | Blob
+         | File,
+       options?: S3Options,
+     ): Promise<number>;
+
+     /**
+      * Generate a presigned URL for temporary access to a file.
+      * Useful for generating upload/download URLs without exposing credentials.
+      *
+      * @example
+      * // Download URL
+      * const downloadUrl = bucket.presign("file.pdf", {
+      *   expiresIn: 3600 // 1 hour
+      * });
+      *
+      * // Upload URL
+      * const uploadUrl = bucket.presign("uploads/image.jpg", {
+      *   method: "PUT",
+      *   expiresIn: 3600,
+      *   type: "image/jpeg",
+      *   acl: "public-read"
+      * });
+      *
+      * // Long-lived public URL
+      * const publicUrl = bucket.presign("public/doc.pdf", {
+      *   expiresIn: 7 * 24 * 60 * 60, // 7 days
+      *   acl: "public-read"
+      * });
+      */
+     presign(path: string, options?: S3FilePresignOptions): string;
+
+     /**
+      * Delete a file from the bucket.
+      *
+      * @example
+      * // Simple delete
+      * await bucket.unlink("old-file.txt");
+      *
+      * // With error handling
+      * try {
+      *   await bucket.unlink("file.dat");
+      *   console.log("File deleted");
+      * } catch (err) {
+      *   console.error("Delete failed:", err);
+      * }
+      */
+     unlink(path: string, options?: S3Options): Promise<void>;
+     delete: S3Client["unlink"];
+
+     /**
+      * Get the size of a file in bytes.
+      * Uses a HEAD request to efficiently get the size.
+      *
+      * @example
+      * // Get the size
+      * const bytes = await bucket.size("video.mp4");
+      * console.log(`Size: ${bytes} bytes`);
+      *
+      * // Check if the file is large
+      * if (await bucket.size("data.zip") > 100 * 1024 * 1024) {
+      *   console.log("File is larger than 100MB");
+      * }
+      */
+     size(path: string, options?: S3Options): Promise<number>;
+
+     /**
+      * Check if a file exists in the bucket.
+      * Uses a HEAD request to check existence.
+      *
+      * @example
+      * // Check existence
+      * if (await bucket.exists("config.json")) {
+      *   const file = bucket.file("config.json");
+      *   const config = await file.json();
+      * }
+      *
+      * // With error handling
+      * try {
+      *   if (!await bucket.exists("required.txt")) {
+      *     throw new Error("Required file missing");
+      *   }
+      * } catch (err) {
+      *   console.error("Check failed:", err);
+      * }
+      */
+     exists(path: string, options?: S3Options): Promise<boolean>;
+
+     /**
+      * Get the stat of a file in an S3-compatible storage service.
+      *
+      * @param path The path to the file.
+      * @param options The options to use for the S3 client.
+      */
+     stat(path: string, options?: S3Options): Promise<S3Stats>;
+   }
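
A sketch of the path-based client methods above, reusing one configured instance (credential values are placeholders):

  import { S3Client } from "bun";

  const bucket = new S3Client({
    bucket: "my-bucket",
    accessKeyId: "your-access-key",
    secretAccessKey: "your-secret-key",
  });

  await bucket.write("notes/today.txt", "Hello Bun!");
  console.log(await bucket.size("notes/today.txt")); // byte count via HEAD
  console.log(bucket.presign("notes/today.txt"));    // 1-day URL by default
  await bucket.unlink("notes/today.txt");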
+
+   /**
+    * A default instance of S3Client.
+    *
+    * Pulls credentials from environment variables. Use `new Bun.S3Client()` if you need to explicitly set credentials.
+    */
+   var s3: S3Client;
+ }
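
Finally, a sketch of the default `s3` instance, assuming `S3_BUCKET`, `S3_ACCESS_KEY_ID`, and `S3_SECRET_ACCESS_KEY` (or their `AWS_*` equivalents) are set in the environment:

  import { s3 } from "bun";

  if (!(await s3.exists("config.json"))) {
    await s3.write("config.json", JSON.stringify({ version: 1 }), {
      type: "application/json",
    });
  }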