bun-types 1.1.43-canary.20250103T140555 → 1.1.43-canary.20250104T140550

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bun.d.ts CHANGED
@@ -1257,45 +1257,326 @@ declare module "bun" {
      */
     unlink(): Promise<void>;
   }
+  interface NetworkSink extends FileSink {
+    /**
+     * Write a chunk of data to the network.
+     *
+     * If the network is not writable yet, the data is buffered.
+     */
+    write(
+      chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer,
+    ): number;
+    /**
+     * Flush the internal buffer, committing the data to the network.
+     */
+    flush(): number | Promise<number>;
+    /**
+     * Finish the upload. This also flushes the internal buffer.
+     */
+    end(error?: Error): number | Promise<number>;
+  }
+
+  type S3 = {
+    /**
+     * Create a new instance of an S3 bucket so that credentials can be managed
+     * from a single instance instead of being passed to every method.
+     *
+     * @param options The default options to use for the S3 client. Can be
+     * overridden by passing options to the methods.
+     *
+     * ## Keep S3 credentials in a single instance
+     *
+     * @example
+     * const bucket = new Bun.S3({
+     *   accessKeyId: "your-access-key",
+     *   secretAccessKey: "your-secret-key",
+     *   bucket: "my-bucket",
+     *   endpoint: "https://s3.us-east-1.amazonaws.com",
+     *   sessionToken: "your-session-token",
+     * });
+     *
+     * // S3Bucket is callable, so you can do this:
+     * const file = bucket("my-file.txt");
+     *
+     * // or this:
+     * await file.write("Hello Bun!");
+     * await file.text();
+     *
+     * // To delete the file:
+     * await bucket.delete("my-file.txt");
+     *
+     * // To write a file without returning the instance:
+     * await bucket.write("my-file.txt", "Hello Bun!");
+     *
+     */
+    new (options?: S3Options): S3Bucket;
+
+    /**
+     * Delete a file from an S3-compatible object storage service.
+     *
+     * @param path The path to the file.
+     * @param options The options to use for the S3 client.
+     *
+     * For an instance method version, see {@link S3File.unlink}. You can also use {@link S3Bucket.unlink}.
+     *
+     * @example
+     * import { S3 } from "bun";
+     * await S3.unlink("s3://my-bucket/my-file.txt", {
+     *   accessKeyId: "your-access-key",
+     *   secretAccessKey: "your-secret-key",
+     * });
+     *
+     * @example
+     * await S3.unlink("key", {
+     *   bucket: "my-bucket",
+     *   accessKeyId: "your-access-key",
+     *   secretAccessKey: "your-secret-key",
+     * });
+     */
+    delete(path: string, options?: S3Options): Promise<void>;
+    /**
+     * unlink is an alias for {@link S3.delete}
+     */
+    unlink: S3["delete"];
+
+    /**
+     * Writes data to an S3-compatible storage service.
+     * Supports various input types and handles large files with multipart uploads.
+     *
+     * @param path The path or key where the file will be written
+     * @param data The data to write
+     * @param options S3 configuration and upload options
+     * @returns Promise that resolves with the number of bytes written
+     *
+     * @example
+     * // Writing a string
+     * await S3.write("hello.txt", "Hello World!", {
+     *   bucket: "my-bucket",
+     *   type: "text/plain"
+     * });
+     *
+     * @example
+     * // Writing JSON
+     * await S3.write(
+     *   "data.json",
+     *   JSON.stringify({ hello: "world" }),
+     *   { type: "application/json" }
+     * );
+     *
+     * @example
+     * // Writing a large file with multipart upload
+     * await S3.write("large-file.dat", largeBuffer, {
+     *   partSize: 10 * 1024 * 1024, // 10MB parts
+     *   queueSize: 4, // Upload 4 parts in parallel
+     *   retry: 3 // Retry failed parts up to 3 times
+     * });
+     */
+    write(
+      path: string,
+      data:
+        | string
+        | ArrayBufferView
+        | ArrayBufferLike
+        | Response
+        | Request
+        | ReadableStream
+        | Blob
+        | File,
+      options?: S3Options,
+    ): Promise<number>;
+  };
+  var S3: S3;
+
+  /**
+   * Creates a new S3File instance for working with a single file.
+   *
+   * @param path The path or key of the file
+   * @param options S3 configuration options
+   * @returns `S3File` instance for the specified path
+   *
+   * @example
+   * import { s3 } from "bun";
+   * const file = s3("my-file.txt", {
+   *   bucket: "my-bucket",
+   *   accessKeyId: "your-access-key",
+   *   secretAccessKey: "your-secret-key"
+   * });
+   *
+   * // Read the file
+   * const content = await file.text();
+   *
+   * @example
+   * // Using s3:// protocol
+   * const file = s3("s3://my-bucket/my-file.txt", {
+   *   accessKeyId: "your-access-key",
+   *   secretAccessKey: "your-secret-key"
+   * });
+   */
+  function s3(path: string | URL, options?: S3Options): S3File;
+
+  /**
+   * Configuration options for S3 operations
+   */
+  interface S3Options extends BlobPropertyBag {
+    /**
+     * The Access Control List (ACL) policy for the file.
+     * Controls who can access the file and what permissions they have.
+     *
+     * @example
+     * // Setting public read access
+     * const file = s3("public-file.txt", {
+     *   acl: "public-read",
+     *   bucket: "my-bucket"
+     * });
+     *
+     * @example
+     * // Using with presigned URLs
+     * const url = file.presign({
+     *   acl: "public-read",
+     *   expiresIn: 3600
+     * });
+     */
+    acl?:
+      | "private"
+      | "public-read"
+      | "public-read-write"
+      | "aws-exec-read"
+      | "authenticated-read"
+      | "bucket-owner-read"
+      | "bucket-owner-full-control"
+      | "log-delivery-write";
 
-  interface S3FileOptions extends BlobPropertyBag {
     /**
-     * The bucket to use for the S3 client. by default will use the `S3_BUCKET` and `AWS_BUCKET` environment variable, or deduce as first part of the path.
+     * The S3 bucket name. Can be set via `S3_BUCKET` or `AWS_BUCKET` environment variables.
+     *
+     * @example
+     * // Using explicit bucket
+     * const file = s3("my-file.txt", { bucket: "my-bucket" });
+     *
+     * @example
+     * // Using environment variables
+     * // With S3_BUCKET=my-bucket in .env
+     * const file = s3("my-file.txt");
      */
     bucket?: string;
+
     /**
-     * The region to use for the S3 client. By default, it will use the `S3_REGION` and `AWS_REGION` environment variable.
+     * The AWS region. Can be set via `S3_REGION` or `AWS_REGION` environment variables.
+     *
+     * @example
+     * const file = s3("my-file.txt", {
+     *   bucket: "my-bucket",
+     *   region: "us-west-2"
+     * });
      */
     region?: string;
+
     /**
-     * The access key ID to use for the S3 client. By default, it will use the `S3_ACCESS_KEY_ID` and `AWS_ACCESS_KEY_ID` environment variable.
+     * The access key ID for authentication.
+     * Can be set via `S3_ACCESS_KEY_ID` or `AWS_ACCESS_KEY_ID` environment variables.
      */
     accessKeyId?: string;
+
     /**
-     * The secret access key to use for the S3 client. By default, it will use the `S3_SECRET_ACCESS_KEY and `AWS_SECRET_ACCESS_KEY` environment variable.
+     * The secret access key for authentication.
+     * Can be set via `S3_SECRET_ACCESS_KEY` or `AWS_SECRET_ACCESS_KEY` environment variables.
      */
     secretAccessKey?: string;
 
     /**
-     * The endpoint to use for the S3 client. Defaults to `https://s3.{region}.amazonaws.com`, it will also use the `S3_ENDPOINT` and `AWS_ENDPOINT` environment variable.
+     * Optional session token for temporary credentials.
+     * Can be set via `S3_SESSION_TOKEN` or `AWS_SESSION_TOKEN` environment variables.
+     *
+     * @example
+     * // Using temporary credentials
+     * const file = s3("my-file.txt", {
+     *   accessKeyId: tempAccessKey,
+     *   secretAccessKey: tempSecretKey,
+     *   sessionToken: tempSessionToken
+     * });
+     */
+    sessionToken?: string;
+
+    /**
+     * The S3-compatible service endpoint URL.
+     * Can be set via `S3_ENDPOINT` or `AWS_ENDPOINT` environment variables.
+     *
+     * @example
+     * // AWS S3
+     * const file = s3("my-file.txt", {
+     *   endpoint: "https://s3.us-east-1.amazonaws.com"
+     * });
+     *
+     * @example
+     * // Cloudflare R2
+     * const file = s3("my-file.txt", {
+     *   endpoint: "https://<account-id>.r2.cloudflarestorage.com"
+     * });
+     *
+     * @example
+     * // DigitalOcean Spaces
+     * const file = s3("my-file.txt", {
+     *   endpoint: "https://<region>.digitaloceanspaces.com"
+     * });
+     *
+     * @example
+     * // MinIO (local development)
+     * const file = s3("my-file.txt", {
+     *   endpoint: "http://localhost:9000"
+     * });
      */
     endpoint?: string;
 
     /**
-     * The size of each part in MiB. Minimum and Default is 5 MiB and maximum is 5120 MiB.
+     * The size of each part in multipart uploads (in MiB).
+     * - Minimum: 5 MiB
+     * - Maximum: 5120 MiB
+     * - Default: 5 MiB
+     *
+     * @example
+     * // Configuring multipart uploads
+     * const file = s3("large-file.dat", {
+     *   partSize: 10, // 10 MiB parts
+     *   queueSize: 4 // Upload 4 parts in parallel
+     * });
+     *
+     * const writer = file.writer();
+     * // ... write large file in chunks
      */
     partSize?: number;
+
     /**
-     * The number of parts to upload in parallel. Default is 5 and maximum is 255. This can speed up the upload of large files but will also use more memory.
+     * Number of parts to upload in parallel for multipart uploads.
+     * - Default: 5
+     * - Maximum: 255
+     *
+     * Increasing this value can improve upload speeds for large files
+     * but will use more memory.
      */
     queueSize?: number;
+
     /**
-     * The number of times to retry the upload if it fails. Default is 3 and maximum is 255.
+     * Number of retry attempts for failed uploads.
+     * - Default: 3
+     * - Maximum: 255
+     *
+     * @example
+     * // Setting retry attempts
+     * const file = s3("my-file.txt", {
+     *   retry: 5 // Retry failed uploads up to 5 times
+     * });
      */
     retry?: number;
 
     /**
-     * The Content-Type of the file. If not provided, it is automatically set based on the file extension when possible.
+     * The Content-Type of the file.
+     * Automatically set based on file extension when possible.
+     *
+     * @example
+     * // Setting explicit content type
+     * const file = s3("data.bin", {
+     *   type: "application/octet-stream"
+     * });
      */
     type?: string;
 
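
A minimal usage sketch of the client-style API added in this hunk; the bucket name, endpoint, and credentials below are placeholders, not values the package prescribes:

    import { s3, S3 } from "bun";

    // Construct a client once; per-call options can override these defaults.
    const bucket = new S3({
      bucket: "my-bucket",
      endpoint: "http://localhost:9000", // e.g. a local MinIO instance
      accessKeyId: "your-access-key",
      secretAccessKey: "your-secret-key",
    });

    // The standalone s3() helper accepts the same S3Options.
    const file = s3("s3://my-bucket/greeting.txt", { region: "us-east-1" });
    await file.write("Hello Bun!");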
@@ -1305,93 +1586,253 @@ declare module "bun" {
     highWaterMark?: number;
   }
 
-  interface S3FilePresignOptions extends S3FileOptions {
+  /**
+   * Options for generating presigned URLs
+   */
+  interface S3FilePresignOptions extends S3Options {
     /**
-     * The number of seconds the presigned URL will be valid for. Defaults to 86400 (1 day).
+     * Number of seconds until the presigned URL expires.
+     * - Default: 86400 (1 day)
+     *
+     * @example
+     * // Short-lived URL
+     * const url = file.presign({
+     *   expiresIn: 3600 // 1 hour
+     * });
+     *
+     * @example
+     * // Long-lived public URL
+     * const url = file.presign({
+     *   expiresIn: 7 * 24 * 60 * 60, // 7 days
+     *   acl: "public-read"
+     * });
      */
     expiresIn?: number;
+
     /**
-     * The HTTP method to use for the presigned URL. Defaults to GET.
+     * The HTTP method allowed for the presigned URL.
+     *
+     * @example
+     * // GET URL for downloads
+     * const downloadUrl = file.presign({
+     *   method: "GET",
+     *   expiresIn: 3600
+     * });
+     *
+     * @example
+     * // PUT URL for uploads
+     * const uploadUrl = file.presign({
+     *   method: "PUT",
+     *   expiresIn: 3600,
+     *   type: "application/json"
+     * });
      */
-    method?: string;
+    method?: "GET" | "POST" | "PUT" | "DELETE" | "HEAD";
   }
 
-  interface S3File extends BunFile {
-    /**
-     * @param path - The path to the file. If bucket options is not provided or set in the path, it will be deduced from the path.
-     * @param options - The options to use for the S3 client.
-     */
-    new (path: string | URL, options?: S3FileOptions): S3File;
+  /**
+   * Represents a file in an S3-compatible storage service.
+   * Extends the Blob interface for compatibility with web APIs.
+   */
+  interface S3File extends Blob {
     /**
      * The size of the file in bytes.
-     */
-    size: Promise<number>;
-    /**
-     * Offset any operation on the file starting at `begin` and ending at `end`. `end` is relative to 0
-     *
-     * Similar to [`TypedArray.subarray`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray/subarray). Does not copy the file, open the file, or modify the file.
+     * This is a Promise because it requires a network request to determine the size.
      *
-     * It will use [`range`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range) to download only the bytes you need.
+     * @example
+     * // Getting file size
+     * const size = await file.size;
+     * console.log(`File size: ${size} bytes`);
      *
-     * @param begin - start offset in bytes
-     * @param end - absolute offset in bytes (relative to 0)
-     * @param contentType - MIME type for the new S3File
+     * @example
+     * // Check if file is larger than 1MB
+     * if (await file.size > 1024 * 1024) {
+     *   console.log("Large file detected");
+     * }
      */
-    slice(begin?: number, end?: number, contentType?: string): S3File;
+    /**
+     * TODO: figure out how to get the typescript types to not error for this property.
+     */
+    // size: Promise<number>;
 
-    /** */
     /**
-     * Offset any operation on the file starting at `begin`
+     * Creates a new S3File representing a slice of the original file.
+     * Uses HTTP Range headers for efficient partial downloads.
      *
-     * Similar to [`TypedArray.subarray`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray/subarray). Does not copy the file, open the file, or modify the file.
+     * @param begin - Starting byte offset
+     * @param end - Ending byte offset (exclusive)
+     * @param contentType - Optional MIME type for the slice
+     * @returns A new S3File representing the specified range
      *
-     * It will use [`range`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range) to download only the bytes you need.
+     * @example
+     * // Reading file header
+     * const header = file.slice(0, 1024);
+     * const headerText = await header.text();
      *
-     * @param begin - start offset in bytes
-     * @param contentType - MIME type for the new S3File
+     * @example
+     * // Reading with content type
+     * const jsonSlice = file.slice(1024, 2048, "application/json");
+     * const data = await jsonSlice.json();
+     *
+     * @example
+     * // Reading from offset to end
+     * const remainder = file.slice(1024);
+     * const content = await remainder.text();
      */
+    slice(begin?: number, end?: number, contentType?: string): S3File;
     slice(begin?: number, contentType?: string): S3File;
-
-    /**
-     * @param contentType - MIME type for the new S3File
-     */
     slice(contentType?: string): S3File;
 
     /**
-     * Incremental writer to stream writes to S3, this is equivalent of using MultipartUpload and is suitable for large files.
+     * Creates a writable stream for uploading data.
+     * Suitable for large files as it uses multipart upload.
+     *
+     * @param options - Configuration for the upload
+     * @returns A NetworkSink for writing data
+     *
+     * @example
+     * // Basic streaming write
+     * const writer = file.writer({
+     *   type: "application/json"
+     * });
+     * writer.write('{"hello": ');
+     * writer.write('"world"}');
+     * await writer.end();
+     *
+     * @example
+     * // Optimized large file upload
+     * const writer = file.writer({
+     *   partSize: 10 * 1024 * 1024, // 10MB parts
+     *   queueSize: 4, // Upload 4 parts in parallel
+     *   retry: 3 // Retry failed parts
+     * });
+     *
+     * // Write large chunks of data efficiently
+     * for (const chunk of largeDataChunks) {
+     *   await writer.write(chunk);
+     * }
+     * await writer.end();
+     *
+     * @example
+     * // Error handling
+     * const writer = file.writer();
+     * try {
+     *   await writer.write(data);
+     *   await writer.end();
+     * } catch (err) {
+     *   console.error('Upload failed:', err);
+     *   // Writer will automatically abort multipart upload on error
+     * }
      */
-    writer(options?: S3FileOptions): FileSink;
+    writer(options?: S3Options): NetworkSink;
 
     /**
-     * The readable stream of the file.
+     * Gets a readable stream of the file's content.
+     * Useful for processing large files without loading them entirely into memory.
+     *
+     * @returns A ReadableStream for the file content
+     *
+     * @example
+     * // Basic streaming read
+     * const stream = file.stream();
+     * for await (const chunk of stream) {
+     *   console.log('Received chunk:', chunk);
+     * }
+     *
+     * @example
+     * // Piping to response
+     * const stream = file.stream();
+     * return new Response(stream, {
+     *   headers: { 'Content-Type': file.type }
+     * });
+     *
+     * @example
+     * // Processing large files
+     * const stream = file.stream();
+     * const textDecoder = new TextDecoder();
+     * for await (const chunk of stream) {
+     *   const text = textDecoder.decode(chunk);
+     *   // Process text chunk by chunk
+     * }
      */
     readonly readable: ReadableStream;
-
-    /**
-     * Get a readable stream of the file.
-     */
     stream(): ReadableStream;
 
     /**
-     * The name or path of the file, as specified in the constructor.
+     * The name or path of the file in the bucket.
+     *
+     * @example
+     * const file = s3("folder/image.jpg");
+     * console.log(file.name); // "folder/image.jpg"
      */
     readonly name?: string;
 
     /**
-     * The bucket name of the file.
+     * The bucket name containing the file.
+     *
+     * @example
+     * const file = s3("s3://my-bucket/file.txt");
+     * console.log(file.bucket); // "my-bucket"
      */
     readonly bucket?: string;
 
     /**
-     * Does the file exist?
-     * It will use [`head`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/HEAD) to check if the file exists.
+     * Checks if the file exists in S3.
+     * Uses HTTP HEAD request to efficiently check existence without downloading.
+     *
+     * @returns Promise resolving to true if file exists, false otherwise
+     *
+     * @example
+     * // Basic existence check
+     * if (await file.exists()) {
+     *   console.log("File exists in S3");
+     * }
+     *
+     * @example
+     * // With error handling
+     * try {
+     *   const exists = await file.exists();
+     *   if (!exists) {
+     *     console.log("File not found");
+     *   }
+     * } catch (err) {
+     *   console.error("Error checking file:", err);
+     * }
      */
     exists(): Promise<boolean>;
 
     /**
-     * Uploads the data to S3. This is equivalent of using {@link S3File.upload} with a {@link S3File}.
-     * @param data - The data to write.
-     * @param options - The options to use for the S3 client.
+     * Uploads data to S3.
+     * Supports various input types and automatically handles large files.
+     *
+     * @param data - The data to upload
+     * @param options - Upload configuration options
+     * @returns Promise resolving to number of bytes written
+     *
+     * @example
+     * // Writing string data
+     * await file.write("Hello World", {
+     *   type: "text/plain"
+     * });
+     *
+     * @example
+     * // Writing JSON
+     * const data = { hello: "world" };
+     * await file.write(JSON.stringify(data), {
+     *   type: "application/json"
+     * });
+     *
+     * @example
+     * // Writing from Response
+     * const response = await fetch("https://example.com/data");
+     * await file.write(response);
+     *
+     * @example
+     * // Writing with ACL
+     * await file.write(data, {
+     *   acl: "public-read",
+     *   type: "application/octet-stream"
+     * });
      */
     write(
       data:
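
A hedged sketch of presigning under the typed method union introduced in this hunk; the key and expiry below are illustrative only:

    import { s3 } from "bun";

    const file = s3("uploads/report.pdf", { bucket: "my-bucket" });

    // presign() is synchronous and returns the URL as a string.
    const uploadUrl = file.presign({
      method: "PUT",      // now checked against "GET" | "POST" | "PUT" | "DELETE" | "HEAD"
      expiresIn: 15 * 60, // 15 minutes
      type: "application/pdf",
    });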
@@ -1404,29 +1845,133 @@ declare module "bun" {
         | BunFile
         | S3File
         | Blob,
-      options?: S3FileOptions,
+      options?: S3Options,
     ): Promise<number>;
 
     /**
-     * Returns a presigned URL for the file.
-     * @param options - The options to use for the presigned URL.
+     * Generates a presigned URL for the file.
+     * Allows temporary access to the file without exposing credentials.
+     *
+     * @param options - Configuration for the presigned URL
+     * @returns Presigned URL string
+     *
+     * @example
+     * // Basic download URL
+     * const url = file.presign({
+     *   expiresIn: 3600 // 1 hour
+     * });
+     *
+     * @example
+     * // Upload URL with specific content type
+     * const uploadUrl = file.presign({
+     *   method: "PUT",
+     *   expiresIn: 3600,
+     *   type: "image/jpeg",
+     *   acl: "public-read"
+     * });
+     *
+     * @example
+     * // URL with custom permissions
+     * const url = file.presign({
+     *   method: "GET",
+     *   expiresIn: 7 * 24 * 60 * 60, // 7 days
+     *   acl: "public-read"
+     * });
      */
     presign(options?: S3FilePresignOptions): string;
 
     /**
      * Deletes the file from S3.
+     *
+     * @returns Promise that resolves when deletion is complete
+     *
+     * @example
+     * // Basic deletion
+     * await file.delete();
+     *
+     * @example
+     * // With error handling
+     * try {
+     *   await file.delete();
+     *   console.log("File deleted successfully");
+     * } catch (err) {
+     *   console.error("Failed to delete file:", err);
+     * }
      */
-    unlink(): Promise<void>;
+    delete(): Promise<void>;
+
+    /**
+     * Alias for delete() method.
+     * Provided for compatibility with Node.js fs API naming.
+     *
+     * @example
+     * await file.unlink();
+     */
+    unlink: S3File["delete"];
   }
 
-  namespace S3File {
+  /**
+   * A configured S3 bucket instance for managing files.
+   * The instance is callable to create S3File instances and provides methods
+   * for common operations.
+   *
+   * @example
+   * // Basic bucket setup
+   * const bucket = new S3({
+   *   bucket: "my-bucket",
+   *   accessKeyId: "key",
+   *   secretAccessKey: "secret"
+   * });
+   *
+   * // Get file instance
+   * const file = bucket("image.jpg");
+   *
+   * // Common operations
+   * await bucket.write("data.json", JSON.stringify({hello: "world"}));
+   * const url = bucket.presign("file.pdf");
+   * await bucket.unlink("old.txt");
+   */
+  type S3Bucket = {
     /**
-     * Uploads the data to S3.
-     * @param data - The data to write.
-     * @param options - The options to use for the S3 client.
+     * Creates an S3File instance for the given path.
+     *
+     * @example
+     * const file = bucket("image.jpg");
+     * await file.write(imageData);
+     * const configFile = bucket("config.json", {
+     *   type: "application/json",
+     *   acl: "private"
+     * });
      */
-    function upload(
-      path: string | S3File,
+    (path: string, options?: S3Options): S3File;
+
+    /**
+     * Writes data directly to a path in the bucket.
+     * Supports strings, buffers, streams, and web API types.
+     *
+     * @example
+     * // Write string
+     * await bucket.write("hello.txt", "Hello World");
+     *
+     * // Write JSON with type
+     * await bucket.write(
+     *   "data.json",
+     *   JSON.stringify({hello: "world"}),
+     *   {type: "application/json"}
+     * );
+     *
+     * // Write from fetch
+     * const res = await fetch("https://example.com/data");
+     * await bucket.write("data.bin", res);
+     *
+     * // Write with ACL
+     * await bucket.write("public.html", html, {
+     *   acl: "public-read",
+     *   type: "text/html"
+     * });
+     */
+    write(
+      path: string,
       data:
         | string
         | ArrayBufferView
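
A short sketch of the NetworkSink returned by S3File.writer() as declared earlier in this diff; the chunk contents and tuning values are illustrative:

    import { s3 } from "bun";

    const file = s3("logs/app.log", { bucket: "my-bucket" });
    const sink = file.writer({ queueSize: 4, retry: 3 });

    sink.write("first line\n");  // buffered until the sink is writable
    sink.write("second line\n");
    await sink.flush();          // commit buffered data to the network
    await sink.end();            // finish the multipart upload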
@@ -1435,43 +1980,93 @@ declare module "bun" {
         | Request
         | Response
         | BunFile
-        | S3File,
-      options?: S3FileOptions,
+        | S3File
+        | Blob
+        | File,
+      options?: S3Options,
     ): Promise<number>;
 
     /**
-     * Returns a presigned URL for the file.
-     * @param options - The options to use for the presigned URL.
+     * Generate a presigned URL for temporary access to a file.
+     * Useful for generating upload/download URLs without exposing credentials.
+     *
+     * @example
+     * // Download URL
+     * const downloadUrl = bucket.presign("file.pdf", {
+     *   expiresIn: 3600 // 1 hour
+     * });
+     *
+     * // Upload URL
+     * const uploadUrl = bucket.presign("uploads/image.jpg", {
+     *   method: "PUT",
+     *   expiresIn: 3600,
+     *   type: "image/jpeg",
+     *   acl: "public-read"
+     * });
+     *
+     * // Long-lived public URL
+     * const publicUrl = bucket.presign("public/doc.pdf", {
+     *   expiresIn: 7 * 24 * 60 * 60, // 7 days
+     *   acl: "public-read"
+     * });
      */
-    function presign(
-      path: string | S3File,
-      options?: S3FilePresignOptions,
-    ): string;
+    presign(path: string, options?: S3FilePresignOptions): string;
 
     /**
-     * Deletes the file from S3.
+     * Delete a file from the bucket.
+     *
+     * @example
+     * // Simple delete
+     * await bucket.unlink("old-file.txt");
+     *
+     * // With error handling
+     * try {
+     *   await bucket.unlink("file.dat");
+     *   console.log("File deleted");
+     * } catch (err) {
+     *   console.error("Delete failed:", err);
+     * }
      */
-    function unlink(
-      path: string | S3File,
-      options?: S3FileOptions,
-    ): Promise<void>;
+    unlink(path: string, options?: S3Options): Promise<void>;
 
     /**
-     * The size of the file in bytes.
+     * Get the size of a file in bytes.
+     * Uses HEAD request to efficiently get size.
+     *
+     * @example
+     * // Get size
+     * const bytes = await bucket.size("video.mp4");
+     * console.log(`Size: ${bytes} bytes`);
+     *
+     * // Check if file is large
+     * if (await bucket.size("data.zip") > 100 * 1024 * 1024) {
+     *   console.log("File is larger than 100MB");
+     * }
      */
-    function size(
-      path: string | S3File,
-      options?: S3FileOptions,
-    ): Promise<number>;
+    size(path: string, options?: S3Options): Promise<number>;
 
     /**
-     * The size of the file in bytes.
+     * Check if a file exists in the bucket.
+     * Uses HEAD request to check existence.
+     *
+     * @example
+     * // Check existence
+     * if (await bucket.exists("config.json")) {
+     *   const file = bucket("config.json");
+     *   const config = await file.json();
+     * }
+     *
+     * // With error handling
+     * try {
+     *   if (!await bucket.exists("required.txt")) {
+     *     throw new Error("Required file missing");
+     *   }
+     * } catch (err) {
+     *   console.error("Check failed:", err);
+     * }
      */
-    function exists(
-      path: string | S3File,
-      options?: S3FileOptions,
-    ): Promise<boolean>;
-  }
+    exists(path: string, options?: S3Options): Promise<boolean>;
+  };
 
   /**
    * This lets you use macros as regular imports
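
A sketch of the callable S3Bucket type defined in this hunk; the object key and payload are placeholders:

    import { S3 } from "bun";

    const bucket = new S3({ bucket: "my-bucket" }); // credentials may come from env vars

    await bucket.write("config.json", JSON.stringify({ debug: true }), {
      type: "application/json",
    });

    if (await bucket.exists("config.json")) {
      const file = bucket("config.json"); // the instance itself is callable
      console.log(await file.json(), await bucket.size("config.json"));
    }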
@@ -3417,17 +4012,6 @@ declare module "bun" {
   // tslint:disable-next-line:unified-signatures
   function file(fileDescriptor: number, options?: BlobPropertyBag): BunFile;
 
-  /**
-   * Lazily load/upload a file from S3.
-   * @param path - The path to the file. If bucket options is not provided or set in the path, it will be deduced from the path.
-   * @param options - The options to use for the S3 client.
-   */
-  function s3(path: string | URL, options?: S3FileOptions): S3File;
-  /**
-   * The S3 file class.
-   */
-  const S3: typeof S3File;
-
   /**
    * Allocate a new [`Uint8Array`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Uint8Array) without zeroing the bytes.
    *
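
Taken together with the earlier hunks, these removals complete a rename rather than a deletion: `S3FileOptions` becomes `S3Options`, the `namespace S3File` statics move onto the `S3` client, and `const S3: typeof S3File` is replaced by the constructible `var S3: S3`. A hedged migration sketch, assuming credentials are supplied via environment variables:

    import { S3 } from "bun";

    // Before (20250103 build): namespace-style statics, e.g.
    //   await S3File.upload("s3://my-bucket/a.txt", "hi");
    // After (20250104 build): the equivalent static on the new S3 client.
    await S3.write("s3://my-bucket/a.txt", "hi");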