bun-types 1.1.42 → 1.1.43-canary.20250104T140550
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bun.d.ts +873 -7
- package/docs/api/cc.md +3 -3
- package/docs/api/s3.md +549 -0
- package/docs/bundler/html.md +110 -0
- package/docs/bundler/loaders.md +76 -1
- package/docs/guides/ecosystem/nextjs.md +8 -0
- package/docs/install/cache.md +1 -1
- package/docs/install/index.md +7 -1
- package/docs/install/lockfile.md +14 -2
- package/docs/runtime/nodejs-apis.md +3 -3
- package/globals.d.ts +6 -0
- package/html-rewriter.d.ts +2 -0
- package/package.json +1 -1
package/bun.d.ts
CHANGED
@@ -539,7 +539,7 @@ declare module "bun" {
    */
   // tslint:disable-next-line:unified-signatures
   function write(
-    destination: BunFile | Bun.PathLike,
+    destination: BunFile | S3File | Bun.PathLike,
     input: Blob | NodeJS.TypedArray | ArrayBufferLike | string | Bun.BlobPart[],
     options?: {
       /** If writing to a PathLike, set the permissions of the file. */
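The only change in this hunk is that `Bun.write` now also accepts an `S3File` destination. A minimal sketch of what that enables, assuming the bucket name and key are placeholders and that credentials come from the `S3_*`/`AWS_*` environment variables described later in this diff:

```ts
import { s3, write } from "bun";

// The destination is an S3File, so Bun.write performs an S3 upload
// instead of a local file write.
const destination = s3("s3://my-bucket/hello.txt"); // placeholder bucket/key
const bytes = await write(destination, "Hello from Bun!");
console.log(`uploaded ${bytes} bytes`);
```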
@@ -1234,8 +1234,840 @@ declare module "bun" {
      * For empty Blob, this always returns true.
      */
     exists(): Promise<boolean>;
+
+    /**
+     * Write data to the file. This is equivalent to using {@link Bun.write} with a {@link BunFile}.
+     * @param data - The data to write.
+     * @param options - The options to use for the write.
+     */
+    write(
+      data:
+        | string
+        | ArrayBufferView
+        | ArrayBuffer
+        | SharedArrayBuffer
+        | Request
+        | Response
+        | BunFile,
+      options?: { highWaterMark?: number },
+    ): Promise<number>;
+
+    /**
+     * Deletes the file.
+     */
+    unlink(): Promise<void>;
+  }
+  interface NetworkSink extends FileSink {
+    /**
+     * Write a chunk of data to the network.
+     *
+     * If the network is not writable yet, the data is buffered.
+     */
+    write(
+      chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer,
+    ): number;
+    /**
+     * Flush the internal buffer, committing the data to the network.
+     */
+    flush(): number | Promise<number>;
+    /**
+     * Finish the upload. This also flushes the internal buffer.
+     */
+    end(error?: Error): number | Promise<number>;
+  }
+
+  type S3 = {
+    /**
+     * Create a new instance of an S3 bucket so that credentials can be managed
+     * from a single instance instead of being passed to every method.
+     *
+     * @param options The default options to use for the S3 client. Can be
+     * overriden by passing options to the methods.
+     *
+     * ## Keep S3 credentials in a single instance
+     *
+     * @example
+     * const bucket = new Bun.S3({
+     *   accessKeyId: "your-access-key",
+     *   secretAccessKey: "your-secret-key",
+     *   bucket: "my-bucket",
+     *   endpoint: "https://s3.us-east-1.amazonaws.com",
+     *   sessionToken: "your-session-token",
+     * });
+     *
+     * // S3Bucket is callable, so you can do this:
+     * const file = bucket("my-file.txt");
+     *
+     * // or this:
+     * await file.write("Hello Bun!");
+     * await file.text();
+     *
+     * // To delete the file:
+     * await bucket.delete("my-file.txt");
+     *
+     * // To write a file without returning the instance:
+     * await bucket.write("my-file.txt", "Hello Bun!");
+     *
+     */
+    new (options?: S3Options): S3Bucket;
+
+    /**
+     * Delete a file from an S3-compatible object storage service.
+     *
+     * @param path The path to the file.
+     * @param options The options to use for the S3 client.
+     *
+     * For an instance method version, {@link S3File.unlink}. You can also use {@link S3Bucket.unlink}.
+     *
+     * @example
+     * import { S3 } from "bun";
+     * await S3.unlink("s3://my-bucket/my-file.txt", {
+     *   accessKeyId: "your-access-key",
+     *   secretAccessKey: "your-secret-key",
+     * });
+     *
+     * @example
+     * await S3.unlink("key", {
+     *   bucket: "my-bucket",
+     *   accessKeyId: "your-access-key",
+     *   secretAccessKey: "your-secret-key",
+     * });
+     */
+    delete(path: string, options?: S3Options): Promise<void>;
+    /**
+     * unlink is an alias for {@link S3.delete}
+     */
+    unlink: S3["delete"];
+
+    /**
+     * Writes data to an S3-compatible storage service.
+     * Supports various input types and handles large files with multipart uploads.
+     *
+     * @param path The path or key where the file will be written
+     * @param data The data to write
+     * @param options S3 configuration and upload options
+     * @returns promise that resolves with the number of bytes written
+     *
+     * @example
+     * // Writing a string
+     * await S3.write("hello.txt", "Hello World!", {
+     *   bucket: "my-bucket",
+     *   type: "text/plain"
+     * });
+     *
+     * @example
+     * // Writing JSON
+     * await S3.write(
+     *   "data.json",
+     *   JSON.stringify({ hello: "world" }),
+     *   { type: "application/json" }
+     * );
+     *
+     * @example
+     * // Writing a large file with multipart upload
+     * await S3.write("large-file.dat", largeBuffer, {
+     *   partSize: 10 * 1024 * 1024, // 10MB parts
+     *   queueSize: 4, // Upload 4 parts in parallel
+     *   retry: 3 // Retry failed parts up to 3 times
+     * });
+     */
+    write(
+      path: string,
+      data:
+        | string
+        | ArrayBufferView
+        | ArrayBufferLike
+        | Response
+        | Request
+        | ReadableStream
+        | Blob
+        | File,
+      options?: S3Options,
+    ): Promise<number>;
+  };
+  var S3: S3;
+
+  /**
+   * Creates a new S3File instance for working with a single file.
+   *
+   * @param path The path or key of the file
+   * @param options S3 configuration options
+   * @returns `S3File` instance for the specified path
+   *
+   * @example
+   * import { s3 } from "bun";
+   * const file = s3("my-file.txt", {
+   *   bucket: "my-bucket",
+   *   accessKeyId: "your-access-key",
+   *   secretAccessKey: "your-secret-key"
+   * });
+   *
+   * // Read the file
+   * const content = await file.text();
+   *
+   * @example
+   * // Using s3:// protocol
+   * const file = s3("s3://my-bucket/my-file.txt", {
+   *   accessKeyId: "your-access-key",
+   *   secretAccessKey: "your-secret-key"
+   * });
+   */
+  function s3(path: string | URL, options?: S3Options): S3File;
+
+  /**
+   * Configuration options for S3 operations
+   */
+  interface S3Options extends BlobPropertyBag {
+    /**
+     * The Access Control List (ACL) policy for the file.
+     * Controls who can access the file and what permissions they have.
+     *
+     * @example
+     * // Setting public read access
+     * const file = s3("public-file.txt", {
+     *   acl: "public-read",
+     *   bucket: "my-bucket"
+     * });
+     *
+     * @example
+     * // Using with presigned URLs
+     * const url = file.presign({
+     *   acl: "public-read",
+     *   expiresIn: 3600
+     * });
+     */
+    acl?:
+      | "private"
+      | "public-read"
+      | "public-read-write"
+      | "aws-exec-read"
+      | "authenticated-read"
+      | "bucket-owner-read"
+      | "bucket-owner-full-control"
+      | "log-delivery-write";
+
+    /**
+     * The S3 bucket name. Can be set via `S3_BUCKET` or `AWS_BUCKET` environment variables.
+     *
+     * @example
+     * // Using explicit bucket
+     * const file = s3("my-file.txt", { bucket: "my-bucket" });
+     *
+     * @example
+     * // Using environment variables
+     * // With S3_BUCKET=my-bucket in .env
+     * const file = s3("my-file.txt");
+     */
+    bucket?: string;
+
+    /**
+     * The AWS region. Can be set via `S3_REGION` or `AWS_REGION` environment variables.
+     *
+     * @example
+     * const file = s3("my-file.txt", {
+     *   bucket: "my-bucket",
+     *   region: "us-west-2"
+     * });
+     */
+    region?: string;
+
+    /**
+     * The access key ID for authentication.
+     * Can be set via `S3_ACCESS_KEY_ID` or `AWS_ACCESS_KEY_ID` environment variables.
+     */
+    accessKeyId?: string;
+
+    /**
+     * The secret access key for authentication.
+     * Can be set via `S3_SECRET_ACCESS_KEY` or `AWS_SECRET_ACCESS_KEY` environment variables.
+     */
+    secretAccessKey?: string;
+
+    /**
+     * Optional session token for temporary credentials.
+     * Can be set via `S3_SESSION_TOKEN` or `AWS_SESSION_TOKEN` environment variables.
+     *
+     * @example
+     * // Using temporary credentials
+     * const file = s3("my-file.txt", {
+     *   accessKeyId: tempAccessKey,
+     *   secretAccessKey: tempSecretKey,
+     *   sessionToken: tempSessionToken
+     * });
+     */
+    sessionToken?: string;
+
+    /**
+     * The S3-compatible service endpoint URL.
+     * Can be set via `S3_ENDPOINT` or `AWS_ENDPOINT` environment variables.
+     *
+     * @example
+     * // AWS S3
+     * const file = s3("my-file.txt", {
+     *   endpoint: "https://s3.us-east-1.amazonaws.com"
+     * });
+     *
+     * @example
+     * // Cloudflare R2
+     * const file = s3("my-file.txt", {
+     *   endpoint: "https://<account-id>.r2.cloudflarestorage.com"
+     * });
+     *
+     * @example
+     * // DigitalOcean Spaces
+     * const file = s3("my-file.txt", {
+     *   endpoint: "https://<region>.digitaloceanspaces.com"
+     * });
+     *
+     * @example
+     * // MinIO (local development)
+     * const file = s3("my-file.txt", {
+     *   endpoint: "http://localhost:9000"
+     * });
+     */
+    endpoint?: string;
+
+    /**
+     * The size of each part in multipart uploads (in MiB).
+     * - Minimum: 5 MiB
+     * - Maximum: 5120 MiB
+     * - Default: 5 MiB
+     *
+     * @example
+     * // Configuring multipart uploads
+     * const file = s3("large-file.dat", {
+     *   partSize: 10, // 10 MiB parts
+     *   queueSize: 4 // Upload 4 parts in parallel
+     * });
+     *
+     * const writer = file.writer();
+     * // ... write large file in chunks
+     */
+    partSize?: number;
+
+    /**
+     * Number of parts to upload in parallel for multipart uploads.
+     * - Default: 5
+     * - Maximum: 255
+     *
+     * Increasing this value can improve upload speeds for large files
+     * but will use more memory.
+     */
+    queueSize?: number;
+
+    /**
+     * Number of retry attempts for failed uploads.
+     * - Default: 3
+     * - Maximum: 255
+     *
+     * @example
+     * // Setting retry attempts
+     * const file = s3("my-file.txt", {
+     *   retry: 5 // Retry failed uploads up to 5 times
+     * });
+     */
+    retry?: number;
+
+    /**
+     * The Content-Type of the file.
+     * Automatically set based on file extension when possible.
+     *
+     * @example
+     * // Setting explicit content type
+     * const file = s3("data.bin", {
+     *   type: "application/octet-stream"
+     * });
+     */
+    type?: string;
+
+    /**
+     * @deprecated The size of the internal buffer in bytes. Defaults to 5 MiB. use `partSize` and `queueSize` instead.
+     */
+    highWaterMark?: number;
+  }
+
+  /**
+   * Options for generating presigned URLs
+   */
+  interface S3FilePresignOptions extends S3Options {
+    /**
+     * Number of seconds until the presigned URL expires.
+     * - Default: 86400 (1 day)
+     *
+     * @example
+     * // Short-lived URL
+     * const url = file.presign({
+     *   expiresIn: 3600 // 1 hour
+     * });
+     *
+     * @example
+     * // Long-lived public URL
+     * const url = file.presign({
+     *   expiresIn: 7 * 24 * 60 * 60, // 7 days
+     *   acl: "public-read"
+     * });
+     */
+    expiresIn?: number;
+
+    /**
+     * The HTTP method allowed for the presigned URL.
+     *
+     * @example
+     * // GET URL for downloads
+     * const downloadUrl = file.presign({
+     *   method: "GET",
+     *   expiresIn: 3600
+     * });
+     *
+     * @example
+     * // PUT URL for uploads
+     * const uploadUrl = file.presign({
+     *   method: "PUT",
+     *   expiresIn: 3600,
+     *   type: "application/json"
+     * });
+     */
+    method?: "GET" | "POST" | "PUT" | "DELETE" | "HEAD";
+  }
+
+  /**
+   * Represents a file in an S3-compatible storage service.
+   * Extends the Blob interface for compatibility with web APIs.
+   */
+  interface S3File extends Blob {
+    /**
+     * The size of the file in bytes.
+     * This is a Promise because it requires a network request to determine the size.
+     *
+     * @example
+     * // Getting file size
+     * const size = await file.size;
+     * console.log(`File size: ${size} bytes`);
+     *
+     * @example
+     * // Check if file is larger than 1MB
+     * if (await file.size > 1024 * 1024) {
+     *   console.log("Large file detected");
+     * }
+     */
+    /**
+     * TODO: figure out how to get the typescript types to not error for this property.
+     */
+    // size: Promise<number>;
+
+    /**
+     * Creates a new S3File representing a slice of the original file.
+     * Uses HTTP Range headers for efficient partial downloads.
+     *
+     * @param begin - Starting byte offset
+     * @param end - Ending byte offset (exclusive)
+     * @param contentType - Optional MIME type for the slice
+     * @returns A new S3File representing the specified range
+     *
+     * @example
+     * // Reading file header
+     * const header = file.slice(0, 1024);
+     * const headerText = await header.text();
+     *
+     * @example
+     * // Reading with content type
+     * const jsonSlice = file.slice(1024, 2048, "application/json");
+     * const data = await jsonSlice.json();
+     *
+     * @example
+     * // Reading from offset to end
+     * const remainder = file.slice(1024);
+     * const content = await remainder.text();
+     */
+    slice(begin?: number, end?: number, contentType?: string): S3File;
+    slice(begin?: number, contentType?: string): S3File;
+    slice(contentType?: string): S3File;
+
+    /**
+     * Creates a writable stream for uploading data.
+     * Suitable for large files as it uses multipart upload.
+     *
+     * @param options - Configuration for the upload
+     * @returns A NetworkSink for writing data
+     *
+     * @example
+     * // Basic streaming write
+     * const writer = file.writer({
+     *   type: "application/json"
+     * });
+     * writer.write('{"hello": ');
+     * writer.write('"world"}');
+     * await writer.end();
+     *
+     * @example
+     * // Optimized large file upload
+     * const writer = file.writer({
+     *   partSize: 10 * 1024 * 1024, // 10MB parts
+     *   queueSize: 4, // Upload 4 parts in parallel
+     *   retry: 3 // Retry failed parts
+     * });
+     *
+     * // Write large chunks of data efficiently
+     * for (const chunk of largeDataChunks) {
+     *   await writer.write(chunk);
+     * }
+     * await writer.end();
+     *
+     * @example
+     * // Error handling
+     * const writer = file.writer();
+     * try {
+     *   await writer.write(data);
+     *   await writer.end();
+     * } catch (err) {
+     *   console.error('Upload failed:', err);
+     *   // Writer will automatically abort multipart upload on error
+     * }
+     */
+    writer(options?: S3Options): NetworkSink;
+
+    /**
+     * Gets a readable stream of the file's content.
+     * Useful for processing large files without loading them entirely into memory.
+     *
+     * @returns A ReadableStream for the file content
+     *
+     * @example
+     * // Basic streaming read
+     * const stream = file.stream();
+     * for await (const chunk of stream) {
+     *   console.log('Received chunk:', chunk);
+     * }
+     *
+     * @example
+     * // Piping to response
+     * const stream = file.stream();
+     * return new Response(stream, {
+     *   headers: { 'Content-Type': file.type }
+     * });
+     *
+     * @example
+     * // Processing large files
+     * const stream = file.stream();
+     * const textDecoder = new TextDecoder();
+     * for await (const chunk of stream) {
+     *   const text = textDecoder.decode(chunk);
+     *   // Process text chunk by chunk
+     * }
+     */
+    readonly readable: ReadableStream;
+    stream(): ReadableStream;
+
+    /**
+     * The name or path of the file in the bucket.
+     *
+     * @example
+     * const file = s3("folder/image.jpg");
+     * console.log(file.name); // "folder/image.jpg"
+     */
+    readonly name?: string;
+
+    /**
+     * The bucket name containing the file.
+     *
+     * @example
+     * const file = s3("s3://my-bucket/file.txt");
+     * console.log(file.bucket); // "my-bucket"
+     */
+    readonly bucket?: string;
+
+    /**
+     * Checks if the file exists in S3.
+     * Uses HTTP HEAD request to efficiently check existence without downloading.
+     *
+     * @returns Promise resolving to true if file exists, false otherwise
+     *
+     * @example
+     * // Basic existence check
+     * if (await file.exists()) {
+     *   console.log("File exists in S3");
+     * }
+     *
+     * @example
+     * // With error handling
+     * try {
+     *   const exists = await file.exists();
+     *   if (!exists) {
+     *     console.log("File not found");
+     *   }
+     * } catch (err) {
+     *   console.error("Error checking file:", err);
+     * }
+     */
+    exists(): Promise<boolean>;
+
+    /**
+     * Uploads data to S3.
+     * Supports various input types and automatically handles large files.
+     *
+     * @param data - The data to upload
+     * @param options - Upload configuration options
+     * @returns Promise resolving to number of bytes written
+     *
+     * @example
+     * // Writing string data
+     * await file.write("Hello World", {
+     *   type: "text/plain"
+     * });
+     *
+     * @example
+     * // Writing JSON
+     * const data = { hello: "world" };
+     * await file.write(JSON.stringify(data), {
+     *   type: "application/json"
+     * });
+     *
+     * @example
+     * // Writing from Response
+     * const response = await fetch("https://example.com/data");
+     * await file.write(response);
+     *
+     * @example
+     * // Writing with ACL
+     * await file.write(data, {
+     *   acl: "public-read",
+     *   type: "application/octet-stream"
+     * });
+     */
+    write(
+      data:
+        | string
+        | ArrayBufferView
+        | ArrayBuffer
+        | SharedArrayBuffer
+        | Request
+        | Response
+        | BunFile
+        | S3File
+        | Blob,
+      options?: S3Options,
+    ): Promise<number>;
+
+    /**
+     * Generates a presigned URL for the file.
+     * Allows temporary access to the file without exposing credentials.
+     *
+     * @param options - Configuration for the presigned URL
+     * @returns Presigned URL string
+     *
+     * @example
+     * // Basic download URL
+     * const url = file.presign({
+     *   expiresIn: 3600 // 1 hour
+     * });
+     *
+     * @example
+     * // Upload URL with specific content type
+     * const uploadUrl = file.presign({
+     *   method: "PUT",
+     *   expiresIn: 3600,
+     *   type: "image/jpeg",
+     *   acl: "public-read"
+     * });
+     *
+     * @example
+     * // URL with custom permissions
+     * const url = file.presign({
+     *   method: "GET",
+     *   expiresIn: 7 * 24 * 60 * 60, // 7 days
+     *   acl: "public-read"
+     * });
+     */
+    presign(options?: S3FilePresignOptions): string;
+
+    /**
+     * Deletes the file from S3.
+     *
+     * @returns Promise that resolves when deletion is complete
+     *
+     * @example
+     * // Basic deletion
+     * await file.delete();
+     *
+     * @example
+     * // With error handling
+     * try {
+     *   await file.delete();
+     *   console.log("File deleted successfully");
+     * } catch (err) {
+     *   console.error("Failed to delete file:", err);
+     * }
+     */
+    delete(): Promise<void>;
+
+    /**
+     * Alias for delete() method.
+     * Provided for compatibility with Node.js fs API naming.
+     *
+     * @example
+     * await file.unlink();
+     */
+    unlink: S3File["delete"];
   }
 
+  /**
+   * A configured S3 bucket instance for managing files.
+   * The instance is callable to create S3File instances and provides methods
+   * for common operations.
+   *
+   * @example
+   * // Basic bucket setup
+   * const bucket = new S3({
+   *   bucket: "my-bucket",
+   *   accessKeyId: "key",
+   *   secretAccessKey: "secret"
+   * });
+   *
+   * // Get file instance
+   * const file = bucket("image.jpg");
+   *
+   * // Common operations
+   * await bucket.write("data.json", JSON.stringify({hello: "world"}));
+   * const url = bucket.presign("file.pdf");
+   * await bucket.unlink("old.txt");
+   */
+  type S3Bucket = {
+    /**
+     * Creates an S3File instance for the given path.
+     *
+     * @example
+     * const file = bucket("image.jpg");
+     * await file.write(imageData);
+     * const configFile = bucket("config.json", {
+     *   type: "application/json",
+     *   acl: "private"
+     * });
+     */
+    (path: string, options?: S3Options): S3File;
+
+    /**
+     * Writes data directly to a path in the bucket.
+     * Supports strings, buffers, streams, and web API types.
+     *
+     * @example
+     * // Write string
+     * await bucket.write("hello.txt", "Hello World");
+     *
+     * // Write JSON with type
+     * await bucket.write(
+     *   "data.json",
+     *   JSON.stringify({hello: "world"}),
+     *   {type: "application/json"}
+     * );
+     *
+     * // Write from fetch
+     * const res = await fetch("https://example.com/data");
+     * await bucket.write("data.bin", res);
+     *
+     * // Write with ACL
+     * await bucket.write("public.html", html, {
+     *   acl: "public-read",
+     *   type: "text/html"
+     * });
+     */
+    write(
+      path: string,
+      data:
+        | string
+        | ArrayBufferView
+        | ArrayBuffer
+        | SharedArrayBuffer
+        | Request
+        | Response
+        | BunFile
+        | S3File
+        | Blob
+        | File,
+      options?: S3Options,
+    ): Promise<number>;
+
+    /**
+     * Generate a presigned URL for temporary access to a file.
+     * Useful for generating upload/download URLs without exposing credentials.
+     *
+     * @example
+     * // Download URL
+     * const downloadUrl = bucket.presign("file.pdf", {
+     *   expiresIn: 3600 // 1 hour
+     * });
+     *
+     * // Upload URL
+     * const uploadUrl = bucket.presign("uploads/image.jpg", {
+     *   method: "PUT",
+     *   expiresIn: 3600,
+     *   type: "image/jpeg",
+     *   acl: "public-read"
+     * });
+     *
+     * // Long-lived public URL
+     * const publicUrl = bucket.presign("public/doc.pdf", {
+     *   expiresIn: 7 * 24 * 60 * 60, // 7 days
+     *   acl: "public-read"
+     * });
+     */
+    presign(path: string, options?: S3FilePresignOptions): string;
+
+    /**
+     * Delete a file from the bucket.
+     *
+     * @example
+     * // Simple delete
+     * await bucket.unlink("old-file.txt");
+     *
+     * // With error handling
+     * try {
+     *   await bucket.unlink("file.dat");
+     *   console.log("File deleted");
+     * } catch (err) {
+     *   console.error("Delete failed:", err);
+     * }
+     */
+    unlink(path: string, options?: S3Options): Promise<void>;
+
+    /**
+     * Get the size of a file in bytes.
+     * Uses HEAD request to efficiently get size.
+     *
+     * @example
+     * // Get size
+     * const bytes = await bucket.size("video.mp4");
+     * console.log(`Size: ${bytes} bytes`);
+     *
+     * // Check if file is large
+     * if (await bucket.size("data.zip") > 100 * 1024 * 1024) {
+     *   console.log("File is larger than 100MB");
+     * }
+     */
+    size(path: string, options?: S3Options): Promise<number>;
+
+    /**
+     * Check if a file exists in the bucket.
+     * Uses HEAD request to check existence.
+     *
+     * @example
+     * // Check existence
+     * if (await bucket.exists("config.json")) {
+     *   const file = bucket("config.json");
+     *   const config = await file.json();
+     * }
+     *
+     * // With error handling
+     * try {
+     *   if (!await bucket.exists("required.txt")) {
+     *     throw new Error("Required file missing");
+     *   }
+     * } catch (err) {
+     *   console.error("Check failed:", err);
+     * }
+     */
+    exists(path: string, options?: S3Options): Promise<boolean>;
+  };
+
   /**
    * This lets you use macros as regular imports
    * @example
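Everything above is the core of the new S3 surface: the callable `S3Bucket`, `S3File`, `S3Options`, `S3FilePresignOptions`, and the `NetworkSink` returned by `writer()`. A rough end-to-end sketch assembled from those declarations (bucket name, keys, and credentials are placeholders):

```ts
import { S3 } from "bun";

// One S3Bucket holds the credentials; calling it returns S3File handles.
const bucket = new S3({
  bucket: "my-bucket",
  accessKeyId: process.env.S3_ACCESS_KEY_ID,
  secretAccessKey: process.env.S3_SECRET_ACCESS_KEY,
  endpoint: "https://s3.us-east-1.amazonaws.com",
});

// Simple write/read round trip.
await bucket.write("notes/hello.txt", "Hello Bun!", { type: "text/plain" });
const file = bucket("notes/hello.txt");
console.log(await file.text());

// Streaming upload through the NetworkSink returned by writer().
const sink = bucket("logs/app.log").writer({ type: "text/plain" });
sink.write("line 1\n");
sink.write("line 2\n");
await sink.end();

// Temporary access without sharing credentials, then clean up.
const url = bucket.presign("notes/hello.txt", { expiresIn: 3600 });
console.log(url);
await bucket.unlink("notes/hello.txt");
```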
@@ -1682,10 +2514,28 @@ declare module "bun" {
     /**
      * **Experimental**
      *
-     *
+     * Bundle CSS files.
+     *
+     * This will be enabled by default in Bun v1.2.
+     *
+     * @default false (until Bunv 1.2)
      */
     experimentalCss?: boolean;
 
+    /**
+     * **Experimental**
+     *
+     * Bundle JavaScript & CSS files from HTML files. JavaScript & CSS files
+     * from non-external <script> or <link> tags will be bundled.
+     *
+     * Underneath, this works similarly to HTMLRewriter.
+     *
+     * This will be enabled by default in Bun v1.2.
+     *
+     * @default false (until Bun v1.2)
+     */
+    html?: boolean;
+
     /**
      * Drop function calls to matching property accesses.
      */
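Both flags are opt-in build options in this canary. A hedged sketch of a build call that uses them, assuming `./index.html` is a local entrypoint whose non-external `<script>`/`<link>` tags should be bundled:

```ts
// Experimental: bundle the JavaScript and CSS referenced by an HTML entrypoint.
const result = await Bun.build({
  entrypoints: ["./index.html"], // assumed local HTML file
  outdir: "./dist",
  html: true,
  experimentalCss: true,
});

if (!result.success) {
  for (const message of result.logs) console.error(message);
}
```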
@@ -3111,7 +3961,7 @@ declare module "bun" {
    * "Hello, world!"
    * );
    * ```
-   * @param path The path to the file (lazily loaded)
+   * @param path The path to the file (lazily loaded) if the path starts with `s3://` it will behave like {@link S3File}
    */
   // tslint:disable-next-line:unified-signatures
   function file(path: string | URL, options?: BlobPropertyBag): BunFile;
@@ -3137,7 +3987,7 @@ declare module "bun" {
    * console.log(file.type); // "application/json"
    * ```
    *
-   * @param path The path to the file as a byte buffer (the buffer is copied)
+   * @param path The path to the file as a byte buffer (the buffer is copied) if the path starts with `s3://` it will behave like {@link S3File}
    */
   // tslint:disable-next-line:unified-signatures
   function file(
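Per the two updated `@param` notes, `Bun.file` now treats an `s3://` path like an `S3File`. A minimal sketch (bucket and key are placeholders; credentials are expected from the `S3_*`/`AWS_*` environment variables):

```ts
// An s3:// path makes Bun.file behave like an S3File rather than a local file.
const remote = Bun.file("s3://my-bucket/config.json"); // placeholder bucket/key

if (await remote.exists()) {
  const config = await remote.json();
  console.log(config);
}
```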
@@ -3483,9 +4333,24 @@ declare module "bun" {
   function nanoseconds(): number;
 
   /**
-   *
+   * Show precise statistics about memory usage of your application
+   *
+   * Generate a heap snapshot in JavaScriptCore's format that can be viewed with `bun --inspect` or Safari's Web Inspector
+   */
+  function generateHeapSnapshot(format?: "jsc"): HeapSnapshot;
+
+  /**
+   * Show precise statistics about memory usage of your application
+   *
+   * Generate a V8 Heap Snapshot that can be used with Chrome DevTools & Visual Studio Code
+   *
+   * This is a JSON string that can be saved to a file.
+   * ```ts
+   * const snapshot = Bun.generateHeapSnapshot("v8");
+   * await Bun.write("heap.heapsnapshot", snapshot);
+   * ```
    */
-  function generateHeapSnapshot(): HeapSnapshot;
+  function generateHeapSnapshot(format: "v8"): string;
 
   /**
    * The next time JavaScriptCore is idle, clear unused memory and attempt to reduce the heap size.

@@ -3979,7 +4844,8 @@ declare module "bun" {
     | "napi"
     | "wasm"
     | "text"
-    | "css";
+    | "css"
+    | "html";
 
   interface PluginConstraints {
     /**