bun-types 1.1.42 → 1.1.43-canary.20250104T140550
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bun.d.ts +873 -7
- package/docs/api/cc.md +3 -3
- package/docs/api/s3.md +549 -0
- package/docs/bundler/html.md +110 -0
- package/docs/bundler/loaders.md +76 -1
- package/docs/guides/ecosystem/nextjs.md +8 -0
- package/docs/install/cache.md +1 -1
- package/docs/install/index.md +7 -1
- package/docs/install/lockfile.md +14 -2
- package/docs/runtime/nodejs-apis.md +3 -3
- package/globals.d.ts +6 -0
- package/html-rewriter.d.ts +2 -0
- package/package.json +1 -1

package/docs/api/cc.md
CHANGED

````diff
@@ -179,16 +179,16 @@ type Flags = string | string[];

 These are flags like `-I` for include directories and `-D` for preprocessor definitions.

-#### `
+#### `define: Record<string, string>`

-The `
+The `define` option is an optional object of preprocessor definitions passed to the TinyCC compiler.

 ```ts
 type Defines = Record<string, string>;

 cc({
   source: "hello.c",
-
+  define: {
     "NDEBUG": "1",
   },
 });
````

package/docs/api/s3.md
ADDED

Production servers often read, upload, and write files to S3-compatible object storage services instead of the local filesystem. Historically, that has meant the local filesystem APIs you use in development can't be used in production. When you use Bun, things are different.

Bun provides fast, native bindings for interacting with S3-compatible object storage services. Bun's S3 API is designed to be simple and feel similar to fetch's `Response` and `Blob` APIs (like Bun's local filesystem APIs).

```ts
import { s3, write, S3 } from "bun";

const metadata = s3("123.json", {
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  bucket: "my-bucket",
  // endpoint: "https://s3.us-east-1.amazonaws.com",
});

// Download from S3 as JSON
const data = await metadata.json();

// Upload to S3
await write(metadata, JSON.stringify({ name: "John", age: 30 }));

// Presign a URL (synchronous - no network request needed)
const url = metadata.presign({
  acl: "public-read",
  expiresIn: 60 * 60 * 24, // 1 day
});
```

S3 is the [de facto standard](https://en.wikipedia.org/wiki/De_facto_standard) internet filesystem. You can use Bun's S3 API with S3-compatible storage services like:

- AWS S3
- Cloudflare R2
- DigitalOcean Spaces
- MinIO
- Backblaze B2
- ...and any other S3-compatible storage service

## Basic Usage

There are several ways to interact with Bun's S3 API.

### Using `Bun.s3()`

The `s3()` helper function is used to create one-off `S3File` instances for a single file.

```ts
import { s3 } from "bun";

// Using the s3() helper
const s3file = s3("my-file.txt", {
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  bucket: "my-bucket",
  // endpoint: "https://s3.us-east-1.amazonaws.com", // optional
  // endpoint: "https://<account-id>.r2.cloudflarestorage.com", // Cloudflare R2
  // endpoint: "https://<region>.digitaloceanspaces.com", // DigitalOcean Spaces
  // endpoint: "http://localhost:9000", // MinIO
});
```

### Reading Files

You can read files from S3 using methods similar to Bun's local filesystem APIs:

```ts
// Read an S3File as text
const text = await s3file.text();

// Read an S3File as JSON
const json = await s3file.json();

// Read an S3File as an ArrayBuffer
const buffer = await s3file.arrayBuffer();

// Get only the first 1024 bytes
const partial = await s3file.slice(0, 1024).text();

// Stream the file
const stream = s3file.stream();
for await (const chunk of stream) {
  console.log(chunk);
}
```

## Writing Files

Writing to S3 is just as simple:

```ts
// Write a string (replacing the file)
await s3file.write("Hello World!");

// Write with a content type
await s3file.write(JSON.stringify({ name: "John", age: 30 }), {
  type: "application/json",
});

// Write using a writer (streaming)
const writer = s3file.writer({ type: "application/json" });
writer.write("Hello");
writer.write(" World!");
await writer.end();

// Write using Bun.write
await Bun.write(s3file, "Hello World!");
```

### Working with large files (streams)

Bun automatically handles multipart uploads for large files and provides streaming capabilities. The same API that works for local files also works for S3 files.

```ts
// Write a large file
const bigFile = Buffer.alloc(10 * 1024 * 1024); // 10MB
const writer = s3file.writer({
  // Automatically retry on network errors up to 3 times
  retry: 3,

  // Queue up to 10 requests at a time
  queueSize: 10,

  // Upload in 5 MB chunks
  partSize: 5 * 1024 * 1024,
});
for (let i = 0; i < 10; i++) {
  await writer.write(bigFile);
}
await writer.end();
```

## Presigning URLs

When your production service needs to let users upload files to your server, it's often more reliable for the user to upload directly to S3 instead of your server acting as an intermediary.

To facilitate this, you can presign URLs for S3 files. This generates a URL with a signature that allows a user to securely upload that specific file to S3, without exposing your credentials or granting them unnecessary access to your bucket.

```ts
// Generate a presigned URL that expires in 24 hours (default)
const url = s3file.presign();

// Custom expiration time (in seconds)
const url2 = s3file.presign({ expiresIn: 3600 }); // 1 hour

// Using the static method
const url3 = Bun.S3.presign("my-file.txt", {
  bucket: "my-bucket",
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  // endpoint: "https://s3.us-east-1.amazonaws.com",
  // endpoint: "https://<account-id>.r2.cloudflarestorage.com", // Cloudflare R2
  expiresIn: 3600,
});
```

### Setting ACLs

To set an ACL (access control list) on a presigned URL, pass the `acl` option:

```ts
const url = s3file.presign({
  acl: "public-read",
  expiresIn: 3600,
});
```

You can pass any of the following ACLs:

| ACL                           | Explanation                                                          |
| ----------------------------- | -------------------------------------------------------------------- |
| `"public-read"`               | The object is readable by the public.                                |
| `"private"`                   | The object is readable only by the bucket owner.                     |
| `"public-read-write"`         | The object is readable and writable by the public.                   |
| `"authenticated-read"`        | The object is readable by the bucket owner and authenticated users.  |
| `"aws-exec-read"`             | The object is readable by the AWS account that made the request.     |
| `"bucket-owner-read"`         | The object is readable by the bucket owner.                          |
| `"bucket-owner-full-control"` | The object is readable and writable by the bucket owner.             |
| `"log-delivery-write"`        | The object is writable by AWS services used for log delivery.        |

### Expiring URLs

To set an expiration time for a presigned URL, pass the `expiresIn` option.

```ts
const url = s3file.presign({
  // Seconds
  expiresIn: 3600, // 1 hour
});
```

### `method`

To set the HTTP method for a presigned URL, pass the `method` option.

```ts
const url = s3file.presign({
  method: "PUT",
  // method: "DELETE",
  // method: "GET",
  // method: "HEAD",
  // method: "POST",
});
```

### `new Response(S3File)`

To quickly redirect users to a presigned URL for an S3 file, you can pass an `S3File` instance to a `Response` object as the body.

```ts
const response = new Response(s3file);
console.log(response);
```

This will automatically redirect the user to the presigned URL for the S3 file, saving you the memory, time, and bandwidth cost of downloading the file to your server and sending it back to the user.

```ts
Response (0 KB) {
  ok: false,
  url: "",
  status: 302,
  statusText: "",
  headers: Headers {
    "location": "https://<account-id>.r2.cloudflarestorage.com/...",
  },
  redirected: true,
  bodyUsed: false
}
```

## Support for S3-Compatible Services

Bun's S3 implementation works with any S3-compatible storage service. Just specify the appropriate endpoint:

```ts
import { s3 } from "bun";

// Cloudflare R2
const r2file = s3("my-file.txt", {
  accessKeyId: "access-key",
  secretAccessKey: "secret-key",
  bucket: "my-bucket",
  endpoint: "https://<account-id>.r2.cloudflarestorage.com",
});

// DigitalOcean Spaces
const spacesFile = s3("my-file.txt", {
  accessKeyId: "access-key",
  secretAccessKey: "secret-key",
  bucket: "my-bucket",
  endpoint: "https://<region>.digitaloceanspaces.com",
});

// MinIO
const minioFile = s3("my-file.txt", {
  accessKeyId: "access-key",
  secretAccessKey: "secret-key",
  bucket: "my-bucket",
  endpoint: "http://localhost:9000",
});
```

## Credentials

Credentials are one of the hardest parts of using S3, and we've tried to make it as easy as possible. By default, Bun reads the following environment variables for credentials.

| Option name       | Environment variable   |
| ----------------- | ---------------------- |
| `accessKeyId`     | `S3_ACCESS_KEY_ID`     |
| `secretAccessKey` | `S3_SECRET_ACCESS_KEY` |
| `region`          | `S3_REGION`            |
| `endpoint`        | `S3_ENDPOINT`          |
| `bucket`          | `S3_BUCKET`            |
| `sessionToken`    | `S3_SESSION_TOKEN`     |

If an `S3_*` environment variable is not set, Bun will also check for the corresponding `AWS_*` environment variable for each of the options above.

| Option name       | Fallback environment variable |
| ----------------- | ----------------------------- |
| `accessKeyId`     | `AWS_ACCESS_KEY_ID`           |
| `secretAccessKey` | `AWS_SECRET_ACCESS_KEY`       |
| `region`          | `AWS_REGION`                  |
| `endpoint`        | `AWS_ENDPOINT`                |
| `bucket`          | `AWS_BUCKET`                  |
| `sessionToken`    | `AWS_SESSION_TOKEN`           |

These environment variables are read from [`.env` files](/docs/runtime/env) or from the process environment at initialization time (`process.env` is not used for this).

These defaults are overridden by the options you pass to `s3(credentials)`, `new Bun.S3(credentials)`, or any of the methods that accept credentials. So if, for example, you use the same credentials for different buckets, you can set the credentials once in your `.env` file and then pass `bucket: "my-bucket"` to the `s3()` helper function without having to specify all the credentials again.
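
For example, a minimal sketch of that pattern (the file and bucket names below are placeholders, and the keys are assumed to live in your `.env`):

```ts
// .env (read once at initialization time):
//   S3_ACCESS_KEY_ID=your-access-key
//   S3_SECRET_ACCESS_KEY=your-secret-key
//   S3_REGION=us-east-1

import { s3 } from "bun";

// Only the per-call option is passed explicitly; the keys and region
// fall back to the environment variables above.
const invoice = s3("invoice.pdf", { bucket: "my-bucket" });
```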

### `S3` Buckets

Passing around all of these credentials can be cumbersome. To make it easier, you can create an `S3` bucket instance.

```ts
import { S3 } from "bun";

const bucket = new S3({
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  bucket: "my-bucket",
  // sessionToken: "..."
  endpoint: "https://s3.us-east-1.amazonaws.com",
  // endpoint: "https://<account-id>.r2.cloudflarestorage.com", // Cloudflare R2
  // endpoint: "http://localhost:9000", // MinIO
});

// bucket is a function that creates `S3File` instances (lazy)
const file = bucket("my-file.txt");

// Write to S3
await file.write("Hello World!");

// Read from S3
const text = await file.text();

// Write using a Response
await file.write(new Response("Hello World!"));

// Presign a URL
const url = file.presign({
  expiresIn: 60 * 60 * 24, // 1 day
  acl: "public-read",
});

// Delete the file
await file.unlink();
```

#### Read a file from an `S3` bucket

The `S3` bucket instance is itself a function that creates `S3File` instances. It provides a more convenient API for interacting with S3.

```ts
const s3file = bucket("my-file.txt");
const text = await s3file.text();
const json = await s3file.json();
const bytes = await s3file.bytes();
const arrayBuffer = await s3file.arrayBuffer();
```

#### Write a file to S3

To write a file to the bucket, you can use the `write` method.

```ts
const bucket = new Bun.S3({
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  endpoint: "https://s3.us-east-1.amazonaws.com",
  bucket: "my-bucket",
});
await bucket.write("my-file.txt", "Hello World!");
await bucket.write("my-file.txt", new Response("Hello World!"));
```

You can also call `.write` on the `S3File` instance created by the `S3` bucket instance.

```ts
const s3file = bucket("my-file.txt");
await s3file.write("Hello World!", {
  type: "text/plain",
});
await s3file.write(new Response("Hello World!"));
```

#### Delete a file from S3

To delete a file from the bucket, you can use the `delete` method.

```ts
const bucket = new Bun.S3({
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  bucket: "my-bucket",
});

await bucket.delete("my-file.txt");
```

You can also use the `unlink` method, which is an alias for `delete`.

```ts
// "delete" and "unlink" are aliases of each other.
await bucket.unlink("my-file.txt");
```

## `S3File`

`S3File` instances are created by calling an `S3` bucket instance or the `s3()` helper function. Like `Bun.file()`, `S3File` instances are lazy. They don't refer to something that necessarily exists at the time of creation. That's why all the methods that don't involve network requests are fully synchronous.

```ts
interface S3File extends Blob {
  slice(start: number, end?: number): S3File;
  exists(): Promise<boolean>;
  unlink(): Promise<void>;
  presign(options: S3Options): string;
  text(): Promise<string>;
  json(): Promise<any>;
  bytes(): Promise<Uint8Array>;
  arrayBuffer(): Promise<ArrayBuffer>;
  stream(options: S3Options): ReadableStream;
  write(
    data:
      | string
      | Uint8Array
      | ArrayBuffer
      | Blob
      | ReadableStream
      | Response
      | Request,
    options?: BlobPropertyBag,
  ): Promise<void>;

  readonly size: Promise<number>;

  // ... more omitted for brevity
}
```

Like `Bun.file()`, `S3File` extends [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob), so all the methods that are available on `Blob` are also available on `S3File`. The same API for reading data from a local file is also available for reading data from S3.

| Method                       | Output           |
| ---------------------------- | ---------------- |
| `await s3File.text()`        | `string`         |
| `await s3File.bytes()`       | `Uint8Array`     |
| `await s3File.json()`        | `JSON`           |
| `s3File.stream()`            | `ReadableStream` |
| `await s3File.arrayBuffer()` | `ArrayBuffer`    |

That means using `S3File` instances with `fetch()`, `Response`, and other web APIs that accept `Blob` instances just works.
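
For instance, here's a minimal sketch of passing an `S3File` where a `Blob` is expected (the upload endpoint is a hypothetical placeholder):

```ts
const s3file = Bun.s3("my-file.txt", { bucket: "my-bucket" });

// Because S3File extends Blob, it can be used directly as a fetch() body.
await fetch("https://example.com/upload", {
  method: "POST",
  body: s3file,
});
```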

### Partial reads

To read a partial range of a file, you can use the `slice` method.

```ts
const partial = s3file.slice(0, 1024);

// Read the partial range as a Uint8Array
const bytes = await partial.bytes();

// Read the partial range as a string
const text = await partial.text();
```

Internally, this works by using the HTTP `Range` header to request only the bytes you want. This `slice` method is the same as [`Blob.prototype.slice`](https://developer.mozilla.org/en-US/docs/Web/API/Blob/slice).

## Error codes

When Bun's S3 API throws an error, it will have a `code` property that matches one of the following values:

- `ERR_S3_MISSING_CREDENTIALS`
- `ERR_S3_INVALID_METHOD`
- `ERR_S3_INVALID_PATH`
- `ERR_S3_INVALID_ENDPOINT`
- `ERR_S3_INVALID_SIGNATURE`
- `ERR_S3_INVALID_SESSION_TOKEN`

When the error comes from the S3 object storage service itself (rather than from Bun), it will be an `S3Error` instance (an `Error` instance whose `name` is `"S3Error"`).
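
A sketch of telling these two cases apart, using the `code` property and the error name described above:

```ts
try {
  const text = await s3file.text();
  console.log(text);
} catch (error: any) {
  if (error?.code === "ERR_S3_MISSING_CREDENTIALS") {
    // Thrown by Bun itself, before any request is made
    console.error("S3 credentials are not configured");
  } else if (error?.name === "S3Error") {
    // Returned by the S3-compatible service (e.g. a missing key or denied access)
    console.error("S3 service error:", error.message);
  } else {
    throw error;
  }
}
```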

## `S3` static methods

The `S3` class provides several static methods for interacting with S3.

### `S3.presign`

To generate a presigned URL for an S3 file, you can use the `S3.presign` method.

```ts
import { S3 } from "bun";

const url = S3.presign("my-file.txt", {
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  bucket: "my-bucket",
  expiresIn: 3600,
  // endpoint: "https://s3.us-east-1.amazonaws.com",
  // endpoint: "https://<account-id>.r2.cloudflarestorage.com", // Cloudflare R2
});
```

This is the same operation as `S3File.prototype.presign` and `new S3(credentials).presign`, exposed as a static method on the `S3` class.

### `S3.exists`

To check if an S3 file exists, you can use the `S3.exists` method.

```ts
import { S3 } from "bun";

const exists = await S3.exists("my-file.txt", {
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  bucket: "my-bucket",
  // endpoint: "https://s3.us-east-1.amazonaws.com",
});
```

The same method also works on `S3File` instances.

```ts
const s3file = Bun.s3("my-file.txt", {
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  bucket: "my-bucket",
});
const exists = await s3file.exists();
```

### `S3.size`

To get the size of an S3 file, you can use the `S3.size` method.

```ts
import { S3 } from "bun";

const size = await S3.size("my-file.txt", {
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  bucket: "my-bucket",
  // endpoint: "https://s3.us-east-1.amazonaws.com",
});
```

### `S3.unlink`

To delete an S3 file, you can use the `S3.unlink` method.

```ts
import { S3 } from "bun";

await S3.unlink("my-file.txt", {
  accessKeyId: "your-access-key",
  secretAccessKey: "your-secret-key",
  bucket: "my-bucket",
  // endpoint: "https://s3.us-east-1.amazonaws.com",
});
```

## s3:// protocol

To make it easier to use the same code for local files and S3 files, the `s3://` protocol is supported in `fetch` and `Bun.file()`.

```ts
const response = await fetch("s3://my-bucket/my-file.txt");
const file = Bun.file("s3://my-bucket/my-file.txt");
```

This is the equivalent of calling `Bun.s3("my-file.txt", { bucket: "my-bucket" })`.

package/docs/bundler/html.md
ADDED

As of Bun v1.1.43, Bun's bundler has first-class support for HTML. Build static sites, landing pages, and web applications with zero configuration. Just point Bun at your HTML file and it handles everything else.

```html#index.html
<!doctype html>
<html>
  <head>
    <link rel="stylesheet" href="./styles.css" />
    <script src="./app.ts" type="module"></script>
  </head>
  <body>
    <img src="./logo.png" />
  </body>
</html>
```

One command is all you need (these flags won't be experimental after Bun v1.2):

{% codetabs %}

```bash#CLI
$ bun build --experimental-html --experimental-css ./index.html --outdir=dist
```

```ts#API
Bun.build({
  entrypoints: ["./index.html"],
  outdir: "./dist",

  // On by default in Bun v1.2+
  html: true,
  experimentalCss: true,
});
```

{% /codetabs %}

Bun automatically:

- Bundles, tree-shakes, and optimizes your JavaScript, JSX, and TypeScript
- Bundles and optimizes your CSS
- Copies and hashes images and other assets
- Updates all references to local files or packages in your HTML

## Zero Config, Maximum Performance

The HTML bundler is enabled by default in Bun v1.2+. Drop in your existing HTML files and Bun will handle:

- **TypeScript & JSX** - Write modern JavaScript for browsers without the setup
- **CSS** - Bundle CSS stylesheets directly from `<link rel="stylesheet">` or `@import`
- **Images & Assets** - Automatic copying, hashing, and rewriting of assets in JavaScript, CSS, and HTML

## Watch mode

You can run `bun build --watch` to watch for changes and rebuild automatically.
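
For example, adding `--watch` to the build command from above:

```bash
$ bun build --experimental-html --experimental-css ./index.html --outdir=dist --watch
```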

You've never seen a watch mode this fast.

## Plugin API

Need more control? Configure the bundler through the JavaScript API and use Bun's builtin `HTMLRewriter` to preprocess HTML.

```ts
await Bun.build({
  entrypoints: ["./index.html"],
  outdir: "./dist",
  html: true,
  experimentalCss: true,
  minify: true,

  plugins: [
    {
      // A plugin that makes every HTML tag and text chunk lowercase
      name: "lowercase-html-plugin",
      setup({ onLoad }) {
        const rewriter = new HTMLRewriter().on("*", {
          element(element) {
            element.tagName = element.tagName.toLowerCase();
          },
          text(chunk) {
            chunk.replace(chunk.text.toLowerCase());
          },
        });

        onLoad({ filter: /\.html$/ }, async args => {
          const html = await Bun.file(args.path).text();

          return {
            // Bun's bundler will scan the HTML for <script> tags, <link rel="stylesheet"> tags,
            // and other assets, and bundle them automatically
            contents: rewriter.transform(html),
            loader: "html",
          };
        });
      },
    },
  ],
});
```

## What Gets Processed?

Bun automatically handles all common web assets:

- Scripts (`<script src>`) are run through Bun's JavaScript/TypeScript/JSX bundler
- Stylesheets (`<link rel="stylesheet">`) are run through Bun's CSS parser & bundler
- Images (`<img>`, `<picture>`) are copied and hashed
- Media (`<video>`, `<audio>`, `<source>`) are copied and hashed
- Any `<link>` tag with an `href` attribute pointing to a local file is rewritten to the new path and hashed

All paths are resolved relative to your HTML file, making it easy to organize your project however you want.
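
For example, a hypothetical project layout where `index.html` lives alongside the assets it references:

```
src/
  index.html   # references ./app.ts, ./styles.css, and ./images/logo.png
  app.ts
  styles.css
  images/
    logo.png
```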