@constructive-io/bucket-provisioner 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,23 @@
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2025 Dan Lynch <pyramation@gmail.com>
4
+ Copyright (c) 2025 Constructive <developers@constructive.io>
5
+ Copyright (c) 2020-present, Interweb, Inc.
6
+
7
+ Permission is hereby granted, free of charge, to any person obtaining a copy
8
+ of this software and associated documentation files (the "Software"), to deal
9
+ in the Software without restriction, including without limitation the rights
10
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
+ copies of the Software, and to permit persons to whom the Software is
12
+ furnished to do so, subject to the following conditions:
13
+
14
+ The above copyright notice and this permission notice shall be included in all
15
+ copies or substantial portions of the Software.
16
+
17
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,335 @@
1
+ # @constructive-io/bucket-provisioner
2
+
3
+ <p align="center" width="100%">
4
+ <img height="250" src="https://raw.githubusercontent.com/constructive-io/constructive/refs/heads/main/assets/outline-logo.svg" />
5
+ </p>
6
+
7
+ <p align="center" width="100%">
8
+ <a href="https://github.com/constructive-io/constructive/actions/workflows/run-tests.yaml">
9
+ <img height="20" src="https://github.com/constructive-io/constructive/actions/workflows/run-tests.yaml/badge.svg" />
10
+ </a>
11
+ <a href="https://github.com/constructive-io/constructive/blob/main/LICENSE"><img height="20" src="https://img.shields.io/badge/license-MIT-blue.svg"/></a>
12
+ <a href="https://www.npmjs.com/package/@constructive-io/bucket-provisioner"><img height="20" src="https://img.shields.io/github/package-json/v/constructive-io/constructive?filename=packages%2Fbucket-provisioner%2Fpackage.json"/></a>
13
+ </p>
14
+
15
+ S3-compatible bucket provisioning library for the Constructive storage module. Creates and configures buckets with the correct privacy policies, CORS rules, versioning, and lifecycle settings for private, public, and temporary file storage.
16
+
17
+ ## Features
18
+
19
+ - **Privacy enforcement** — Block All Public Access for private/temp buckets, public-read policy for public buckets
20
+ - **CORS configuration** — Browser-compatible rules for presigned URL uploads
21
+ - **Lifecycle rules** — Auto-cleanup for temp buckets (abandoned uploads)
22
+ - **Versioning** — Optional S3 versioning for durability
23
+ - **Multi-provider** — Works with AWS S3, MinIO, Cloudflare R2, Google Cloud Storage, and DigitalOcean Spaces
24
+ - **Inspect/audit** — Read back a bucket's current configuration for verification
25
+ - **Typed errors** — Structured `ProvisionerError` with error codes for programmatic handling
26
+
27
+ ## Installation
28
+
29
+ ```bash
30
+ pnpm add @constructive-io/bucket-provisioner
31
+ ```
32
+
33
+ ## Quick Start
34
+
35
+ ```typescript
36
+ import { BucketProvisioner } from '@constructive-io/bucket-provisioner';
37
+
38
+ const provisioner = new BucketProvisioner({
39
+ connection: {
40
+ provider: 'minio',
41
+ region: 'us-east-1',
42
+ endpoint: 'http://minio:9000',
43
+ accessKeyId: 'minioadmin',
44
+ secretAccessKey: 'minioadmin',
45
+ },
46
+ allowedOrigins: ['https://app.example.com'],
47
+ });
48
+
49
+ // Provision a private bucket (presigned URLs only)
50
+ const result = await provisioner.provision({
51
+ bucketName: 'my-app-private',
52
+ accessType: 'private',
53
+ versioning: true,
54
+ });
55
+
56
+ console.log(result);
57
+ // {
58
+ // bucketName: 'my-app-private',
59
+ // accessType: 'private',
60
+ // blockPublicAccess: true,
61
+ // versioning: true,
62
+ // corsRules: [...],
63
+ // lifecycleRules: [],
64
+ // ...
65
+ // }
66
+ ```
67
+
68
+ ## Usage
69
+
70
+ ### Provision a Public Bucket
71
+
72
+ Public buckets serve files via direct URL or CDN. The provisioner applies a public-read bucket policy and configures CORS for browser uploads.
73
+
74
+ ```typescript
75
+ const result = await provisioner.provision({
76
+ bucketName: 'my-app-public',
77
+ accessType: 'public',
78
+ publicUrlPrefix: 'https://cdn.example.com/public',
79
+ });
80
+ // result.blockPublicAccess === false
81
+ // result.publicUrlPrefix === 'https://cdn.example.com/public'
82
+ ```
83
+
84
+ ### Provision a Temp Bucket
85
+
86
+ Temp buckets are staging areas for uploads. They behave like private buckets but include a lifecycle rule to auto-delete objects after a configurable period.
87
+
88
+ ```typescript
89
+ const result = await provisioner.provision({
90
+ bucketName: 'my-app-temp',
91
+ accessType: 'temp',
92
+ });
93
+ // result.lifecycleRules[0].id === 'temp-cleanup'
94
+ // result.lifecycleRules[0].expirationDays === 1
95
+ ```
96
+
97
+ ### Inspect an Existing Bucket
98
+
99
+ Read back a bucket's current configuration to verify it matches expectations.
100
+
101
+ ```typescript
102
+ const config = await provisioner.inspect('my-app-private', 'private');
103
+ console.log(config.blockPublicAccess); // true
104
+ console.log(config.versioning); // true
105
+ console.log(config.corsRules.length); // 1
106
+ ```
107
+
108
+ ### Use with AWS S3
109
+
110
+ For AWS S3, no endpoint is needed — just region and credentials.
111
+
112
+ ```typescript
113
+ const provisioner = new BucketProvisioner({
114
+ connection: {
115
+ provider: 's3',
116
+ region: 'us-west-2',
117
+ accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
118
+ secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
119
+ },
120
+ allowedOrigins: ['https://app.example.com'],
121
+ });
122
+ ```
123
+
124
+ ### Use with Cloudflare R2
125
+
126
+ ```typescript
127
+ const provisioner = new BucketProvisioner({
128
+ connection: {
129
+ provider: 'r2',
130
+ region: 'auto',
131
+ endpoint: `https://${ACCOUNT_ID}.r2.cloudflarestorage.com`,
132
+ accessKeyId: R2_ACCESS_KEY,
133
+ secretAccessKey: R2_SECRET_KEY,
134
+ },
135
+ allowedOrigins: ['https://app.example.com'],
136
+ });
137
+ ```
138
+
139
+ ## API
140
+
141
+ ### `BucketProvisioner`
142
+
143
+ The main class that orchestrates bucket creation and configuration.
144
+
145
+ #### `new BucketProvisioner(options)`
146
+
147
+ | Option | Type | Description |
148
+ |--------|------|-------------|
149
+ | `connection.provider` | `'s3' \| 'minio' \| 'r2' \| 'gcs' \| 'spaces'` | Storage provider type |
150
+ | `connection.region` | `string` | S3 region (e.g., `'us-east-1'`) |
151
+ | `connection.endpoint` | `string?` | S3-compatible endpoint URL. Required for non-AWS providers. |
152
+ | `connection.accessKeyId` | `string` | AWS access key ID |
153
+ | `connection.secretAccessKey` | `string` | AWS secret access key |
154
+ | `connection.forcePathStyle` | `boolean?` | Force path-style URLs (auto-detected per provider) |
155
+ | `allowedOrigins` | `string[]` | Domains allowed for CORS (e.g., `['https://app.example.com']`) |
156
+
157
+ #### `provisioner.provision(options): Promise<ProvisionResult>`
158
+
159
+ Creates and configures a bucket. Steps:
160
+
161
+ 1. Creates the bucket (or verifies it exists)
162
+ 2. Configures Block Public Access
163
+ 3. Applies bucket policy (public-read or none)
164
+ 4. Sets CORS rules for presigned URL uploads
165
+ 5. Optionally enables versioning
166
+ 6. Adds lifecycle rules for temp buckets
167
+
168
+ | Option | Type | Description |
169
+ |--------|------|-------------|
170
+ | `bucketName` | `string` | S3 bucket name |
171
+ | `accessType` | `'public' \| 'private' \| 'temp'` | Determines which policies are applied |
172
+ | `region` | `string?` | Override region for this bucket |
173
+ | `versioning` | `boolean?` | Enable S3 versioning (default: `false`) |
174
+ | `publicUrlPrefix` | `string?` | CDN/public URL for public buckets |
175
+
176
+ #### `provisioner.inspect(bucketName, accessType): Promise<ProvisionResult>`
177
+
178
+ Reads back a bucket's current configuration (policy, CORS, versioning, lifecycle).
179
+
180
+ #### `provisioner.getClient(): S3Client`
181
+
182
+ Returns the underlying `@aws-sdk/client-s3` S3Client for advanced operations.
183
+
184
+ #### `provisioner.bucketExists(bucketName): Promise<boolean>`
185
+
186
+ Checks if a bucket exists and is accessible.
187
+
188
+ ### Policy Builders
189
+
190
+ Standalone functions for generating S3 policy documents.
191
+
192
+ #### `getPublicAccessBlock(accessType)`
193
+
194
+ Returns the Block Public Access configuration for a given access type.
195
+
196
+ #### `buildPublicReadPolicy(bucketName, keyPrefix?)`
197
+
198
+ Builds a public-read bucket policy document.
199
+
200
+ #### `buildCloudFrontOacPolicy(bucketName, distributionArn, keyPrefix?)`
201
+
202
+ Builds a CloudFront Origin Access Control bucket policy.
203
+
204
+ #### `buildPresignedUrlIamPolicy(bucketName)`
205
+
206
+ Builds the minimum-permission IAM policy for the presigned URL plugin.
207
+
208
+ ### CORS Builders
209
+
210
+ #### `buildUploadCorsRules(allowedOrigins, maxAgeSeconds?)`
211
+
212
+ CORS rules for public/temp buckets (PUT, GET, HEAD).
213
+
214
+ #### `buildPrivateCorsRules(allowedOrigins, maxAgeSeconds?)`
215
+
216
+ CORS rules for private buckets (PUT, HEAD only — no GET).
217
+
218
+ ### Lifecycle Builders
219
+
220
+ #### `buildTempCleanupRule(expirationDays?, prefix?)`
221
+
222
+ Lifecycle rule for auto-expiring temp bucket objects.
223
+
224
+ #### `buildAbortIncompleteMultipartRule(days?)`
225
+
226
+ Lifecycle rule for cleaning up incomplete multipart uploads.
227
+
228
+ ### Error Handling
229
+
230
+ All errors thrown by the provisioner are instances of `ProvisionerError`:
231
+
232
+ ```typescript
233
+ import { ProvisionerError } from '@constructive-io/bucket-provisioner';
234
+
235
+ try {
236
+ await provisioner.provision({ bucketName: 'test', accessType: 'private' });
237
+ } catch (err) {
238
+ if (err instanceof ProvisionerError) {
239
+ console.error(err.code); // 'POLICY_FAILED', 'CORS_FAILED', etc.
240
+ console.error(err.message); // Human-readable description
241
+ console.error(err.cause); // Original AWS SDK error
242
+ }
243
+ }
244
+ ```
245
+
246
+ Error codes:
247
+
248
+ | Code | Description |
249
+ |------|-------------|
250
+ | `CONNECTION_FAILED` | Could not connect to the storage endpoint |
251
+ | `BUCKET_ALREADY_EXISTS` | Bucket exists and is owned by another account |
252
+ | `BUCKET_NOT_FOUND` | Bucket does not exist (for inspect/read operations) |
253
+ | `INVALID_CONFIG` | Invalid configuration (missing credentials, origins, etc.) |
254
+ | `POLICY_FAILED` | Failed to apply Block Public Access or bucket policy |
255
+ | `CORS_FAILED` | Failed to set CORS configuration |
256
+ | `LIFECYCLE_FAILED` | Failed to set lifecycle rules |
257
+ | `VERSIONING_FAILED` | Failed to enable versioning |
258
+ | `ACCESS_DENIED` | Credentials lack required permissions |
259
+ | `PROVIDER_ERROR` | Generic provider error (check `cause` for details) |
260
+
261
+ ## Privacy Model
262
+
263
+ | Access Type | Block Public Access | Bucket Policy | CORS Methods | Lifecycle |
264
+ |-------------|-------------------|---------------|--------------|-----------|
265
+ | `private` | All blocked | None (deleted) | PUT, HEAD | None |
266
+ | `public` | Partially relaxed | Public-read | PUT, GET, HEAD | None |
267
+ | `temp` | All blocked | None (deleted) | PUT, GET, HEAD | Auto-expire (1 day) |
268
+
269
+ ## Provider Notes
270
+
271
+ | Provider | Endpoint Required | Path Style | Notes |
272
+ |----------|------------------|------------|-------|
273
+ | `s3` | No | Virtual-hosted | AWS default |
274
+ | `minio` | Yes | Path-style | Local development, self-hosted |
275
+ | `r2` | Yes | Path-style | Cloudflare R2 |
276
+ | `gcs` | Yes | Path-style | GCS S3-compatible API |
277
+ | `spaces` | Yes | Virtual-hosted | DigitalOcean Spaces |
278
+
279
+ ---
280
+
281
+ ## Education and Tutorials
282
+
283
+ 1. 🚀 [Quickstart: Getting Up and Running](https://constructive.io/learn/quickstart)
284
+ Get started with modular databases in minutes. Install prerequisites and deploy your first module.
285
+
286
+ 2. 📦 [Modular PostgreSQL Development with Database Packages](https://constructive.io/learn/modular-postgres)
287
+ Learn to organize PostgreSQL projects with pgpm workspaces and reusable database modules.
288
+
289
+ 3. ✏️ [Authoring Database Changes](https://constructive.io/learn/authoring-database-changes)
290
+ Master the workflow for adding, organizing, and managing database changes with pgpm.
291
+
292
+ 4. 🧪 [End-to-End PostgreSQL Testing with TypeScript](https://constructive.io/learn/e2e-postgres-testing)
293
+ Master end-to-end PostgreSQL testing with ephemeral databases, RLS testing, and CI/CD automation.
294
+
295
+ 5. ⚡ [Supabase Testing](https://constructive.io/learn/supabase)
296
+ Use TypeScript-first tools to test Supabase projects with realistic RLS, policies, and auth contexts.
297
+
298
+ 6. 💧 [Drizzle ORM Testing](https://constructive.io/learn/drizzle-testing)
299
+ Run full-stack tests with Drizzle ORM, including database setup, teardown, and RLS enforcement.
300
+
301
+ 7. 🔧 [Troubleshooting](https://constructive.io/learn/troubleshooting)
302
+ Common issues and solutions for pgpm, PostgreSQL, and testing.
303
+
304
+ ## Related Constructive Tooling
305
+
306
+ ### 📦 Package Management
307
+
308
+ * [pgpm](https://github.com/constructive-io/constructive/tree/main/pgpm/pgpm): **🖥️ PostgreSQL Package Manager** for modular Postgres development. Works with database workspaces, scaffolding, migrations, seeding, and installing database packages.
309
+
310
+ ### 🧪 Testing
311
+
312
+ * [pgsql-test](https://github.com/constructive-io/constructive/tree/main/postgres/pgsql-test): **📊 Isolated testing environments** with per-test transaction rollbacks—ideal for integration tests, complex migrations, and RLS simulation.
313
+ * [pgsql-seed](https://github.com/constructive-io/constructive/tree/main/postgres/pgsql-seed): **🌱 PostgreSQL seeding utilities** for CSV, JSON, SQL data loading, and pgpm deployment.
314
+ * [supabase-test](https://github.com/constructive-io/constructive/tree/main/postgres/supabase-test): **🧪 Supabase-native test harness** preconfigured for the local Supabase stack—per-test rollbacks, JWT/role context helpers, and CI/GitHub Actions ready.
315
+ * [graphile-test](https://github.com/constructive-io/constructive/tree/main/graphile/graphile-test): **🔐 Authentication mocking** for Graphile-focused test helpers and emulating row-level security contexts.
316
+ * [pg-query-context](https://github.com/constructive-io/constructive/tree/main/postgres/pg-query-context): **🔒 Session context injection** to add session-local context (e.g., `SET LOCAL`) into queries—ideal for setting `role`, `jwt.claims`, and other session settings.
317
+
318
+ ### 🧠 Parsing & AST
319
+
320
+ * [pgsql-parser](https://www.npmjs.com/package/pgsql-parser): **🔄 SQL conversion engine** that interprets and converts PostgreSQL syntax.
321
+ * [libpg-query-node](https://www.npmjs.com/package/libpg-query): **🌉 Node.js bindings** for `libpg_query`, converting SQL into parse trees.
322
+ * [pg-proto-parser](https://www.npmjs.com/package/pg-proto-parser): **📦 Protobuf parser** for parsing PostgreSQL Protocol Buffers definitions to generate TypeScript interfaces, utility functions, and JSON mappings for enums.
323
+ * [@pgsql/enums](https://www.npmjs.com/package/@pgsql/enums): **🏷️ TypeScript enums** for PostgreSQL AST for safe and ergonomic parsing logic.
324
+ * [@pgsql/types](https://www.npmjs.com/package/@pgsql/types): **📝 Type definitions** for PostgreSQL AST nodes in TypeScript.
325
+ * [@pgsql/utils](https://www.npmjs.com/package/@pgsql/utils): **🛠️ AST utilities** for constructing and transforming PostgreSQL syntax trees.
326
+
327
+ ## Credits
328
+
329
+ **🛠 Built by the [Constructive](https://constructive.io) team — creators of modular Postgres tooling for secure, composable backends. If you like our work, contribute on [GitHub](https://github.com/constructive-io).**
330
+
331
+ ## Disclaimer
332
+
333
+ AS DESCRIBED IN THE LICENSES, THE SOFTWARE IS PROVIDED "AS IS", AT YOUR OWN RISK, AND WITHOUT WARRANTIES OF ANY KIND.
334
+
335
+ No developer or entity involved in creating this software will be liable for any claims or damages whatsoever associated with your use, inability to use, or your interaction with other users of the code, including any direct, indirect, incidental, special, exemplary, punitive or consequential damages, or loss of profits, cryptocurrencies, tokens, or anything else of value.
package/client.d.ts ADDED
@@ -0,0 +1,19 @@
1
+ /**
2
+ * S3 client factory.
3
+ *
4
+ * Creates a configured S3Client from a StorageConnectionConfig.
5
+ * Handles provider-specific settings (path-style for MinIO, etc.).
6
+ */
7
+ import { S3Client } from '@aws-sdk/client-s3';
8
+ import type { StorageConnectionConfig } from './types';
9
+ /**
10
+ * Create an S3Client from a storage connection config.
11
+ *
12
+ * Provider-specific defaults:
13
+ * - `minio`: forces path-style URLs (required by MinIO)
14
+ * - `r2`: forces path-style URLs (required by Cloudflare R2)
15
+ * - `s3`: uses virtual-hosted style (AWS default)
16
+ * - `gcs`: forces path-style URLs (GCS S3-compatible API)
17
+ * - `spaces`: uses virtual-hosted style (DigitalOcean default)
18
+ */
19
+ export declare function createS3Client(config: StorageConnectionConfig): S3Client;
package/client.js ADDED
@@ -0,0 +1,45 @@
1
+ "use strict";
2
+ /**
3
+ * S3 client factory.
4
+ *
5
+ * Creates a configured S3Client from a StorageConnectionConfig.
6
+ * Handles provider-specific settings (path-style for MinIO, etc.).
7
+ */
8
+ Object.defineProperty(exports, "__esModule", { value: true });
9
+ exports.createS3Client = createS3Client;
10
+ const client_s3_1 = require("@aws-sdk/client-s3");
11
+ const types_1 = require("./types");
12
+ /**
13
+ * Create an S3Client from a storage connection config.
14
+ *
15
+ * Provider-specific defaults:
16
+ * - `minio`: forces path-style URLs (required by MinIO)
17
+ * - `r2`: forces path-style URLs (required by Cloudflare R2)
18
+ * - `s3`: uses virtual-hosted style (AWS default)
19
+ * - `gcs`: forces path-style URLs (GCS S3-compatible API)
20
+ * - `spaces`: uses virtual-hosted style (DigitalOcean default)
21
+ */
22
function createS3Client(config) {
    const { provider, region, endpoint, accessKeyId, secretAccessKey } = config;
    // Credentials are mandatory for every provider.
    if (!accessKeyId || !secretAccessKey) {
        throw new types_1.ProvisionerError('INVALID_CONFIG', 'accessKeyId and secretAccessKey are required');
    }
    if (!region) {
        throw new types_1.ProvisionerError('INVALID_CONFIG', 'region is required');
    }
    // MinIO, R2 and GCS require path-style URLs; an explicit
    // forcePathStyle in the config always wins over the default.
    const needsPathStyle = ['minio', 'r2', 'gcs'].includes(provider);
    const forcePathStyle = config.forcePathStyle ?? needsPathStyle;
    // Only plain AWS S3 can derive its endpoint from the region alone.
    if (provider !== 's3' && !endpoint) {
        throw new types_1.ProvisionerError('INVALID_CONFIG', `endpoint is required for provider '${provider}'`);
    }
    return new client_s3_1.S3Client({
        region,
        endpoint,
        forcePathStyle,
        credentials: { accessKeyId, secretAccessKey },
    });
}
package/cors.d.ts ADDED
@@ -0,0 +1,33 @@
1
+ /**
2
+ * CORS configuration builders.
3
+ *
4
+ * Generates CORS rules for S3 buckets to allow browser-based
5
+ * presigned URL uploads. Without CORS, the browser will block
6
+ * the cross-origin PUT request to the S3 endpoint.
7
+ */
8
+ import type { CorsRule } from './types';
9
+ /**
10
+ * Build the default CORS rules for presigned URL uploads.
11
+ *
12
+ * This allows:
13
+ * - PUT: for presigned uploads from the browser
14
+ * - GET: for presigned downloads and public file access
15
+ * - HEAD: for confirmUpload verification and cache headers
16
+ *
17
+ * @param allowedOrigins - Domains allowed to make cross-origin requests.
18
+ * Use specific domains in production (e.g., ["https://app.example.com"]).
19
+ * Never use ["*"] in production.
20
+ * @param maxAgeSeconds - Preflight cache duration (default: 3600 = 1 hour)
21
+ */
22
+ export declare function buildUploadCorsRules(allowedOrigins: string[], maxAgeSeconds?: number): CorsRule[];
23
+ /**
24
+ * Build restrictive CORS rules for private-only buckets.
25
+ *
26
+ * Similar to upload CORS but without GET (private files use
27
+ * presigned URLs which include auth in the query string,
28
+ * so CORS is less of a concern for downloads).
29
+ *
30
+ * @param allowedOrigins - Domains allowed to make cross-origin requests.
31
+ * @param maxAgeSeconds - Preflight cache duration (default: 3600 = 1 hour)
32
+ */
33
+ export declare function buildPrivateCorsRules(allowedOrigins: string[], maxAgeSeconds?: number): CorsRule[];
package/cors.js ADDED
@@ -0,0 +1,88 @@
1
+ "use strict";
2
+ /**
3
+ * CORS configuration builders.
4
+ *
5
+ * Generates CORS rules for S3 buckets to allow browser-based
6
+ * presigned URL uploads. Without CORS, the browser will block
7
+ * the cross-origin PUT request to the S3 endpoint.
8
+ */
9
+ Object.defineProperty(exports, "__esModule", { value: true });
10
+ exports.buildUploadCorsRules = buildUploadCorsRules;
11
+ exports.buildPrivateCorsRules = buildPrivateCorsRules;
12
+ /**
13
+ * Build the default CORS rules for presigned URL uploads.
14
+ *
15
+ * This allows:
16
+ * - PUT: for presigned uploads from the browser
17
+ * - GET: for presigned downloads and public file access
18
+ * - HEAD: for confirmUpload verification and cache headers
19
+ *
20
+ * @param allowedOrigins - Domains allowed to make cross-origin requests.
21
+ * Use specific domains in production (e.g., ["https://app.example.com"]).
22
+ * Never use ["*"] in production.
23
+ * @param maxAgeSeconds - Preflight cache duration (default: 3600 = 1 hour)
24
+ */
25
function buildUploadCorsRules(allowedOrigins, maxAgeSeconds = 3600) {
    // An empty origin list would produce a rule that blocks everything.
    if (!allowedOrigins.length) {
        throw new Error('allowedOrigins must contain at least one origin');
    }
    // Single rule covering presigned uploads (PUT), downloads / public
    // reads (GET), and post-upload verification (HEAD).
    const rule = {
        allowedOrigins,
        allowedMethods: ['PUT', 'GET', 'HEAD'],
        allowedHeaders: [
            'Content-Type',
            'Content-Length',
            'Content-MD5',
            'x-amz-content-sha256',
            'x-amz-date',
            'x-amz-security-token',
            'Authorization',
        ],
        exposedHeaders: [
            'ETag',
            'Content-Length',
            'Content-Type',
            'x-amz-request-id',
            'x-amz-id-2',
        ],
        maxAgeSeconds,
    };
    return [rule];
}
53
+ /**
54
+ * Build restrictive CORS rules for private-only buckets.
55
+ *
56
+ * Similar to upload CORS but without GET (private files use
57
+ * presigned URLs which include auth in the query string,
58
+ * so CORS is less of a concern for downloads).
59
+ *
60
+ * @param allowedOrigins - Domains allowed to make cross-origin requests.
61
+ * @param maxAgeSeconds - Preflight cache duration (default: 3600 = 1 hour)
62
+ */
63
function buildPrivateCorsRules(allowedOrigins, maxAgeSeconds = 3600) {
    // An empty origin list would produce a rule that blocks everything.
    if (!allowedOrigins.length) {
        throw new Error('allowedOrigins must contain at least one origin');
    }
    // Private buckets expose no cross-origin GET: downloads go through
    // presigned URLs, so only upload (PUT) and verification (HEAD) remain.
    return [{
        allowedOrigins,
        allowedMethods: ['PUT', 'HEAD'],
        allowedHeaders: [
            'Content-Type',
            'Content-Length',
            'Content-MD5',
            'x-amz-content-sha256',
            'x-amz-date',
            'x-amz-security-token',
            'Authorization',
        ],
        exposedHeaders: [
            'ETag',
            'Content-Length',
            'x-amz-request-id',
        ],
        maxAgeSeconds,
    }];
}
package/esm/client.d.ts ADDED
@@ -0,0 +1,19 @@
1
+ /**
2
+ * S3 client factory.
3
+ *
4
+ * Creates a configured S3Client from a StorageConnectionConfig.
5
+ * Handles provider-specific settings (path-style for MinIO, etc.).
6
+ */
7
+ import { S3Client } from '@aws-sdk/client-s3';
8
+ import type { StorageConnectionConfig } from './types';
9
+ /**
10
+ * Create an S3Client from a storage connection config.
11
+ *
12
+ * Provider-specific defaults:
13
+ * - `minio`: forces path-style URLs (required by MinIO)
14
+ * - `r2`: forces path-style URLs (required by Cloudflare R2)
15
+ * - `s3`: uses virtual-hosted style (AWS default)
16
+ * - `gcs`: forces path-style URLs (GCS S3-compatible API)
17
+ * - `spaces`: uses virtual-hosted style (DigitalOcean default)
18
+ */
19
+ export declare function createS3Client(config: StorageConnectionConfig): S3Client;
package/esm/client.js ADDED
@@ -0,0 +1,42 @@
1
+ /**
2
+ * S3 client factory.
3
+ *
4
+ * Creates a configured S3Client from a StorageConnectionConfig.
5
+ * Handles provider-specific settings (path-style for MinIO, etc.).
6
+ */
7
+ import { S3Client } from '@aws-sdk/client-s3';
8
+ import { ProvisionerError } from './types';
9
+ /**
10
+ * Create an S3Client from a storage connection config.
11
+ *
12
+ * Provider-specific defaults:
13
+ * - `minio`: forces path-style URLs (required by MinIO)
14
+ * - `r2`: forces path-style URLs (required by Cloudflare R2)
15
+ * - `s3`: uses virtual-hosted style (AWS default)
16
+ * - `gcs`: forces path-style URLs (GCS S3-compatible API)
17
+ * - `spaces`: uses virtual-hosted style (DigitalOcean default)
18
+ */
19
export function createS3Client(config) {
    const { provider, region, endpoint, accessKeyId, secretAccessKey } = config;
    // Credentials are mandatory for every provider.
    if (!accessKeyId || !secretAccessKey) {
        throw new ProvisionerError('INVALID_CONFIG', 'accessKeyId and secretAccessKey are required');
    }
    if (!region) {
        throw new ProvisionerError('INVALID_CONFIG', 'region is required');
    }
    // MinIO, R2 and GCS require path-style URLs; an explicit
    // forcePathStyle in the config always wins over the default.
    const needsPathStyle = ['minio', 'r2', 'gcs'].includes(provider);
    const forcePathStyle = config.forcePathStyle ?? needsPathStyle;
    // Only plain AWS S3 can derive its endpoint from the region alone.
    if (provider !== 's3' && !endpoint) {
        throw new ProvisionerError('INVALID_CONFIG', `endpoint is required for provider '${provider}'`);
    }
    return new S3Client({
        region,
        endpoint,
        forcePathStyle,
        credentials: { accessKeyId, secretAccessKey },
    });
}
package/esm/cors.d.ts ADDED
@@ -0,0 +1,33 @@
1
+ /**
2
+ * CORS configuration builders.
3
+ *
4
+ * Generates CORS rules for S3 buckets to allow browser-based
5
+ * presigned URL uploads. Without CORS, the browser will block
6
+ * the cross-origin PUT request to the S3 endpoint.
7
+ */
8
+ import type { CorsRule } from './types';
9
+ /**
10
+ * Build the default CORS rules for presigned URL uploads.
11
+ *
12
+ * This allows:
13
+ * - PUT: for presigned uploads from the browser
14
+ * - GET: for presigned downloads and public file access
15
+ * - HEAD: for confirmUpload verification and cache headers
16
+ *
17
+ * @param allowedOrigins - Domains allowed to make cross-origin requests.
18
+ * Use specific domains in production (e.g., ["https://app.example.com"]).
19
+ * Never use ["*"] in production.
20
+ * @param maxAgeSeconds - Preflight cache duration (default: 3600 = 1 hour)
21
+ */
22
+ export declare function buildUploadCorsRules(allowedOrigins: string[], maxAgeSeconds?: number): CorsRule[];
23
+ /**
24
+ * Build restrictive CORS rules for private-only buckets.
25
+ *
26
+ * Similar to upload CORS but without GET (private files use
27
+ * presigned URLs which include auth in the query string,
28
+ * so CORS is less of a concern for downloads).
29
+ *
30
+ * @param allowedOrigins - Domains allowed to make cross-origin requests.
31
+ * @param maxAgeSeconds - Preflight cache duration (default: 3600 = 1 hour)
32
+ */
33
+ export declare function buildPrivateCorsRules(allowedOrigins: string[], maxAgeSeconds?: number): CorsRule[];