graphile-presigned-url-plugin 0.17.1 → 0.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/download-url-field.js +18 -5
- package/esm/download-url-field.js +18 -5
- package/esm/plugin.js +15 -5
- package/esm/types.d.ts +5 -4
- package/package.json +2 -2
- package/plugin.js +15 -5
- package/types.d.ts +5 -4
package/download-url-field.js
CHANGED
|
@@ -52,10 +52,10 @@ function resolveS3(options) {
|
|
|
52
52
|
* Build a per-database S3Config by overlaying storage_module overrides
|
|
53
53
|
* onto the global S3Config. Same logic as plugin.ts resolveS3ForDatabase.
|
|
54
54
|
*/
|
|
55
|
-
function resolveS3ForDatabase(options, storageConfig, databaseId) {
|
|
55
|
+
function resolveS3ForDatabase(options, storageConfig, databaseId, bucketKey) {
|
|
56
56
|
const globalS3 = resolveS3(options);
|
|
57
57
|
const bucket = options.resolveBucketName
|
|
58
|
-
? options.resolveBucketName(databaseId)
|
|
58
|
+
? options.resolveBucketName(databaseId, bucketKey)
|
|
59
59
|
: globalS3.bucket;
|
|
60
60
|
const publicUrlPrefix = storageConfig.publicUrlPrefix ?? globalS3.publicUrlPrefix;
|
|
61
61
|
if (bucket === globalS3.bucket && publicUrlPrefix === globalS3.publicUrlPrefix) {
|
|
@@ -97,16 +97,18 @@ function createDownloadUrlPlugin(options) {
|
|
|
97
97
|
const $key = $parent.get('key');
|
|
98
98
|
const $isPublic = $parent.get('is_public');
|
|
99
99
|
const $filename = $parent.get('filename');
|
|
100
|
+
const $bucketId = $parent.get('bucket_id');
|
|
100
101
|
const $withPgClient = (0, grafast_1.context)().get('withPgClient');
|
|
101
102
|
const $pgSettings = (0, grafast_1.context)().get('pgSettings');
|
|
102
103
|
const $combined = (0, grafast_1.object)({
|
|
103
104
|
key: $key,
|
|
104
105
|
isPublic: $isPublic,
|
|
105
106
|
filename: $filename,
|
|
107
|
+
bucketId: $bucketId,
|
|
106
108
|
withPgClient: $withPgClient,
|
|
107
109
|
pgSettings: $pgSettings,
|
|
108
110
|
});
|
|
109
|
-
return (0, grafast_1.lambda)($combined, async ({ key, isPublic, filename, withPgClient, pgSettings }) => {
|
|
111
|
+
return (0, grafast_1.lambda)($combined, async ({ key, isPublic, filename, bucketId, withPgClient, pgSettings }) => {
|
|
110
112
|
if (!key)
|
|
111
113
|
return null;
|
|
112
114
|
let s3ForDb = resolveS3(options);
|
|
@@ -124,11 +126,22 @@ function createDownloadUrlPlugin(options) {
|
|
|
124
126
|
const config = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
|
|
125
127
|
if (!config)
|
|
126
128
|
return null;
|
|
127
|
-
|
|
129
|
+
// Look up the bucket key for scoped S3 resolution
|
|
130
|
+
let bucketKey = 'public';
|
|
131
|
+
if (bucketId) {
|
|
132
|
+
const bucketResult = await pgClient.query({
|
|
133
|
+
text: `SELECT key FROM ${config.bucketsQualifiedName} WHERE id = $1 LIMIT 1`,
|
|
134
|
+
values: [bucketId],
|
|
135
|
+
});
|
|
136
|
+
if (bucketResult.rows[0]?.key) {
|
|
137
|
+
bucketKey = bucketResult.rows[0].key;
|
|
138
|
+
}
|
|
139
|
+
}
|
|
140
|
+
return { config, databaseId, bucketKey };
|
|
128
141
|
});
|
|
129
142
|
if (resolved) {
|
|
130
143
|
downloadUrlExpirySeconds = resolved.config.downloadUrlExpirySeconds;
|
|
131
|
-
s3ForDb = resolveS3ForDatabase(options, resolved.config, resolved.databaseId);
|
|
144
|
+
s3ForDb = resolveS3ForDatabase(options, resolved.config, resolved.databaseId, resolved.bucketKey);
|
|
132
145
|
}
|
|
133
146
|
}
|
|
134
147
|
}
|
|
@@ -49,10 +49,10 @@ function resolveS3(options) {
|
|
|
49
49
|
* Build a per-database S3Config by overlaying storage_module overrides
|
|
50
50
|
* onto the global S3Config. Same logic as plugin.ts resolveS3ForDatabase.
|
|
51
51
|
*/
|
|
52
|
-
function resolveS3ForDatabase(options, storageConfig, databaseId) {
|
|
52
|
+
function resolveS3ForDatabase(options, storageConfig, databaseId, bucketKey) {
|
|
53
53
|
const globalS3 = resolveS3(options);
|
|
54
54
|
const bucket = options.resolveBucketName
|
|
55
|
-
? options.resolveBucketName(databaseId)
|
|
55
|
+
? options.resolveBucketName(databaseId, bucketKey)
|
|
56
56
|
: globalS3.bucket;
|
|
57
57
|
const publicUrlPrefix = storageConfig.publicUrlPrefix ?? globalS3.publicUrlPrefix;
|
|
58
58
|
if (bucket === globalS3.bucket && publicUrlPrefix === globalS3.publicUrlPrefix) {
|
|
@@ -94,16 +94,18 @@ export function createDownloadUrlPlugin(options) {
|
|
|
94
94
|
const $key = $parent.get('key');
|
|
95
95
|
const $isPublic = $parent.get('is_public');
|
|
96
96
|
const $filename = $parent.get('filename');
|
|
97
|
+
const $bucketId = $parent.get('bucket_id');
|
|
97
98
|
const $withPgClient = grafastContext().get('withPgClient');
|
|
98
99
|
const $pgSettings = grafastContext().get('pgSettings');
|
|
99
100
|
const $combined = object({
|
|
100
101
|
key: $key,
|
|
101
102
|
isPublic: $isPublic,
|
|
102
103
|
filename: $filename,
|
|
104
|
+
bucketId: $bucketId,
|
|
103
105
|
withPgClient: $withPgClient,
|
|
104
106
|
pgSettings: $pgSettings,
|
|
105
107
|
});
|
|
106
|
-
return lambda($combined, async ({ key, isPublic, filename, withPgClient, pgSettings }) => {
|
|
108
|
+
return lambda($combined, async ({ key, isPublic, filename, bucketId, withPgClient, pgSettings }) => {
|
|
107
109
|
if (!key)
|
|
108
110
|
return null;
|
|
109
111
|
let s3ForDb = resolveS3(options);
|
|
@@ -121,11 +123,22 @@ export function createDownloadUrlPlugin(options) {
|
|
|
121
123
|
const config = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
|
|
122
124
|
if (!config)
|
|
123
125
|
return null;
|
|
124
|
-
|
|
126
|
+
// Look up the bucket key for scoped S3 resolution
|
|
127
|
+
let bucketKey = 'public';
|
|
128
|
+
if (bucketId) {
|
|
129
|
+
const bucketResult = await pgClient.query({
|
|
130
|
+
text: `SELECT key FROM ${config.bucketsQualifiedName} WHERE id = $1 LIMIT 1`,
|
|
131
|
+
values: [bucketId],
|
|
132
|
+
});
|
|
133
|
+
if (bucketResult.rows[0]?.key) {
|
|
134
|
+
bucketKey = bucketResult.rows[0].key;
|
|
135
|
+
}
|
|
136
|
+
}
|
|
137
|
+
return { config, databaseId, bucketKey };
|
|
125
138
|
});
|
|
126
139
|
if (resolved) {
|
|
127
140
|
downloadUrlExpirySeconds = resolved.config.downloadUrlExpirySeconds;
|
|
128
|
-
s3ForDb = resolveS3ForDatabase(options, resolved.config, resolved.databaseId);
|
|
141
|
+
s3ForDb = resolveS3ForDatabase(options, resolved.config, resolved.databaseId, resolved.bucketKey);
|
|
129
142
|
}
|
|
130
143
|
}
|
|
131
144
|
}
|
package/esm/plugin.js
CHANGED
|
@@ -74,10 +74,10 @@ function resolveS3(options) {
|
|
|
74
74
|
}
|
|
75
75
|
return options.s3;
|
|
76
76
|
}
|
|
77
|
-
function resolveS3ForDatabase(options, storageConfig, databaseId) {
|
|
77
|
+
function resolveS3ForDatabase(options, storageConfig, databaseId, bucketKey) {
|
|
78
78
|
const globalS3 = resolveS3(options);
|
|
79
79
|
const bucket = options.resolveBucketName
|
|
80
|
-
? options.resolveBucketName(databaseId)
|
|
80
|
+
? options.resolveBucketName(databaseId, bucketKey)
|
|
81
81
|
: globalS3.bucket;
|
|
82
82
|
const publicUrlPrefix = storageConfig.publicUrlPrefix ?? globalS3.publicUrlPrefix;
|
|
83
83
|
if (bucket === globalS3.bucket && publicUrlPrefix === globalS3.publicUrlPrefix) {
|
|
@@ -214,7 +214,7 @@ export function createPresignedUrlPlugin(options) {
|
|
|
214
214
|
const bucket = await getBucketConfig(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
|
|
215
215
|
if (!bucket)
|
|
216
216
|
throw new Error('BUCKET_NOT_FOUND');
|
|
217
|
-
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
|
|
217
|
+
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId, bucket.key);
|
|
218
218
|
await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
|
|
219
219
|
return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
|
|
220
220
|
contentHash: vals.contentHash,
|
|
@@ -310,7 +310,7 @@ export function createPresignedUrlPlugin(options) {
|
|
|
310
310
|
if (totalSize > storageConfig.maxBulkTotalSize) {
|
|
311
311
|
throw new Error(`BULK_UPLOAD_SIZE_EXCEEDED: ${totalSize} bytes exceeds maximum of ${storageConfig.maxBulkTotalSize} bytes per batch`);
|
|
312
312
|
}
|
|
313
|
-
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
|
|
313
|
+
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId, bucket.key);
|
|
314
314
|
await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
|
|
315
315
|
const results = [];
|
|
316
316
|
for (const file of filesArray) {
|
|
@@ -419,7 +419,17 @@ export function createPresignedUrlPlugin(options) {
|
|
|
419
419
|
return;
|
|
420
420
|
}
|
|
421
421
|
// No other references — attempt sync S3 delete
|
|
422
|
-
|
|
422
|
+
// Look up the bucket key for scoped S3 resolution
|
|
423
|
+
const bucketResult = await pgClient.query({
|
|
424
|
+
text: `SELECT key FROM ${storageConfig.bucketsQualifiedName} WHERE id = $1 LIMIT 1`,
|
|
425
|
+
values: [fileRow.bucket_id],
|
|
426
|
+
});
|
|
427
|
+
const bucketKey = bucketResult.rows[0]?.key;
|
|
428
|
+
if (!bucketKey) {
|
|
429
|
+
log.warn(`Bucket not found for bucket_id=${fileRow.bucket_id}; skipping S3 delete`);
|
|
430
|
+
return;
|
|
431
|
+
}
|
|
432
|
+
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId, bucketKey);
|
|
423
433
|
await deleteS3Object(s3ForDb, fileRow.key);
|
|
424
434
|
log.info(`Sync S3 delete succeeded for key=${fileRow.key}`);
|
|
425
435
|
});
|
package/esm/types.d.ts
CHANGED
|
@@ -130,16 +130,17 @@ export interface S3Config {
|
|
|
130
130
|
*/
|
|
131
131
|
export type S3ConfigOrGetter = S3Config | (() => S3Config);
|
|
132
132
|
/**
|
|
133
|
-
* Function to derive the actual S3 bucket name for a given database.
|
|
133
|
+
* Function to derive the actual S3 bucket name for a given database and bucket key.
|
|
134
134
|
*
|
|
135
135
|
* When provided, the presigned URL plugin calls this on every request
|
|
136
|
-
* to determine which S3 bucket to use — enabling per-database
|
|
136
|
+
* to determine which S3 bucket to use — enabling per-(database, bucketKey)
|
|
137
137
|
* isolation. If not provided, falls back to `s3Config.bucket` (global).
|
|
138
138
|
*
|
|
139
139
|
* @param databaseId - The metaschema database UUID
|
|
140
|
-
* @returns The S3 bucket name for this database
|
|
140
|
+
* @param bucketKey - The logical bucket key (e.g., "public", "private")
|
|
141
|
+
* @returns The S3 bucket name for this database + bucket key
|
|
141
142
|
*/
|
|
142
|
-
export type BucketNameResolver = (databaseId: string) => string;
|
|
143
|
+
export type BucketNameResolver = (databaseId: string, bucketKey: string) => string;
|
|
143
144
|
/**
|
|
144
145
|
* Callback to lazily provision an S3 bucket on first use.
|
|
145
146
|
*
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "graphile-presigned-url-plugin",
|
|
3
|
-
"version": "0.17.1",
|
|
3
|
+
"version": "0.18.0",
|
|
4
4
|
"description": "Presigned URL upload plugin for PostGraphile v5 — requestUploadUrl mutation and downloadUrl computed field",
|
|
5
5
|
"author": "Constructive <developers@constructive.io>",
|
|
6
6
|
"homepage": "https://github.com/constructive-io/constructive",
|
|
@@ -60,5 +60,5 @@
|
|
|
60
60
|
"@types/node": "^22.19.11",
|
|
61
61
|
"makage": "^0.3.0"
|
|
62
62
|
},
|
|
63
|
-
"gitHead": "
|
|
63
|
+
"gitHead": "28b0b236e65b2a2228acad4fd840543c04b24825"
|
|
64
64
|
}
|
package/plugin.js
CHANGED
|
@@ -78,10 +78,10 @@ function resolveS3(options) {
|
|
|
78
78
|
}
|
|
79
79
|
return options.s3;
|
|
80
80
|
}
|
|
81
|
-
function resolveS3ForDatabase(options, storageConfig, databaseId) {
|
|
81
|
+
function resolveS3ForDatabase(options, storageConfig, databaseId, bucketKey) {
|
|
82
82
|
const globalS3 = resolveS3(options);
|
|
83
83
|
const bucket = options.resolveBucketName
|
|
84
|
-
? options.resolveBucketName(databaseId)
|
|
84
|
+
? options.resolveBucketName(databaseId, bucketKey)
|
|
85
85
|
: globalS3.bucket;
|
|
86
86
|
const publicUrlPrefix = storageConfig.publicUrlPrefix ?? globalS3.publicUrlPrefix;
|
|
87
87
|
if (bucket === globalS3.bucket && publicUrlPrefix === globalS3.publicUrlPrefix) {
|
|
@@ -218,7 +218,7 @@ function createPresignedUrlPlugin(options) {
|
|
|
218
218
|
const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
|
|
219
219
|
if (!bucket)
|
|
220
220
|
throw new Error('BUCKET_NOT_FOUND');
|
|
221
|
-
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
|
|
221
|
+
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId, bucket.key);
|
|
222
222
|
await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
|
|
223
223
|
return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
|
|
224
224
|
contentHash: vals.contentHash,
|
|
@@ -314,7 +314,7 @@ function createPresignedUrlPlugin(options) {
|
|
|
314
314
|
if (totalSize > storageConfig.maxBulkTotalSize) {
|
|
315
315
|
throw new Error(`BULK_UPLOAD_SIZE_EXCEEDED: ${totalSize} bytes exceeds maximum of ${storageConfig.maxBulkTotalSize} bytes per batch`);
|
|
316
316
|
}
|
|
317
|
-
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
|
|
317
|
+
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId, bucket.key);
|
|
318
318
|
await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
|
|
319
319
|
const results = [];
|
|
320
320
|
for (const file of filesArray) {
|
|
@@ -423,7 +423,17 @@ function createPresignedUrlPlugin(options) {
|
|
|
423
423
|
return;
|
|
424
424
|
}
|
|
425
425
|
// No other references — attempt sync S3 delete
|
|
426
|
-
|
|
426
|
+
// Look up the bucket key for scoped S3 resolution
|
|
427
|
+
const bucketResult = await pgClient.query({
|
|
428
|
+
text: `SELECT key FROM ${storageConfig.bucketsQualifiedName} WHERE id = $1 LIMIT 1`,
|
|
429
|
+
values: [fileRow.bucket_id],
|
|
430
|
+
});
|
|
431
|
+
const bucketKey = bucketResult.rows[0]?.key;
|
|
432
|
+
if (!bucketKey) {
|
|
433
|
+
log.warn(`Bucket not found for bucket_id=${fileRow.bucket_id}; skipping S3 delete`);
|
|
434
|
+
return;
|
|
435
|
+
}
|
|
436
|
+
const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId, bucketKey);
|
|
427
437
|
await (0, s3_signer_1.deleteS3Object)(s3ForDb, fileRow.key);
|
|
428
438
|
log.info(`Sync S3 delete succeeded for key=${fileRow.key}`);
|
|
429
439
|
});
|
package/types.d.ts
CHANGED
|
@@ -130,16 +130,17 @@ export interface S3Config {
|
|
|
130
130
|
*/
|
|
131
131
|
export type S3ConfigOrGetter = S3Config | (() => S3Config);
|
|
132
132
|
/**
|
|
133
|
-
* Function to derive the actual S3 bucket name for a given database.
|
|
133
|
+
* Function to derive the actual S3 bucket name for a given database and bucket key.
|
|
134
134
|
*
|
|
135
135
|
* When provided, the presigned URL plugin calls this on every request
|
|
136
|
-
* to determine which S3 bucket to use — enabling per-database
|
|
136
|
+
* to determine which S3 bucket to use — enabling per-(database, bucketKey)
|
|
137
137
|
* isolation. If not provided, falls back to `s3Config.bucket` (global).
|
|
138
138
|
*
|
|
139
139
|
* @param databaseId - The metaschema database UUID
|
|
140
|
-
* @returns The S3 bucket name for this database
|
|
140
|
+
* @param bucketKey - The logical bucket key (e.g., "public", "private")
|
|
141
|
+
* @returns The S3 bucket name for this database + bucket key
|
|
141
142
|
*/
|
|
142
|
-
export type BucketNameResolver = (databaseId: string) => string;
|
|
143
|
+
export type BucketNameResolver = (databaseId: string, bucketKey: string) => string;
|
|
143
144
|
/**
|
|
144
145
|
* Callback to lazily provision an S3 bucket on first use.
|
|
145
146
|
*
|