graphile-presigned-url-plugin 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +23 -0
- package/README.md +93 -0
- package/download-url-field.d.ts +27 -0
- package/download-url-field.js +104 -0
- package/esm/download-url-field.d.ts +27 -0
- package/esm/download-url-field.js +101 -0
- package/esm/index.d.ts +34 -0
- package/esm/index.js +33 -0
- package/esm/plugin.d.ts +23 -0
- package/esm/plugin.js +339 -0
- package/esm/preset.d.ts +34 -0
- package/esm/preset.js +41 -0
- package/esm/s3-signer.d.ts +44 -0
- package/esm/s3-signer.js +87 -0
- package/esm/storage-module-cache.d.ts +40 -0
- package/esm/storage-module-cache.js +180 -0
- package/esm/types.d.ts +116 -0
- package/esm/types.js +1 -0
- package/index.d.ts +34 -0
- package/index.js +47 -0
- package/package.json +62 -0
- package/plugin.d.ts +23 -0
- package/plugin.js +343 -0
- package/preset.d.ts +34 -0
- package/preset.js +44 -0
- package/s3-signer.d.ts +44 -0
- package/s3-signer.js +92 -0
- package/storage-module-cache.d.ts +40 -0
- package/storage-module-cache.js +186 -0
- package/types.d.ts +116 -0
- package/types.js +2 -0
package/plugin.js
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
1
|
+
"use strict";
/**
 * Presigned URL Plugin for PostGraphile v5
 *
 * Adds presigned URL upload support to PostGraphile v5:
 *
 * 1. `requestUploadUrl` mutation — generates a presigned PUT URL for direct
 *    client-to-S3 upload. Checks bucket access via RLS, deduplicates by
 *    content hash, tracks the request in upload_requests.
 *
 * 2. `confirmUpload` mutation — confirms a file was uploaded to S3, verifies
 *    the object exists with correct content-type, transitions file status
 *    from 'pending' to 'ready'.
 *
 * 3. `downloadUrl` computed field on File types — generates presigned GET URLs
 *    for private files, returns public URL prefix + key for public files.
 *
 * Uses the extendSchema + grafast plan pattern (same as PublicKeySignature).
 */
Object.defineProperty(exports, "__esModule", { value: true });
// `PresignedUrlPlugin` is assigned after the factory is declared (see module tail).
exports.PresignedUrlPlugin = void 0;
exports.createPresignedUrlPlugin = createPresignedUrlPlugin;
const grafast_1 = require("grafast");
const graphile_utils_1 = require("graphile-utils");
const logger_1 = require("@pgpmjs/logger");
const storage_module_cache_1 = require("./storage-module-cache");
const s3_signer_1 = require("./s3-signer");
// Namespaced logger for this module.
const log = new logger_1.Logger('graphile-presigned-url:plugin');
// --- Protocol-level constants (not configurable) ---
// Upper bounds on raw input strings, enforced before any database work.
// These are sanity caps on wire input; the real business limits
// (max file size, max filename length) come from the storage module config.
const MAX_CONTENT_HASH_LENGTH = 128;
const MAX_CONTENT_TYPE_LENGTH = 255;
const MAX_BUCKET_KEY_LENGTH = 255;
// Exactly 64 lowercase hex characters — the canonical SHA-256 hex encoding.
const SHA256_HEX_REGEX = /^[a-f0-9]{64}$/;
// --- Helpers ---
|
|
35
|
+
/**
 * Check that a string is a well-formed SHA-256 digest: exactly 64
 * lowercase hexadecimal characters (see SHA256_HEX_REGEX above).
 *
 * @param hash - candidate digest string
 * @returns true when the string matches the canonical hex encoding
 */
function isValidSha256(hash) {
    const looksLikeDigest = SHA256_HEX_REGEX.test(hash);
    return looksLikeDigest;
}
|
|
41
|
+
/**
 * Derive the S3 object key for a file.
 *
 * Storage is content-addressed with a flat namespace: the key is simply
 * the SHA-256 content hash, so identical content always maps to the same
 * object (which is what makes dedup safe).
 *
 * @param contentHash - validated 64-char hex SHA-256 digest
 * @returns the S3 object key
 */
function buildS3Key(contentHash) {
    const s3Key = contentHash;
    return s3Key;
}
|
|
48
|
+
/**
 * Resolve the database_id from the JWT context.
 *
 * The server middleware sets jwt.claims.database_id, which is accessible
 * via jwt_private.current_database_id() — a simple function call, no
 * metaschema query needed.
 *
 * @param pgClient - pg client bound to the current request's settings
 * @returns the database UUID, or null when no claim is present
 */
async function resolveDatabaseId(pgClient) {
    const { rows } = await pgClient.query(`SELECT jwt_private.current_database_id() AS id`);
    const firstRow = rows[0];
    return firstRow?.id ?? null;
}
|
|
58
|
+
// --- Plugin factory ---
/**
 * Build the presigned-URL mutation plugin.
 *
 * Captures the S3 configuration (`options.s3`) in a closure and returns a
 * graphile-utils `extendSchema` plugin adding two mutations:
 *
 *  - `requestUploadUrl`: validates input, enforces module/bucket limits,
 *    deduplicates by content hash, inserts a pending file row, and signs a
 *    presigned PUT URL.
 *  - `confirmUpload`: verifies the object exists in S3 and transitions the
 *    file from 'pending' to 'ready' (idempotent on re-confirmation).
 *
 * NOTE(review): queries interpolate `storageConfig.filesQualifiedName` /
 * `uploadRequestsQualifiedName` directly into SQL text — this assumes these
 * identifiers come from trusted, pre-validated module config, never from
 * user input. TODO confirm against storage-module-cache.
 *
 * @param options - PresignedUrlPluginOptions; only `options.s3` is read here
 * @returns a GraphileConfig plugin
 */
function createPresignedUrlPlugin(options) {
    const { s3 } = options;
    // NOTE(review): `size: Int!` is a 32-bit GraphQL Int, capping declarable
    // uploads at ~2.1 GB regardless of bucket limits — presumably intentional
    // for presigned single-PUT uploads; confirm.
    return (0, graphile_utils_1.extendSchema)(() => ({
        typeDefs: (0, graphile_utils_1.gql) `
      input RequestUploadUrlInput {
        """Bucket key (e.g., "public", "private")"""
        bucketKey: String!
        """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
        contentHash: String!
        """MIME type of the file (e.g., "image/png")"""
        contentType: String!
        """File size in bytes"""
        size: Int!
        """Original filename (optional, for display and Content-Disposition)"""
        filename: String
      }

      type RequestUploadUrlPayload {
        """Presigned PUT URL (null if file was deduplicated)"""
        uploadUrl: String
        """The file ID (existing if deduplicated, new if fresh upload)"""
        fileId: UUID!
        """The S3 object key"""
        key: String!
        """Whether this file was deduplicated (already exists with same hash)"""
        deduplicated: Boolean!
        """Presigned URL expiry time (null if deduplicated)"""
        expiresAt: Datetime
      }

      input ConfirmUploadInput {
        """The file ID returned by requestUploadUrl"""
        fileId: UUID!
      }

      type ConfirmUploadPayload {
        """The confirmed file ID"""
        fileId: UUID!
        """New file status"""
        status: String!
        """Whether confirmation succeeded"""
        success: Boolean!
      }

      extend type Mutation {
        """
        Request a presigned URL for uploading a file directly to S3.
        Client computes SHA-256 of the file content and provides it here.
        If a file with the same hash already exists (dedup), returns the
        existing file ID and deduplicated=true with no uploadUrl.
        """
        requestUploadUrl(
          input: RequestUploadUrlInput!
        ): RequestUploadUrlPayload

        """
        Confirm that a file has been uploaded to S3.
        Verifies the object exists in S3, checks content-type,
        and transitions the file status from 'pending' to 'ready'.
        """
        confirmUpload(
          input: ConfirmUploadInput!
        ): ConfirmUploadPayload
      }
    `,
        plans: {
            Mutation: {
                requestUploadUrl(_$mutation, fieldArgs) {
                    // Bundle the raw input and context values into one step so the
                    // lambda below receives them together at execution time.
                    const $input = fieldArgs.getRaw('input');
                    const $withPgClient = (0, grafast_1.context)().get('withPgClient');
                    const $pgSettings = (0, grafast_1.context)().get('pgSettings');
                    const $combined = (0, grafast_1.object)({
                        input: $input,
                        withPgClient: $withPgClient,
                        pgSettings: $pgSettings,
                    });
                    return (0, grafast_1.lambda)($combined, async ({ input, withPgClient, pgSettings }) => {
                        // --- Input validation ---
                        // Cheap shape/length checks first; no DB round-trips yet.
                        const { bucketKey, contentHash, contentType, size, filename } = input;
                        if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
                            throw new Error('INVALID_BUCKET_KEY');
                        }
                        if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
                            throw new Error('INVALID_CONTENT_HASH');
                        }
                        if (!isValidSha256(contentHash)) {
                            throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
                        }
                        if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
                            throw new Error('INVALID_CONTENT_TYPE');
                        }
                        // Everything below runs inside a single transaction so the
                        // file row and its upload_request row commit atomically.
                        return withPgClient(pgSettings, async (pgClient) => {
                            await pgClient.query('BEGIN');
                            try {
                                // --- Resolve storage module config (all limits come from here) ---
                                const databaseId = await resolveDatabaseId(pgClient);
                                if (!databaseId) {
                                    throw new Error('DATABASE_NOT_FOUND');
                                }
                                const storageConfig = await (0, storage_module_cache_1.getStorageModuleConfig)(pgClient, databaseId);
                                if (!storageConfig) {
                                    throw new Error('STORAGE_MODULE_NOT_PROVISIONED');
                                }
                                // --- Validate size against storage module default (bucket override checked below) ---
                                if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
                                    throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
                                }
                                if (filename !== undefined && filename !== null) {
                                    if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
                                        throw new Error('INVALID_FILENAME');
                                    }
                                }
                                // --- Look up the bucket (cached; first miss queries via RLS) ---
                                const bucket = await (0, storage_module_cache_1.getBucketConfig)(pgClient, storageConfig, databaseId, bucketKey);
                                if (!bucket) {
                                    throw new Error('BUCKET_NOT_FOUND');
                                }
                                // --- Validate content type against bucket's allowed_mime_types ---
                                // Patterns support exact match, "type/*" wildcards, and "*/*".
                                if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
                                    const allowed = bucket.allowed_mime_types;
                                    const isAllowed = allowed.some((pattern) => {
                                        if (pattern === '*/*')
                                            return true;
                                        if (pattern.endsWith('/*')) {
                                            // keep the trailing '/' so "image/*" matches "image/png"
                                            // but not "imagex/png"
                                            const prefix = pattern.slice(0, -1);
                                            return contentType.startsWith(prefix);
                                        }
                                        return contentType === pattern;
                                    });
                                    if (!isAllowed) {
                                        throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
                                    }
                                }
                                // --- Validate size against bucket's max_file_size ---
                                if (bucket.max_file_size && size > bucket.max_file_size) {
                                    throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
                                }
                                const s3Key = buildS3Key(contentHash);
                                // --- Dedup check: look for existing file with same content_hash in this bucket ---
                                const dedupResult = await pgClient.query(`SELECT id, status
                 FROM ${storageConfig.filesQualifiedName}
                 WHERE content_hash = $1
                   AND bucket_id = $2
                   AND status IN ('ready', 'processed')
                 LIMIT 1`, [contentHash, bucket.id]);
                                if (dedupResult.rows.length > 0) {
                                    const existingFile = dedupResult.rows[0];
                                    log.info(`Dedup hit: file ${existingFile.id} for hash ${contentHash}`);
                                    // Track the dedup request
                                    await pgClient.query(`INSERT INTO ${storageConfig.uploadRequestsQualifiedName}
                   (file_id, bucket_id, key, content_type, content_hash, size, status, expires_at)
                   VALUES ($1, $2, $3, $4, $5, $6, 'confirmed', NOW())`, [existingFile.id, bucket.id, s3Key, contentType, contentHash, size]);
                                    await pgClient.query('COMMIT');
                                    // No uploadUrl: the content already exists, nothing to PUT.
                                    return {
                                        uploadUrl: null,
                                        fileId: existingFile.id,
                                        key: s3Key,
                                        deduplicated: true,
                                        expiresAt: null,
                                    };
                                }
                                // --- Create file record (status=pending) ---
                                const fileResult = await pgClient.query(`INSERT INTO ${storageConfig.filesQualifiedName}
                 (bucket_id, key, content_type, content_hash, size, filename, owner_id, is_public, status)
                 VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'pending')
                 RETURNING id`, [
                                    bucket.id,
                                    s3Key,
                                    contentType,
                                    contentHash,
                                    size,
                                    filename || null,
                                    bucket.owner_id,
                                    bucket.is_public,
                                ]);
                                const fileId = fileResult.rows[0].id;
                                // --- Generate presigned PUT URL ---
                                // NOTE(review): this S3 call happens inside the open DB
                                // transaction; a slow signer holds the transaction open.
                                // Signing is local CPU work with the v3 presigner, so this
                                // is presumably fine — confirm no network I/O occurs here.
                                const uploadUrl = await (0, s3_signer_1.generatePresignedPutUrl)(s3, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
                                const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
                                // --- Track the upload request ---
                                await pgClient.query(`INSERT INTO ${storageConfig.uploadRequestsQualifiedName}
                 (file_id, bucket_id, key, content_type, content_hash, size, status, expires_at)
                 VALUES ($1, $2, $3, $4, $5, $6, 'issued', $7)`, [fileId, bucket.id, s3Key, contentType, contentHash, size, expiresAt]);
                                await pgClient.query('COMMIT');
                                return {
                                    uploadUrl,
                                    fileId,
                                    key: s3Key,
                                    deduplicated: false,
                                    expiresAt,
                                };
                            }
                            catch (err) {
                                // Any failure aborts the transaction; errors propagate to GraphQL.
                                await pgClient.query('ROLLBACK');
                                throw err;
                            }
                        });
                    });
                },
                confirmUpload(_$mutation, fieldArgs) {
                    // Same plan shape as requestUploadUrl: raw input + pg context
                    // combined into one step for the execution-time lambda.
                    const $input = fieldArgs.getRaw('input');
                    const $withPgClient = (0, grafast_1.context)().get('withPgClient');
                    const $pgSettings = (0, grafast_1.context)().get('pgSettings');
                    const $combined = (0, grafast_1.object)({
                        input: $input,
                        withPgClient: $withPgClient,
                        pgSettings: $pgSettings,
                    });
                    return (0, grafast_1.lambda)($combined, async ({ input, withPgClient, pgSettings }) => {
                        const { fileId } = input;
                        if (!fileId || typeof fileId !== 'string') {
                            throw new Error('INVALID_FILE_ID');
                        }
                        return withPgClient(pgSettings, async (pgClient) => {
                            await pgClient.query('BEGIN');
                            try {
                                // --- Resolve storage module config ---
                                const databaseId = await resolveDatabaseId(pgClient);
                                if (!databaseId) {
                                    throw new Error('DATABASE_NOT_FOUND');
                                }
                                const storageConfig = await (0, storage_module_cache_1.getStorageModuleConfig)(pgClient, databaseId);
                                if (!storageConfig) {
                                    throw new Error('STORAGE_MODULE_NOT_PROVISIONED');
                                }
                                // --- Look up the file (RLS enforced) ---
                                const fileResult = await pgClient.query(`SELECT id, key, content_type, status, bucket_id
                 FROM ${storageConfig.filesQualifiedName}
                 WHERE id = $1
                 LIMIT 1`, [fileId]);
                                if (fileResult.rows.length === 0) {
                                    throw new Error('FILE_NOT_FOUND');
                                }
                                const file = fileResult.rows[0];
                                if (file.status !== 'pending') {
                                    // File is already confirmed or processed — idempotent success
                                    await pgClient.query('COMMIT');
                                    return {
                                        fileId: file.id,
                                        status: file.status,
                                        success: true,
                                    };
                                }
                                // --- Verify file exists in S3 ---
                                const s3Head = await (0, s3_signer_1.headObject)(s3, file.key, file.content_type);
                                if (!s3Head) {
                                    throw new Error('FILE_NOT_IN_S3: the file has not been uploaded yet');
                                }
                                // --- Content-type verification ---
                                if (s3Head.contentType && s3Head.contentType !== file.content_type) {
                                    // Mark upload_request as rejected
                                    await pgClient.query(`UPDATE ${storageConfig.uploadRequestsQualifiedName}
                   SET status = 'rejected'
                   WHERE file_id = $1 AND status = 'issued'`, [fileId]);
                                    await pgClient.query('COMMIT');
                                    // NOTE(review): throwing AFTER the COMMIT means the outer
                                    // catch issues a ROLLBACK outside any transaction (Postgres
                                    // emits a warning, not an error). The 'rejected' status is
                                    // persisted either way — confirm this ordering is intended.
                                    throw new Error(`CONTENT_TYPE_MISMATCH: expected ${file.content_type}, got ${s3Head.contentType}`);
                                }
                                // --- Transition file to 'ready' ---
                                await pgClient.query(`UPDATE ${storageConfig.filesQualifiedName}
                 SET status = 'ready'
                 WHERE id = $1`, [fileId]);
                                // --- Update upload_request to 'confirmed' ---
                                await pgClient.query(`UPDATE ${storageConfig.uploadRequestsQualifiedName}
                 SET status = 'confirmed', confirmed_at = NOW()
                 WHERE file_id = $1 AND status = 'issued'`, [fileId]);
                                await pgClient.query('COMMIT');
                                return {
                                    fileId: file.id,
                                    status: 'ready',
                                    success: true,
                                };
                            }
                            catch (err) {
                                await pgClient.query('ROLLBACK');
                                throw err;
                            }
                        });
                    });
                },
            },
        },
    }));
}
|
|
342
|
+
// Export the factory under both a named alias and as the default export,
// so consumers can `require()` it either way.
exports.PresignedUrlPlugin = createPresignedUrlPlugin;
exports.default = exports.PresignedUrlPlugin;
|
package/preset.d.ts
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
 * PostGraphile v5 Presigned URL Preset
 *
 * Provides a convenient preset for including presigned URL upload support
 * in PostGraphile. Combines the main mutation plugin (requestUploadUrl,
 * confirmUpload) with the downloadUrl computed field plugin.
 */
import type { GraphileConfig } from 'graphile-config';
import type { PresignedUrlPluginOptions } from './types';
/**
 * Creates a preset that includes the presigned URL plugins with the given options.
 *
 * @param options - S3 client/bucket configuration shared by both plugins
 * @returns a GraphileConfig.Preset bundling the mutation and downloadUrl plugins
 *
 * @example
 * ```typescript
 * import { PresignedUrlPreset } from 'graphile-presigned-url-plugin';
 * import { S3Client } from '@aws-sdk/client-s3';
 *
 * const s3Client = new S3Client({ region: 'us-east-1' });
 *
 * const preset = {
 *   extends: [
 *     PresignedUrlPreset({
 *       s3: {
 *         client: s3Client,
 *         bucket: 'my-bucket',
 *         publicUrlPrefix: 'https://cdn.example.com',
 *       },
 *     }),
 *   ],
 * };
 * ```
 */
export declare function PresignedUrlPreset(options: PresignedUrlPluginOptions): GraphileConfig.Preset;
export default PresignedUrlPreset;
|
package/preset.js
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
"use strict";
/**
 * PostGraphile v5 Presigned URL Preset
 *
 * Provides a convenient preset for including presigned URL upload support
 * in PostGraphile. Combines the main mutation plugin (requestUploadUrl,
 * confirmUpload) with the downloadUrl computed field plugin.
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.PresignedUrlPreset = PresignedUrlPreset;
// Sibling plugin factories bundled by this preset.
const plugin_1 = require("./plugin");
const download_url_field_1 = require("./download-url-field");
|
|
13
|
+
/**
 * Creates a preset that includes the presigned URL plugins with the given options.
 *
 * The same options object is handed to both factories, so S3 configuration
 * only has to be supplied once.
 *
 * @param options - PresignedUrlPluginOptions (S3 client, bucket, publicUrlPrefix)
 * @returns a GraphileConfig preset containing both plugins
 *
 * @example
 * ```typescript
 * import { PresignedUrlPreset } from 'graphile-presigned-url-plugin';
 * import { S3Client } from '@aws-sdk/client-s3';
 *
 * const s3Client = new S3Client({ region: 'us-east-1' });
 *
 * const preset = {
 *   extends: [
 *     PresignedUrlPreset({
 *       s3: {
 *         client: s3Client,
 *         bucket: 'my-bucket',
 *         publicUrlPrefix: 'https://cdn.example.com',
 *       },
 *     }),
 *   ],
 * };
 * ```
 */
function PresignedUrlPreset(options) {
    const plugins = [
        (0, plugin_1.createPresignedUrlPlugin)(options),
        (0, download_url_field_1.createDownloadUrlPlugin)(options),
    ];
    return { plugins };
}
|
|
44
|
+
// Also exposed as the default export for `import PresignedUrlPreset from ...`.
exports.default = PresignedUrlPreset;
|
package/s3-signer.d.ts
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import type { S3Config } from './types';
/**
 * Generate a presigned PUT URL for uploading a file to S3.
 *
 * The presigned URL is locked to the specific key, content-type, and
 * content-length via the signature. The client MUST use these exact values
 * when performing the PUT request.
 *
 * @param s3Config - S3 client and bucket configuration
 * @param key - S3 object key (content hash or UUID)
 * @param contentType - MIME type (locked into the signature)
 * @param contentLength - File size in bytes (locked into the signature)
 * @param expiresIn - URL expiry in seconds (default: 900 = 15 minutes)
 * @returns Presigned PUT URL
 */
export declare function generatePresignedPutUrl(s3Config: S3Config, key: string, contentType: string, contentLength: number, expiresIn?: number): Promise<string>;
/**
 * Generate a presigned GET URL for downloading a file from S3.
 *
 * Used for private files that shouldn't be served through a public CDN.
 * For public files, the downloadUrl field returns the public URL prefix + key.
 *
 * @param s3Config - S3 client and bucket configuration
 * @param key - S3 object key
 * @param expiresIn - URL expiry in seconds (default: 3600 = 1 hour)
 * @param filename - Optional filename for Content-Disposition header
 * @returns Presigned GET URL
 */
export declare function generatePresignedGetUrl(s3Config: S3Config, key: string, expiresIn?: number, filename?: string): Promise<string>;
/**
 * Check if an object exists in S3 and optionally verify its content-type.
 *
 * Used by confirmUpload to verify the file was actually uploaded to S3
 * and that the content-type matches what was declared.
 *
 * Note: a content-type mismatch is logged by the implementation but does
 * NOT return null — callers must compare `contentType` themselves.
 *
 * @param s3Config - S3 client and bucket configuration
 * @param key - S3 object key
 * @param expectedContentType - Expected MIME type (optional)
 * @returns Object metadata if exists, null if not found
 */
export declare function headObject(s3Config: S3Config, key: string, expectedContentType?: string): Promise<{
    contentType?: string;
    contentLength?: number;
} | null>;
|
package/s3-signer.js
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
"use strict";
/**
 * S3 presigned-URL helpers: PUT signing, GET signing, and HEAD existence
 * checks, built on AWS SDK v3 command objects.
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.generatePresignedPutUrl = generatePresignedPutUrl;
exports.generatePresignedGetUrl = generatePresignedGetUrl;
exports.headObject = headObject;
const client_s3_1 = require("@aws-sdk/client-s3");
const s3_request_presigner_1 = require("@aws-sdk/s3-request-presigner");
const logger_1 = require("@pgpmjs/logger");
// Namespaced logger for all signing operations in this module.
const log = new logger_1.Logger('graphile-presigned-url:s3');
|
|
10
|
+
/**
 * Generate a presigned PUT URL for uploading a file to S3.
 *
 * Key, content-type, and content-length are all baked into the signature,
 * so the uploading client must use these exact values in its PUT request
 * or S3 will reject it.
 *
 * @param s3Config - S3 client and bucket configuration
 * @param key - S3 object key (content hash or UUID)
 * @param contentType - MIME type (locked into the signature)
 * @param contentLength - File size in bytes (locked into the signature)
 * @param expiresIn - URL expiry in seconds (default: 900 = 15 minutes)
 * @returns Presigned PUT URL
 */
async function generatePresignedPutUrl(s3Config, key, contentType, contentLength, expiresIn = 900) {
    const putParams = {
        Bucket: s3Config.bucket,
        Key: key,
        ContentType: contentType,
        ContentLength: contentLength,
    };
    const putCommand = new client_s3_1.PutObjectCommand(putParams);
    const signedUrl = await (0, s3_request_presigner_1.getSignedUrl)(s3Config.client, putCommand, { expiresIn });
    log.debug(`Generated presigned PUT URL for key=${key}, contentType=${contentType}, expires=${expiresIn}s`);
    return signedUrl;
}
|
|
35
|
+
/**
 * Generate a presigned GET URL for downloading a file from S3.
 *
 * Used for private files that shouldn't be served through a public CDN.
 * For public files, the downloadUrl field returns the public URL prefix + key.
 *
 * When a filename is supplied, it is sanitized (quotes, backslashes, and
 * CR/LF become '_') and baked into a Content-Disposition response header.
 *
 * @param s3Config - S3 client and bucket configuration
 * @param key - S3 object key
 * @param expiresIn - URL expiry in seconds (default: 3600 = 1 hour)
 * @param filename - Optional filename for Content-Disposition header
 * @returns Presigned GET URL
 */
async function generatePresignedGetUrl(s3Config, key, expiresIn = 3600, filename) {
    const getParams = { Bucket: s3Config.bucket, Key: key };
    if (filename) {
        const safeName = filename.replace(/["\\\r\n]/g, '_');
        getParams.ResponseContentDisposition = `attachment; filename="${safeName}"`;
    }
    const getCommand = new client_s3_1.GetObjectCommand(getParams);
    const signedUrl = await (0, s3_request_presigner_1.getSignedUrl)(s3Config.client, getCommand, { expiresIn });
    log.debug(`Generated presigned GET URL for key=${key}, expires=${expiresIn}s`);
    return signedUrl;
}
|
|
61
|
+
/**
 * Check if an object exists in S3 and optionally verify its content-type.
 *
 * Used by confirmUpload to verify the file was actually uploaded to S3
 * and that the content-type matches what was declared.
 *
 * A content-type mismatch is only logged here — metadata is still returned
 * and the caller decides how to react.
 *
 * @param s3Config - S3 client and bucket configuration
 * @param key - S3 object key
 * @param expectedContentType - Expected MIME type (optional)
 * @returns Object metadata if exists, null if not found
 */
async function headObject(s3Config, key, expectedContentType) {
    try {
        const headCommand = new client_s3_1.HeadObjectCommand({
            Bucket: s3Config.bucket,
            Key: key,
        });
        const response = await s3Config.client.send(headCommand);
        if (expectedContentType && response.ContentType !== expectedContentType) {
            log.warn(`Content-type mismatch for key=${key}: expected=${expectedContentType}, actual=${response.ContentType}`);
        }
        return { contentType: response.ContentType, contentLength: response.ContentLength };
    }
    catch (e) {
        // Missing objects surface either as a 'NotFound' error name or a 404
        // metadata status depending on SDK version; treat both as "not there".
        const isMissing = e.name === 'NotFound' || e.$metadata?.httpStatusCode === 404;
        if (!isMissing) {
            throw e;
        }
        return null;
    }
}
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import type { StorageModuleConfig, BucketConfig } from './types';
/**
 * Resolve the storage module config for a database, using the LRU cache.
 *
 * The pgClient parameter is structurally typed: any object exposing a
 * `query(sql, params) -> Promise<{ rows }>` method is accepted.
 *
 * @param pgClient - A pg client from the Graphile context (withPgClient or pgClient)
 * @param databaseId - The metaschema database UUID
 * @returns StorageModuleConfig or null if no storage module is provisioned
 */
export declare function getStorageModuleConfig(pgClient: {
    query: (sql: string, params: unknown[]) => Promise<{
        rows: unknown[];
    }>;
}, databaseId: string): Promise<StorageModuleConfig | null>;
/**
 * Resolve bucket metadata for a given database + bucket key, using the LRU cache.
 *
 * On cache miss, queries the bucket table (RLS-enforced via pgSettings on
 * the pgClient). On cache hit, returns the cached metadata directly.
 *
 * @param pgClient - A pg client from the Graphile context
 * @param storageConfig - The resolved StorageModuleConfig for this database
 * @param databaseId - The metaschema database UUID (used as cache key prefix)
 * @param bucketKey - The bucket key (e.g., "public", "private")
 * @returns BucketConfig or null if the bucket doesn't exist / isn't accessible
 */
export declare function getBucketConfig(pgClient: {
    query: (sql: string, params: unknown[]) => Promise<{
        rows: unknown[];
    }>;
}, storageConfig: StorageModuleConfig, databaseId: string, bucketKey: string): Promise<BucketConfig | null>;
/**
 * Clear the storage module cache AND bucket cache.
 * Useful for testing or schema changes.
 */
export declare function clearStorageModuleCache(): void;
/**
 * Clear cached bucket entries for a specific database.
 * Useful when bucket config changes are detected.
 *
 * @param databaseId - when omitted, presumably clears ALL bucket entries —
 *   confirm against the implementation before relying on this.
 */
export declare function clearBucketCache(databaseId?: string): void;
|