graphile-presigned-url-plugin 0.7.0 → 0.9.0
- package/README.md +0 -2
- package/download-url-field.js +1 -7
- package/esm/download-url-field.js +1 -7
- package/esm/index.d.ts +2 -3
- package/esm/index.js +1 -2
- package/esm/plugin.d.ts +2 -6
- package/esm/plugin.js +338 -220
- package/esm/preset.d.ts +2 -2
- package/esm/preset.js +2 -2
- package/esm/s3-signer.d.ts +1 -2
- package/esm/s3-signer.js +1 -2
- package/esm/storage-module-cache.d.ts +0 -2
- package/esm/storage-module-cache.js +17 -16
- package/esm/types.d.ts +16 -24
- package/index.d.ts +2 -3
- package/index.js +1 -2
- package/package.json +3 -3
- package/plugin.d.ts +2 -6
- package/plugin.js +336 -218
- package/preset.d.ts +2 -2
- package/preset.js +2 -2
- package/s3-signer.d.ts +1 -2
- package/s3-signer.js +1 -2
- package/storage-module-cache.d.ts +0 -2
- package/storage-module-cache.js +17 -16
- package/types.d.ts +16 -24
package/esm/plugin.js
CHANGED
```diff
@@ -5,13 +5,9 @@
  *
  * 1. `requestUploadUrl` mutation — generates a presigned PUT URL for direct
  *    client-to-S3 upload. Checks bucket access via RLS, deduplicates by
- *    content hash
+ *    content hash via UNIQUE(bucket_id, key) constraint.
  *
- * 2. `confirmUpload` mutation — verifies
- *    the object exists with correct content-type, transitions file status
- *    from 'pending' to 'ready'.
- *
- * 3. `downloadUrl` computed field on File types — generates presigned GET URLs
+ * 2. `downloadUrl` computed field on File types — generates presigned GET URLs
  *    for private files, returns public URL prefix + key for public files.
  *
  * Uses the extendSchema + grafast plan pattern (same as PublicKeySignature).
```
```diff
@@ -19,14 +15,16 @@
 import { context as grafastContext, lambda, object } from 'grafast';
 import { extendSchema, gql } from 'graphile-utils';
 import { Logger } from '@pgpmjs/logger';
-import { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig,
-import { generatePresignedPutUrl
+import { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
+import { generatePresignedPutUrl } from './s3-signer';
 const log = new Logger('graphile-presigned-url:plugin');
 // --- Protocol-level constants (not configurable) ---
 const MAX_CONTENT_HASH_LENGTH = 128;
 const MAX_CONTENT_TYPE_LENGTH = 255;
 const MAX_BUCKET_KEY_LENGTH = 255;
+const MAX_CUSTOM_KEY_LENGTH = 1024;
 const SHA256_HEX_REGEX = /^[a-f0-9]{64}$/;
+const CUSTOM_KEY_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9_.\-\/]*$/;
 // --- Helpers ---
 /**
  * Validate a SHA-256 hex string.
```
```diff
@@ -35,12 +33,46 @@ function isValidSha256(hash) {
   return SHA256_HEX_REGEX.test(hash);
 }
 /**
- * Build the S3 key from content hash
+ * Build the S3 key from content hash.
  * Format: {contentHash} (flat namespace, content-addressed)
  */
 function buildS3Key(contentHash) {
   return contentHash;
 }
+/**
+ * Validate a custom S3 key.
+ * Must be 1-1024 chars, no path traversal, no leading slash, no null bytes.
+ */
+function validateCustomKey(key) {
+  if (key.length === 0 || key.length > MAX_CUSTOM_KEY_LENGTH) {
+    return 'INVALID_KEY_LENGTH: must be 1-1024 characters';
+  }
+  if (key.includes('..')) {
+    return 'INVALID_KEY: path traversal (..) not allowed';
+  }
+  if (key.startsWith('/')) {
+    return 'INVALID_KEY: leading slash not allowed';
+  }
+  if (key.includes('\0')) {
+    return 'INVALID_KEY: null bytes not allowed';
+  }
+  if (!CUSTOM_KEY_REGEX.test(key)) {
+    return 'INVALID_KEY: must start with alphanumeric and contain only alphanumeric, dots, hyphens, underscores, and slashes';
+  }
+  return null;
+}
+/**
+ * Derive an ltree path from a custom S3 key's directory portion.
+ * e.g., "reports/2024/Q1/revenue.pdf" → "reports.2024.Q1"
+ * Returns null if the key has no directory component.
+ */
+function derivePathFromKey(key) {
+  const lastSlash = key.lastIndexOf('/');
+  if (lastSlash <= 0)
+    return null;
+  const dir = key.substring(0, lastSlash);
+  return dir.replace(/\//g, '.');
+}
 /**
  * Resolve the database_id from the JWT context.
  * The server middleware sets jwt.claims.database_id, which is accessible
```
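The two helpers added above gate the new custom-key mode. A standalone sketch, with the constants and checks copied from this hunk, shows which keys pass validation and what ltree path is derived:

```js
// Mirrors the plugin's validateCustomKey/derivePathFromKey from the hunk above.
const MAX_CUSTOM_KEY_LENGTH = 1024;
const CUSTOM_KEY_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9_.\-\/]*$/;

function validateCustomKey(key) {
  if (key.length === 0 || key.length > MAX_CUSTOM_KEY_LENGTH) return 'INVALID_KEY_LENGTH';
  if (key.includes('..')) return 'INVALID_KEY: path traversal';
  if (key.startsWith('/')) return 'INVALID_KEY: leading slash';
  if (key.includes('\0')) return 'INVALID_KEY: null bytes';
  if (!CUSTOM_KEY_REGEX.test(key)) return 'INVALID_KEY: bad characters';
  return null; // valid
}

function derivePathFromKey(key) {
  const lastSlash = key.lastIndexOf('/');
  if (lastSlash <= 0) return null;
  return key.substring(0, lastSlash).replace(/\//g, '.');
}

console.log(validateCustomKey('reports/2024/Q1/revenue.pdf')); // null (valid)
console.log(validateCustomKey('../etc/passwd'));               // 'INVALID_KEY: path traversal'
console.log(validateCustomKey('/absolute.pdf'));               // 'INVALID_KEY: leading slash'
console.log(derivePathFromKey('reports/2024/Q1/revenue.pdf')); // 'reports.2024.Q1'
console.log(derivePathFromKey('flat.pdf'));                    // null (no directory part)
```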
```diff
@@ -132,6 +164,14 @@ export function createPresignedUrlPlugin(options) {
         size: Int!
         """Original filename (optional, for display and Content-Disposition)"""
         filename: String
+        """
+        Custom S3 key (e.g., "reports/2024/Q1.pdf").
+        Only allowed when the bucket has allow_custom_keys=true.
+        When omitted, key defaults to contentHash (content-addressed dedup).
+        When provided, the file is stored at this key.
+        Re-uploading to an existing key auto-creates a new version.
+        """
+        key: String
       }

       type RequestUploadUrlPayload {
```
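For orientation, a hypothetical client call exercising the new `key` field. The input type name `RequestUploadUrlInput`, the endpoint, and the plain fetch transport are assumptions (none appear in this hunk); the selected payload fields match the objects the plugin returns further down:

```js
// Hypothetical client-side flow for the custom-key upload path.
const REQUEST_UPLOAD_URL = /* GraphQL */ `
  mutation RequestUpload($input: RequestUploadUrlInput!) {
    requestUploadUrl(input: $input) {
      uploadUrl
      fileId
      key
      deduplicated
      previousVersionId
    }
  }
`;

async function uploadWithCustomKey(endpoint, file, contentHash) {
  const res = await fetch(endpoint, {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify({
      query: REQUEST_UPLOAD_URL,
      variables: {
        input: {
          bucketKey: 'private',                // example bucket key
          contentHash,                         // 64-char lowercase hex SHA-256
          contentType: file.type,
          size: file.size,
          filename: file.name,
          key: `reports/2024/Q1/${file.name}`, // custom key; bucket needs allow_custom_keys=true
        },
      },
    }),
  });
  const { data } = await res.json();
  const payload = data.requestUploadUrl;
  if (!payload.deduplicated) {
    // PUT directly to S3 with the same content type the URL was signed for.
    await fetch(payload.uploadUrl, {
      method: 'PUT',
      headers: { 'content-type': file.type },
      body: file,
    });
  }
  return payload;
}
```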
```diff
@@ -145,22 +185,52 @@
         deduplicated: Boolean!
         """Presigned URL expiry time (null if deduplicated)"""
         expiresAt: Datetime
-        """
-
+        """ID of the previous version (set when re-uploading to an existing custom key)"""
+        previousVersionId: UUID
       }

-      input
-      """
-
+      input BulkUploadFileInput {
+        """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
+        contentHash: String!
+        """MIME type of the file (e.g., "image/png")"""
+        contentType: String!
+        """File size in bytes"""
+        size: Int!
+        """Original filename (optional, for display and Content-Disposition)"""
+        filename: String
+        """Custom S3 key (only when bucket has allow_custom_keys=true)"""
+        key: String
       }

-
-      """
+      input RequestBulkUploadUrlsInput {
+        """Bucket key (e.g., "public", "private")"""
+        bucketKey: String!
+        """Owner entity ID for entity-scoped uploads"""
+        ownerId: UUID
+        """Array of files to upload"""
+        files: [BulkUploadFileInput!]!
+      }
+
+      type BulkUploadFilePayload {
+        """Presigned PUT URL (null if file was deduplicated)"""
+        uploadUrl: String
+        """The file ID"""
         fileId: UUID!
-        """
-
-        """Whether
-
+        """The S3 object key"""
+        key: String!
+        """Whether this file was deduplicated"""
+        deduplicated: Boolean!
+        """Presigned URL expiry time (null if deduplicated)"""
+        expiresAt: Datetime
+        """ID of the previous version (set when re-uploading to an existing custom key)"""
+        previousVersionId: UUID
+        """Index of this file in the input array (for client correlation)"""
+        index: Int!
+      }
+
+      type RequestBulkUploadUrlsPayload {
+        """Array of results, one per input file"""
+        files: [BulkUploadFilePayload!]!
       }

       extend type Mutation {
```
```diff
@@ -175,13 +245,13 @@
         ): RequestUploadUrlPayload

         """
-
-
-
+        Request presigned URLs for uploading multiple files in a single batch.
+        Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size).
+        Each file is processed independently — some may dedup while others get fresh URLs.
         """
-
-        input:
-        ):
+        requestBulkUploadUrls(
+          input: RequestBulkUploadUrlsInput!
+        ): RequestBulkUploadUrlsPayload
       }
     `,
     plans: {
```
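A sketch of calling the new batch mutation, assuming the same fetch transport as in the earlier example (type and field names come from the SDL above). The `index` field is what lets a client line results up with inputs when some entries dedup and others receive fresh URLs:

```js
const REQUEST_BULK_UPLOAD_URLS = /* GraphQL */ `
  mutation RequestBulk($input: RequestBulkUploadUrlsInput!) {
    requestBulkUploadUrls(input: $input) {
      files {
        index
        fileId
        key
        uploadUrl
        deduplicated
        expiresAt
        previousVersionId
      }
    }
  }
`;

async function requestBulkUploadUrls(endpoint, bucketKey, files) {
  const res = await fetch(endpoint, {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify({
      query: REQUEST_BULK_UPLOAD_URLS,
      variables: { input: { bucketKey, files } },
    }),
  });
  const { data } = await res.json();
  // Correlate each payload back to its input via `index`.
  return data.requestBulkUploadUrls.files.map((r) => ({
    input: files[r.index],
    result: r,
  }));
}
```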
```diff
@@ -196,28 +266,33 @@
           pgSettings: $pgSettings,
         });
         return lambda($combined, async ({ input, withPgClient, pgSettings }) => {
-
-
+          const result = await processUpload(options, input, withPgClient, pgSettings);
+          return result;
+        });
+      },
+      requestBulkUploadUrls(_$mutation, fieldArgs) {
+        const $input = fieldArgs.getRaw('input');
+        const $withPgClient = grafastContext().get('withPgClient');
+        const $pgSettings = grafastContext().get('pgSettings');
+        const $combined = object({
+          input: $input,
+          withPgClient: $withPgClient,
+          pgSettings: $pgSettings,
+        });
+        return lambda($combined, async ({ input, withPgClient, pgSettings }) => {
+          const { bucketKey, ownerId, files } = input;
           if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
             throw new Error('INVALID_BUCKET_KEY');
           }
-          if (!
-            throw new Error('
-          }
-          if (!isValidSha256(contentHash)) {
-            throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
-          }
-          if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
-            throw new Error('INVALID_CONTENT_TYPE');
+          if (!Array.isArray(files) || files.length === 0) {
+            throw new Error('INVALID_FILES: must provide at least one file');
           }
           return withPgClient(pgSettings, async (pgClient) => {
             return pgClient.withTransaction(async (txClient) => {
-              // --- Resolve storage module config (all limits come from here) ---
               const databaseId = await resolveDatabaseId(txClient);
               if (!databaseId) {
                 throw new Error('DATABASE_NOT_FOUND');
               }
-              // --- Resolve storage module (app-level or entity-scoped) ---
               const storageConfig = ownerId
                 ? await getStorageModuleConfigForOwner(txClient, databaseId, ownerId)
                 : await getStorageModuleConfig(txClient, databaseId);
```
```diff
@@ -226,198 +301,34 @@
                   ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
                   : 'STORAGE_MODULE_NOT_PROVISIONED');
               }
-              // --- Validate
-              if (
-                throw new Error(`
+              // --- Validate bulk limits ---
+              if (files.length > storageConfig.maxBulkFiles) {
+                throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
               }
-
-
-
-              }
+              const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
+              if (totalSize > storageConfig.maxBulkTotalSize) {
+                throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
               }
-              // --- Look up the bucket (cached; first miss queries via RLS) ---
               const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId);
               if (!bucket) {
                 throw new Error('BUCKET_NOT_FOUND');
               }
-              // ---
-              if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
-                const allowed = bucket.allowed_mime_types;
-                const isAllowed = allowed.some((pattern) => {
-                  if (pattern === '*/*')
-                    return true;
-                  if (pattern.endsWith('/*')) {
-                    const prefix = pattern.slice(0, -1);
-                    return contentType.startsWith(prefix);
-                  }
-                  return contentType === pattern;
-                });
-                if (!isAllowed) {
-                  throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
-                }
-              }
-              // --- Validate size against bucket's max_file_size ---
-              if (bucket.max_file_size && size > bucket.max_file_size) {
-                throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
-              }
-              const s3Key = buildS3Key(contentHash);
-              // --- Dedup check: look for existing file with same key (content hash) in this bucket ---
-              const dedupResult = await txClient.query({
-                text: `SELECT id, status
-                       FROM ${storageConfig.filesQualifiedName}
-                       WHERE key = $1
-                         AND bucket_id = $2
-                         AND status IN ('ready', 'processed')
-                       LIMIT 1`,
-                values: [s3Key, bucket.id],
-              });
-              if (dedupResult.rows.length > 0) {
-                const existingFile = dedupResult.rows[0];
-                log.info(`Dedup hit: file ${existingFile.id} for hash ${contentHash}`);
-                // Track the dedup request
-                await txClient.query({
-                  text: `INSERT INTO ${storageConfig.uploadRequestsQualifiedName}
-                         (file_id, bucket_id, key, content_type, content_hash, status, expires_at)
-                         VALUES ($1, $2, $3, $4, $5, 'confirmed', NOW())`,
-                  values: [existingFile.id, bucket.id, s3Key, contentType, contentHash],
-                });
-                return {
-                  uploadUrl: null,
-                  fileId: existingFile.id,
-                  key: s3Key,
-                  deduplicated: true,
-                  expiresAt: null,
-                  status: existingFile.status,
-                };
-              }
-              // --- Create file record (status=pending) ---
-              // For app-level storage (no owner_id column), omit owner_id from the INSERT.
-              const hasOwnerColumn = storageConfig.membershipType !== null;
-              const fileResult = await txClient.query({
-                text: hasOwnerColumn
-                  ? `INSERT INTO ${storageConfig.filesQualifiedName}
-                     (bucket_id, key, mime_type, size, filename, owner_id, is_public, status)
-                     VALUES ($1, $2, $3, $4, $5, $6, $7, 'pending')
-                     RETURNING id`
-                  : `INSERT INTO ${storageConfig.filesQualifiedName}
-                     (bucket_id, key, mime_type, size, filename, is_public, status)
-                     VALUES ($1, $2, $3, $4, $5, $6, 'pending')
-                     RETURNING id`,
-                values: hasOwnerColumn
-                  ? [
-                    bucket.id,
-                    s3Key,
-                    contentType,
-                    size,
-                    filename || null,
-                    bucket.owner_id,
-                    bucket.is_public,
-                  ]
-                  : [
-                    bucket.id,
-                    s3Key,
-                    contentType,
-                    size,
-                    filename || null,
-                    bucket.is_public,
-                  ],
-              });
-              const fileId = fileResult.rows[0].id;
-              // --- Ensure the S3 bucket exists (lazy provisioning) ---
+              // --- Ensure S3 bucket exists once for the batch ---
               const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
               await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
-              // ---
-              const
-
-
-
-
-
-                values: [fileId, bucket.id, s3Key, contentType, contentHash, expiresAt],
-              });
-              return {
-                uploadUrl,
-                fileId,
-                key: s3Key,
-                deduplicated: false,
-                expiresAt,
-                status: 'pending',
-              };
-            });
-          });
-        });
-      },
-      confirmUpload(_$mutation, fieldArgs) {
-        const $input = fieldArgs.getRaw('input');
-        const $withPgClient = grafastContext().get('withPgClient');
-        const $pgSettings = grafastContext().get('pgSettings');
-        const $combined = object({
-          input: $input,
-          withPgClient: $withPgClient,
-          pgSettings: $pgSettings,
-        });
-        return lambda($combined, async ({ input, withPgClient, pgSettings }) => {
-          const { fileId } = input;
-          if (!fileId || typeof fileId !== 'string') {
-            throw new Error('INVALID_FILE_ID');
-          }
-          return withPgClient(pgSettings, async (pgClient) => {
-            return pgClient.withTransaction(async (txClient) => {
-              // --- Resolve storage module by file ID (probes all file tables) ---
-              const databaseId = await resolveDatabaseId(txClient);
-              if (!databaseId) {
-                throw new Error('DATABASE_NOT_FOUND');
-              }
-              const resolved = await resolveStorageModuleByFileId(txClient, databaseId, fileId);
-              if (!resolved) {
-                throw new Error('FILE_NOT_FOUND');
-              }
-              const { storageConfig, file } = resolved;
-              if (file.status !== 'pending') {
-                // File is already confirmed or processed — idempotent success
-                return {
-                  fileId: file.id,
-                  status: file.status,
-                  success: true,
+              // --- Process each file ---
+              const results = [];
+              for (let i = 0; i < files.length; i++) {
+                const fileInput = files[i];
+                const singleInput = {
+                  ...fileInput,
+                  bucketKey,
+                  ownerId,
                 };
+                const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, singleInput);
+                results.push({ ...result, index: i });
               }
-
-              const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
-              const s3Head = await headObject(s3ForDb, file.key, file.mime_type);
-              if (!s3Head) {
-                throw new Error('FILE_NOT_IN_S3: the file has not been uploaded yet');
-              }
-              // --- Content-type verification ---
-              if (s3Head.contentType && s3Head.contentType !== file.mime_type) {
-                // Mark upload_request as rejected
-                await txClient.query({
-                  text: `UPDATE ${storageConfig.uploadRequestsQualifiedName}
-                         SET status = 'rejected'
-                         WHERE file_id = $1 AND status = 'issued'`,
-                  values: [fileId],
-                });
-                throw new Error(`CONTENT_TYPE_MISMATCH: expected ${file.mime_type}, got ${s3Head.contentType}`);
-              }
-              // --- Transition file to 'ready' ---
-              await txClient.query({
-                text: `UPDATE ${storageConfig.filesQualifiedName}
-                       SET status = 'ready'
-                       WHERE id = $1`,
-                values: [fileId],
-              });
-              // --- Update upload_request to 'confirmed' ---
-              await txClient.query({
-                text: `UPDATE ${storageConfig.uploadRequestsQualifiedName}
-                       SET status = 'confirmed', confirmed_at = NOW()
-                       WHERE file_id = $1 AND status = 'issued'`,
-                values: [fileId],
-              });
-              return {
-                fileId: file.id,
-                status: 'ready',
-                success: true,
-              };
+              return { files: results };
             });
           });
         });
```
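One behavioral note that follows from the hunk above: the batch runs inside a single transaction, and `processSingleFile` throws on the first invalid file, so one bad entry rejects the whole request. A client may want to pre-screen against the same limits before submitting. A sketch with illustrative stand-in values (the real limits come from the storage module config as max_bulk_files and max_bulk_total_size):

```js
// Client-side pre-check mirroring the server's bulk-limit validation.
// The numeric limits here are assumed examples, not the plugin's defaults.
const ASSUMED_MAX_BULK_FILES = 50;
const ASSUMED_MAX_BULK_TOTAL_SIZE = 500 * 1024 * 1024; // 500 MiB

function checkBulkLimits(files) {
  if (files.length > ASSUMED_MAX_BULK_FILES) {
    return `BULK_LIMIT_EXCEEDED: max ${ASSUMED_MAX_BULK_FILES} files per batch`;
  }
  // Same reduction the plugin performs server-side.
  const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
  if (totalSize > ASSUMED_MAX_BULK_TOTAL_SIZE) {
    return `BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${ASSUMED_MAX_BULK_TOTAL_SIZE} bytes`;
  }
  return null; // within limits
}
```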
```diff
@@ -426,5 +337,212 @@
     },
   }));
 }
+// --- Shared upload logic ---
+/**
+ * Process a single upload request (used by both requestUploadUrl and requestBulkUploadUrls).
+ */
+async function processUpload(options, input, withPgClient, pgSettings) {
+  const { bucketKey, ownerId, contentHash, contentType, size, filename, key: customKey } = input;
+  if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
+    throw new Error('INVALID_BUCKET_KEY');
+  }
+  if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
+    throw new Error('INVALID_CONTENT_HASH');
+  }
+  if (!isValidSha256(contentHash)) {
+    throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
+  }
+  if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
+    throw new Error('INVALID_CONTENT_TYPE');
+  }
+  return withPgClient(pgSettings, async (pgClient) => {
+    return pgClient.withTransaction(async (txClient) => {
+      const databaseId = await resolveDatabaseId(txClient);
+      if (!databaseId) {
+        throw new Error('DATABASE_NOT_FOUND');
+      }
+      const storageConfig = ownerId
+        ? await getStorageModuleConfigForOwner(txClient, databaseId, ownerId)
+        : await getStorageModuleConfig(txClient, databaseId);
+      if (!storageConfig) {
+        throw new Error(ownerId
+          ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
+          : 'STORAGE_MODULE_NOT_PROVISIONED');
+      }
+      if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
+        throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
+      }
+      if (filename !== undefined && filename !== null) {
+        if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
+          throw new Error('INVALID_FILENAME');
+        }
+      }
+      const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId);
+      if (!bucket) {
+        throw new Error('BUCKET_NOT_FOUND');
+      }
+      const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+      await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+      return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input);
+    });
+  });
+}
+/**
+ * Process a single file upload within an already-resolved context.
+ * Handles dedup, custom keys, versioning, and auto-path derivation.
+ */
+async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
+  const { contentHash, contentType, size, filename, key: customKey } = input;
+  // --- Validate inputs ---
+  if (!contentHash || !isValidSha256(contentHash)) {
+    throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
+  }
+  if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
+    throw new Error('INVALID_CONTENT_TYPE');
+  }
+  if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
+    throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
+  }
+  if (filename !== undefined && filename !== null) {
+    if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
+      throw new Error('INVALID_FILENAME');
+    }
+  }
+  // --- Validate content type against bucket's allowed_mime_types ---
+  if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
+    const allowed = bucket.allowed_mime_types;
+    const isAllowed = allowed.some((pattern) => {
+      if (pattern === '*/*')
+        return true;
+      if (pattern.endsWith('/*')) {
+        const prefix = pattern.slice(0, -1);
+        return contentType.startsWith(prefix);
+      }
+      return contentType === pattern;
+    });
+    if (!isAllowed) {
+      throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
+    }
+  }
+  // --- Validate size against bucket's max_file_size ---
+  if (bucket.max_file_size && size > bucket.max_file_size) {
+    throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
+  }
+  // --- Determine S3 key ---
+  let s3Key;
+  let isCustomKey = false;
+  if (customKey) {
+    if (!bucket.allow_custom_keys) {
+      throw new Error('CUSTOM_KEY_NOT_ALLOWED: bucket does not allow custom keys');
+    }
+    const keyError = validateCustomKey(customKey);
+    if (keyError) {
+      throw new Error(keyError);
+    }
+    s3Key = customKey;
+    isCustomKey = true;
+  }
+  else {
+    s3Key = buildS3Key(contentHash);
+  }
+  // --- Dedup / versioning check ---
+  let previousVersionId = null;
+  if (isCustomKey) {
+    // Custom key mode: check if a file with this key already exists in this bucket.
+    // If so, auto-version by linking via previous_version_id.
+    const existingResult = await txClient.query({
+      text: `SELECT id, content_hash
+             FROM ${storageConfig.filesQualifiedName}
+             WHERE key = $1
+               AND bucket_id = $2
+             ORDER BY created_at DESC
+             LIMIT 1`,
+      values: [s3Key, bucket.id],
+    });
+    if (existingResult.rows.length > 0) {
+      const existing = existingResult.rows[0];
+      // Same content hash = true dedup (no new upload needed)
+      if (existing.content_hash === contentHash) {
+        log.info(`Dedup hit (custom key): file ${existing.id} for key ${s3Key}`);
+        return {
+          uploadUrl: null,
+          fileId: existing.id,
+          key: s3Key,
+          deduplicated: true,
+          expiresAt: null,
+          previousVersionId: null,
+        };
+      }
+      // Different content = new version
+      previousVersionId = existing.id;
+      log.info(`Versioning: new version of key ${s3Key}, previous=${previousVersionId}`);
+    }
+  }
+  else {
+    // Hash-based mode: dedup by content_hash in this bucket
+    const dedupResult = await txClient.query({
+      text: `SELECT id
+             FROM ${storageConfig.filesQualifiedName}
+             WHERE content_hash = $1
+               AND bucket_id = $2
+             LIMIT 1`,
+      values: [contentHash, bucket.id],
+    });
+    if (dedupResult.rows.length > 0) {
+      const existingFile = dedupResult.rows[0];
+      log.info(`Dedup hit: file ${existingFile.id} for hash ${contentHash}`);
+      return {
+        uploadUrl: null,
+        fileId: existingFile.id,
+        key: s3Key,
+        deduplicated: true,
+        expiresAt: null,
+        previousVersionId: null,
+      };
+    }
+  }
+  // --- Auto-derive ltree path from custom key directory (only when has_path_shares) ---
+  const derivedPath = isCustomKey && storageConfig.hasPathShares ? derivePathFromKey(s3Key) : null;
+  // --- Create file record ---
+  const hasOwnerColumn = storageConfig.membershipType !== null;
+  const columns = ['bucket_id', 'key', 'content_hash', 'mime_type', 'size', 'filename', 'is_public'];
+  const values = [bucket.id, s3Key, contentHash, contentType, size, filename || null, bucket.is_public];
+  let paramIdx = values.length;
+  if (hasOwnerColumn) {
+    columns.push('owner_id');
+    values.push(bucket.owner_id);
+    paramIdx = values.length;
+  }
+  if (previousVersionId) {
+    columns.push('previous_version_id');
+    values.push(previousVersionId);
+    paramIdx = values.length;
+  }
+  if (derivedPath) {
+    columns.push('path');
+    values.push(derivedPath);
+    paramIdx = values.length;
+  }
+  const placeholders = values.map((_, i) => `$${i + 1}`).join(', ');
+  const fileResult = await txClient.query({
+    text: `INSERT INTO ${storageConfig.filesQualifiedName}
+           (${columns.join(', ')})
+           VALUES (${placeholders})
+           RETURNING id`,
+    values,
+  });
+  const fileId = fileResult.rows[0].id;
+  // --- Generate presigned PUT URL ---
+  const uploadUrl = await generatePresignedPutUrl(s3ForDb, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
+  const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
+  return {
+    uploadUrl,
+    fileId,
+    key: s3Key,
+    deduplicated: false,
+    expiresAt,
+    previousVersionId,
+  };
+}
 export const PresignedUrlPlugin = createPresignedUrlPlugin;
 export default PresignedUrlPlugin;
```
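Finally, the contentHash that processUpload validates (SHA256_HEX_REGEX: 64 chars, lowercase hex) has to be computed client-side before requesting an upload URL. A minimal sketch using the standard Web Crypto API, available in browsers and modern Node:

```js
// Compute the lowercase hex SHA-256 digest the plugin expects as contentHash.
async function sha256Hex(blob) {
  const digest = await crypto.subtle.digest('SHA-256', await blob.arrayBuffer());
  return [...new Uint8Array(digest)]
    .map((b) => b.toString(16).padStart(2, '0'))
    .join('');
}

// Usage: const contentHash = await sha256Hex(file); // 64-char lowercase hex
```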