graphile-presigned-url-plugin 0.8.0 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/plugin.js CHANGED
@@ -22,7 +22,9 @@ const log = new Logger('graphile-presigned-url:plugin');
  const MAX_CONTENT_HASH_LENGTH = 128;
  const MAX_CONTENT_TYPE_LENGTH = 255;
  const MAX_BUCKET_KEY_LENGTH = 255;
+ const MAX_CUSTOM_KEY_LENGTH = 1024;
  const SHA256_HEX_REGEX = /^[a-f0-9]{64}$/;
+ const CUSTOM_KEY_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9_.\-\/]*$/;
  // --- Helpers ---
  /**
  * Validate a SHA-256 hex string.
@@ -31,12 +33,46 @@ function isValidSha256(hash) {
  return SHA256_HEX_REGEX.test(hash);
  }
  /**
- * Build the S3 key from content hash and content type extension.
+ * Build the S3 key from content hash.
  * Format: {contentHash} (flat namespace, content-addressed)
  */
  function buildS3Key(contentHash) {
  return contentHash;
  }
+ /**
+ * Validate a custom S3 key.
+ * Must be 1-1024 chars, no path traversal, no leading slash, no null bytes.
+ */
+ function validateCustomKey(key) {
+ if (key.length === 0 || key.length > MAX_CUSTOM_KEY_LENGTH) {
+ return 'INVALID_KEY_LENGTH: must be 1-1024 characters';
+ }
+ if (key.includes('..')) {
+ return 'INVALID_KEY: path traversal (..) not allowed';
+ }
+ if (key.startsWith('/')) {
+ return 'INVALID_KEY: leading slash not allowed';
+ }
+ if (key.includes('\0')) {
+ return 'INVALID_KEY: null bytes not allowed';
+ }
+ if (!CUSTOM_KEY_REGEX.test(key)) {
+ return 'INVALID_KEY: must start with alphanumeric and contain only alphanumeric, dots, hyphens, underscores, and slashes';
+ }
+ return null;
+ }
+ /**
+ * Derive an ltree path from a custom S3 key's directory portion.
+ * e.g., "reports/2024/Q1/revenue.pdf" → "reports.2024.Q1"
+ * Returns null if the key has no directory component.
+ */
+ function derivePathFromKey(key) {
+ const lastSlash = key.lastIndexOf('/');
+ if (lastSlash <= 0)
+ return null;
+ const dir = key.substring(0, lastSlash);
+ return dir.replace(/\//g, '.');
+ }
  /**
  * Resolve the database_id from the JWT context.
  * The server middleware sets jwt.claims.database_id, which is accessible
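
Both new helpers are small pure functions, so their behavior is easiest to read off a few sample calls. A quick sketch of expected results, derived directly from the definitions above (inputs are illustrative):

    validateCustomKey('reports/2024/Q1/revenue.pdf'); // null (valid)
    validateCustomKey('a/../secret');  // 'INVALID_KEY: path traversal (..) not allowed'
    validateCustomKey('/etc/passwd');  // 'INVALID_KEY: leading slash not allowed'
    validateCustomKey('.hidden');      // 'INVALID_KEY: must start with alphanumeric ...'
    derivePathFromKey('reports/2024/Q1/revenue.pdf'); // 'reports.2024.Q1'
    derivePathFromKey('revenue.pdf');                 // null (no directory component)

Note the check order: the traversal, leading-slash, and null-byte checks run before the regex, so those inputs get the more specific error message.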
@@ -128,6 +164,14 @@ export function createPresignedUrlPlugin(options) {
  size: Int!
  """Original filename (optional, for display and Content-Disposition)"""
  filename: String
+ """
+ Custom S3 key (e.g., "reports/2024/Q1.pdf").
+ Only allowed when the bucket has allow_custom_keys=true.
+ When omitted, key defaults to contentHash (content-addressed dedup).
+ When provided, the file is stored at this key.
+ Re-uploading to an existing key auto-creates a new version.
+ """
+ key: String
  }

  type RequestUploadUrlPayload {
@@ -141,6 +185,52 @@ export function createPresignedUrlPlugin(options) {
  deduplicated: Boolean!
  """Presigned URL expiry time (null if deduplicated)"""
  expiresAt: Datetime
+ """ID of the previous version (set when re-uploading to an existing custom key)"""
+ previousVersionId: UUID
+ }
+
+ input BulkUploadFileInput {
+ """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
+ contentHash: String!
+ """MIME type of the file (e.g., "image/png")"""
+ contentType: String!
+ """File size in bytes"""
+ size: Int!
+ """Original filename (optional, for display and Content-Disposition)"""
+ filename: String
+ """Custom S3 key (only when bucket has allow_custom_keys=true)"""
+ key: String
+ }
+
+ input RequestBulkUploadUrlsInput {
+ """Bucket key (e.g., "public", "private")"""
+ bucketKey: String!
+ """Owner entity ID for entity-scoped uploads"""
+ ownerId: UUID
+ """Array of files to upload"""
+ files: [BulkUploadFileInput!]!
+ }
+
+ type BulkUploadFilePayload {
+ """Presigned PUT URL (null if file was deduplicated)"""
+ uploadUrl: String
+ """The file ID"""
+ fileId: UUID!
+ """The S3 object key"""
+ key: String!
+ """Whether this file was deduplicated"""
+ deduplicated: Boolean!
+ """Presigned URL expiry time (null if deduplicated)"""
+ expiresAt: Datetime
+ """ID of the previous version (set when re-uploading to an existing custom key)"""
+ previousVersionId: UUID
+ """Index of this file in the input array (for client correlation)"""
+ index: Int!
+ }
+
+ type RequestBulkUploadUrlsPayload {
+ """Array of results, one per input file"""
+ files: [BulkUploadFilePayload!]!
  }

  extend type Mutation {
@@ -153,6 +243,15 @@ export function createPresignedUrlPlugin(options) {
  requestUploadUrl(
  input: RequestUploadUrlInput!
  ): RequestUploadUrlPayload
+
+ """
+ Request presigned URLs for uploading multiple files in a single batch.
+ Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size).
+ Each file is processed independently — some may dedup while others get fresh URLs.
+ """
+ requestBulkUploadUrls(
+ input: RequestBulkUploadUrlsInput!
+ ): RequestBulkUploadUrlsPayload
  }
  `,
  plans: {
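
Given the SDL above, a client batch request could look like the following (a hypothetical operation; the field names come straight from the schema additions, and the hash values are real SHA-256 digests used purely as examples):

    mutation BulkUpload {
      requestBulkUploadUrls(input: {
        bucketKey: "private"
        files: [
          { contentHash: "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08",
            contentType: "image/png", size: 1024, filename: "logo.png" }
          { contentHash: "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
            contentType: "application/pdf", size: 2048, key: "reports/2024/Q1.pdf" }
        ]
      }) {
        files { index fileId key uploadUrl deduplicated expiresAt previousVersionId }
      }
    }

The index field lets the client match each payload back to its position in the files array, since deduplicated and fresh entries come back with different shapes.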
@@ -167,28 +266,33 @@ export function createPresignedUrlPlugin(options) {
  pgSettings: $pgSettings,
  });
  return lambda($combined, async ({ input, withPgClient, pgSettings }) => {
- // --- Input validation ---
- const { bucketKey, ownerId, contentHash, contentType, size, filename } = input;
+ const result = await processUpload(options, input, withPgClient, pgSettings);
+ return result;
+ });
+ },
+ requestBulkUploadUrls(_$mutation, fieldArgs) {
+ const $input = fieldArgs.getRaw('input');
+ const $withPgClient = grafastContext().get('withPgClient');
+ const $pgSettings = grafastContext().get('pgSettings');
+ const $combined = object({
+ input: $input,
+ withPgClient: $withPgClient,
+ pgSettings: $pgSettings,
+ });
+ return lambda($combined, async ({ input, withPgClient, pgSettings }) => {
+ const { bucketKey, ownerId, files } = input;
  if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
  throw new Error('INVALID_BUCKET_KEY');
  }
- if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
- throw new Error('INVALID_CONTENT_HASH');
- }
- if (!isValidSha256(contentHash)) {
- throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
- }
- if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
- throw new Error('INVALID_CONTENT_TYPE');
+ if (!Array.isArray(files) || files.length === 0) {
+ throw new Error('INVALID_FILES: must provide at least one file');
  }
  return withPgClient(pgSettings, async (pgClient) => {
  return pgClient.withTransaction(async (txClient) => {
- // --- Resolve storage module config (all limits come from here) ---
  const databaseId = await resolveDatabaseId(txClient);
  if (!databaseId) {
  throw new Error('DATABASE_NOT_FOUND');
  }
- // --- Resolve storage module (app-level or entity-scoped) ---
  const storageConfig = ownerId
  ? await getStorageModuleConfigForOwner(txClient, databaseId, ownerId)
  : await getStorageModuleConfig(txClient, databaseId);
@@ -197,107 +301,34 @@ export function createPresignedUrlPlugin(options) {
  ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
  : 'STORAGE_MODULE_NOT_PROVISIONED');
  }
- // --- Validate size against storage module default (bucket override checked below) ---
- if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
- throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
+ // --- Validate bulk limits ---
+ if (files.length > storageConfig.maxBulkFiles) {
+ throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
  }
- if (filename !== undefined && filename !== null) {
- if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
- throw new Error('INVALID_FILENAME');
- }
+ const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
+ if (totalSize > storageConfig.maxBulkTotalSize) {
+ throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
  }
- // --- Look up the bucket (cached; first miss queries via RLS) ---
  const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId);
  if (!bucket) {
  throw new Error('BUCKET_NOT_FOUND');
  }
- // --- Validate content type against bucket's allowed_mime_types ---
- if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
- const allowed = bucket.allowed_mime_types;
- const isAllowed = allowed.some((pattern) => {
- if (pattern === '*/*')
- return true;
- if (pattern.endsWith('/*')) {
- const prefix = pattern.slice(0, -1);
- return contentType.startsWith(prefix);
- }
- return contentType === pattern;
- });
- if (!isAllowed) {
- throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
- }
- }
- // --- Validate size against bucket's max_file_size ---
- if (bucket.max_file_size && size > bucket.max_file_size) {
- throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
- }
- const s3Key = buildS3Key(contentHash);
- // --- Dedup check: look for existing file with same key (content hash) in this bucket ---
- const dedupResult = await txClient.query({
- text: `SELECT id
- FROM ${storageConfig.filesQualifiedName}
- WHERE key = $1
- AND bucket_id = $2
- LIMIT 1`,
- values: [s3Key, bucket.id],
- });
- if (dedupResult.rows.length > 0) {
- const existingFile = dedupResult.rows[0];
- log.info(`Dedup hit: file ${existingFile.id} for hash ${contentHash}`);
- return {
- uploadUrl: null,
- fileId: existingFile.id,
- key: s3Key,
- deduplicated: true,
- expiresAt: null,
- };
- }
- // --- Create file record ---
- // For app-level storage (no owner_id column), omit owner_id from the INSERT.
- const hasOwnerColumn = storageConfig.membershipType !== null;
- const fileResult = await txClient.query({
- text: hasOwnerColumn
- ? `INSERT INTO ${storageConfig.filesQualifiedName}
- (bucket_id, key, mime_type, size, filename, owner_id, is_public)
- VALUES ($1, $2, $3, $4, $5, $6, $7)
- RETURNING id`
- : `INSERT INTO ${storageConfig.filesQualifiedName}
- (bucket_id, key, mime_type, size, filename, is_public)
- VALUES ($1, $2, $3, $4, $5, $6)
- RETURNING id`,
- values: hasOwnerColumn
- ? [
- bucket.id,
- s3Key,
- contentType,
- size,
- filename || null,
- bucket.owner_id,
- bucket.is_public,
- ]
- : [
- bucket.id,
- s3Key,
- contentType,
- size,
- filename || null,
- bucket.is_public,
- ],
- });
- const fileId = fileResult.rows[0].id;
- // --- Ensure the S3 bucket exists (lazy provisioning) ---
+ // --- Ensure S3 bucket exists once for the batch ---
  const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
  await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
- // --- Generate presigned PUT URL (per-database bucket) ---
- const uploadUrl = await generatePresignedPutUrl(s3ForDb, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
- const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
- return {
- uploadUrl,
- fileId,
- key: s3Key,
- deduplicated: false,
- expiresAt,
- };
+ // --- Process each file ---
+ const results = [];
+ for (let i = 0; i < files.length; i++) {
+ const fileInput = files[i];
+ const singleInput = {
+ ...fileInput,
+ bucketKey,
+ ownerId,
+ };
+ const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, singleInput);
+ results.push({ ...result, index: i });
+ }
+ return { files: results };
  });
  });
  });
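
End to end, the client computes each file's SHA-256 before requesting URLs, then PUTs only the files that were not deduplicated. A minimal browser-side sketch, assuming a generic graphql(query, variables) request helper and a BULK_UPLOAD_MUTATION document like the one shown earlier (neither is part of this package):

    async function bulkUpload(bucketKey, fileList) {
      // Hash each file client-side (the server validates the 64-char lowercase hex format).
      const files = await Promise.all(fileList.map(async (file) => {
        const digest = await crypto.subtle.digest('SHA-256', await file.arrayBuffer());
        const contentHash = [...new Uint8Array(digest)]
          .map((b) => b.toString(16).padStart(2, '0'))
          .join('');
        return { contentHash, contentType: file.type, size: file.size, filename: file.name };
      }));
      const { requestBulkUploadUrls } = await graphql(BULK_UPLOAD_MUTATION, {
        input: { bucketKey, files },
      });
      // Deduplicated entries come back with uploadUrl: null; only PUT the rest.
      await Promise.all(requestBulkUploadUrls.files
        .filter((result) => !result.deduplicated)
        .map((result) => fetch(result.uploadUrl, {
          method: 'PUT',
          // Content-Type must match what the URL was signed with.
          headers: { 'Content-Type': fileList[result.index].type },
          body: fileList[result.index],
        })));
    }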
@@ -306,5 +337,212 @@ export function createPresignedUrlPlugin(options) {
  },
  }));
  }
+ // --- Shared upload logic ---
+ /**
+ * Process a single upload request (used by both requestUploadUrl and requestBulkUploadUrls).
+ */
+ async function processUpload(options, input, withPgClient, pgSettings) {
+ const { bucketKey, ownerId, contentHash, contentType, size, filename, key: customKey } = input;
+ if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
+ throw new Error('INVALID_BUCKET_KEY');
+ }
+ if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
+ throw new Error('INVALID_CONTENT_HASH');
+ }
+ if (!isValidSha256(contentHash)) {
+ throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
+ }
+ if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
+ throw new Error('INVALID_CONTENT_TYPE');
+ }
+ return withPgClient(pgSettings, async (pgClient) => {
+ return pgClient.withTransaction(async (txClient) => {
+ const databaseId = await resolveDatabaseId(txClient);
+ if (!databaseId) {
+ throw new Error('DATABASE_NOT_FOUND');
+ }
+ const storageConfig = ownerId
+ ? await getStorageModuleConfigForOwner(txClient, databaseId, ownerId)
+ : await getStorageModuleConfig(txClient, databaseId);
+ if (!storageConfig) {
+ throw new Error(ownerId
+ ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
+ : 'STORAGE_MODULE_NOT_PROVISIONED');
+ }
+ if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
+ throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
+ }
+ if (filename !== undefined && filename !== null) {
+ if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
+ throw new Error('INVALID_FILENAME');
+ }
+ }
+ const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId);
+ if (!bucket) {
+ throw new Error('BUCKET_NOT_FOUND');
+ }
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+ return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input);
+ });
+ });
+ }
+ /**
+ * Process a single file upload within an already-resolved context.
+ * Handles dedup, custom keys, versioning, and auto-path derivation.
+ */
+ async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
+ const { contentHash, contentType, size, filename, key: customKey } = input;
+ // --- Validate inputs ---
+ if (!contentHash || !isValidSha256(contentHash)) {
+ throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
+ }
+ if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
+ throw new Error('INVALID_CONTENT_TYPE');
+ }
+ if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
+ throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
+ }
+ if (filename !== undefined && filename !== null) {
+ if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
+ throw new Error('INVALID_FILENAME');
+ }
+ }
+ // --- Validate content type against bucket's allowed_mime_types ---
+ if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
+ const allowed = bucket.allowed_mime_types;
+ const isAllowed = allowed.some((pattern) => {
+ if (pattern === '*/*')
+ return true;
+ if (pattern.endsWith('/*')) {
+ const prefix = pattern.slice(0, -1);
+ return contentType.startsWith(prefix);
+ }
+ return contentType === pattern;
+ });
+ if (!isAllowed) {
+ throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
+ }
+ }
+ // --- Validate size against bucket's max_file_size ---
+ if (bucket.max_file_size && size > bucket.max_file_size) {
+ throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
+ }
+ // --- Determine S3 key ---
+ let s3Key;
+ let isCustomKey = false;
+ if (customKey) {
+ if (!bucket.allow_custom_keys) {
+ throw new Error('CUSTOM_KEY_NOT_ALLOWED: bucket does not allow custom keys');
+ }
+ const keyError = validateCustomKey(customKey);
+ if (keyError) {
+ throw new Error(keyError);
+ }
+ s3Key = customKey;
+ isCustomKey = true;
+ }
+ else {
+ s3Key = buildS3Key(contentHash);
+ }
+ // --- Dedup / versioning check ---
+ let previousVersionId = null;
+ if (isCustomKey) {
+ // Custom key mode: check if a file with this key already exists in this bucket.
+ // If so, auto-version by linking via previous_version_id.
+ const existingResult = await txClient.query({
+ text: `SELECT id, content_hash
+ FROM ${storageConfig.filesQualifiedName}
+ WHERE key = $1
+ AND bucket_id = $2
+ ORDER BY created_at DESC
+ LIMIT 1`,
+ values: [s3Key, bucket.id],
+ });
+ if (existingResult.rows.length > 0) {
+ const existing = existingResult.rows[0];
+ // Same content hash = true dedup (no new upload needed)
+ if (existing.content_hash === contentHash) {
+ log.info(`Dedup hit (custom key): file ${existing.id} for key ${s3Key}`);
+ return {
+ uploadUrl: null,
+ fileId: existing.id,
+ key: s3Key,
+ deduplicated: true,
+ expiresAt: null,
+ previousVersionId: null,
+ };
+ }
+ // Different content = new version
+ previousVersionId = existing.id;
+ log.info(`Versioning: new version of key ${s3Key}, previous=${previousVersionId}`);
+ }
+ }
+ else {
+ // Hash-based mode: dedup by content_hash in this bucket
+ const dedupResult = await txClient.query({
+ text: `SELECT id
+ FROM ${storageConfig.filesQualifiedName}
+ WHERE content_hash = $1
+ AND bucket_id = $2
+ LIMIT 1`,
+ values: [contentHash, bucket.id],
+ });
+ if (dedupResult.rows.length > 0) {
+ const existingFile = dedupResult.rows[0];
+ log.info(`Dedup hit: file ${existingFile.id} for hash ${contentHash}`);
+ return {
+ uploadUrl: null,
+ fileId: existingFile.id,
+ key: s3Key,
+ deduplicated: true,
+ expiresAt: null,
+ previousVersionId: null,
+ };
+ }
+ }
+ // --- Auto-derive ltree path from custom key directory (only when has_path_shares) ---
+ const derivedPath = isCustomKey && storageConfig.hasPathShares ? derivePathFromKey(s3Key) : null;
+ // --- Create file record ---
+ const hasOwnerColumn = storageConfig.membershipType !== null;
+ const columns = ['bucket_id', 'key', 'content_hash', 'mime_type', 'size', 'filename', 'is_public'];
+ const values = [bucket.id, s3Key, contentHash, contentType, size, filename || null, bucket.is_public];
+ let paramIdx = values.length;
+ if (hasOwnerColumn) {
+ columns.push('owner_id');
+ values.push(bucket.owner_id);
+ paramIdx = values.length;
+ }
+ if (previousVersionId) {
+ columns.push('previous_version_id');
+ values.push(previousVersionId);
+ paramIdx = values.length;
+ }
+ if (derivedPath) {
+ columns.push('path');
+ values.push(derivedPath);
+ paramIdx = values.length;
+ }
+ const placeholders = values.map((_, i) => `$${i + 1}`).join(', ');
+ const fileResult = await txClient.query({
+ text: `INSERT INTO ${storageConfig.filesQualifiedName}
+ (${columns.join(', ')})
+ VALUES (${placeholders})
+ RETURNING id`,
+ values,
+ });
+ const fileId = fileResult.rows[0].id;
+ // --- Generate presigned PUT URL ---
+ const uploadUrl = await generatePresignedPutUrl(s3ForDb, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
+ const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
+ return {
+ uploadUrl,
+ fileId,
+ key: s3Key,
+ deduplicated: false,
+ expiresAt,
+ previousVersionId,
+ };
+ }
  export const PresignedUrlPlugin = createPresignedUrlPlugin;
  export default PresignedUrlPlugin;
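
The custom-key branch in processSingleFile yields three distinct outcomes for the same key, which is worth spelling out. An illustrative sequence (hashA and hashB stand for two different valid SHA-256 hex strings; URLs abbreviated):

    // First upload to a fresh key: row created, presigned URL issued.
    //   { deduplicated: false, previousVersionId: null, uploadUrl: 'https://...' }
    // Same key, same hashA again: true dedup, nothing to upload.
    //   { deduplicated: true, previousVersionId: null, uploadUrl: null }
    // Same key, different hashB: new row linked to the prior one.
    //   { deduplicated: false, previousVersionId: '<id of the hashA row>', uploadUrl: 'https://...' }

Hash-based (non-custom-key) uploads never set previousVersionId; they either dedup on content_hash or create an independent row.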
@@ -8,6 +8,8 @@ const DEFAULT_DOWNLOAD_URL_EXPIRY_SECONDS = 3600; // 1 hour
  const DEFAULT_MAX_FILE_SIZE = 200 * 1024 * 1024; // 200MB
  const DEFAULT_MAX_FILENAME_LENGTH = 1024;
  const DEFAULT_CACHE_TTL_SECONDS = process.env.NODE_ENV === 'development' ? 300 : 3600;
+ const DEFAULT_MAX_BULK_FILES = 100;
+ const DEFAULT_MAX_BULK_TOTAL_SIZE = 1073741824; // 1GB
  const FIVE_MINUTES_MS = 1000 * 60 * 5;
  const ONE_HOUR_MS = 1000 * 60 * 60;
  /**
@@ -51,6 +53,9 @@ const APP_STORAGE_MODULE_QUERY = `
  sm.default_max_file_size,
  sm.max_filename_length,
  sm.cache_ttl_seconds,
+ sm.max_bulk_files,
+ sm.max_bulk_total_size,
+ sm.has_path_shares,
  NULL AS entity_schema,
  NULL AS entity_table
  FROM metaschema_modules_public.storage_module sm
@@ -86,6 +91,9 @@ const ALL_STORAGE_MODULES_QUERY = `
  sm.default_max_file_size,
  sm.max_filename_length,
  sm.cache_ttl_seconds,
+ sm.max_bulk_files,
+ sm.max_bulk_total_size,
+ sm.has_path_shares,
  es.schema_name AS entity_schema,
  et.name AS entity_table
  FROM metaschema_modules_public.storage_module sm
@@ -123,6 +131,9 @@ function buildConfig(row) {
  defaultMaxFileSize: row.default_max_file_size ?? DEFAULT_MAX_FILE_SIZE,
  maxFilenameLength: row.max_filename_length ?? DEFAULT_MAX_FILENAME_LENGTH,
  cacheTtlSeconds,
+ hasPathShares: row.has_path_shares ?? false,
+ maxBulkFiles: row.max_bulk_files ?? DEFAULT_MAX_BULK_FILES,
+ maxBulkTotalSize: row.max_bulk_total_size ?? DEFAULT_MAX_BULK_TOTAL_SIZE,
  };
  }
  /**
@@ -291,11 +302,11 @@ export async function getBucketConfig(pgClient, storageConfig, databaseId, bucke
  const hasOwner = ownerId && storageConfig.membershipType !== null;
  const result = await pgClient.query({
  text: hasOwner
- ? `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size
+ ? `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size, allow_custom_keys
  FROM ${storageConfig.bucketsQualifiedName}
  WHERE key = $1 AND owner_id = $2
  LIMIT 1`
- : `SELECT id, key, type, is_public, ${storageConfig.membershipType !== null ? 'owner_id,' : ''} allowed_mime_types, max_file_size
+ : `SELECT id, key, type, is_public, ${storageConfig.membershipType !== null ? 'owner_id,' : ''} allowed_mime_types, max_file_size, allow_custom_keys
  FROM ${storageConfig.bucketsQualifiedName}
  WHERE key = $1
  LIMIT 1`,
@@ -313,6 +324,7 @@ export async function getBucketConfig(pgClient, storageConfig, databaseId, bucke
  owner_id: row.owner_id ?? null,
  allowed_mime_types: row.allowed_mime_types,
  max_file_size: row.max_file_size,
+ allow_custom_keys: row.allow_custom_keys ?? false,
  };
  bucketCache.set(cacheKey, config);
  log.debug(`Cached bucket config for ${databaseId}:${bucketKey} (id=${config.id}, scope=${storageConfig.membershipType ?? 'app'})`);
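
buildConfig backfills the three new fields with module-level defaults when the storage_module row predates the migration and leaves the columns NULL, so cached configs stay well-formed. A sketch of the fallback (row values are illustrative):

    // Row with the new columns left NULL:
    const config = buildConfig({ ...row, max_bulk_files: null, max_bulk_total_size: null, has_path_shares: null });
    config.maxBulkFiles;     // 100         (DEFAULT_MAX_BULK_FILES)
    config.maxBulkTotalSize; // 1073741824  (DEFAULT_MAX_BULK_TOTAL_SIZE, 1GB)
    config.hasPathShares;    // false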
package/esm/types.d.ts CHANGED
@@ -10,6 +10,7 @@ export interface BucketConfig {
  owner_id: string | null;
  allowed_mime_types: string[] | null;
  max_file_size: number | null;
+ allow_custom_keys: boolean;
  }
  /**
  * Storage module configuration resolved from metaschema for a given database.
@@ -51,6 +52,12 @@ export interface StorageModuleConfig {
  maxFilenameLength: number;
  /** Cache TTL in seconds for this config entry (default: 300 dev / 3600 prod) */
  cacheTtlSeconds: number;
+ /** Whether this storage module uses ltree path + path shares (determines if path column exists on files) */
+ hasPathShares: boolean;
+ /** Max files per requestBulkUploadUrls batch (default: 100) */
+ maxBulkFiles: number;
+ /** Max total size per bulk upload batch in bytes (default: 1GB) */
+ maxBulkTotalSize: number;
  }
  /**
  * Input for the requestUploadUrl mutation.
@@ -73,6 +80,13 @@ export interface RequestUploadUrlInput {
  size: number;
  /** Original filename (optional, for display/Content-Disposition) */
  filename?: string;
+ /**
+ * Custom S3 key for the file (only allowed when bucket has allow_custom_keys=true).
+ * When omitted, key defaults to contentHash (content-addressed dedup).
+ * When provided, the file is stored at this key; dedup is bypassed.
+ * Max 1024 chars. Must not contain path traversal (.. or leading /).
+ */
+ key?: string;
  }
  /**
  * Result of the requestUploadUrl mutation.
@@ -88,6 +102,8 @@ export interface RequestUploadUrlPayload {
  deduplicated: boolean;
  /** Presigned URL expiry time (null if deduplicated) */
  expiresAt: string | null;
+ /** ID of the previous version (set when re-uploading to an existing custom key) */
+ previousVersionId: string | null;
  }
  /**
  * S3 configuration for the presigned URL plugin.
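
On the TypeScript side, the widened input type accepts the optional key alongside the existing fields. A minimal custom-key upload input (values illustrative; the import path is an assumption based on the package layout, not something the diff confirms):

    import type { RequestUploadUrlInput } from 'graphile-presigned-url-plugin/types';

    const input: RequestUploadUrlInput = {
      bucketKey: 'private',
      contentHash: '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08',
      contentType: 'application/pdf',
      size: 2048,
      filename: 'Q1.pdf',
      key: 'reports/2024/Q1.pdf', // requires allow_custom_keys=true on the bucket
    };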
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "graphile-presigned-url-plugin",
- "version": "0.8.0",
+ "version": "0.10.0",
  "description": "Presigned URL upload plugin for PostGraphile v5 — requestUploadUrl mutation and downloadUrl computed field",
  "author": "Constructive <developers@constructive.io>",
  "homepage": "https://github.com/constructive-io/constructive",
@@ -42,7 +42,7 @@
  "dependencies": {
  "@aws-sdk/client-s3": "^3.1009.0",
  "@aws-sdk/s3-request-presigner": "^3.1009.0",
- "@pgpmjs/logger": "^2.6.0",
+ "@pgpmjs/logger": "^2.7.0",
  "@pgsql/quotes": "^17.1.0",
  "lru-cache": "^11.2.7"
  },
@@ -56,9 +56,9 @@
  "postgraphile": "5.0.0"
  },
  "devDependencies": {
- "@constructive-io/s3-utils": "^2.12.1",
+ "@constructive-io/s3-utils": "^2.13.0",
  "@types/node": "^22.19.11",
  "makage": "^0.1.10"
  },
- "gitHead": "0238640b70fed4b203eb84f48315c2bd807923b9"
+ "gitHead": "fb12131bc72ca66e2eb554e15c49e35dc5b362ac"
  }
package/plugin.js CHANGED
@@ -26,7 +26,9 @@ const log = new logger_1.Logger('graphile-presigned-url:plugin');
  const MAX_CONTENT_HASH_LENGTH = 128;
  const MAX_CONTENT_TYPE_LENGTH = 255;
  const MAX_BUCKET_KEY_LENGTH = 255;
+ const MAX_CUSTOM_KEY_LENGTH = 1024;
  const SHA256_HEX_REGEX = /^[a-f0-9]{64}$/;
+ const CUSTOM_KEY_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9_.\-\/]*$/;
  // --- Helpers ---
  /**
  * Validate a SHA-256 hex string.
@@ -35,12 +37,46 @@ function isValidSha256(hash) {
  return SHA256_HEX_REGEX.test(hash);
  }
  /**
- * Build the S3 key from content hash and content type extension.
+ * Build the S3 key from content hash.
  * Format: {contentHash} (flat namespace, content-addressed)
  */
  function buildS3Key(contentHash) {
  return contentHash;
  }
+ /**
+ * Validate a custom S3 key.
+ * Must be 1-1024 chars, no path traversal, no leading slash, no null bytes.
+ */
+ function validateCustomKey(key) {
+ if (key.length === 0 || key.length > MAX_CUSTOM_KEY_LENGTH) {
+ return 'INVALID_KEY_LENGTH: must be 1-1024 characters';
+ }
+ if (key.includes('..')) {
+ return 'INVALID_KEY: path traversal (..) not allowed';
+ }
+ if (key.startsWith('/')) {
+ return 'INVALID_KEY: leading slash not allowed';
+ }
+ if (key.includes('\0')) {
+ return 'INVALID_KEY: null bytes not allowed';
+ }
+ if (!CUSTOM_KEY_REGEX.test(key)) {
+ return 'INVALID_KEY: must start with alphanumeric and contain only alphanumeric, dots, hyphens, underscores, and slashes';
+ }
+ return null;
+ }
+ /**
+ * Derive an ltree path from a custom S3 key's directory portion.
+ * e.g., "reports/2024/Q1/revenue.pdf" → "reports.2024.Q1"
+ * Returns null if the key has no directory component.
+ */
+ function derivePathFromKey(key) {
+ const lastSlash = key.lastIndexOf('/');
+ if (lastSlash <= 0)
+ return null;
+ const dir = key.substring(0, lastSlash);
+ return dir.replace(/\//g, '.');
+ }
  /**
  * Resolve the database_id from the JWT context.
  * The server middleware sets jwt.claims.database_id, which is accessible
@@ -132,6 +168,14 @@ function createPresignedUrlPlugin(options) {
  size: Int!
  """Original filename (optional, for display and Content-Disposition)"""
  filename: String
+ """
+ Custom S3 key (e.g., "reports/2024/Q1.pdf").
+ Only allowed when the bucket has allow_custom_keys=true.
+ When omitted, key defaults to contentHash (content-addressed dedup).
+ When provided, the file is stored at this key.
+ Re-uploading to an existing key auto-creates a new version.
+ """
+ key: String
  }

  type RequestUploadUrlPayload {
@@ -145,6 +189,52 @@ function createPresignedUrlPlugin(options) {
  deduplicated: Boolean!
  """Presigned URL expiry time (null if deduplicated)"""
  expiresAt: Datetime
+ """ID of the previous version (set when re-uploading to an existing custom key)"""
+ previousVersionId: UUID
+ }
+
+ input BulkUploadFileInput {
+ """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
+ contentHash: String!
+ """MIME type of the file (e.g., "image/png")"""
+ contentType: String!
+ """File size in bytes"""
+ size: Int!
+ """Original filename (optional, for display and Content-Disposition)"""
+ filename: String
+ """Custom S3 key (only when bucket has allow_custom_keys=true)"""
+ key: String
+ }
+
+ input RequestBulkUploadUrlsInput {
+ """Bucket key (e.g., "public", "private")"""
+ bucketKey: String!
+ """Owner entity ID for entity-scoped uploads"""
+ ownerId: UUID
+ """Array of files to upload"""
+ files: [BulkUploadFileInput!]!
+ }
+
+ type BulkUploadFilePayload {
+ """Presigned PUT URL (null if file was deduplicated)"""
+ uploadUrl: String
+ """The file ID"""
+ fileId: UUID!
+ """The S3 object key"""
+ key: String!
+ """Whether this file was deduplicated"""
+ deduplicated: Boolean!
+ """Presigned URL expiry time (null if deduplicated)"""
+ expiresAt: Datetime
+ """ID of the previous version (set when re-uploading to an existing custom key)"""
+ previousVersionId: UUID
+ """Index of this file in the input array (for client correlation)"""
+ index: Int!
+ }
+
+ type RequestBulkUploadUrlsPayload {
+ """Array of results, one per input file"""
+ files: [BulkUploadFilePayload!]!
  }

  extend type Mutation {
@@ -157,6 +247,15 @@ function createPresignedUrlPlugin(options) {
  requestUploadUrl(
  input: RequestUploadUrlInput!
  ): RequestUploadUrlPayload
+
+ """
+ Request presigned URLs for uploading multiple files in a single batch.
+ Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size).
+ Each file is processed independently — some may dedup while others get fresh URLs.
+ """
+ requestBulkUploadUrls(
+ input: RequestBulkUploadUrlsInput!
+ ): RequestBulkUploadUrlsPayload
  }
  `,
  plans: {
@@ -171,28 +270,33 @@ function createPresignedUrlPlugin(options) {
  pgSettings: $pgSettings,
  });
  return (0, grafast_1.lambda)($combined, async ({ input, withPgClient, pgSettings }) => {
- // --- Input validation ---
- const { bucketKey, ownerId, contentHash, contentType, size, filename } = input;
+ const result = await processUpload(options, input, withPgClient, pgSettings);
+ return result;
+ });
+ },
+ requestBulkUploadUrls(_$mutation, fieldArgs) {
+ const $input = fieldArgs.getRaw('input');
+ const $withPgClient = (0, grafast_1.context)().get('withPgClient');
+ const $pgSettings = (0, grafast_1.context)().get('pgSettings');
+ const $combined = (0, grafast_1.object)({
+ input: $input,
+ withPgClient: $withPgClient,
+ pgSettings: $pgSettings,
+ });
+ return (0, grafast_1.lambda)($combined, async ({ input, withPgClient, pgSettings }) => {
+ const { bucketKey, ownerId, files } = input;
  if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
  throw new Error('INVALID_BUCKET_KEY');
  }
- if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
- throw new Error('INVALID_CONTENT_HASH');
- }
- if (!isValidSha256(contentHash)) {
- throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
- }
- if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
- throw new Error('INVALID_CONTENT_TYPE');
+ if (!Array.isArray(files) || files.length === 0) {
+ throw new Error('INVALID_FILES: must provide at least one file');
  }
  return withPgClient(pgSettings, async (pgClient) => {
  return pgClient.withTransaction(async (txClient) => {
- // --- Resolve storage module config (all limits come from here) ---
  const databaseId = await resolveDatabaseId(txClient);
  if (!databaseId) {
  throw new Error('DATABASE_NOT_FOUND');
  }
- // --- Resolve storage module (app-level or entity-scoped) ---
  const storageConfig = ownerId
  ? await (0, storage_module_cache_1.getStorageModuleConfigForOwner)(txClient, databaseId, ownerId)
  : await (0, storage_module_cache_1.getStorageModuleConfig)(txClient, databaseId);
@@ -201,107 +305,34 @@ function createPresignedUrlPlugin(options) {
  ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
  : 'STORAGE_MODULE_NOT_PROVISIONED');
  }
- // --- Validate size against storage module default (bucket override checked below) ---
- if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
- throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
+ // --- Validate bulk limits ---
+ if (files.length > storageConfig.maxBulkFiles) {
+ throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
  }
- if (filename !== undefined && filename !== null) {
- if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
- throw new Error('INVALID_FILENAME');
- }
+ const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
+ if (totalSize > storageConfig.maxBulkTotalSize) {
+ throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
  }
- // --- Look up the bucket (cached; first miss queries via RLS) ---
  const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, bucketKey, ownerId);
  if (!bucket) {
  throw new Error('BUCKET_NOT_FOUND');
  }
- // --- Validate content type against bucket's allowed_mime_types ---
- if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
- const allowed = bucket.allowed_mime_types;
- const isAllowed = allowed.some((pattern) => {
- if (pattern === '*/*')
- return true;
- if (pattern.endsWith('/*')) {
- const prefix = pattern.slice(0, -1);
- return contentType.startsWith(prefix);
- }
- return contentType === pattern;
- });
- if (!isAllowed) {
- throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
- }
- }
- // --- Validate size against bucket's max_file_size ---
- if (bucket.max_file_size && size > bucket.max_file_size) {
- throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
- }
- const s3Key = buildS3Key(contentHash);
- // --- Dedup check: look for existing file with same key (content hash) in this bucket ---
- const dedupResult = await txClient.query({
- text: `SELECT id
- FROM ${storageConfig.filesQualifiedName}
- WHERE key = $1
- AND bucket_id = $2
- LIMIT 1`,
- values: [s3Key, bucket.id],
- });
- if (dedupResult.rows.length > 0) {
- const existingFile = dedupResult.rows[0];
- log.info(`Dedup hit: file ${existingFile.id} for hash ${contentHash}`);
- return {
- uploadUrl: null,
- fileId: existingFile.id,
- key: s3Key,
- deduplicated: true,
- expiresAt: null,
- };
- }
- // --- Create file record ---
- // For app-level storage (no owner_id column), omit owner_id from the INSERT.
- const hasOwnerColumn = storageConfig.membershipType !== null;
- const fileResult = await txClient.query({
- text: hasOwnerColumn
- ? `INSERT INTO ${storageConfig.filesQualifiedName}
- (bucket_id, key, mime_type, size, filename, owner_id, is_public)
- VALUES ($1, $2, $3, $4, $5, $6, $7)
- RETURNING id`
- : `INSERT INTO ${storageConfig.filesQualifiedName}
- (bucket_id, key, mime_type, size, filename, is_public)
- VALUES ($1, $2, $3, $4, $5, $6)
- RETURNING id`,
- values: hasOwnerColumn
- ? [
- bucket.id,
- s3Key,
- contentType,
- size,
- filename || null,
- bucket.owner_id,
- bucket.is_public,
- ]
- : [
- bucket.id,
- s3Key,
- contentType,
- size,
- filename || null,
- bucket.is_public,
- ],
- });
- const fileId = fileResult.rows[0].id;
- // --- Ensure the S3 bucket exists (lazy provisioning) ---
+ // --- Ensure S3 bucket exists once for the batch ---
  const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
  await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
- // --- Generate presigned PUT URL (per-database bucket) ---
- const uploadUrl = await (0, s3_signer_1.generatePresignedPutUrl)(s3ForDb, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
- const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
- return {
- uploadUrl,
- fileId,
- key: s3Key,
- deduplicated: false,
- expiresAt,
- };
+ // --- Process each file ---
+ const results = [];
+ for (let i = 0; i < files.length; i++) {
+ const fileInput = files[i];
+ const singleInput = {
+ ...fileInput,
+ bucketKey,
+ ownerId,
+ };
+ const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, singleInput);
+ results.push({ ...result, index: i });
+ }
+ return { files: results };
  });
  });
  });
@@ -310,5 +341,212 @@ function createPresignedUrlPlugin(options) {
  },
  }));
  }
+ // --- Shared upload logic ---
+ /**
+ * Process a single upload request (used by both requestUploadUrl and requestBulkUploadUrls).
+ */
+ async function processUpload(options, input, withPgClient, pgSettings) {
+ const { bucketKey, ownerId, contentHash, contentType, size, filename, key: customKey } = input;
+ if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
+ throw new Error('INVALID_BUCKET_KEY');
+ }
+ if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
+ throw new Error('INVALID_CONTENT_HASH');
+ }
+ if (!isValidSha256(contentHash)) {
+ throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
+ }
+ if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
+ throw new Error('INVALID_CONTENT_TYPE');
+ }
+ return withPgClient(pgSettings, async (pgClient) => {
+ return pgClient.withTransaction(async (txClient) => {
+ const databaseId = await resolveDatabaseId(txClient);
+ if (!databaseId) {
+ throw new Error('DATABASE_NOT_FOUND');
+ }
+ const storageConfig = ownerId
+ ? await (0, storage_module_cache_1.getStorageModuleConfigForOwner)(txClient, databaseId, ownerId)
+ : await (0, storage_module_cache_1.getStorageModuleConfig)(txClient, databaseId);
+ if (!storageConfig) {
+ throw new Error(ownerId
+ ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
+ : 'STORAGE_MODULE_NOT_PROVISIONED');
+ }
+ if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
+ throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
+ }
+ if (filename !== undefined && filename !== null) {
+ if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
+ throw new Error('INVALID_FILENAME');
+ }
+ }
+ const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, bucketKey, ownerId);
+ if (!bucket) {
+ throw new Error('BUCKET_NOT_FOUND');
+ }
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+ return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input);
+ });
+ });
+ }
+ /**
+ * Process a single file upload within an already-resolved context.
+ * Handles dedup, custom keys, versioning, and auto-path derivation.
+ */
+ async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
+ const { contentHash, contentType, size, filename, key: customKey } = input;
+ // --- Validate inputs ---
+ if (!contentHash || !isValidSha256(contentHash)) {
+ throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
+ }
+ if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
+ throw new Error('INVALID_CONTENT_TYPE');
+ }
+ if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
+ throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
+ }
+ if (filename !== undefined && filename !== null) {
+ if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
+ throw new Error('INVALID_FILENAME');
+ }
+ }
+ // --- Validate content type against bucket's allowed_mime_types ---
+ if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
+ const allowed = bucket.allowed_mime_types;
+ const isAllowed = allowed.some((pattern) => {
+ if (pattern === '*/*')
+ return true;
+ if (pattern.endsWith('/*')) {
+ const prefix = pattern.slice(0, -1);
+ return contentType.startsWith(prefix);
+ }
+ return contentType === pattern;
+ });
+ if (!isAllowed) {
+ throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
+ }
+ }
+ // --- Validate size against bucket's max_file_size ---
+ if (bucket.max_file_size && size > bucket.max_file_size) {
+ throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
+ }
+ // --- Determine S3 key ---
+ let s3Key;
+ let isCustomKey = false;
+ if (customKey) {
+ if (!bucket.allow_custom_keys) {
+ throw new Error('CUSTOM_KEY_NOT_ALLOWED: bucket does not allow custom keys');
+ }
+ const keyError = validateCustomKey(customKey);
+ if (keyError) {
+ throw new Error(keyError);
+ }
+ s3Key = customKey;
+ isCustomKey = true;
+ }
+ else {
+ s3Key = buildS3Key(contentHash);
+ }
+ // --- Dedup / versioning check ---
+ let previousVersionId = null;
+ if (isCustomKey) {
+ // Custom key mode: check if a file with this key already exists in this bucket.
+ // If so, auto-version by linking via previous_version_id.
+ const existingResult = await txClient.query({
+ text: `SELECT id, content_hash
+ FROM ${storageConfig.filesQualifiedName}
+ WHERE key = $1
+ AND bucket_id = $2
+ ORDER BY created_at DESC
+ LIMIT 1`,
+ values: [s3Key, bucket.id],
+ });
+ if (existingResult.rows.length > 0) {
+ const existing = existingResult.rows[0];
+ // Same content hash = true dedup (no new upload needed)
+ if (existing.content_hash === contentHash) {
+ log.info(`Dedup hit (custom key): file ${existing.id} for key ${s3Key}`);
+ return {
+ uploadUrl: null,
+ fileId: existing.id,
+ key: s3Key,
+ deduplicated: true,
+ expiresAt: null,
+ previousVersionId: null,
+ };
+ }
+ // Different content = new version
+ previousVersionId = existing.id;
+ log.info(`Versioning: new version of key ${s3Key}, previous=${previousVersionId}`);
+ }
+ }
+ else {
+ // Hash-based mode: dedup by content_hash in this bucket
+ const dedupResult = await txClient.query({
+ text: `SELECT id
+ FROM ${storageConfig.filesQualifiedName}
+ WHERE content_hash = $1
+ AND bucket_id = $2
+ LIMIT 1`,
+ values: [contentHash, bucket.id],
+ });
+ if (dedupResult.rows.length > 0) {
+ const existingFile = dedupResult.rows[0];
+ log.info(`Dedup hit: file ${existingFile.id} for hash ${contentHash}`);
+ return {
+ uploadUrl: null,
+ fileId: existingFile.id,
+ key: s3Key,
+ deduplicated: true,
+ expiresAt: null,
+ previousVersionId: null,
+ };
+ }
+ }
+ // --- Auto-derive ltree path from custom key directory (only when has_path_shares) ---
+ const derivedPath = isCustomKey && storageConfig.hasPathShares ? derivePathFromKey(s3Key) : null;
+ // --- Create file record ---
+ const hasOwnerColumn = storageConfig.membershipType !== null;
+ const columns = ['bucket_id', 'key', 'content_hash', 'mime_type', 'size', 'filename', 'is_public'];
+ const values = [bucket.id, s3Key, contentHash, contentType, size, filename || null, bucket.is_public];
+ let paramIdx = values.length;
+ if (hasOwnerColumn) {
+ columns.push('owner_id');
+ values.push(bucket.owner_id);
+ paramIdx = values.length;
+ }
+ if (previousVersionId) {
+ columns.push('previous_version_id');
+ values.push(previousVersionId);
+ paramIdx = values.length;
+ }
+ if (derivedPath) {
+ columns.push('path');
+ values.push(derivedPath);
+ paramIdx = values.length;
+ }
+ const placeholders = values.map((_, i) => `$${i + 1}`).join(', ');
+ const fileResult = await txClient.query({
+ text: `INSERT INTO ${storageConfig.filesQualifiedName}
+ (${columns.join(', ')})
+ VALUES (${placeholders})
+ RETURNING id`,
+ values,
+ });
+ const fileId = fileResult.rows[0].id;
+ // --- Generate presigned PUT URL ---
+ const uploadUrl = await (0, s3_signer_1.generatePresignedPutUrl)(s3ForDb, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
+ const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
+ return {
+ uploadUrl,
+ fileId,
+ key: s3Key,
+ deduplicated: false,
+ expiresAt,
+ previousVersionId,
+ };
+ }
  exports.PresignedUrlPlugin = createPresignedUrlPlugin;
  exports.default = exports.PresignedUrlPlugin;
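
One behavioral change that applies to both builds: the hash-based dedup lookup now matches on the dedicated content_hash column rather than on key. In 0.8.0 the key was the content hash, so the two were interchangeable; with custom keys they no longer are. A side-by-side of the queries from the diffs above (files stands in for the configured filesQualifiedName):

    -- 0.8.0: key doubled as the content hash
    SELECT id FROM files WHERE key = $1 AND bucket_id = $2 LIMIT 1;
    -- 0.10.0: key may be custom, so dedup matches content_hash, which is now also persisted on INSERT
    SELECT id FROM files WHERE content_hash = $1 AND bucket_id = $2 LIMIT 1;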
@@ -18,6 +18,8 @@ const DEFAULT_DOWNLOAD_URL_EXPIRY_SECONDS = 3600; // 1 hour
  const DEFAULT_MAX_FILE_SIZE = 200 * 1024 * 1024; // 200MB
  const DEFAULT_MAX_FILENAME_LENGTH = 1024;
  const DEFAULT_CACHE_TTL_SECONDS = process.env.NODE_ENV === 'development' ? 300 : 3600;
+ const DEFAULT_MAX_BULK_FILES = 100;
+ const DEFAULT_MAX_BULK_TOTAL_SIZE = 1073741824; // 1GB
  const FIVE_MINUTES_MS = 1000 * 60 * 5;
  const ONE_HOUR_MS = 1000 * 60 * 60;
  /**
@@ -61,6 +63,9 @@ const APP_STORAGE_MODULE_QUERY = `
  sm.default_max_file_size,
  sm.max_filename_length,
  sm.cache_ttl_seconds,
+ sm.max_bulk_files,
+ sm.max_bulk_total_size,
+ sm.has_path_shares,
  NULL AS entity_schema,
  NULL AS entity_table
  FROM metaschema_modules_public.storage_module sm
@@ -96,6 +101,9 @@ const ALL_STORAGE_MODULES_QUERY = `
  sm.default_max_file_size,
  sm.max_filename_length,
  sm.cache_ttl_seconds,
+ sm.max_bulk_files,
+ sm.max_bulk_total_size,
+ sm.has_path_shares,
  es.schema_name AS entity_schema,
  et.name AS entity_table
  FROM metaschema_modules_public.storage_module sm
@@ -133,6 +141,9 @@ function buildConfig(row) {
  defaultMaxFileSize: row.default_max_file_size ?? DEFAULT_MAX_FILE_SIZE,
  maxFilenameLength: row.max_filename_length ?? DEFAULT_MAX_FILENAME_LENGTH,
  cacheTtlSeconds,
+ hasPathShares: row.has_path_shares ?? false,
+ maxBulkFiles: row.max_bulk_files ?? DEFAULT_MAX_BULK_FILES,
+ maxBulkTotalSize: row.max_bulk_total_size ?? DEFAULT_MAX_BULK_TOTAL_SIZE,
  };
  }
  /**
@@ -301,11 +312,11 @@ async function getBucketConfig(pgClient, storageConfig, databaseId, bucketKey, o
  const hasOwner = ownerId && storageConfig.membershipType !== null;
  const result = await pgClient.query({
  text: hasOwner
- ? `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size
+ ? `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size, allow_custom_keys
  FROM ${storageConfig.bucketsQualifiedName}
  WHERE key = $1 AND owner_id = $2
  LIMIT 1`
- : `SELECT id, key, type, is_public, ${storageConfig.membershipType !== null ? 'owner_id,' : ''} allowed_mime_types, max_file_size
+ : `SELECT id, key, type, is_public, ${storageConfig.membershipType !== null ? 'owner_id,' : ''} allowed_mime_types, max_file_size, allow_custom_keys
  FROM ${storageConfig.bucketsQualifiedName}
  WHERE key = $1
  LIMIT 1`,
@@ -323,6 +334,7 @@ async function getBucketConfig(pgClient, storageConfig, databaseId, bucketKey, o
  owner_id: row.owner_id ?? null,
  allowed_mime_types: row.allowed_mime_types,
  max_file_size: row.max_file_size,
+ allow_custom_keys: row.allow_custom_keys ?? false,
  };
  bucketCache.set(cacheKey, config);
  log.debug(`Cached bucket config for ${databaseId}:${bucketKey} (id=${config.id}, scope=${storageConfig.membershipType ?? 'app'})`);
package/types.d.ts CHANGED
@@ -10,6 +10,7 @@ export interface BucketConfig {
  owner_id: string | null;
  allowed_mime_types: string[] | null;
  max_file_size: number | null;
+ allow_custom_keys: boolean;
  }
  /**
  * Storage module configuration resolved from metaschema for a given database.
@@ -51,6 +52,12 @@ export interface StorageModuleConfig {
  maxFilenameLength: number;
  /** Cache TTL in seconds for this config entry (default: 300 dev / 3600 prod) */
  cacheTtlSeconds: number;
+ /** Whether this storage module uses ltree path + path shares (determines if path column exists on files) */
+ hasPathShares: boolean;
+ /** Max files per requestBulkUploadUrls batch (default: 100) */
+ maxBulkFiles: number;
+ /** Max total size per bulk upload batch in bytes (default: 1GB) */
+ maxBulkTotalSize: number;
  }
  /**
  * Input for the requestUploadUrl mutation.
@@ -73,6 +80,13 @@ export interface RequestUploadUrlInput {
  size: number;
  /** Original filename (optional, for display/Content-Disposition) */
  filename?: string;
+ /**
+ * Custom S3 key for the file (only allowed when bucket has allow_custom_keys=true).
+ * When omitted, key defaults to contentHash (content-addressed dedup).
+ * When provided, the file is stored at this key; dedup is bypassed.
+ * Max 1024 chars. Must not contain path traversal (.. or leading /).
+ */
+ key?: string;
  }
  /**
  * Result of the requestUploadUrl mutation.
@@ -88,6 +102,8 @@ export interface RequestUploadUrlPayload {
  deduplicated: boolean;
  /** Presigned URL expiry time (null if deduplicated) */
  expiresAt: string | null;
+ /** ID of the previous version (set when re-uploading to an existing custom key) */
+ previousVersionId: string | null;
  }
  /**
  * S3 configuration for the presigned URL plugin.