graphile-presigned-url-plugin 0.9.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/plugin.js CHANGED
@@ -1,23 +1,26 @@
  "use strict";
  /**
- * Presigned URL Plugin for PostGraphile v5
+ * Per-Table Storage Middleware Plugin for PostGraphile v5
  *
- * Adds presigned URL upload support to PostGraphile v5:
+ * Hooks into PostGraphile's auto-generated CRUD mutations to add S3 operations:
  *
- * 1. `requestUploadUrl` mutation: generates a presigned PUT URL for direct
- * client-to-S3 upload. Checks bucket access via RLS, deduplicates by
- * content hash via UNIQUE(bucket_id, key) constraint.
+ * 1. Delete middleware: wraps `delete*` mutations on `@storageFiles`-tagged tables
+ * with S3 object cleanup (sync + async GC fallback via AFTER DELETE trigger).
  *
- * 2. `downloadUrl` computed field on File types generates presigned GET URLs
- * for private files, returns public URL prefix + key for public files.
+ * 2. Upload fields: adds `requestUploadUrl` and `requestBulkUploadUrls` fields
+ * on `@storageBuckets`-tagged types, so clients upload via the typed bucket API.
  *
- * Uses the extendSchema + grafast plan pattern (same as PublicKeySignature).
+ * 3. downloadUrl: handled by download-url-field.ts (separate plugin).
+ *
+ * No global mutations — all S3 operations are scoped to the per-table types that
+ * PostGraphile already generates. Scope resolution uses the codec's schema/table
+ * name matched against cached storage module configs.
  */
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.PresignedUrlPlugin = void 0;
  exports.createPresignedUrlPlugin = createPresignedUrlPlugin;
  const grafast_1 = require("grafast");
- const graphile_utils_1 = require("graphile-utils");
+ require("graphile-build");
  const logger_1 = require("@pgpmjs/logger");
  const storage_module_cache_1 = require("./storage-module-cache");
  const s3_signer_1 = require("./s3-signer");
@@ -25,28 +28,16 @@ const log = new logger_1.Logger('graphile-presigned-url:plugin');
  // --- Protocol-level constants (not configurable) ---
  const MAX_CONTENT_HASH_LENGTH = 128;
  const MAX_CONTENT_TYPE_LENGTH = 255;
- const MAX_BUCKET_KEY_LENGTH = 255;
  const MAX_CUSTOM_KEY_LENGTH = 1024;
  const SHA256_HEX_REGEX = /^[a-f0-9]{64}$/;
  const CUSTOM_KEY_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9_.\-\/]*$/;
  // --- Helpers ---
- /**
- * Validate a SHA-256 hex string.
- */
  function isValidSha256(hash) {
  return SHA256_HEX_REGEX.test(hash);
  }
- /**
- * Build the S3 key from content hash.
- * Format: {contentHash} (flat namespace, content-addressed)
- */
  function buildS3Key(contentHash) {
  return contentHash;
  }
- /**
- * Validate a custom S3 key.
- * Must be 1-1024 chars, no path traversal, no leading slash, no null bytes.
- */
  function validateCustomKey(key) {
  if (key.length === 0 || key.length > MAX_CUSTOM_KEY_LENGTH) {
  return 'INVALID_KEY_LENGTH: must be 1-1024 characters';
@@ -65,11 +56,6 @@ function validateCustomKey(key) {
  }
  return null;
  }
- /**
- * Derive an ltree path from a custom S3 key's directory portion.
- * e.g., "reports/2024/Q1/revenue.pdf" → "reports.2024.Q1"
- * Returns null if the key has no directory component.
- */
  function derivePathFromKey(key) {
  const lastSlash = key.lastIndexOf('/');
  if (lastSlash <= 0)
@@ -77,41 +63,20 @@ function derivePathFromKey(key) {
  const dir = key.substring(0, lastSlash);
  return dir.replace(/\//g, '.');
  }
- /**
- * Resolve the database_id from the JWT context.
- * The server middleware sets jwt.claims.database_id, which is accessible
- * via jwt_private.current_database_id() — a simple function call, no
- * metaschema query needed.
- */
  async function resolveDatabaseId(pgClient) {
  const result = await pgClient.query({
  text: `SELECT jwt_private.current_database_id() AS id`,
  });
  return result.rows[0]?.id ?? null;
  }
- // --- Plugin factory ---
- /**
- * Resolve the S3 config from the options. If the option is a lazy getter
- * function, call it (and cache the result). This avoids reading env vars
- * or constructing an S3Client at module-import time.
- */
  function resolveS3(options) {
  if (typeof options.s3 === 'function') {
  const resolved = options.s3();
- // Cache so subsequent calls don't re-evaluate
  options.s3 = resolved;
  return resolved;
  }
  return options.s3;
  }
- /**
- * Build a per-database S3Config by overlaying storage_module overrides
- * onto the global S3Config.
- *
- * - Bucket name: from resolveBucketName(databaseId) if provided, else global
- * - publicUrlPrefix: from storageConfig.publicUrlPrefix if set, else global
- * - S3 client (credentials, endpoint): always global (shared IAM key)
- */
  function resolveS3ForDatabase(options, storageConfig, databaseId) {
  const globalS3 = resolveS3(options);
  const bucket = options.resolveBucketName
@@ -127,16 +92,6 @@ function resolveS3ForDatabase(options, storageConfig, databaseId) {
  ...(publicUrlPrefix != null ? { publicUrlPrefix } : {}),
  };
  }
- /**
- * Ensure the S3 bucket for a database exists, provisioning it lazily if needed.
- *
- * Checks an in-memory Set of known-provisioned bucket names. On the first
- * request for an unseen bucket, calls the `ensureBucketProvisioned` callback
- * (which creates the bucket with correct CORS, policies, etc.), then marks
- * it as provisioned so subsequent requests skip the check entirely.
- *
- * If no `ensureBucketProvisioned` callback is configured, this is a no-op.
- */
  async function ensureS3BucketExists(options, s3BucketName, bucket, databaseId, allowedOrigins) {
  if (!options.ensureBucketProvisioned)
  return;
@@ -147,209 +102,339 @@ async function ensureS3BucketExists(options, s3BucketName, bucket, databaseId, a
  (0, storage_module_cache_1.markS3BucketProvisioned)(s3BucketName);
  log.info(`Lazy-provisioned S3 bucket "${s3BucketName}" successfully`);
  }
+ // --- Plugin factory ---
  function createPresignedUrlPlugin(options) {
- return (0, graphile_utils_1.extendSchema)(() => ({
- typeDefs: (0, graphile_utils_1.gql) `
- input RequestUploadUrlInput {
- """Bucket key (e.g., "public", "private")"""
- bucketKey: String!
- """
- Owner entity ID for entity-scoped uploads.
- Omit for app-level (database-wide) storage.
- When provided, resolves the storage module for the entity type
- that owns this entity instance (e.g., a data room ID, team ID).
- """
- ownerId: UUID
- """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
- contentHash: String!
- """MIME type of the file (e.g., "image/png")"""
- contentType: String!
- """File size in bytes"""
- size: Int!
- """Original filename (optional, for display and Content-Disposition)"""
- filename: String
- """
- Custom S3 key (e.g., "reports/2024/Q1.pdf").
- Only allowed when the bucket has allow_custom_keys=true.
- When omitted, key defaults to contentHash (content-addressed dedup).
- When provided, the file is stored at this key.
- Re-uploading to an existing key auto-creates a new version.
- """
- key: String
- }
-
- type RequestUploadUrlPayload {
- """Presigned PUT URL (null if file was deduplicated)"""
- uploadUrl: String
- """The file ID (existing if deduplicated, new if fresh upload)"""
- fileId: UUID!
- """The S3 object key"""
- key: String!
- """Whether this file was deduplicated (already exists with same hash)"""
- deduplicated: Boolean!
- """Presigned URL expiry time (null if deduplicated)"""
- expiresAt: Datetime
- """ID of the previous version (set when re-uploading to an existing custom key)"""
- previousVersionId: UUID
- }
-
- input BulkUploadFileInput {
- """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
- contentHash: String!
- """MIME type of the file (e.g., "image/png")"""
- contentType: String!
- """File size in bytes"""
- size: Int!
- """Original filename (optional, for display and Content-Disposition)"""
- filename: String
- """Custom S3 key (only when bucket has allow_custom_keys=true)"""
- key: String
- }
-
- input RequestBulkUploadUrlsInput {
- """Bucket key (e.g., "public", "private")"""
- bucketKey: String!
- """Owner entity ID for entity-scoped uploads"""
- ownerId: UUID
- """Array of files to upload"""
- files: [BulkUploadFileInput!]!
- }
-
- type BulkUploadFilePayload {
- """Presigned PUT URL (null if file was deduplicated)"""
- uploadUrl: String
- """The file ID"""
- fileId: UUID!
- """The S3 object key"""
- key: String!
- """Whether this file was deduplicated"""
- deduplicated: Boolean!
- """Presigned URL expiry time (null if deduplicated)"""
- expiresAt: Datetime
- """ID of the previous version (set when re-uploading to an existing custom key)"""
- previousVersionId: UUID
- """Index of this file in the input array (for client correlation)"""
- index: Int!
- }
-
- type RequestBulkUploadUrlsPayload {
- """Array of results, one per input file"""
- files: [BulkUploadFilePayload!]!
- }
-
- extend type Mutation {
- """
- Request a presigned URL for uploading a file directly to S3.
- Client computes SHA-256 of the file content and provides it here.
- If a file with the same hash already exists (dedup), returns the
- existing file ID and deduplicated=true with no uploadUrl.
- """
- requestUploadUrl(
- input: RequestUploadUrlInput!
- ): RequestUploadUrlPayload
-
- """
- Request presigned URLs for uploading multiple files in a single batch.
- Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size).
- Each file is processed independently — some may dedup while others get fresh URLs.
- """
- requestBulkUploadUrls(
- input: RequestBulkUploadUrlsInput!
- ): RequestBulkUploadUrlsPayload
- }
- `,
- plans: {
- Mutation: {
- requestUploadUrl(_$mutation, fieldArgs) {
- const $input = fieldArgs.getRaw('input');
- const $withPgClient = (0, grafast_1.context)().get('withPgClient');
- const $pgSettings = (0, grafast_1.context)().get('pgSettings');
- const $combined = (0, grafast_1.object)({
- input: $input,
- withPgClient: $withPgClient,
- pgSettings: $pgSettings,
+ return {
+ name: 'PresignedUrlPlugin',
+ version: '1.0.0',
+ description: 'Per-table S3 storage middleware: upload fields on @storageBuckets, delete middleware on @storageFiles',
+ after: ['PgAttributesPlugin', 'PgMutationCreatePlugin', 'PgMutationUpdateDeletePlugin'],
+ schema: {
+ hooks: {
+ /**
+ * Add requestUploadUrl and requestBulkUploadUrls fields on @storageBuckets types.
+ */
+ GraphQLObjectType_fields(fields, build, context) {
+ const { scope: { pgCodec, isPgClassType }, } = context;
+ if (!isPgClassType || !pgCodec || !pgCodec.attributes) {
+ return fields;
+ }
+ const tags = pgCodec.extensions?.tags;
+ if (!tags?.storageBuckets) {
+ return fields;
+ }
+ log.debug(`Adding upload fields to bucket type: ${pgCodec.name} (has @storageBuckets tag)`);
+ const { graphql: { GraphQLString, GraphQLNonNull, GraphQLInt, GraphQLBoolean, GraphQLObjectType, GraphQLList, GraphQLInputObjectType, }, } = build;
+ // --- Shared output types ---
+ const UploadUrlPayloadType = new GraphQLObjectType({
+ name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestUploadUrlPayload`,
+ fields: {
+ uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' },
+ fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID' },
+ key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' },
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated' },
+ expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' },
+ previousVersionId: { type: GraphQLString, description: 'ID of the previous version' },
+ },
  });
- return (0, grafast_1.lambda)($combined, async ({ input, withPgClient, pgSettings }) => {
- const result = await processUpload(options, input, withPgClient, pgSettings);
- return result;
+ const BulkUploadFilePayloadType = new GraphQLObjectType({
+ name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFilePayload`,
+ fields: {
+ uploadUrl: { type: GraphQLString },
+ fileId: { type: new GraphQLNonNull(GraphQLString) },
+ key: { type: new GraphQLNonNull(GraphQLString) },
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) },
+ expiresAt: { type: GraphQLString },
+ previousVersionId: { type: GraphQLString },
+ index: { type: new GraphQLNonNull(GraphQLInt), description: 'Index in the input array' },
+ },
  });
- },
- requestBulkUploadUrls(_$mutation, fieldArgs) {
- const $input = fieldArgs.getRaw('input');
- const $withPgClient = (0, grafast_1.context)().get('withPgClient');
- const $pgSettings = (0, grafast_1.context)().get('pgSettings');
- const $combined = (0, grafast_1.object)({
- input: $input,
- withPgClient: $withPgClient,
- pgSettings: $pgSettings,
+ const BulkUploadUrlsPayloadType = new GraphQLObjectType({
+ name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestBulkUploadUrlsPayload`,
+ fields: {
+ files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFilePayloadType))) },
+ },
  });
- return (0, grafast_1.lambda)($combined, async ({ input, withPgClient, pgSettings }) => {
- const { bucketKey, ownerId, files } = input;
- if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
- throw new Error('INVALID_BUCKET_KEY');
- }
- if (!Array.isArray(files) || files.length === 0) {
- throw new Error('INVALID_FILES: must provide at least one file');
- }
- return withPgClient(pgSettings, async (pgClient) => {
- return pgClient.withTransaction(async (txClient) => {
- const databaseId = await resolveDatabaseId(txClient);
- if (!databaseId) {
- throw new Error('DATABASE_NOT_FOUND');
- }
- const storageConfig = ownerId
- ? await (0, storage_module_cache_1.getStorageModuleConfigForOwner)(txClient, databaseId, ownerId)
- : await (0, storage_module_cache_1.getStorageModuleConfig)(txClient, databaseId);
- if (!storageConfig) {
- throw new Error(ownerId
- ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
- : 'STORAGE_MODULE_NOT_PROVISIONED');
- }
- // --- Validate bulk limits ---
- if (files.length > storageConfig.maxBulkFiles) {
- throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
- }
- const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
- if (totalSize > storageConfig.maxBulkTotalSize) {
- throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
- }
- const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, bucketKey, ownerId);
- if (!bucket) {
- throw new Error('BUCKET_NOT_FOUND');
+ const BulkUploadFileInputType = new GraphQLInputObjectType({
+ name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFileInput`,
+ fields: {
+ contentHash: { type: new GraphQLNonNull(GraphQLString) },
+ contentType: { type: new GraphQLNonNull(GraphQLString) },
+ size: { type: new GraphQLNonNull(GraphQLInt) },
+ filename: { type: GraphQLString },
+ key: { type: GraphQLString },
+ },
+ });
+ // Capture codec for closure
+ const capturedCodec = pgCodec;
+ return build.extend(fields, {
+ requestUploadUrl: context.fieldWithHooks({ fieldName: 'requestUploadUrl' }, {
+ description: 'Request a presigned URL for uploading a file to this bucket.',
+ type: UploadUrlPayloadType,
+ args: {
+ contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
+ contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
+ size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
+ filename: { type: GraphQLString, description: 'Original filename (optional)' },
+ key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
+ },
+ plan($parent, fieldArgs) {
+ const $bucketId = $parent.get('id');
+ const $bucketKey = $parent.get('key');
+ const $bucketType = $parent.get('type');
+ const $bucketIsPublic = $parent.get('is_public');
+ const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
+ const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
+ const $bucketMaxFileSize = $parent.get('max_file_size');
+ const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : (0, grafast_1.lambda)(null, () => null);
+ const $contentHash = fieldArgs.getRaw('contentHash');
+ const $contentType = fieldArgs.getRaw('contentType');
+ const $size = fieldArgs.getRaw('size');
+ const $filename = fieldArgs.getRaw('filename');
+ const $customKey = fieldArgs.getRaw('key');
+ const $withPgClient = (0, grafast_1.context)().get('withPgClient');
+ const $pgSettings = (0, grafast_1.context)().get('pgSettings');
+ const $combined = (0, grafast_1.object)({
+ bucketId: $bucketId,
+ bucketKey: $bucketKey,
+ bucketType: $bucketType,
+ bucketIsPublic: $bucketIsPublic,
+ bucketAllowCustomKeys: $bucketAllowCustomKeys,
+ bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
+ bucketMaxFileSize: $bucketMaxFileSize,
+ bucketOwnerId: $bucketOwnerId,
+ contentHash: $contentHash,
+ contentType: $contentType,
+ size: $size,
+ filename: $filename,
+ customKey: $customKey,
+ withPgClient: $withPgClient,
+ pgSettings: $pgSettings,
+ });
+ return (0, grafast_1.lambda)($combined, async (vals) => {
+ return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+ return pgClient.withTransaction(async (txClient) => {
+ const databaseId = await resolveDatabaseId(txClient);
+ if (!databaseId)
+ throw new Error('DATABASE_NOT_FOUND');
+ const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(txClient, databaseId);
+ const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
+ if (!storageConfig)
+ throw new Error('STORAGE_MODULE_NOT_FOUND');
+ const bucket = {
+ id: vals.bucketId,
+ key: vals.bucketKey,
+ type: vals.bucketType,
+ is_public: vals.bucketIsPublic,
+ owner_id: vals.bucketOwnerId,
+ allowed_mime_types: vals.bucketAllowedMimeTypes,
+ max_file_size: vals.bucketMaxFileSize,
+ allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
+ };
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+ return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
+ contentHash: vals.contentHash,
+ contentType: vals.contentType,
+ size: vals.size,
+ filename: vals.filename,
+ key: vals.customKey,
+ });
+ });
+ });
+ });
+ },
+ }),
+ requestBulkUploadUrls: context.fieldWithHooks({ fieldName: 'requestBulkUploadUrls' }, {
+ description: 'Request presigned URLs for uploading multiple files to this bucket.',
+ type: BulkUploadUrlsPayloadType,
+ args: {
+ files: {
+ type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFileInputType))),
+ description: 'Array of files to upload',
+ },
+ },
+ plan($parent, fieldArgs) {
+ const $bucketId = $parent.get('id');
+ const $bucketKey = $parent.get('key');
+ const $bucketType = $parent.get('type');
+ const $bucketIsPublic = $parent.get('is_public');
+ const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
+ const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
+ const $bucketMaxFileSize = $parent.get('max_file_size');
+ const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : (0, grafast_1.lambda)(null, () => null);
+ const $files = fieldArgs.getRaw('files');
+ const $withPgClient = (0, grafast_1.context)().get('withPgClient');
+ const $pgSettings = (0, grafast_1.context)().get('pgSettings');
+ const $combined = (0, grafast_1.object)({
+ bucketId: $bucketId,
+ bucketKey: $bucketKey,
+ bucketType: $bucketType,
+ bucketIsPublic: $bucketIsPublic,
+ bucketAllowCustomKeys: $bucketAllowCustomKeys,
+ bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
+ bucketMaxFileSize: $bucketMaxFileSize,
+ bucketOwnerId: $bucketOwnerId,
+ files: $files,
+ withPgClient: $withPgClient,
+ pgSettings: $pgSettings,
+ });
+ return (0, grafast_1.lambda)($combined, async (vals) => {
+ const { files } = vals;
+ if (!Array.isArray(files) || files.length === 0) {
+ throw new Error('INVALID_FILES: must provide at least one file');
+ }
+ return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+ return pgClient.withTransaction(async (txClient) => {
+ const databaseId = await resolveDatabaseId(txClient);
+ if (!databaseId)
+ throw new Error('DATABASE_NOT_FOUND');
+ const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(txClient, databaseId);
+ const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
+ if (!storageConfig)
+ throw new Error('STORAGE_MODULE_NOT_FOUND');
+ if (files.length > storageConfig.maxBulkFiles) {
+ throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
+ }
+ const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
+ if (totalSize > storageConfig.maxBulkTotalSize) {
+ throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
+ }
+ const bucket = {
+ id: vals.bucketId,
+ key: vals.bucketKey,
+ type: vals.bucketType,
+ is_public: vals.bucketIsPublic,
+ owner_id: vals.bucketOwnerId,
+ allowed_mime_types: vals.bucketAllowedMimeTypes,
+ max_file_size: vals.bucketMaxFileSize,
+ allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
+ };
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+ const results = [];
+ for (let i = 0; i < files.length; i++) {
+ const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, files[i]);
+ results.push({ ...result, index: i });
+ }
+ return { files: results };
+ });
+ });
+ });
+ },
+ }),
+ }, `PresignedUrlPlugin adding upload fields to ${pgCodec.name}`);
+ },
+ /**
+ * Wrap delete* mutations on @storageFiles-tagged tables with S3 cleanup.
+ *
+ * Pattern: identical to graphile-bucket-provisioner-plugin's create/update hooks.
+ * 1. Read the file row BEFORE delete (need key + bucket_id for S3 cleanup)
+ * 2. Call PostGraphile's generated delete (RLS enforced)
+ * 3. If delete succeeded, check refcount and attempt sync S3 delete
+ * 4. AFTER DELETE trigger (constructive-db) enqueues async GC job as fallback
+ */
+ GraphQLObjectType_fields_field(field, build, context) {
+ const { scope: { isRootMutation, fieldName, pgCodec }, } = context;
+ if (!isRootMutation || !pgCodec || !pgCodec.attributes) {
+ return field;
+ }
+ const tags = pgCodec.extensions?.tags;
+ if (!tags?.storageFiles) {
+ return field;
+ }
+ if (!fieldName.startsWith('delete')) {
+ return field;
+ }
+ log.debug(`Wrapping delete mutation "${fieldName}" with S3 cleanup (codec: ${pgCodec.name})`);
+ const defaultResolver = (obj) => obj[fieldName];
+ const { resolve: oldResolve = defaultResolver, ...rest } = field;
+ const capturedCodec = pgCodec;
+ return {
+ ...rest,
+ async resolve(source, args, graphqlContext, info) {
+ // Extract the file ID from the mutation input
+ const inputKey = Object.keys(args.input || {}).find((k) => k !== 'clientMutationId');
+ const fileInput = inputKey ? args.input[inputKey] : null;
+ let fileRow = null;
+ if (fileInput) {
+ // Read the file row BEFORE delete to get the S3 key + bucket_id
+ const withPgClient = graphqlContext.withPgClient;
+ const pgSettings = graphqlContext.pgSettings;
+ if (withPgClient) {
+ try {
+ await withPgClient(pgSettings, async (pgClient) => {
+ const databaseId = await resolveDatabaseId(pgClient);
+ if (!databaseId)
+ return;
+ const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(pgClient, databaseId);
+ const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
+ if (!storageConfig)
+ return;
+ // Read the file row (RLS enforced)
+ const result = await pgClient.query({
+ text: `SELECT key, bucket_id FROM ${storageConfig.filesQualifiedName} WHERE id = $1 LIMIT 1`,
+ values: [fileInput],
+ });
+ if (result.rows.length > 0) {
+ fileRow = result.rows[0];
+ }
+ });
+ }
+ catch (err) {
+ log.warn(`Pre-delete file lookup failed: ${err.message}`);
+ }
  }
- // --- Ensure S3 bucket exists once for the batch ---
- const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
- await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
- // --- Process each file ---
- const results = [];
- for (let i = 0; i < files.length; i++) {
- const fileInput = files[i];
- const singleInput = {
- ...fileInput,
- bucketKey,
- ownerId,
- };
- const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, singleInput);
- results.push({ ...result, index: i });
+ }
+ // Call PostGraphile's generated delete (RLS enforced)
+ const result = await oldResolve(source, args, graphqlContext, info);
+ // Attempt sync S3 cleanup if we have the file row
+ if (fileRow) {
+ const withPgClient = graphqlContext.withPgClient;
+ const pgSettings = graphqlContext.pgSettings;
+ if (withPgClient) {
+ try {
+ await withPgClient(pgSettings, async (pgClient) => {
+ const databaseId = await resolveDatabaseId(pgClient);
+ if (!databaseId)
+ return;
+ const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(pgClient, databaseId);
+ const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
+ if (!storageConfig)
+ return;
+ // Check refcount: any other file with the same key in this bucket?
+ const refResult = await pgClient.query({
+ text: `SELECT COUNT(*)::int AS ref_count FROM ${storageConfig.filesQualifiedName} WHERE key = $1 AND bucket_id = $2`,
+ values: [fileRow.key, fileRow.bucket_id],
+ });
+ const refCount = refResult.rows[0]?.ref_count ?? 0;
+ if (refCount > 0) {
+ log.info(`File deleted from DB; S3 key ${fileRow.key} still referenced by ${refCount} file(s)`);
+ return;
+ }
+ // No other references — attempt sync S3 delete
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+ await (0, s3_signer_1.deleteS3Object)(s3ForDb, fileRow.key);
+ log.info(`Sync S3 delete succeeded for key=${fileRow.key}`);
+ });
+ }
+ catch (err) {
+ // Sync S3 delete failed — the AFTER DELETE trigger has enqueued an async GC job
+ log.warn(`Sync S3 delete failed for key=${fileRow.key}; async GC job will retry: ${err.message}`);
+ }
+ }
  }
- return { files: results };
- });
- });
- });
+ }
+ return result;
+ },
+ };
  },
  },
  },
- }));
+ };
  }
  // --- Shared upload logic ---
- /**
- * Process a single upload request (used by both requestUploadUrl and requestBulkUploadUrls).
- */
- async function processUpload(options, input, withPgClient, pgSettings) {
- const { bucketKey, ownerId, contentHash, contentType, size, filename, key: customKey } = input;
- if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
- throw new Error('INVALID_BUCKET_KEY');
- }
+ async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
+ const { contentHash, contentType, size, filename, key: customKey } = input;
  if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
  throw new Error('INVALID_CONTENT_HASH');
  }
@@ -359,51 +444,6 @@ async function processUpload(options, input, withPgClient, pgSettings) {
  if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
  throw new Error('INVALID_CONTENT_TYPE');
  }
- return withPgClient(pgSettings, async (pgClient) => {
- return pgClient.withTransaction(async (txClient) => {
- const databaseId = await resolveDatabaseId(txClient);
- if (!databaseId) {
- throw new Error('DATABASE_NOT_FOUND');
- }
- const storageConfig = ownerId
- ? await (0, storage_module_cache_1.getStorageModuleConfigForOwner)(txClient, databaseId, ownerId)
- : await (0, storage_module_cache_1.getStorageModuleConfig)(txClient, databaseId);
- if (!storageConfig) {
- throw new Error(ownerId
- ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
- : 'STORAGE_MODULE_NOT_PROVISIONED');
- }
- if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
- throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
- }
- if (filename !== undefined && filename !== null) {
- if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
- throw new Error('INVALID_FILENAME');
- }
- }
- const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, bucketKey, ownerId);
- if (!bucket) {
- throw new Error('BUCKET_NOT_FOUND');
- }
- const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
- await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
- return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input);
- });
- });
- }
- /**
- * Process a single file upload within an already-resolved context.
- * Handles dedup, custom keys, versioning, and auto-path derivation.
- */
- async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
- const { contentHash, contentType, size, filename, key: customKey } = input;
- // --- Validate inputs ---
- if (!contentHash || !isValidSha256(contentHash)) {
- throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
- }
- if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
- throw new Error('INVALID_CONTENT_TYPE');
- }
  if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
  throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
  }
@@ -412,7 +452,7 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  throw new Error('INVALID_FILENAME');
  }
  }
- // --- Validate content type against bucket's allowed_mime_types ---
+ // Validate content type against bucket's allowed_mime_types
  if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
  const allowed = bucket.allowed_mime_types;
  const isAllowed = allowed.some((pattern) => {
@@ -428,11 +468,11 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
  }
  }
- // --- Validate size against bucket's max_file_size ---
+ // Validate size against bucket's max_file_size
  if (bucket.max_file_size && size > bucket.max_file_size) {
  throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
  }
- // --- Determine S3 key ---
+ // Determine S3 key
  let s3Key;
  let isCustomKey = false;
  if (customKey) {
@@ -449,11 +489,9 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  else {
  s3Key = buildS3Key(contentHash);
  }
- // --- Dedup / versioning check ---
+ // Dedup / versioning check
  let previousVersionId = null;
  if (isCustomKey) {
- // Custom key mode: check if a file with this key already exists in this bucket.
- // If so, auto-version by linking via previous_version_id.
  const existingResult = await txClient.query({
  text: `SELECT id, content_hash
  FROM ${storageConfig.filesQualifiedName}
@@ -465,7 +503,6 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  });
  if (existingResult.rows.length > 0) {
  const existing = existingResult.rows[0];
- // Same content hash = true dedup (no new upload needed)
  if (existing.content_hash === contentHash) {
  log.info(`Dedup hit (custom key): file ${existing.id} for key ${s3Key}`);
  return {
@@ -477,13 +514,11 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  previousVersionId: null,
  };
  }
- // Different content = new version
  previousVersionId = existing.id;
  log.info(`Versioning: new version of key ${s3Key}, previous=${previousVersionId}`);
  }
  }
  else {
- // Hash-based mode: dedup by content_hash in this bucket
  const dedupResult = await txClient.query({
  text: `SELECT id
  FROM ${storageConfig.filesQualifiedName}
@@ -505,27 +540,23 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  };
  }
  }
- // --- Auto-derive ltree path from custom key directory (only when has_path_shares) ---
+ // Auto-derive ltree path from custom key directory (only when has_path_shares)
  const derivedPath = isCustomKey && storageConfig.hasPathShares ? derivePathFromKey(s3Key) : null;
- // --- Create file record ---
+ // Create file record
  const hasOwnerColumn = storageConfig.membershipType !== null;
  const columns = ['bucket_id', 'key', 'content_hash', 'mime_type', 'size', 'filename', 'is_public'];
  const values = [bucket.id, s3Key, contentHash, contentType, size, filename || null, bucket.is_public];
- let paramIdx = values.length;
  if (hasOwnerColumn) {
  columns.push('owner_id');
  values.push(bucket.owner_id);
- paramIdx = values.length;
  }
  if (previousVersionId) {
  columns.push('previous_version_id');
  values.push(previousVersionId);
- paramIdx = values.length;
  }
  if (derivedPath) {
  columns.push('path');
  values.push(derivedPath);
- paramIdx = values.length;
  }
  const placeholders = values.map((_, i) => `$${i + 1}`).join(', ');
  const fileResult = await txClient.query({
@@ -536,7 +567,7 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  values,
  });
  const fileId = fileResult.rows[0].id;
- // --- Generate presigned PUT URL ---
+ // Generate presigned PUT URL
  const uploadUrl = await (0, s3_signer_1.generatePresignedPutUrl)(s3ForDb, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
  const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
  return {
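
The factory is consumed as a standard graphile-config plugin. A minimal wiring sketch, assuming a graphile-config preset; the option names (s3 as a lazy getter, resolveBucketName, ensureBucketProvisioned) appear in the diff above, but their exact callback signatures are not shown there and are assumptions:

    // Hypothetical wiring sketch; callback signatures are assumptions.
    const { createPresignedUrlPlugin } = require("graphile-presigned-url-plugin");

    const preset = {
      extends: [/* your PostGraphile preset */],
      plugins: [
        createPresignedUrlPlugin({
          // May be a lazy getter so env vars / the S3 client are only resolved on first use.
          s3: () => ({
            bucket: process.env.S3_BUCKET,
            publicUrlPrefix: process.env.S3_PUBLIC_URL_PREFIX,
            // ...S3 client / credential configuration
          }),
          // Optional per-database bucket naming (assumed signature).
          resolveBucketName: (databaseId) => `uploads-${databaseId}`,
          // Optional lazy provisioning hook (assumed signature); per the diff it is
          // invoked once per unseen bucket, then cached via markS3BucketProvisioned.
          ensureBucketProvisioned: async (bucketName) => {
            // create the bucket, CORS rules, policies, ...
          },
        }),
      ],
    };

    module.exports = preset;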
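On the client, the old global requestUploadUrl mutation is replaced by per-bucket fields, so an upload becomes a two-step round trip: ask the bucket for a presigned URL, then PUT the bytes. A hypothetical sketch; the payload fields (uploadUrl, fileId, key, deduplicated) come from the diff, while the root query field bucketById, the UUID variable type, and the endpoint handling are assumptions that depend on the generated schema:

    const crypto = require("node:crypto");

    async function uploadToBucket(endpoint, bucketId, fileBuffer, contentType) {
      // SHA-256 of the content, hex-encoded, as required by the contentHash arg.
      const contentHash = crypto.createHash("sha256").update(fileBuffer).digest("hex");
      const query = `
        query Upload($bucketId: UUID!, $hash: String!, $type: String!, $size: Int!) {
          bucketById(id: $bucketId) {
            requestUploadUrl(contentHash: $hash, contentType: $type, size: $size) {
              uploadUrl
              fileId
              key
              deduplicated
            }
          }
        }`;
      const res = await fetch(endpoint, {
        method: "POST",
        headers: { "content-type": "application/json" },
        body: JSON.stringify({
          query,
          variables: { bucketId, hash: contentHash, type: contentType, size: fileBuffer.length },
        }),
      });
      const { data, errors } = await res.json();
      if (errors) throw new Error(errors[0].message);
      const payload = data.bucketById.requestUploadUrl;
      // deduplicated === true means the content already exists; no PUT is needed.
      if (!payload.deduplicated && payload.uploadUrl) {
        await fetch(payload.uploadUrl, {
          method: "PUT",
          headers: { "content-type": contentType },
          body: fileBuffer,
        });
      }
      return payload.fileId;
    }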
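Deletes need no new client API: the wrapped mutation is whatever PostGraphile already generates for the @storageFiles table, and the refcount check plus S3 cleanup run server-side. A sketch, assuming the generated mutation is named deleteFileById and uses the classic input/payload shape implied by the clientMutationId handling in the hook:

    // Hypothetical delete call; mutation and payload field names are assumptions.
    const deleteFileMutation = `
      mutation DeleteFile($input: DeleteFileByIdInput!) {
        deleteFileById(input: $input) {
          clientMutationId
        }
      }`;
    // variables: { input: { id: fileId, clientMutationId: "delete-1" } }
    // The S3 object is removed synchronously when no other row references the key;
    // otherwise the AFTER DELETE trigger's async GC job handles it.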