graphile-presigned-url-plugin 0.10.0 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/plugin.js CHANGED
@@ -1,48 +1,42 @@
  /**
- * Presigned URL Plugin for PostGraphile v5
+ * Per-Table Storage Middleware Plugin for PostGraphile v5
  *
- * Adds presigned URL upload support to PostGraphile v5:
+ * Hooks into PostGraphile's auto-generated CRUD mutations to add S3 operations:
  *
- * 1. `requestUploadUrl` mutation: generates a presigned PUT URL for direct
- * client-to-S3 upload. Checks bucket access via RLS, deduplicates by
- * content hash via UNIQUE(bucket_id, key) constraint.
+ * 1. Delete middleware: wraps `delete*` mutations on `@storageFiles`-tagged tables
+ * with S3 object cleanup (sync + async GC fallback via AFTER DELETE trigger).
  *
- * 2. `downloadUrl` computed field on File types generates presigned GET URLs
- * for private files, returns public URL prefix + key for public files.
+ * 2. Upload fields: adds `requestUploadUrl` and `requestBulkUploadUrls` fields
+ * on `@storageBuckets`-tagged types, so clients upload via the typed bucket API.
  *
- * Uses the extendSchema + grafast plan pattern (same as PublicKeySignature).
+ * 3. Mutation entry points: adds per-bucket mutation fields on the root Mutation
+ * type (e.g., `appBucket(key: "public"): AppBucket`), so upload operations
+ * can be accessed as proper GraphQL mutations instead of queries.
+ *
+ * 4. downloadUrl — handled by download-url-field.ts (separate plugin).
+ *
+ * Scope resolution uses the codec's schema/table name matched against
+ * cached storage module configs.
  */
- import { context as grafastContext, lambda, object } from 'grafast';
- import { extendSchema, gql } from 'graphile-utils';
+ import { access, context as grafastContext, lambda, object } from 'grafast';
+ import 'graphile-build';
  import { Logger } from '@pgpmjs/logger';
- import { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
- import { generatePresignedPutUrl } from './s3-signer';
+ import { loadAllStorageModules, resolveStorageConfigFromCodec, getBucketConfig, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
+ import { generatePresignedPutUrl, deleteS3Object } from './s3-signer';
  const log = new Logger('graphile-presigned-url:plugin');
  // --- Protocol-level constants (not configurable) ---
  const MAX_CONTENT_HASH_LENGTH = 128;
  const MAX_CONTENT_TYPE_LENGTH = 255;
- const MAX_BUCKET_KEY_LENGTH = 255;
  const MAX_CUSTOM_KEY_LENGTH = 1024;
  const SHA256_HEX_REGEX = /^[a-f0-9]{64}$/;
  const CUSTOM_KEY_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9_.\-\/]*$/;
  // --- Helpers ---
- /**
- * Validate a SHA-256 hex string.
- */
  function isValidSha256(hash) {
  return SHA256_HEX_REGEX.test(hash);
  }
- /**
- * Build the S3 key from content hash.
- * Format: {contentHash} (flat namespace, content-addressed)
- */
  function buildS3Key(contentHash) {
  return contentHash;
  }
- /**
- * Validate a custom S3 key.
- * Must be 1-1024 chars, no path traversal, no leading slash, no null bytes.
- */
  function validateCustomKey(key) {
  if (key.length === 0 || key.length > MAX_CUSTOM_KEY_LENGTH) {
  return 'INVALID_KEY_LENGTH: must be 1-1024 characters';
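
For orientation (not part of the package diff), here is a sketch of how a client might drive the new upload flow the revised header comment describes: call the per-bucket entry point on the root Mutation type, request a presigned URL, then PUT the bytes straight to S3. The AppBucket/appBucket names follow the comment's own example; the /graphql endpoint and the helper name uploadViaBucket are assumptions.

// Sketch only: assumes a browser or Node 18+ runtime with global fetch and Web Crypto.
async function uploadViaBucket(file) {
  // Client-side SHA-256, hex-encoded, as expected by requestUploadUrl
  const buf = await file.arrayBuffer();
  const digest = await crypto.subtle.digest('SHA-256', buf);
  const contentHash = [...new Uint8Array(digest)].map((b) => b.toString(16).padStart(2, '0')).join('');
  const res = await fetch('/graphql', {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify({
      query: `mutation Upload($hash: String!, $type: String!, $size: Int!) {
        appBucket(key: "public") {
          requestUploadUrl(contentHash: $hash, contentType: $type, size: $size) {
            uploadUrl fileId key deduplicated expiresAt
          }
        }
      }`,
      variables: { hash: contentHash, type: file.type, size: file.size },
    }),
  });
  const payload = (await res.json()).data.appBucket.requestUploadUrl;
  // Deduplicated files need no upload; otherwise PUT the bytes to the presigned URL
  if (!payload.deduplicated) {
    await fetch(payload.uploadUrl, { method: 'PUT', headers: { 'content-type': file.type }, body: buf });
  }
  return payload.fileId;
}
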
@@ -61,11 +55,6 @@ function validateCustomKey(key) {
  }
  return null;
  }
- /**
- * Derive an ltree path from a custom S3 key's directory portion.
- * e.g., "reports/2024/Q1/revenue.pdf" → "reports.2024.Q1"
- * Returns null if the key has no directory component.
- */
  function derivePathFromKey(key) {
  const lastSlash = key.lastIndexOf('/');
  if (lastSlash <= 0)
@@ -73,41 +62,20 @@ function derivePathFromKey(key) {
  const dir = key.substring(0, lastSlash);
  return dir.replace(/\//g, '.');
  }
- /**
- * Resolve the database_id from the JWT context.
- * The server middleware sets jwt.claims.database_id, which is accessible
- * via jwt_private.current_database_id() — a simple function call, no
- * metaschema query needed.
- */
  async function resolveDatabaseId(pgClient) {
  const result = await pgClient.query({
  text: `SELECT jwt_private.current_database_id() AS id`,
  });
  return result.rows[0]?.id ?? null;
  }
- // --- Plugin factory ---
- /**
- * Resolve the S3 config from the options. If the option is a lazy getter
- * function, call it (and cache the result). This avoids reading env vars
- * or constructing an S3Client at module-import time.
- */
  function resolveS3(options) {
  if (typeof options.s3 === 'function') {
  const resolved = options.s3();
- // Cache so subsequent calls don't re-evaluate
  options.s3 = resolved;
  return resolved;
  }
  return options.s3;
  }
- /**
- * Build a per-database S3Config by overlaying storage_module overrides
- * onto the global S3Config.
- *
- * - Bucket name: from resolveBucketName(databaseId) if provided, else global
- * - publicUrlPrefix: from storageConfig.publicUrlPrefix if set, else global
- * - S3 client (credentials, endpoint): always global (shared IAM key)
- */
  function resolveS3ForDatabase(options, storageConfig, databaseId) {
  const globalS3 = resolveS3(options);
  const bucket = options.resolveBucketName
@@ -123,16 +91,6 @@ function resolveS3ForDatabase(options, storageConfig, databaseId) {
  ...(publicUrlPrefix != null ? { publicUrlPrefix } : {}),
  };
  }
- /**
- * Ensure the S3 bucket for a database exists, provisioning it lazily if needed.
- *
- * Checks an in-memory Set of known-provisioned bucket names. On the first
- * request for an unseen bucket, calls the `ensureBucketProvisioned` callback
- * (which creates the bucket with correct CORS, policies, etc.), then marks
- * it as provisioned so subsequent requests skip the check entirely.
- *
- * If no `ensureBucketProvisioned` callback is configured, this is a no-op.
- */
  async function ensureS3BucketExists(options, s3BucketName, bucket, databaseId, allowedOrigins) {
  if (!options.ensureBucketProvisioned)
  return;
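
As a quick orientation (not part of the package diff), the options consumed by the helpers above suggest roughly the following wiring when the plugin is constructed. The option names s3, resolveBucketName, and ensureBucketProvisioned come from the code shown here; the import path, the env-var names, the bucket-naming scheme, and the provisioning callback's signature are assumptions.

// Sketch only: a hypothetical call to the exported factory.
import { createPresignedUrlPlugin } from 'graphile-presigned-url-plugin';

const presignedUrlPlugin = createPresignedUrlPlugin({
  // Lazy getter: resolveS3() invokes it once and caches the result, so env vars
  // are read at request time rather than at module-import time.
  s3: () => ({
    bucket: process.env.S3_BUCKET,
    publicUrlPrefix: process.env.S3_PUBLIC_URL_PREFIX,
    // ...plus whatever credential/endpoint fields the s3-signer module expects
  }),
  // Per-database bucket naming used by resolveS3ForDatabase (hypothetical scheme).
  resolveBucketName: (databaseId) => `uploads-${databaseId}`,
  // Lazy-provisioning hook consumed by ensureS3BucketExists; its exact arguments
  // are not visible in this diff, so treat this signature as a placeholder.
  ensureBucketProvisioned: async (s3BucketName) => {
    // create the bucket, CORS rules, policies, etc.
  },
});
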
@@ -143,209 +101,403 @@ async function ensureS3BucketExists(options, s3BucketName, bucket, databaseId, a
  markS3BucketProvisioned(s3BucketName);
  log.info(`Lazy-provisioned S3 bucket "${s3BucketName}" successfully`);
  }
+ // --- Plugin factory ---
  export function createPresignedUrlPlugin(options) {
- return extendSchema(() => ({
- typeDefs: gql `
- input RequestUploadUrlInput {
- """Bucket key (e.g., "public", "private")"""
- bucketKey: String!
- """
- Owner entity ID for entity-scoped uploads.
- Omit for app-level (database-wide) storage.
- When provided, resolves the storage module for the entity type
- that owns this entity instance (e.g., a data room ID, team ID).
- """
- ownerId: UUID
- """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
- contentHash: String!
- """MIME type of the file (e.g., "image/png")"""
- contentType: String!
- """File size in bytes"""
- size: Int!
- """Original filename (optional, for display and Content-Disposition)"""
- filename: String
- """
- Custom S3 key (e.g., "reports/2024/Q1.pdf").
- Only allowed when the bucket has allow_custom_keys=true.
- When omitted, key defaults to contentHash (content-addressed dedup).
- When provided, the file is stored at this key.
- Re-uploading to an existing key auto-creates a new version.
- """
- key: String
- }
-
- type RequestUploadUrlPayload {
- """Presigned PUT URL (null if file was deduplicated)"""
- uploadUrl: String
- """The file ID (existing if deduplicated, new if fresh upload)"""
- fileId: UUID!
- """The S3 object key"""
- key: String!
- """Whether this file was deduplicated (already exists with same hash)"""
- deduplicated: Boolean!
- """Presigned URL expiry time (null if deduplicated)"""
- expiresAt: Datetime
- """ID of the previous version (set when re-uploading to an existing custom key)"""
- previousVersionId: UUID
- }
-
- input BulkUploadFileInput {
- """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
- contentHash: String!
- """MIME type of the file (e.g., "image/png")"""
- contentType: String!
- """File size in bytes"""
- size: Int!
- """Original filename (optional, for display and Content-Disposition)"""
- filename: String
- """Custom S3 key (only when bucket has allow_custom_keys=true)"""
- key: String
- }
-
- input RequestBulkUploadUrlsInput {
- """Bucket key (e.g., "public", "private")"""
- bucketKey: String!
- """Owner entity ID for entity-scoped uploads"""
- ownerId: UUID
- """Array of files to upload"""
- files: [BulkUploadFileInput!]!
- }
-
- type BulkUploadFilePayload {
- """Presigned PUT URL (null if file was deduplicated)"""
- uploadUrl: String
- """The file ID"""
- fileId: UUID!
- """The S3 object key"""
- key: String!
- """Whether this file was deduplicated"""
- deduplicated: Boolean!
- """Presigned URL expiry time (null if deduplicated)"""
- expiresAt: Datetime
- """ID of the previous version (set when re-uploading to an existing custom key)"""
- previousVersionId: UUID
- """Index of this file in the input array (for client correlation)"""
- index: Int!
- }
-
- type RequestBulkUploadUrlsPayload {
- """Array of results, one per input file"""
- files: [BulkUploadFilePayload!]!
- }
-
- extend type Mutation {
- """
- Request a presigned URL for uploading a file directly to S3.
- Client computes SHA-256 of the file content and provides it here.
- If a file with the same hash already exists (dedup), returns the
- existing file ID and deduplicated=true with no uploadUrl.
- """
- requestUploadUrl(
- input: RequestUploadUrlInput!
- ): RequestUploadUrlPayload
-
- """
- Request presigned URLs for uploading multiple files in a single batch.
- Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size).
- Each file is processed independently — some may dedup while others get fresh URLs.
- """
- requestBulkUploadUrls(
- input: RequestBulkUploadUrlsInput!
- ): RequestBulkUploadUrlsPayload
- }
- `,
- plans: {
- Mutation: {
- requestUploadUrl(_$mutation, fieldArgs) {
- const $input = fieldArgs.getRaw('input');
- const $withPgClient = grafastContext().get('withPgClient');
- const $pgSettings = grafastContext().get('pgSettings');
- const $combined = object({
- input: $input,
- withPgClient: $withPgClient,
- pgSettings: $pgSettings,
+ return {
+ name: 'PresignedUrlPlugin',
+ version: '1.0.0',
+ description: 'Per-table S3 storage middleware: upload fields on @storageBuckets, delete middleware on @storageFiles',
+ after: ['PgAttributesPlugin', 'PgMutationCreatePlugin', 'PgMutationUpdateDeletePlugin'],
+ schema: {
+ hooks: {
+ /**
+ * Add requestUploadUrl and requestBulkUploadUrls fields on @storageBuckets types.
+ */
+ GraphQLObjectType_fields(fields, build, context) {
+ const { scope: { pgCodec, isPgClassType, isRootMutation }, } = context;
+ // --- Path 1: Add per-bucket mutation entry points on root Mutation ---
+ if (isRootMutation) {
+ const { graphql: { GraphQLString, GraphQLNonNull }, } = build;
+ const bucketCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageBuckets);
+ if (bucketCodecs.length === 0)
+ return fields;
+ const newFields = {};
+ for (const codec of bucketCodecs) {
+ const typeName = build.inflection.tableType(codec);
+ const bucketType = build.getTypeByName(typeName);
+ if (!bucketType) {
+ log.debug(`Skipping mutation entry point for ${codec.name}: type ${typeName} not found`);
+ continue;
+ }
+ const fieldName = typeName.charAt(0).toLowerCase() + typeName.slice(1);
+ const hasOwnerId = !!codec.attributes.owner_id;
+ const capturedCodec = codec;
+ log.debug(`Adding mutation entry point "${fieldName}" for bucket type ${typeName} (entity-scoped=${hasOwnerId})`);
+ newFields[fieldName] = context.fieldWithHooks({ fieldName }, {
+ description: `Look up a ${typeName} by key for mutation operations (upload, etc.).`,
+ type: bucketType,
+ args: {
+ key: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
+ ...(hasOwnerId
+ ? { ownerId: { type: new GraphQLNonNull(GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
+ : {}),
+ },
+ plan(_$mutation, fieldArgs) {
+ const $key = fieldArgs.getRaw('key');
+ const $ownerId = hasOwnerId ? fieldArgs.getRaw('ownerId') : lambda(null, () => null);
+ const $withPgClient = grafastContext().get('withPgClient');
+ const $pgSettings = grafastContext().get('pgSettings');
+ const $combined = object({
+ key: $key,
+ ownerId: $ownerId,
+ withPgClient: $withPgClient,
+ pgSettings: $pgSettings,
+ });
+ const $row = lambda($combined, async (vals) => {
+ return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+ const databaseId = await resolveDatabaseId(pgClient);
+ if (!databaseId)
+ throw new Error('DATABASE_NOT_FOUND');
+ const allConfigs = await loadAllStorageModules(pgClient, databaseId);
+ const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+ if (!storageConfig)
+ throw new Error('STORAGE_MODULE_NOT_FOUND');
+ const bucket = await getBucketConfig(pgClient, storageConfig, databaseId, vals.key, vals.ownerId ?? undefined);
+ if (!bucket)
+ throw new Error('BUCKET_NOT_FOUND');
+ return bucket;
+ });
+ });
+ const columnEntries = {};
+ for (const col of Object.keys(capturedCodec.attributes)) {
+ columnEntries[col] = access($row, col);
+ }
+ return object(columnEntries);
+ },
+ });
+ }
+ return build.extend(fields, newFields, 'PresignedUrlPlugin adding per-bucket mutation entry points');
+ }
+ // --- Path 2: Add upload fields on @storageBuckets types ---
+ if (!isPgClassType || !pgCodec || !pgCodec.attributes) {
+ return fields;
+ }
+ const tags = pgCodec.extensions?.tags;
+ if (!tags?.storageBuckets) {
+ return fields;
+ }
+ log.debug(`Adding upload fields to bucket type: ${pgCodec.name} (has @storageBuckets tag)`);
+ const { graphql: { GraphQLString, GraphQLNonNull, GraphQLInt, GraphQLBoolean, GraphQLObjectType, GraphQLList, GraphQLInputObjectType, }, } = build;
+ // --- Shared output types ---
+ const UploadUrlPayloadType = new GraphQLObjectType({
+ name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestUploadUrlPayload`,
+ fields: {
+ uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' },
+ fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID' },
+ key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' },
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated' },
+ expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' },
+ previousVersionId: { type: GraphQLString, description: 'ID of the previous version' },
+ },
  });
- return lambda($combined, async ({ input, withPgClient, pgSettings }) => {
- const result = await processUpload(options, input, withPgClient, pgSettings);
- return result;
+ const BulkUploadFilePayloadType = new GraphQLObjectType({
+ name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFilePayload`,
+ fields: {
+ uploadUrl: { type: GraphQLString },
+ fileId: { type: new GraphQLNonNull(GraphQLString) },
+ key: { type: new GraphQLNonNull(GraphQLString) },
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) },
+ expiresAt: { type: GraphQLString },
+ previousVersionId: { type: GraphQLString },
+ index: { type: new GraphQLNonNull(GraphQLInt), description: 'Index in the input array' },
+ },
  });
- },
- requestBulkUploadUrls(_$mutation, fieldArgs) {
- const $input = fieldArgs.getRaw('input');
- const $withPgClient = grafastContext().get('withPgClient');
- const $pgSettings = grafastContext().get('pgSettings');
- const $combined = object({
- input: $input,
- withPgClient: $withPgClient,
- pgSettings: $pgSettings,
+ const BulkUploadUrlsPayloadType = new GraphQLObjectType({
+ name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestBulkUploadUrlsPayload`,
+ fields: {
+ files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFilePayloadType))) },
+ },
  });
- return lambda($combined, async ({ input, withPgClient, pgSettings }) => {
- const { bucketKey, ownerId, files } = input;
- if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
- throw new Error('INVALID_BUCKET_KEY');
- }
- if (!Array.isArray(files) || files.length === 0) {
- throw new Error('INVALID_FILES: must provide at least one file');
- }
- return withPgClient(pgSettings, async (pgClient) => {
- return pgClient.withTransaction(async (txClient) => {
- const databaseId = await resolveDatabaseId(txClient);
- if (!databaseId) {
- throw new Error('DATABASE_NOT_FOUND');
- }
- const storageConfig = ownerId
- ? await getStorageModuleConfigForOwner(txClient, databaseId, ownerId)
- : await getStorageModuleConfig(txClient, databaseId);
- if (!storageConfig) {
- throw new Error(ownerId
- ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
- : 'STORAGE_MODULE_NOT_PROVISIONED');
- }
- // --- Validate bulk limits ---
- if (files.length > storageConfig.maxBulkFiles) {
- throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
- }
- const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
- if (totalSize > storageConfig.maxBulkTotalSize) {
- throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
- }
- const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId);
- if (!bucket) {
- throw new Error('BUCKET_NOT_FOUND');
+ const BulkUploadFileInputType = new GraphQLInputObjectType({
+ name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFileInput`,
+ fields: {
+ contentHash: { type: new GraphQLNonNull(GraphQLString) },
+ contentType: { type: new GraphQLNonNull(GraphQLString) },
+ size: { type: new GraphQLNonNull(GraphQLInt) },
+ filename: { type: GraphQLString },
+ key: { type: GraphQLString },
+ },
+ });
+ // Capture codec for closure
+ const capturedCodec = pgCodec;
+ return build.extend(fields, {
+ requestUploadUrl: context.fieldWithHooks({ fieldName: 'requestUploadUrl' }, {
+ description: 'Request a presigned URL for uploading a file to this bucket.',
+ type: UploadUrlPayloadType,
+ args: {
+ contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
+ contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
+ size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
+ filename: { type: GraphQLString, description: 'Original filename (optional)' },
+ key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
+ },
+ plan($parent, fieldArgs) {
+ const $bucketId = $parent.get('id');
+ const $bucketKey = $parent.get('key');
+ const $bucketType = $parent.get('type');
+ const $bucketIsPublic = $parent.get('is_public');
+ const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
+ const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
+ const $bucketMaxFileSize = $parent.get('max_file_size');
+ const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : lambda(null, () => null);
+ const $contentHash = fieldArgs.getRaw('contentHash');
+ const $contentType = fieldArgs.getRaw('contentType');
+ const $size = fieldArgs.getRaw('size');
+ const $filename = fieldArgs.getRaw('filename');
+ const $customKey = fieldArgs.getRaw('key');
+ const $withPgClient = grafastContext().get('withPgClient');
+ const $pgSettings = grafastContext().get('pgSettings');
+ const $combined = object({
+ bucketId: $bucketId,
+ bucketKey: $bucketKey,
+ bucketType: $bucketType,
+ bucketIsPublic: $bucketIsPublic,
+ bucketAllowCustomKeys: $bucketAllowCustomKeys,
+ bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
+ bucketMaxFileSize: $bucketMaxFileSize,
+ bucketOwnerId: $bucketOwnerId,
+ contentHash: $contentHash,
+ contentType: $contentType,
+ size: $size,
+ filename: $filename,
+ customKey: $customKey,
+ withPgClient: $withPgClient,
+ pgSettings: $pgSettings,
+ });
+ return lambda($combined, async (vals) => {
+ return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+ return pgClient.withTransaction(async (txClient) => {
+ const databaseId = await resolveDatabaseId(txClient);
+ if (!databaseId)
+ throw new Error('DATABASE_NOT_FOUND');
+ const allConfigs = await loadAllStorageModules(txClient, databaseId);
+ const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+ if (!storageConfig)
+ throw new Error('STORAGE_MODULE_NOT_FOUND');
+ const bucket = {
+ id: vals.bucketId,
+ key: vals.bucketKey,
+ type: vals.bucketType,
+ is_public: vals.bucketIsPublic,
+ owner_id: vals.bucketOwnerId,
+ allowed_mime_types: vals.bucketAllowedMimeTypes,
+ max_file_size: vals.bucketMaxFileSize,
+ allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
+ };
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+ return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
+ contentHash: vals.contentHash,
+ contentType: vals.contentType,
+ size: vals.size,
+ filename: vals.filename,
+ key: vals.customKey,
+ });
+ });
+ });
+ });
+ },
+ }),
+ requestBulkUploadUrls: context.fieldWithHooks({ fieldName: 'requestBulkUploadUrls' }, {
+ description: 'Request presigned URLs for uploading multiple files to this bucket.',
+ type: BulkUploadUrlsPayloadType,
+ args: {
+ files: {
+ type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFileInputType))),
+ description: 'Array of files to upload',
+ },
+ },
+ plan($parent, fieldArgs) {
+ const $bucketId = $parent.get('id');
+ const $bucketKey = $parent.get('key');
+ const $bucketType = $parent.get('type');
+ const $bucketIsPublic = $parent.get('is_public');
+ const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
+ const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
+ const $bucketMaxFileSize = $parent.get('max_file_size');
+ const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : lambda(null, () => null);
+ const $files = fieldArgs.getRaw('files');
+ const $withPgClient = grafastContext().get('withPgClient');
+ const $pgSettings = grafastContext().get('pgSettings');
+ const $combined = object({
+ bucketId: $bucketId,
+ bucketKey: $bucketKey,
+ bucketType: $bucketType,
+ bucketIsPublic: $bucketIsPublic,
+ bucketAllowCustomKeys: $bucketAllowCustomKeys,
+ bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
+ bucketMaxFileSize: $bucketMaxFileSize,
+ bucketOwnerId: $bucketOwnerId,
+ files: $files,
+ withPgClient: $withPgClient,
+ pgSettings: $pgSettings,
+ });
+ return lambda($combined, async (vals) => {
+ const { files } = vals;
+ if (!Array.isArray(files) || files.length === 0) {
+ throw new Error('INVALID_FILES: must provide at least one file');
+ }
+ return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+ return pgClient.withTransaction(async (txClient) => {
+ const databaseId = await resolveDatabaseId(txClient);
+ if (!databaseId)
+ throw new Error('DATABASE_NOT_FOUND');
+ const allConfigs = await loadAllStorageModules(txClient, databaseId);
+ const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+ if (!storageConfig)
+ throw new Error('STORAGE_MODULE_NOT_FOUND');
+ if (files.length > storageConfig.maxBulkFiles) {
+ throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
+ }
+ const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
+ if (totalSize > storageConfig.maxBulkTotalSize) {
+ throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
+ }
+ const bucket = {
+ id: vals.bucketId,
+ key: vals.bucketKey,
+ type: vals.bucketType,
+ is_public: vals.bucketIsPublic,
+ owner_id: vals.bucketOwnerId,
+ allowed_mime_types: vals.bucketAllowedMimeTypes,
+ max_file_size: vals.bucketMaxFileSize,
+ allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
+ };
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+ const results = [];
+ for (let i = 0; i < files.length; i++) {
+ const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, files[i]);
+ results.push({ ...result, index: i });
+ }
+ return { files: results };
+ });
+ });
+ });
+ },
+ }),
+ }, `PresignedUrlPlugin adding upload fields to ${pgCodec.name}`);
+ },
+ /**
+ * Wrap delete* mutations on @storageFiles-tagged tables with S3 cleanup.
+ *
+ * Pattern: identical to graphile-bucket-provisioner-plugin's create/update hooks.
+ * 1. Read the file row BEFORE delete (need key + bucket_id for S3 cleanup)
+ * 2. Call PostGraphile's generated delete (RLS enforced)
+ * 3. If delete succeeded, check refcount and attempt sync S3 delete
+ * 4. AFTER DELETE trigger (constructive-db) enqueues async GC job as fallback
+ */
+ GraphQLObjectType_fields_field(field, build, context) {
+ const { scope: { isRootMutation, fieldName, pgCodec }, } = context;
+ if (!isRootMutation || !pgCodec || !pgCodec.attributes) {
+ return field;
+ }
+ const tags = pgCodec.extensions?.tags;
+ if (!tags?.storageFiles) {
+ return field;
+ }
+ if (!fieldName.startsWith('delete')) {
+ return field;
+ }
+ log.debug(`Wrapping delete mutation "${fieldName}" with S3 cleanup (codec: ${pgCodec.name})`);
+ const defaultResolver = (obj) => obj[fieldName];
+ const { resolve: oldResolve = defaultResolver, ...rest } = field;
+ const capturedCodec = pgCodec;
+ return {
+ ...rest,
+ async resolve(source, args, graphqlContext, info) {
+ // Extract the file ID from the mutation input
+ const inputKey = Object.keys(args.input || {}).find((k) => k !== 'clientMutationId');
+ const fileInput = inputKey ? args.input[inputKey] : null;
+ let fileRow = null;
+ if (fileInput) {
+ // Read the file row BEFORE delete to get the S3 key + bucket_id
+ const withPgClient = graphqlContext.withPgClient;
+ const pgSettings = graphqlContext.pgSettings;
+ if (withPgClient) {
+ try {
+ await withPgClient(pgSettings, async (pgClient) => {
+ const databaseId = await resolveDatabaseId(pgClient);
+ if (!databaseId)
+ return;
+ const allConfigs = await loadAllStorageModules(pgClient, databaseId);
+ const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+ if (!storageConfig)
+ return;
+ // Read the file row (RLS enforced)
+ const result = await pgClient.query({
+ text: `SELECT key, bucket_id FROM ${storageConfig.filesQualifiedName} WHERE id = $1 LIMIT 1`,
+ values: [fileInput],
+ });
+ if (result.rows.length > 0) {
+ fileRow = result.rows[0];
+ }
+ });
+ }
+ catch (err) {
+ log.warn(`Pre-delete file lookup failed: ${err.message}`);
+ }
  }
- // --- Ensure S3 bucket exists once for the batch ---
- const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
- await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
- // --- Process each file ---
- const results = [];
- for (let i = 0; i < files.length; i++) {
- const fileInput = files[i];
- const singleInput = {
- ...fileInput,
- bucketKey,
- ownerId,
- };
- const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, singleInput);
- results.push({ ...result, index: i });
+ }
+ // Call PostGraphile's generated delete (RLS enforced)
+ const result = await oldResolve(source, args, graphqlContext, info);
+ // Attempt sync S3 cleanup if we have the file row
+ if (fileRow) {
+ const withPgClient = graphqlContext.withPgClient;
+ const pgSettings = graphqlContext.pgSettings;
+ if (withPgClient) {
+ try {
+ await withPgClient(pgSettings, async (pgClient) => {
+ const databaseId = await resolveDatabaseId(pgClient);
+ if (!databaseId)
+ return;
+ const allConfigs = await loadAllStorageModules(pgClient, databaseId);
+ const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+ if (!storageConfig)
+ return;
+ // Check refcount: any other file with the same key in this bucket?
+ const refResult = await pgClient.query({
+ text: `SELECT COUNT(*)::int AS ref_count FROM ${storageConfig.filesQualifiedName} WHERE key = $1 AND bucket_id = $2`,
+ values: [fileRow.key, fileRow.bucket_id],
+ });
+ const refCount = refResult.rows[0]?.ref_count ?? 0;
+ if (refCount > 0) {
+ log.info(`File deleted from DB; S3 key ${fileRow.key} still referenced by ${refCount} file(s)`);
+ return;
+ }
+ // No other references — attempt sync S3 delete
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+ await deleteS3Object(s3ForDb, fileRow.key);
+ log.info(`Sync S3 delete succeeded for key=${fileRow.key}`);
+ });
+ }
+ catch (err) {
+ // Sync S3 delete failed — the AFTER DELETE trigger has enqueued an async GC job
+ log.warn(`Sync S3 delete failed for key=${fileRow.key}; async GC job will retry: ${err.message}`);
+ }
  }
- return { files: results };
- });
- });
- });
+ }
+ return result;
+ },
+ };
  },
  },
  },
- }));
+ };
  }
  // --- Shared upload logic ---
- /**
- * Process a single upload request (used by both requestUploadUrl and requestBulkUploadUrls).
- */
- async function processUpload(options, input, withPgClient, pgSettings) {
- const { bucketKey, ownerId, contentHash, contentType, size, filename, key: customKey } = input;
- if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
- throw new Error('INVALID_BUCKET_KEY');
- }
+ async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
+ const { contentHash, contentType, size, filename, key: customKey } = input;
  if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
  throw new Error('INVALID_CONTENT_HASH');
  }
@@ -355,51 +507,6 @@ async function processUpload(options, input, withPgClient, pgSettings) {
  if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
  throw new Error('INVALID_CONTENT_TYPE');
  }
- return withPgClient(pgSettings, async (pgClient) => {
- return pgClient.withTransaction(async (txClient) => {
- const databaseId = await resolveDatabaseId(txClient);
- if (!databaseId) {
- throw new Error('DATABASE_NOT_FOUND');
- }
- const storageConfig = ownerId
- ? await getStorageModuleConfigForOwner(txClient, databaseId, ownerId)
- : await getStorageModuleConfig(txClient, databaseId);
- if (!storageConfig) {
- throw new Error(ownerId
- ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
- : 'STORAGE_MODULE_NOT_PROVISIONED');
- }
- if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
- throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
- }
- if (filename !== undefined && filename !== null) {
- if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
- throw new Error('INVALID_FILENAME');
- }
- }
- const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId);
- if (!bucket) {
- throw new Error('BUCKET_NOT_FOUND');
- }
- const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
- await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
- return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input);
- });
- });
- }
- /**
- * Process a single file upload within an already-resolved context.
- * Handles dedup, custom keys, versioning, and auto-path derivation.
- */
- async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
- const { contentHash, contentType, size, filename, key: customKey } = input;
- // --- Validate inputs ---
- if (!contentHash || !isValidSha256(contentHash)) {
- throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
- }
- if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
- throw new Error('INVALID_CONTENT_TYPE');
- }
  if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
  throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
  }
@@ -408,7 +515,7 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  throw new Error('INVALID_FILENAME');
  }
  }
- // --- Validate content type against bucket's allowed_mime_types ---
+ // Validate content type against bucket's allowed_mime_types
  if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
  const allowed = bucket.allowed_mime_types;
  const isAllowed = allowed.some((pattern) => {
@@ -424,11 +531,11 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
  }
  }
- // --- Validate size against bucket's max_file_size ---
+ // Validate size against bucket's max_file_size
  if (bucket.max_file_size && size > bucket.max_file_size) {
  throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
  }
- // --- Determine S3 key ---
+ // Determine S3 key
  let s3Key;
  let isCustomKey = false;
  if (customKey) {
@@ -445,11 +552,9 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  else {
  s3Key = buildS3Key(contentHash);
  }
- // --- Dedup / versioning check ---
+ // Dedup / versioning check
  let previousVersionId = null;
  if (isCustomKey) {
- // Custom key mode: check if a file with this key already exists in this bucket.
- // If so, auto-version by linking via previous_version_id.
  const existingResult = await txClient.query({
  text: `SELECT id, content_hash
  FROM ${storageConfig.filesQualifiedName}
@@ -461,7 +566,6 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  });
  if (existingResult.rows.length > 0) {
  const existing = existingResult.rows[0];
- // Same content hash = true dedup (no new upload needed)
  if (existing.content_hash === contentHash) {
  log.info(`Dedup hit (custom key): file ${existing.id} for key ${s3Key}`);
  return {
@@ -473,13 +577,11 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  previousVersionId: null,
  };
  }
- // Different content = new version
  previousVersionId = existing.id;
  log.info(`Versioning: new version of key ${s3Key}, previous=${previousVersionId}`);
  }
  }
  else {
- // Hash-based mode: dedup by content_hash in this bucket
  const dedupResult = await txClient.query({
  text: `SELECT id
  FROM ${storageConfig.filesQualifiedName}
@@ -501,27 +603,23 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  };
  }
  }
- // --- Auto-derive ltree path from custom key directory (only when has_path_shares) ---
+ // Auto-derive ltree path from custom key directory (only when has_path_shares)
  const derivedPath = isCustomKey && storageConfig.hasPathShares ? derivePathFromKey(s3Key) : null;
- // --- Create file record ---
+ // Create file record
  const hasOwnerColumn = storageConfig.membershipType !== null;
  const columns = ['bucket_id', 'key', 'content_hash', 'mime_type', 'size', 'filename', 'is_public'];
  const values = [bucket.id, s3Key, contentHash, contentType, size, filename || null, bucket.is_public];
- let paramIdx = values.length;
  if (hasOwnerColumn) {
  columns.push('owner_id');
  values.push(bucket.owner_id);
- paramIdx = values.length;
  }
  if (previousVersionId) {
  columns.push('previous_version_id');
  values.push(previousVersionId);
- paramIdx = values.length;
  }
  if (derivedPath) {
  columns.push('path');
  values.push(derivedPath);
- paramIdx = values.length;
  }
  const placeholders = values.map((_, i) => `$${i + 1}`).join(', ');
  const fileResult = await txClient.query({
@@ -532,7 +630,7 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
  values,
  });
  const fileId = fileResult.rows[0].id;
- // --- Generate presigned PUT URL ---
+ // Generate presigned PUT URL
  const uploadUrl = await generatePresignedPutUrl(s3ForDb, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
  const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
  return {