graphile-presigned-url-plugin 0.15.0 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/plugin.js CHANGED
@@ -3,22 +3,15 @@
3
3
  *
4
4
  * Hooks into PostGraphile's auto-generated CRUD mutations to add S3 operations:
5
5
  *
6
- * 1. Delete middleware wraps `delete*` mutations on `@storageFiles`-tagged tables
7
- * with S3 object cleanup (sync + async GC fallback via AFTER DELETE trigger).
8
- *
9
- * 2. Upload fields — adds `requestUploadUrl` and `requestBulkUploadUrls` fields
10
- * on `@storageBuckets`-tagged types, so clients upload via the typed bucket API.
11
- *
12
- * 3. Mutation entry points — adds per-bucket mutation fields on the root Mutation
13
- * type (e.g., `appBucket(key: "public"): AppBucket`), so upload operations
14
- * can be accessed as proper GraphQL mutations instead of queries.
15
- *
16
- * 4. File upload mutations — adds `upload<FileType>(input: {...})` mutations
6
+ * 1. File upload mutations — adds `upload<FileType>(input: {...})` mutations
17
7
  * on root Mutation for each @storageFiles/@storageBuckets pair. These combine
18
8
  * bucket resolution + file INSERT + presigned URL generation in one step.
19
9
  * E.g., `uploadAppFile(input: { bucketKey: "public", contentHash: "...", ... })`
20
10
  *
21
- * 5. downloadUrl — handled by download-url-field.ts (separate plugin).
11
+ * 2. Delete middleware wraps `delete*` mutations on `@storageFiles`-tagged tables
12
+ * with S3 object cleanup (sync + async GC fallback via AFTER DELETE trigger).
13
+ *
14
+ * 3. downloadUrl — handled by download-url-field.ts (separate plugin).
22
15
  *
23
16
  * Scope resolution uses the codec's schema/table name matched against
24
17
  * cached storage module configs.
@@ -116,347 +109,90 @@ export function createPresignedUrlPlugin(options) {
116
109
  schema: {
117
110
  hooks: {
118
111
  /**
119
- * Add requestUploadUrl and requestBulkUploadUrls fields on @storageBuckets types.
112
+ * Add file upload mutations (uploadAppFile, uploadDataRoomFile, etc.) on root Mutation.
120
113
  */
121
114
  GraphQLObjectType_fields(fields, build, context) {
122
- const { scope: { pgCodec, isPgClassType, isRootMutation }, } = context;
123
- // --- Path 1: Add per-bucket mutation entry points + file creation mutations on root Mutation ---
124
- if (isRootMutation) {
125
- const { graphql: { GraphQLString, GraphQLNonNull, GraphQLInt, GraphQLBoolean, GraphQLObjectType, GraphQLInputObjectType, GraphQLList, }, } = build;
126
- const bucketCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageBuckets);
127
- if (bucketCodecs.length === 0)
128
- return fields;
129
- const newFields = {};
130
- // --- 1a: Per-bucket entry points (appBucket, dataRoomBucket, etc.) ---
131
- for (const codec of bucketCodecs) {
132
- const typeName = build.inflection.tableType(codec);
133
- const bucketType = build.getTypeByName(typeName);
134
- if (!bucketType) {
135
- log.debug(`Skipping mutation entry point for ${codec.name}: type ${typeName} not found`);
136
- continue;
137
- }
138
- const fieldName = typeName.charAt(0).toLowerCase() + typeName.slice(1);
139
- const hasOwnerId = !!codec.attributes.owner_id;
140
- const bucketResource = Object.values(build.input.pgRegistry.pgResources).find((r) => r.codec === codec && !r.isUnique && !r.isVirtual && !r.parameters);
141
- if (!bucketResource) {
142
- log.debug(`Skipping mutation entry point for ${codec.name}: no PgResource found`);
143
- continue;
144
- }
145
- const ownerIdType = hasOwnerId
146
- ? build.getGraphQLTypeByPgCodec(codec.attributes.owner_id.codec, 'input')
147
- : null;
148
- log.debug(`Adding mutation entry point "${fieldName}" for bucket type ${typeName} (entity-scoped=${hasOwnerId})`);
149
- newFields[fieldName] = context.fieldWithHooks({ fieldName }, {
150
- description: `Look up a ${typeName} by key for mutation operations (upload, etc.).`,
151
- type: bucketType,
152
- args: {
153
- key: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
154
- ...(hasOwnerId
155
- ? { ownerId: { type: new GraphQLNonNull(ownerIdType || GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
156
- : {}),
157
- },
158
- plan(_$mutation, fieldArgs) {
159
- const spec = {
160
- key: fieldArgs.getRaw('key'),
161
- };
162
- if (hasOwnerId) {
163
- spec.owner_id = fieldArgs.getRaw('ownerId');
164
- }
165
- return bucketResource.find(spec).single();
166
- },
167
- });
168
- }
169
- // --- 1b: File upload mutations (uploadAppFile, uploadDataRoomFile, etc.) ---
170
- const fileCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageFiles);
171
- for (const filesCodec of fileCodecs) {
172
- const filesTypeName = build.inflection.tableType(filesCodec);
173
- const filesSchemaName = filesCodec.extensions?.pg?.schemaName;
174
- // Find the matching bucket codec by table name prefix.
175
- // Schema-name matching is ambiguous when multiple storage modules share
176
- // the same PG schema (e.g. app_files + data_room_files both in storage_public).
177
- // Instead, derive the prefix from the raw SQL table name:
178
- // "data_room_files" → prefix "data_room" → matches "data_room_buckets"
179
- // "app_files" → prefix "app" → matches "app_buckets"
180
- const filesRawName = filesCodec.extensions?.pg?.name;
181
- const filesPrefix = filesRawName?.replace(/_files$/, '');
182
- const matchingBucketCodec = bucketCodecs.find((bc) => {
183
- const bucketRawName = bc.extensions?.pg?.name;
184
- const bucketPrefix = bucketRawName?.replace(/_buckets$/, '');
185
- return bucketPrefix === filesPrefix;
186
- });
187
- if (!matchingBucketCodec) {
188
- log.debug(`Skipping upload mutation for ${filesCodec.name}: no matching bucket codec with prefix "${filesPrefix}"`);
189
- continue;
190
- }
191
- const hasOwnerId = !!matchingBucketCodec.attributes.owner_id;
192
- const mutationName = `upload${filesTypeName}`;
193
- const ownerIdGqlType = hasOwnerId
194
- ? build.getGraphQLTypeByPgCodec(matchingBucketCodec.attributes.owner_id.codec, 'input')
195
- : null;
196
- const InputType = new GraphQLInputObjectType({
197
- name: `Upload${filesTypeName}Input`,
198
- fields: {
199
- bucketKey: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
200
- ...(hasOwnerId
201
- ? { ownerId: { type: new GraphQLNonNull(ownerIdGqlType || GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
202
- : {}),
203
- contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
204
- contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
205
- size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
206
- filename: { type: GraphQLString, description: 'Original filename (optional)' },
207
- key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
208
- },
209
- });
210
- const PayloadType = new GraphQLObjectType({
211
- name: `Upload${filesTypeName}Payload`,
212
- fields: {
213
- uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' },
214
- fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID (UUID)' },
215
- key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' },
216
- deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated (content already exists)' },
217
- expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' },
218
- previousVersionId: { type: GraphQLString, description: 'ID of the previous version (when using custom keys)' },
219
- },
220
- });
221
- const capturedFilesCodec = filesCodec;
222
- log.debug(`Adding file upload mutation "${mutationName}" for ${filesTypeName} (entity-scoped=${hasOwnerId})`);
223
- newFields[mutationName] = context.fieldWithHooks({ fieldName: mutationName }, {
224
- description: `Upload a file: resolves the bucket by key, creates the file row, and returns a presigned PUT URL.`,
225
- type: PayloadType,
226
- args: {
227
- input: { type: new GraphQLNonNull(InputType) },
228
- },
229
- plan(_$mutation, fieldArgs) {
230
- const $input = fieldArgs.getRaw('input');
231
- const $bucketKey = access($input, 'bucketKey');
232
- const $contentHash = access($input, 'contentHash');
233
- const $contentType = access($input, 'contentType');
234
- const $size = access($input, 'size');
235
- const $filename = access($input, 'filename');
236
- const $customKey = access($input, 'key');
237
- const $ownerId = hasOwnerId ? access($input, 'ownerId') : lambda(null, () => null);
238
- const $withPgClient = grafastContext().get('withPgClient');
239
- const $pgSettings = grafastContext().get('pgSettings');
240
- const $combined = object({
241
- bucketKey: $bucketKey,
242
- ownerId: $ownerId,
243
- contentHash: $contentHash,
244
- contentType: $contentType,
245
- size: $size,
246
- filename: $filename,
247
- customKey: $customKey,
248
- withPgClient: $withPgClient,
249
- pgSettings: $pgSettings,
250
- });
251
- return lambda($combined, async (vals) => {
252
- return vals.withPgClient(vals.pgSettings, async (pgClient) => {
253
- return pgClient.withTransaction(async (txClient) => {
254
- const databaseId = await resolveDatabaseId(txClient);
255
- if (!databaseId)
256
- throw new Error('DATABASE_NOT_FOUND');
257
- const allConfigs = await loadAllStorageModules(txClient, databaseId);
258
- const storageConfig = resolveStorageConfigFromCodec(capturedFilesCodec, allConfigs);
259
- if (!storageConfig)
260
- throw new Error('STORAGE_MODULE_NOT_FOUND');
261
- const bucket = await getBucketConfig(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
262
- if (!bucket)
263
- throw new Error('BUCKET_NOT_FOUND');
264
- const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
265
- await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
266
- return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
267
- contentHash: vals.contentHash,
268
- contentType: vals.contentType,
269
- size: vals.size,
270
- filename: vals.filename,
271
- key: vals.customKey,
272
- });
273
- });
274
- });
275
- });
276
- },
277
- });
278
- // --- Bulk file upload mutation ---
279
- const BulkFileInputType = new GraphQLInputObjectType({
280
- name: `Upload${filesTypeName}BulkFileInput`,
281
- fields: {
282
- contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
283
- contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
284
- size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
285
- filename: { type: GraphQLString, description: 'Original filename (optional)' },
286
- key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
287
- },
288
- });
289
- const BulkFilePayloadType = new GraphQLObjectType({
290
- name: `Upload${filesTypeName}BulkFilePayload`,
291
- fields: {
292
- uploadUrl: { type: GraphQLString },
293
- fileId: { type: new GraphQLNonNull(GraphQLString) },
294
- key: { type: new GraphQLNonNull(GraphQLString) },
295
- deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) },
296
- expiresAt: { type: GraphQLString },
297
- previousVersionId: { type: GraphQLString },
298
- },
299
- });
300
- const BulkInputType = new GraphQLInputObjectType({
301
- name: `Upload${filesTypeName}BulkInput`,
302
- fields: {
303
- bucketKey: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
304
- ...(hasOwnerId
305
- ? { ownerId: { type: new GraphQLNonNull(ownerIdGqlType || GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
306
- : {}),
307
- files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkFileInputType))), description: 'Array of files to upload' },
308
- },
309
- });
310
- const BulkPayloadType = new GraphQLObjectType({
311
- name: `Upload${filesTypeName}BulkPayload`,
312
- fields: {
313
- files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkFilePayloadType))) },
314
- },
315
- });
316
- const bulkMutationName = `upload${filesTypeName}s`;
317
- log.debug(`Adding bulk file upload mutation "${bulkMutationName}" for ${filesTypeName}`);
318
- newFields[bulkMutationName] = context.fieldWithHooks({ fieldName: bulkMutationName }, {
319
- description: `Upload multiple files: resolves the bucket by key, creates file rows, and returns presigned PUT URLs for each.`,
320
- type: BulkPayloadType,
321
- args: {
322
- input: { type: new GraphQLNonNull(BulkInputType) },
323
- },
324
- plan(_$mutation, fieldArgs) {
325
- const $input = fieldArgs.getRaw('input');
326
- const $bucketKey = access($input, 'bucketKey');
327
- const $ownerId = hasOwnerId ? access($input, 'ownerId') : lambda(null, () => null);
328
- const $files = access($input, 'files');
329
- const $withPgClient = grafastContext().get('withPgClient');
330
- const $pgSettings = grafastContext().get('pgSettings');
331
- const $combined = object({
332
- bucketKey: $bucketKey,
333
- ownerId: $ownerId,
334
- files: $files,
335
- withPgClient: $withPgClient,
336
- pgSettings: $pgSettings,
337
- });
338
- return lambda($combined, async (vals) => {
339
- return vals.withPgClient(vals.pgSettings, async (pgClient) => {
340
- return pgClient.withTransaction(async (txClient) => {
341
- const databaseId = await resolveDatabaseId(txClient);
342
- if (!databaseId)
343
- throw new Error('DATABASE_NOT_FOUND');
344
- const allConfigs = await loadAllStorageModules(txClient, databaseId);
345
- const storageConfig = resolveStorageConfigFromCodec(capturedFilesCodec, allConfigs);
346
- if (!storageConfig)
347
- throw new Error('STORAGE_MODULE_NOT_FOUND');
348
- const bucket = await getBucketConfig(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
349
- if (!bucket)
350
- throw new Error('BUCKET_NOT_FOUND');
351
- const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
352
- await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
353
- const results = [];
354
- for (const file of vals.files) {
355
- results.push(await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
356
- contentHash: file.contentHash,
357
- contentType: file.contentType,
358
- size: file.size,
359
- filename: file.filename,
360
- key: file.key,
361
- }));
362
- }
363
- return { files: results };
364
- });
365
- });
366
- });
367
- },
368
- });
369
- }
370
- return build.extend(fields, newFields, 'PresignedUrlPlugin adding per-bucket mutation entry points and file upload mutations');
371
- }
372
- // --- Path 2: Add upload fields on @storageBuckets types ---
373
- if (!isPgClassType || !pgCodec || !pgCodec.attributes) {
115
+ const { scope: { isRootMutation }, } = context;
116
+ if (!isRootMutation)
374
117
  return fields;
375
- }
376
- const tags = pgCodec.extensions?.tags;
377
- if (!tags?.storageBuckets) {
118
+ const { graphql: { GraphQLString, GraphQLNonNull, GraphQLInt, GraphQLBoolean, GraphQLObjectType, GraphQLInputObjectType, GraphQLList, }, } = build;
119
+ const bucketCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageBuckets);
120
+ if (bucketCodecs.length === 0)
378
121
  return fields;
379
- }
380
- log.debug(`Adding upload fields to bucket type: ${pgCodec.name} (has @storageBuckets tag)`);
381
- const { graphql: { GraphQLString, GraphQLNonNull, GraphQLInt, GraphQLBoolean, GraphQLObjectType, GraphQLList, GraphQLInputObjectType, }, } = build;
382
- // --- Shared output types ---
383
- const UploadUrlPayloadType = new GraphQLObjectType({
384
- name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestUploadUrlPayload`,
385
- fields: {
386
- uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' },
387
- fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID' },
388
- key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' },
389
- deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated' },
390
- expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' },
391
- previousVersionId: { type: GraphQLString, description: 'ID of the previous version' },
392
- },
393
- });
394
- const BulkUploadFilePayloadType = new GraphQLObjectType({
395
- name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFilePayload`,
396
- fields: {
397
- uploadUrl: { type: GraphQLString },
398
- fileId: { type: new GraphQLNonNull(GraphQLString) },
399
- key: { type: new GraphQLNonNull(GraphQLString) },
400
- deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) },
401
- expiresAt: { type: GraphQLString },
402
- previousVersionId: { type: GraphQLString },
403
- index: { type: new GraphQLNonNull(GraphQLInt), description: 'Index in the input array' },
404
- },
405
- });
406
- const BulkUploadUrlsPayloadType = new GraphQLObjectType({
407
- name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestBulkUploadUrlsPayload`,
408
- fields: {
409
- files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFilePayloadType))) },
410
- },
411
- });
412
- const BulkUploadFileInputType = new GraphQLInputObjectType({
413
- name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFileInput`,
414
- fields: {
415
- contentHash: { type: new GraphQLNonNull(GraphQLString) },
416
- contentType: { type: new GraphQLNonNull(GraphQLString) },
417
- size: { type: new GraphQLNonNull(GraphQLInt) },
418
- filename: { type: GraphQLString },
419
- key: { type: GraphQLString },
420
- },
421
- });
422
- // Capture codec for closure
423
- const capturedCodec = pgCodec;
424
- return build.extend(fields, {
425
- requestUploadUrl: context.fieldWithHooks({ fieldName: 'requestUploadUrl' }, {
426
- description: 'Request a presigned URL for uploading a file to this bucket.',
427
- type: UploadUrlPayloadType,
428
- args: {
122
+ const newFields = {};
123
+ // --- File upload mutations (uploadAppFile, uploadDataRoomFile, etc.) ---
124
+ const fileCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageFiles);
125
+ for (const filesCodec of fileCodecs) {
126
+ const filesTypeName = build.inflection.tableType(filesCodec);
127
+ // Find the matching bucket codec by table name prefix.
128
+ // Schema-name matching is ambiguous when multiple storage modules share
129
+ // the same PG schema (e.g. app_files + data_room_files both in storage_public).
130
+ // Instead, derive the prefix from the raw SQL table name:
131
+ // "data_room_files" → prefix "data_room" → matches "data_room_buckets"
132
+ // "app_files" → prefix "app" → matches "app_buckets"
133
+ const filesRawName = filesCodec.extensions?.pg?.name;
134
+ const filesPrefix = filesRawName?.replace(/_files$/, '');
135
+ const matchingBucketCodec = bucketCodecs.find((bc) => {
136
+ const bucketRawName = bc.extensions?.pg?.name;
137
+ const bucketPrefix = bucketRawName?.replace(/_buckets$/, '');
138
+ return bucketPrefix === filesPrefix;
139
+ });
140
+ if (!matchingBucketCodec) {
141
+ log.debug(`Skipping upload mutation for ${filesCodec.name}: no matching bucket codec with prefix "${filesPrefix}"`);
142
+ continue;
143
+ }
144
+ const hasOwnerId = !!matchingBucketCodec.attributes.owner_id;
145
+ const mutationName = `upload${filesTypeName}`;
146
+ const ownerIdGqlType = hasOwnerId
147
+ ? build.getGraphQLTypeByPgCodec(matchingBucketCodec.attributes.owner_id.codec, 'input')
148
+ : null;
149
+ const InputType = new GraphQLInputObjectType({
150
+ name: `Upload${filesTypeName}Input`,
151
+ fields: {
152
+ bucketKey: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
153
+ ...(hasOwnerId
154
+ ? { ownerId: { type: new GraphQLNonNull(ownerIdGqlType || GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
155
+ : {}),
429
156
  contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
430
157
  contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
431
158
  size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
432
159
  filename: { type: GraphQLString, description: 'Original filename (optional)' },
433
160
  key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
434
161
  },
435
- plan($parent, fieldArgs) {
436
- const $bucketId = $parent.get('id');
437
- const $bucketKey = $parent.get('key');
438
- const $bucketType = $parent.get('type');
439
- const $bucketIsPublic = $parent.get('is_public');
440
- const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
441
- const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
442
- const $bucketMaxFileSize = $parent.get('max_file_size');
443
- const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : lambda(null, () => null);
444
- const $contentHash = fieldArgs.getRaw('contentHash');
445
- const $contentType = fieldArgs.getRaw('contentType');
446
- const $size = fieldArgs.getRaw('size');
447
- const $filename = fieldArgs.getRaw('filename');
448
- const $customKey = fieldArgs.getRaw('key');
162
+ });
163
+ const PayloadType = new GraphQLObjectType({
164
+ name: `Upload${filesTypeName}Payload`,
165
+ fields: {
166
+ uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' },
167
+ fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID (UUID)' },
168
+ key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' },
169
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated (content already exists)' },
170
+ expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' },
171
+ previousVersionId: { type: GraphQLString, description: 'ID of the previous version (when using custom keys)' },
172
+ },
173
+ });
174
+ const capturedFilesCodec = filesCodec;
175
+ log.debug(`Adding file upload mutation "${mutationName}" for ${filesTypeName} (entity-scoped=${hasOwnerId})`);
176
+ newFields[mutationName] = context.fieldWithHooks({ fieldName: mutationName }, {
177
+ description: `Upload a file: resolves the bucket by key, creates the file row, and returns a presigned PUT URL.`,
178
+ type: PayloadType,
179
+ args: {
180
+ input: { type: new GraphQLNonNull(InputType) },
181
+ },
182
+ plan(_$mutation, fieldArgs) {
183
+ const $input = fieldArgs.getRaw('input');
184
+ const $bucketKey = access($input, 'bucketKey');
185
+ const $contentHash = access($input, 'contentHash');
186
+ const $contentType = access($input, 'contentType');
187
+ const $size = access($input, 'size');
188
+ const $filename = access($input, 'filename');
189
+ const $customKey = access($input, 'key');
190
+ const $ownerId = hasOwnerId ? access($input, 'ownerId') : lambda(null, () => null);
449
191
  const $withPgClient = grafastContext().get('withPgClient');
450
192
  const $pgSettings = grafastContext().get('pgSettings');
451
193
  const $combined = object({
452
- bucketId: $bucketId,
453
194
  bucketKey: $bucketKey,
454
- bucketType: $bucketType,
455
- bucketIsPublic: $bucketIsPublic,
456
- bucketAllowCustomKeys: $bucketAllowCustomKeys,
457
- bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
458
- bucketMaxFileSize: $bucketMaxFileSize,
459
- bucketOwnerId: $bucketOwnerId,
195
+ ownerId: $ownerId,
460
196
  contentHash: $contentHash,
461
197
  contentType: $contentType,
462
198
  size: $size,
@@ -472,19 +208,12 @@ export function createPresignedUrlPlugin(options) {
472
208
  if (!databaseId)
473
209
  throw new Error('DATABASE_NOT_FOUND');
474
210
  const allConfigs = await loadAllStorageModules(txClient, databaseId);
475
- const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
211
+ const storageConfig = resolveStorageConfigFromCodec(capturedFilesCodec, allConfigs);
476
212
  if (!storageConfig)
477
213
  throw new Error('STORAGE_MODULE_NOT_FOUND');
478
- const bucket = {
479
- id: vals.bucketId,
480
- key: vals.bucketKey,
481
- type: vals.bucketType,
482
- is_public: vals.bucketIsPublic,
483
- owner_id: vals.bucketOwnerId,
484
- allowed_mime_types: vals.bucketAllowedMimeTypes,
485
- max_file_size: vals.bucketMaxFileSize,
486
- allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
487
- };
214
+ const bucket = await getBucketConfig(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
215
+ if (!bucket)
216
+ throw new Error('BUCKET_NOT_FOUND');
488
217
  const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
489
218
  await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
490
219
  return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
@@ -498,86 +227,109 @@ export function createPresignedUrlPlugin(options) {
498
227
  });
499
228
  });
500
229
  },
501
- }),
502
- requestBulkUploadUrls: context.fieldWithHooks({ fieldName: 'requestBulkUploadUrls' }, {
503
- description: 'Request presigned URLs for uploading multiple files to this bucket.',
504
- type: BulkUploadUrlsPayloadType,
230
+ });
231
+ // --- Bulk file upload mutation ---
232
+ const BulkFileInputType = new GraphQLInputObjectType({
233
+ name: `Upload${filesTypeName}BulkFileInput`,
234
+ fields: {
235
+ contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
236
+ contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
237
+ size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
238
+ filename: { type: GraphQLString, description: 'Original filename (optional)' },
239
+ key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
240
+ },
241
+ });
242
+ const BulkFilePayloadType = new GraphQLObjectType({
243
+ name: `Upload${filesTypeName}BulkFilePayload`,
244
+ fields: {
245
+ uploadUrl: { type: GraphQLString },
246
+ fileId: { type: new GraphQLNonNull(GraphQLString) },
247
+ key: { type: new GraphQLNonNull(GraphQLString) },
248
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) },
249
+ expiresAt: { type: GraphQLString },
250
+ previousVersionId: { type: GraphQLString },
251
+ },
252
+ });
253
+ const BulkInputType = new GraphQLInputObjectType({
254
+ name: `Upload${filesTypeName}BulkInput`,
255
+ fields: {
256
+ bucketKey: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
257
+ ...(hasOwnerId
258
+ ? { ownerId: { type: new GraphQLNonNull(ownerIdGqlType || GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
259
+ : {}),
260
+ files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkFileInputType))), description: 'Array of files to upload' },
261
+ },
262
+ });
263
+ const BulkPayloadType = new GraphQLObjectType({
264
+ name: `Upload${filesTypeName}BulkPayload`,
265
+ fields: {
266
+ files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkFilePayloadType))) },
267
+ },
268
+ });
269
+ const bulkMutationName = `upload${filesTypeName}s`;
270
+ log.debug(`Adding bulk file upload mutation "${bulkMutationName}" for ${filesTypeName}`);
271
+ newFields[bulkMutationName] = context.fieldWithHooks({ fieldName: bulkMutationName }, {
272
+ description: `Upload multiple files: resolves the bucket by key, creates file rows, and returns presigned PUT URLs for each.`,
273
+ type: BulkPayloadType,
505
274
  args: {
506
- files: {
507
- type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFileInputType))),
508
- description: 'Array of files to upload',
509
- },
275
+ input: { type: new GraphQLNonNull(BulkInputType) },
510
276
  },
511
- plan($parent, fieldArgs) {
512
- const $bucketId = $parent.get('id');
513
- const $bucketKey = $parent.get('key');
514
- const $bucketType = $parent.get('type');
515
- const $bucketIsPublic = $parent.get('is_public');
516
- const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
517
- const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
518
- const $bucketMaxFileSize = $parent.get('max_file_size');
519
- const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : lambda(null, () => null);
520
- const $files = fieldArgs.getRaw('files');
277
+ plan(_$mutation, fieldArgs) {
278
+ const $input = fieldArgs.getRaw('input');
279
+ const $bucketKey = access($input, 'bucketKey');
280
+ const $ownerId = hasOwnerId ? access($input, 'ownerId') : lambda(null, () => null);
281
+ const $files = access($input, 'files');
521
282
  const $withPgClient = grafastContext().get('withPgClient');
522
283
  const $pgSettings = grafastContext().get('pgSettings');
523
284
  const $combined = object({
524
- bucketId: $bucketId,
525
285
  bucketKey: $bucketKey,
526
- bucketType: $bucketType,
527
- bucketIsPublic: $bucketIsPublic,
528
- bucketAllowCustomKeys: $bucketAllowCustomKeys,
529
- bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
530
- bucketMaxFileSize: $bucketMaxFileSize,
531
- bucketOwnerId: $bucketOwnerId,
286
+ ownerId: $ownerId,
532
287
  files: $files,
533
288
  withPgClient: $withPgClient,
534
289
  pgSettings: $pgSettings,
535
290
  });
536
291
  return lambda($combined, async (vals) => {
537
- const { files } = vals;
538
- if (!Array.isArray(files) || files.length === 0) {
539
- throw new Error('INVALID_FILES: must provide at least one file');
540
- }
541
292
  return vals.withPgClient(vals.pgSettings, async (pgClient) => {
542
293
  return pgClient.withTransaction(async (txClient) => {
543
294
  const databaseId = await resolveDatabaseId(txClient);
544
295
  if (!databaseId)
545
296
  throw new Error('DATABASE_NOT_FOUND');
546
297
  const allConfigs = await loadAllStorageModules(txClient, databaseId);
547
- const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
298
+ const storageConfig = resolveStorageConfigFromCodec(capturedFilesCodec, allConfigs);
548
299
  if (!storageConfig)
549
300
  throw new Error('STORAGE_MODULE_NOT_FOUND');
550
- if (files.length > storageConfig.maxBulkFiles) {
551
- throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
301
+ const bucket = await getBucketConfig(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
302
+ if (!bucket)
303
+ throw new Error('BUCKET_NOT_FOUND');
304
+ // Enforce bulk upload limits
305
+ const filesArray = vals.files;
306
+ if (filesArray.length > storageConfig.maxBulkFiles) {
307
+ throw new Error(`BULK_UPLOAD_FILES_EXCEEDED: ${filesArray.length} files exceeds maximum of ${storageConfig.maxBulkFiles} per batch`);
552
308
  }
553
- const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
309
+ const totalSize = filesArray.reduce((sum, f) => sum + (f.size || 0), 0);
554
310
  if (totalSize > storageConfig.maxBulkTotalSize) {
555
- throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
311
+ throw new Error(`BULK_UPLOAD_SIZE_EXCEEDED: ${totalSize} bytes exceeds maximum of ${storageConfig.maxBulkTotalSize} bytes per batch`);
556
312
  }
557
- const bucket = {
558
- id: vals.bucketId,
559
- key: vals.bucketKey,
560
- type: vals.bucketType,
561
- is_public: vals.bucketIsPublic,
562
- owner_id: vals.bucketOwnerId,
563
- allowed_mime_types: vals.bucketAllowedMimeTypes,
564
- max_file_size: vals.bucketMaxFileSize,
565
- allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
566
- };
567
313
  const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
568
314
  await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
569
315
  const results = [];
570
- for (let i = 0; i < files.length; i++) {
571
- const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, files[i]);
572
- results.push({ ...result, index: i });
316
+ for (const file of filesArray) {
317
+ results.push(await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
318
+ contentHash: file.contentHash,
319
+ contentType: file.contentType,
320
+ size: file.size,
321
+ filename: file.filename,
322
+ key: file.key,
323
+ }));
573
324
  }
574
325
  return { files: results };
575
326
  });
576
327
  });
577
328
  });
578
329
  },
579
- }),
580
- }, `PresignedUrlPlugin adding upload fields to ${pgCodec.name}`);
330
+ });
331
+ }
332
+ return build.extend(fields, newFields, 'PresignedUrlPlugin adding file upload mutations');
581
333
  },
582
334
  /**
583
335
  * Wrap delete* mutations on @storageFiles-tagged tables with S3 cleanup.