graphile-presigned-url-plugin 0.13.0 → 0.14.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/plugin.d.ts CHANGED
@@ -13,7 +13,12 @@
13
13
  * type (e.g., `appBucket(key: "public"): AppBucket`), so upload operations
14
14
  * can be accessed as proper GraphQL mutations instead of queries.
15
15
  *
16
- * 4. downloadUrl handled by download-url-field.ts (separate plugin).
16
+ * 4. File upload mutations add `upload<FileType>(input: {...})` mutations
17
+ * on root Mutation for each @storageFiles/@storageBuckets pair. These combine
18
+ * bucket resolution + file INSERT + presigned URL generation in one step.
19
+ * E.g., `uploadAppFile(input: { bucketKey: "public", contentHash: "...", ... })`
20
+ *
21
+ * 5. downloadUrl — handled by download-url-field.ts (separate plugin).
17
22
  *
18
23
  * Scope resolution uses the codec's schema/table name matched against
19
24
  * cached storage module configs.
package/esm/plugin.js CHANGED
@@ -13,15 +13,20 @@
13
13
  * type (e.g., `appBucket(key: "public"): AppBucket`), so upload operations
14
14
  * can be accessed as proper GraphQL mutations instead of queries.
15
15
  *
16
- * 4. downloadUrl handled by download-url-field.ts (separate plugin).
16
+ * 4. File upload mutations add `upload<FileType>(input: {...})` mutations
17
+ * on root Mutation for each @storageFiles/@storageBuckets pair. These combine
18
+ * bucket resolution + file INSERT + presigned URL generation in one step.
19
+ * E.g., `uploadAppFile(input: { bucketKey: "public", contentHash: "...", ... })`
20
+ *
21
+ * 5. downloadUrl — handled by download-url-field.ts (separate plugin).
17
22
  *
18
23
  * Scope resolution uses the codec's schema/table name matched against
19
24
  * cached storage module configs.
20
25
  */
21
- import { context as grafastContext, lambda, object } from 'grafast';
26
+ import { access, context as grafastContext, lambda, object } from 'grafast';
22
27
  import 'graphile-build';
23
28
  import { Logger } from '@pgpmjs/logger';
24
- import { loadAllStorageModules, resolveStorageConfigFromCodec, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
29
+ import { loadAllStorageModules, resolveStorageConfigFromCodec, getBucketConfig, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
25
30
  import { generatePresignedPutUrl, deleteS3Object } from './s3-signer';
26
31
  const log = new Logger('graphile-presigned-url:plugin');
27
32
  // --- Protocol-level constants (not configurable) ---
@@ -115,13 +120,14 @@ export function createPresignedUrlPlugin(options) {
115
120
  */
116
121
  GraphQLObjectType_fields(fields, build, context) {
117
122
  const { scope: { pgCodec, isPgClassType, isRootMutation }, } = context;
118
- // --- Path 1: Add per-bucket mutation entry points on root Mutation ---
123
+ // --- Path 1: Add per-bucket mutation entry points + file creation mutations on root Mutation ---
119
124
  if (isRootMutation) {
120
- const { graphql: { GraphQLString, GraphQLNonNull }, } = build;
125
+ const { graphql: { GraphQLString, GraphQLNonNull, GraphQLInt, GraphQLBoolean, GraphQLObjectType, GraphQLInputObjectType, GraphQLList, }, } = build;
121
126
  const bucketCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageBuckets);
122
127
  if (bucketCodecs.length === 0)
123
128
  return fields;
124
129
  const newFields = {};
130
+ // --- 1a: Per-bucket entry points (appBucket, dataRoomBucket, etc.) ---
125
131
  for (const codec of bucketCodecs) {
126
132
  const typeName = build.inflection.tableType(codec);
127
133
  const bucketType = build.getTypeByName(typeName);
@@ -131,14 +137,11 @@ export function createPresignedUrlPlugin(options) {
131
137
  }
132
138
  const fieldName = typeName.charAt(0).toLowerCase() + typeName.slice(1);
133
139
  const hasOwnerId = !!codec.attributes.owner_id;
134
- // Find the PgResource for this codec so we can return a proper PgSelectSingleStep
135
140
  const bucketResource = Object.values(build.input.pgRegistry.pgResources).find((r) => r.codec === codec && !r.isUnique && !r.isVirtual && !r.parameters);
136
141
  if (!bucketResource) {
137
142
  log.debug(`Skipping mutation entry point for ${codec.name}: no PgResource found`);
138
143
  continue;
139
144
  }
140
- // Resolve the GraphQL type for ownerId from the codec's attribute codec
141
- // (e.g. UUID scalar instead of String) so Grafast's type matching works.
142
145
  const ownerIdType = hasOwnerId
143
146
  ? build.getGraphQLTypeByPgCodec(codec.attributes.owner_id.codec, 'input')
144
147
  : null;
@@ -163,7 +166,208 @@ export function createPresignedUrlPlugin(options) {
163
166
  },
164
167
  });
165
168
  }
166
- return build.extend(fields, newFields, 'PresignedUrlPlugin adding per-bucket mutation entry points');
169
+ // --- 1b: File upload mutations (uploadAppFile, uploadDataRoomFile, etc.) ---
170
+ const fileCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageFiles);
171
+ for (const filesCodec of fileCodecs) {
172
+ const filesTypeName = build.inflection.tableType(filesCodec);
173
+ const filesSchemaName = filesCodec.extensions?.pg?.schemaName;
174
+ // Find the matching bucket codec by table name prefix.
175
+ // Schema-name matching is ambiguous when multiple storage modules share
176
+ // the same PG schema (e.g. app_files + data_room_files both in storage_public).
177
+ // Instead, derive the prefix from the raw SQL table name:
178
+ // "data_room_files" → prefix "data_room" → matches "data_room_buckets"
179
+ // "app_files" → prefix "app" → matches "app_buckets"
180
+ const filesRawName = filesCodec.extensions?.pg?.name;
181
+ const filesPrefix = filesRawName?.replace(/_files$/, '');
182
+ const matchingBucketCodec = bucketCodecs.find((bc) => {
183
+ const bucketRawName = bc.extensions?.pg?.name;
184
+ const bucketPrefix = bucketRawName?.replace(/_buckets$/, '');
185
+ return bucketPrefix === filesPrefix;
186
+ });
187
+ if (!matchingBucketCodec) {
188
+ log.debug(`Skipping upload mutation for ${filesCodec.name}: no matching bucket codec with prefix "${filesPrefix}"`);
189
+ continue;
190
+ }
191
+ const hasOwnerId = !!matchingBucketCodec.attributes.owner_id;
192
+ const mutationName = `upload${filesTypeName}`;
193
+ const ownerIdGqlType = hasOwnerId
194
+ ? build.getGraphQLTypeByPgCodec(matchingBucketCodec.attributes.owner_id.codec, 'input')
195
+ : null;
196
+ const InputType = new GraphQLInputObjectType({
197
+ name: `Upload${filesTypeName}Input`,
198
+ fields: {
199
+ bucketKey: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
200
+ ...(hasOwnerId
201
+ ? { ownerId: { type: new GraphQLNonNull(ownerIdGqlType || GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
202
+ : {}),
203
+ contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
204
+ contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
205
+ size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
206
+ filename: { type: GraphQLString, description: 'Original filename (optional)' },
207
+ key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
208
+ },
209
+ });
210
+ const PayloadType = new GraphQLObjectType({
211
+ name: `Upload${filesTypeName}Payload`,
212
+ fields: {
213
+ uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' },
214
+ fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID (UUID)' },
215
+ key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' },
216
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated (content already exists)' },
217
+ expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' },
218
+ previousVersionId: { type: GraphQLString, description: 'ID of the previous version (when using custom keys)' },
219
+ },
220
+ });
221
+ const capturedFilesCodec = filesCodec;
222
+ log.debug(`Adding file upload mutation "${mutationName}" for ${filesTypeName} (entity-scoped=${hasOwnerId})`);
223
+ newFields[mutationName] = context.fieldWithHooks({ fieldName: mutationName }, {
224
+ description: `Upload a file: resolves the bucket by key, creates the file row, and returns a presigned PUT URL.`,
225
+ type: PayloadType,
226
+ args: {
227
+ input: { type: new GraphQLNonNull(InputType) },
228
+ },
229
+ plan(_$mutation, fieldArgs) {
230
+ const $input = fieldArgs.getRaw('input');
231
+ const $bucketKey = access($input, 'bucketKey');
232
+ const $contentHash = access($input, 'contentHash');
233
+ const $contentType = access($input, 'contentType');
234
+ const $size = access($input, 'size');
235
+ const $filename = access($input, 'filename');
236
+ const $customKey = access($input, 'key');
237
+ const $ownerId = hasOwnerId ? access($input, 'ownerId') : lambda(null, () => null);
238
+ const $withPgClient = grafastContext().get('withPgClient');
239
+ const $pgSettings = grafastContext().get('pgSettings');
240
+ const $combined = object({
241
+ bucketKey: $bucketKey,
242
+ ownerId: $ownerId,
243
+ contentHash: $contentHash,
244
+ contentType: $contentType,
245
+ size: $size,
246
+ filename: $filename,
247
+ customKey: $customKey,
248
+ withPgClient: $withPgClient,
249
+ pgSettings: $pgSettings,
250
+ });
251
+ return lambda($combined, async (vals) => {
252
+ return vals.withPgClient(vals.pgSettings, async (pgClient) => {
253
+ return pgClient.withTransaction(async (txClient) => {
254
+ const databaseId = await resolveDatabaseId(txClient);
255
+ if (!databaseId)
256
+ throw new Error('DATABASE_NOT_FOUND');
257
+ const allConfigs = await loadAllStorageModules(txClient, databaseId);
258
+ const storageConfig = resolveStorageConfigFromCodec(capturedFilesCodec, allConfigs);
259
+ if (!storageConfig)
260
+ throw new Error('STORAGE_MODULE_NOT_FOUND');
261
+ const bucket = await getBucketConfig(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
262
+ if (!bucket)
263
+ throw new Error('BUCKET_NOT_FOUND');
264
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
265
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
266
+ return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
267
+ contentHash: vals.contentHash,
268
+ contentType: vals.contentType,
269
+ size: vals.size,
270
+ filename: vals.filename,
271
+ key: vals.customKey,
272
+ });
273
+ });
274
+ });
275
+ });
276
+ },
277
+ });
278
+ // --- Bulk file upload mutation ---
279
+ const BulkFileInputType = new GraphQLInputObjectType({
280
+ name: `Upload${filesTypeName}BulkFileInput`,
281
+ fields: {
282
+ contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
283
+ contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
284
+ size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
285
+ filename: { type: GraphQLString, description: 'Original filename (optional)' },
286
+ key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
287
+ },
288
+ });
289
+ const BulkFilePayloadType = new GraphQLObjectType({
290
+ name: `Upload${filesTypeName}BulkFilePayload`,
291
+ fields: {
292
+ uploadUrl: { type: GraphQLString },
293
+ fileId: { type: new GraphQLNonNull(GraphQLString) },
294
+ key: { type: new GraphQLNonNull(GraphQLString) },
295
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) },
296
+ expiresAt: { type: GraphQLString },
297
+ previousVersionId: { type: GraphQLString },
298
+ },
299
+ });
300
+ const BulkInputType = new GraphQLInputObjectType({
301
+ name: `Upload${filesTypeName}BulkInput`,
302
+ fields: {
303
+ bucketKey: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
304
+ ...(hasOwnerId
305
+ ? { ownerId: { type: new GraphQLNonNull(ownerIdGqlType || GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
306
+ : {}),
307
+ files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkFileInputType))), description: 'Array of files to upload' },
308
+ },
309
+ });
310
+ const BulkPayloadType = new GraphQLObjectType({
311
+ name: `Upload${filesTypeName}BulkPayload`,
312
+ fields: {
313
+ files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkFilePayloadType))) },
314
+ },
315
+ });
316
+ const bulkMutationName = `upload${filesTypeName}s`;
317
+ log.debug(`Adding bulk file upload mutation "${bulkMutationName}" for ${filesTypeName}`);
318
+ newFields[bulkMutationName] = context.fieldWithHooks({ fieldName: bulkMutationName }, {
319
+ description: `Upload multiple files: resolves the bucket by key, creates file rows, and returns presigned PUT URLs for each.`,
320
+ type: BulkPayloadType,
321
+ args: {
322
+ input: { type: new GraphQLNonNull(BulkInputType) },
323
+ },
324
+ plan(_$mutation, fieldArgs) {
325
+ const $input = fieldArgs.getRaw('input');
326
+ const $bucketKey = access($input, 'bucketKey');
327
+ const $ownerId = hasOwnerId ? access($input, 'ownerId') : lambda(null, () => null);
328
+ const $files = access($input, 'files');
329
+ const $withPgClient = grafastContext().get('withPgClient');
330
+ const $pgSettings = grafastContext().get('pgSettings');
331
+ const $combined = object({
332
+ bucketKey: $bucketKey,
333
+ ownerId: $ownerId,
334
+ files: $files,
335
+ withPgClient: $withPgClient,
336
+ pgSettings: $pgSettings,
337
+ });
338
+ return lambda($combined, async (vals) => {
339
+ return vals.withPgClient(vals.pgSettings, async (pgClient) => {
340
+ return pgClient.withTransaction(async (txClient) => {
341
+ const databaseId = await resolveDatabaseId(txClient);
342
+ if (!databaseId)
343
+ throw new Error('DATABASE_NOT_FOUND');
344
+ const allConfigs = await loadAllStorageModules(txClient, databaseId);
345
+ const storageConfig = resolveStorageConfigFromCodec(capturedFilesCodec, allConfigs);
346
+ if (!storageConfig)
347
+ throw new Error('STORAGE_MODULE_NOT_FOUND');
348
+ const bucket = await getBucketConfig(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
349
+ if (!bucket)
350
+ throw new Error('BUCKET_NOT_FOUND');
351
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
352
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
353
+ const results = [];
354
+ for (const file of vals.files) {
355
+ results.push(await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
356
+ contentHash: file.contentHash,
357
+ contentType: file.contentType,
358
+ size: file.size,
359
+ filename: file.filename,
360
+ key: file.key,
361
+ }));
362
+ }
363
+ return { files: results };
364
+ });
365
+ });
366
+ });
367
+ },
368
+ });
369
+ }
370
+ return build.extend(fields, newFields, 'PresignedUrlPlugin adding per-bucket mutation entry points and file upload mutations');
167
371
  }
168
372
  // --- Path 2: Add upload fields on @storageBuckets types ---
169
373
  if (!isPgClassType || !pgCodec || !pgCodec.attributes) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "graphile-presigned-url-plugin",
3
- "version": "0.13.0",
3
+ "version": "0.14.1",
4
4
  "description": "Presigned URL upload plugin for PostGraphile v5 — requestUploadUrl mutation and downloadUrl computed field",
5
5
  "author": "Constructive <developers@constructive.io>",
6
6
  "homepage": "https://github.com/constructive-io/constructive",
@@ -60,5 +60,5 @@
60
60
  "@types/node": "^22.19.11",
61
61
  "makage": "^0.1.10"
62
62
  },
63
- "gitHead": "0ad9af424bb7760e50835edb9ee083b919a4b1f0"
63
+ "gitHead": "96452226da9196a4d2cceb24fe30869ce318aeae"
64
64
  }
package/plugin.d.ts CHANGED
@@ -13,7 +13,12 @@
13
13
  * type (e.g., `appBucket(key: "public"): AppBucket`), so upload operations
14
14
  * can be accessed as proper GraphQL mutations instead of queries.
15
15
  *
16
- * 4. downloadUrl handled by download-url-field.ts (separate plugin).
16
+ * 4. File upload mutations add `upload<FileType>(input: {...})` mutations
17
+ * on root Mutation for each @storageFiles/@storageBuckets pair. These combine
18
+ * bucket resolution + file INSERT + presigned URL generation in one step.
19
+ * E.g., `uploadAppFile(input: { bucketKey: "public", contentHash: "...", ... })`
20
+ *
21
+ * 5. downloadUrl — handled by download-url-field.ts (separate plugin).
17
22
  *
18
23
  * Scope resolution uses the codec's schema/table name matched against
19
24
  * cached storage module configs.
package/plugin.js CHANGED
@@ -14,7 +14,12 @@
14
14
  * type (e.g., `appBucket(key: "public"): AppBucket`), so upload operations
15
15
  * can be accessed as proper GraphQL mutations instead of queries.
16
16
  *
17
- * 4. downloadUrl handled by download-url-field.ts (separate plugin).
17
+ * 4. File upload mutations add `upload<FileType>(input: {...})` mutations
18
+ * on root Mutation for each @storageFiles/@storageBuckets pair. These combine
19
+ * bucket resolution + file INSERT + presigned URL generation in one step.
20
+ * E.g., `uploadAppFile(input: { bucketKey: "public", contentHash: "...", ... })`
21
+ *
22
+ * 5. downloadUrl — handled by download-url-field.ts (separate plugin).
18
23
  *
19
24
  * Scope resolution uses the codec's schema/table name matched against
20
25
  * cached storage module configs.
@@ -119,13 +124,14 @@ function createPresignedUrlPlugin(options) {
119
124
  */
120
125
  GraphQLObjectType_fields(fields, build, context) {
121
126
  const { scope: { pgCodec, isPgClassType, isRootMutation }, } = context;
122
- // --- Path 1: Add per-bucket mutation entry points on root Mutation ---
127
+ // --- Path 1: Add per-bucket mutation entry points + file creation mutations on root Mutation ---
123
128
  if (isRootMutation) {
124
- const { graphql: { GraphQLString, GraphQLNonNull }, } = build;
129
+ const { graphql: { GraphQLString, GraphQLNonNull, GraphQLInt, GraphQLBoolean, GraphQLObjectType, GraphQLInputObjectType, GraphQLList, }, } = build;
125
130
  const bucketCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageBuckets);
126
131
  if (bucketCodecs.length === 0)
127
132
  return fields;
128
133
  const newFields = {};
134
+ // --- 1a: Per-bucket entry points (appBucket, dataRoomBucket, etc.) ---
129
135
  for (const codec of bucketCodecs) {
130
136
  const typeName = build.inflection.tableType(codec);
131
137
  const bucketType = build.getTypeByName(typeName);
@@ -135,14 +141,11 @@ function createPresignedUrlPlugin(options) {
135
141
  }
136
142
  const fieldName = typeName.charAt(0).toLowerCase() + typeName.slice(1);
137
143
  const hasOwnerId = !!codec.attributes.owner_id;
138
- // Find the PgResource for this codec so we can return a proper PgSelectSingleStep
139
144
  const bucketResource = Object.values(build.input.pgRegistry.pgResources).find((r) => r.codec === codec && !r.isUnique && !r.isVirtual && !r.parameters);
140
145
  if (!bucketResource) {
141
146
  log.debug(`Skipping mutation entry point for ${codec.name}: no PgResource found`);
142
147
  continue;
143
148
  }
144
- // Resolve the GraphQL type for ownerId from the codec's attribute codec
145
- // (e.g. UUID scalar instead of String) so Grafast's type matching works.
146
149
  const ownerIdType = hasOwnerId
147
150
  ? build.getGraphQLTypeByPgCodec(codec.attributes.owner_id.codec, 'input')
148
151
  : null;
@@ -167,7 +170,208 @@ function createPresignedUrlPlugin(options) {
167
170
  },
168
171
  });
169
172
  }
170
- return build.extend(fields, newFields, 'PresignedUrlPlugin adding per-bucket mutation entry points');
173
+ // --- 1b: File upload mutations (uploadAppFile, uploadDataRoomFile, etc.) ---
174
+ const fileCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageFiles);
175
+ for (const filesCodec of fileCodecs) {
176
+ const filesTypeName = build.inflection.tableType(filesCodec);
177
+ const filesSchemaName = filesCodec.extensions?.pg?.schemaName;
178
+ // Find the matching bucket codec by table name prefix.
179
+ // Schema-name matching is ambiguous when multiple storage modules share
180
+ // the same PG schema (e.g. app_files + data_room_files both in storage_public).
181
+ // Instead, derive the prefix from the raw SQL table name:
182
+ // "data_room_files" → prefix "data_room" → matches "data_room_buckets"
183
+ // "app_files" → prefix "app" → matches "app_buckets"
184
+ const filesRawName = filesCodec.extensions?.pg?.name;
185
+ const filesPrefix = filesRawName?.replace(/_files$/, '');
186
+ const matchingBucketCodec = bucketCodecs.find((bc) => {
187
+ const bucketRawName = bc.extensions?.pg?.name;
188
+ const bucketPrefix = bucketRawName?.replace(/_buckets$/, '');
189
+ return bucketPrefix === filesPrefix;
190
+ });
191
+ if (!matchingBucketCodec) {
192
+ log.debug(`Skipping upload mutation for ${filesCodec.name}: no matching bucket codec with prefix "${filesPrefix}"`);
193
+ continue;
194
+ }
195
+ const hasOwnerId = !!matchingBucketCodec.attributes.owner_id;
196
+ const mutationName = `upload${filesTypeName}`;
197
+ const ownerIdGqlType = hasOwnerId
198
+ ? build.getGraphQLTypeByPgCodec(matchingBucketCodec.attributes.owner_id.codec, 'input')
199
+ : null;
200
+ const InputType = new GraphQLInputObjectType({
201
+ name: `Upload${filesTypeName}Input`,
202
+ fields: {
203
+ bucketKey: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
204
+ ...(hasOwnerId
205
+ ? { ownerId: { type: new GraphQLNonNull(ownerIdGqlType || GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
206
+ : {}),
207
+ contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
208
+ contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
209
+ size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
210
+ filename: { type: GraphQLString, description: 'Original filename (optional)' },
211
+ key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
212
+ },
213
+ });
214
+ const PayloadType = new GraphQLObjectType({
215
+ name: `Upload${filesTypeName}Payload`,
216
+ fields: {
217
+ uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' },
218
+ fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID (UUID)' },
219
+ key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' },
220
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated (content already exists)' },
221
+ expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' },
222
+ previousVersionId: { type: GraphQLString, description: 'ID of the previous version (when using custom keys)' },
223
+ },
224
+ });
225
+ const capturedFilesCodec = filesCodec;
226
+ log.debug(`Adding file upload mutation "${mutationName}" for ${filesTypeName} (entity-scoped=${hasOwnerId})`);
227
+ newFields[mutationName] = context.fieldWithHooks({ fieldName: mutationName }, {
228
+ description: `Upload a file: resolves the bucket by key, creates the file row, and returns a presigned PUT URL.`,
229
+ type: PayloadType,
230
+ args: {
231
+ input: { type: new GraphQLNonNull(InputType) },
232
+ },
233
+ plan(_$mutation, fieldArgs) {
234
+ const $input = fieldArgs.getRaw('input');
235
+ const $bucketKey = (0, grafast_1.access)($input, 'bucketKey');
236
+ const $contentHash = (0, grafast_1.access)($input, 'contentHash');
237
+ const $contentType = (0, grafast_1.access)($input, 'contentType');
238
+ const $size = (0, grafast_1.access)($input, 'size');
239
+ const $filename = (0, grafast_1.access)($input, 'filename');
240
+ const $customKey = (0, grafast_1.access)($input, 'key');
241
+ const $ownerId = hasOwnerId ? (0, grafast_1.access)($input, 'ownerId') : (0, grafast_1.lambda)(null, () => null);
242
+ const $withPgClient = (0, grafast_1.context)().get('withPgClient');
243
+ const $pgSettings = (0, grafast_1.context)().get('pgSettings');
244
+ const $combined = (0, grafast_1.object)({
245
+ bucketKey: $bucketKey,
246
+ ownerId: $ownerId,
247
+ contentHash: $contentHash,
248
+ contentType: $contentType,
249
+ size: $size,
250
+ filename: $filename,
251
+ customKey: $customKey,
252
+ withPgClient: $withPgClient,
253
+ pgSettings: $pgSettings,
254
+ });
255
+ return (0, grafast_1.lambda)($combined, async (vals) => {
256
+ return vals.withPgClient(vals.pgSettings, async (pgClient) => {
257
+ return pgClient.withTransaction(async (txClient) => {
258
+ const databaseId = await resolveDatabaseId(txClient);
259
+ if (!databaseId)
260
+ throw new Error('DATABASE_NOT_FOUND');
261
+ const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(txClient, databaseId);
262
+ const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedFilesCodec, allConfigs);
263
+ if (!storageConfig)
264
+ throw new Error('STORAGE_MODULE_NOT_FOUND');
265
+ const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
266
+ if (!bucket)
267
+ throw new Error('BUCKET_NOT_FOUND');
268
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
269
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
270
+ return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
271
+ contentHash: vals.contentHash,
272
+ contentType: vals.contentType,
273
+ size: vals.size,
274
+ filename: vals.filename,
275
+ key: vals.customKey,
276
+ });
277
+ });
278
+ });
279
+ });
280
+ },
281
+ });
282
+ // --- Bulk file upload mutation ---
283
+ const BulkFileInputType = new GraphQLInputObjectType({
284
+ name: `Upload${filesTypeName}BulkFileInput`,
285
+ fields: {
286
+ contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
287
+ contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
288
+ size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
289
+ filename: { type: GraphQLString, description: 'Original filename (optional)' },
290
+ key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
291
+ },
292
+ });
293
+ const BulkFilePayloadType = new GraphQLObjectType({
294
+ name: `Upload${filesTypeName}BulkFilePayload`,
295
+ fields: {
296
+ uploadUrl: { type: GraphQLString },
297
+ fileId: { type: new GraphQLNonNull(GraphQLString) },
298
+ key: { type: new GraphQLNonNull(GraphQLString) },
299
+ deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) },
300
+ expiresAt: { type: GraphQLString },
301
+ previousVersionId: { type: GraphQLString },
302
+ },
303
+ });
304
+ const BulkInputType = new GraphQLInputObjectType({
305
+ name: `Upload${filesTypeName}BulkInput`,
306
+ fields: {
307
+ bucketKey: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
308
+ ...(hasOwnerId
309
+ ? { ownerId: { type: new GraphQLNonNull(ownerIdGqlType || GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
310
+ : {}),
311
+ files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkFileInputType))), description: 'Array of files to upload' },
312
+ },
313
+ });
314
+ const BulkPayloadType = new GraphQLObjectType({
315
+ name: `Upload${filesTypeName}BulkPayload`,
316
+ fields: {
317
+ files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkFilePayloadType))) },
318
+ },
319
+ });
320
+ const bulkMutationName = `upload${filesTypeName}s`;
321
+ log.debug(`Adding bulk file upload mutation "${bulkMutationName}" for ${filesTypeName}`);
322
+ newFields[bulkMutationName] = context.fieldWithHooks({ fieldName: bulkMutationName }, {
323
+ description: `Upload multiple files: resolves the bucket by key, creates file rows, and returns presigned PUT URLs for each.`,
324
+ type: BulkPayloadType,
325
+ args: {
326
+ input: { type: new GraphQLNonNull(BulkInputType) },
327
+ },
328
+ plan(_$mutation, fieldArgs) {
329
+ const $input = fieldArgs.getRaw('input');
330
+ const $bucketKey = (0, grafast_1.access)($input, 'bucketKey');
331
+ const $ownerId = hasOwnerId ? (0, grafast_1.access)($input, 'ownerId') : (0, grafast_1.lambda)(null, () => null);
332
+ const $files = (0, grafast_1.access)($input, 'files');
333
+ const $withPgClient = (0, grafast_1.context)().get('withPgClient');
334
+ const $pgSettings = (0, grafast_1.context)().get('pgSettings');
335
+ const $combined = (0, grafast_1.object)({
336
+ bucketKey: $bucketKey,
337
+ ownerId: $ownerId,
338
+ files: $files,
339
+ withPgClient: $withPgClient,
340
+ pgSettings: $pgSettings,
341
+ });
342
+ return (0, grafast_1.lambda)($combined, async (vals) => {
343
+ return vals.withPgClient(vals.pgSettings, async (pgClient) => {
344
+ return pgClient.withTransaction(async (txClient) => {
345
+ const databaseId = await resolveDatabaseId(txClient);
346
+ if (!databaseId)
347
+ throw new Error('DATABASE_NOT_FOUND');
348
+ const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(txClient, databaseId);
349
+ const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedFilesCodec, allConfigs);
350
+ if (!storageConfig)
351
+ throw new Error('STORAGE_MODULE_NOT_FOUND');
352
+ const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, vals.bucketKey, vals.ownerId || undefined);
353
+ if (!bucket)
354
+ throw new Error('BUCKET_NOT_FOUND');
355
+ const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
356
+ await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
357
+ const results = [];
358
+ for (const file of vals.files) {
359
+ results.push(await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
360
+ contentHash: file.contentHash,
361
+ contentType: file.contentType,
362
+ size: file.size,
363
+ filename: file.filename,
364
+ key: file.key,
365
+ }));
366
+ }
367
+ return { files: results };
368
+ });
369
+ });
370
+ });
371
+ },
372
+ });
373
+ }
374
+ return build.extend(fields, newFields, 'PresignedUrlPlugin adding per-bucket mutation entry points and file upload mutations');
171
375
  }
172
376
  // --- Path 2: Add upload fields on @storageBuckets types ---
173
377
  if (!isPgClassType || !pgCodec || !pgCodec.attributes) {