graphile-presigned-url-plugin 0.9.0 → 0.11.0
This diff shows the contents of publicly released package versions as published to their public registries. It is provided for informational purposes only.
- package/download-url-field.d.ts +1 -0
- package/download-url-field.js +6 -8
- package/esm/download-url-field.d.ts +1 -0
- package/esm/download-url-field.js +7 -9
- package/esm/index.d.ts +6 -5
- package/esm/index.js +6 -5
- package/esm/plugin.d.ts +12 -8
- package/esm/plugin.js +343 -312
- package/esm/s3-signer.d.ts +9 -2
- package/esm/s3-signer.js +16 -3
- package/esm/storage-module-cache.d.ts +33 -0
- package/esm/storage-module-cache.js +45 -0
- package/index.d.ts +6 -5
- package/index.js +8 -4
- package/package.json +4 -4
- package/plugin.d.ts +12 -8
- package/plugin.js +341 -310
- package/s3-signer.d.ts +9 -2
- package/s3-signer.js +16 -2
- package/storage-module-cache.d.ts +33 -0
- package/storage-module-cache.js +47 -0
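
The headline change in this range: the global `requestUploadUrl` / `requestBulkUploadUrls` mutations and their shared SDL types are removed from plugin.js, and equivalent fields are added directly on each `@storageBuckets`-tagged table type, with payload and input type names inflected per table (e.g. `${TableName}RequestUploadUrlPayload`). A rough sketch of the client-side shape after upgrading, assuming a bucket table exposed as type `Bucket` and reached through a hypothetical `bucketByKey` root field (the actual accessor depends on your PostGraphile schema):

    query RequestUpload {
      bucketByKey(key: "public") {
        # New in this range: upload fields live on the bucket type itself
        requestUploadUrl(
          # 64-char lowercase hex SHA-256 of the file content
          contentHash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
          contentType: "image/png"
          size: 12345
          filename: "logo.png"
        ) {
          uploadUrl        # presigned PUT URL; null when deduplicated
          fileId
          key
          deduplicated
          expiresAt
          previousVersionId
        }
      }
    }

Under 0.9.0 the same request went through the global `Mutation.requestUploadUrl(input: {...})`, with `bucketKey`/`ownerId` resolving the bucket; that lookup disappears because the bucket row is now the field's parent. The delete-side change needs no client change: generated `delete*` mutations on `@storageFiles`-tagged tables are now wrapped with a refcount check and a synchronous S3 object delete, with an async GC job (enqueued by an AFTER DELETE trigger) as fallback.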
package/esm/plugin.js
CHANGED
@@ -1,48 +1,39 @@
 /**
- *
+ * Per-Table Storage Middleware Plugin for PostGraphile v5
  *
- *
+ * Hooks into PostGraphile's auto-generated CRUD mutations to add S3 operations:
  *
- * 1.
- *
- * content hash via UNIQUE(bucket_id, key) constraint.
+ * 1. Delete middleware — wraps `delete*` mutations on `@storageFiles`-tagged tables
+ * with S3 object cleanup (sync + async GC fallback via AFTER DELETE trigger).
  *
- * 2.
- *
+ * 2. Upload fields — adds `requestUploadUrl` and `requestBulkUploadUrls` fields
+ * on `@storageBuckets`-tagged types, so clients upload via the typed bucket API.
  *
- *
+ * 3. downloadUrl — handled by download-url-field.ts (separate plugin).
+ *
+ * No global mutations — all S3 operations are scoped to the per-table types that
+ * PostGraphile already generates. Scope resolution uses the codec's schema/table
+ * name matched against cached storage module configs.
  */
 import { context as grafastContext, lambda, object } from 'grafast';
-import
+import 'graphile-build';
 import { Logger } from '@pgpmjs/logger';
-import {
-import { generatePresignedPutUrl } from './s3-signer';
+import { loadAllStorageModules, resolveStorageConfigFromCodec, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
+import { generatePresignedPutUrl, deleteS3Object } from './s3-signer';
 const log = new Logger('graphile-presigned-url:plugin');
 // --- Protocol-level constants (not configurable) ---
 const MAX_CONTENT_HASH_LENGTH = 128;
 const MAX_CONTENT_TYPE_LENGTH = 255;
-const MAX_BUCKET_KEY_LENGTH = 255;
 const MAX_CUSTOM_KEY_LENGTH = 1024;
 const SHA256_HEX_REGEX = /^[a-f0-9]{64}$/;
 const CUSTOM_KEY_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9_.\-\/]*$/;
 // --- Helpers ---
-/**
- * Validate a SHA-256 hex string.
- */
 function isValidSha256(hash) {
     return SHA256_HEX_REGEX.test(hash);
 }
-/**
- * Build the S3 key from content hash.
- * Format: {contentHash} (flat namespace, content-addressed)
- */
 function buildS3Key(contentHash) {
     return contentHash;
 }
-/**
- * Validate a custom S3 key.
- * Must be 1-1024 chars, no path traversal, no leading slash, no null bytes.
- */
 function validateCustomKey(key) {
     if (key.length === 0 || key.length > MAX_CUSTOM_KEY_LENGTH) {
         return 'INVALID_KEY_LENGTH: must be 1-1024 characters';
@@ -61,11 +52,6 @@ function validateCustomKey(key) {
     }
     return null;
 }
-/**
- * Derive an ltree path from a custom S3 key's directory portion.
- * e.g., "reports/2024/Q1/revenue.pdf" → "reports.2024.Q1"
- * Returns null if the key has no directory component.
- */
 function derivePathFromKey(key) {
     const lastSlash = key.lastIndexOf('/');
     if (lastSlash <= 0)
@@ -73,41 +59,20 @@ function derivePathFromKey(key) {
     const dir = key.substring(0, lastSlash);
     return dir.replace(/\//g, '.');
 }
-/**
- * Resolve the database_id from the JWT context.
- * The server middleware sets jwt.claims.database_id, which is accessible
- * via jwt_private.current_database_id() — a simple function call, no
- * metaschema query needed.
- */
 async function resolveDatabaseId(pgClient) {
     const result = await pgClient.query({
         text: `SELECT jwt_private.current_database_id() AS id`,
     });
     return result.rows[0]?.id ?? null;
 }
-// --- Plugin factory ---
-/**
- * Resolve the S3 config from the options. If the option is a lazy getter
- * function, call it (and cache the result). This avoids reading env vars
- * or constructing an S3Client at module-import time.
- */
 function resolveS3(options) {
     if (typeof options.s3 === 'function') {
         const resolved = options.s3();
-        // Cache so subsequent calls don't re-evaluate
        options.s3 = resolved;
        return resolved;
     }
     return options.s3;
 }
-/**
- * Build a per-database S3Config by overlaying storage_module overrides
- * onto the global S3Config.
- *
- * - Bucket name: from resolveBucketName(databaseId) if provided, else global
- * - publicUrlPrefix: from storageConfig.publicUrlPrefix if set, else global
- * - S3 client (credentials, endpoint): always global (shared IAM key)
- */
 function resolveS3ForDatabase(options, storageConfig, databaseId) {
     const globalS3 = resolveS3(options);
     const bucket = options.resolveBucketName
@@ -123,16 +88,6 @@ function resolveS3ForDatabase(options, storageConfig, databaseId) {
         ...(publicUrlPrefix != null ? { publicUrlPrefix } : {}),
     };
 }
-/**
- * Ensure the S3 bucket for a database exists, provisioning it lazily if needed.
- *
- * Checks an in-memory Set of known-provisioned bucket names. On the first
- * request for an unseen bucket, calls the `ensureBucketProvisioned` callback
- * (which creates the bucket with correct CORS, policies, etc.), then marks
- * it as provisioned so subsequent requests skip the check entirely.
- *
- * If no `ensureBucketProvisioned` callback is configured, this is a no-op.
- */
 async function ensureS3BucketExists(options, s3BucketName, bucket, databaseId, allowedOrigins) {
     if (!options.ensureBucketProvisioned)
         return;
@@ -143,209 +98,339 @@ async function ensureS3BucketExists(options, s3BucketName, bucket, databaseId, a
     markS3BucketProvisioned(s3BucketName);
     log.info(`Lazy-provisioned S3 bucket "${s3BucketName}" successfully`);
 }
+// --- Plugin factory ---
 export function createPresignedUrlPlugin(options) {
-    return
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      uploadUrl: String
-      """The file ID (existing if deduplicated, new if fresh upload)"""
-      fileId: UUID!
-      """The S3 object key"""
-      key: String!
-      """Whether this file was deduplicated (already exists with same hash)"""
-      deduplicated: Boolean!
-      """Presigned URL expiry time (null if deduplicated)"""
-      expiresAt: Datetime
-      """ID of the previous version (set when re-uploading to an existing custom key)"""
-      previousVersionId: UUID
-    }
-
-    input BulkUploadFileInput {
-      """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
-      contentHash: String!
-      """MIME type of the file (e.g., "image/png")"""
-      contentType: String!
-      """File size in bytes"""
-      size: Int!
-      """Original filename (optional, for display and Content-Disposition)"""
-      filename: String
-      """Custom S3 key (only when bucket has allow_custom_keys=true)"""
-      key: String
-    }
-
-    input RequestBulkUploadUrlsInput {
-      """Bucket key (e.g., "public", "private")"""
-      bucketKey: String!
-      """Owner entity ID for entity-scoped uploads"""
-      ownerId: UUID
-      """Array of files to upload"""
-      files: [BulkUploadFileInput!]!
-    }
-
-    type BulkUploadFilePayload {
-      """Presigned PUT URL (null if file was deduplicated)"""
-      uploadUrl: String
-      """The file ID"""
-      fileId: UUID!
-      """The S3 object key"""
-      key: String!
-      """Whether this file was deduplicated"""
-      deduplicated: Boolean!
-      """Presigned URL expiry time (null if deduplicated)"""
-      expiresAt: Datetime
-      """ID of the previous version (set when re-uploading to an existing custom key)"""
-      previousVersionId: UUID
-      """Index of this file in the input array (for client correlation)"""
-      index: Int!
-    }
-
-    type RequestBulkUploadUrlsPayload {
-      """Array of results, one per input file"""
-      files: [BulkUploadFilePayload!]!
-    }
-
-    extend type Mutation {
-      """
-      Request a presigned URL for uploading a file directly to S3.
-      Client computes SHA-256 of the file content and provides it here.
-      If a file with the same hash already exists (dedup), returns the
-      existing file ID and deduplicated=true with no uploadUrl.
-      """
-      requestUploadUrl(
-        input: RequestUploadUrlInput!
-      ): RequestUploadUrlPayload
-
-      """
-      Request presigned URLs for uploading multiple files in a single batch.
-      Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size).
-      Each file is processed independently — some may dedup while others get fresh URLs.
-      """
-      requestBulkUploadUrls(
-        input: RequestBulkUploadUrlsInput!
-      ): RequestBulkUploadUrlsPayload
-    }
-    `,
-    plans: {
-        Mutation: {
-            requestUploadUrl(_$mutation, fieldArgs) {
-                const $input = fieldArgs.getRaw('input');
-                const $withPgClient = grafastContext().get('withPgClient');
-                const $pgSettings = grafastContext().get('pgSettings');
-                const $combined = object({
-                    input: $input,
-                    withPgClient: $withPgClient,
-                    pgSettings: $pgSettings,
+    return {
+        name: 'PresignedUrlPlugin',
+        version: '1.0.0',
+        description: 'Per-table S3 storage middleware: upload fields on @storageBuckets, delete middleware on @storageFiles',
+        after: ['PgAttributesPlugin', 'PgMutationCreatePlugin', 'PgMutationUpdateDeletePlugin'],
+        schema: {
+            hooks: {
+                /**
+                 * Add requestUploadUrl and requestBulkUploadUrls fields on @storageBuckets types.
+                 */
+                GraphQLObjectType_fields(fields, build, context) {
+                    const { scope: { pgCodec, isPgClassType }, } = context;
+                    if (!isPgClassType || !pgCodec || !pgCodec.attributes) {
+                        return fields;
+                    }
+                    const tags = pgCodec.extensions?.tags;
+                    if (!tags?.storageBuckets) {
+                        return fields;
+                    }
+                    log.debug(`Adding upload fields to bucket type: ${pgCodec.name} (has @storageBuckets tag)`);
+                    const { graphql: { GraphQLString, GraphQLNonNull, GraphQLInt, GraphQLBoolean, GraphQLObjectType, GraphQLList, GraphQLInputObjectType, }, } = build;
+                    // --- Shared output types ---
+                    const UploadUrlPayloadType = new GraphQLObjectType({
+                        name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestUploadUrlPayload`,
+                        fields: {
+                            uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' },
+                            fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID' },
+                            key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' },
+                            deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated' },
+                            expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' },
+                            previousVersionId: { type: GraphQLString, description: 'ID of the previous version' },
+                        },
                     });
-
-
-
+                    const BulkUploadFilePayloadType = new GraphQLObjectType({
+                        name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFilePayload`,
+                        fields: {
+                            uploadUrl: { type: GraphQLString },
+                            fileId: { type: new GraphQLNonNull(GraphQLString) },
+                            key: { type: new GraphQLNonNull(GraphQLString) },
+                            deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) },
+                            expiresAt: { type: GraphQLString },
+                            previousVersionId: { type: GraphQLString },
+                            index: { type: new GraphQLNonNull(GraphQLInt), description: 'Index in the input array' },
+                        },
                     });
-
-
-
-
-
-                const $combined = object({
-                    input: $input,
-                    withPgClient: $withPgClient,
-                    pgSettings: $pgSettings,
+                    const BulkUploadUrlsPayloadType = new GraphQLObjectType({
+                        name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestBulkUploadUrlsPayload`,
+                        fields: {
+                            files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFilePayloadType))) },
+                        },
                     });
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                }
-
-
-
-
-                const
-
-
-
-                const
-
-
+                    const BulkUploadFileInputType = new GraphQLInputObjectType({
+                        name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFileInput`,
+                        fields: {
+                            contentHash: { type: new GraphQLNonNull(GraphQLString) },
+                            contentType: { type: new GraphQLNonNull(GraphQLString) },
+                            size: { type: new GraphQLNonNull(GraphQLInt) },
+                            filename: { type: GraphQLString },
+                            key: { type: GraphQLString },
+                        },
+                    });
+                    // Capture codec for closure
+                    const capturedCodec = pgCodec;
+                    return build.extend(fields, {
+                        requestUploadUrl: context.fieldWithHooks({ fieldName: 'requestUploadUrl' }, {
+                            description: 'Request a presigned URL for uploading a file to this bucket.',
+                            type: UploadUrlPayloadType,
+                            args: {
+                                contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
+                                contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
+                                size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
+                                filename: { type: GraphQLString, description: 'Original filename (optional)' },
+                                key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
+                            },
+                            plan($parent, fieldArgs) {
+                                const $bucketId = $parent.get('id');
+                                const $bucketKey = $parent.get('key');
+                                const $bucketType = $parent.get('type');
+                                const $bucketIsPublic = $parent.get('is_public');
+                                const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
+                                const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
+                                const $bucketMaxFileSize = $parent.get('max_file_size');
+                                const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : lambda(null, () => null);
+                                const $contentHash = fieldArgs.getRaw('contentHash');
+                                const $contentType = fieldArgs.getRaw('contentType');
+                                const $size = fieldArgs.getRaw('size');
+                                const $filename = fieldArgs.getRaw('filename');
+                                const $customKey = fieldArgs.getRaw('key');
+                                const $withPgClient = grafastContext().get('withPgClient');
+                                const $pgSettings = grafastContext().get('pgSettings');
+                                const $combined = object({
+                                    bucketId: $bucketId,
+                                    bucketKey: $bucketKey,
+                                    bucketType: $bucketType,
+                                    bucketIsPublic: $bucketIsPublic,
+                                    bucketAllowCustomKeys: $bucketAllowCustomKeys,
+                                    bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
+                                    bucketMaxFileSize: $bucketMaxFileSize,
+                                    bucketOwnerId: $bucketOwnerId,
+                                    contentHash: $contentHash,
+                                    contentType: $contentType,
+                                    size: $size,
+                                    filename: $filename,
+                                    customKey: $customKey,
+                                    withPgClient: $withPgClient,
+                                    pgSettings: $pgSettings,
+                                });
+                                return lambda($combined, async (vals) => {
+                                    return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+                                        return pgClient.withTransaction(async (txClient) => {
+                                            const databaseId = await resolveDatabaseId(txClient);
+                                            if (!databaseId)
+                                                throw new Error('DATABASE_NOT_FOUND');
+                                            const allConfigs = await loadAllStorageModules(txClient, databaseId);
+                                            const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+                                            if (!storageConfig)
+                                                throw new Error('STORAGE_MODULE_NOT_FOUND');
+                                            const bucket = {
+                                                id: vals.bucketId,
+                                                key: vals.bucketKey,
+                                                type: vals.bucketType,
+                                                is_public: vals.bucketIsPublic,
+                                                owner_id: vals.bucketOwnerId,
+                                                allowed_mime_types: vals.bucketAllowedMimeTypes,
+                                                max_file_size: vals.bucketMaxFileSize,
+                                                allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
+                                            };
+                                            const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+                                            await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+                                            return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
+                                                contentHash: vals.contentHash,
+                                                contentType: vals.contentType,
+                                                size: vals.size,
+                                                filename: vals.filename,
+                                                key: vals.customKey,
+                                            });
+                                        });
+                                    });
+                                });
+                            },
+                        }),
+                        requestBulkUploadUrls: context.fieldWithHooks({ fieldName: 'requestBulkUploadUrls' }, {
+                            description: 'Request presigned URLs for uploading multiple files to this bucket.',
+                            type: BulkUploadUrlsPayloadType,
+                            args: {
+                                files: {
+                                    type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFileInputType))),
+                                    description: 'Array of files to upload',
+                                },
+                            },
+                            plan($parent, fieldArgs) {
+                                const $bucketId = $parent.get('id');
+                                const $bucketKey = $parent.get('key');
+                                const $bucketType = $parent.get('type');
+                                const $bucketIsPublic = $parent.get('is_public');
+                                const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
+                                const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
+                                const $bucketMaxFileSize = $parent.get('max_file_size');
+                                const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : lambda(null, () => null);
+                                const $files = fieldArgs.getRaw('files');
+                                const $withPgClient = grafastContext().get('withPgClient');
+                                const $pgSettings = grafastContext().get('pgSettings');
+                                const $combined = object({
+                                    bucketId: $bucketId,
+                                    bucketKey: $bucketKey,
+                                    bucketType: $bucketType,
+                                    bucketIsPublic: $bucketIsPublic,
+                                    bucketAllowCustomKeys: $bucketAllowCustomKeys,
+                                    bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
+                                    bucketMaxFileSize: $bucketMaxFileSize,
+                                    bucketOwnerId: $bucketOwnerId,
+                                    files: $files,
+                                    withPgClient: $withPgClient,
+                                    pgSettings: $pgSettings,
+                                });
+                                return lambda($combined, async (vals) => {
+                                    const { files } = vals;
+                                    if (!Array.isArray(files) || files.length === 0) {
+                                        throw new Error('INVALID_FILES: must provide at least one file');
+                                    }
+                                    return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+                                        return pgClient.withTransaction(async (txClient) => {
+                                            const databaseId = await resolveDatabaseId(txClient);
+                                            if (!databaseId)
+                                                throw new Error('DATABASE_NOT_FOUND');
+                                            const allConfigs = await loadAllStorageModules(txClient, databaseId);
+                                            const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+                                            if (!storageConfig)
+                                                throw new Error('STORAGE_MODULE_NOT_FOUND');
+                                            if (files.length > storageConfig.maxBulkFiles) {
+                                                throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
+                                            }
+                                            const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
+                                            if (totalSize > storageConfig.maxBulkTotalSize) {
+                                                throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
+                                            }
+                                            const bucket = {
+                                                id: vals.bucketId,
+                                                key: vals.bucketKey,
+                                                type: vals.bucketType,
+                                                is_public: vals.bucketIsPublic,
+                                                owner_id: vals.bucketOwnerId,
+                                                allowed_mime_types: vals.bucketAllowedMimeTypes,
+                                                max_file_size: vals.bucketMaxFileSize,
+                                                allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
+                                            };
+                                            const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+                                            await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+                                            const results = [];
+                                            for (let i = 0; i < files.length; i++) {
+                                                const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, files[i]);
+                                                results.push({ ...result, index: i });
+                                            }
+                                            return { files: results };
+                                        });
+                                    });
+                                });
+                            },
+                        }),
+                    }, `PresignedUrlPlugin adding upload fields to ${pgCodec.name}`);
+                },
+                /**
+                 * Wrap delete* mutations on @storageFiles-tagged tables with S3 cleanup.
+                 *
+                 * Pattern: identical to graphile-bucket-provisioner-plugin's create/update hooks.
+                 * 1. Read the file row BEFORE delete (need key + bucket_id for S3 cleanup)
+                 * 2. Call PostGraphile's generated delete (RLS enforced)
+                 * 3. If delete succeeded, check refcount and attempt sync S3 delete
+                 * 4. AFTER DELETE trigger (constructive-db) enqueues async GC job as fallback
+                 */
+                GraphQLObjectType_fields_field(field, build, context) {
+                    const { scope: { isRootMutation, fieldName, pgCodec }, } = context;
+                    if (!isRootMutation || !pgCodec || !pgCodec.attributes) {
+                        return field;
+                    }
+                    const tags = pgCodec.extensions?.tags;
+                    if (!tags?.storageFiles) {
+                        return field;
+                    }
+                    if (!fieldName.startsWith('delete')) {
+                        return field;
+                    }
+                    log.debug(`Wrapping delete mutation "${fieldName}" with S3 cleanup (codec: ${pgCodec.name})`);
+                    const defaultResolver = (obj) => obj[fieldName];
+                    const { resolve: oldResolve = defaultResolver, ...rest } = field;
+                    const capturedCodec = pgCodec;
+                    return {
+                        ...rest,
+                        async resolve(source, args, graphqlContext, info) {
+                            // Extract the file ID from the mutation input
+                            const inputKey = Object.keys(args.input || {}).find((k) => k !== 'clientMutationId');
+                            const fileInput = inputKey ? args.input[inputKey] : null;
+                            let fileRow = null;
+                            if (fileInput) {
+                                // Read the file row BEFORE delete to get the S3 key + bucket_id
+                                const withPgClient = graphqlContext.withPgClient;
+                                const pgSettings = graphqlContext.pgSettings;
+                                if (withPgClient) {
+                                    try {
+                                        await withPgClient(pgSettings, async (pgClient) => {
+                                            const databaseId = await resolveDatabaseId(pgClient);
+                                            if (!databaseId)
+                                                return;
+                                            const allConfigs = await loadAllStorageModules(pgClient, databaseId);
+                                            const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+                                            if (!storageConfig)
+                                                return;
+                                            // Read the file row (RLS enforced)
+                                            const result = await pgClient.query({
+                                                text: `SELECT key, bucket_id FROM ${storageConfig.filesQualifiedName} WHERE id = $1 LIMIT 1`,
+                                                values: [fileInput],
+                                            });
+                                            if (result.rows.length > 0) {
+                                                fileRow = result.rows[0];
+                                            }
+                                        });
+                                    }
+                                    catch (err) {
+                                        log.warn(`Pre-delete file lookup failed: ${err.message}`);
+                                    }
                                 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                            }
+                            // Call PostGraphile's generated delete (RLS enforced)
+                            const result = await oldResolve(source, args, graphqlContext, info);
+                            // Attempt sync S3 cleanup if we have the file row
+                            if (fileRow) {
+                                const withPgClient = graphqlContext.withPgClient;
+                                const pgSettings = graphqlContext.pgSettings;
+                                if (withPgClient) {
+                                    try {
+                                        await withPgClient(pgSettings, async (pgClient) => {
+                                            const databaseId = await resolveDatabaseId(pgClient);
+                                            if (!databaseId)
+                                                return;
+                                            const allConfigs = await loadAllStorageModules(pgClient, databaseId);
+                                            const storageConfig = resolveStorageConfigFromCodec(capturedCodec, allConfigs);
+                                            if (!storageConfig)
+                                                return;
+                                            // Check refcount: any other file with the same key in this bucket?
+                                            const refResult = await pgClient.query({
+                                                text: `SELECT COUNT(*)::int AS ref_count FROM ${storageConfig.filesQualifiedName} WHERE key = $1 AND bucket_id = $2`,
+                                                values: [fileRow.key, fileRow.bucket_id],
+                                            });
+                                            const refCount = refResult.rows[0]?.ref_count ?? 0;
+                                            if (refCount > 0) {
+                                                log.info(`File deleted from DB; S3 key ${fileRow.key} still referenced by ${refCount} file(s)`);
+                                                return;
+                                            }
+                                            // No other references — attempt sync S3 delete
+                                            const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+                                            await deleteS3Object(s3ForDb, fileRow.key);
+                                            log.info(`Sync S3 delete succeeded for key=${fileRow.key}`);
+                                        });
+                                    }
+                                    catch (err) {
+                                        // Sync S3 delete failed — the AFTER DELETE trigger has enqueued an async GC job
+                                        log.warn(`Sync S3 delete failed for key=${fileRow.key}; async GC job will retry: ${err.message}`);
+                                    }
                                 }
-
-
-                }
-            }
+                            }
+                            return result;
+                        },
+                    };
                 },
             },
         },
-}
+    };
 }
 // --- Shared upload logic ---
-
-
- */
-async function processUpload(options, input, withPgClient, pgSettings) {
-    const { bucketKey, ownerId, contentHash, contentType, size, filename, key: customKey } = input;
-    if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
-        throw new Error('INVALID_BUCKET_KEY');
-    }
+async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
+    const { contentHash, contentType, size, filename, key: customKey } = input;
     if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
         throw new Error('INVALID_CONTENT_HASH');
     }
@@ -355,51 +440,6 @@ async function processUpload(options, input, withPgClient, pgSettings) {
     if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
         throw new Error('INVALID_CONTENT_TYPE');
     }
-    return withPgClient(pgSettings, async (pgClient) => {
-        return pgClient.withTransaction(async (txClient) => {
-            const databaseId = await resolveDatabaseId(txClient);
-            if (!databaseId) {
-                throw new Error('DATABASE_NOT_FOUND');
-            }
-            const storageConfig = ownerId
-                ? await getStorageModuleConfigForOwner(txClient, databaseId, ownerId)
-                : await getStorageModuleConfig(txClient, databaseId);
-            if (!storageConfig) {
-                throw new Error(ownerId
-                    ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
-                    : 'STORAGE_MODULE_NOT_PROVISIONED');
-            }
-            if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
-                throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
-            }
-            if (filename !== undefined && filename !== null) {
-                if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
-                    throw new Error('INVALID_FILENAME');
-                }
-            }
-            const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId);
-            if (!bucket) {
-                throw new Error('BUCKET_NOT_FOUND');
-            }
-            const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
-            await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
-            return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input);
-        });
-    });
-}
-/**
- * Process a single file upload within an already-resolved context.
- * Handles dedup, custom keys, versioning, and auto-path derivation.
- */
-async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
-    const { contentHash, contentType, size, filename, key: customKey } = input;
-    // --- Validate inputs ---
-    if (!contentHash || !isValidSha256(contentHash)) {
-        throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
-    }
-    if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
-        throw new Error('INVALID_CONTENT_TYPE');
-    }
     if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
         throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
     }
@@ -408,7 +448,7 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
             throw new Error('INVALID_FILENAME');
         }
     }
-    //
+    // Validate content type against bucket's allowed_mime_types
     if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
         const allowed = bucket.allowed_mime_types;
         const isAllowed = allowed.some((pattern) => {
@@ -424,11 +464,11 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
             throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
         }
     }
-    //
+    // Validate size against bucket's max_file_size
    if (bucket.max_file_size && size > bucket.max_file_size) {
        throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
    }
-    //
+    // Determine S3 key
     let s3Key;
     let isCustomKey = false;
     if (customKey) {
@@ -445,11 +485,9 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
     else {
         s3Key = buildS3Key(contentHash);
     }
-    //
+    // Dedup / versioning check
     let previousVersionId = null;
     if (isCustomKey) {
-        // Custom key mode: check if a file with this key already exists in this bucket.
-        // If so, auto-version by linking via previous_version_id.
         const existingResult = await txClient.query({
             text: `SELECT id, content_hash
                    FROM ${storageConfig.filesQualifiedName}
@@ -461,7 +499,6 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
         });
         if (existingResult.rows.length > 0) {
             const existing = existingResult.rows[0];
-            // Same content hash = true dedup (no new upload needed)
             if (existing.content_hash === contentHash) {
                 log.info(`Dedup hit (custom key): file ${existing.id} for key ${s3Key}`);
                 return {
@@ -473,13 +510,11 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
                     previousVersionId: null,
                 };
             }
-            // Different content = new version
            previousVersionId = existing.id;
            log.info(`Versioning: new version of key ${s3Key}, previous=${previousVersionId}`);
         }
     }
     else {
-        // Hash-based mode: dedup by content_hash in this bucket
         const dedupResult = await txClient.query({
             text: `SELECT id
                    FROM ${storageConfig.filesQualifiedName}
@@ -501,27 +536,23 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
             };
         }
     }
-    //
+    // Auto-derive ltree path from custom key directory (only when has_path_shares)
     const derivedPath = isCustomKey && storageConfig.hasPathShares ? derivePathFromKey(s3Key) : null;
-    //
+    // Create file record
     const hasOwnerColumn = storageConfig.membershipType !== null;
     const columns = ['bucket_id', 'key', 'content_hash', 'mime_type', 'size', 'filename', 'is_public'];
     const values = [bucket.id, s3Key, contentHash, contentType, size, filename || null, bucket.is_public];
-    let paramIdx = values.length;
     if (hasOwnerColumn) {
         columns.push('owner_id');
         values.push(bucket.owner_id);
-        paramIdx = values.length;
     }
     if (previousVersionId) {
         columns.push('previous_version_id');
         values.push(previousVersionId);
-        paramIdx = values.length;
     }
     if (derivedPath) {
         columns.push('path');
         values.push(derivedPath);
-        paramIdx = values.length;
     }
     const placeholders = values.map((_, i) => `$${i + 1}`).join(', ');
     const fileResult = await txClient.query({
@@ -532,7 +563,7 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
         values,
     });
     const fileId = fileResult.rows[0].id;
-    //
+    // Generate presigned PUT URL
     const uploadUrl = await generatePresignedPutUrl(s3ForDb, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
     const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
     return {