graphile-presigned-url-plugin 0.10.0 → 0.12.0
- package/download-url-field.d.ts +1 -0
- package/download-url-field.js +6 -8
- package/esm/download-url-field.d.ts +1 -0
- package/esm/download-url-field.js +7 -9
- package/esm/index.d.ts +6 -5
- package/esm/index.js +6 -5
- package/esm/plugin.d.ts +15 -8
- package/esm/plugin.js +411 -313
- package/esm/s3-signer.d.ts +9 -2
- package/esm/s3-signer.js +16 -3
- package/esm/storage-module-cache.d.ts +33 -0
- package/esm/storage-module-cache.js +45 -0
- package/index.d.ts +6 -5
- package/index.js +8 -4
- package/package.json +2 -2
- package/plugin.d.ts +15 -8
- package/plugin.js +408 -310
- package/s3-signer.d.ts +9 -2
- package/s3-signer.js +16 -2
- package/storage-module-cache.d.ts +33 -0
- package/storage-module-cache.js +47 -0
package/plugin.js
CHANGED
@@ -1,23 +1,29 @@
 "use strict";
 /**
- *
+ * Per-Table Storage Middleware Plugin for PostGraphile v5
  *
- *
+ * Hooks into PostGraphile's auto-generated CRUD mutations to add S3 operations:
  *
- * 1.
- *
- *    content hash via UNIQUE(bucket_id, key) constraint.
+ * 1. Delete middleware — wraps `delete*` mutations on `@storageFiles`-tagged tables
+ *    with S3 object cleanup (sync + async GC fallback via AFTER DELETE trigger).
  *
- * 2.
- *
+ * 2. Upload fields — adds `requestUploadUrl` and `requestBulkUploadUrls` fields
+ *    on `@storageBuckets`-tagged types, so clients upload via the typed bucket API.
  *
- *
+ * 3. Mutation entry points — adds per-bucket mutation fields on the root Mutation
+ *    type (e.g., `appBucket(key: "public"): AppBucket`), so upload operations
+ *    can be accessed as proper GraphQL mutations instead of queries.
+ *
+ * 4. downloadUrl — handled by download-url-field.ts (separate plugin).
+ *
+ * Scope resolution uses the codec's schema/table name matched against
+ * cached storage module configs.
 */
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.PresignedUrlPlugin = void 0;
 exports.createPresignedUrlPlugin = createPresignedUrlPlugin;
 const grafast_1 = require("grafast");
- … (old line 20 not captured by the diff viewer) …
+require("graphile-build");
 const logger_1 = require("@pgpmjs/logger");
 const storage_module_cache_1 = require("./storage-module-cache");
 const s3_signer_1 = require("./s3-signer");
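
Note: for orientation, a hedged sketch of how a client might exercise the fields this header describes. The `appBucket` name follows the header comment's own example; the argument and payload field names match the definitions added later in this diff, but the actual field name is derived from your `@storageBuckets`-tagged table's type name.

```js
// Hypothetical client operation against the new per-bucket mutation entry point.
// "appBucket" is the example from the header comment above; your schema's field
// is the lower-camelCase form of the bucket table's GraphQL type name.
const REQUEST_UPLOAD_URL = `
  mutation RequestUpload($hash: String!, $type: String!, $size: Int!) {
    appBucket(key: "public") {
      requestUploadUrl(contentHash: $hash, contentType: $type, size: $size) {
        uploadUrl        # presigned PUT URL (null if deduplicated)
        fileId
        key
        deduplicated
        expiresAt
      }
    }
  }
`;
```
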
@@ -25,28 +31,16 @@ const log = new logger_1.Logger('graphile-presigned-url:plugin');
 // --- Protocol-level constants (not configurable) ---
 const MAX_CONTENT_HASH_LENGTH = 128;
 const MAX_CONTENT_TYPE_LENGTH = 255;
-const MAX_BUCKET_KEY_LENGTH = 255;
 const MAX_CUSTOM_KEY_LENGTH = 1024;
 const SHA256_HEX_REGEX = /^[a-f0-9]{64}$/;
 const CUSTOM_KEY_REGEX = /^[a-zA-Z0-9][a-zA-Z0-9_.\-\/]*$/;
 // --- Helpers ---
-/**
- * Validate a SHA-256 hex string.
- */
 function isValidSha256(hash) {
     return SHA256_HEX_REGEX.test(hash);
 }
-/**
- * Build the S3 key from content hash.
- * Format: {contentHash} (flat namespace, content-addressed)
- */
 function buildS3Key(contentHash) {
     return contentHash;
 }
-/**
- * Validate a custom S3 key.
- * Must be 1-1024 chars, no path traversal, no leading slash, no null bytes.
- */
 function validateCustomKey(key) {
     if (key.length === 0 || key.length > MAX_CUSTOM_KEY_LENGTH) {
         return 'INVALID_KEY_LENGTH: must be 1-1024 characters';
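
Note: `SHA256_HEX_REGEX` accepts exactly 64 lowercase hex characters, so clients must hash content before requesting an upload URL. A minimal Node sketch of producing a conforming `contentHash`:

```js
// Produces a digest matching SHA256_HEX_REGEX (/^[a-f0-9]{64}$/):
// node:crypto emits lowercase hex by default.
const { createHash } = require('node:crypto');

function contentHashOf(buffer) {
    return createHash('sha256').update(buffer).digest('hex');
}
```
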
@@ -65,11 +59,6 @@ function validateCustomKey(key) {
     }
     return null;
 }
-/**
- * Derive an ltree path from a custom S3 key's directory portion.
- * e.g., "reports/2024/Q1/revenue.pdf" → "reports.2024.Q1"
- * Returns null if the key has no directory component.
- */
 function derivePathFromKey(key) {
     const lastSlash = key.lastIndexOf('/');
     if (lastSlash <= 0)
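
Note: the deleted docblock still describes the (unchanged) behavior of `derivePathFromKey`; restated as a self-contained sketch:

```js
// Self-contained restatement of derivePathFromKey, whose logic is unchanged
// in this diff — only its docblock was removed.
function derivePathFromKey(key) {
    const lastSlash = key.lastIndexOf('/');
    if (lastSlash <= 0)
        return null; // no directory component
    return key.substring(0, lastSlash).replace(/\//g, '.');
}

derivePathFromKey('reports/2024/Q1/revenue.pdf'); // => 'reports.2024.Q1'
derivePathFromKey('revenue.pdf');                 // => null
```
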
@@ -77,41 +66,20 @@ function derivePathFromKey(key) {
     const dir = key.substring(0, lastSlash);
     return dir.replace(/\//g, '.');
 }
-/**
- * Resolve the database_id from the JWT context.
- * The server middleware sets jwt.claims.database_id, which is accessible
- * via jwt_private.current_database_id() — a simple function call, no
- * metaschema query needed.
- */
 async function resolveDatabaseId(pgClient) {
     const result = await pgClient.query({
         text: `SELECT jwt_private.current_database_id() AS id`,
     });
     return result.rows[0]?.id ?? null;
 }
-// --- Plugin factory ---
-/**
- * Resolve the S3 config from the options. If the option is a lazy getter
- * function, call it (and cache the result). This avoids reading env vars
- * or constructing an S3Client at module-import time.
- */
 function resolveS3(options) {
     if (typeof options.s3 === 'function') {
         const resolved = options.s3();
-        // Cache so subsequent calls don't re-evaluate
         options.s3 = resolved;
         return resolved;
     }
     return options.s3;
 }
-/**
- * Build a per-database S3Config by overlaying storage_module overrides
- * onto the global S3Config.
- *
- * - Bucket name: from resolveBucketName(databaseId) if provided, else global
- * - publicUrlPrefix: from storageConfig.publicUrlPrefix if set, else global
- * - S3 client (credentials, endpoint): always global (shared IAM key)
- */
 function resolveS3ForDatabase(options, storageConfig, databaseId) {
     const globalS3 = resolveS3(options);
     const bucket = options.resolveBucketName
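
Note: `resolveS3` accepts `options.s3` either as a config object or as a lazy getter that is evaluated once and cached. A hedged sketch of the lazy form; the exact S3Config shape is not visible in this diff, so the field names below are assumptions:

```js
// Hedged sketch: `client`, `bucket`, and `publicUrlPrefix` are assumed field
// names (the diff only references s3ForDb.bucket and publicUrlPrefix, not the
// full shape). makeS3Client is a hypothetical helper.
const plugin = createPresignedUrlPlugin({
    // Lazy getter: no env reads and no S3 client construction at module-import
    // time; resolveS3 calls this once and writes the result back to options.s3.
    s3: () => ({
        client: makeS3Client(process.env),
        bucket: process.env.S3_BUCKET,
        publicUrlPrefix: process.env.S3_PUBLIC_URL_PREFIX,
    }),
});
```
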
@@ -127,16 +95,6 @@ function resolveS3ForDatabase(options, storageConfig, databaseId) {
         ...(publicUrlPrefix != null ? { publicUrlPrefix } : {}),
     };
 }
-/**
- * Ensure the S3 bucket for a database exists, provisioning it lazily if needed.
- *
- * Checks an in-memory Set of known-provisioned bucket names. On the first
- * request for an unseen bucket, calls the `ensureBucketProvisioned` callback
- * (which creates the bucket with correct CORS, policies, etc.), then marks
- * it as provisioned so subsequent requests skip the check entirely.
- *
- * If no `ensureBucketProvisioned` callback is configured, this is a no-op.
- */
 async function ensureS3BucketExists(options, s3BucketName, bucket, databaseId, allowedOrigins) {
     if (!options.ensureBucketProvisioned)
         return;
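
Note: per the deleted docblock, `ensureS3BucketExists` memoizes provisioned bucket names in-process (via `markS3BucketProvisioned`) and is a no-op without the callback. A hedged sketch of wiring the callback; its exact parameter list is not shown in this diff, so the signature below is an assumption:

```js
// Assumed callback signature, mirroring the arguments ensureS3BucketExists
// itself receives; treat as illustrative. provisionBucket is hypothetical.
const plugin = createPresignedUrlPlugin({
    s3,
    ensureBucketProvisioned: async (s3BucketName, bucket, databaseId, allowedOrigins) => {
        // Runs once per unseen bucket name; should be idempotent, since
        // another process may already have provisioned the bucket.
        await provisionBucket(s3BucketName, { cors: allowedOrigins });
    },
});
```
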
@@ -147,209 +105,403 @@ async function ensureS3BucketExists(options, s3BucketName, bucket, databaseId, a
     (0, storage_module_cache_1.markS3BucketProvisioned)(s3BucketName);
     log.info(`Lazy-provisioned S3 bucket "${s3BucketName}" successfully`);
 }
+// --- Plugin factory ---
 function createPresignedUrlPlugin(options) {
-    return
- … (old lines 152-246 not captured by the diff viewer) …
-                requestUploadUrl(
-                    input: RequestUploadUrlInput!
-                ): RequestUploadUrlPayload
-
-                """
-                Request presigned URLs for uploading multiple files in a single batch.
-                Subject to per-storage-module limits (max_bulk_files, max_bulk_total_size).
-                Each file is processed independently — some may dedup while others get fresh URLs.
-                """
-                requestBulkUploadUrls(
-                    input: RequestBulkUploadUrlsInput!
-                ): RequestBulkUploadUrlsPayload
-            }
-        `,
-        plans: {
-            Mutation: {
-                requestUploadUrl(_$mutation, fieldArgs) {
-                    const $input = fieldArgs.getRaw('input');
-                    const $withPgClient = (0, grafast_1.context)().get('withPgClient');
-                    const $pgSettings = (0, grafast_1.context)().get('pgSettings');
-                    const $combined = (0, grafast_1.object)({
-                        input: $input,
-                        withPgClient: $withPgClient,
-                        pgSettings: $pgSettings,
+    return {
+        name: 'PresignedUrlPlugin',
+        version: '1.0.0',
+        description: 'Per-table S3 storage middleware: upload fields on @storageBuckets, delete middleware on @storageFiles',
+        after: ['PgAttributesPlugin', 'PgMutationCreatePlugin', 'PgMutationUpdateDeletePlugin'],
+        schema: {
+            hooks: {
+                /**
+                 * Add requestUploadUrl and requestBulkUploadUrls fields on @storageBuckets types.
+                 */
+                GraphQLObjectType_fields(fields, build, context) {
+                    const { scope: { pgCodec, isPgClassType, isRootMutation }, } = context;
+                    // --- Path 1: Add per-bucket mutation entry points on root Mutation ---
+                    if (isRootMutation) {
+                        const { graphql: { GraphQLString, GraphQLNonNull }, } = build;
+                        const bucketCodecs = Object.values(build.input.pgRegistry.pgCodecs).filter((codec) => codec.attributes && codec.extensions?.tags?.storageBuckets);
+                        if (bucketCodecs.length === 0)
+                            return fields;
+                        const newFields = {};
+                        for (const codec of bucketCodecs) {
+                            const typeName = build.inflection.tableType(codec);
+                            const bucketType = build.getTypeByName(typeName);
+                            if (!bucketType) {
+                                log.debug(`Skipping mutation entry point for ${codec.name}: type ${typeName} not found`);
+                                continue;
+                            }
+                            const fieldName = typeName.charAt(0).toLowerCase() + typeName.slice(1);
+                            const hasOwnerId = !!codec.attributes.owner_id;
+                            const capturedCodec = codec;
+                            log.debug(`Adding mutation entry point "${fieldName}" for bucket type ${typeName} (entity-scoped=${hasOwnerId})`);
+                            newFields[fieldName] = context.fieldWithHooks({ fieldName }, {
+                                description: `Look up a ${typeName} by key for mutation operations (upload, etc.).`,
+                                type: bucketType,
+                                args: {
+                                    key: { type: new GraphQLNonNull(GraphQLString), description: 'Bucket key (e.g., "public", "private")' },
+                                    ...(hasOwnerId
+                                        ? { ownerId: { type: new GraphQLNonNull(GraphQLString), description: 'Owner entity ID (required for entity-scoped buckets)' } }
+                                        : {}),
+                                },
+                                plan(_$mutation, fieldArgs) {
+                                    const $key = fieldArgs.getRaw('key');
+                                    const $ownerId = hasOwnerId ? fieldArgs.getRaw('ownerId') : (0, grafast_1.lambda)(null, () => null);
+                                    const $withPgClient = (0, grafast_1.context)().get('withPgClient');
+                                    const $pgSettings = (0, grafast_1.context)().get('pgSettings');
+                                    const $combined = (0, grafast_1.object)({
+                                        key: $key,
+                                        ownerId: $ownerId,
+                                        withPgClient: $withPgClient,
+                                        pgSettings: $pgSettings,
+                                    });
+                                    const $row = (0, grafast_1.lambda)($combined, async (vals) => {
+                                        return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+                                            const databaseId = await resolveDatabaseId(pgClient);
+                                            if (!databaseId)
+                                                throw new Error('DATABASE_NOT_FOUND');
+                                            const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(pgClient, databaseId);
+                                            const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
+                                            if (!storageConfig)
+                                                throw new Error('STORAGE_MODULE_NOT_FOUND');
+                                            const bucket = await (0, storage_module_cache_1.getBucketConfig)(pgClient, storageConfig, databaseId, vals.key, vals.ownerId ?? undefined);
+                                            if (!bucket)
+                                                throw new Error('BUCKET_NOT_FOUND');
+                                            return bucket;
+                                        });
+                                    });
+                                    const columnEntries = {};
+                                    for (const col of Object.keys(capturedCodec.attributes)) {
+                                        columnEntries[col] = (0, grafast_1.access)($row, col);
+                                    }
+                                    return (0, grafast_1.object)(columnEntries);
+                                },
+                            });
+                        }
+                        return build.extend(fields, newFields, 'PresignedUrlPlugin adding per-bucket mutation entry points');
+                    }
+                    // --- Path 2: Add upload fields on @storageBuckets types ---
+                    if (!isPgClassType || !pgCodec || !pgCodec.attributes) {
+                        return fields;
+                    }
+                    const tags = pgCodec.extensions?.tags;
+                    if (!tags?.storageBuckets) {
+                        return fields;
+                    }
+                    log.debug(`Adding upload fields to bucket type: ${pgCodec.name} (has @storageBuckets tag)`);
+                    const { graphql: { GraphQLString, GraphQLNonNull, GraphQLInt, GraphQLBoolean, GraphQLObjectType, GraphQLList, GraphQLInputObjectType, }, } = build;
+                    // --- Shared output types ---
+                    const UploadUrlPayloadType = new GraphQLObjectType({
+                        name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestUploadUrlPayload`,
+                        fields: {
+                            uploadUrl: { type: GraphQLString, description: 'Presigned PUT URL (null if deduplicated)' },
+                            fileId: { type: new GraphQLNonNull(GraphQLString), description: 'The file ID' },
+                            key: { type: new GraphQLNonNull(GraphQLString), description: 'The S3 object key' },
+                            deduplicated: { type: new GraphQLNonNull(GraphQLBoolean), description: 'Whether this file was deduplicated' },
+                            expiresAt: { type: GraphQLString, description: 'Presigned URL expiry time (null if deduplicated)' },
+                            previousVersionId: { type: GraphQLString, description: 'ID of the previous version' },
+                        },
                     });
- … (old lines 272-274 not captured by the diff viewer) …
+                    const BulkUploadFilePayloadType = new GraphQLObjectType({
+                        name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFilePayload`,
+                        fields: {
+                            uploadUrl: { type: GraphQLString },
+                            fileId: { type: new GraphQLNonNull(GraphQLString) },
+                            key: { type: new GraphQLNonNull(GraphQLString) },
+                            deduplicated: { type: new GraphQLNonNull(GraphQLBoolean) },
+                            expiresAt: { type: GraphQLString },
+                            previousVersionId: { type: GraphQLString },
+                            index: { type: new GraphQLNonNull(GraphQLInt), description: 'Index in the input array' },
+                        },
                     });
- … (old lines 276-280 not captured by the diff viewer) …
-                    const $combined = (0, grafast_1.object)({
-                        input: $input,
-                        withPgClient: $withPgClient,
-                        pgSettings: $pgSettings,
+                    const BulkUploadUrlsPayloadType = new GraphQLObjectType({
+                        name: `${build.inflection.upperCamelCase(pgCodec.name)}RequestBulkUploadUrlsPayload`,
+                        fields: {
+                            files: { type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFilePayloadType))) },
+                        },
                     });
- … (old lines 286-306 not captured by the diff viewer) …
-                }
- … (old lines 308-311 not captured by the diff viewer) …
-                    const
- … (old lines 313-315 not captured by the diff viewer) …
-                    const
- … (old lines 317-318 not captured by the diff viewer) …
+                    const BulkUploadFileInputType = new GraphQLInputObjectType({
+                        name: `${build.inflection.upperCamelCase(pgCodec.name)}BulkUploadFileInput`,
+                        fields: {
+                            contentHash: { type: new GraphQLNonNull(GraphQLString) },
+                            contentType: { type: new GraphQLNonNull(GraphQLString) },
+                            size: { type: new GraphQLNonNull(GraphQLInt) },
+                            filename: { type: GraphQLString },
+                            key: { type: GraphQLString },
+                        },
+                    });
+                    // Capture codec for closure
+                    const capturedCodec = pgCodec;
+                    return build.extend(fields, {
+                        requestUploadUrl: context.fieldWithHooks({ fieldName: 'requestUploadUrl' }, {
+                            description: 'Request a presigned URL for uploading a file to this bucket.',
+                            type: UploadUrlPayloadType,
+                            args: {
+                                contentHash: { type: new GraphQLNonNull(GraphQLString), description: 'SHA-256 content hash (hex-encoded, 64 chars)' },
+                                contentType: { type: new GraphQLNonNull(GraphQLString), description: 'MIME type of the file' },
+                                size: { type: new GraphQLNonNull(GraphQLInt), description: 'File size in bytes' },
+                                filename: { type: GraphQLString, description: 'Original filename (optional)' },
+                                key: { type: GraphQLString, description: 'Custom S3 key (only when bucket has allow_custom_keys=true)' },
+                            },
+                            plan($parent, fieldArgs) {
+                                const $bucketId = $parent.get('id');
+                                const $bucketKey = $parent.get('key');
+                                const $bucketType = $parent.get('type');
+                                const $bucketIsPublic = $parent.get('is_public');
+                                const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
+                                const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
+                                const $bucketMaxFileSize = $parent.get('max_file_size');
+                                const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : (0, grafast_1.lambda)(null, () => null);
+                                const $contentHash = fieldArgs.getRaw('contentHash');
+                                const $contentType = fieldArgs.getRaw('contentType');
+                                const $size = fieldArgs.getRaw('size');
+                                const $filename = fieldArgs.getRaw('filename');
+                                const $customKey = fieldArgs.getRaw('key');
+                                const $withPgClient = (0, grafast_1.context)().get('withPgClient');
+                                const $pgSettings = (0, grafast_1.context)().get('pgSettings');
+                                const $combined = (0, grafast_1.object)({
+                                    bucketId: $bucketId,
+                                    bucketKey: $bucketKey,
+                                    bucketType: $bucketType,
+                                    bucketIsPublic: $bucketIsPublic,
+                                    bucketAllowCustomKeys: $bucketAllowCustomKeys,
+                                    bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
+                                    bucketMaxFileSize: $bucketMaxFileSize,
+                                    bucketOwnerId: $bucketOwnerId,
+                                    contentHash: $contentHash,
+                                    contentType: $contentType,
+                                    size: $size,
+                                    filename: $filename,
+                                    customKey: $customKey,
+                                    withPgClient: $withPgClient,
+                                    pgSettings: $pgSettings,
+                                });
+                                return (0, grafast_1.lambda)($combined, async (vals) => {
+                                    return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+                                        return pgClient.withTransaction(async (txClient) => {
+                                            const databaseId = await resolveDatabaseId(txClient);
+                                            if (!databaseId)
+                                                throw new Error('DATABASE_NOT_FOUND');
+                                            const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(txClient, databaseId);
+                                            const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
+                                            if (!storageConfig)
+                                                throw new Error('STORAGE_MODULE_NOT_FOUND');
+                                            const bucket = {
+                                                id: vals.bucketId,
+                                                key: vals.bucketKey,
+                                                type: vals.bucketType,
+                                                is_public: vals.bucketIsPublic,
+                                                owner_id: vals.bucketOwnerId,
+                                                allowed_mime_types: vals.bucketAllowedMimeTypes,
+                                                max_file_size: vals.bucketMaxFileSize,
+                                                allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
+                                            };
+                                            const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+                                            await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+                                            return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, {
+                                                contentHash: vals.contentHash,
+                                                contentType: vals.contentType,
+                                                size: vals.size,
+                                                filename: vals.filename,
+                                                key: vals.customKey,
+                                            });
+                                        });
+                                    });
+                                });
+                            },
+                        }),
+                        requestBulkUploadUrls: context.fieldWithHooks({ fieldName: 'requestBulkUploadUrls' }, {
+                            description: 'Request presigned URLs for uploading multiple files to this bucket.',
+                            type: BulkUploadUrlsPayloadType,
+                            args: {
+                                files: {
+                                    type: new GraphQLNonNull(new GraphQLList(new GraphQLNonNull(BulkUploadFileInputType))),
+                                    description: 'Array of files to upload',
+                                },
+                            },
+                            plan($parent, fieldArgs) {
+                                const $bucketId = $parent.get('id');
+                                const $bucketKey = $parent.get('key');
+                                const $bucketType = $parent.get('type');
+                                const $bucketIsPublic = $parent.get('is_public');
+                                const $bucketAllowCustomKeys = $parent.get('allow_custom_keys');
+                                const $bucketAllowedMimeTypes = $parent.get('allowed_mime_types');
+                                const $bucketMaxFileSize = $parent.get('max_file_size');
+                                const $bucketOwnerId = capturedCodec.attributes.owner_id ? $parent.get('owner_id') : (0, grafast_1.lambda)(null, () => null);
+                                const $files = fieldArgs.getRaw('files');
+                                const $withPgClient = (0, grafast_1.context)().get('withPgClient');
+                                const $pgSettings = (0, grafast_1.context)().get('pgSettings');
+                                const $combined = (0, grafast_1.object)({
+                                    bucketId: $bucketId,
+                                    bucketKey: $bucketKey,
+                                    bucketType: $bucketType,
+                                    bucketIsPublic: $bucketIsPublic,
+                                    bucketAllowCustomKeys: $bucketAllowCustomKeys,
+                                    bucketAllowedMimeTypes: $bucketAllowedMimeTypes,
+                                    bucketMaxFileSize: $bucketMaxFileSize,
+                                    bucketOwnerId: $bucketOwnerId,
+                                    files: $files,
+                                    withPgClient: $withPgClient,
+                                    pgSettings: $pgSettings,
+                                });
+                                return (0, grafast_1.lambda)($combined, async (vals) => {
+                                    const { files } = vals;
+                                    if (!Array.isArray(files) || files.length === 0) {
+                                        throw new Error('INVALID_FILES: must provide at least one file');
+                                    }
+                                    return vals.withPgClient(vals.pgSettings, async (pgClient) => {
+                                        return pgClient.withTransaction(async (txClient) => {
+                                            const databaseId = await resolveDatabaseId(txClient);
+                                            if (!databaseId)
+                                                throw new Error('DATABASE_NOT_FOUND');
+                                            const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(txClient, databaseId);
+                                            const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
+                                            if (!storageConfig)
+                                                throw new Error('STORAGE_MODULE_NOT_FOUND');
+                                            if (files.length > storageConfig.maxBulkFiles) {
+                                                throw new Error(`BULK_LIMIT_EXCEEDED: max ${storageConfig.maxBulkFiles} files per batch`);
+                                            }
+                                            const totalSize = files.reduce((sum, f) => sum + (f.size || 0), 0);
+                                            if (totalSize > storageConfig.maxBulkTotalSize) {
+                                                throw new Error(`BULK_SIZE_EXCEEDED: total size ${totalSize} exceeds max ${storageConfig.maxBulkTotalSize} bytes`);
+                                            }
+                                            const bucket = {
+                                                id: vals.bucketId,
+                                                key: vals.bucketKey,
+                                                type: vals.bucketType,
+                                                is_public: vals.bucketIsPublic,
+                                                owner_id: vals.bucketOwnerId,
+                                                allowed_mime_types: vals.bucketAllowedMimeTypes,
+                                                max_file_size: vals.bucketMaxFileSize,
+                                                allow_custom_keys: vals.bucketAllowCustomKeys ?? false,
+                                            };
+                                            const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+                                            await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
+                                            const results = [];
+                                            for (let i = 0; i < files.length; i++) {
+                                                const result = await processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, files[i]);
+                                                results.push({ ...result, index: i });
+                                            }
+                                            return { files: results };
+                                        });
+                                    });
+                                });
+                            },
+                        }),
+                    }, `PresignedUrlPlugin adding upload fields to ${pgCodec.name}`);
+                },
+                /**
+                 * Wrap delete* mutations on @storageFiles-tagged tables with S3 cleanup.
+                 *
+                 * Pattern: identical to graphile-bucket-provisioner-plugin's create/update hooks.
+                 * 1. Read the file row BEFORE delete (need key + bucket_id for S3 cleanup)
+                 * 2. Call PostGraphile's generated delete (RLS enforced)
+                 * 3. If delete succeeded, check refcount and attempt sync S3 delete
+                 * 4. AFTER DELETE trigger (constructive-db) enqueues async GC job as fallback
+                 */
+                GraphQLObjectType_fields_field(field, build, context) {
+                    const { scope: { isRootMutation, fieldName, pgCodec }, } = context;
+                    if (!isRootMutation || !pgCodec || !pgCodec.attributes) {
+                        return field;
+                    }
+                    const tags = pgCodec.extensions?.tags;
+                    if (!tags?.storageFiles) {
+                        return field;
+                    }
+                    if (!fieldName.startsWith('delete')) {
+                        return field;
+                    }
+                    log.debug(`Wrapping delete mutation "${fieldName}" with S3 cleanup (codec: ${pgCodec.name})`);
+                    const defaultResolver = (obj) => obj[fieldName];
+                    const { resolve: oldResolve = defaultResolver, ...rest } = field;
+                    const capturedCodec = pgCodec;
+                    return {
+                        ...rest,
+                        async resolve(source, args, graphqlContext, info) {
+                            // Extract the file ID from the mutation input
+                            const inputKey = Object.keys(args.input || {}).find((k) => k !== 'clientMutationId');
+                            const fileInput = inputKey ? args.input[inputKey] : null;
+                            let fileRow = null;
+                            if (fileInput) {
+                                // Read the file row BEFORE delete to get the S3 key + bucket_id
+                                const withPgClient = graphqlContext.withPgClient;
+                                const pgSettings = graphqlContext.pgSettings;
+                                if (withPgClient) {
+                                    try {
+                                        await withPgClient(pgSettings, async (pgClient) => {
+                                            const databaseId = await resolveDatabaseId(pgClient);
+                                            if (!databaseId)
+                                                return;
+                                            const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(pgClient, databaseId);
+                                            const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
+                                            if (!storageConfig)
+                                                return;
+                                            // Read the file row (RLS enforced)
+                                            const result = await pgClient.query({
+                                                text: `SELECT key, bucket_id FROM ${storageConfig.filesQualifiedName} WHERE id = $1 LIMIT 1`,
+                                                values: [fileInput],
+                                            });
+                                            if (result.rows.length > 0) {
+                                                fileRow = result.rows[0];
+                                            }
+                                        });
+                                    }
+                                    catch (err) {
+                                        log.warn(`Pre-delete file lookup failed: ${err.message}`);
+                                    }
                                 }
- … (old lines 320-333 not captured by the diff viewer) …
+                            }
+                            // Call PostGraphile's generated delete (RLS enforced)
+                            const result = await oldResolve(source, args, graphqlContext, info);
+                            // Attempt sync S3 cleanup if we have the file row
+                            if (fileRow) {
+                                const withPgClient = graphqlContext.withPgClient;
+                                const pgSettings = graphqlContext.pgSettings;
+                                if (withPgClient) {
+                                    try {
+                                        await withPgClient(pgSettings, async (pgClient) => {
+                                            const databaseId = await resolveDatabaseId(pgClient);
+                                            if (!databaseId)
+                                                return;
+                                            const allConfigs = await (0, storage_module_cache_1.loadAllStorageModules)(pgClient, databaseId);
+                                            const storageConfig = (0, storage_module_cache_1.resolveStorageConfigFromCodec)(capturedCodec, allConfigs);
+                                            if (!storageConfig)
+                                                return;
+                                            // Check refcount: any other file with the same key in this bucket?
+                                            const refResult = await pgClient.query({
+                                                text: `SELECT COUNT(*)::int AS ref_count FROM ${storageConfig.filesQualifiedName} WHERE key = $1 AND bucket_id = $2`,
+                                                values: [fileRow.key, fileRow.bucket_id],
+                                            });
+                                            const refCount = refResult.rows[0]?.ref_count ?? 0;
+                                            if (refCount > 0) {
+                                                log.info(`File deleted from DB; S3 key ${fileRow.key} still referenced by ${refCount} file(s)`);
+                                                return;
+                                            }
+                                            // No other references — attempt sync S3 delete
+                                            const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
+                                            await (0, s3_signer_1.deleteS3Object)(s3ForDb, fileRow.key);
+                                            log.info(`Sync S3 delete succeeded for key=${fileRow.key}`);
+                                        });
+                                    }
+                                    catch (err) {
+                                        // Sync S3 delete failed — the AFTER DELETE trigger has enqueued an async GC job
+                                        log.warn(`Sync S3 delete failed for key=${fileRow.key}; async GC job will retry: ${err.message}`);
+                                    }
                                 }
- … (old lines 335-336 not captured by the diff viewer) …
-            }
-        }
+                            }
+                            return result;
+                        },
+                    };
                 },
             },
         },
-    }
+    };
 }
 // --- Shared upload logic ---
- … (old lines 345-346 not captured by the diff viewer) …
- */
-async function processUpload(options, input, withPgClient, pgSettings) {
-    const { bucketKey, ownerId, contentHash, contentType, size, filename, key: customKey } = input;
-    if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
-        throw new Error('INVALID_BUCKET_KEY');
-    }
+async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
+    const { contentHash, contentType, size, filename, key: customKey } = input;
     if (!contentHash || typeof contentHash !== 'string' || contentHash.length > MAX_CONTENT_HASH_LENGTH) {
         throw new Error('INVALID_CONTENT_HASH');
     }
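
Note: after `requestUploadUrl` resolves, the client PUTs the bytes straight to S3. A minimal sketch of the follow-through; `generatePresignedPutUrl` signs the content type and size, so the assumption here is that the PUT must match what was requested:

```js
// Client-side follow-through for the payload returned by requestUploadUrl.
// deduplicated=true means the object already exists and uploadUrl is null.
async function uploadViaPresignedUrl(payload, bytes, contentType) {
    if (payload.deduplicated)
        return payload.fileId; // nothing to upload
    const res = await fetch(payload.uploadUrl, {
        method: 'PUT',
        headers: { 'Content-Type': contentType }, // assumed to match the signed contentType
        body: bytes,
    });
    if (!res.ok)
        throw new Error(`S3 upload failed: HTTP ${res.status}`);
    return payload.fileId;
}
```
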
@@ -359,51 +511,6 @@ async function processUpload(options, input, withPgClient, pgSettings) {
     if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
         throw new Error('INVALID_CONTENT_TYPE');
     }
-    return withPgClient(pgSettings, async (pgClient) => {
-        return pgClient.withTransaction(async (txClient) => {
-            const databaseId = await resolveDatabaseId(txClient);
-            if (!databaseId) {
-                throw new Error('DATABASE_NOT_FOUND');
-            }
-            const storageConfig = ownerId
-                ? await (0, storage_module_cache_1.getStorageModuleConfigForOwner)(txClient, databaseId, ownerId)
-                : await (0, storage_module_cache_1.getStorageModuleConfig)(txClient, databaseId);
-            if (!storageConfig) {
-                throw new Error(ownerId
-                    ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
-                    : 'STORAGE_MODULE_NOT_PROVISIONED');
-            }
-            if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
-                throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
-            }
-            if (filename !== undefined && filename !== null) {
-                if (typeof filename !== 'string' || filename.length > storageConfig.maxFilenameLength) {
-                    throw new Error('INVALID_FILENAME');
-                }
-            }
-            const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, bucketKey, ownerId);
-            if (!bucket) {
-                throw new Error('BUCKET_NOT_FOUND');
-            }
-            const s3ForDb = resolveS3ForDatabase(options, storageConfig, databaseId);
-            await ensureS3BucketExists(options, s3ForDb.bucket, bucket, databaseId, storageConfig.allowedOrigins);
-            return processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input);
-        });
-    });
-}
-/**
- * Process a single file upload within an already-resolved context.
- * Handles dedup, custom keys, versioning, and auto-path derivation.
- */
-async function processSingleFile(options, txClient, storageConfig, databaseId, bucket, s3ForDb, input) {
-    const { contentHash, contentType, size, filename, key: customKey } = input;
-    // --- Validate inputs ---
-    if (!contentHash || !isValidSha256(contentHash)) {
-        throw new Error('INVALID_CONTENT_HASH_FORMAT: must be a 64-char lowercase hex SHA-256');
-    }
-    if (!contentType || typeof contentType !== 'string' || contentType.length > MAX_CONTENT_TYPE_LENGTH) {
-        throw new Error('INVALID_CONTENT_TYPE');
-    }
     if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
         throw new Error(`INVALID_FILE_SIZE: must be between 1 and ${storageConfig.defaultMaxFileSize} bytes`);
     }
@@ -412,7 +519,7 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
             throw new Error('INVALID_FILENAME');
         }
     }
-    //
+    // Validate content type against bucket's allowed_mime_types
     if (bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) {
         const allowed = bucket.allowed_mime_types;
         const isAllowed = allowed.some((pattern) => {
@@ -428,11 +535,11 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
             throw new Error(`CONTENT_TYPE_NOT_ALLOWED: ${contentType} not in bucket allowed types`);
         }
     }
-    //
+    // Validate size against bucket's max_file_size
     if (bucket.max_file_size && size > bucket.max_file_size) {
         throw new Error(`FILE_TOO_LARGE: exceeds bucket max of ${bucket.max_file_size} bytes`);
     }
-    //
+    // Determine S3 key
     let s3Key;
     let isCustomKey = false;
     if (customKey) {
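
Note: the matcher body passed to `allowed.some((pattern) => …)` falls outside the captured hunks, so its exact semantics are not visible here. A plausible wildcard check, offered strictly as an assumption:

```js
// Assumed shape of the allowed_mime_types check — the real matcher's body is
// not captured in this diff; illustrative only.
function mimeTypeAllowed(contentType, allowed) {
    return allowed.some((pattern) => pattern.endsWith('/*')
        ? contentType.startsWith(pattern.slice(0, -1)) // 'image/*' matches 'image/png'
        : contentType === pattern);
}
```
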
@@ -449,11 +556,9 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
     else {
         s3Key = buildS3Key(contentHash);
     }
-    //
+    // Dedup / versioning check
     let previousVersionId = null;
     if (isCustomKey) {
-        // Custom key mode: check if a file with this key already exists in this bucket.
-        // If so, auto-version by linking via previous_version_id.
         const existingResult = await txClient.query({
             text: `SELECT id, content_hash
                    FROM ${storageConfig.filesQualifiedName}
@@ -465,7 +570,6 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
         });
         if (existingResult.rows.length > 0) {
             const existing = existingResult.rows[0];
-            // Same content hash = true dedup (no new upload needed)
             if (existing.content_hash === contentHash) {
                 log.info(`Dedup hit (custom key): file ${existing.id} for key ${s3Key}`);
                 return {
@@ -477,13 +581,11 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
                     previousVersionId: null,
                 };
             }
-            // Different content = new version
            previousVersionId = existing.id;
             log.info(`Versioning: new version of key ${s3Key}, previous=${previousVersionId}`);
         }
     }
     else {
-        // Hash-based mode: dedup by content_hash in this bucket
         const dedupResult = await txClient.query({
             text: `SELECT id
                    FROM ${storageConfig.filesQualifiedName}
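
Note: the two custom-key branches above yield observably different payloads. Worked outcomes, with illustrative values:

```js
// Illustrative payloads (values are examples, not captured output).
const dedupHit = {   // same key + same content hash → no new upload
    uploadUrl: null, fileId: '<existing id>', key: 'reports/2024/q1.pdf',
    deduplicated: true, expiresAt: null, previousVersionId: null,
};
const newVersion = { // same key + different content hash → auto-version
    uploadUrl: '<presigned PUT URL>', fileId: '<new id>', key: 'reports/2024/q1.pdf',
    deduplicated: false, expiresAt: '<ISO timestamp>', previousVersionId: '<existing id>',
};
```
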
@@ -505,27 +607,23 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
             };
         }
     }
-    //
+    // Auto-derive ltree path from custom key directory (only when has_path_shares)
     const derivedPath = isCustomKey && storageConfig.hasPathShares ? derivePathFromKey(s3Key) : null;
-    //
+    // Create file record
     const hasOwnerColumn = storageConfig.membershipType !== null;
     const columns = ['bucket_id', 'key', 'content_hash', 'mime_type', 'size', 'filename', 'is_public'];
     const values = [bucket.id, s3Key, contentHash, contentType, size, filename || null, bucket.is_public];
-    let paramIdx = values.length;
     if (hasOwnerColumn) {
         columns.push('owner_id');
         values.push(bucket.owner_id);
-        paramIdx = values.length;
     }
     if (previousVersionId) {
         columns.push('previous_version_id');
         values.push(previousVersionId);
-        paramIdx = values.length;
     }
     if (derivedPath) {
         columns.push('path');
         values.push(derivedPath);
-        paramIdx = values.length;
     }
     const placeholders = values.map((_, i) => `$${i + 1}`).join(', ');
     const fileResult = await txClient.query({
@@ -536,7 +634,7 @@ async function processSingleFile(options, txClient, storageConfig, databaseId, b
         values,
     });
     const fileId = fileResult.rows[0].id;
-    //
+    // Generate presigned PUT URL
    const uploadUrl = await (0, s3_signer_1.generatePresignedPutUrl)(s3ForDb, s3Key, contentType, size, storageConfig.uploadUrlExpirySeconds);
     const expiresAt = new Date(Date.now() + storageConfig.uploadUrlExpirySeconds * 1000).toISOString();
     return {