graphile-presigned-url-plugin 0.4.1 → 0.6.0

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/esm/index.d.ts CHANGED
@@ -29,6 +29,6 @@
29
29
  export { PresignedUrlPlugin, createPresignedUrlPlugin } from './plugin';
30
30
  export { createDownloadUrlPlugin } from './download-url-field';
31
31
  export { PresignedUrlPreset } from './preset';
32
- export { getStorageModuleConfig, getBucketConfig, clearStorageModuleCache, clearBucketCache, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
32
+ export { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, resolveStorageModuleByFileId, clearStorageModuleCache, clearBucketCache, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
33
33
  export { generatePresignedPutUrl, generatePresignedGetUrl, headObject } from './s3-signer';
34
34
  export type { BucketConfig, StorageModuleConfig, RequestUploadUrlInput, RequestUploadUrlPayload, ConfirmUploadInput, ConfirmUploadPayload, S3Config, S3ConfigOrGetter, PresignedUrlPluginOptions, BucketNameResolver, EnsureBucketProvisioned, } from './types';
package/esm/index.js CHANGED
@@ -29,5 +29,5 @@
29
29
  export { PresignedUrlPlugin, createPresignedUrlPlugin } from './plugin';
30
30
  export { createDownloadUrlPlugin } from './download-url-field';
31
31
  export { PresignedUrlPreset } from './preset';
32
- export { getStorageModuleConfig, getBucketConfig, clearStorageModuleCache, clearBucketCache, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
32
+ export { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, resolveStorageModuleByFileId, clearStorageModuleCache, clearBucketCache, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
33
33
  export { generatePresignedPutUrl, generatePresignedGetUrl, headObject } from './s3-signer';
package/esm/plugin.js CHANGED
@@ -19,7 +19,7 @@
19
19
  import { context as grafastContext, lambda, object } from 'grafast';
20
20
  import { extendSchema, gql } from 'graphile-utils';
21
21
  import { Logger } from '@pgpmjs/logger';
22
- import { getStorageModuleConfig, getBucketConfig, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
22
+ import { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, resolveStorageModuleByFileId, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
23
23
  import { generatePresignedPutUrl, headObject } from './s3-signer';
24
24
  const log = new Logger('graphile-presigned-url:plugin');
25
25
  // --- Protocol-level constants (not configurable) ---
@@ -117,6 +117,13 @@ export function createPresignedUrlPlugin(options) {
117
117
  input RequestUploadUrlInput {
118
118
  """Bucket key (e.g., "public", "private")"""
119
119
  bucketKey: String!
120
+ """
121
+ Owner entity ID for entity-scoped uploads.
122
+ Omit for app-level (database-wide) storage.
123
+ When provided, resolves the storage module for the entity type
124
+ that owns this entity instance (e.g., a data room ID, team ID).
125
+ """
126
+ ownerId: UUID
120
127
  """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
121
128
  contentHash: String!
122
129
  """MIME type of the file (e.g., "image/png")"""
@@ -188,7 +195,7 @@ export function createPresignedUrlPlugin(options) {
188
195
  });
189
196
  return lambda($combined, async ({ input, withPgClient, pgSettings }) => {
190
197
  // --- Input validation ---
191
- const { bucketKey, contentHash, contentType, size, filename } = input;
198
+ const { bucketKey, ownerId, contentHash, contentType, size, filename } = input;
192
199
  if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
193
200
  throw new Error('INVALID_BUCKET_KEY');
194
201
  }
@@ -208,9 +215,14 @@ export function createPresignedUrlPlugin(options) {
208
215
  if (!databaseId) {
209
216
  throw new Error('DATABASE_NOT_FOUND');
210
217
  }
211
- const storageConfig = await getStorageModuleConfig(txClient, databaseId);
218
+ // --- Resolve storage module (app-level or entity-scoped) ---
219
+ const storageConfig = ownerId
220
+ ? await getStorageModuleConfigForOwner(txClient, databaseId, ownerId)
221
+ : await getStorageModuleConfig(txClient, databaseId);
212
222
  if (!storageConfig) {
213
- throw new Error('STORAGE_MODULE_NOT_PROVISIONED');
223
+ throw new Error(ownerId
224
+ ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
225
+ : 'STORAGE_MODULE_NOT_PROVISIONED');
214
226
  }
215
227
  // --- Validate size against storage module default (bucket override checked below) ---
216
228
  if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
@@ -222,7 +234,7 @@ export function createPresignedUrlPlugin(options) {
222
234
  }
223
235
  }
224
236
  // --- Look up the bucket (cached; first miss queries via RLS) ---
225
- const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey);
237
+ const bucket = await getBucketConfig(txClient, storageConfig, databaseId, bucketKey, ownerId);
226
238
  if (!bucket) {
227
239
  throw new Error('BUCKET_NOT_FOUND');
228
240
  }
@@ -276,21 +288,38 @@ export function createPresignedUrlPlugin(options) {
276
288
  };
277
289
  }
278
290
  // --- Create file record (status=pending) ---
291
+ // For app-level storage (no owner_id column), omit owner_id from the INSERT.
292
+ const hasOwnerColumn = storageConfig.membershipType !== null;
279
293
  const fileResult = await txClient.query({
280
- text: `INSERT INTO ${storageConfig.filesQualifiedName}
281
- (bucket_id, key, content_type, content_hash, size, filename, owner_id, is_public, status)
282
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'pending')
283
- RETURNING id`,
284
- values: [
285
- bucket.id,
286
- s3Key,
287
- contentType,
288
- contentHash,
289
- size,
290
- filename || null,
291
- bucket.owner_id,
292
- bucket.is_public,
293
- ],
294
+ text: hasOwnerColumn
295
+ ? `INSERT INTO ${storageConfig.filesQualifiedName}
296
+ (bucket_id, key, content_type, content_hash, size, filename, owner_id, is_public, status)
297
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'pending')
298
+ RETURNING id`
299
+ : `INSERT INTO ${storageConfig.filesQualifiedName}
300
+ (bucket_id, key, content_type, content_hash, size, filename, is_public, status)
301
+ VALUES ($1, $2, $3, $4, $5, $6, $7, 'pending')
302
+ RETURNING id`,
303
+ values: hasOwnerColumn
304
+ ? [
305
+ bucket.id,
306
+ s3Key,
307
+ contentType,
308
+ contentHash,
309
+ size,
310
+ filename || null,
311
+ bucket.owner_id,
312
+ bucket.is_public,
313
+ ]
314
+ : [
315
+ bucket.id,
316
+ s3Key,
317
+ contentType,
318
+ contentHash,
319
+ size,
320
+ filename || null,
321
+ bucket.is_public,
322
+ ],
294
323
  });
295
324
  const fileId = fileResult.rows[0].id;
296
325
  // --- Ensure the S3 bucket exists (lazy provisioning) ---
@@ -333,27 +362,16 @@ export function createPresignedUrlPlugin(options) {
333
362
  }
334
363
  return withPgClient(pgSettings, async (pgClient) => {
335
364
  return pgClient.withTransaction(async (txClient) => {
336
- // --- Resolve storage module config ---
365
+ // --- Resolve storage module by file ID (probes all file tables) ---
337
366
  const databaseId = await resolveDatabaseId(txClient);
338
367
  if (!databaseId) {
339
368
  throw new Error('DATABASE_NOT_FOUND');
340
369
  }
341
- const storageConfig = await getStorageModuleConfig(txClient, databaseId);
342
- if (!storageConfig) {
343
- throw new Error('STORAGE_MODULE_NOT_PROVISIONED');
344
- }
345
- // --- Look up the file (RLS enforced) ---
346
- const fileResult = await txClient.query({
347
- text: `SELECT id, key, content_type, status, bucket_id
348
- FROM ${storageConfig.filesQualifiedName}
349
- WHERE id = $1
350
- LIMIT 1`,
351
- values: [fileId],
352
- });
353
- if (fileResult.rows.length === 0) {
370
+ const resolved = await resolveStorageModuleByFileId(txClient, databaseId, fileId);
371
+ if (!resolved) {
354
372
  throw new Error('FILE_NOT_FOUND');
355
373
  }
356
- const file = fileResult.rows[0];
374
+ const { storageConfig, file } = resolved;
357
375
  if (file.status !== 'pending') {
358
376
  // File is already confirmed or processed — idempotent success
359
377
  return {
@@ -1,6 +1,9 @@
1
1
  import type { StorageModuleConfig, BucketConfig } from './types';
2
2
  /**
3
- * Resolve the storage module config for a database, using the LRU cache.
3
+ * Resolve the app-level storage module config for a database, using the LRU cache.
4
+ *
5
+ * This is the default path when no ownerId is provided. It returns the
6
+ * storage module with membership_type IS NULL (app-level / database-wide).
4
7
  *
5
8
  * @param pgClient - A pg client from the Graphile context (withPgClient or pgClient)
6
9
  * @param databaseId - The metaschema database UUID
@@ -14,6 +17,57 @@ export declare function getStorageModuleConfig(pgClient: {
14
17
  rows: unknown[];
15
18
  }>;
16
19
  }, databaseId: string): Promise<StorageModuleConfig | null>;
20
+ /**
21
+ * Resolve the storage module config for a specific owner entity.
22
+ *
23
+ * When ownerId is provided, this function:
24
+ * 1. Loads ALL storage modules for the database (cached)
25
+ * 2. Finds which entity-scoped module contains the ownerId in its entity table
26
+ * 3. Returns that module's config
27
+ *
28
+ * This is the core of Option C — the ownerId tells us which scope to use.
29
+ *
30
+ * @param pgClient - A pg client from the Graphile context
31
+ * @param databaseId - The metaschema database UUID
32
+ * @param ownerId - The entity instance UUID (e.g., a data room ID, team ID)
33
+ * @returns StorageModuleConfig or null if no matching module found
34
+ */
35
+ export declare function getStorageModuleConfigForOwner(pgClient: {
36
+ query: (opts: {
37
+ text: string;
38
+ values?: unknown[];
39
+ }) => Promise<{
40
+ rows: unknown[];
41
+ }>;
42
+ }, databaseId: string, ownerId: string): Promise<StorageModuleConfig | null>;
43
+ /**
44
+ * Resolve the storage module that owns a specific file by probing all file tables.
45
+ *
46
+ * Used by confirmUpload when only a fileId (UUID) is available.
47
+ * Since UUIDs are globally unique, exactly one table will contain the file.
48
+ *
49
+ * @param pgClient - A pg client from the Graphile context
50
+ * @param databaseId - The metaschema database UUID
51
+ * @param fileId - The file UUID to look up
52
+ * @returns Object with the storage config and file row, or null if not found
53
+ */
54
+ export declare function resolveStorageModuleByFileId(pgClient: {
55
+ query: (opts: {
56
+ text: string;
57
+ values?: unknown[];
58
+ }) => Promise<{
59
+ rows: unknown[];
60
+ }>;
61
+ }, databaseId: string, fileId: string): Promise<{
62
+ storageConfig: StorageModuleConfig;
63
+ file: {
64
+ id: string;
65
+ key: string;
66
+ content_type: string;
67
+ status: string;
68
+ bucket_id: string;
69
+ };
70
+ } | null>;
17
71
  /**
18
72
  * Resolve bucket metadata for a given database + bucket key, using the LRU cache.
19
73
  *
@@ -21,9 +75,10 @@ export declare function getStorageModuleConfig(pgClient: {
21
75
  * the pgClient). On cache hit, returns the cached metadata directly.
22
76
  *
23
77
  * @param pgClient - A pg client from the Graphile context
24
- * @param storageConfig - The resolved StorageModuleConfig for this database
78
+ * @param storageConfig - The resolved StorageModuleConfig for this database/scope
25
79
  * @param databaseId - The metaschema database UUID (used as cache key prefix)
26
80
  * @param bucketKey - The bucket key (e.g., "public", "private")
81
+ * @param ownerId - Optional owner entity ID for entity-scoped bucket lookup
27
82
  * @returns BucketConfig or null if the bucket doesn't exist / isn't accessible
28
83
  */
29
84
  export declare function getBucketConfig(pgClient: {
@@ -33,7 +88,7 @@ export declare function getBucketConfig(pgClient: {
33
88
  }) => Promise<{
34
89
  rows: unknown[];
35
90
  }>;
36
- }, storageConfig: StorageModuleConfig, databaseId: string, bucketKey: string): Promise<BucketConfig | null>;
91
+ }, storageConfig: StorageModuleConfig, databaseId: string, bucketKey: string, ownerId?: string): Promise<BucketConfig | null>;
37
92
  /**
38
93
  * Check whether an S3 bucket has already been provisioned (cached).
39
94
  */
@@ -1,5 +1,6 @@
1
1
  import { Logger } from '@pgpmjs/logger';
2
2
  import { LRUCache } from 'lru-cache';
3
+ import { QuoteUtils } from '@pgsql/quotes';
3
4
  const log = new Logger('graphile-presigned-url:cache');
4
5
  // --- Defaults ---
5
6
  const DEFAULT_UPLOAD_URL_EXPIRY_SECONDS = 900; // 15 minutes
@@ -25,13 +26,18 @@ const storageModuleCache = new LRUCache({
25
26
  updateAgeOnGet: true,
26
27
  });
27
28
  /**
28
- * SQL query to resolve storage module config for a database.
29
+ * SQL query to resolve the app-level storage module config for a database.
29
30
  *
30
31
  * Joins storage_module → table → schema to get fully-qualified table names.
32
+ * Filters to app-level (membership_type IS NULL) by default.
33
+ *
34
+ * Requires the multi-scope schema (membership_type column on storage_module).
31
35
  */
32
- const STORAGE_MODULE_QUERY = `
36
+ const APP_STORAGE_MODULE_QUERY = `
33
37
  SELECT
34
38
  sm.id,
39
+ sm.membership_type,
40
+ sm.entity_table_id,
35
41
  bs.schema_name AS buckets_schema,
36
42
  bt.name AS buckets_table,
37
43
  fs.schema_name AS files_schema,
@@ -46,7 +52,9 @@ const STORAGE_MODULE_QUERY = `
46
52
  sm.download_url_expiry_seconds,
47
53
  sm.default_max_file_size,
48
54
  sm.max_filename_length,
49
- sm.cache_ttl_seconds
55
+ sm.cache_ttl_seconds,
56
+ NULL AS entity_schema,
57
+ NULL AS entity_table
50
58
  FROM metaschema_modules_public.storage_module sm
51
59
  JOIN metaschema_public.table bt ON bt.id = sm.buckets_table_id
52
60
  JOIN metaschema_public.schema bs ON bs.id = bt.schema_id
@@ -55,38 +63,67 @@ const STORAGE_MODULE_QUERY = `
55
63
  JOIN metaschema_public.table urt ON urt.id = sm.upload_requests_table_id
56
64
  JOIN metaschema_public.schema urs ON urs.id = urt.schema_id
57
65
  WHERE sm.database_id = $1
66
+ AND sm.membership_type IS NULL
58
67
  LIMIT 1
59
68
  `;
60
69
  /**
61
- * Resolve the storage module config for a database, using the LRU cache.
70
+ * SQL query to resolve ALL storage modules for a database (app-level + entity-scoped).
62
71
  *
63
- * @param pgClient - A pg client from the Graphile context (withPgClient or pgClient)
64
- * @param databaseId - The metaschema database UUID
65
- * @returns StorageModuleConfig or null if no storage module is provisioned
72
+ * Returns all storage modules with their entity table names for ownerId resolution.
73
+ * Requires the multi-scope schema.
66
74
  */
67
- export async function getStorageModuleConfig(pgClient, databaseId) {
68
- const cacheKey = `storage:${databaseId}`;
69
- const cached = storageModuleCache.get(cacheKey);
70
- if (cached) {
71
- return cached;
72
- }
73
- log.debug(`Cache miss for database ${databaseId}, querying metaschema...`);
74
- const result = await pgClient.query({ text: STORAGE_MODULE_QUERY, values: [databaseId] });
75
- if (result.rows.length === 0) {
76
- log.warn(`No storage module found for database ${databaseId}`);
77
- return null;
78
- }
79
- const row = result.rows[0];
75
+ const ALL_STORAGE_MODULES_QUERY = `
76
+ SELECT
77
+ sm.id,
78
+ sm.membership_type,
79
+ sm.entity_table_id,
80
+ bs.schema_name AS buckets_schema,
81
+ bt.name AS buckets_table,
82
+ fs.schema_name AS files_schema,
83
+ ft.name AS files_table,
84
+ urs.schema_name AS upload_requests_schema,
85
+ urt.name AS upload_requests_table,
86
+ sm.endpoint,
87
+ sm.public_url_prefix,
88
+ sm.provider,
89
+ sm.allowed_origins,
90
+ sm.upload_url_expiry_seconds,
91
+ sm.download_url_expiry_seconds,
92
+ sm.default_max_file_size,
93
+ sm.max_filename_length,
94
+ sm.cache_ttl_seconds,
95
+ es.schema_name AS entity_schema,
96
+ et.name AS entity_table
97
+ FROM metaschema_modules_public.storage_module sm
98
+ JOIN metaschema_public.table bt ON bt.id = sm.buckets_table_id
99
+ JOIN metaschema_public.schema bs ON bs.id = bt.schema_id
100
+ JOIN metaschema_public.table ft ON ft.id = sm.files_table_id
101
+ JOIN metaschema_public.schema fs ON fs.id = ft.schema_id
102
+ JOIN metaschema_public.table urt ON urt.id = sm.upload_requests_table_id
103
+ JOIN metaschema_public.schema urs ON urs.id = urt.schema_id
104
+ LEFT JOIN metaschema_public.table et ON et.id = sm.entity_table_id
105
+ LEFT JOIN metaschema_public.schema es ON es.id = et.schema_id
106
+ WHERE sm.database_id = $1
107
+ `;
108
+ /**
109
+ * Build a StorageModuleConfig from a raw DB row.
110
+ */
111
+ function buildConfig(row) {
80
112
  const cacheTtlSeconds = row.cache_ttl_seconds ?? DEFAULT_CACHE_TTL_SECONDS;
81
- const config = {
113
+ return {
82
114
  id: row.id,
83
- bucketsQualifiedName: `"${row.buckets_schema}"."${row.buckets_table}"`,
84
- filesQualifiedName: `"${row.files_schema}"."${row.files_table}"`,
85
- uploadRequestsQualifiedName: `"${row.upload_requests_schema}"."${row.upload_requests_table}"`,
115
+ bucketsQualifiedName: QuoteUtils.quoteQualifiedIdentifier(row.buckets_schema, row.buckets_table),
116
+ filesQualifiedName: QuoteUtils.quoteQualifiedIdentifier(row.files_schema, row.files_table),
117
+ uploadRequestsQualifiedName: QuoteUtils.quoteQualifiedIdentifier(row.upload_requests_schema, row.upload_requests_table),
86
118
  schemaName: row.buckets_schema,
87
119
  bucketsTableName: row.buckets_table,
88
120
  filesTableName: row.files_table,
89
121
  uploadRequestsTableName: row.upload_requests_table,
122
+ membershipType: row.membership_type,
123
+ entityTableId: row.entity_table_id,
124
+ entityQualifiedName: row.entity_schema && row.entity_table
125
+ ? QuoteUtils.quoteQualifiedIdentifier(row.entity_schema, row.entity_table)
126
+ : null,
90
127
  endpoint: row.endpoint,
91
128
  publicUrlPrefix: row.public_url_prefix,
92
129
  provider: row.provider,
@@ -97,10 +134,129 @@ export async function getStorageModuleConfig(pgClient, databaseId) {
97
134
  maxFilenameLength: row.max_filename_length ?? DEFAULT_MAX_FILENAME_LENGTH,
98
135
  cacheTtlSeconds,
99
136
  };
137
+ }
138
+ /**
139
+ * Resolve the app-level storage module config for a database, using the LRU cache.
140
+ *
141
+ * This is the default path when no ownerId is provided. It returns the
142
+ * storage module with membership_type IS NULL (app-level / database-wide).
143
+ *
144
+ * @param pgClient - A pg client from the Graphile context (withPgClient or pgClient)
145
+ * @param databaseId - The metaschema database UUID
146
+ * @returns StorageModuleConfig or null if no storage module is provisioned
147
+ */
148
+ export async function getStorageModuleConfig(pgClient, databaseId) {
149
+ const cacheKey = `storage:${databaseId}:app`;
150
+ const cached = storageModuleCache.get(cacheKey);
151
+ if (cached) {
152
+ return cached;
153
+ }
154
+ log.debug(`Cache miss for app-level storage in database ${databaseId}, querying metaschema...`);
155
+ const result = await pgClient.query({ text: APP_STORAGE_MODULE_QUERY, values: [databaseId] });
156
+ if (result.rows.length === 0) {
157
+ log.warn(`No app-level storage module found for database ${databaseId}`);
158
+ return null;
159
+ }
160
+ const config = buildConfig(result.rows[0]);
100
161
  storageModuleCache.set(cacheKey, config);
101
- log.debug(`Cached storage config for database ${databaseId}: ${config.bucketsQualifiedName}`);
162
+ log.debug(`Cached app-level storage config for database ${databaseId}: ${config.bucketsQualifiedName}`);
102
163
  return config;
103
164
  }
165
+ /**
166
+ * Resolve the storage module config for a specific owner entity.
167
+ *
168
+ * When ownerId is provided, this function:
169
+ * 1. Loads ALL storage modules for the database (cached)
170
+ * 2. Finds which entity-scoped module contains the ownerId in its entity table
171
+ * 3. Returns that module's config
172
+ *
173
+ * This is the core of Option C — the ownerId tells us which scope to use.
174
+ *
175
+ * @param pgClient - A pg client from the Graphile context
176
+ * @param databaseId - The metaschema database UUID
177
+ * @param ownerId - The entity instance UUID (e.g., a data room ID, team ID)
178
+ * @returns StorageModuleConfig or null if no matching module found
179
+ */
180
+ export async function getStorageModuleConfigForOwner(pgClient, databaseId, ownerId) {
181
+ // Check if we already have a cached mapping for this ownerId
182
+ const ownerCacheKey = `storage:${databaseId}:owner:${ownerId}`;
183
+ const cachedOwner = storageModuleCache.get(ownerCacheKey);
184
+ if (cachedOwner) {
185
+ return cachedOwner;
186
+ }
187
+ // Load all storage modules for this database
188
+ const allModulesCacheKey = `storage:${databaseId}:all`;
189
+ let allConfigs;
190
+ const cachedAll = storageModuleCache.get(allModulesCacheKey);
191
+ if (cachedAll) {
192
+ // We stored a sentinel; re-derive from individual caches
193
+ // Actually, let's just query fresh — this is the cache-miss path
194
+ allConfigs = [];
195
+ }
196
+ else {
197
+ allConfigs = [];
198
+ }
199
+ if (allConfigs.length === 0) {
200
+ log.debug(`Loading all storage modules for database ${databaseId} to resolve ownerId ${ownerId}`);
201
+ const result = await pgClient.query({ text: ALL_STORAGE_MODULES_QUERY, values: [databaseId] });
202
+ allConfigs = result.rows.map(buildConfig);
203
+ // Cache each individual config by its membership type
204
+ for (const config of allConfigs) {
205
+ const key = config.membershipType === null
206
+ ? `storage:${databaseId}:app`
207
+ : `storage:${databaseId}:mt:${config.membershipType}`;
208
+ storageModuleCache.set(key, config);
209
+ }
210
+ }
211
+ // Find entity-scoped modules and probe their entity tables for the ownerId
212
+ const entityModules = allConfigs.filter((c) => c.entityQualifiedName !== null);
213
+ for (const mod of entityModules) {
214
+ const probeResult = await pgClient.query({
215
+ text: `SELECT 1 FROM ${mod.entityQualifiedName} WHERE id = $1 LIMIT 1`,
216
+ values: [ownerId],
217
+ });
218
+ if (probeResult.rows.length > 0) {
219
+ // Found the matching module — cache the ownerId→module mapping
220
+ storageModuleCache.set(ownerCacheKey, mod);
221
+ log.debug(`Resolved ownerId ${ownerId} to storage module ${mod.id} ` +
222
+ `(membershipType=${mod.membershipType}, table=${mod.bucketsQualifiedName})`);
223
+ return mod;
224
+ }
225
+ }
226
+ log.warn(`No entity-scoped storage module found for ownerId ${ownerId} in database ${databaseId}`);
227
+ return null;
228
+ }
229
+ /**
230
+ * Resolve the storage module that owns a specific file by probing all file tables.
231
+ *
232
+ * Used by confirmUpload when only a fileId (UUID) is available.
233
+ * Since UUIDs are globally unique, exactly one table will contain the file.
234
+ *
235
+ * @param pgClient - A pg client from the Graphile context
236
+ * @param databaseId - The metaschema database UUID
237
+ * @param fileId - The file UUID to look up
238
+ * @returns Object with the storage config and file row, or null if not found
239
+ */
240
+ export async function resolveStorageModuleByFileId(pgClient, databaseId, fileId) {
241
+ // Load all storage modules for this database
242
+ log.debug(`Resolving file ${fileId} across all storage modules for database ${databaseId}`);
243
+ const allConfigs = (await pgClient.query({ text: ALL_STORAGE_MODULES_QUERY, values: [databaseId] })).rows.map((row) => buildConfig(row));
244
+ // Probe each module's files table for the fileId
245
+ for (const config of allConfigs) {
246
+ const fileResult = await pgClient.query({
247
+ text: `SELECT id, key, content_type, status, bucket_id
248
+ FROM ${config.filesQualifiedName}
249
+ WHERE id = $1
250
+ LIMIT 1`,
251
+ values: [fileId],
252
+ });
253
+ if (fileResult.rows.length > 0) {
254
+ const file = fileResult.rows[0];
255
+ return { storageConfig: config, file };
256
+ }
257
+ }
258
+ return null;
259
+ }
104
260
  // --- Bucket metadata cache ---
105
261
  /**
106
262
  * LRU cache for per-database bucket metadata.
@@ -113,7 +269,7 @@ export async function getStorageModuleConfig(pgClient, databaseId) {
113
269
  * is safe. The important RLS is on the files table (INSERT/UPDATE),
114
270
  * which is never cached.
115
271
  *
116
- * Keys: `bucket:${databaseId}:${bucketKey}`
272
+ * Keys: `bucket:${databaseId}:${storageModuleId}:${bucketKey}`
117
273
  * TTL: same as storage module cache (5min dev / 1hr prod)
118
274
  */
119
275
  const bucketCache = new LRUCache({
@@ -128,24 +284,33 @@ const bucketCache = new LRUCache({
128
284
  * the pgClient). On cache hit, returns the cached metadata directly.
129
285
  *
130
286
  * @param pgClient - A pg client from the Graphile context
131
- * @param storageConfig - The resolved StorageModuleConfig for this database
287
+ * @param storageConfig - The resolved StorageModuleConfig for this database/scope
132
288
  * @param databaseId - The metaschema database UUID (used as cache key prefix)
133
289
  * @param bucketKey - The bucket key (e.g., "public", "private")
290
+ * @param ownerId - Optional owner entity ID for entity-scoped bucket lookup
134
291
  * @returns BucketConfig or null if the bucket doesn't exist / isn't accessible
135
292
  */
136
- export async function getBucketConfig(pgClient, storageConfig, databaseId, bucketKey) {
137
- const cacheKey = `bucket:${databaseId}:${bucketKey}`;
293
+ export async function getBucketConfig(pgClient, storageConfig, databaseId, bucketKey, ownerId) {
294
+ const cacheKey = `bucket:${databaseId}:${storageConfig.id}:${bucketKey}${ownerId ? `:${ownerId}` : ''}`;
138
295
  const cached = bucketCache.get(cacheKey);
139
296
  if (cached) {
140
297
  return cached;
141
298
  }
142
- log.debug(`Bucket cache miss for ${databaseId}:${bucketKey}, querying DB...`);
299
+ log.debug(`Bucket cache miss for ${databaseId}:${bucketKey}${ownerId ? ` (owner=${ownerId})` : ''}, querying DB...`);
300
+ // Entity-scoped buckets use (owner_id, key) composite lookup;
301
+ // app-level buckets just use key.
302
+ const hasOwner = ownerId && storageConfig.membershipType !== null;
143
303
  const result = await pgClient.query({
144
- text: `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size
145
- FROM ${storageConfig.bucketsQualifiedName}
146
- WHERE key = $1
147
- LIMIT 1`,
148
- values: [bucketKey],
304
+ text: hasOwner
305
+ ? `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size
306
+ FROM ${storageConfig.bucketsQualifiedName}
307
+ WHERE key = $1 AND owner_id = $2
308
+ LIMIT 1`
309
+ : `SELECT id, key, type, is_public, ${storageConfig.membershipType !== null ? 'owner_id,' : ''} allowed_mime_types, max_file_size
310
+ FROM ${storageConfig.bucketsQualifiedName}
311
+ WHERE key = $1
312
+ LIMIT 1`,
313
+ values: hasOwner ? [bucketKey, ownerId] : [bucketKey],
149
314
  });
150
315
  if (result.rows.length === 0) {
151
316
  return null;
@@ -156,12 +321,12 @@ export async function getBucketConfig(pgClient, storageConfig, databaseId, bucke
156
321
  key: row.key,
157
322
  type: row.type,
158
323
  is_public: row.is_public,
159
- owner_id: row.owner_id,
324
+ owner_id: row.owner_id ?? null,
160
325
  allowed_mime_types: row.allowed_mime_types,
161
326
  max_file_size: row.max_file_size,
162
327
  };
163
328
  bucketCache.set(cacheKey, config);
164
- log.debug(`Cached bucket config for ${databaseId}:${bucketKey} (id=${config.id})`);
329
+ log.debug(`Cached bucket config for ${databaseId}:${bucketKey} (id=${config.id}, scope=${storageConfig.membershipType ?? 'app'})`);
165
330
  return config;
166
331
  }
167
332
  // --- S3 bucket existence cache ---
package/esm/types.d.ts CHANGED
@@ -7,7 +7,7 @@ export interface BucketConfig {
7
7
  key: string;
8
8
  type: 'public' | 'private' | 'temp';
9
9
  is_public: boolean;
10
- owner_id: string;
10
+ owner_id: string | null;
11
11
  allowed_mime_types: string[] | null;
12
12
  max_file_size: number | null;
13
13
  }
@@ -31,6 +31,12 @@ export interface StorageModuleConfig {
31
31
  filesTableName: string;
32
32
  /** Upload requests table name */
33
33
  uploadRequestsTableName: string;
34
+ /** Membership type (NULL for app-level, non-NULL for entity-scoped) */
35
+ membershipType: number | null;
36
+ /** Entity table ID for entity-scoped storage (NULL for app-level) */
37
+ entityTableId: string | null;
38
+ /** Qualified entity table name for ownerId lookups (NULL for app-level) */
39
+ entityQualifiedName: string | null;
34
40
  /** S3-compatible API endpoint URL (per-database override) */
35
41
  endpoint: string | null;
36
42
  /** Public URL prefix for generating download URLs (per-database override) */
@@ -56,6 +62,13 @@ export interface StorageModuleConfig {
56
62
  export interface RequestUploadUrlInput {
57
63
  /** Bucket key (e.g., "public", "private") */
58
64
  bucketKey: string;
65
+ /**
66
+ * Owner entity ID for entity-scoped uploads.
67
+ * Omit for app-level (database-wide) storage.
68
+ * When provided, resolves the storage module for the entity type
69
+ * that owns this entity instance (e.g., a data room ID, team ID).
70
+ */
71
+ ownerId?: string;
59
72
  /** SHA-256 content hash computed by the client */
60
73
  contentHash: string;
61
74
  /** MIME type of the file */
package/index.d.ts CHANGED
@@ -29,6 +29,6 @@
29
29
  export { PresignedUrlPlugin, createPresignedUrlPlugin } from './plugin';
30
30
  export { createDownloadUrlPlugin } from './download-url-field';
31
31
  export { PresignedUrlPreset } from './preset';
32
- export { getStorageModuleConfig, getBucketConfig, clearStorageModuleCache, clearBucketCache, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
32
+ export { getStorageModuleConfig, getStorageModuleConfigForOwner, getBucketConfig, resolveStorageModuleByFileId, clearStorageModuleCache, clearBucketCache, isS3BucketProvisioned, markS3BucketProvisioned } from './storage-module-cache';
33
33
  export { generatePresignedPutUrl, generatePresignedGetUrl, headObject } from './s3-signer';
34
34
  export type { BucketConfig, StorageModuleConfig, RequestUploadUrlInput, RequestUploadUrlPayload, ConfirmUploadInput, ConfirmUploadPayload, S3Config, S3ConfigOrGetter, PresignedUrlPluginOptions, BucketNameResolver, EnsureBucketProvisioned, } from './types';
package/index.js CHANGED
@@ -28,7 +28,7 @@
28
28
  * ```
29
29
  */
30
30
  Object.defineProperty(exports, "__esModule", { value: true });
31
- exports.headObject = exports.generatePresignedGetUrl = exports.generatePresignedPutUrl = exports.markS3BucketProvisioned = exports.isS3BucketProvisioned = exports.clearBucketCache = exports.clearStorageModuleCache = exports.getBucketConfig = exports.getStorageModuleConfig = exports.PresignedUrlPreset = exports.createDownloadUrlPlugin = exports.createPresignedUrlPlugin = exports.PresignedUrlPlugin = void 0;
31
+ exports.headObject = exports.generatePresignedGetUrl = exports.generatePresignedPutUrl = exports.markS3BucketProvisioned = exports.isS3BucketProvisioned = exports.clearBucketCache = exports.clearStorageModuleCache = exports.resolveStorageModuleByFileId = exports.getBucketConfig = exports.getStorageModuleConfigForOwner = exports.getStorageModuleConfig = exports.PresignedUrlPreset = exports.createDownloadUrlPlugin = exports.createPresignedUrlPlugin = exports.PresignedUrlPlugin = void 0;
32
32
  var plugin_1 = require("./plugin");
33
33
  Object.defineProperty(exports, "PresignedUrlPlugin", { enumerable: true, get: function () { return plugin_1.PresignedUrlPlugin; } });
34
34
  Object.defineProperty(exports, "createPresignedUrlPlugin", { enumerable: true, get: function () { return plugin_1.createPresignedUrlPlugin; } });
@@ -38,7 +38,9 @@ var preset_1 = require("./preset");
38
38
  Object.defineProperty(exports, "PresignedUrlPreset", { enumerable: true, get: function () { return preset_1.PresignedUrlPreset; } });
39
39
  var storage_module_cache_1 = require("./storage-module-cache");
40
40
  Object.defineProperty(exports, "getStorageModuleConfig", { enumerable: true, get: function () { return storage_module_cache_1.getStorageModuleConfig; } });
41
+ Object.defineProperty(exports, "getStorageModuleConfigForOwner", { enumerable: true, get: function () { return storage_module_cache_1.getStorageModuleConfigForOwner; } });
41
42
  Object.defineProperty(exports, "getBucketConfig", { enumerable: true, get: function () { return storage_module_cache_1.getBucketConfig; } });
43
+ Object.defineProperty(exports, "resolveStorageModuleByFileId", { enumerable: true, get: function () { return storage_module_cache_1.resolveStorageModuleByFileId; } });
42
44
  Object.defineProperty(exports, "clearStorageModuleCache", { enumerable: true, get: function () { return storage_module_cache_1.clearStorageModuleCache; } });
43
45
  Object.defineProperty(exports, "clearBucketCache", { enumerable: true, get: function () { return storage_module_cache_1.clearBucketCache; } });
44
46
  Object.defineProperty(exports, "isS3BucketProvisioned", { enumerable: true, get: function () { return storage_module_cache_1.isS3BucketProvisioned; } });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "graphile-presigned-url-plugin",
3
- "version": "0.4.1",
3
+ "version": "0.6.0",
4
4
  "description": "Presigned URL upload plugin for PostGraphile v5 — requestUploadUrl, confirmUpload mutations and downloadUrl computed field",
5
5
  "author": "Constructive <developers@constructive.io>",
6
6
  "homepage": "https://github.com/constructive-io/constructive",
@@ -42,7 +42,8 @@
42
42
  "dependencies": {
43
43
  "@aws-sdk/client-s3": "^3.1009.0",
44
44
  "@aws-sdk/s3-request-presigner": "^3.1009.0",
45
- "@pgpmjs/logger": "^2.5.2",
45
+ "@pgpmjs/logger": "^2.6.0",
46
+ "@pgsql/quotes": "^17.1.0",
46
47
  "lru-cache": "^11.2.7"
47
48
  },
48
49
  "peerDependencies": {
@@ -55,9 +56,9 @@
55
56
  "postgraphile": "5.0.0"
56
57
  },
57
58
  "devDependencies": {
58
- "@constructive-io/s3-utils": "^2.10.2",
59
+ "@constructive-io/s3-utils": "^2.11.0",
59
60
  "@types/node": "^22.19.11",
60
61
  "makage": "^0.1.10"
61
62
  },
62
- "gitHead": "79cd3e66871804a22c672c7ca2fa5e2105d4b368"
63
+ "gitHead": "28734dd71a973b2fe296e8240c8f86c568b4292f"
63
64
  }
package/plugin.js CHANGED
@@ -121,6 +121,13 @@ function createPresignedUrlPlugin(options) {
121
121
  input RequestUploadUrlInput {
122
122
  """Bucket key (e.g., "public", "private")"""
123
123
  bucketKey: String!
124
+ """
125
+ Owner entity ID for entity-scoped uploads.
126
+ Omit for app-level (database-wide) storage.
127
+ When provided, resolves the storage module for the entity type
128
+ that owns this entity instance (e.g., a data room ID, team ID).
129
+ """
130
+ ownerId: UUID
124
131
  """SHA-256 content hash computed by the client (hex-encoded, 64 chars)"""
125
132
  contentHash: String!
126
133
  """MIME type of the file (e.g., "image/png")"""
@@ -192,7 +199,7 @@ function createPresignedUrlPlugin(options) {
192
199
  });
193
200
  return (0, grafast_1.lambda)($combined, async ({ input, withPgClient, pgSettings }) => {
194
201
  // --- Input validation ---
195
- const { bucketKey, contentHash, contentType, size, filename } = input;
202
+ const { bucketKey, ownerId, contentHash, contentType, size, filename } = input;
196
203
  if (!bucketKey || typeof bucketKey !== 'string' || bucketKey.length > MAX_BUCKET_KEY_LENGTH) {
197
204
  throw new Error('INVALID_BUCKET_KEY');
198
205
  }
@@ -212,9 +219,14 @@ function createPresignedUrlPlugin(options) {
212
219
  if (!databaseId) {
213
220
  throw new Error('DATABASE_NOT_FOUND');
214
221
  }
215
- const storageConfig = await (0, storage_module_cache_1.getStorageModuleConfig)(txClient, databaseId);
222
+ // --- Resolve storage module (app-level or entity-scoped) ---
223
+ const storageConfig = ownerId
224
+ ? await (0, storage_module_cache_1.getStorageModuleConfigForOwner)(txClient, databaseId, ownerId)
225
+ : await (0, storage_module_cache_1.getStorageModuleConfig)(txClient, databaseId);
216
226
  if (!storageConfig) {
217
- throw new Error('STORAGE_MODULE_NOT_PROVISIONED');
227
+ throw new Error(ownerId
228
+ ? 'STORAGE_MODULE_NOT_FOUND_FOR_OWNER: no storage module found for the given ownerId'
229
+ : 'STORAGE_MODULE_NOT_PROVISIONED');
218
230
  }
219
231
  // --- Validate size against storage module default (bucket override checked below) ---
220
232
  if (typeof size !== 'number' || size <= 0 || size > storageConfig.defaultMaxFileSize) {
@@ -226,7 +238,7 @@ function createPresignedUrlPlugin(options) {
226
238
  }
227
239
  }
228
240
  // --- Look up the bucket (cached; first miss queries via RLS) ---
229
- const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, bucketKey);
241
+ const bucket = await (0, storage_module_cache_1.getBucketConfig)(txClient, storageConfig, databaseId, bucketKey, ownerId);
230
242
  if (!bucket) {
231
243
  throw new Error('BUCKET_NOT_FOUND');
232
244
  }
@@ -280,21 +292,38 @@ function createPresignedUrlPlugin(options) {
280
292
  };
281
293
  }
282
294
  // --- Create file record (status=pending) ---
295
+ // For app-level storage (no owner_id column), omit owner_id from the INSERT.
296
+ const hasOwnerColumn = storageConfig.membershipType !== null;
283
297
  const fileResult = await txClient.query({
284
- text: `INSERT INTO ${storageConfig.filesQualifiedName}
285
- (bucket_id, key, content_type, content_hash, size, filename, owner_id, is_public, status)
286
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'pending')
287
- RETURNING id`,
288
- values: [
289
- bucket.id,
290
- s3Key,
291
- contentType,
292
- contentHash,
293
- size,
294
- filename || null,
295
- bucket.owner_id,
296
- bucket.is_public,
297
- ],
298
+ text: hasOwnerColumn
299
+ ? `INSERT INTO ${storageConfig.filesQualifiedName}
300
+ (bucket_id, key, content_type, content_hash, size, filename, owner_id, is_public, status)
301
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'pending')
302
+ RETURNING id`
303
+ : `INSERT INTO ${storageConfig.filesQualifiedName}
304
+ (bucket_id, key, content_type, content_hash, size, filename, is_public, status)
305
+ VALUES ($1, $2, $3, $4, $5, $6, $7, 'pending')
306
+ RETURNING id`,
307
+ values: hasOwnerColumn
308
+ ? [
309
+ bucket.id,
310
+ s3Key,
311
+ contentType,
312
+ contentHash,
313
+ size,
314
+ filename || null,
315
+ bucket.owner_id,
316
+ bucket.is_public,
317
+ ]
318
+ : [
319
+ bucket.id,
320
+ s3Key,
321
+ contentType,
322
+ contentHash,
323
+ size,
324
+ filename || null,
325
+ bucket.is_public,
326
+ ],
298
327
  });
299
328
  const fileId = fileResult.rows[0].id;
300
329
  // --- Ensure the S3 bucket exists (lazy provisioning) ---
@@ -337,27 +366,16 @@ function createPresignedUrlPlugin(options) {
337
366
  }
338
367
  return withPgClient(pgSettings, async (pgClient) => {
339
368
  return pgClient.withTransaction(async (txClient) => {
340
- // --- Resolve storage module config ---
369
+ // --- Resolve storage module by file ID (probes all file tables) ---
341
370
  const databaseId = await resolveDatabaseId(txClient);
342
371
  if (!databaseId) {
343
372
  throw new Error('DATABASE_NOT_FOUND');
344
373
  }
345
- const storageConfig = await (0, storage_module_cache_1.getStorageModuleConfig)(txClient, databaseId);
346
- if (!storageConfig) {
347
- throw new Error('STORAGE_MODULE_NOT_PROVISIONED');
348
- }
349
- // --- Look up the file (RLS enforced) ---
350
- const fileResult = await txClient.query({
351
- text: `SELECT id, key, content_type, status, bucket_id
352
- FROM ${storageConfig.filesQualifiedName}
353
- WHERE id = $1
354
- LIMIT 1`,
355
- values: [fileId],
356
- });
357
- if (fileResult.rows.length === 0) {
374
+ const resolved = await (0, storage_module_cache_1.resolveStorageModuleByFileId)(txClient, databaseId, fileId);
375
+ if (!resolved) {
358
376
  throw new Error('FILE_NOT_FOUND');
359
377
  }
360
- const file = fileResult.rows[0];
378
+ const { storageConfig, file } = resolved;
361
379
  if (file.status !== 'pending') {
362
380
  // File is already confirmed or processed — idempotent success
363
381
  return {
@@ -1,6 +1,9 @@
1
1
  import type { StorageModuleConfig, BucketConfig } from './types';
2
2
  /**
3
- * Resolve the storage module config for a database, using the LRU cache.
3
+ * Resolve the app-level storage module config for a database, using the LRU cache.
4
+ *
5
+ * This is the default path when no ownerId is provided. It returns the
6
+ * storage module with membership_type IS NULL (app-level / database-wide).
4
7
  *
5
8
  * @param pgClient - A pg client from the Graphile context (withPgClient or pgClient)
6
9
  * @param databaseId - The metaschema database UUID
@@ -14,6 +17,57 @@ export declare function getStorageModuleConfig(pgClient: {
14
17
  rows: unknown[];
15
18
  }>;
16
19
  }, databaseId: string): Promise<StorageModuleConfig | null>;
20
+ /**
21
+ * Resolve the storage module config for a specific owner entity.
22
+ *
23
+ * When ownerId is provided, this function:
24
+ * 1. Loads ALL storage modules for the database (cached)
25
+ * 2. Finds which entity-scoped module contains the ownerId in its entity table
26
+ * 3. Returns that module's config
27
+ *
28
+ * This is the core of Option C — the ownerId tells us which scope to use.
29
+ *
30
+ * @param pgClient - A pg client from the Graphile context
31
+ * @param databaseId - The metaschema database UUID
32
+ * @param ownerId - The entity instance UUID (e.g., a data room ID, team ID)
33
+ * @returns StorageModuleConfig or null if no matching module found
34
+ */
35
+ export declare function getStorageModuleConfigForOwner(pgClient: {
36
+ query: (opts: {
37
+ text: string;
38
+ values?: unknown[];
39
+ }) => Promise<{
40
+ rows: unknown[];
41
+ }>;
42
+ }, databaseId: string, ownerId: string): Promise<StorageModuleConfig | null>;
43
+ /**
44
+ * Resolve the storage module that owns a specific file by probing all file tables.
45
+ *
46
+ * Used by confirmUpload when only a fileId (UUID) is available.
47
+ * Since UUIDs are globally unique, exactly one table will contain the file.
48
+ *
49
+ * @param pgClient - A pg client from the Graphile context
50
+ * @param databaseId - The metaschema database UUID
51
+ * @param fileId - The file UUID to look up
52
+ * @returns Object with the storage config and file row, or null if not found
53
+ */
54
+ export declare function resolveStorageModuleByFileId(pgClient: {
55
+ query: (opts: {
56
+ text: string;
57
+ values?: unknown[];
58
+ }) => Promise<{
59
+ rows: unknown[];
60
+ }>;
61
+ }, databaseId: string, fileId: string): Promise<{
62
+ storageConfig: StorageModuleConfig;
63
+ file: {
64
+ id: string;
65
+ key: string;
66
+ content_type: string;
67
+ status: string;
68
+ bucket_id: string;
69
+ };
70
+ } | null>;
17
71
  /**
18
72
  * Resolve bucket metadata for a given database + bucket key, using the LRU cache.
19
73
  *
@@ -21,9 +75,10 @@ export declare function getStorageModuleConfig(pgClient: {
21
75
  * the pgClient). On cache hit, returns the cached metadata directly.
22
76
  *
23
77
  * @param pgClient - A pg client from the Graphile context
24
- * @param storageConfig - The resolved StorageModuleConfig for this database
78
+ * @param storageConfig - The resolved StorageModuleConfig for this database/scope
25
79
  * @param databaseId - The metaschema database UUID (used as cache key prefix)
26
80
  * @param bucketKey - The bucket key (e.g., "public", "private")
81
+ * @param ownerId - Optional owner entity ID for entity-scoped bucket lookup
27
82
  * @returns BucketConfig or null if the bucket doesn't exist / isn't accessible
28
83
  */
29
84
  export declare function getBucketConfig(pgClient: {
@@ -33,7 +88,7 @@ export declare function getBucketConfig(pgClient: {
33
88
  }) => Promise<{
34
89
  rows: unknown[];
35
90
  }>;
36
- }, storageConfig: StorageModuleConfig, databaseId: string, bucketKey: string): Promise<BucketConfig | null>;
91
+ }, storageConfig: StorageModuleConfig, databaseId: string, bucketKey: string, ownerId?: string): Promise<BucketConfig | null>;
37
92
  /**
38
93
  * Check whether an S3 bucket has already been provisioned (cached).
39
94
  */
@@ -1,6 +1,8 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.getStorageModuleConfig = getStorageModuleConfig;
4
+ exports.getStorageModuleConfigForOwner = getStorageModuleConfigForOwner;
5
+ exports.resolveStorageModuleByFileId = resolveStorageModuleByFileId;
4
6
  exports.getBucketConfig = getBucketConfig;
5
7
  exports.isS3BucketProvisioned = isS3BucketProvisioned;
6
8
  exports.markS3BucketProvisioned = markS3BucketProvisioned;
@@ -8,6 +10,7 @@ exports.clearStorageModuleCache = clearStorageModuleCache;
8
10
  exports.clearBucketCache = clearBucketCache;
9
11
  const logger_1 = require("@pgpmjs/logger");
10
12
  const lru_cache_1 = require("lru-cache");
13
+ const quotes_1 = require("@pgsql/quotes");
11
14
  const log = new logger_1.Logger('graphile-presigned-url:cache');
12
15
  // --- Defaults ---
13
16
  const DEFAULT_UPLOAD_URL_EXPIRY_SECONDS = 900; // 15 minutes
@@ -33,13 +36,18 @@ const storageModuleCache = new lru_cache_1.LRUCache({
33
36
  updateAgeOnGet: true,
34
37
  });
35
38
  /**
36
- * SQL query to resolve storage module config for a database.
39
+ * SQL query to resolve the app-level storage module config for a database.
37
40
  *
38
41
  * Joins storage_module → table → schema to get fully-qualified table names.
42
+ * Filters to app-level (membership_type IS NULL) by default.
43
+ *
44
+ * Requires the multi-scope schema (membership_type column on storage_module).
39
45
  */
40
- const STORAGE_MODULE_QUERY = `
46
+ const APP_STORAGE_MODULE_QUERY = `
41
47
  SELECT
42
48
  sm.id,
49
+ sm.membership_type,
50
+ sm.entity_table_id,
43
51
  bs.schema_name AS buckets_schema,
44
52
  bt.name AS buckets_table,
45
53
  fs.schema_name AS files_schema,
@@ -54,7 +62,9 @@ const STORAGE_MODULE_QUERY = `
54
62
  sm.download_url_expiry_seconds,
55
63
  sm.default_max_file_size,
56
64
  sm.max_filename_length,
57
- sm.cache_ttl_seconds
65
+ sm.cache_ttl_seconds,
66
+ NULL AS entity_schema,
67
+ NULL AS entity_table
58
68
  FROM metaschema_modules_public.storage_module sm
59
69
  JOIN metaschema_public.table bt ON bt.id = sm.buckets_table_id
60
70
  JOIN metaschema_public.schema bs ON bs.id = bt.schema_id
@@ -63,38 +73,67 @@ const STORAGE_MODULE_QUERY = `
63
73
  JOIN metaschema_public.table urt ON urt.id = sm.upload_requests_table_id
64
74
  JOIN metaschema_public.schema urs ON urs.id = urt.schema_id
65
75
  WHERE sm.database_id = $1
76
+ AND sm.membership_type IS NULL
66
77
  LIMIT 1
67
78
  `;
68
79
  /**
69
- * Resolve the storage module config for a database, using the LRU cache.
80
+ * SQL query to resolve ALL storage modules for a database (app-level + entity-scoped).
70
81
  *
71
- * @param pgClient - A pg client from the Graphile context (withPgClient or pgClient)
72
- * @param databaseId - The metaschema database UUID
73
- * @returns StorageModuleConfig or null if no storage module is provisioned
82
+ * Returns all storage modules with their entity table names for ownerId resolution.
83
+ * Requires the multi-scope schema.
74
84
  */
75
- async function getStorageModuleConfig(pgClient, databaseId) {
76
- const cacheKey = `storage:${databaseId}`;
77
- const cached = storageModuleCache.get(cacheKey);
78
- if (cached) {
79
- return cached;
80
- }
81
- log.debug(`Cache miss for database ${databaseId}, querying metaschema...`);
82
- const result = await pgClient.query({ text: STORAGE_MODULE_QUERY, values: [databaseId] });
83
- if (result.rows.length === 0) {
84
- log.warn(`No storage module found for database ${databaseId}`);
85
- return null;
86
- }
87
- const row = result.rows[0];
85
+ const ALL_STORAGE_MODULES_QUERY = `
86
+ SELECT
87
+ sm.id,
88
+ sm.membership_type,
89
+ sm.entity_table_id,
90
+ bs.schema_name AS buckets_schema,
91
+ bt.name AS buckets_table,
92
+ fs.schema_name AS files_schema,
93
+ ft.name AS files_table,
94
+ urs.schema_name AS upload_requests_schema,
95
+ urt.name AS upload_requests_table,
96
+ sm.endpoint,
97
+ sm.public_url_prefix,
98
+ sm.provider,
99
+ sm.allowed_origins,
100
+ sm.upload_url_expiry_seconds,
101
+ sm.download_url_expiry_seconds,
102
+ sm.default_max_file_size,
103
+ sm.max_filename_length,
104
+ sm.cache_ttl_seconds,
105
+ es.schema_name AS entity_schema,
106
+ et.name AS entity_table
107
+ FROM metaschema_modules_public.storage_module sm
108
+ JOIN metaschema_public.table bt ON bt.id = sm.buckets_table_id
109
+ JOIN metaschema_public.schema bs ON bs.id = bt.schema_id
110
+ JOIN metaschema_public.table ft ON ft.id = sm.files_table_id
111
+ JOIN metaschema_public.schema fs ON fs.id = ft.schema_id
112
+ JOIN metaschema_public.table urt ON urt.id = sm.upload_requests_table_id
113
+ JOIN metaschema_public.schema urs ON urs.id = urt.schema_id
114
+ LEFT JOIN metaschema_public.table et ON et.id = sm.entity_table_id
115
+ LEFT JOIN metaschema_public.schema es ON es.id = et.schema_id
116
+ WHERE sm.database_id = $1
117
+ `;
118
+ /**
119
+ * Build a StorageModuleConfig from a raw DB row.
120
+ */
121
+ function buildConfig(row) {
88
122
  const cacheTtlSeconds = row.cache_ttl_seconds ?? DEFAULT_CACHE_TTL_SECONDS;
89
- const config = {
123
+ return {
90
124
  id: row.id,
91
- bucketsQualifiedName: `"${row.buckets_schema}"."${row.buckets_table}"`,
92
- filesQualifiedName: `"${row.files_schema}"."${row.files_table}"`,
93
- uploadRequestsQualifiedName: `"${row.upload_requests_schema}"."${row.upload_requests_table}"`,
125
+ bucketsQualifiedName: quotes_1.QuoteUtils.quoteQualifiedIdentifier(row.buckets_schema, row.buckets_table),
126
+ filesQualifiedName: quotes_1.QuoteUtils.quoteQualifiedIdentifier(row.files_schema, row.files_table),
127
+ uploadRequestsQualifiedName: quotes_1.QuoteUtils.quoteQualifiedIdentifier(row.upload_requests_schema, row.upload_requests_table),
94
128
  schemaName: row.buckets_schema,
95
129
  bucketsTableName: row.buckets_table,
96
130
  filesTableName: row.files_table,
97
131
  uploadRequestsTableName: row.upload_requests_table,
132
+ membershipType: row.membership_type,
133
+ entityTableId: row.entity_table_id,
134
+ entityQualifiedName: row.entity_schema && row.entity_table
135
+ ? quotes_1.QuoteUtils.quoteQualifiedIdentifier(row.entity_schema, row.entity_table)
136
+ : null,
98
137
  endpoint: row.endpoint,
99
138
  publicUrlPrefix: row.public_url_prefix,
100
139
  provider: row.provider,
@@ -105,10 +144,129 @@ async function getStorageModuleConfig(pgClient, databaseId) {
105
144
  maxFilenameLength: row.max_filename_length ?? DEFAULT_MAX_FILENAME_LENGTH,
106
145
  cacheTtlSeconds,
107
146
  };
147
+ }
148
+ /**
149
+ * Resolve the app-level storage module config for a database, using the LRU cache.
150
+ *
151
+ * This is the default path when no ownerId is provided. It returns the
152
+ * storage module with membership_type IS NULL (app-level / database-wide).
153
+ *
154
+ * @param pgClient - A pg client from the Graphile context (withPgClient or pgClient)
155
+ * @param databaseId - The metaschema database UUID
156
+ * @returns StorageModuleConfig or null if no storage module is provisioned
157
+ */
158
+ async function getStorageModuleConfig(pgClient, databaseId) {
159
+ const cacheKey = `storage:${databaseId}:app`;
160
+ const cached = storageModuleCache.get(cacheKey);
161
+ if (cached) {
162
+ return cached;
163
+ }
164
+ log.debug(`Cache miss for app-level storage in database ${databaseId}, querying metaschema...`);
165
+ const result = await pgClient.query({ text: APP_STORAGE_MODULE_QUERY, values: [databaseId] });
166
+ if (result.rows.length === 0) {
167
+ log.warn(`No app-level storage module found for database ${databaseId}`);
168
+ return null;
169
+ }
170
+ const config = buildConfig(result.rows[0]);
108
171
  storageModuleCache.set(cacheKey, config);
109
- log.debug(`Cached storage config for database ${databaseId}: ${config.bucketsQualifiedName}`);
172
+ log.debug(`Cached app-level storage config for database ${databaseId}: ${config.bucketsQualifiedName}`);
110
173
  return config;
111
174
  }
175
+ /**
176
+ * Resolve the storage module config for a specific owner entity.
177
+ *
178
+ * When ownerId is provided, this function:
179
+ * 1. Loads ALL storage modules for the database (cached)
180
+ * 2. Finds which entity-scoped module contains the ownerId in its entity table
181
+ * 3. Returns that module's config
182
+ *
183
+ * This is the core of Option C — the ownerId tells us which scope to use.
184
+ *
185
+ * @param pgClient - A pg client from the Graphile context
186
+ * @param databaseId - The metaschema database UUID
187
+ * @param ownerId - The entity instance UUID (e.g., a data room ID, team ID)
188
+ * @returns StorageModuleConfig or null if no matching module found
189
+ */
190
+ async function getStorageModuleConfigForOwner(pgClient, databaseId, ownerId) {
191
+ // Check if we already have a cached mapping for this ownerId
192
+ const ownerCacheKey = `storage:${databaseId}:owner:${ownerId}`;
193
+ const cachedOwner = storageModuleCache.get(ownerCacheKey);
194
+ if (cachedOwner) {
195
+ return cachedOwner;
196
+ }
197
+ // Load all storage modules for this database
198
+ const allModulesCacheKey = `storage:${databaseId}:all`;
199
+ let allConfigs;
200
+ const cachedAll = storageModuleCache.get(allModulesCacheKey);
201
+ if (cachedAll) {
202
+ // We stored a sentinel; re-derive from individual caches
203
+ // Actually, let's just query fresh — this is the cache-miss path
204
+ allConfigs = [];
205
+ }
206
+ else {
207
+ allConfigs = [];
208
+ }
209
+ if (allConfigs.length === 0) {
210
+ log.debug(`Loading all storage modules for database ${databaseId} to resolve ownerId ${ownerId}`);
211
+ const result = await pgClient.query({ text: ALL_STORAGE_MODULES_QUERY, values: [databaseId] });
212
+ allConfigs = result.rows.map(buildConfig);
213
+ // Cache each individual config by its membership type
214
+ for (const config of allConfigs) {
215
+ const key = config.membershipType === null
216
+ ? `storage:${databaseId}:app`
217
+ : `storage:${databaseId}:mt:${config.membershipType}`;
218
+ storageModuleCache.set(key, config);
219
+ }
220
+ }
221
+ // Find entity-scoped modules and probe their entity tables for the ownerId
222
+ const entityModules = allConfigs.filter((c) => c.entityQualifiedName !== null);
223
+ for (const mod of entityModules) {
224
+ const probeResult = await pgClient.query({
225
+ text: `SELECT 1 FROM ${mod.entityQualifiedName} WHERE id = $1 LIMIT 1`,
226
+ values: [ownerId],
227
+ });
228
+ if (probeResult.rows.length > 0) {
229
+ // Found the matching module — cache the ownerId→module mapping
230
+ storageModuleCache.set(ownerCacheKey, mod);
231
+ log.debug(`Resolved ownerId ${ownerId} to storage module ${mod.id} ` +
232
+ `(membershipType=${mod.membershipType}, table=${mod.bucketsQualifiedName})`);
233
+ return mod;
234
+ }
235
+ }
236
+ log.warn(`No entity-scoped storage module found for ownerId ${ownerId} in database ${databaseId}`);
237
+ return null;
238
+ }
239
+ /**
240
+ * Resolve the storage module that owns a specific file by probing all file tables.
241
+ *
242
+ * Used by confirmUpload when only a fileId (UUID) is available.
243
+ * Since UUIDs are globally unique, exactly one table will contain the file.
244
+ *
245
+ * @param pgClient - A pg client from the Graphile context
246
+ * @param databaseId - The metaschema database UUID
247
+ * @param fileId - The file UUID to look up
248
+ * @returns Object with the storage config and file row, or null if not found
249
+ */
250
+ async function resolveStorageModuleByFileId(pgClient, databaseId, fileId) {
251
+ // Load all storage modules for this database
252
+ log.debug(`Resolving file ${fileId} across all storage modules for database ${databaseId}`);
253
+ const allConfigs = (await pgClient.query({ text: ALL_STORAGE_MODULES_QUERY, values: [databaseId] })).rows.map((row) => buildConfig(row));
254
+ // Probe each module's files table for the fileId
255
+ for (const config of allConfigs) {
256
+ const fileResult = await pgClient.query({
257
+ text: `SELECT id, key, content_type, status, bucket_id
258
+ FROM ${config.filesQualifiedName}
259
+ WHERE id = $1
260
+ LIMIT 1`,
261
+ values: [fileId],
262
+ });
263
+ if (fileResult.rows.length > 0) {
264
+ const file = fileResult.rows[0];
265
+ return { storageConfig: config, file };
266
+ }
267
+ }
268
+ return null;
269
+ }
112
270
  // --- Bucket metadata cache ---
113
271
  /**
114
272
  * LRU cache for per-database bucket metadata.
@@ -121,7 +279,7 @@ async function getStorageModuleConfig(pgClient, databaseId) {
121
279
  * is safe. The important RLS is on the files table (INSERT/UPDATE),
122
280
  * which is never cached.
123
281
  *
124
- * Keys: `bucket:${databaseId}:${bucketKey}`
282
+ * Keys: `bucket:${databaseId}:${storageModuleId}:${bucketKey}`
125
283
  * TTL: same as storage module cache (5min dev / 1hr prod)
126
284
  */
127
285
  const bucketCache = new lru_cache_1.LRUCache({
@@ -136,24 +294,33 @@ const bucketCache = new lru_cache_1.LRUCache({
136
294
  * the pgClient). On cache hit, returns the cached metadata directly.
137
295
  *
138
296
  * @param pgClient - A pg client from the Graphile context
139
- * @param storageConfig - The resolved StorageModuleConfig for this database
297
+ * @param storageConfig - The resolved StorageModuleConfig for this database/scope
140
298
  * @param databaseId - The metaschema database UUID (used as cache key prefix)
141
299
  * @param bucketKey - The bucket key (e.g., "public", "private")
300
+ * @param ownerId - Optional owner entity ID for entity-scoped bucket lookup
142
301
  * @returns BucketConfig or null if the bucket doesn't exist / isn't accessible
143
302
  */
144
- async function getBucketConfig(pgClient, storageConfig, databaseId, bucketKey) {
145
- const cacheKey = `bucket:${databaseId}:${bucketKey}`;
303
+ async function getBucketConfig(pgClient, storageConfig, databaseId, bucketKey, ownerId) {
304
+ const cacheKey = `bucket:${databaseId}:${storageConfig.id}:${bucketKey}${ownerId ? `:${ownerId}` : ''}`;
146
305
  const cached = bucketCache.get(cacheKey);
147
306
  if (cached) {
148
307
  return cached;
149
308
  }
150
- log.debug(`Bucket cache miss for ${databaseId}:${bucketKey}, querying DB...`);
309
+ log.debug(`Bucket cache miss for ${databaseId}:${bucketKey}${ownerId ? ` (owner=${ownerId})` : ''}, querying DB...`);
310
+ // Entity-scoped buckets use (owner_id, key) composite lookup;
311
+ // app-level buckets just use key.
312
+ const hasOwner = ownerId && storageConfig.membershipType !== null;
151
313
  const result = await pgClient.query({
152
- text: `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size
153
- FROM ${storageConfig.bucketsQualifiedName}
154
- WHERE key = $1
155
- LIMIT 1`,
156
- values: [bucketKey],
314
+ text: hasOwner
315
+ ? `SELECT id, key, type, is_public, owner_id, allowed_mime_types, max_file_size
316
+ FROM ${storageConfig.bucketsQualifiedName}
317
+ WHERE key = $1 AND owner_id = $2
318
+ LIMIT 1`
319
+ : `SELECT id, key, type, is_public, ${storageConfig.membershipType !== null ? 'owner_id,' : ''} allowed_mime_types, max_file_size
320
+ FROM ${storageConfig.bucketsQualifiedName}
321
+ WHERE key = $1
322
+ LIMIT 1`,
323
+ values: hasOwner ? [bucketKey, ownerId] : [bucketKey],
157
324
  });
158
325
  if (result.rows.length === 0) {
159
326
  return null;
@@ -164,12 +331,12 @@ async function getBucketConfig(pgClient, storageConfig, databaseId, bucketKey) {
164
331
  key: row.key,
165
332
  type: row.type,
166
333
  is_public: row.is_public,
167
- owner_id: row.owner_id,
334
+ owner_id: row.owner_id ?? null,
168
335
  allowed_mime_types: row.allowed_mime_types,
169
336
  max_file_size: row.max_file_size,
170
337
  };
171
338
  bucketCache.set(cacheKey, config);
172
- log.debug(`Cached bucket config for ${databaseId}:${bucketKey} (id=${config.id})`);
339
+ log.debug(`Cached bucket config for ${databaseId}:${bucketKey} (id=${config.id}, scope=${storageConfig.membershipType ?? 'app'})`);
173
340
  return config;
174
341
  }
175
342
  // --- S3 bucket existence cache ---
package/types.d.ts CHANGED
@@ -7,7 +7,7 @@ export interface BucketConfig {
7
7
  key: string;
8
8
  type: 'public' | 'private' | 'temp';
9
9
  is_public: boolean;
10
- owner_id: string;
10
+ owner_id: string | null;
11
11
  allowed_mime_types: string[] | null;
12
12
  max_file_size: number | null;
13
13
  }
@@ -31,6 +31,12 @@ export interface StorageModuleConfig {
31
31
  filesTableName: string;
32
32
  /** Upload requests table name */
33
33
  uploadRequestsTableName: string;
34
+ /** Membership type (NULL for app-level, non-NULL for entity-scoped) */
35
+ membershipType: number | null;
36
+ /** Entity table ID for entity-scoped storage (NULL for app-level) */
37
+ entityTableId: string | null;
38
+ /** Qualified entity table name for ownerId lookups (NULL for app-level) */
39
+ entityQualifiedName: string | null;
34
40
  /** S3-compatible API endpoint URL (per-database override) */
35
41
  endpoint: string | null;
36
42
  /** Public URL prefix for generating download URLs (per-database override) */
@@ -56,6 +62,13 @@ export interface StorageModuleConfig {
56
62
  export interface RequestUploadUrlInput {
57
63
  /** Bucket key (e.g., "public", "private") */
58
64
  bucketKey: string;
65
+ /**
66
+ * Owner entity ID for entity-scoped uploads.
67
+ * Omit for app-level (database-wide) storage.
68
+ * When provided, resolves the storage module for the entity type
69
+ * that owns this entity instance (e.g., a data room ID, team ID).
70
+ */
71
+ ownerId?: string;
59
72
  /** SHA-256 content hash computed by the client */
60
73
  contentHash: string;
61
74
  /** MIME type of the file */