@rpcbase/server 0.537.0 → 0.539.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/dist/email-DK8uUU4X.js +8045 -0
  2. package/dist/email-DK8uUU4X.js.map +1 -0
  3. package/dist/handler--FFBJMl6.js +153 -0
  4. package/dist/handler--FFBJMl6.js.map +1 -0
  5. package/dist/handler-0rPClEv4.js +663 -0
  6. package/dist/handler-0rPClEv4.js.map +1 -0
  7. package/dist/handler-COnCnprN.js +203 -0
  8. package/dist/handler-COnCnprN.js.map +1 -0
  9. package/dist/handler-ClQF4MOn.js +931 -0
  10. package/dist/handler-ClQF4MOn.js.map +1 -0
  11. package/dist/index.js +4988 -4830
  12. package/dist/index.js.map +1 -1
  13. package/dist/notifications.js +199 -134
  14. package/dist/notifications.js.map +1 -1
  15. package/dist/queryExecutor-Bol_iR8f.js +453 -0
  16. package/dist/queryExecutor-Bol_iR8f.js.map +1 -0
  17. package/dist/render_resend_false-MiC__Smr.js +6 -0
  18. package/dist/render_resend_false-MiC__Smr.js.map +1 -0
  19. package/dist/rts/index.d.ts +0 -1
  20. package/dist/rts/index.d.ts.map +1 -1
  21. package/dist/rts/index.js +1003 -842
  22. package/dist/rts/index.js.map +1 -1
  23. package/dist/schemas-Cjdjgehl.js +4225 -0
  24. package/dist/schemas-Cjdjgehl.js.map +1 -0
  25. package/dist/shared-nE84Or5W.js +111 -0
  26. package/dist/shared-nE84Or5W.js.map +1 -0
  27. package/dist/ssrMiddleware.d.ts +1 -1
  28. package/dist/uploads/api/file-uploads/handlers/completeUpload.d.ts.map +1 -1
  29. package/dist/uploads/api/file-uploads/postProcessors.d.ts +2 -0
  30. package/dist/uploads/api/file-uploads/postProcessors.d.ts.map +1 -1
  31. package/dist/uploads.js +103 -71
  32. package/dist/uploads.js.map +1 -1
  33. package/package.json +11 -10
  34. package/dist/email-H8nTAGxe.js +0 -12449
  35. package/dist/email-H8nTAGxe.js.map +0 -1
  36. package/dist/handler-BBzEodA0.js +0 -182
  37. package/dist/handler-BBzEodA0.js.map +0 -1
  38. package/dist/handler-BLwgdQv-.js +0 -544
  39. package/dist/handler-BLwgdQv-.js.map +0 -1
  40. package/dist/handler-BU_tEK6x.js +0 -749
  41. package/dist/handler-BU_tEK6x.js.map +0 -1
  42. package/dist/handler-CZD5p1Jv.js +0 -28
  43. package/dist/handler-CZD5p1Jv.js.map +0 -1
  44. package/dist/handler-Cq6MsoD4.js +0 -124
  45. package/dist/handler-Cq6MsoD4.js.map +0 -1
  46. package/dist/queryExecutor-JadZcQSQ.js +0 -318
  47. package/dist/queryExecutor-JadZcQSQ.js.map +0 -1
  48. package/dist/render_resend-DQANggpW.js +0 -7
  49. package/dist/render_resend-DQANggpW.js.map +0 -1
  50. package/dist/rts/api/cleanup/handler.d.ts +0 -9
  51. package/dist/rts/api/cleanup/handler.d.ts.map +0 -1
  52. package/dist/rts/api/cleanup/index.d.ts +0 -11
  53. package/dist/rts/api/cleanup/index.d.ts.map +0 -1
  54. package/dist/schemas-BR3K5Luo.js +0 -3824
  55. package/dist/schemas-BR3K5Luo.js.map +0 -1
  56. package/dist/shared-DhZ_rDdo.js +0 -87
  57. package/dist/shared-DhZ_rDdo.js.map +0 -1
@@ -0,0 +1,931 @@
1
+ import { models, getTenantFilesystemDb } from "@rpcbase/db";
2
+ import { GridFSBucket, ObjectId } from "mongodb";
3
+ import { enqueueUploadPostProcessors } from "./uploads.js";
4
+ import { JSDOM } from "jsdom";
5
+ import createDOMPurify from "dompurify";
6
+ import { g as getTenantId, b as buildUploadsAbility, a as getUploadSessionAccessQuery, e as ensureUploadIndexes, c as getBucketName, d as getModelCtx, f as getUserId, h as getChunkSizeBytes, i as getSessionTtlMs, j as computeSha256Hex, t as toBufferPayload, n as normalizeSha256Hex, k as getMaxClientUploadBytesPerSecond, l as getRawBodyLimitBytes } from "./shared-nE84Or5W.js";
7
+ import { randomBytes } from "node:crypto";
8
+ import { o as object, n as number, b as boolean, s as string, a as array, _ as _enum } from "./schemas-Cjdjgehl.js";
9
// Hard cap for SVG payloads eligible for inline sanitization (128 KiB).
// Anything larger is rejected rather than buffered in memory.
const MAX_SVG_BYTES = 128 * 1024;
// DOMPurify requires a DOM; on the server we back it with a throwaway jsdom window.
const window = new JSDOM("").window;
const DOMPurify = createDOMPurify(window);
12
// Strip a UTF-8 BOM and leading whitespace before content sniffing.
const normalizeForSniff = (raw) => raw.replace(/^\uFEFF/, "").trimStart();
/**
 * Heuristic: does this text look like an SVG document?
 * The payload must start with markup (`<`) and contain an `<svg` tag opener.
 * Fix: the original pattern `/<svg(?:\s|>)/i` did not match a minimal
 * self-closing `<svg/>`, letting such a payload bypass the SVG processor;
 * `[\s/>]` also accepts the `/` form.
 */
const looksLikeSvgText = (text) => {
  const normalized = normalizeForSniff(text);
  if (!normalized.startsWith("<")) return false;
  return /<svg[\s/>]/i.test(normalized);
};
// Buffer variant: decode the sniff window as UTF-8 and reuse the text check.
const looksLikeSvg = (sniff) => looksLikeSvgText(sniff.toString("utf8"));
19
/**
 * Run DOMPurify over untrusted SVG markup, restricted to the SVG profiles
 * (plus SVG filters); everything outside those profiles is stripped.
 */
const sanitizeSvg = (svg) => {
  const options = { USE_PROFILES: { svg: true, svgFilters: true } };
  return DOMPurify.sanitize(svg, options);
};
25
/**
 * Inline upload processor that sanitizes SVG content before it is stored.
 * `match` sniffs the buffer for SVG markup; `process` validates size,
 * sanitizes, and re-validates that the sanitized output still looks like SVG.
 * Errors are thrown with machine-readable messages the caller maps to
 * HTTP responses ("svg_too_large", "svg_invalid", "svg_sanitize_failed").
 */
const sanitizeSvgProcessor = {
  id: "sanitize-svg",
  maxBytes: MAX_SVG_BYTES,
  match: ({ sniff }) => looksLikeSvg(sniff),
  process: (data) => {
    if (data.length > MAX_SVG_BYTES) throw new Error("svg_too_large");
    const svgText = data.toString("utf8");
    if (!looksLikeSvgText(svgText)) throw new Error("svg_invalid");
    const sanitized = sanitizeSvg(svgText);
    // Sanitization must yield non-empty output that is still recognizably SVG.
    const stillSvg = Boolean(sanitized.trim()) && looksLikeSvgText(sanitized);
    if (!stillSvg) throw new Error("svg_sanitize_failed");
    const sanitizedBuffer = Buffer.from(sanitized, "utf8");
    // Sanitizing can expand entities; re-check the size cap on the output.
    if (sanitizedBuffer.length > MAX_SVG_BYTES) throw new Error("svg_too_large");
    return { data: sanitizedBuffer, mimeType: "image/svg+xml" };
  }
};
53
// Registry of inline (pre-store) upload processors; frozen so nothing can
// mutate the pipeline at runtime.
const uploadProcessors = Object.freeze([sanitizeSvgProcessor]);
// Largest payload any registered processor is willing to accept (0 when empty).
const getMaxUploadProcessorBytes = () => {
  let max = 0;
  for (const processor of uploadProcessors) {
    max = Math.max(max, processor.maxBytes);
  }
  return max;
};
// Processors whose `match` predicate accepts the given upload context.
const selectUploadProcessors = (ctx) => uploadProcessors.filter((p) => p.match(ctx));
56
/**
 * Run every matching processor over the assembled upload, in registry order.
 * Each processor sees the output of the previous one (data and mime type);
 * its id is collected in `applied`. Throws "processor_input_too_large" when
 * the current buffer exceeds a matching processor's size cap.
 * @returns {{ data: Buffer, mimeType: string, applied: string[] }}
 */
const applyUploadProcessors = async (data, ctx) => {
  const applied = [];
  let buffer = data;
  let mimeType = ctx.clientMimeType;
  for (const processor of uploadProcessors) {
    // Re-sniff against the current (possibly already transformed) buffer.
    const processorCtx = {
      filename: ctx.filename,
      clientMimeType: mimeType,
      totalSize: buffer.length,
      sniff: buffer
    };
    if (!processor.match(processorCtx)) continue;
    if (buffer.length > processor.maxBytes) {
      throw new Error("processor_input_too_large");
    }
    const result = await processor.process(buffer, processorCtx);
    buffer = result.data;
    // Adopt the processor's mime type only when it is a non-blank string.
    const nextMime = typeof result.mimeType === "string" ? result.mimeType.trim() : "";
    if (nextMime) mimeType = nextMime;
    applied.push(processor.id);
  }
  return { data: buffer, mimeType, applied };
};
84
/**
 * Resolve when `stream` emits "finish", reject on "error".
 * Fix: dropped the needless `async` wrapper — the function already returns
 * an explicit Promise, so `async` only added an extra promise layer
 * (explicit-construction anti-pattern). Callers that `await` it see no change.
 * The "error" listener stays attached after "finish" so a late error event
 * is consumed by a listener instead of becoming an unhandled 'error'.
 */
const waitForStreamFinished = (stream) => new Promise((resolve, reject) => {
  stream.once("finish", resolve);
  stream.once("error", reject);
});
88
/**
 * Write one chunk to a writable stream while honoring backpressure:
 * when write() returns false, wait for "drain" (or fail fast on "error")
 * before returning to the caller. Listeners are detached either way.
 */
const writeToStream = async (stream, chunk) => {
  if (stream.write(chunk)) return;
  await new Promise((resolve, reject) => {
    const detach = () => {
      stream.off("drain", handleDrain);
      stream.off("error", handleError);
    };
    const handleDrain = () => {
      detach();
      resolve();
    };
    const handleError = (error) => {
      detach();
      reject(error);
    };
    stream.on("drain", handleDrain);
    stream.on("error", handleError);
  });
};
108
/**
 * Best-effort teardown of a (possibly null) upload stream on error paths.
 * Prefers `abort()` when the stream exposes it, falling back to `destroy()`.
 * All failures are deliberately swallowed: the original error must win.
 * (Also removes a stray `;` left over from a compiled-away cast.)
 */
const abortUploadStream = async (stream) => {
  if (!stream) return;
  if (typeof stream.abort === "function") {
    try {
      await stream.abort();
      return;
    } catch {
      // abort failed; fall through to destroy()
    }
  }
  try {
    stream.destroy?.();
  } catch {
    // ignore — nothing more we can do
  }
};
123
/**
 * POST /:uploadId/complete handler.
 * Assembles all stored chunks for an upload session into one GridFS file.
 *
 * Flow: validate tenant + uploadId -> ability check -> idempotency check
 * (already "done" sessions return their fileId) -> atomically lock the
 * session ("uploading" -> "assembling") -> stream or buffer the chunks into
 * GridFS -> mark "done", enqueue post-processors, delete the chunks.
 *
 * Small files (<= the largest processor's maxBytes) are fully buffered so
 * inline processors (SVG sanitization) can run before storage; larger files
 * are streamed, and any file that would have matched a processor (declared
 * SVG, or SVG detected in the sniff window) is rejected with "svg_too_large".
 *
 * Error mapping: missing_chunks -> 409 (session reverted to "uploading"),
 * svg_too_large -> 413, svg_invalid / svg_sanitize_failed -> 400,
 * anything else -> 500 "assembly_failed" (session marked "error").
 */
const completeUpload = async (_payload, ctx) => {
  const tenantId = getTenantId(ctx);
  if (!tenantId) {
    ctx.res.status(400);
    return { ok: false, error: "tenant_missing" };
  }
  const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
  if (!uploadId) {
    ctx.res.status(400);
    return { ok: false, error: "invalid_upload_id" };
  }
  const ability = buildUploadsAbility(ctx, tenantId);
  const modelCtx = getModelCtx(ctx, tenantId, ability);
  const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
  if (!ability.can("update", "RBUploadSession")) {
    ctx.res.status(401);
    return { ok: false, error: "unauthorized" };
  }
  // Session lookup is scoped by the ability's access query, not just _id.
  const existing = await UploadSession.findOne({
    $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "read")]
  }).lean();
  if (!existing) {
    ctx.res.status(404);
    return { ok: false, error: "not_found" };
  }
  // Idempotent completion: a finished session just returns its fileId.
  if (existing.status === "done" && existing.fileId) {
    return { ok: true, fileId: existing.fileId };
  }
  // Atomic lock: only one caller can flip "uploading" -> "assembling".
  const locked = await UploadSession.findOneAndUpdate({
    $and: [{ _id: uploadId }, { status: "uploading" }, getUploadSessionAccessQuery(ability, "update")]
  }, {
    $set: { status: "assembling" },
    $unset: { error: "" }
  }, {
    returnDocument: "after"
  }).lean();
  if (!locked) {
    ctx.res.status(409);
    return { ok: false, error: "not_uploading" };
  }
  await ensureUploadIndexes(UploadSession, UploadChunk);
  const fsDb = await getTenantFilesystemDb(tenantId);
  const nativeDb = fsDb.db;
  if (!nativeDb) {
    // Cannot assemble without the native driver handle; mark the session failed.
    await UploadSession.updateOne({
      $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")]
    }, {
      $set: { status: "error", error: "filesystem_db_unavailable" }
    });
    ctx.res.status(500);
    return { ok: false, error: "assembly_failed" };
  }
  const bucketName = getBucketName();
  const bucket = new GridFSBucket(nativeDb, { bucketName });
  const lockedUserId = typeof locked.userId === "string" ? locked.userId : void 0;
  const maxProcessorBytes = getMaxUploadProcessorBytes();
  // Buffer the whole file in memory only when an inline processor could apply.
  const shouldBufferForProcessing = locked.totalSize <= maxProcessorBytes;
  const declaredMimeType = locked.mimeType.trim().toLowerCase();
  const declaredSvg = declaredMimeType === "image/svg+xml" || locked.filename.trim().toLowerCase().endsWith(".svg");
  let uploadStream = null;
  let finalMimeType = locked.mimeType;
  let inlineProcessors = [];
  // GridFS file metadata; rebuilt later if processors change the mime type.
  let finalMetadata = {
    uploadId,
    tenantId,
    mimeType: locked.mimeType,
    totalSize: locked.totalSize,
    ...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
    ...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
    ...lockedUserId ? { userId: lockedUserId } : {}
  };
  try {
    // Declared-SVG files too big to buffer cannot be sanitized inline: reject.
    if (!shouldBufferForProcessing && declaredSvg) {
      throw new Error("svg_too_large");
    }
    const cursor = UploadChunk.find({ uploadId }).sort({ index: 1 }).cursor();
    let expectedIndex = 0;
    const chunks = [];          // buffered path: whole-file accumulation
    let bufferedBytes = 0;
    const pendingChunks = [];   // streaming path: held until sniff completes
    const sniffParts = [];      // first maxProcessorBytes bytes, for matching
    let sniffBytes = 0;
    try {
      for await (const chunkDoc of cursor) {
        // Chunks must be contiguous from 0; any gap means the upload is incomplete.
        if (chunkDoc.index !== expectedIndex) {
          throw new Error("missing_chunks");
        }
        const chunk = chunkDoc.data;
        if (shouldBufferForProcessing) {
          chunks.push(chunk);
          bufferedBytes += chunk.length;
        } else if (!uploadStream) {
          // Streaming path, still sniffing: hold chunks until the sniff
          // window is full, then decide whether a processor would match.
          pendingChunks.push(chunk);
          if (sniffBytes < maxProcessorBytes) {
            const slice = chunk.subarray(0, Math.min(chunk.length, maxProcessorBytes - sniffBytes));
            if (slice.length) {
              sniffParts.push(slice);
              sniffBytes += slice.length;
            }
          }
          if (sniffBytes >= maxProcessorBytes) {
            const sniff = Buffer.concat(sniffParts, sniffBytes);
            const processors = selectUploadProcessors({
              filename: locked.filename,
              clientMimeType: locked.mimeType,
              totalSize: locked.totalSize,
              sniff
            });
            // A matching processor on an un-bufferable file = cannot sanitize.
            if (processors.length) {
              throw new Error("svg_too_large");
            }
            finalMetadata = {
              uploadId,
              tenantId,
              mimeType: locked.mimeType,
              totalSize: locked.totalSize,
              ...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
              ...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
              ...lockedUserId ? { userId: lockedUserId } : {}
            };
            uploadStream = bucket.openUploadStream(locked.filename, { metadata: finalMetadata });
            // Flush everything held back during sniffing.
            for (const pending of pendingChunks) {
              await writeToStream(uploadStream, pending);
            }
            pendingChunks.length = 0;
          }
        } else {
          await writeToStream(uploadStream, chunk);
        }
        expectedIndex += 1;
      }
    } finally {
      try {
        await cursor.close();
      } catch {
      }
    }
    if (expectedIndex !== locked.chunksTotal) {
      throw new Error("missing_chunks");
    }
    if (shouldBufferForProcessing) {
      // Buffered path: run inline processors over the whole file, then store.
      const assembled = Buffer.concat(chunks, bufferedBytes);
      const {
        data: processed,
        mimeType: processedMimeType,
        applied
      } = await applyUploadProcessors(assembled, {
        filename: locked.filename,
        clientMimeType: locked.mimeType
      });
      finalMimeType = processedMimeType;
      inlineProcessors = applied;
      finalMetadata = {
        uploadId,
        tenantId,
        mimeType: processedMimeType,
        totalSize: locked.totalSize,
        ...applied.length ? { processors: applied, processedSize: processed.length } : {},
        ...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
        ...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
        ...lockedUserId ? { userId: lockedUserId } : {}
      };
      uploadStream = bucket.openUploadStream(locked.filename, { metadata: finalMetadata });
      const finished = waitForStreamFinished(uploadStream);
      uploadStream.end(processed);
      await finished;
    } else {
      // Streaming path where the file was smaller than the sniff window
      // (sniff never filled): run the match now before storing.
      if (!uploadStream) {
        const sniff = Buffer.concat(sniffParts, sniffBytes);
        const processors = selectUploadProcessors({
          filename: locked.filename,
          clientMimeType: locked.mimeType,
          totalSize: locked.totalSize,
          sniff
        });
        if (processors.length) {
          throw new Error("svg_too_large");
        }
        finalMetadata = {
          uploadId,
          tenantId,
          mimeType: locked.mimeType,
          totalSize: locked.totalSize,
          ...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
          ...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
          ...lockedUserId ? { userId: lockedUserId } : {}
        };
        uploadStream = bucket.openUploadStream(locked.filename, { metadata: finalMetadata });
        for (const pending of pendingChunks) {
          await writeToStream(uploadStream, pending);
        }
        pendingChunks.length = 0;
      }
      const finished = waitForStreamFinished(uploadStream);
      uploadStream.end();
      await finished;
    }
    const fileId = String(uploadStream.id ?? "");
    if (!fileId) {
      throw new Error("missing_file_id");
    }
    await UploadSession.updateOne({
      $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")]
    }, {
      $set: { status: "done", fileId },
      $unset: { error: "" }
    });
    // Post-processing is best-effort: failures are logged, not surfaced,
    // because the file is already stored and the session is "done".
    await enqueueUploadPostProcessors({
      tenantId,
      uploadId,
      fileId,
      filename: locked.filename,
      mimeType: finalMimeType,
      clientMimeType: locked.mimeType,
      totalSize: locked.totalSize,
      ...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
      ...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
      ...lockedUserId ? { userId: lockedUserId } : {},
      inlineProcessors,
      metadata: finalMetadata
    }).catch((error) => {
      console.error("Upload post processor enqueue failed", {
        tenantId,
        uploadId,
        fileId,
        error
      });
    });
    // Chunk cleanup is best-effort; a TTL index presumably reaps leftovers
    // (expiresAt is set on chunks — TODO confirm TTL index exists).
    try {
      await UploadChunk.deleteMany({ uploadId });
    } catch {
    }
    return { ok: true, fileId };
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    // Tear down any partially written GridFS stream before reporting.
    await abortUploadStream(uploadStream);
    if (message === "missing_chunks") {
      // Recoverable: put the session back so the client can resume uploading.
      await UploadSession.updateOne({
        $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")]
      }, {
        $set: { status: "uploading" }
      });
      ctx.res.status(409);
      return { ok: false, error: "missing_chunks" };
    }
    if (message === "svg_too_large") {
      await UploadSession.updateOne({
        $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")]
      }, {
        $set: { status: "error", error: message }
      });
      ctx.res.status(413);
      return { ok: false, error: message };
    }
    if (message === "svg_invalid" || message === "svg_sanitize_failed") {
      await UploadSession.updateOne({
        $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")]
      }, {
        $set: { status: "error", error: message }
      });
      ctx.res.status(400);
      return { ok: false, error: message };
    }
    // Unexpected failure: persist the raw message, report a generic error.
    await UploadSession.updateOne({
      $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")]
    }, {
      $set: { status: "error", error: message }
    });
    ctx.res.status(500);
    return { ok: false, error: "assembly_failed" };
  }
};
519
/**
 * GET /:uploadId/status handler.
 * Reports the session's state plus the sorted list of chunk indexes already
 * stored, so a client can resume an interrupted upload. Includes `fileId`
 * once the session is complete.
 */
const getStatus = async (_payload, ctx) => {
  const tenantId = getTenantId(ctx);
  if (!tenantId) {
    ctx.res.status(400);
    return { ok: false, error: "tenant_missing" };
  }
  const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
  if (!uploadId) {
    ctx.res.status(400);
    return { ok: false, error: "invalid_upload_id" };
  }
  const ability = buildUploadsAbility(ctx, tenantId);
  const modelCtx = getModelCtx(ctx, tenantId, ability);
  const [UploadSession, UploadChunk] = await Promise.all([
    models.get("RBUploadSession", modelCtx),
    models.get("RBUploadChunk", modelCtx)
  ]);
  if (!ability.can("read", "RBUploadSession")) {
    ctx.res.status(401);
    return { ok: false, error: "unauthorized" };
  }
  // Lookup is scoped by the ability's access query, not just _id.
  const session = await UploadSession.findOne({
    $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "read")]
  }).lean();
  if (!session) {
    ctx.res.status(404);
    return { ok: false, error: "not_found" };
  }
  // Project only chunk indexes, ascending; drop anything that is not a
  // non-negative integer.
  const receivedDocs = await UploadChunk.find(
    { uploadId },
    { index: 1, _id: 0 }
  ).sort({ index: 1 }).lean();
  const received = [];
  for (const doc of receivedDocs) {
    const candidate = typeof doc.index === "number" ? doc.index : -1;
    if (Number.isInteger(candidate) && candidate >= 0) {
      received.push(candidate);
    }
  }
  const response = {
    ok: true,
    status: session.status,
    chunkSize: session.chunkSize,
    chunksTotal: session.chunksTotal,
    received
  };
  if (session.fileId) {
    response.fileId = session.fileId;
  }
  return response;
};
578
// Route templates for the chunked file-upload HTTP API.
const InitRoute = "/api/rb/file-uploads";
const ChunkRoute = "/api/rb/file-uploads/:uploadId/chunks/:index";
const StatusRoute = "/api/rb/file-uploads/:uploadId/status";
const CompleteRoute = "/api/rb/file-uploads/:uploadId/complete";
582
// Request-body schema for POST /api/rb/file-uploads (session init).
const initRequestSchema = object({
  filename: string().min(1),
  mimeType: string().min(1),
  isPublic: boolean().optional(),
  totalSize: number().int().min(1)
});
// NOTE(review): the three schema expressions below are evaluated and then
// discarded — this looks like bundler residue of response schemas whose
// exported bindings were tree-shaken away. They have no runtime effect
// beyond construction; safe to remove in the original source.
object({
  ok: boolean(),
  error: string().optional(),
  uploadId: string().optional(),
  uploadKey: string().optional(),
  chunkSize: number().int().optional(),
  chunksTotal: number().int().optional()
});
object({
  ok: boolean(),
  error: string().optional(),
  status: _enum(["uploading", "assembling", "done", "error"]).optional(),
  chunkSize: number().int().optional(),
  chunksTotal: number().int().optional(),
  received: array(number().int().min(0)).optional(),
  fileId: string().optional()
});
object({
  ok: boolean(),
  error: string().optional(),
  fileId: string().optional()
});
610
/**
 * POST handler: create a new chunked-upload session.
 * Validates the payload, computes the chunk layout, and persists an
 * RBUploadSession in "uploading" state. Anonymous callers (no userId) are
 * issued a random `uploadKey`; only its SHA-256 hash is stored so later
 * requests can prove ownership without an account.
 */
const initUpload = async (payload, ctx) => {
  const tenantId = getTenantId(ctx);
  if (!tenantId) {
    ctx.res.status(400);
    return { ok: false, error: "tenant_missing" };
  }
  const userId = getUserId(ctx);
  const parsed = initRequestSchema.safeParse(payload ?? {});
  if (!parsed.success) {
    ctx.res.status(400);
    return { ok: false, error: "invalid_payload" };
  }
  const { filename, mimeType, totalSize, isPublic } = parsed.data;
  const chunkSize = getChunkSizeBytes();
  const chunksTotal = Math.ceil(totalSize / chunkSize);
  const ability = buildUploadsAbility(ctx, tenantId);
  const modelCtx = getModelCtx(ctx, tenantId, ability);
  const [UploadSession, UploadChunk] = await Promise.all([
    models.get("RBUploadSession", modelCtx),
    models.get("RBUploadChunk", modelCtx)
  ]);
  await ensureUploadIndexes(UploadSession, UploadChunk);
  const uploadId = new ObjectId().toString();
  const now = Date.now();
  const expiresAt = new Date(now + getSessionTtlMs());
  // Anonymous sessions get a client-held secret; only its hash is persisted.
  const uploadKey = userId ? null : randomBytes(32).toString("base64url");
  const ownerKeyHash = uploadKey ? computeSha256Hex(Buffer.from(uploadKey)) : void 0;
  const sessionDoc = {
    _id: uploadId,
    filename,
    mimeType,
    totalSize,
    chunkSize,
    chunksTotal,
    status: "uploading",
    createdAt: new Date(now),
    expiresAt
  };
  if (userId) sessionDoc.userId = userId;
  if (ownerKeyHash) sessionDoc.ownerKeyHash = ownerKeyHash;
  if (typeof isPublic === "boolean") sessionDoc.isPublic = isPublic;
  await UploadSession.create(sessionDoc);
  const response = { ok: true, uploadId, chunkSize, chunksTotal };
  if (uploadKey) response.uploadKey = uploadKey;
  return response;
};
675
/**
 * PUT /:uploadId/chunks/:index handler.
 * Validates and upserts one raw chunk (the octet-stream body buffered by
 * rawBodyParser) for an active upload session. Chunks must be exactly
 * chunkSize bytes, except the last, which carries the remainder. An optional
 * X-Chunk-SHA256 header enables integrity verification. Upsert makes retries
 * of the same chunk idempotent. Responds 204 on success.
 */
const uploadChunk = async (payload, ctx) => {
  const tenantId = getTenantId(ctx);
  if (!tenantId) {
    ctx.res.status(400);
    return { ok: false, error: "tenant_missing" };
  }
  const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
  const indexRaw = String(ctx.req.params?.index ?? "").trim();
  const index = Number(indexRaw);
  // Index must parse to a non-negative integer ("1.5", "", "abc" all fail).
  if (!uploadId || !Number.isInteger(index) || index < 0) {
    ctx.res.status(400);
    return { ok: false, error: "invalid_chunk_ref" };
  }
  const ability = buildUploadsAbility(ctx, tenantId);
  const modelCtx = getModelCtx(ctx, tenantId, ability);
  const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
  if (!ability.can("update", "RBUploadSession")) {
    ctx.res.status(401);
    return { ok: false, error: "unauthorized" };
  }
  // Lookup is scoped by the ability's access query, not just _id.
  const session = await UploadSession.findOne({
    $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")]
  }).lean();
  if (!session) {
    ctx.res.status(404);
    return { ok: false, error: "not_found" };
  }
  if (session.status !== "uploading") {
    ctx.res.status(409);
    return { ok: false, error: "not_uploading" };
  }
  if (index >= session.chunksTotal) {
    ctx.res.status(400);
    return { ok: false, error: "index_out_of_range" };
  }
  const data = toBufferPayload(payload);
  if (!data) {
    ctx.res.status(400);
    return { ok: false, error: "invalid_body" };
  }
  // Every chunk is full-size except the last, which is the remainder.
  const expectedSize = index === session.chunksTotal - 1 ? session.totalSize - session.chunkSize * (session.chunksTotal - 1) : session.chunkSize;
  if (data.length > expectedSize) {
    ctx.res.status(413);
    return { ok: false, error: "chunk_too_large" };
  }
  if (data.length !== expectedSize) {
    ctx.res.status(400);
    return { ok: false, error: "invalid_chunk_size" };
  }
  // Hash is computed only when the client supplied a checksum header.
  const checksumHeader = ctx.req.get("X-Chunk-SHA256");
  const sha256 = checksumHeader ? computeSha256Hex(data) : void 0;
  if (checksumHeader) {
    const expectedSha256 = normalizeSha256Hex(checksumHeader);
    if (sha256 !== expectedSha256) {
      ctx.res.status(400);
      return { ok: false, error: "checksum_mismatch" };
    }
  }
  await ensureUploadIndexes(UploadSession, UploadChunk);
  // Upsert keyed on (uploadId, index): re-sending a chunk overwrites it.
  // Chunk expiry mirrors the session's expiresAt.
  await UploadChunk.updateOne({
    uploadId,
    index
  }, {
    $set: {
      uploadId,
      index,
      data,
      size: data.length,
      sha256,
      expiresAt: session.expiresAt
    },
    $setOnInsert: {
      createdAt: /* @__PURE__ */ new Date()
    }
  }, {
    upsert: true
  });
  ctx.res.status(204);
  return { ok: true };
};
789
/**
 * Middleware factory that buffers an application/octet-stream request body
 * into `req.body` (a Buffer), enforcing a hard byte limit and an optional
 * per-client token-bucket throttle. Requests with any other content type
 * pass straight through to `next()` untouched. Oversized bodies get an
 * immediate 413 response and the request is destroyed (next() never runs).
 */
const rawBodyParser = ({ limitBytes, maxClientBytesPerSecond }) => {
  return (req, res, next) => {
    const contentType = typeof req?.headers?.["content-type"] === "string" ? String(req.headers["content-type"]) : "";
    if (!contentType.includes("application/octet-stream")) {
      next();
      return;
    }
    const rateBytesPerSecond = typeof maxClientBytesPerSecond === "number" && maxClientBytesPerSecond > 0 ? maxClientBytesPerSecond : null;
    const chunks = [];
    let total = 0;
    let done = false;
    let paused = false;
    let throttleTimeout = null;
    const detachListeners = () => {
      req.off("data", handleData);
      req.off("end", handleEnd);
      req.off("error", handleError);
      req.off("aborted", handleAborted);
      if (throttleTimeout) {
        clearTimeout(throttleTimeout);
        throttleTimeout = null;
      }
    };
    const finish = (error) => {
      if (done) return;
      done = true;
      detachListeners();
      if (error) {
        next(error);
        return;
      }
      req.body = Buffer.concat(chunks, total);
      next();
    };
    const handleData = (chunk) => {
      if (done) return;
      const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
      total += buffer.length;
      if (total > limitBytes) {
        // Over the hard limit: tear the request down and answer 413 directly.
        done = true;
        detachListeners();
        req.destroy();
        res.status(413).json({ ok: false, error: "chunk_too_large" });
        return;
      }
      chunks.push(buffer);
      if (!rateBytesPerSecond) return;
      // Throttle: spend this chunk against the client's token bucket and,
      // when over budget, pause the request for the suggested wait.
      const now = Date.now();
      const state = getClientRateState(getClientKey(req), rateBytesPerSecond, now);
      const waitMs = consumeRateBudget(state, buffer.length, rateBytesPerSecond, now);
      if (waitMs > 0 && !paused) {
        paused = true;
        req.pause();
        throttleTimeout = setTimeout(() => {
          throttleTimeout = null;
          paused = false;
          if (done) return;
          try {
            req.resume();
          } catch {
            // stream may already be gone; ignore
          }
        }, waitMs);
      }
    };
    const handleEnd = () => finish();
    const handleError = (err) => finish(err);
    const handleAborted = () => finish(new Error("request_aborted"));
    req.on("data", handleData);
    req.on("end", handleEnd);
    req.on("error", handleError);
    req.on("aborted", handleAborted);
  };
};
869
// ---- Per-client upload throttling (token bucket) ----
// A client may burst up to MAX_BURST_SECONDS worth of bytes instantly.
const MAX_BURST_SECONDS = 1;
// Buckets idle this long are eligible for eviction.
const STALE_CLIENT_MS = 15 * 60 * 1e3;
const clientRateStates = /* @__PURE__ */ new Map();
let lastCleanupMs = 0;
/** Best identifier for the remote client: clientIp, then ip, then "unknown". */
const getClientKey = (req) => {
  for (const candidate of [req?.clientIp, req?.ip]) {
    if (typeof candidate === "string" && candidate.trim()) {
      return candidate.trim();
    }
  }
  return "unknown";
};
/** Evict stale buckets, at most once a minute and only when the map is large. */
const maybeCleanupStates = (now) => {
  if (now - lastCleanupMs < 6e4) return;
  lastCleanupMs = now;
  if (clientRateStates.size < 2e3) return;
  for (const [key, state] of clientRateStates) {
    if (now - state.lastSeenMs > STALE_CLIENT_MS) {
      clientRateStates.delete(key);
    }
  }
};
/**
 * Fetch or create the token bucket for `key`, clamping existing tokens to
 * the current capacity (in case the configured rate dropped).
 */
const getClientRateState = (key, rateBytesPerSecond, now) => {
  maybeCleanupStates(now);
  const capacity = rateBytesPerSecond * MAX_BURST_SECONDS;
  const state = clientRateStates.get(key);
  if (state) {
    state.lastSeenMs = now;
    state.tokens = Math.min(capacity, state.tokens);
    return state;
  }
  const created = { tokens: capacity, lastRefillMs: now, lastSeenMs: now };
  clientRateStates.set(key, created);
  return created;
};
/**
 * Refill the bucket for elapsed time, spend `bytes`, and return how many
 * milliseconds the caller should pause (0 when within budget). Tokens may
 * go negative; the deficit converts to the wait time.
 */
const consumeRateBudget = (state, bytes, rateBytesPerSecond, now) => {
  const capacity = rateBytesPerSecond * MAX_BURST_SECONDS;
  const elapsedMs = Math.max(0, now - state.lastRefillMs);
  if (elapsedMs > 0) {
    state.tokens = Math.min(capacity, state.tokens + elapsedMs * rateBytesPerSecond / 1e3);
    state.lastRefillMs = now;
  }
  state.tokens -= bytes;
  return state.tokens >= 0 ? 0 : Math.ceil(-state.tokens / rateBytesPerSecond * 1e3);
};
917
/**
 * Mount the chunked file-upload API on `api`.
 * The raw octet-stream body parser is installed under the InitRoute prefix
 * (so it also covers the chunk subroutes), sized from the configured chunk
 * size and throttled by the configured per-client rate.
 */
const handler = (api) => {
  const chunkSizeBytes = getChunkSizeBytes();
  const bodyParser = rawBodyParser({
    limitBytes: getRawBodyLimitBytes(chunkSizeBytes),
    maxClientBytesPerSecond: getMaxClientUploadBytesPerSecond()
  });
  api.use(InitRoute, bodyParser);
  api.post(InitRoute, initUpload);
  api.put(ChunkRoute, uploadChunk);
  api.get(StatusRoute, getStatus);
  api.post(CompleteRoute, completeUpload);
};
928
+ export {
929
+ handler as default
930
+ };
931
+ //# sourceMappingURL=handler-ClQF4MOn.js.map