@rpcbase/server 0.524.0 → 0.526.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/dist/email-Dzauaq11.js +12449 -0
  2. package/dist/email-Dzauaq11.js.map +1 -0
  3. package/dist/handler-BLwgdQv-.js +544 -0
  4. package/dist/handler-BLwgdQv-.js.map +1 -0
  5. package/dist/handler-Cq-OJ0Rf.js +182 -0
  6. package/dist/handler-Cq-OJ0Rf.js.map +1 -0
  7. package/dist/handler-Cq6MsoD4.js +124 -0
  8. package/dist/handler-Cq6MsoD4.js.map +1 -0
  9. package/dist/handler-Cx_ZP_NB.js +749 -0
  10. package/dist/handler-Cx_ZP_NB.js.map +1 -0
  11. package/dist/index.js +4783 -4935
  12. package/dist/index.js.map +1 -1
  13. package/dist/notifications.js +134 -199
  14. package/dist/notifications.js.map +1 -1
  15. package/dist/queryExecutor-DYVlCvns.js +295 -0
  16. package/dist/queryExecutor-DYVlCvns.js.map +1 -0
  17. package/dist/render_resend-CQb8_8G7.js +7 -0
  18. package/dist/render_resend-CQb8_8G7.js.map +1 -0
  19. package/dist/rts/index.js +581 -725
  20. package/dist/rts/index.js.map +1 -1
  21. package/dist/schemas-BR3K5Luo.js +3824 -0
  22. package/dist/schemas-BR3K5Luo.js.map +1 -0
  23. package/dist/shared-BfMSZm2P.js +87 -0
  24. package/dist/shared-BfMSZm2P.js.map +1 -0
  25. package/dist/ssrMiddleware.d.ts +1 -1
  26. package/dist/uploads.js +71 -84
  27. package/dist/uploads.js.map +1 -1
  28. package/package.json +9 -9
  29. package/dist/email-DK8uUU4X.js +0 -8045
  30. package/dist/email-DK8uUU4X.js.map +0 -1
  31. package/dist/handler-0rPClEv4.js +0 -663
  32. package/dist/handler-0rPClEv4.js.map +0 -1
  33. package/dist/handler-3uwH4f67.js +0 -924
  34. package/dist/handler-3uwH4f67.js.map +0 -1
  35. package/dist/handler-BsauvgjA.js +0 -153
  36. package/dist/handler-BsauvgjA.js.map +0 -1
  37. package/dist/handler-DnSJAQ_B.js +0 -203
  38. package/dist/handler-DnSJAQ_B.js.map +0 -1
  39. package/dist/queryExecutor-Bg1GGL3j.js +0 -407
  40. package/dist/queryExecutor-Bg1GGL3j.js.map +0 -1
  41. package/dist/render_resend_false-MiC__Smr.js +0 -6
  42. package/dist/render_resend_false-MiC__Smr.js.map +0 -1
  43. package/dist/schemas-Cjdjgehl.js +0 -4225
  44. package/dist/schemas-Cjdjgehl.js.map +0 -1
  45. package/dist/shared-BqZiSOmf.js +0 -111
  46. package/dist/shared-BqZiSOmf.js.map +0 -1
@@ -1,924 +0,0 @@
1
- import { models, getTenantFilesystemDb } from "@rpcbase/db";
2
- import { GridFSBucket, ObjectId } from "mongodb";
3
- import { runUploadPostProcessors } from "./uploads.js";
4
- import { JSDOM } from "jsdom";
5
- import createDOMPurify from "dompurify";
6
- import { g as getTenantId, b as buildUploadsAbility, a as getModelCtx, c as getUploadSessionAccessQuery, e as ensureUploadIndexes, d as getBucketName, f as getUserId, h as getChunkSizeBytes, i as getSessionTtlMs, j as computeSha256Hex, t as toBufferPayload, n as normalizeSha256Hex, k as getMaxClientUploadBytesPerSecond, l as getRawBodyLimitBytes } from "./shared-BqZiSOmf.js";
7
- import { randomBytes } from "node:crypto";
8
- import { o as object, n as number, b as boolean, s as string, a as array, _ as _enum } from "./schemas-Cjdjgehl.js";
9
- const MAX_SVG_BYTES = 128 * 1024;
10
- const window = new JSDOM("").window;
11
- const DOMPurify = createDOMPurify(window);
12
- const normalizeForSniff = (raw) => raw.replace(/^\uFEFF/, "").trimStart();
13
- const looksLikeSvgText = (text) => {
14
- const normalized = normalizeForSniff(text);
15
- if (!normalized.startsWith("<")) return false;
16
- return /<svg(?:\s|>)/i.test(normalized);
17
- };
18
- const looksLikeSvg = (sniff) => looksLikeSvgText(sniff.toString("utf8"));
19
- const sanitizeSvg = (svg) => DOMPurify.sanitize(svg, {
20
- USE_PROFILES: {
21
- svg: true,
22
- svgFilters: true
23
- }
24
- });
25
- const sanitizeSvgProcessor = {
26
- id: "sanitize-svg",
27
- maxBytes: MAX_SVG_BYTES,
28
- match: ({
29
- sniff
30
- }) => looksLikeSvg(sniff),
31
- process: (data) => {
32
- if (data.length > MAX_SVG_BYTES) {
33
- throw new Error("svg_too_large");
34
- }
35
- const svgText = data.toString("utf8");
36
- if (!looksLikeSvgText(svgText)) {
37
- throw new Error("svg_invalid");
38
- }
39
- const sanitized = sanitizeSvg(svgText);
40
- if (!sanitized.trim() || !looksLikeSvgText(sanitized)) {
41
- throw new Error("svg_sanitize_failed");
42
- }
43
- const sanitizedBuffer = Buffer.from(sanitized, "utf8");
44
- if (sanitizedBuffer.length > MAX_SVG_BYTES) {
45
- throw new Error("svg_too_large");
46
- }
47
- return {
48
- data: sanitizedBuffer,
49
- mimeType: "image/svg+xml"
50
- };
51
- }
52
- };
53
- const uploadProcessors = Object.freeze([sanitizeSvgProcessor]);
54
- const getMaxUploadProcessorBytes = () => uploadProcessors.reduce((max, processor) => Math.max(max, processor.maxBytes), 0);
55
- const selectUploadProcessors = (ctx) => uploadProcessors.filter((processor) => processor.match(ctx));
56
- const applyUploadProcessors = async (data, ctx) => {
57
- let currentData = data;
58
- let currentMimeType = ctx.clientMimeType;
59
- const applied = [];
60
- for (const processor of uploadProcessors) {
61
- const processorCtx = {
62
- filename: ctx.filename,
63
- clientMimeType: currentMimeType,
64
- totalSize: currentData.length,
65
- sniff: currentData
66
- };
67
- if (!processor.match(processorCtx)) continue;
68
- if (currentData.length > processor.maxBytes) {
69
- throw new Error("processor_input_too_large");
70
- }
71
- const result = await processor.process(currentData, processorCtx);
72
- currentData = result.data;
73
- if (typeof result.mimeType === "string" && result.mimeType.trim()) {
74
- currentMimeType = result.mimeType.trim();
75
- }
76
- applied.push(processor.id);
77
- }
78
- return {
79
- data: currentData,
80
- mimeType: currentMimeType,
81
- applied
82
- };
83
- };
84
- const waitForStreamFinished = async (stream) => new Promise((resolve, reject) => {
85
- stream.once("finish", resolve);
86
- stream.once("error", reject);
87
- });
88
- const writeToStream = async (stream, chunk) => {
89
- const ok = stream.write(chunk);
90
- if (ok) return;
91
- await new Promise((resolve, reject) => {
92
- const onDrain = () => {
93
- cleanup();
94
- resolve();
95
- };
96
- const onError = (error) => {
97
- cleanup();
98
- reject(error);
99
- };
100
- const cleanup = () => {
101
- stream.off("drain", onDrain);
102
- stream.off("error", onError);
103
- };
104
- stream.on("drain", onDrain);
105
- stream.on("error", onError);
106
- });
107
- };
108
- const abortUploadStream = async (stream) => {
109
- if (!stream) return;
110
- if (typeof stream.abort === "function") {
111
- try {
112
- await stream.abort();
113
- return;
114
- } catch {
115
- }
116
- }
117
- try {
118
- ;
119
- stream.destroy?.();
120
- } catch {
121
- }
122
- };
123
- const completeUpload = async (_payload, ctx) => {
124
- const tenantId = getTenantId(ctx);
125
- if (!tenantId) {
126
- ctx.res.status(400);
127
- return {
128
- ok: false,
129
- error: "tenant_missing"
130
- };
131
- }
132
- const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
133
- if (!uploadId) {
134
- ctx.res.status(400);
135
- return {
136
- ok: false,
137
- error: "invalid_upload_id"
138
- };
139
- }
140
- const ability = buildUploadsAbility(ctx, tenantId);
141
- const modelCtx = getModelCtx(ctx, tenantId, ability);
142
- const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
143
- if (!ability.can("update", "RBUploadSession")) {
144
- ctx.res.status(401);
145
- return {
146
- ok: false,
147
- error: "unauthorized"
148
- };
149
- }
150
- const existing = await UploadSession.findOne({
151
- $and: [{
152
- _id: uploadId
153
- }, getUploadSessionAccessQuery(ability, "read")]
154
- }).lean();
155
- if (!existing) {
156
- ctx.res.status(404);
157
- return {
158
- ok: false,
159
- error: "not_found"
160
- };
161
- }
162
- if (existing.status === "done" && existing.fileId) {
163
- return {
164
- ok: true,
165
- fileId: existing.fileId
166
- };
167
- }
168
- const locked = await UploadSession.findOneAndUpdate({
169
- $and: [{
170
- _id: uploadId
171
- }, {
172
- status: "uploading"
173
- }, getUploadSessionAccessQuery(ability, "update")]
174
- }, {
175
- $set: {
176
- status: "assembling"
177
- },
178
- $unset: {
179
- error: ""
180
- }
181
- }, {
182
- returnDocument: "after"
183
- }).lean();
184
- if (!locked) {
185
- ctx.res.status(409);
186
- return {
187
- ok: false,
188
- error: "not_uploading"
189
- };
190
- }
191
- await ensureUploadIndexes(UploadSession, UploadChunk);
192
- const fsDb = await getTenantFilesystemDb(tenantId);
193
- const nativeDb = fsDb.db;
194
- if (!nativeDb) {
195
- await UploadSession.updateOne({
196
- $and: [{
197
- _id: uploadId
198
- }, getUploadSessionAccessQuery(ability, "update")]
199
- }, {
200
- $set: {
201
- status: "error",
202
- error: "filesystem_db_unavailable"
203
- }
204
- });
205
- ctx.res.status(500);
206
- return {
207
- ok: false,
208
- error: "assembly_failed"
209
- };
210
- }
211
- const bucketName = getBucketName();
212
- const bucket = new GridFSBucket(nativeDb, {
213
- bucketName
214
- });
215
- const lockedUserId = typeof locked.userId === "string" ? locked.userId : void 0;
216
- const maxProcessorBytes = getMaxUploadProcessorBytes();
217
- const shouldBufferForProcessing = locked.totalSize <= maxProcessorBytes;
218
- const declaredMimeType = locked.mimeType.trim().toLowerCase();
219
- const declaredSvg = declaredMimeType === "image/svg+xml" || locked.filename.trim().toLowerCase().endsWith(".svg");
220
- let uploadStream = null;
221
- let finalMimeType = locked.mimeType;
222
- let inlineProcessors = [];
223
- let finalMetadata = {
224
- uploadId,
225
- tenantId,
226
- mimeType: locked.mimeType,
227
- totalSize: locked.totalSize,
228
- ...typeof locked.isPublic === "boolean" ? {
229
- isPublic: locked.isPublic
230
- } : {},
231
- ...typeof locked.ownerKeyHash === "string" ? {
232
- ownerKeyHash: locked.ownerKeyHash
233
- } : {},
234
- ...lockedUserId ? {
235
- userId: lockedUserId
236
- } : {}
237
- };
238
- try {
239
- if (!shouldBufferForProcessing && declaredSvg) {
240
- throw new Error("svg_too_large");
241
- }
242
- const cursor = UploadChunk.find({
243
- uploadId
244
- }).sort({
245
- index: 1
246
- }).cursor();
247
- let expectedIndex = 0;
248
- const chunks = [];
249
- let bufferedBytes = 0;
250
- const pendingChunks = [];
251
- const sniffParts = [];
252
- let sniffBytes = 0;
253
- try {
254
- for await (const chunkDoc of cursor) {
255
- if (chunkDoc.index !== expectedIndex) {
256
- throw new Error("missing_chunks");
257
- }
258
- const chunk = chunkDoc.data;
259
- if (shouldBufferForProcessing) {
260
- chunks.push(chunk);
261
- bufferedBytes += chunk.length;
262
- } else if (!uploadStream) {
263
- pendingChunks.push(chunk);
264
- if (sniffBytes < maxProcessorBytes) {
265
- const slice = chunk.subarray(0, Math.min(chunk.length, maxProcessorBytes - sniffBytes));
266
- if (slice.length) {
267
- sniffParts.push(slice);
268
- sniffBytes += slice.length;
269
- }
270
- }
271
- if (sniffBytes >= maxProcessorBytes) {
272
- const sniff = Buffer.concat(sniffParts, sniffBytes);
273
- const processors = selectUploadProcessors({
274
- filename: locked.filename,
275
- clientMimeType: locked.mimeType,
276
- totalSize: locked.totalSize,
277
- sniff
278
- });
279
- if (processors.length) {
280
- throw new Error("svg_too_large");
281
- }
282
- finalMetadata = {
283
- uploadId,
284
- tenantId,
285
- mimeType: locked.mimeType,
286
- totalSize: locked.totalSize,
287
- ...typeof locked.isPublic === "boolean" ? {
288
- isPublic: locked.isPublic
289
- } : {},
290
- ...typeof locked.ownerKeyHash === "string" ? {
291
- ownerKeyHash: locked.ownerKeyHash
292
- } : {},
293
- ...lockedUserId ? {
294
- userId: lockedUserId
295
- } : {}
296
- };
297
- uploadStream = bucket.openUploadStream(locked.filename, {
298
- metadata: finalMetadata
299
- });
300
- for (const pending of pendingChunks) {
301
- await writeToStream(uploadStream, pending);
302
- }
303
- pendingChunks.length = 0;
304
- }
305
- } else {
306
- await writeToStream(uploadStream, chunk);
307
- }
308
- expectedIndex += 1;
309
- }
310
- } finally {
311
- try {
312
- await cursor.close();
313
- } catch {
314
- }
315
- }
316
- if (expectedIndex !== locked.chunksTotal) {
317
- throw new Error("missing_chunks");
318
- }
319
- if (shouldBufferForProcessing) {
320
- const assembled = Buffer.concat(chunks, bufferedBytes);
321
- const {
322
- data: processed,
323
- mimeType: processedMimeType,
324
- applied
325
- } = await applyUploadProcessors(assembled, {
326
- filename: locked.filename,
327
- clientMimeType: locked.mimeType
328
- });
329
- finalMimeType = processedMimeType;
330
- inlineProcessors = applied;
331
- finalMetadata = {
332
- uploadId,
333
- tenantId,
334
- mimeType: processedMimeType,
335
- totalSize: locked.totalSize,
336
- ...applied.length ? {
337
- processors: applied,
338
- processedSize: processed.length
339
- } : {},
340
- ...typeof locked.isPublic === "boolean" ? {
341
- isPublic: locked.isPublic
342
- } : {},
343
- ...typeof locked.ownerKeyHash === "string" ? {
344
- ownerKeyHash: locked.ownerKeyHash
345
- } : {},
346
- ...lockedUserId ? {
347
- userId: lockedUserId
348
- } : {}
349
- };
350
- uploadStream = bucket.openUploadStream(locked.filename, {
351
- metadata: finalMetadata
352
- });
353
- const finished = waitForStreamFinished(uploadStream);
354
- uploadStream.end(processed);
355
- await finished;
356
- } else {
357
- if (!uploadStream) {
358
- const sniff = Buffer.concat(sniffParts, sniffBytes);
359
- const processors = selectUploadProcessors({
360
- filename: locked.filename,
361
- clientMimeType: locked.mimeType,
362
- totalSize: locked.totalSize,
363
- sniff
364
- });
365
- if (processors.length) {
366
- throw new Error("svg_too_large");
367
- }
368
- finalMetadata = {
369
- uploadId,
370
- tenantId,
371
- mimeType: locked.mimeType,
372
- totalSize: locked.totalSize,
373
- ...typeof locked.isPublic === "boolean" ? {
374
- isPublic: locked.isPublic
375
- } : {},
376
- ...typeof locked.ownerKeyHash === "string" ? {
377
- ownerKeyHash: locked.ownerKeyHash
378
- } : {},
379
- ...lockedUserId ? {
380
- userId: lockedUserId
381
- } : {}
382
- };
383
- uploadStream = bucket.openUploadStream(locked.filename, {
384
- metadata: finalMetadata
385
- });
386
- for (const pending of pendingChunks) {
387
- await writeToStream(uploadStream, pending);
388
- }
389
- pendingChunks.length = 0;
390
- }
391
- const finished = waitForStreamFinished(uploadStream);
392
- uploadStream.end();
393
- await finished;
394
- }
395
- const fileId = String(uploadStream.id ?? "");
396
- if (!fileId) {
397
- throw new Error("missing_file_id");
398
- }
399
- await UploadSession.updateOne({
400
- $and: [{
401
- _id: uploadId
402
- }, getUploadSessionAccessQuery(ability, "update")]
403
- }, {
404
- $set: {
405
- status: "done",
406
- fileId
407
- },
408
- $unset: {
409
- error: ""
410
- }
411
- });
412
- await runUploadPostProcessors({
413
- tenantId,
414
- uploadId,
415
- fileId,
416
- filename: locked.filename,
417
- mimeType: finalMimeType,
418
- clientMimeType: locked.mimeType,
419
- totalSize: locked.totalSize,
420
- ...typeof locked.isPublic === "boolean" ? {
421
- isPublic: locked.isPublic
422
- } : {},
423
- ...typeof locked.ownerKeyHash === "string" ? {
424
- ownerKeyHash: locked.ownerKeyHash
425
- } : {},
426
- ...lockedUserId ? {
427
- userId: lockedUserId
428
- } : {},
429
- inlineProcessors,
430
- metadata: finalMetadata
431
- });
432
- try {
433
- await UploadChunk.deleteMany({
434
- uploadId
435
- });
436
- } catch {
437
- }
438
- return {
439
- ok: true,
440
- fileId
441
- };
442
- } catch (error) {
443
- const message = error instanceof Error ? error.message : String(error);
444
- await abortUploadStream(uploadStream);
445
- if (message === "missing_chunks") {
446
- await UploadSession.updateOne({
447
- $and: [{
448
- _id: uploadId
449
- }, getUploadSessionAccessQuery(ability, "update")]
450
- }, {
451
- $set: {
452
- status: "uploading"
453
- }
454
- });
455
- ctx.res.status(409);
456
- return {
457
- ok: false,
458
- error: "missing_chunks"
459
- };
460
- }
461
- if (message === "svg_too_large") {
462
- await UploadSession.updateOne({
463
- $and: [{
464
- _id: uploadId
465
- }, getUploadSessionAccessQuery(ability, "update")]
466
- }, {
467
- $set: {
468
- status: "error",
469
- error: message
470
- }
471
- });
472
- ctx.res.status(413);
473
- return {
474
- ok: false,
475
- error: message
476
- };
477
- }
478
- if (message === "svg_invalid" || message === "svg_sanitize_failed") {
479
- await UploadSession.updateOne({
480
- $and: [{
481
- _id: uploadId
482
- }, getUploadSessionAccessQuery(ability, "update")]
483
- }, {
484
- $set: {
485
- status: "error",
486
- error: message
487
- }
488
- });
489
- ctx.res.status(400);
490
- return {
491
- ok: false,
492
- error: message
493
- };
494
- }
495
- await UploadSession.updateOne({
496
- $and: [{
497
- _id: uploadId
498
- }, getUploadSessionAccessQuery(ability, "update")]
499
- }, {
500
- $set: {
501
- status: "error",
502
- error: message
503
- }
504
- });
505
- ctx.res.status(500);
506
- return {
507
- ok: false,
508
- error: "assembly_failed"
509
- };
510
- }
511
- };
512
- const getStatus = async (_payload, ctx) => {
513
- const tenantId = getTenantId(ctx);
514
- if (!tenantId) {
515
- ctx.res.status(400);
516
- return {
517
- ok: false,
518
- error: "tenant_missing"
519
- };
520
- }
521
- const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
522
- if (!uploadId) {
523
- ctx.res.status(400);
524
- return {
525
- ok: false,
526
- error: "invalid_upload_id"
527
- };
528
- }
529
- const ability = buildUploadsAbility(ctx, tenantId);
530
- const modelCtx = getModelCtx(ctx, tenantId, ability);
531
- const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
532
- if (!ability.can("read", "RBUploadSession")) {
533
- ctx.res.status(401);
534
- return {
535
- ok: false,
536
- error: "unauthorized"
537
- };
538
- }
539
- const session = await UploadSession.findOne({
540
- $and: [{
541
- _id: uploadId
542
- }, getUploadSessionAccessQuery(ability, "read")]
543
- }).lean();
544
- if (!session) {
545
- ctx.res.status(404);
546
- return {
547
- ok: false,
548
- error: "not_found"
549
- };
550
- }
551
- const receivedDocs = await UploadChunk.find({
552
- uploadId
553
- }, {
554
- index: 1,
555
- _id: 0
556
- }).sort({
557
- index: 1
558
- }).lean();
559
- const received = receivedDocs.map((doc) => typeof doc.index === "number" ? doc.index : -1).filter((n) => Number.isInteger(n) && n >= 0);
560
- return {
561
- ok: true,
562
- status: session.status,
563
- chunkSize: session.chunkSize,
564
- chunksTotal: session.chunksTotal,
565
- received,
566
- ...session.fileId ? {
567
- fileId: session.fileId
568
- } : {}
569
- };
570
- };
571
- const InitRoute = "/api/rb/file-uploads";
572
- const ChunkRoute = "/api/rb/file-uploads/:uploadId/chunks/:index";
573
- const StatusRoute = "/api/rb/file-uploads/:uploadId/status";
574
- const CompleteRoute = "/api/rb/file-uploads/:uploadId/complete";
575
- const initRequestSchema = object({
576
- filename: string().min(1),
577
- mimeType: string().min(1),
578
- isPublic: boolean().optional(),
579
- totalSize: number().int().min(1)
580
- });
581
- object({
582
- ok: boolean(),
583
- error: string().optional(),
584
- uploadId: string().optional(),
585
- uploadKey: string().optional(),
586
- chunkSize: number().int().optional(),
587
- chunksTotal: number().int().optional()
588
- });
589
- object({
590
- ok: boolean(),
591
- error: string().optional(),
592
- status: _enum(["uploading", "assembling", "done", "error"]).optional(),
593
- chunkSize: number().int().optional(),
594
- chunksTotal: number().int().optional(),
595
- received: array(number().int().min(0)).optional(),
596
- fileId: string().optional()
597
- });
598
- object({
599
- ok: boolean(),
600
- error: string().optional(),
601
- fileId: string().optional()
602
- });
603
- const initUpload = async (payload, ctx) => {
604
- const tenantId = getTenantId(ctx);
605
- if (!tenantId) {
606
- ctx.res.status(400);
607
- return {
608
- ok: false,
609
- error: "tenant_missing"
610
- };
611
- }
612
- const userId = getUserId(ctx);
613
- const parsed = initRequestSchema.safeParse(payload ?? {});
614
- if (!parsed.success) {
615
- ctx.res.status(400);
616
- return {
617
- ok: false,
618
- error: "invalid_payload"
619
- };
620
- }
621
- const chunkSize = getChunkSizeBytes();
622
- const {
623
- filename,
624
- mimeType,
625
- totalSize,
626
- isPublic
627
- } = parsed.data;
628
- const chunksTotal = Math.ceil(totalSize / chunkSize);
629
- const ability = buildUploadsAbility(ctx, tenantId);
630
- const modelCtx = getModelCtx(ctx, tenantId, ability);
631
- const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
632
- await ensureUploadIndexes(UploadSession, UploadChunk);
633
- const uploadId = new ObjectId().toString();
634
- const now = Date.now();
635
- const expiresAt = new Date(now + getSessionTtlMs());
636
- const uploadKey = userId ? null : randomBytes(32).toString("base64url");
637
- const ownerKeyHash = uploadKey ? computeSha256Hex(Buffer.from(uploadKey)) : void 0;
638
- await UploadSession.create({
639
- _id: uploadId,
640
- ...userId ? {
641
- userId
642
- } : {},
643
- ...ownerKeyHash ? {
644
- ownerKeyHash
645
- } : {},
646
- filename,
647
- mimeType,
648
- ...typeof isPublic === "boolean" ? {
649
- isPublic
650
- } : {},
651
- totalSize,
652
- chunkSize,
653
- chunksTotal,
654
- status: "uploading",
655
- createdAt: new Date(now),
656
- expiresAt
657
- });
658
- return {
659
- ok: true,
660
- uploadId,
661
- chunkSize,
662
- chunksTotal,
663
- ...uploadKey ? {
664
- uploadKey
665
- } : {}
666
- };
667
- };
668
- const uploadChunk = async (payload, ctx) => {
669
- const tenantId = getTenantId(ctx);
670
- if (!tenantId) {
671
- ctx.res.status(400);
672
- return {
673
- ok: false,
674
- error: "tenant_missing"
675
- };
676
- }
677
- const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
678
- const indexRaw = String(ctx.req.params?.index ?? "").trim();
679
- const index = Number(indexRaw);
680
- if (!uploadId || !Number.isInteger(index) || index < 0) {
681
- ctx.res.status(400);
682
- return {
683
- ok: false,
684
- error: "invalid_chunk_ref"
685
- };
686
- }
687
- const ability = buildUploadsAbility(ctx, tenantId);
688
- const modelCtx = getModelCtx(ctx, tenantId, ability);
689
- const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
690
- if (!ability.can("update", "RBUploadSession")) {
691
- ctx.res.status(401);
692
- return {
693
- ok: false,
694
- error: "unauthorized"
695
- };
696
- }
697
- const session = await UploadSession.findOne({
698
- $and: [{
699
- _id: uploadId
700
- }, getUploadSessionAccessQuery(ability, "update")]
701
- }).lean();
702
- if (!session) {
703
- ctx.res.status(404);
704
- return {
705
- ok: false,
706
- error: "not_found"
707
- };
708
- }
709
- if (session.status !== "uploading") {
710
- ctx.res.status(409);
711
- return {
712
- ok: false,
713
- error: "not_uploading"
714
- };
715
- }
716
- if (index >= session.chunksTotal) {
717
- ctx.res.status(400);
718
- return {
719
- ok: false,
720
- error: "index_out_of_range"
721
- };
722
- }
723
- const data = toBufferPayload(payload);
724
- if (!data) {
725
- ctx.res.status(400);
726
- return {
727
- ok: false,
728
- error: "invalid_body"
729
- };
730
- }
731
- const expectedSize = index === session.chunksTotal - 1 ? session.totalSize - session.chunkSize * (session.chunksTotal - 1) : session.chunkSize;
732
- if (data.length > expectedSize) {
733
- ctx.res.status(413);
734
- return {
735
- ok: false,
736
- error: "chunk_too_large"
737
- };
738
- }
739
- if (data.length !== expectedSize) {
740
- ctx.res.status(400);
741
- return {
742
- ok: false,
743
- error: "invalid_chunk_size"
744
- };
745
- }
746
- const checksumHeader = ctx.req.get("X-Chunk-SHA256");
747
- const sha256 = checksumHeader ? computeSha256Hex(data) : void 0;
748
- if (checksumHeader) {
749
- const expectedSha256 = normalizeSha256Hex(checksumHeader);
750
- if (sha256 !== expectedSha256) {
751
- ctx.res.status(400);
752
- return {
753
- ok: false,
754
- error: "checksum_mismatch"
755
- };
756
- }
757
- }
758
- await ensureUploadIndexes(UploadSession, UploadChunk);
759
- await UploadChunk.updateOne({
760
- uploadId,
761
- index
762
- }, {
763
- $set: {
764
- uploadId,
765
- index,
766
- data,
767
- size: data.length,
768
- sha256,
769
- expiresAt: session.expiresAt
770
- },
771
- $setOnInsert: {
772
- createdAt: /* @__PURE__ */ new Date()
773
- }
774
- }, {
775
- upsert: true
776
- });
777
- ctx.res.status(204);
778
- return {
779
- ok: true
780
- };
781
- };
782
- const rawBodyParser = ({
783
- limitBytes,
784
- maxClientBytesPerSecond
785
- }) => {
786
- return (req, res, next) => {
787
- const contentType = typeof req?.headers?.["content-type"] === "string" ? String(req.headers["content-type"]) : "";
788
- if (!contentType.includes("application/octet-stream")) {
789
- next();
790
- return;
791
- }
792
- let total = 0;
793
- const chunks = [];
794
- let done = false;
795
- let paused = false;
796
- let throttleTimeout = null;
797
- const rateBytesPerSecond = typeof maxClientBytesPerSecond === "number" && maxClientBytesPerSecond > 0 ? maxClientBytesPerSecond : null;
798
- const cleanup = () => {
799
- req.off("data", onData);
800
- req.off("end", onEnd);
801
- req.off("error", onError);
802
- req.off("aborted", onAborted);
803
- if (throttleTimeout) {
804
- clearTimeout(throttleTimeout);
805
- throttleTimeout = null;
806
- }
807
- };
808
- const finish = (error) => {
809
- if (done) return;
810
- done = true;
811
- cleanup();
812
- if (error) {
813
- next(error);
814
- return;
815
- }
816
- req.body = Buffer.concat(chunks, total);
817
- next();
818
- };
819
- const onData = (chunk) => {
820
- if (done) return;
821
- const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
822
- total += buffer.length;
823
- if (total > limitBytes) {
824
- done = true;
825
- cleanup();
826
- req.destroy();
827
- res.status(413).json({
828
- ok: false,
829
- error: "chunk_too_large"
830
- });
831
- return;
832
- }
833
- chunks.push(buffer);
834
- if (!rateBytesPerSecond) return;
835
- const now = Date.now();
836
- const clientKey = getClientKey(req);
837
- const state = getClientRateState(clientKey, rateBytesPerSecond, now);
838
- const waitMs = consumeRateBudget(state, buffer.length, rateBytesPerSecond, now);
839
- if (waitMs > 0 && !paused) {
840
- paused = true;
841
- req.pause();
842
- throttleTimeout = setTimeout(() => {
843
- throttleTimeout = null;
844
- paused = false;
845
- if (done) return;
846
- try {
847
- req.resume();
848
- } catch {
849
- }
850
- }, waitMs);
851
- }
852
- };
853
- const onEnd = () => finish();
854
- const onError = (err) => finish(err);
855
- const onAborted = () => finish(new Error("request_aborted"));
856
- req.on("data", onData);
857
- req.on("end", onEnd);
858
- req.on("error", onError);
859
- req.on("aborted", onAborted);
860
- };
861
- };
862
- const MAX_BURST_SECONDS = 1;
863
- const STALE_CLIENT_MS = 15 * 60 * 1e3;
864
- const clientRateStates = /* @__PURE__ */ new Map();
865
- let lastCleanupMs = 0;
866
- const getClientKey = (req) => {
867
- const rawClientIp = typeof req?.clientIp === "string" ? req.clientIp : "";
868
- if (rawClientIp.trim()) return rawClientIp.trim();
869
- const rawIp = typeof req?.ip === "string" ? req.ip : "";
870
- return rawIp.trim() || "unknown";
871
- };
872
- const maybeCleanupStates = (now) => {
873
- if (now - lastCleanupMs < 6e4) return;
874
- lastCleanupMs = now;
875
- if (clientRateStates.size < 2e3) return;
876
- for (const [key, state] of clientRateStates) {
877
- if (now - state.lastSeenMs > STALE_CLIENT_MS) {
878
- clientRateStates.delete(key);
879
- }
880
- }
881
- };
882
- const getClientRateState = (key, rateBytesPerSecond, now) => {
883
- maybeCleanupStates(now);
884
- const capacity = rateBytesPerSecond * MAX_BURST_SECONDS;
885
- const existing = clientRateStates.get(key);
886
- if (existing) {
887
- existing.lastSeenMs = now;
888
- existing.tokens = Math.min(capacity, existing.tokens);
889
- return existing;
890
- }
891
- const next = {
892
- tokens: capacity,
893
- lastRefillMs: now,
894
- lastSeenMs: now
895
- };
896
- clientRateStates.set(key, next);
897
- return next;
898
- };
899
- const consumeRateBudget = (state, bytes, rateBytesPerSecond, now) => {
900
- const capacity = rateBytesPerSecond * MAX_BURST_SECONDS;
901
- const elapsedMs = Math.max(0, now - state.lastRefillMs);
902
- if (elapsedMs > 0) {
903
- state.tokens = Math.min(capacity, state.tokens + elapsedMs * rateBytesPerSecond / 1e3);
904
- state.lastRefillMs = now;
905
- }
906
- state.tokens -= bytes;
907
- if (state.tokens >= 0) return 0;
908
- return Math.ceil(-state.tokens / rateBytesPerSecond * 1e3);
909
- };
910
- const handler = (api) => {
911
- const chunkSizeBytes = getChunkSizeBytes();
912
- api.use(InitRoute, rawBodyParser({
913
- limitBytes: getRawBodyLimitBytes(chunkSizeBytes),
914
- maxClientBytesPerSecond: getMaxClientUploadBytesPerSecond()
915
- }));
916
- api.post(InitRoute, initUpload);
917
- api.put(ChunkRoute, uploadChunk);
918
- api.get(StatusRoute, getStatus);
919
- api.post(CompleteRoute, completeUpload);
920
- };
921
- export {
922
- handler as default
923
- };
924
- //# sourceMappingURL=handler-3uwH4f67.js.map