@rpcbase/server 0.538.0 → 0.539.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/email-DK8uUU4X.js +8045 -0
- package/dist/email-DK8uUU4X.js.map +1 -0
- package/dist/handler--FFBJMl6.js +153 -0
- package/dist/handler--FFBJMl6.js.map +1 -0
- package/dist/handler-0rPClEv4.js +663 -0
- package/dist/handler-0rPClEv4.js.map +1 -0
- package/dist/handler-COnCnprN.js +203 -0
- package/dist/handler-COnCnprN.js.map +1 -0
- package/dist/handler-ClQF4MOn.js +931 -0
- package/dist/handler-ClQF4MOn.js.map +1 -0
- package/dist/index.js +4988 -4830
- package/dist/index.js.map +1 -1
- package/dist/notifications.js +199 -134
- package/dist/notifications.js.map +1 -1
- package/dist/queryExecutor-Bol_iR8f.js +453 -0
- package/dist/queryExecutor-Bol_iR8f.js.map +1 -0
- package/dist/render_resend_false-MiC__Smr.js +6 -0
- package/dist/render_resend_false-MiC__Smr.js.map +1 -0
- package/dist/rts/index.d.ts +0 -1
- package/dist/rts/index.d.ts.map +1 -1
- package/dist/rts/index.js +1003 -842
- package/dist/rts/index.js.map +1 -1
- package/dist/schemas-Cjdjgehl.js +4225 -0
- package/dist/schemas-Cjdjgehl.js.map +1 -0
- package/dist/shared-nE84Or5W.js +111 -0
- package/dist/shared-nE84Or5W.js.map +1 -0
- package/dist/ssrMiddleware.d.ts +1 -1
- package/dist/uploads.js +99 -84
- package/dist/uploads.js.map +1 -1
- package/package.json +9 -9
- package/dist/email-H8nTAGxe.js +0 -12449
- package/dist/email-H8nTAGxe.js.map +0 -1
- package/dist/handler-BBzEodA0.js +0 -182
- package/dist/handler-BBzEodA0.js.map +0 -1
- package/dist/handler-BLwgdQv-.js +0 -544
- package/dist/handler-BLwgdQv-.js.map +0 -1
- package/dist/handler-CZD5p1Jv.js +0 -28
- package/dist/handler-CZD5p1Jv.js.map +0 -1
- package/dist/handler-Cq6MsoD4.js +0 -124
- package/dist/handler-Cq6MsoD4.js.map +0 -1
- package/dist/handler-DBtnVvP2.js +0 -756
- package/dist/handler-DBtnVvP2.js.map +0 -1
- package/dist/queryExecutor-JadZcQSQ.js +0 -318
- package/dist/queryExecutor-JadZcQSQ.js.map +0 -1
- package/dist/render_resend-DQANggpW.js +0 -7
- package/dist/render_resend-DQANggpW.js.map +0 -1
- package/dist/rts/api/cleanup/handler.d.ts +0 -9
- package/dist/rts/api/cleanup/handler.d.ts.map +0 -1
- package/dist/rts/api/cleanup/index.d.ts +0 -11
- package/dist/rts/api/cleanup/index.d.ts.map +0 -1
- package/dist/schemas-BR3K5Luo.js +0 -3824
- package/dist/schemas-BR3K5Luo.js.map +0 -1
- package/dist/shared-DhZ_rDdo.js +0 -87
- package/dist/shared-DhZ_rDdo.js.map +0 -1
package/dist/handler-DBtnVvP2.js
DELETED
@@ -1,756 +0,0 @@
-import { enqueueUploadPostProcessors } from "./uploads.js";
-import { a as object, i as number, n as array, r as boolean, s as string, t as _enum } from "./schemas-BR3K5Luo.js";
-import { a as getChunkSizeBytes, c as getRawBodyLimitBytes, f as getUploadSessionAccessQuery, h as toBufferPayload, i as getBucketName, l as getSessionTtlMs, m as normalizeSha256Hex, n as computeSha256Hex, o as getMaxClientUploadBytesPerSecond, p as getUserId, r as ensureUploadIndexes, s as getModelCtx, t as buildUploadsAbility, u as getTenantId } from "./shared-DhZ_rDdo.js";
-import { GridFSBucket, ObjectId } from "mongodb";
-import { getTenantFilesystemDb, models } from "@rpcbase/db";
-import { randomBytes } from "node:crypto";
-import { JSDOM } from "jsdom";
-import createDOMPurify from "dompurify";
-//#region src/uploads/api/file-uploads/processors/sanitizeSvg.ts
-var MAX_SVG_BYTES = 128 * 1024;
-var window = new JSDOM("").window;
-var DOMPurify = createDOMPurify(window);
-var normalizeForSniff = (raw) => raw.replace(/^\uFEFF/, "").trimStart();
-var looksLikeSvgText = (text) => {
-const normalized = normalizeForSniff(text);
-if (!normalized.startsWith("<")) return false;
-return /<svg(?:\s|>)/i.test(normalized);
-};
-var looksLikeSvg = (sniff) => looksLikeSvgText(sniff.toString("utf8"));
-var sanitizeSvg = (svg) => DOMPurify.sanitize(svg, { USE_PROFILES: {
-svg: true,
-svgFilters: true
-} });
-//#endregion
-//#region src/uploads/api/file-uploads/processors/index.ts
-var uploadProcessors = Object.freeze([{
-id: "sanitize-svg",
-maxBytes: MAX_SVG_BYTES,
-match: ({ sniff }) => looksLikeSvg(sniff),
-process: (data) => {
-if (data.length > MAX_SVG_BYTES) throw new Error("svg_too_large");
-const svgText = data.toString("utf8");
-if (!looksLikeSvgText(svgText)) throw new Error("svg_invalid");
-const sanitized = sanitizeSvg(svgText);
-if (!sanitized.trim() || !looksLikeSvgText(sanitized)) throw new Error("svg_sanitize_failed");
-const sanitizedBuffer = Buffer.from(sanitized, "utf8");
-if (sanitizedBuffer.length > MAX_SVG_BYTES) throw new Error("svg_too_large");
-return {
-data: sanitizedBuffer,
-mimeType: "image/svg+xml"
-};
-}
-}]);
-var getMaxUploadProcessorBytes = () => uploadProcessors.reduce((max, processor) => Math.max(max, processor.maxBytes), 0);
-var selectUploadProcessors = (ctx) => uploadProcessors.filter((processor) => processor.match(ctx));
-var applyUploadProcessors = async (data, ctx) => {
-let currentData = data;
-let currentMimeType = ctx.clientMimeType;
-const applied = [];
-for (const processor of uploadProcessors) {
-const processorCtx = {
-filename: ctx.filename,
-clientMimeType: currentMimeType,
-totalSize: currentData.length,
-sniff: currentData
-};
-if (!processor.match(processorCtx)) continue;
-if (currentData.length > processor.maxBytes) throw new Error("processor_input_too_large");
-const result = await processor.process(currentData, processorCtx);
-currentData = result.data;
-if (typeof result.mimeType === "string" && result.mimeType.trim()) currentMimeType = result.mimeType.trim();
-applied.push(processor.id);
-}
-return {
-data: currentData,
-mimeType: currentMimeType,
-applied
-};
-};
-//#endregion
-//#region src/uploads/api/file-uploads/handlers/completeUpload.ts
-var waitForStreamFinished = async (stream) => new Promise((resolve, reject) => {
-stream.once("finish", resolve);
-stream.once("error", reject);
-});
-var writeToStream = async (stream, chunk) => {
-if (stream.write(chunk)) return;
-await new Promise((resolve, reject) => {
-const onDrain = () => {
-cleanup();
-resolve();
-};
-const onError = (error) => {
-cleanup();
-reject(error);
-};
-const cleanup = () => {
-stream.off("drain", onDrain);
-stream.off("error", onError);
-};
-stream.on("drain", onDrain);
-stream.on("error", onError);
-});
-};
-var abortUploadStream = async (stream) => {
-if (!stream) return;
-if (typeof stream.abort === "function") try {
-await stream.abort();
-return;
-} catch {}
-try {
-stream.destroy?.();
-} catch {}
-};
-var completeUpload = async (_payload, ctx) => {
-const tenantId = getTenantId(ctx);
-if (!tenantId) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "tenant_missing"
-};
-}
-const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
-if (!uploadId) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "invalid_upload_id"
-};
-}
-const ability = buildUploadsAbility(ctx, tenantId);
-const modelCtx = getModelCtx(ctx, tenantId, ability);
-const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
-if (!ability.can("update", "RBUploadSession")) {
-ctx.res.status(401);
-return {
-ok: false,
-error: "unauthorized"
-};
-}
-const existing = await UploadSession.findOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "read")] }).lean();
-if (!existing) {
-ctx.res.status(404);
-return {
-ok: false,
-error: "not_found"
-};
-}
-if (existing.status === "done" && existing.fileId) return {
-ok: true,
-fileId: existing.fileId
-};
-const locked = await UploadSession.findOneAndUpdate({ $and: [
-{ _id: uploadId },
-{ status: "uploading" },
-getUploadSessionAccessQuery(ability, "update")
-] }, {
-$set: { status: "assembling" },
-$unset: { error: "" }
-}, { returnDocument: "after" }).lean();
-if (!locked) {
-ctx.res.status(409);
-return {
-ok: false,
-error: "not_uploading"
-};
-}
-await ensureUploadIndexes(UploadSession, UploadChunk);
-const nativeDb = (await getTenantFilesystemDb(tenantId)).db;
-if (!nativeDb) {
-await UploadSession.updateOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] }, { $set: {
-status: "error",
-error: "filesystem_db_unavailable"
-} });
-ctx.res.status(500);
-return {
-ok: false,
-error: "assembly_failed"
-};
-}
-const bucket = new GridFSBucket(nativeDb, { bucketName: getBucketName() });
-const lockedUserId = typeof locked.userId === "string" ? locked.userId : void 0;
-const maxProcessorBytes = getMaxUploadProcessorBytes();
-const shouldBufferForProcessing = locked.totalSize <= maxProcessorBytes;
-const declaredSvg = locked.mimeType.trim().toLowerCase() === "image/svg+xml" || locked.filename.trim().toLowerCase().endsWith(".svg");
-let uploadStream = null;
-let finalMimeType = locked.mimeType;
-let inlineProcessors = [];
-let finalMetadata = {
-uploadId,
-tenantId,
-mimeType: locked.mimeType,
-totalSize: locked.totalSize,
-...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
-...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
-...lockedUserId ? { userId: lockedUserId } : {}
-};
-try {
-if (!shouldBufferForProcessing && declaredSvg) throw new Error("svg_too_large");
-const cursor = UploadChunk.find({ uploadId }).sort({ index: 1 }).cursor();
-let expectedIndex = 0;
-const chunks = [];
-let bufferedBytes = 0;
-const pendingChunks = [];
-const sniffParts = [];
-let sniffBytes = 0;
-try {
-for await (const chunkDoc of cursor) {
-if (chunkDoc.index !== expectedIndex) throw new Error("missing_chunks");
-const chunk = chunkDoc.data;
-if (shouldBufferForProcessing) {
-chunks.push(chunk);
-bufferedBytes += chunk.length;
-} else if (!uploadStream) {
-pendingChunks.push(chunk);
-if (sniffBytes < maxProcessorBytes) {
-const slice = chunk.subarray(0, Math.min(chunk.length, maxProcessorBytes - sniffBytes));
-if (slice.length) {
-sniffParts.push(slice);
-sniffBytes += slice.length;
-}
-}
-if (sniffBytes >= maxProcessorBytes) {
-const sniff = Buffer.concat(sniffParts, sniffBytes);
-if (selectUploadProcessors({
-filename: locked.filename,
-clientMimeType: locked.mimeType,
-totalSize: locked.totalSize,
-sniff
-}).length) throw new Error("svg_too_large");
-finalMetadata = {
-uploadId,
-tenantId,
-mimeType: locked.mimeType,
-totalSize: locked.totalSize,
-...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
-...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
-...lockedUserId ? { userId: lockedUserId } : {}
-};
-uploadStream = bucket.openUploadStream(locked.filename, { metadata: finalMetadata });
-for (const pending of pendingChunks) await writeToStream(uploadStream, pending);
-pendingChunks.length = 0;
-}
-} else await writeToStream(uploadStream, chunk);
-expectedIndex += 1;
-}
-} finally {
-try {
-await cursor.close();
-} catch {}
-}
-if (expectedIndex !== locked.chunksTotal) throw new Error("missing_chunks");
-if (shouldBufferForProcessing) {
-const { data: processed, mimeType: processedMimeType, applied } = await applyUploadProcessors(Buffer.concat(chunks, bufferedBytes), {
-filename: locked.filename,
-clientMimeType: locked.mimeType
-});
-finalMimeType = processedMimeType;
-inlineProcessors = applied;
-finalMetadata = {
-uploadId,
-tenantId,
-mimeType: processedMimeType,
-totalSize: locked.totalSize,
-...applied.length ? {
-processors: applied,
-processedSize: processed.length
-} : {},
-...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
-...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
-...lockedUserId ? { userId: lockedUserId } : {}
-};
-uploadStream = bucket.openUploadStream(locked.filename, { metadata: finalMetadata });
-const finished = waitForStreamFinished(uploadStream);
-uploadStream.end(processed);
-await finished;
-} else {
-if (!uploadStream) {
-const sniff = Buffer.concat(sniffParts, sniffBytes);
-if (selectUploadProcessors({
-filename: locked.filename,
-clientMimeType: locked.mimeType,
-totalSize: locked.totalSize,
-sniff
-}).length) throw new Error("svg_too_large");
-finalMetadata = {
-uploadId,
-tenantId,
-mimeType: locked.mimeType,
-totalSize: locked.totalSize,
-...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
-...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
-...lockedUserId ? { userId: lockedUserId } : {}
-};
-uploadStream = bucket.openUploadStream(locked.filename, { metadata: finalMetadata });
-for (const pending of pendingChunks) await writeToStream(uploadStream, pending);
-pendingChunks.length = 0;
-}
-const finished = waitForStreamFinished(uploadStream);
-uploadStream.end();
-await finished;
-}
-const fileId = String(uploadStream.id ?? "");
-if (!fileId) throw new Error("missing_file_id");
-await UploadSession.updateOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] }, {
-$set: {
-status: "done",
-fileId
-},
-$unset: { error: "" }
-});
-await enqueueUploadPostProcessors({
-tenantId,
-uploadId,
-fileId,
-filename: locked.filename,
-mimeType: finalMimeType,
-clientMimeType: locked.mimeType,
-totalSize: locked.totalSize,
-...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
-...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
-...lockedUserId ? { userId: lockedUserId } : {},
-inlineProcessors,
-metadata: finalMetadata
-}).catch((error) => {
-console.error("Upload post processor enqueue failed", {
-tenantId,
-uploadId,
-fileId,
-error
-});
-});
-try {
-await UploadChunk.deleteMany({ uploadId });
-} catch {}
-return {
-ok: true,
-fileId
-};
-} catch (error) {
-const message = error instanceof Error ? error.message : String(error);
-await abortUploadStream(uploadStream);
-if (message === "missing_chunks") {
-await UploadSession.updateOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] }, { $set: { status: "uploading" } });
-ctx.res.status(409);
-return {
-ok: false,
-error: "missing_chunks"
-};
-}
-if (message === "svg_too_large") {
-await UploadSession.updateOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] }, { $set: {
-status: "error",
-error: message
-} });
-ctx.res.status(413);
-return {
-ok: false,
-error: message
-};
-}
-if (message === "svg_invalid" || message === "svg_sanitize_failed") {
-await UploadSession.updateOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] }, { $set: {
-status: "error",
-error: message
-} });
-ctx.res.status(400);
-return {
-ok: false,
-error: message
-};
-}
-await UploadSession.updateOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] }, { $set: {
-status: "error",
-error: message
-} });
-ctx.res.status(500);
-return {
-ok: false,
-error: "assembly_failed"
-};
-}
-};
-//#endregion
-//#region src/uploads/api/file-uploads/handlers/getStatus.ts
-var getStatus = async (_payload, ctx) => {
-const tenantId = getTenantId(ctx);
-if (!tenantId) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "tenant_missing"
-};
-}
-const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
-if (!uploadId) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "invalid_upload_id"
-};
-}
-const ability = buildUploadsAbility(ctx, tenantId);
-const modelCtx = getModelCtx(ctx, tenantId, ability);
-const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
-if (!ability.can("read", "RBUploadSession")) {
-ctx.res.status(401);
-return {
-ok: false,
-error: "unauthorized"
-};
-}
-const session = await UploadSession.findOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "read")] }).lean();
-if (!session) {
-ctx.res.status(404);
-return {
-ok: false,
-error: "not_found"
-};
-}
-const received = (await UploadChunk.find({ uploadId }, {
-index: 1,
-_id: 0
-}).sort({ index: 1 }).lean()).map((doc) => typeof doc.index === "number" ? doc.index : -1).filter((n) => Number.isInteger(n) && n >= 0);
-return {
-ok: true,
-status: session.status,
-chunkSize: session.chunkSize,
-chunksTotal: session.chunksTotal,
-received,
-...session.fileId ? { fileId: session.fileId } : {}
-};
-};
-//#endregion
-//#region src/uploads/api/file-uploads/index.ts
-var InitRoute = "/api/rb/file-uploads";
-var ChunkRoute = "/api/rb/file-uploads/:uploadId/chunks/:index";
-var StatusRoute = "/api/rb/file-uploads/:uploadId/status";
-var CompleteRoute = "/api/rb/file-uploads/:uploadId/complete";
-var initRequestSchema = object({
-filename: string().min(1),
-mimeType: string().min(1),
-isPublic: boolean().optional(),
-totalSize: number().int().min(1)
-});
-object({
-ok: boolean(),
-error: string().optional(),
-uploadId: string().optional(),
-uploadKey: string().optional(),
-chunkSize: number().int().optional(),
-chunksTotal: number().int().optional()
-});
-object({
-ok: boolean(),
-error: string().optional(),
-status: _enum([
-"uploading",
-"assembling",
-"done",
-"error"
-]).optional(),
-chunkSize: number().int().optional(),
-chunksTotal: number().int().optional(),
-received: array(number().int().min(0)).optional(),
-fileId: string().optional()
-});
-object({
-ok: boolean(),
-error: string().optional(),
-fileId: string().optional()
-});
-//#endregion
-//#region src/uploads/api/file-uploads/handlers/initUpload.ts
-var initUpload = async (payload, ctx) => {
-const tenantId = getTenantId(ctx);
-if (!tenantId) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "tenant_missing"
-};
-}
-const userId = getUserId(ctx);
-const parsed = initRequestSchema.safeParse(payload ?? {});
-if (!parsed.success) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "invalid_payload"
-};
-}
-const chunkSize = getChunkSizeBytes();
-const { filename, mimeType, totalSize, isPublic } = parsed.data;
-const chunksTotal = Math.ceil(totalSize / chunkSize);
-const modelCtx = getModelCtx(ctx, tenantId, buildUploadsAbility(ctx, tenantId));
-const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
-await ensureUploadIndexes(UploadSession, UploadChunk);
-const uploadId = new ObjectId().toString();
-const now = Date.now();
-const expiresAt = new Date(now + getSessionTtlMs());
-const uploadKey = userId ? null : randomBytes(32).toString("base64url");
-const ownerKeyHash = uploadKey ? computeSha256Hex(Buffer.from(uploadKey)) : void 0;
-await UploadSession.create({
-_id: uploadId,
-...userId ? { userId } : {},
-...ownerKeyHash ? { ownerKeyHash } : {},
-filename,
-mimeType,
-...typeof isPublic === "boolean" ? { isPublic } : {},
-totalSize,
-chunkSize,
-chunksTotal,
-status: "uploading",
-createdAt: new Date(now),
-expiresAt
-});
-return {
-ok: true,
-uploadId,
-chunkSize,
-chunksTotal,
-...uploadKey ? { uploadKey } : {}
-};
-};
-//#endregion
-//#region src/uploads/api/file-uploads/handlers/uploadChunk.ts
-var uploadChunk = async (payload, ctx) => {
-const tenantId = getTenantId(ctx);
-if (!tenantId) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "tenant_missing"
-};
-}
-const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
-const indexRaw = String(ctx.req.params?.index ?? "").trim();
-const index = Number(indexRaw);
-if (!uploadId || !Number.isInteger(index) || index < 0) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "invalid_chunk_ref"
-};
-}
-const ability = buildUploadsAbility(ctx, tenantId);
-const modelCtx = getModelCtx(ctx, tenantId, ability);
-const [UploadSession, UploadChunk] = await Promise.all([models.get("RBUploadSession", modelCtx), models.get("RBUploadChunk", modelCtx)]);
-if (!ability.can("update", "RBUploadSession")) {
-ctx.res.status(401);
-return {
-ok: false,
-error: "unauthorized"
-};
-}
-const session = await UploadSession.findOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] }).lean();
-if (!session) {
-ctx.res.status(404);
-return {
-ok: false,
-error: "not_found"
-};
-}
-if (session.status !== "uploading") {
-ctx.res.status(409);
-return {
-ok: false,
-error: "not_uploading"
-};
-}
-if (index >= session.chunksTotal) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "index_out_of_range"
-};
-}
-const data = toBufferPayload(payload);
-if (!data) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "invalid_body"
-};
-}
-const expectedSize = index === session.chunksTotal - 1 ? session.totalSize - session.chunkSize * (session.chunksTotal - 1) : session.chunkSize;
-if (data.length > expectedSize) {
-ctx.res.status(413);
-return {
-ok: false,
-error: "chunk_too_large"
-};
-}
-if (data.length !== expectedSize) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "invalid_chunk_size"
-};
-}
-const checksumHeader = ctx.req.get("X-Chunk-SHA256");
-const sha256 = checksumHeader ? computeSha256Hex(data) : void 0;
-if (checksumHeader) {
-if (sha256 !== normalizeSha256Hex(checksumHeader)) {
-ctx.res.status(400);
-return {
-ok: false,
-error: "checksum_mismatch"
-};
-}
-}
-await ensureUploadIndexes(UploadSession, UploadChunk);
-await UploadChunk.updateOne({
-uploadId,
-index
-}, {
-$set: {
-uploadId,
-index,
-data,
-size: data.length,
-sha256,
-expiresAt: session.expiresAt
-},
-$setOnInsert: { createdAt: /* @__PURE__ */ new Date() }
-}, { upsert: true });
-ctx.res.status(204);
-return { ok: true };
-};
-//#endregion
-//#region src/uploads/api/file-uploads/middleware/rawBodyParser.ts
-var rawBodyParser = ({ limitBytes, maxClientBytesPerSecond }) => {
-return (req, res, next) => {
-if (!(typeof req?.headers?.["content-type"] === "string" ? String(req.headers["content-type"]) : "").includes("application/octet-stream")) {
-next();
-return;
-}
-let total = 0;
-const chunks = [];
-let done = false;
-let paused = false;
-let throttleTimeout = null;
-const rateBytesPerSecond = typeof maxClientBytesPerSecond === "number" && maxClientBytesPerSecond > 0 ? maxClientBytesPerSecond : null;
-const cleanup = () => {
-req.off("data", onData);
-req.off("end", onEnd);
-req.off("error", onError);
-req.off("aborted", onAborted);
-if (throttleTimeout) {
-clearTimeout(throttleTimeout);
-throttleTimeout = null;
-}
-};
-const finish = (error) => {
-if (done) return;
-done = true;
-cleanup();
-if (error) {
-next(error);
-return;
-}
-req.body = Buffer.concat(chunks, total);
-next();
-};
-const onData = (chunk) => {
-if (done) return;
-const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
-total += buffer.length;
-if (total > limitBytes) {
-done = true;
-cleanup();
-req.destroy();
-res.status(413).json({
-ok: false,
-error: "chunk_too_large"
-});
-return;
-}
-chunks.push(buffer);
-if (!rateBytesPerSecond) return;
-const now = Date.now();
-const waitMs = consumeRateBudget(getClientRateState(getClientKey(req), rateBytesPerSecond, now), buffer.length, rateBytesPerSecond, now);
-if (waitMs > 0 && !paused) {
-paused = true;
-req.pause();
-throttleTimeout = setTimeout(() => {
-throttleTimeout = null;
-paused = false;
-if (done) return;
-try {
-req.resume();
-} catch {}
-}, waitMs);
-}
-};
-const onEnd = () => finish();
-const onError = (err) => finish(err);
-const onAborted = () => finish(/* @__PURE__ */ new Error("request_aborted"));
-req.on("data", onData);
-req.on("end", onEnd);
-req.on("error", onError);
-req.on("aborted", onAborted);
-};
-};
-var MAX_BURST_SECONDS = 1;
-var STALE_CLIENT_MS = 900 * 1e3;
-var clientRateStates = /* @__PURE__ */ new Map();
-var lastCleanupMs = 0;
-var getClientKey = (req) => {
-const rawClientIp = typeof req?.clientIp === "string" ? req.clientIp : "";
-if (rawClientIp.trim()) return rawClientIp.trim();
-return (typeof req?.ip === "string" ? req.ip : "").trim() || "unknown";
-};
-var maybeCleanupStates = (now) => {
-if (now - lastCleanupMs < 6e4) return;
-lastCleanupMs = now;
-if (clientRateStates.size < 2e3) return;
-for (const [key, state] of clientRateStates) if (now - state.lastSeenMs > STALE_CLIENT_MS) clientRateStates.delete(key);
-};
-var getClientRateState = (key, rateBytesPerSecond, now) => {
-maybeCleanupStates(now);
-const capacity = rateBytesPerSecond * MAX_BURST_SECONDS;
-const existing = clientRateStates.get(key);
-if (existing) {
-existing.lastSeenMs = now;
-existing.tokens = Math.min(capacity, existing.tokens);
-return existing;
-}
-const next = {
-tokens: capacity,
-lastRefillMs: now,
-lastSeenMs: now
-};
-clientRateStates.set(key, next);
-return next;
-};
-var consumeRateBudget = (state, bytes, rateBytesPerSecond, now) => {
-const capacity = rateBytesPerSecond * MAX_BURST_SECONDS;
-const elapsedMs = Math.max(0, now - state.lastRefillMs);
-if (elapsedMs > 0) {
-state.tokens = Math.min(capacity, state.tokens + elapsedMs * rateBytesPerSecond / 1e3);
-state.lastRefillMs = now;
-}
-state.tokens -= bytes;
-if (state.tokens >= 0) return 0;
-return Math.ceil(-state.tokens / rateBytesPerSecond * 1e3);
-};
-//#endregion
-//#region src/uploads/api/file-uploads/handler.ts
-var handler_default = (api) => {
-const chunkSizeBytes = getChunkSizeBytes();
-api.use(InitRoute, rawBodyParser({
-limitBytes: getRawBodyLimitBytes(chunkSizeBytes),
-maxClientBytesPerSecond: getMaxClientUploadBytesPerSecond()
-}));
-api.post(InitRoute, initUpload);
-api.put(ChunkRoute, uploadChunk);
-api.get(StatusRoute, getStatus);
-api.post(CompleteRoute, completeUpload);
-};
-//#endregion
-export { handler_default as default };
-
-//# sourceMappingURL=handler-DBtnVvP2.js.map