@rpcbase/server 0.488.0 → 0.490.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/dist/applyRouteLoaders.d.ts +0 -9
- package/dist/applyRouteLoaders.d.ts.map +0 -1
- package/dist/checkInitReplicaSet.d.ts +0 -6
- package/dist/checkInitReplicaSet.d.ts.map +0 -1
- package/dist/dev/coverage.d.ts +0 -3
- package/dist/dev/coverage.d.ts.map +0 -1
- package/dist/email-DEw8keax.js +0 -8042
- package/dist/email-DEw8keax.js.map +0 -1
- package/dist/email.d.ts +0 -19
- package/dist/email.d.ts.map +0 -1
- package/dist/getDerivedKey.d.ts +0 -3
- package/dist/getDerivedKey.d.ts.map +0 -1
- package/dist/handler-BwK8qxLn.js +0 -438
- package/dist/handler-BwK8qxLn.js.map +0 -1
- package/dist/handler-CedzJJg0.js +0 -114
- package/dist/handler-CedzJJg0.js.map +0 -1
- package/dist/handler-Cohj3cz3.js +0 -176
- package/dist/handler-Cohj3cz3.js.map +0 -1
- package/dist/handler-qCAUmVgd.js +0 -684
- package/dist/handler-qCAUmVgd.js.map +0 -1
- package/dist/hashPassword.d.ts +0 -2
- package/dist/hashPassword.d.ts.map +0 -1
- package/dist/index.d.ts +0 -7
- package/dist/index.d.ts.map +0 -1
- package/dist/index.js +0 -4628
- package/dist/index.js.map +0 -1
- package/dist/initServer.d.ts +0 -9
- package/dist/initServer.d.ts.map +0 -1
- package/dist/metricsIngestProxyMiddleware.d.ts +0 -3
- package/dist/metricsIngestProxyMiddleware.d.ts.map +0 -1
- package/dist/notifications/api/notifications/handler.d.ts +0 -4
- package/dist/notifications/api/notifications/handler.d.ts.map +0 -1
- package/dist/notifications/api/notifications/index.d.ts +0 -168
- package/dist/notifications/api/notifications/index.d.ts.map +0 -1
- package/dist/notifications/api/notifications/shared.d.ts +0 -6
- package/dist/notifications/api/notifications/shared.d.ts.map +0 -1
- package/dist/notifications/createNotification.d.ts +0 -13
- package/dist/notifications/createNotification.d.ts.map +0 -1
- package/dist/notifications/digest.d.ts +0 -13
- package/dist/notifications/digest.d.ts.map +0 -1
- package/dist/notifications/routes.d.ts +0 -2
- package/dist/notifications/routes.d.ts.map +0 -1
- package/dist/notifications.d.ts +0 -4
- package/dist/notifications.d.ts.map +0 -1
- package/dist/notifications.js +0 -127
- package/dist/notifications.js.map +0 -1
- package/dist/passwordHashStorage.d.ts +0 -11
- package/dist/passwordHashStorage.d.ts.map +0 -1
- package/dist/posthog.d.ts +0 -9
- package/dist/posthog.d.ts.map +0 -1
- package/dist/renderSSR.d.ts +0 -12
- package/dist/renderSSR.d.ts.map +0 -1
- package/dist/render_resend_false-MiC__Smr.js +0 -6
- package/dist/render_resend_false-MiC__Smr.js.map +0 -1
- package/dist/rts/api/changes/handler.d.ts +0 -9
- package/dist/rts/api/changes/handler.d.ts.map +0 -1
- package/dist/rts/api/changes/index.d.ts +0 -25
- package/dist/rts/api/changes/index.d.ts.map +0 -1
- package/dist/rts/index.d.ts +0 -40
- package/dist/rts/index.d.ts.map +0 -1
- package/dist/rts/index.js +0 -631
- package/dist/rts/index.js.map +0 -1
- package/dist/rts/routes.d.ts +0 -2
- package/dist/rts/routes.d.ts.map +0 -1
- package/dist/schemas-7qqi9OQy.js +0 -4225
- package/dist/schemas-7qqi9OQy.js.map +0 -1
- package/dist/shared-BJomDDWK.js +0 -107
- package/dist/shared-BJomDDWK.js.map +0 -1
- package/dist/ssrMiddleware.d.ts +0 -18
- package/dist/ssrMiddleware.d.ts.map +0 -1
- package/dist/types/index.d.ts +0 -6
- package/dist/types/index.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/handler.d.ts +0 -5
- package/dist/uploads/api/file-uploads/handler.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/handlers/completeUpload.d.ts +0 -5
- package/dist/uploads/api/file-uploads/handlers/completeUpload.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/handlers/getStatus.d.ts +0 -5
- package/dist/uploads/api/file-uploads/handlers/getStatus.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/handlers/initUpload.d.ts +0 -5
- package/dist/uploads/api/file-uploads/handlers/initUpload.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/handlers/uploadChunk.d.ts +0 -9
- package/dist/uploads/api/file-uploads/handlers/uploadChunk.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/index.d.ts +0 -43
- package/dist/uploads/api/file-uploads/index.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/middleware/rawBodyParser.d.ts +0 -5
- package/dist/uploads/api/file-uploads/middleware/rawBodyParser.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/processors/index.d.ts +0 -25
- package/dist/uploads/api/file-uploads/processors/index.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/processors/sanitizeSvg.d.ts +0 -5
- package/dist/uploads/api/file-uploads/processors/sanitizeSvg.d.ts.map +0 -1
- package/dist/uploads/api/file-uploads/shared.d.ts +0 -32
- package/dist/uploads/api/file-uploads/shared.d.ts.map +0 -1
- package/dist/uploads/api/files/handler.d.ts +0 -4
- package/dist/uploads/api/files/handler.d.ts.map +0 -1
- package/dist/uploads/api/files/handlers/deleteFile.d.ts +0 -9
- package/dist/uploads/api/files/handlers/deleteFile.d.ts.map +0 -1
- package/dist/uploads/api/files/handlers/getFile.d.ts +0 -4
- package/dist/uploads/api/files/handlers/getFile.d.ts.map +0 -1
- package/dist/uploads/api/files/index.d.ts +0 -4
- package/dist/uploads/api/files/index.d.ts.map +0 -1
- package/dist/uploads/routes.d.ts +0 -2
- package/dist/uploads/routes.d.ts.map +0 -1
- package/dist/uploads.d.ts +0 -2
- package/dist/uploads.d.ts.map +0 -1
- package/dist/uploads.js +0 -10
- package/dist/uploads.js.map +0 -1
package/dist/handler-qCAUmVgd.js
DELETED
@@ -1,684 +0,0 @@
-import { models, getTenantFilesystemDb } from "@rpcbase/db";
-import { GridFSBucket, ObjectId } from "mongodb";
-import { JSDOM } from "jsdom";
-import createDOMPurify from "dompurify";
-import { g as getTenantId, a as getModelCtx, b as buildUploadsAbility, c as getUploadSessionAccessQuery, e as ensureUploadIndexes, d as getBucketName, f as getUserId, h as getChunkSizeBytes, i as getSessionTtlMs, j as computeSha256Hex, t as toBufferPayload, n as normalizeSha256Hex, k as getMaxClientUploadBytesPerSecond, l as getRawBodyLimitBytes } from "./shared-BJomDDWK.js";
-import { randomBytes } from "node:crypto";
-import { o as object, n as number, b as boolean, s as string, a as array, _ as _enum } from "./schemas-7qqi9OQy.js";
-const MAX_SVG_BYTES = 128 * 1024;
-const window = new JSDOM("").window;
-const DOMPurify = createDOMPurify(window);
-const normalizeForSniff = (raw) => raw.replace(/^\uFEFF/, "").trimStart();
-const looksLikeSvgText = (text) => {
-  const normalized = normalizeForSniff(text);
-  if (!normalized.startsWith("<")) return false;
-  return /<svg(?:\s|>)/i.test(normalized);
-};
-const looksLikeSvg = (sniff) => looksLikeSvgText(sniff.toString("utf8"));
-const sanitizeSvg = (svg) => DOMPurify.sanitize(svg, {
-  USE_PROFILES: { svg: true, svgFilters: true }
-});
-const sanitizeSvgProcessor = {
-  id: "sanitize-svg",
-  maxBytes: MAX_SVG_BYTES,
-  match: ({ sniff }) => looksLikeSvg(sniff),
-  process: (data) => {
-    if (data.length > MAX_SVG_BYTES) {
-      throw new Error("svg_too_large");
-    }
-    const svgText = data.toString("utf8");
-    if (!looksLikeSvgText(svgText)) {
-      throw new Error("svg_invalid");
-    }
-    const sanitized = sanitizeSvg(svgText);
-    if (!sanitized.trim() || !looksLikeSvgText(sanitized)) {
-      throw new Error("svg_sanitize_failed");
-    }
-    const sanitizedBuffer = Buffer.from(sanitized, "utf8");
-    if (sanitizedBuffer.length > MAX_SVG_BYTES) {
-      throw new Error("svg_too_large");
-    }
-    return { data: sanitizedBuffer, mimeType: "image/svg+xml" };
-  }
-};
-const uploadProcessors = Object.freeze([sanitizeSvgProcessor]);
-const getMaxUploadProcessorBytes = () => uploadProcessors.reduce((max, processor) => Math.max(max, processor.maxBytes), 0);
-const selectUploadProcessors = (ctx) => uploadProcessors.filter((processor) => processor.match(ctx));
-const applyUploadProcessors = async (data, ctx) => {
-  let currentData = data;
-  let currentMimeType = ctx.clientMimeType;
-  const applied = [];
-  for (const processor of uploadProcessors) {
-    const processorCtx = {
-      filename: ctx.filename,
-      clientMimeType: currentMimeType,
-      totalSize: currentData.length,
-      sniff: currentData
-    };
-    if (!processor.match(processorCtx)) continue;
-    if (currentData.length > processor.maxBytes) {
-      throw new Error("processor_input_too_large");
-    }
-    const result = await processor.process(currentData, processorCtx);
-    currentData = result.data;
-    if (typeof result.mimeType === "string" && result.mimeType.trim()) {
-      currentMimeType = result.mimeType.trim();
-    }
-    applied.push(processor.id);
-  }
-  return {
-    data: currentData,
-    mimeType: currentMimeType,
-    applied
-  };
-};
-const waitForStreamFinished = async (stream) => new Promise((resolve, reject) => {
-  stream.once("finish", resolve);
-  stream.once("error", reject);
-});
-const writeToStream = async (stream, chunk) => {
-  const ok = stream.write(chunk);
-  if (ok) return;
-  await new Promise((resolve, reject) => {
-    const onDrain = () => {
-      cleanup();
-      resolve();
-    };
-    const onError = (error) => {
-      cleanup();
-      reject(error);
-    };
-    const cleanup = () => {
-      stream.off("drain", onDrain);
-      stream.off("error", onError);
-    };
-    stream.on("drain", onDrain);
-    stream.on("error", onError);
-  });
-};
-const abortUploadStream = async (stream) => {
-  if (!stream) return;
-  if (typeof stream.abort === "function") {
-    try {
-      await stream.abort();
-      return;
-    } catch {
-    }
-  }
-  try {
-    ;
-    stream.destroy?.();
-  } catch {
-  }
-};
-const completeUpload = async (_payload, ctx) => {
-  const tenantId = getTenantId(ctx);
-  if (!tenantId) {
-    ctx.res.status(400);
-    return { ok: false, error: "tenant_missing" };
-  }
-  const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
-  if (!uploadId) {
-    ctx.res.status(400);
-    return { ok: false, error: "invalid_upload_id" };
-  }
-  const modelCtx = getModelCtx(ctx, tenantId);
-  const [UploadSession, UploadChunk] = await Promise.all([
-    models.get("RBUploadSession", modelCtx),
-    models.get("RBUploadChunk", modelCtx)
-  ]);
-  const ability = buildUploadsAbility(ctx, tenantId);
-  if (!ability.can("update", "RBUploadSession")) {
-    ctx.res.status(401);
-    return { ok: false, error: "unauthorized" };
-  }
-  const existing = await UploadSession.findOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "read")] }).lean();
-  if (!existing) {
-    ctx.res.status(404);
-    return { ok: false, error: "not_found" };
-  }
-  if (existing.status === "done" && existing.fileId) {
-    return { ok: true, fileId: existing.fileId };
-  }
-  const locked = await UploadSession.findOneAndUpdate(
-    { $and: [{ _id: uploadId }, { status: "uploading" }, getUploadSessionAccessQuery(ability, "update")] },
-    { $set: { status: "assembling" }, $unset: { error: "" } },
-    { new: true }
-  ).lean();
-  if (!locked) {
-    ctx.res.status(409);
-    return { ok: false, error: "not_uploading" };
-  }
-  await ensureUploadIndexes(UploadSession, UploadChunk);
-  const fsDb = await getTenantFilesystemDb(tenantId);
-  const nativeDb = fsDb.db;
-  if (!nativeDb) {
-    await UploadSession.updateOne(
-      { $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] },
-      { $set: { status: "error", error: "filesystem_db_unavailable" } }
-    );
-    ctx.res.status(500);
-    return { ok: false, error: "assembly_failed" };
-  }
-  const bucketName = getBucketName();
-  const bucket = new GridFSBucket(nativeDb, { bucketName });
-  const lockedUserId = typeof locked.userId === "string" ? locked.userId : void 0;
-  const maxProcessorBytes = getMaxUploadProcessorBytes();
-  const shouldBufferForProcessing = locked.totalSize <= maxProcessorBytes;
-  const declaredMimeType = locked.mimeType.trim().toLowerCase();
-  const declaredSvg = declaredMimeType === "image/svg+xml" || locked.filename.trim().toLowerCase().endsWith(".svg");
-  let uploadStream = null;
-  try {
-    if (!shouldBufferForProcessing && declaredSvg) {
-      throw new Error("svg_too_large");
-    }
-    const cursor = UploadChunk.find({ uploadId }).sort({ index: 1 }).cursor();
-    let expectedIndex = 0;
-    const chunks = [];
-    let bufferedBytes = 0;
-    const pendingChunks = [];
-    const sniffParts = [];
-    let sniffBytes = 0;
-    try {
-      for await (const chunkDoc of cursor) {
-        if (chunkDoc.index !== expectedIndex) {
-          throw new Error("missing_chunks");
-        }
-        const chunk = chunkDoc.data;
-        if (shouldBufferForProcessing) {
-          chunks.push(chunk);
-          bufferedBytes += chunk.length;
-        } else if (!uploadStream) {
-          pendingChunks.push(chunk);
-          if (sniffBytes < maxProcessorBytes) {
-            const slice = chunk.subarray(0, Math.min(chunk.length, maxProcessorBytes - sniffBytes));
-            if (slice.length) {
-              sniffParts.push(slice);
-              sniffBytes += slice.length;
-            }
-          }
-          if (sniffBytes >= maxProcessorBytes) {
-            const sniff = Buffer.concat(sniffParts, sniffBytes);
-            const processors = selectUploadProcessors({
-              filename: locked.filename,
-              clientMimeType: locked.mimeType,
-              totalSize: locked.totalSize,
-              sniff
-            });
-            if (processors.length) {
-              throw new Error("svg_too_large");
-            }
-            uploadStream = bucket.openUploadStream(locked.filename, {
-              metadata: {
-                uploadId,
-                tenantId,
-                mimeType: locked.mimeType,
-                totalSize: locked.totalSize,
-                ...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
-                ...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
-                ...lockedUserId ? { userId: lockedUserId } : {}
-              }
-            });
-            for (const pending of pendingChunks) {
-              await writeToStream(uploadStream, pending);
-            }
-            pendingChunks.length = 0;
-          }
-        } else {
-          await writeToStream(uploadStream, chunk);
-        }
-        expectedIndex += 1;
-      }
-    } finally {
-      try {
-        await cursor.close();
-      } catch {
-      }
-    }
-    if (expectedIndex !== locked.chunksTotal) {
-      throw new Error("missing_chunks");
-    }
-    if (shouldBufferForProcessing) {
-      const assembled = Buffer.concat(chunks, bufferedBytes);
-      const { data: processed, mimeType: processedMimeType, applied } = await applyUploadProcessors(assembled, {
-        filename: locked.filename,
-        clientMimeType: locked.mimeType
-      });
-      uploadStream = bucket.openUploadStream(locked.filename, {
-        metadata: {
-          uploadId,
-          tenantId,
-          mimeType: processedMimeType,
-          totalSize: locked.totalSize,
-          ...applied.length ? { processors: applied, processedSize: processed.length } : {},
-          ...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
-          ...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
-          ...lockedUserId ? { userId: lockedUserId } : {}
-        }
-      });
-      const finished = waitForStreamFinished(uploadStream);
-      uploadStream.end(processed);
-      await finished;
-    } else {
-      if (!uploadStream) {
-        const sniff = Buffer.concat(sniffParts, sniffBytes);
-        const processors = selectUploadProcessors({
-          filename: locked.filename,
-          clientMimeType: locked.mimeType,
-          totalSize: locked.totalSize,
-          sniff
-        });
-        if (processors.length) {
-          throw new Error("svg_too_large");
-        }
-        uploadStream = bucket.openUploadStream(locked.filename, {
-          metadata: {
-            uploadId,
-            tenantId,
-            mimeType: locked.mimeType,
-            totalSize: locked.totalSize,
-            ...typeof locked.isPublic === "boolean" ? { isPublic: locked.isPublic } : {},
-            ...typeof locked.ownerKeyHash === "string" ? { ownerKeyHash: locked.ownerKeyHash } : {},
-            ...lockedUserId ? { userId: lockedUserId } : {}
-          }
-        });
-        for (const pending of pendingChunks) {
-          await writeToStream(uploadStream, pending);
-        }
-        pendingChunks.length = 0;
-      }
-      const finished = waitForStreamFinished(uploadStream);
-      uploadStream.end();
-      await finished;
-    }
-    const fileId = String(uploadStream.id ?? "");
-    if (!fileId) {
-      throw new Error("missing_file_id");
-    }
-    await UploadSession.updateOne(
-      { $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] },
-      { $set: { status: "done", fileId }, $unset: { error: "" } }
-    );
-    try {
-      await UploadChunk.deleteMany({ uploadId });
-    } catch {
-    }
-    return { ok: true, fileId };
-  } catch (error) {
-    const message = error instanceof Error ? error.message : String(error);
-    await abortUploadStream(uploadStream);
-    if (message === "missing_chunks") {
-      await UploadSession.updateOne(
-        { $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] },
-        { $set: { status: "uploading" } }
-      );
-      ctx.res.status(409);
-      return { ok: false, error: "missing_chunks" };
-    }
-    if (message === "svg_too_large") {
-      await UploadSession.updateOne(
-        { $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] },
-        { $set: { status: "error", error: message } }
-      );
-      ctx.res.status(413);
-      return { ok: false, error: message };
-    }
-    if (message === "svg_invalid" || message === "svg_sanitize_failed") {
-      await UploadSession.updateOne(
-        { $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] },
-        { $set: { status: "error", error: message } }
-      );
-      ctx.res.status(400);
-      return { ok: false, error: message };
-    }
-    await UploadSession.updateOne(
-      { $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] },
-      { $set: { status: "error", error: message } }
-    );
-    ctx.res.status(500);
-    return { ok: false, error: "assembly_failed" };
-  }
-};
-const getStatus = async (_payload, ctx) => {
-  const tenantId = getTenantId(ctx);
-  if (!tenantId) {
-    ctx.res.status(400);
-    return { ok: false, error: "tenant_missing" };
-  }
-  const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
-  if (!uploadId) {
-    ctx.res.status(400);
-    return { ok: false, error: "invalid_upload_id" };
-  }
-  const modelCtx = getModelCtx(ctx, tenantId);
-  const [UploadSession, UploadChunk] = await Promise.all([
-    models.get("RBUploadSession", modelCtx),
-    models.get("RBUploadChunk", modelCtx)
-  ]);
-  const ability = buildUploadsAbility(ctx, tenantId);
-  if (!ability.can("read", "RBUploadSession")) {
-    ctx.res.status(401);
-    return { ok: false, error: "unauthorized" };
-  }
-  const session = await UploadSession.findOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "read")] }).lean();
-  if (!session) {
-    ctx.res.status(404);
-    return { ok: false, error: "not_found" };
-  }
-  const receivedDocs = await UploadChunk.find(
-    { uploadId },
-    { index: 1, _id: 0 }
-  ).sort({ index: 1 }).lean();
-  const received = receivedDocs.map((doc) => typeof doc.index === "number" ? doc.index : -1).filter((n) => Number.isInteger(n) && n >= 0);
-  return {
-    ok: true,
-    status: session.status,
-    chunkSize: session.chunkSize,
-    chunksTotal: session.chunksTotal,
-    received,
-    ...session.fileId ? { fileId: session.fileId } : {}
-  };
-};
-const InitRoute = "/api/rb/file-uploads";
-const ChunkRoute = "/api/rb/file-uploads/:uploadId/chunks/:index";
-const StatusRoute = "/api/rb/file-uploads/:uploadId/status";
-const CompleteRoute = "/api/rb/file-uploads/:uploadId/complete";
-const initRequestSchema = object({
-  filename: string().min(1),
-  mimeType: string().min(1),
-  isPublic: boolean().optional(),
-  totalSize: number().int().min(1)
-});
-object({
-  ok: boolean(),
-  error: string().optional(),
-  uploadId: string().optional(),
-  uploadKey: string().optional(),
-  chunkSize: number().int().optional(),
-  chunksTotal: number().int().optional()
-});
-object({
-  ok: boolean(),
-  error: string().optional(),
-  status: _enum(["uploading", "assembling", "done", "error"]).optional(),
-  chunkSize: number().int().optional(),
-  chunksTotal: number().int().optional(),
-  received: array(number().int().min(0)).optional(),
-  fileId: string().optional()
-});
-object({
-  ok: boolean(),
-  error: string().optional(),
-  fileId: string().optional()
-});
-const initUpload = async (payload, ctx) => {
-  const tenantId = getTenantId(ctx);
-  if (!tenantId) {
-    ctx.res.status(400);
-    return { ok: false, error: "tenant_missing" };
-  }
-  const userId = getUserId(ctx);
-  const parsed = initRequestSchema.safeParse(payload ?? {});
-  if (!parsed.success) {
-    ctx.res.status(400);
-    return { ok: false, error: "invalid_payload" };
-  }
-  const chunkSize = getChunkSizeBytes();
-  const { filename, mimeType, totalSize, isPublic } = parsed.data;
-  const chunksTotal = Math.ceil(totalSize / chunkSize);
-  const modelCtx = getModelCtx(ctx, tenantId);
-  const [UploadSession, UploadChunk] = await Promise.all([
-    models.get("RBUploadSession", modelCtx),
-    models.get("RBUploadChunk", modelCtx)
-  ]);
-  await ensureUploadIndexes(UploadSession, UploadChunk);
-  const uploadId = new ObjectId().toString();
-  const now = Date.now();
-  const expiresAt = new Date(now + getSessionTtlMs());
-  const uploadKey = userId ? null : randomBytes(32).toString("base64url");
-  const ownerKeyHash = uploadKey ? computeSha256Hex(Buffer.from(uploadKey)) : void 0;
-  await UploadSession.create({
-    _id: uploadId,
-    ...userId ? { userId } : {},
-    ...ownerKeyHash ? { ownerKeyHash } : {},
-    filename,
-    mimeType,
-    ...typeof isPublic === "boolean" ? { isPublic } : {},
-    totalSize,
-    chunkSize,
-    chunksTotal,
-    status: "uploading",
-    createdAt: new Date(now),
-    expiresAt
-  });
-  return {
-    ok: true,
-    uploadId,
-    chunkSize,
-    chunksTotal,
-    ...uploadKey ? { uploadKey } : {}
-  };
-};
-const uploadChunk = async (payload, ctx) => {
-  const tenantId = getTenantId(ctx);
-  if (!tenantId) {
-    ctx.res.status(400);
-    return { ok: false, error: "tenant_missing" };
-  }
-  const uploadId = String(ctx.req.params?.uploadId ?? "").trim();
-  const indexRaw = String(ctx.req.params?.index ?? "").trim();
-  const index = Number(indexRaw);
-  if (!uploadId || !Number.isInteger(index) || index < 0) {
-    ctx.res.status(400);
-    return { ok: false, error: "invalid_chunk_ref" };
-  }
-  const modelCtx = getModelCtx(ctx, tenantId);
-  const [UploadSession, UploadChunk] = await Promise.all([
-    models.get("RBUploadSession", modelCtx),
-    models.get("RBUploadChunk", modelCtx)
-  ]);
-  const ability = buildUploadsAbility(ctx, tenantId);
-  if (!ability.can("update", "RBUploadSession")) {
-    ctx.res.status(401);
-    return { ok: false, error: "unauthorized" };
-  }
-  const session = await UploadSession.findOne({ $and: [{ _id: uploadId }, getUploadSessionAccessQuery(ability, "update")] }).lean();
-  if (!session) {
-    ctx.res.status(404);
-    return { ok: false, error: "not_found" };
-  }
-  if (session.status !== "uploading") {
-    ctx.res.status(409);
-    return { ok: false, error: "not_uploading" };
-  }
-  if (index >= session.chunksTotal) {
-    ctx.res.status(400);
-    return { ok: false, error: "index_out_of_range" };
-  }
-  const data = toBufferPayload(payload);
-  if (!data) {
-    ctx.res.status(400);
-    return { ok: false, error: "invalid_body" };
-  }
-  const expectedSize = index === session.chunksTotal - 1 ? session.totalSize - session.chunkSize * (session.chunksTotal - 1) : session.chunkSize;
-  if (data.length > expectedSize) {
-    ctx.res.status(413);
-    return { ok: false, error: "chunk_too_large" };
-  }
-  if (data.length !== expectedSize) {
-    ctx.res.status(400);
-    return { ok: false, error: "invalid_chunk_size" };
-  }
-  const checksumHeader = ctx.req.get("X-Chunk-SHA256");
-  const sha256 = checksumHeader ? computeSha256Hex(data) : void 0;
-  if (checksumHeader) {
-    const expectedSha256 = normalizeSha256Hex(checksumHeader);
-    if (sha256 !== expectedSha256) {
-      ctx.res.status(400);
-      return { ok: false, error: "checksum_mismatch" };
-    }
-  }
-  await ensureUploadIndexes(UploadSession, UploadChunk);
-  await UploadChunk.updateOne(
-    { uploadId, index },
-    {
-      $set: {
-        uploadId,
-        index,
-        data,
-        size: data.length,
-        sha256,
-        expiresAt: session.expiresAt
-      },
-      $setOnInsert: {
-        createdAt: /* @__PURE__ */ new Date()
-      }
-    },
-    { upsert: true }
-  );
-  ctx.res.status(204);
-  return { ok: true };
-};
-const rawBodyParser = ({
-  limitBytes,
-  maxClientBytesPerSecond
-}) => {
-  return (req, res, next) => {
-    const contentType = typeof req?.headers?.["content-type"] === "string" ? String(req.headers["content-type"]) : "";
-    if (!contentType.includes("application/octet-stream")) {
-      next();
-      return;
-    }
-    let total = 0;
-    const chunks = [];
-    let done = false;
-    let paused = false;
-    let throttleTimeout = null;
-    const rateBytesPerSecond = typeof maxClientBytesPerSecond === "number" && maxClientBytesPerSecond > 0 ? maxClientBytesPerSecond : null;
-    const cleanup = () => {
-      req.off("data", onData);
-      req.off("end", onEnd);
-      req.off("error", onError);
-      req.off("aborted", onAborted);
-      if (throttleTimeout) {
-        clearTimeout(throttleTimeout);
-        throttleTimeout = null;
-      }
-    };
-    const finish = (error) => {
-      if (done) return;
-      done = true;
-      cleanup();
-      if (error) {
-        next(error);
-        return;
-      }
-      req.body = Buffer.concat(chunks, total);
-      next();
-    };
-    const onData = (chunk) => {
-      if (done) return;
-      const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
-      total += buffer.length;
-      if (total > limitBytes) {
-        done = true;
-        cleanup();
-        req.destroy();
-        res.status(413).json({ ok: false, error: "chunk_too_large" });
-        return;
-      }
-      chunks.push(buffer);
-      if (!rateBytesPerSecond) return;
-      const now = Date.now();
-      const clientKey = getClientKey(req);
-      const state = getClientRateState(clientKey, rateBytesPerSecond, now);
-      const waitMs = consumeRateBudget(state, buffer.length, rateBytesPerSecond, now);
-      if (waitMs > 0 && !paused) {
-        paused = true;
-        req.pause();
-        throttleTimeout = setTimeout(() => {
-          throttleTimeout = null;
-          paused = false;
-          if (done) return;
-          try {
-            req.resume();
-          } catch {
-          }
-        }, waitMs);
-      }
-    };
-    const onEnd = () => finish();
-    const onError = (err) => finish(err);
-    const onAborted = () => finish(new Error("request_aborted"));
-    req.on("data", onData);
-    req.on("end", onEnd);
-    req.on("error", onError);
-    req.on("aborted", onAborted);
-  };
-};
-const MAX_BURST_SECONDS = 1;
-const STALE_CLIENT_MS = 15 * 60 * 1e3;
-const clientRateStates = /* @__PURE__ */ new Map();
-let lastCleanupMs = 0;
-const getClientKey = (req) => {
-  const rawClientIp = typeof req?.clientIp === "string" ? req.clientIp : "";
-  if (rawClientIp.trim()) return rawClientIp.trim();
-  const rawIp = typeof req?.ip === "string" ? req.ip : "";
-  return rawIp.trim() || "unknown";
-};
-const maybeCleanupStates = (now) => {
-  if (now - lastCleanupMs < 6e4) return;
-  lastCleanupMs = now;
-  if (clientRateStates.size < 2e3) return;
-  for (const [key, state] of clientRateStates) {
-    if (now - state.lastSeenMs > STALE_CLIENT_MS) {
-      clientRateStates.delete(key);
-    }
-  }
-};
-const getClientRateState = (key, rateBytesPerSecond, now) => {
-  maybeCleanupStates(now);
-  const capacity = rateBytesPerSecond * MAX_BURST_SECONDS;
-  const existing = clientRateStates.get(key);
-  if (existing) {
-    existing.lastSeenMs = now;
-    existing.tokens = Math.min(capacity, existing.tokens);
-    return existing;
-  }
-  const next = {
-    tokens: capacity,
-    lastRefillMs: now,
-    lastSeenMs: now
-  };
-  clientRateStates.set(key, next);
-  return next;
-};
-const consumeRateBudget = (state, bytes, rateBytesPerSecond, now) => {
-  const capacity = rateBytesPerSecond * MAX_BURST_SECONDS;
-  const elapsedMs = Math.max(0, now - state.lastRefillMs);
-  if (elapsedMs > 0) {
-    state.tokens = Math.min(capacity, state.tokens + elapsedMs * rateBytesPerSecond / 1e3);
-    state.lastRefillMs = now;
-  }
-  state.tokens -= bytes;
-  if (state.tokens >= 0) return 0;
-  return Math.ceil(-state.tokens / rateBytesPerSecond * 1e3);
-};
-const handler = (api) => {
-  const chunkSizeBytes = getChunkSizeBytes();
-  api.use(
-    InitRoute,
-    rawBodyParser({
-      limitBytes: getRawBodyLimitBytes(chunkSizeBytes),
-      maxClientBytesPerSecond: getMaxClientUploadBytesPerSecond()
-    })
-  );
-  api.post(InitRoute, initUpload);
-  api.put(ChunkRoute, uploadChunk);
-  api.get(StatusRoute, getStatus);
-  api.post(CompleteRoute, completeUpload);
-};
-export {
-  handler as default
-};
-//# sourceMappingURL=handler-qCAUmVgd.js.map
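
For reference, below is a minimal, hypothetical client-side sketch of the chunked-upload protocol that the deleted handler implemented. It is grounded only in what is visible in this diff: the /api/rb/file-uploads routes, the init payload shape, the application/octet-stream chunk bodies, the optional X-Chunk-SHA256 integrity header, and the complete response. The BASE_URL origin, the error handling, and the way tenant/auth context reaches getTenantId and buildUploadsAbility (and how an anonymous uploadKey would be presented on later requests) are assumptions, not part of the package.

// Hypothetical browser-side usage sketch (not shipped in @rpcbase/server):
// drives the init -> chunk -> complete flow of the deleted handler.
const BASE_URL = "https://app.example.invalid"; // assumption: server origin

const sha256Hex = async (bytes) => {
  // Mirrors the server-side check: the handler recomputes the digest of the
  // raw chunk body and compares it to X-Chunk-SHA256 when the header is set.
  const digest = await crypto.subtle.digest("SHA-256", bytes);
  return [...new Uint8Array(digest)].map((b) => b.toString(16).padStart(2, "0")).join("");
};

const uploadFile = async (file) => {
  // 1. Init: the server responds with uploadId, chunkSize and chunksTotal
  //    (plus an uploadKey for anonymous sessions; its transport on later
  //    requests lives in shared-BJomDDWK.js and is not shown in this diff).
  const init = await fetch(`${BASE_URL}/api/rb/file-uploads`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ filename: file.name, mimeType: file.type, totalSize: file.size })
  }).then((r) => r.json());
  if (!init.ok) throw new Error(init.error);

  // 2. PUT each chunk as application/octet-stream; every chunk must be
  //    exactly chunkSize bytes except the final remainder chunk.
  for (let index = 0; index < init.chunksTotal; index += 1) {
    const start = index * init.chunkSize;
    const chunk = new Uint8Array(await file.slice(start, start + init.chunkSize).arrayBuffer());
    const res = await fetch(`${BASE_URL}/api/rb/file-uploads/${init.uploadId}/chunks/${index}`, {
      method: "PUT",
      headers: {
        "Content-Type": "application/octet-stream",
        "X-Chunk-SHA256": await sha256Hex(chunk)
      },
      body: chunk
    });
    if (!res.ok) throw new Error(`chunk ${index} rejected (${res.status})`);
  }

  // 3. Complete: the server assembles the chunks into GridFS, applies the
  //    SVG sanitizer when it matches, and returns the resulting fileId.
  const done = await fetch(`${BASE_URL}/api/rb/file-uploads/${init.uploadId}/complete`, {
    method: "POST"
  }).then((r) => r.json());
  if (!done.ok) throw new Error(done.error);
  return done.fileId;
};

A resumable client could additionally poll GET /api/rb/file-uploads/:uploadId/status, whose received array lists the chunk indexes already stored, and re-send only the missing ones before calling complete.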