@smoothglue/sync-whiteboard 1.0.3-79290.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/assets.js +170 -0
- package/dist/logger.js +13 -0
- package/dist/rooms.js +279 -0
- package/dist/schema.js +11 -0
- package/dist/server.js +202 -0
- package/package.json +3 -3
package/dist/assets.js
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.storeAsset = storeAsset;
|
|
7
|
+
exports.loadAsset = loadAsset;
|
|
8
|
+
const stream_1 = require("stream");
|
|
9
|
+
const logger_1 = __importDefault(require("./logger"));
|
|
10
|
+
// --- Configuration ---
|
|
11
|
+
const ASSET_STORAGE_URL = process.env.SWB_ASSET_STORAGE_URL;
|
|
12
|
+
if (!ASSET_STORAGE_URL) {
|
|
13
|
+
// Critical configuration missing, exit the process.
|
|
14
|
+
logger_1.default.fatal("FATAL ERROR: ASSET_STORAGE_URL environment variable is not set.");
|
|
15
|
+
process.exit(1);
|
|
16
|
+
}
|
|
17
|
+
logger_1.default.info({ assetStorageUrl: ASSET_STORAGE_URL }, `[ASSETS] Using Asset Storage URL`);
|
|
18
|
+
/**
 * Stores an asset by proxying a PUT request to the configured asset storage backend.
 * @param id - The unique identifier for the asset (generated by the client).
 * @param fileStream - The readable stream containing the asset data (from the client request).
 * @param contentType - The MIME type of the asset.
 * @param originalFilename - The original filename provided by the client.
 * @param credentials - Optional client credentials ({ authorization, cookie }) forwarded to the backend.
 * @returns The asset ID upon successful storage.
 * @throws Throws an error if the request to the backend fails.
 */
async function storeAsset(id, fileStream, contentType = "application/octet-stream", originalFilename, credentials) {
    const url = `${ASSET_STORAGE_URL}/${id}`;
    logger_1.default.debug({ assetId: id, filename: originalFilename, targetUrl: url }, `[ASSETS] Storing asset`);
    // Ensure we have a readable Node.js stream before converting it below.
    if (!(fileStream instanceof stream_1.Readable)) {
        logger_1.default.error({ assetId: id, receivedType: typeof fileStream }, "[ASSETS] Error: storeAsset received a non-readable stream type.");
        throw new Error("Invalid stream type provided to storeAsset.");
    }
    let webStream = null;
    try {
        // Convert Node.js stream to Web Standard stream for fetch body
        webStream = stream_1.Readable.toWeb(fileStream);
        // Sanitize the filename for use in the Content-Disposition header.
        // This removes non-ASCII characters that can crash the fetch call.
        const sanitizedFilename = originalFilename.replace(/[^\x00-\x7F]/g, "");
        const headers = {
            "Content-Type": contentType,
            "X-Original-Filename": encodeURIComponent(originalFilename), // Pass the original filename (properly encoded) for the backend to use
            "Content-Disposition": `attachment; filename="${sanitizedFilename}"`, // Use the sanitized filename in the Content-Disposition header to avoid errors
        };
        // Forward auth headers if included from client.
        // FIX: prioritize Authorization and only fall back to Cookie when it is
        // absent (previously both were sent together, contradicting this comment
        // and the behavior of loadAsset and the rooms module).
        if (credentials?.authorization) {
            headers["Authorization"] = credentials.authorization;
        }
        else if (credentials?.cookie) {
            headers["Cookie"] = credentials.cookie;
        }
        // Make the PUT request to the asset storage backend
        const response = await fetch(url, {
            method: "PUT",
            headers: headers,
            body: webStream,
            // @ts-ignore - duplex: 'half' is required for streaming request bodies with Node fetch
            duplex: "half",
        });
        logger_1.default.debug({ assetId: id, status: response.status }, `[ASSETS] Backend PUT response status`);
        // Handle backend errors
        if (!response.ok) {
            const errorBody = await response.text();
            // FIX: the headers object is no longer interpolated into the message;
            // it rendered as "[object Object]" and could leak credentials if serialized.
            const err = new Error(`Backend failed to store asset ${id}. Status: ${response.status}. Body: ${errorBody}.`);
            logger_1.default.error({
                err,
                assetId: id,
                responseStatus: response.status,
                responseStatusText: response.statusText,
                responseBody: errorBody,
            }, `[ASSETS] Error response from backend storing asset`);
            // Ensure streams are closed on error
            if (webStream) {
                await webStream.cancel().catch((cancelErr) => logger_1.default.error({
                    err: cancelErr,
                    assetId: id,
                    stage: "cancel_upload_after_failed_fetch",
                }, `[ASSETS] Error cancelling upload webStream`));
            }
            throw err;
        }
        logger_1.default.debug({ assetId: id, targetUrl: url }, `[ASSETS] Successfully proxied storage for asset`);
        return id; // Return the ID, confirming success
    }
    catch (error) {
        logger_1.default.error({ err: error, assetId: id, targetUrl: url, operation: "storeAsset" }, `[ASSETS] Network or fetch error storing asset`);
        // Clean up streams on error
        if (fileStream instanceof stream_1.Readable && !fileStream.destroyed) {
            fileStream.destroy(error instanceof Error ? error : new Error(String(error)));
        }
        if (webStream) {
            await webStream.cancel().catch((cancelErr) => logger_1.default.error({
                err: cancelErr,
                assetId: id,
                stage: "cancel_upload_during_error_handling",
            }, `[ASSETS] Error cancelling upload webStream`));
        }
        throw error; // Re-throw error for the server handler
    }
}
|
|
104
|
+
/**
 * Loads an asset by proxying a GET request to the configured asset storage backend.
 * @param id - The unique identifier for the asset.
 * @param credentials - Optional client credentials ({ authorization, cookie }) forwarded to the backend.
 * @returns An object containing the asset's data as a Readable stream and its Content-Type.
 * @throws Throws an error if the request to the backend fails; a missing asset
 *         throws an Error whose `code` is "ENOENT" (mimicking a filesystem error).
 */
async function loadAsset(id, credentials) {
    const url = `${ASSET_STORAGE_URL}/${id}`;
    logger_1.default.debug({ assetId: id, targetUrl: url }, `[ASSETS] Loading asset`);
    const headers = {};
    // Pass auth headers from client to backend call.
    // Prioritize Authorization header, but fall back to Cookie
    if (credentials?.authorization) {
        headers["Authorization"] = credentials.authorization;
    }
    else if (credentials?.cookie) {
        headers["Cookie"] = credentials.cookie;
    }
    try {
        // Make the GET request to the actual asset storage backend
        const response = await fetch(url, {
            method: "GET",
            headers,
        });
        logger_1.default.debug({ assetId: id, status: response.status }, `[ASSETS] Backend GET response status`);
        // Handle backend errors (like 404 Not Found)
        if (!response.ok) {
            if (response.status === 404) {
                logger_1.default.warn({ assetId: id, targetUrl: url, status: 404 }, `[ASSETS] Asset not found at backend (404)`);
                const notFoundError = new Error(`Asset ${id} not found.`);
                notFoundError.code = "ENOENT"; // Mimic filesystem error code
                throw notFoundError;
            }
            // Handle other non-OK statuses.
            const errorBody = await response.text();
            // FIX: the headers object is no longer interpolated into the message;
            // it rendered as "[object Object]" and could leak credentials if serialized.
            const err = new Error(`Backend failed to load asset ${id}. Status: ${response.status}. Body: ${errorBody}.`);
            logger_1.default.error({
                err,
                assetId: id,
                responseStatus: response.status,
                responseStatusText: response.statusText,
                responseBody: errorBody,
            }, `[ASSETS] Error response from backend loading asset`);
            throw err;
        }
        // Ensure response body exists
        if (!response.body) {
            logger_1.default.error({ assetId: id, targetUrl: url }, `[ASSETS] No response body received from backend for asset`);
            throw new Error(`No response body received for asset ${id}.`);
        }
        // Get the Content-Type header provided by the backend
        const contentType = response.headers.get("Content-Type") || "application/octet-stream";
        logger_1.default.debug({ assetId: id, contentType: contentType }, `[ASSETS] Received Content-Type from backend`);
        // Convert the Web Standard stream from fetch response to a Node.js stream
        const nodeStream = stream_1.Readable.fromWeb(response.body);
        // Return the stream and content type for the server handler to use
        return { stream: nodeStream, contentType: contentType };
    }
    catch (error) {
        // Avoid double-logging known 'Not Found' errors
        if (error.code !== "ENOENT") {
            logger_1.default.error({ err: error, assetId: id, targetUrl: url, operation: "loadAsset" }, `[ASSETS] Network or fetch error loading asset`);
        }
        throw error; // Re-throw error for the server handler
    }
}
|
package/dist/logger.js
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.loggerConfig = void 0;
|
|
7
|
+
const pino_1 = __importDefault(require("pino"));
|
|
8
|
+
exports.loggerConfig = {
|
|
9
|
+
level: process.env.SWB_LOG_LEVEL ?? "info",
|
|
10
|
+
timestamp: pino_1.default.stdTimeFunctions.isoTime,
|
|
11
|
+
};
|
|
12
|
+
const logger = (0, pino_1.default)(exports.loggerConfig);
|
|
13
|
+
exports.default = logger;
|
package/dist/rooms.js
ADDED
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.getOrCreateRoom = getOrCreateRoom;
|
|
7
|
+
/**
|
|
8
|
+
* @file Manages tldraw room instances, including creation, persistence, and lifecycle.
|
|
9
|
+
* This module handles loading snapshots from a backend, saving them periodically,
|
|
10
|
+
* and cleaning up inactive rooms from memory.
|
|
11
|
+
*/
|
|
12
|
+
const sync_core_1 = require("@tldraw/sync-core");
|
|
13
|
+
const schema_1 = require("./schema");
|
|
14
|
+
const logger_1 = __importDefault(require("./logger"));
|
|
15
|
+
// --- Configuration ---
|
|
16
|
+
const SNAPSHOT_STORAGE_URL = process.env.SWB_SNAPSHOT_STORAGE_URL;
|
|
17
|
+
if (!SNAPSHOT_STORAGE_URL) {
|
|
18
|
+
logger_1.default.fatal("FATAL ERROR: SNAPSHOT_STORAGE_URL environment variable is not set.");
|
|
19
|
+
process.exit(1);
|
|
20
|
+
}
|
|
21
|
+
logger_1.default.info({ snapshotStorageUrl: SNAPSHOT_STORAGE_URL }, `[ROOMS] Using Snapshot Storage URL`);
|
|
22
|
+
const SAVE_INTERVAL_MS = parseInt(process.env.SWB_SAVE_INTERVAL_MS || "5000", 10);
|
|
23
|
+
logger_1.default.info({ saveIntervalMs: SAVE_INTERVAL_MS }, `[ROOMS] Snapshot save interval`);
|
|
24
|
+
// In-memory map holding active room states, keyed by roomId
|
|
25
|
+
const rooms = new Map();
|
|
26
|
+
// Mutex to prevent race conditions when multiple requests try to create the same room simultaneously
|
|
27
|
+
let createRoomMutex = Promise.resolve(undefined);
|
|
28
|
+
// --- End Room State Tracking ---
|
|
29
|
+
/**
 * Reads the latest snapshot for a room from the backend API via HTTP GET.
 * @param roomId - The ID of the room.
 * @param credentials - Optional client credentials ({ authorization, cookie }) forwarded to the backend.
 * @returns The snapshot data, or undefined if the backend returns 404 or an
 *          object payload without a 'documents' property.
 * @throws Throws an error for non-404 HTTP errors or network issues.
 */
async function readSnapshotFromBackend(roomId, credentials) {
    const url = `${SNAPSHOT_STORAGE_URL}/${roomId}`;
    logger_1.default.debug({ roomId, url }, `[ROOMS] Loading snapshot for room ${roomId} from ${url}`);
    const headers = new Headers();
    headers.append("Accept", "application/json");
    // Prioritize Authorization header, but fall back to Cookie
    if (credentials?.authorization) {
        headers.append("Authorization", credentials.authorization);
    }
    else if (credentials?.cookie) {
        headers.append("Cookie", credentials.cookie);
    }
    try {
        const response = await fetch(url, { method: "GET", headers: headers });
        // A 404 simply means the room has never been saved — expected for new rooms.
        if (response.status === 404) {
            logger_1.default.info({ roomId, url, status: 404 }, `[ROOMS] No existing snapshot found for room ${roomId} (404)`);
            return undefined;
        }
        // Any other non-OK status is an unexpected backend failure.
        if (!response.ok) {
            const errorBody = await response.text();
            const err = new Error(`Backend failed to load snapshot for ${roomId}. Status: ${response.status}. Body: ${errorBody}.`);
            logger_1.default.error({
                err,
                roomId,
                url,
                responseStatus: response.status,
                responseBody: errorBody,
            }, `[ROOMS] Error loading snapshot for room ${roomId}`);
            throw err;
        }
        const snapshot = await response.json();
        // Reject object payloads lacking the 'documents' property the schema expects.
        if (snapshot &&
            typeof snapshot === "object" &&
            !("documents" in snapshot)) {
            logger_1.default.warn({ roomId, snapshotReceived: snapshot }, `[ROOMS] Invalid snapshot received from backend for room ${roomId}. Missing 'documents' property.`);
            return undefined;
        }
        logger_1.default.debug({ roomId, snapshotSize: JSON.stringify(snapshot).length }, `[ROOMS] Snapshot loaded successfully for room ${roomId}`);
        return snapshot;
    }
    catch (error) {
        logger_1.default.error({ err: error, roomId, url }, `[ROOMS] Network or fetch error loading snapshot for room ${roomId}`);
        throw error; // Propagate error to getOrCreateRoom
    }
}
|
|
87
|
+
/**
 * Saves the current room snapshot to the backend API via HTTP POST.
 * Failures are logged but never thrown: persistence is best-effort.
 * @param roomId - The ID of the room.
 * @param room - The TLSocketRoom instance containing the state to save.
 * @param credentials - Optional client credentials ({ authorization, cookie }) forwarded to the backend.
 */
async function saveSnapshotToBackend(roomId, room, credentials) {
    const url = `${SNAPSHOT_STORAGE_URL}/${roomId}`;
    const snapshot = room.getCurrentSnapshot();
    const headers = new Headers();
    headers.append("Content-Type", "application/json");
    // Prioritize Authorization header, but fall back to Cookie
    if (credentials?.authorization) {
        headers.append("Authorization", credentials.authorization);
    }
    else if (credentials?.cookie) {
        headers.append("Cookie", credentials.cookie);
    }
    logger_1.default.debug({ roomId, url, snapshotSize: JSON.stringify(snapshot).length }, `[ROOMS] Saving snapshot for room ${roomId} to ${url}`);
    try {
        const response = await fetch(url, {
            method: "POST",
            headers: headers,
            body: JSON.stringify(snapshot),
        });
        if (response.ok) {
            logger_1.default.debug({ roomId }, `[ROOMS] Snapshot saved successfully for room ${roomId}`);
            return;
        }
        // Non-OK responses are logged at warn level; the caller is not interrupted.
        const errorBody = await response.text();
        logger_1.default.warn({
            roomId,
            url,
            responseStatus: response.status,
            responseBody: errorBody,
        }, `[ROOMS] Error saving snapshot for room ${roomId}: ${response.status} ${response.statusText}`);
    }
    catch (error) {
        logger_1.default.error({ err: error, roomId, url }, `[ROOMS] Network or fetch error saving snapshot for room ${roomId}`);
    }
}
|
|
128
|
+
/**
 * Retrieves an existing active room instance from memory or creates a new one.
 * If creating, it loads the initial state from the backend.
 * Uses a mutex (a chained promise) to handle concurrent requests safely.
 * @param roomId - The ID of the room to get or create.
 * @param credentials - Optional client credentials ({ authorization, cookie }) used for backend calls.
 * @returns A promise resolving to the TLSocketRoom instance.
 * @throws Throws an error if backend interaction fails during creation.
 */
async function getOrCreateRoom(roomId, credentials) {
    // Chain onto the mutex so only one creation attempt runs at a time.
    createRoomMutex = createRoomMutex.then(async () => {
        if (rooms.has(roomId)) {
            const existingRoomState = rooms.get(roomId);
            if (!existingRoomState.room.isClosed()) {
                logger_1.default.debug({ roomId }, "[ROOMS] Active room instance found in memory.");
                return;
            }
            else {
                logger_1.default.info({ roomId }, `[ROOMS] Found closed room ${roomId}, removing before creating new one.`);
                rooms.delete(roomId);
            }
        }
        logger_1.default.info({ roomId }, `[ROOMS] Creating or recreating room: ${roomId}`);
        // NOTE(review): `initialSnapshot` is awaited here but never used — the
        // room is constructed with `initialSnapshot: undefined` and the snapshot
        // is fetched AGAIN in the background below. This doubles backend load
        // per room creation; confirm whether this blocking read is only kept so
        // that backend failures abort creation (it rejects the mutex chain).
        const initialSnapshot = await readSnapshotFromBackend(roomId, credentials);
        // Child logger that tags every tldraw-internal message with the room id.
        const tldrawInstanceLogger = logger_1.default.child({
            tldrawRoomId: roomId,
            component: "tldraw-sync-core",
        });
        // Adapter translating tldraw's variadic warn/error calls into pino calls:
        // the first string argument becomes the message, everything else `details`.
        const tldrawLogAdapter = {
            warn: (...args) => {
                const msg = args.find((arg) => typeof arg === "string") || "tldraw room warning";
                const details = args.filter((arg) => typeof arg !== "string");
                tldrawInstanceLogger.warn(details.length ? { details } : {}, msg);
            },
            error: (...args) => {
                const errorArg = args.find((arg) => arg instanceof Error);
                if (errorArg) {
                    const msg = args
                        .filter((arg) => typeof arg === "string" && arg !== errorArg.message)
                        .join(" ") ||
                        errorArg.message ||
                        "tldraw room error";
                    const details = args.filter((arg) => arg !== errorArg && typeof arg !== "string");
                    tldrawInstanceLogger.error({ err: errorArg, details: details.length ? details : undefined }, msg);
                }
                else {
                    const msg = args.find((arg) => typeof arg === "string") ||
                        "tldraw room error (no Error instance)";
                    const details = args.filter((arg) => typeof arg !== "string");
                    tldrawInstanceLogger.error(details.length ? { details } : {}, msg);
                }
            },
        };
        // Mutable per-room state captured by the callbacks below; `room` is
        // assigned right after construction.
        const newRoomState = {
            id: roomId,
            needsPersist: false,
            persistPromise: null,
            credentials,
            room: null,
        };
        const roomInstance = new sync_core_1.TLSocketRoom({
            schema: schema_1.whiteboardSchema,
            initialSnapshot: undefined,
            log: tldrawLogAdapter,
            // When the last session leaves: wait for any in-flight periodic save,
            // perform a final save, then close the room.
            onSessionRemoved(roomInstance, args) {
                logger_1.default.debug({ roomId, remainingSessions: args.numSessionsRemaining }, `[ROOMS] Session removed. Remaining: ${args.numSessionsRemaining}`);
                if (args.numSessionsRemaining === 0) {
                    logger_1.default.info({ roomId }, `[ROOMS] Last user left. Triggering final save.`);
                    const savePromise = newRoomState.persistPromise ?? Promise.resolve();
                    savePromise.finally(() => {
                        logger_1.default.info({ roomId }, `[ROOMS] Performing final save...`);
                        saveSnapshotToBackend(roomId, roomInstance, newRoomState.credentials).finally(() => {
                            logger_1.default.info({ roomId }, `[ROOMS] Closing room after final save.`);
                            roomInstance.close();
                        });
                    });
                }
            },
            // Mark the room dirty; the periodic persistence loop picks it up.
            onDataChange() {
                newRoomState.needsPersist = true;
            },
        });
        newRoomState.room = roomInstance;
        rooms.set(roomId, newRoomState);
        logger_1.default.info({ roomId }, `[ROOMS] Room created successfully.`);
        // NOTE(review): this condition is always true here — the room was just
        // added to the map — so startPersistenceInterval() runs unconditionally.
        if (rooms.size > 0) {
            startPersistenceInterval();
        }
        // Load the REAL snapshot in the background, AFTER the room has been created.
        readSnapshotFromBackend(roomId, credentials)
            .then((snapshot) => {
            if (snapshot && !roomInstance.isClosed()) {
                logger_1.default.info({ roomId }, `Snapshot loaded in background. Hydrating room.`);
                // Once loaded, push the full snapshot to all connected clients.
                roomInstance.loadSnapshot(snapshot);
                newRoomState.needsPersist = false; // Reset flag after initial load
            }
        })
            .catch((err) => {
            // A failed hydration invalidates the room entirely.
            logger_1.default.error({ err, roomId }, "Failed to load snapshot in background");
            roomInstance.close();
            rooms.delete(roomId);
        });
    });
    await createRoomMutex;
    // Re-read from the map: the mutex body may have created, reused, or deleted it.
    const roomState = rooms.get(roomId);
    if (!roomState || roomState.room.isClosed()) {
        logger_1.default.error({ roomId }, `[ROOMS] Failed to get or create a valid room instance after mutex.`);
        throw new Error(`Failed to retrieve valid room instance for ${roomId}`);
    }
    return roomState.room;
}
|
|
239
|
+
// --- Smart Periodic Persistence ---
// Handle to the active setInterval timer, or null while no rooms are live.
let persistenceInterval = null;
/** Stops the periodic snapshot-save timer; no-op when it is not running. */
function stopPersistenceInterval() {
    if (!persistenceInterval)
        return;
    logger_1.default.info("[ROOMS] No active rooms. Stopping periodic persistence.");
    clearInterval(persistenceInterval);
    persistenceInterval = null;
}
/** Starts the periodic snapshot-save timer; no-op when already running. */
function startPersistenceInterval() {
    if (persistenceInterval)
        return;
    logger_1.default.info("[ROOMS] First active room created. Starting periodic persistence.");
    persistenceInterval = setInterval(() => {
        logger_1.default.debug("[ROOMS] Periodic persistence check initiated.");
        let savedThisTick = 0;
        for (const state of rooms.values()) {
            // Evict rooms that have been closed since the last tick.
            if (state.room.isClosed()) {
                logger_1.default.info({ roomId: state.id }, `[ROOMS] Removing closed room during periodic check.`);
                rooms.delete(state.id);
                continue;
            }
            // Skip rooms with no changes, or with a save already in flight.
            if (!state.needsPersist || state.persistPromise)
                continue;
            state.needsPersist = false;
            savedThisTick += 1;
            state.persistPromise = saveSnapshotToBackend(state.id, state.room, state.credentials)
                .catch((error) => {
                logger_1.default.error({ err: error, roomId: state.id }, `[ROOMS] Periodic save failed.`);
            })
                .finally(() => {
                state.persistPromise = null;
                logger_1.default.debug({ roomId: state.id }, "[ROOMS] Persistence promise cleared.");
            });
        }
        logger_1.default.debug({ roomsChecked: rooms.size, roomsUpdatedThisInterval: savedThisTick }, "[ROOMS] Periodic persistence check completed.");
        if (rooms.size === 0) {
            stopPersistenceInterval();
        }
    }, SAVE_INTERVAL_MS);
}
|
package/dist/schema.js
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.whiteboardSchema = void 0;
|
|
4
|
+
const tlschema_1 = require("@tldraw/tlschema");
|
|
5
|
+
exports.whiteboardSchema = (0, tlschema_1.createTLSchema)({
|
|
6
|
+
shapes: {
|
|
7
|
+
...tlschema_1.defaultShapeSchemas,
|
|
8
|
+
//TODO: add custom shapes here
|
|
9
|
+
},
|
|
10
|
+
bindings: tlschema_1.defaultBindingSchemas,
|
|
11
|
+
});
|
package/dist/server.js
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
const fastify_1 = __importDefault(require("fastify"));
|
|
7
|
+
const stream_1 = require("stream");
|
|
8
|
+
const websocket_1 = __importDefault(require("@fastify/websocket"));
|
|
9
|
+
const cors_1 = __importDefault(require("@fastify/cors"));
|
|
10
|
+
const assets_1 = require("./assets");
|
|
11
|
+
const logger_1 = require("./logger");
|
|
12
|
+
const rooms_1 = require("./rooms");
|
|
13
|
+
// Configuration
|
|
14
|
+
/**
 * Parses the SWB_CORS_WHITELIST env value into a CORS `origin` setting:
 * the literal string "*" for allow-all, otherwise an array of origins
 * (whitespace stripped, empty entries dropped).
 */
const parseCorsWhitelist = (cors) => {
    const stripped = (cors || "").replace(/\s/g, "");
    if (stripped === "*") {
        return stripped;
    }
    return stripped.split(",").filter((entry) => entry.length > 0);
};
|
|
20
|
+
// Listen address — both overridable via environment variables.
const PORT = parseInt(process.env.SWB_PORT || "5858", 10);
const HOST = process.env.SWB_HOST || "0.0.0.0"; // Listen on all interfaces by default
const CORS_WHITELIST = parseCorsWhitelist(process.env.SWB_CORS_WHITELIST);
// Initialize Fastify app, reusing the shared pino config from logger.js.
const app = (0, fastify_1.default)({ logger: logger_1.loggerConfig });
// --- Register Plugins ---
app.register(websocket_1.default); // Enable WebSocket support
app.register(cors_1.default, {
    // Configure CORS
    origin: CORS_WHITELIST,
    methods: ["GET", "PUT", "POST", "DELETE", "OPTIONS"], // Allowed HTTP methods
    allowedHeaders: [
        "Content-Type",
        "Authorization",
        "X-Original-Filename", // Custom header carrying the upload's original filename
        "Content-Disposition",
    ],
});
|
|
38
|
+
// --- Define Routes ---
app.register(async (svc) => {
    // Health check endpoint
    svc.get("/", async () => ({
        status: "sync-whiteboard is running",
        time: new Date().toISOString(),
    }));
    // WebSocket connection endpoint for tldraw sync
    svc.get("/connect/:roomId", { websocket: true }, async (socket, req) => {
        const { roomId } = req.params;
        // Client provides sessionId via query param, handled by TLSocketRoom
        const sessionId = req.query?.sessionId;
        // Capture both potential auth headers from the incoming request
        const credentials = {
            authorization: req.headers.authorization,
            cookie: req.headers.cookie,
        };
        // FIX: `credentials` is an object literal and therefore always truthy,
        // so the original `if (!credentials)` check could never fire. Warn when
        // neither auth header is actually present.
        if (!credentials.authorization && !credentials.cookie) {
            req.log.warn({ roomId }, `[SERVER] Connection attempt without Authorization header.`);
        }
        try {
            // Get or create the room instance (loads/creates state)
            const room = await (0, rooms_1.getOrCreateRoom)(roomId, credentials);
            req.log.debug(`[SERVER] Handling WebSocket connection for room ${roomId}`);
            room.handleSocketConnect({ sessionId, socket });
        }
        catch (error) {
            req.log.error({ err: error, roomId: roomId }, `[SERVER] Error initializing room`);
            socket.close(1011, "Internal server error during room initialization");
        }
    });
    /**
     * Pre-warms a room by loading its snapshot into memory from the backend.
     * This is called by the client before it attempts a WebSocket connection
     * to avoid a long wait time on initial ("cold") loads.
     */
    svc.post("/warm-room/:roomId", async (req, reply) => {
        const { roomId } = req.params;
        const credentials = {
            authorization: req.headers.authorization,
            cookie: req.headers.cookie,
        };
        try {
            req.log.info({ roomId }, `[SERVER] Warming up room`);
            // This will trigger the slow readSnapshotFromBackend if the room is cold
            await (0, rooms_1.getOrCreateRoom)(roomId, credentials);
            reply.code(200).send({ success: true, message: "Room is warm." });
        }
        catch (error) {
            req.log.error({ err: error, roomId }, `[SERVER] Error warming up room`);
            reply.code(500).send({ success: false, error: "Failed to warm up room" });
        }
    });
    // --- Asset Handling ---
    // Allow raw body parsing for asset uploads (leave the request stream untouched)
    svc.addContentTypeParser("*", (_, __, done) => done(null));
    /**
     * Handles asset uploads (PUT /assets/:id).
     * Proxies the request body stream to the asset storage backend via storeAsset.
     */
    svc.put("/assets/:id", async (req, reply) => {
        const { id } = req.params;
        const contentType = req.headers["content-type"] || "application/octet-stream";
        const credentials = {
            authorization: req.headers.authorization,
            cookie: req.headers.cookie,
        };
        // Extract original filename from custom header (may arrive as string or string[])
        const originalFilenameHeaderRaw = req.headers["x-original-filename"];
        const originalFilenameHeader = Array.isArray(originalFilenameHeaderRaw)
            ? originalFilenameHeaderRaw[0]
            : originalFilenameHeaderRaw;
        let originalFilename = "unknown_asset";
        if (typeof originalFilenameHeader === "string" &&
            originalFilenameHeader.length > 0) {
            try {
                originalFilename = decodeURIComponent(originalFilenameHeader);
            }
            catch (e) {
                req.log.warn({ headerValue: originalFilenameHeaderRaw }, `[SERVER] Failed to decode X-Original-Filename header`);
                originalFilename = "decode_error";
            }
        }
        else {
            req.log.warn({ headerValue: originalFilenameHeaderRaw }, `[SERVER] X-Original-Filename header missing or invalid`);
        }
        req.log.debug({
            assetId: id,
            contentType: contentType,
            originalFilename: originalFilename,
        }, `[SERVER] PUT /assets/:id`);
        // Validate request body is a stream
        if (!(req.raw instanceof stream_1.Readable)) {
            req.log.error({ assetId: id }, `[SERVER] Error: Request raw body is not a Readable stream`);
            return reply.code(500).send({
                success: false,
                error: "Internal server error: Invalid request body stream.",
            });
        }
        try {
            // Call the asset storage logic (which proxies to the backend)
            await (0, assets_1.storeAsset)(id, req.raw, contentType, originalFilename, credentials);
            req.log.debug({ assetId: id }, `[SERVER] Asset stored successfully.`);
            reply.code(200).send({ success: true });
        }
        catch (error) {
            req.log.error({ err: error, assetId: id }, `[SERVER] Error storing asset`);
            const statusCode = error?.code === "ENOENT" ? 404 : 500;
            reply.code(statusCode).send({
                success: false,
                error: error.message || "Failed to store asset",
            });
        }
    });
    /**
     * Handles asset retrieval (GET /assets/:id).
     * Proxies the request to the asset storage backend via loadAsset and streams the response.
     */
    svc.get("/assets/:id", async (req, reply) => {
        const { id } = req.params;
        req.log.debug({ assetId: id }, `[SERVER] GET /assets/:id`);
        // Capture the credentials if included from client
        const credentials = {
            authorization: req.headers.authorization,
            cookie: req.headers.cookie,
        };
        try {
            // Call the asset loading logic (which proxies to the backend)
            const { stream: dataStream, contentType } = await (0, assets_1.loadAsset)(id, credentials);
            req.log.debug({ assetId: id, contentType: contentType }, `[SERVER] Asset loaded. Sending reply...`);
            // Set the correct Content-Type header and send the stream
            reply.header("Content-Type", contentType);
            reply.send(dataStream);
        }
        catch (error) {
            req.log.error({ err: error, assetId: id }, `[SERVER] Error loading asset`);
            if (error.code === "ENOENT") {
                // Asset not found by the backend
                reply.code(404).send({ success: false, error: "Asset not found" });
            }
            else {
                // Other errors during load
                reply.code(500).send({
                    success: false,
                    error: error.message || "Failed to load asset",
                });
            }
        }
    });
    // --- End Asset Handling ---
});
|
|
190
|
+
// --- End Define Routes ---
|
|
191
|
+
// --- Start Server ---
|
|
192
|
+
// Boots the HTTP/WebSocket server; terminates the process if binding fails.
const start = async () => {
    try {
        await app.listen({ host: HOST, port: PORT });
        app.log.info(`Sync Whiteboard server running on http://${HOST}:${PORT}`);
    }
    catch (err) {
        app.log.fatal({ err }, "Server failed to start");
        process.exit(1); // Exit if server fails to start
    }
};
start();
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@smoothglue/sync-whiteboard",
|
|
3
|
-
"version": "1.
|
|
3
|
+
"version": "1.1.0",
|
|
4
4
|
"main": "dist/server.js",
|
|
5
5
|
"scripts": {
|
|
6
6
|
"dev": "ts-node-dev --respawn --transpile-only src/server.ts",
|
|
@@ -25,11 +25,11 @@
|
|
|
25
25
|
"url": "https://code.build.smoothglue.io/braingu/smoothglue/frontend/sync-whiteboard.git"
|
|
26
26
|
},
|
|
27
27
|
"devDependencies": {
|
|
28
|
-
"@types/node": "^22.
|
|
28
|
+
"@types/node": "^22.18.0",
|
|
29
29
|
"@types/ws": "^8.18.1",
|
|
30
30
|
"ts-node": "^10.9.2",
|
|
31
31
|
"ts-node-dev": "^2.0.0",
|
|
32
|
-
"typescript": "^5.
|
|
32
|
+
"typescript": "^5.9.2"
|
|
33
33
|
},
|
|
34
34
|
"dependencies": {
|
|
35
35
|
"@fastify/cors": "^11.0.1",
|