@vuer-ai/vuer-rtc-server 0.2.0 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env +1 -0
- package/S3_COMPRESSION_GUIDE.md +233 -0
- package/dist/archive/ArchivalService.d.ts +117 -0
- package/dist/archive/ArchivalService.d.ts.map +1 -0
- package/dist/archive/ArchivalService.js +181 -0
- package/dist/archive/ArchivalService.js.map +1 -0
- package/dist/broker/InMemoryBroker.d.ts +2 -0
- package/dist/broker/InMemoryBroker.d.ts.map +1 -1
- package/dist/broker/InMemoryBroker.js +4 -0
- package/dist/broker/InMemoryBroker.js.map +1 -1
- package/dist/compression/CompressionUtils.d.ts +57 -0
- package/dist/compression/CompressionUtils.d.ts.map +1 -0
- package/dist/compression/CompressionUtils.js +90 -0
- package/dist/compression/CompressionUtils.js.map +1 -0
- package/dist/compression/index.d.ts +7 -0
- package/dist/compression/index.d.ts.map +1 -0
- package/dist/compression/index.js +7 -0
- package/dist/compression/index.js.map +1 -0
- package/dist/journal/CoalescingService.d.ts +63 -0
- package/dist/journal/CoalescingService.d.ts.map +1 -0
- package/dist/journal/CoalescingService.js +507 -0
- package/dist/journal/CoalescingService.js.map +1 -0
- package/dist/journal/JournalRLE.d.ts +81 -0
- package/dist/journal/JournalRLE.d.ts.map +1 -0
- package/dist/journal/JournalRLE.js +199 -0
- package/dist/journal/JournalRLE.js.map +1 -0
- package/dist/journal/JournalService.d.ts +7 -3
- package/dist/journal/JournalService.d.ts.map +1 -1
- package/dist/journal/JournalService.js +152 -12
- package/dist/journal/JournalService.js.map +1 -1
- package/dist/journal/RLECompression.d.ts +73 -0
- package/dist/journal/RLECompression.d.ts.map +1 -0
- package/dist/journal/RLECompression.js +152 -0
- package/dist/journal/RLECompression.js.map +1 -0
- package/dist/journal/rle-demo.d.ts +8 -0
- package/dist/journal/rle-demo.d.ts.map +1 -0
- package/dist/journal/rle-demo.js +159 -0
- package/dist/journal/rle-demo.js.map +1 -0
- package/dist/persistence/S3ColdStorage.d.ts +62 -0
- package/dist/persistence/S3ColdStorage.d.ts.map +1 -0
- package/dist/persistence/S3ColdStorage.js +88 -0
- package/dist/persistence/S3ColdStorage.js.map +1 -0
- package/dist/persistence/S3ColdStorageIntegration.d.ts +78 -0
- package/dist/persistence/S3ColdStorageIntegration.d.ts.map +1 -0
- package/dist/persistence/S3ColdStorageIntegration.js +93 -0
- package/dist/persistence/S3ColdStorageIntegration.js.map +1 -0
- package/dist/serve.d.ts +2 -0
- package/dist/serve.d.ts.map +1 -1
- package/dist/serve.js +623 -15
- package/dist/serve.js.map +1 -1
- package/docs/RLE_COMPRESSION.md +397 -0
- package/examples/compression-example.ts +259 -0
- package/package.json +14 -14
- package/src/archive/ArchivalService.ts +250 -0
- package/src/broker/InMemoryBroker.ts +5 -0
- package/src/compression/CompressionUtils.ts +113 -0
- package/src/compression/index.ts +14 -0
- package/src/journal/COALESCING.md +267 -0
- package/src/journal/CoalescingService.ts +626 -0
- package/src/journal/JournalRLE.ts +265 -0
- package/src/journal/JournalService.ts +163 -11
- package/src/journal/RLECompression.ts +210 -0
- package/src/journal/rle-demo.ts +193 -0
- package/src/serve.ts +702 -15
- package/tests/benchmark/journal-optimization-benchmark.test.ts +482 -0
- package/tests/compression/compression.test.ts +343 -0
- package/tests/integration/repositories.test.ts +89 -0
- package/tests/journal/compaction-load-bug.test.ts +409 -0
- package/tests/journal/compaction.test.ts +42 -2
- package/tests/journal/journal-rle.test.ts +511 -0
- package/tests/journal/lww-ordering-bug.test.ts +248 -0
- package/tests/journal/multi-session-coalescing.test.ts +871 -0
- package/tests/journal/rle-compression.test.ts +526 -0
- package/tests/journal/text-coalescing.test.ts +210 -0
- package/tests/unit/s3-compression.test.ts +257 -0
- package/PHASE1_SUMMARY.md +0 -94
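
The bulk of this release, per the file list above, adds journal run-length encoding (JournalRLE, RLECompression, CompressionUtils), operation coalescing (CoalescingService), and S3 cold-storage/archival support. The package's actual APIs live in those files; purely as an illustration of the idea, a run-length encoder over a journal could collapse consecutive operations that touch the same key and path into one run that keeps the latest value. The `JournalOp`, `RLERun`, and `rleEncode` names in this sketch are hypothetical, not exports of @vuer-ai/vuer-rtc-server.

```ts
// Hypothetical sketch only: consecutive ops targeting the same key/path are
// collapsed into a single run that retains the final (last-writer-wins) value.
// These types and names are illustrative, not this package's real API.
interface JournalOp {
  key: string;    // node the operation targets
  path: string;   // property path, e.g. 'position'
  value: unknown; // last-writer-wins value
}

interface RLERun {
  op: JournalOp;  // representative op, holding the run's final value
  count: number;  // how many consecutive ops were collapsed into this run
}

function rleEncode(ops: JournalOp[]): RLERun[] {
  const runs: RLERun[] = [];
  for (const op of ops) {
    const last = runs[runs.length - 1];
    if (last && last.op.key === op.key && last.op.path === op.path) {
      // Same target as the previous op: extend the run, keep the newest value.
      last.op = op;
      last.count += 1;
    } else {
      runs.push({ op, count: 1 });
    }
  }
  return runs;
}
```

Decoding such runs would replay only each run's final op, which is what makes this kind of compression safe for last-writer-wins properties like `vector3.set`.
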
package/tests/journal/compaction-load-bug.test.ts

@@ -0,0 +1,409 @@
+/**
+ * Reproduction test for: Document load fails after compaction
+ *
+ * Bug: Server-side getStateForClient() filters by lamportTime,
+ * but client-side initFromServer() filters by vectorClock comparison.
+ * This mismatch causes messages to be sent but not applied.
+ */
+
+import { describe, it, expect, beforeEach } from '@jest/globals';
+import type { CRDTMessage, Operation } from '@vuer-ai/vuer-rtc';
+import { applyMessage, createEmptyGraph, OperationValidator } from '@vuer-ai/vuer-rtc';
+
+interface JournalEntry {
+  msg: CRDTMessage;
+  deletedAt?: number;
+}
+
+interface Snapshot {
+  graph: any;
+  vectorClock: Record<string, number>;
+  lamportTime: number;
+  journalIndex: number;
+}
+
+interface DocumentState {
+  snapshot: Snapshot;
+  journal: JournalEntry[];
+}
+
+function makeMsg(
+  id: string,
+  sessionId: string,
+  ops: Operation[],
+  lamportTime: number,
+  clock?: Record<string, number>,
+): CRDTMessage {
+  return {
+    id,
+    sessionId,
+    clock: clock ?? { [sessionId]: lamportTime },
+    lamportTime,
+    timestamp: Date.now() / 1000,
+    ops,
+  };
+}
+
+function nodeInsertOp(parentKey: string, nodeKey: string, props: Record<string, unknown> = {}): Operation {
+  return {
+    key: parentKey,
+    otype: 'node.insert',
+    path: 'children',
+    value: {
+      key: nodeKey,
+      tag: 'Mesh',
+      name: nodeKey,
+      ...props,
+    },
+  } as Operation;
+}
+
+function vec3SetOp(nodeKey: string, path: string, value: [number, number, number]): Operation {
+  return {
+    key: nodeKey,
+    otype: 'vector3.set',
+    path,
+    value,
+  } as Operation;
+}
+
+/**
+ * Simulate server-side compaction logic
+ */
+function serverCompact(state: DocumentState): void {
+  let newGraph = state.snapshot.graph;
+  let mergedClock = { ...state.snapshot.vectorClock };
+  let maxLamport = state.snapshot.lamportTime;
+
+  for (const entry of state.journal) {
+    if (!entry.deletedAt) {
+      const realOps = entry.msg.ops.filter((op) => !op.otype.startsWith('meta.'));
+      if (realOps.length > 0) {
+        newGraph = applyMessage(newGraph, { ...entry.msg, ops: realOps });
+      }
+    }
+    for (const [sessionId, time] of Object.entries(entry.msg.clock)) {
+      mergedClock[sessionId] = Math.max(mergedClock[sessionId] || 0, time);
+    }
+    maxLamport = Math.max(maxLamport, entry.msg.lamportTime);
+  }
+
+  state.snapshot = {
+    graph: newGraph,
+    vectorClock: mergedClock,
+    lamportTime: maxLamport,
+    journalIndex: maxLamport,
+  };
+  state.journal = [];
+}
+
+/**
+ * Simulate server-side getStateForClient (CURRENT BUGGY VERSION)
+ */
+function serverGetStateForClient_BUGGY(state: DocumentState): {
+  snapshot: Snapshot;
+  journal: CRDTMessage[];
+} {
+  // Server filters by lamportTime only
+  const postSnapshotJournal = state.journal
+    .filter((e) => e.msg.lamportTime > state.snapshot.lamportTime)
+    .map((e) => e.msg);
+
+  return {
+    snapshot: state.snapshot,
+    journal: postSnapshotJournal,
+  };
+}
+
+/**
+ * Simulate server-side getStateForClient (FIXED VERSION)
+ */
+function serverGetStateForClient_FIXED(state: DocumentState): {
+  snapshot: Snapshot;
+  journal: CRDTMessage[];
+} {
+  // Server filters using vector clock comparison (matches client-side logic)
+  const postSnapshotJournal = state.journal
+    .filter((e) => {
+      // Include message if ANY session in its clock is ahead of snapshot
+      for (const [sessionId, time] of Object.entries(e.msg.clock)) {
+        if (time > (state.snapshot.vectorClock[sessionId] ?? 0)) {
+          return true;
+        }
+      }
+      return false; // All clock components <= snapshot, already applied
+    })
+    .map((e) => e.msg);
+
+  return {
+    snapshot: state.snapshot,
+    journal: postSnapshotJournal,
+  };
+}
+
+/**
+ * Simulate client-side initFromServer filtering (CURRENT VERSION)
+ */
+function clientFilterJournal(snapshot: Snapshot, journal: CRDTMessage[]): CRDTMessage[] {
+  return journal.filter((msg) => {
+    for (const [sid, time] of Object.entries(msg.clock)) {
+      if (time > (snapshot.vectorClock[sid] ?? 0)) return true;
+    }
+    return false; // All components are <= snapshot clock
+  });
+}
+
+describe('Document load fails after compaction (Bug)', () => {
+  it('should reproduce the bug: message sent by server but filtered out by client', () => {
+    // Setup: Create a document with entries from two sessions
+    const state: DocumentState = {
+      snapshot: {
+        graph: createEmptyGraph(),
+        vectorClock: {},
+        lamportTime: 0,
+        journalIndex: 0,
+      },
+      journal: [],
+    };
+
+    // Alice creates scene and cube
+    const m1 = makeMsg('m1', 'alice', [nodeInsertOp('', 'scene')], 1, { alice: 1 });
+    const m2 = makeMsg('m2', 'alice', [nodeInsertOp('scene', 'cube', { position: [0, 0, 0] })], 2, { alice: 2 });
+    state.journal.push({ msg: m1 }, { msg: m2 });
+
+    // Bob moves the cube
+    const m3 = makeMsg('m3', 'bob', [vec3SetOp('cube', 'position', [1, 1, 1])], 3, { alice: 2, bob: 1 });
+    state.journal.push({ msg: m3 });
+
+    // Server compacts
+    serverCompact(state);
+
+    // Check snapshot state after compaction
+    expect(state.snapshot.lamportTime).toBe(3);
+    expect(state.snapshot.vectorClock).toEqual({ alice: 2, bob: 1 });
+    expect(state.journal.length).toBe(0);
+
+    // Alice sends another update AFTER compaction
+    const m4 = makeMsg('m4', 'alice', [vec3SetOp('cube', 'position', [2, 2, 2])], 4, { alice: 3, bob: 1 });
+    state.journal.push({ msg: m4 });
+
+    // Server sends state to new client
+    const transferred = serverGetStateForClient_BUGGY(state);
+
+    // BUG: Server includes m4 because lamportTime 4 > 3
+    expect(transferred.journal.length).toBe(1);
+    expect(transferred.journal[0].id).toBe('m4');
+
+    // But client filters it out because alice: 3 <= snapshot.vectorClock.alice: 3 is FALSE
+    // Wait, actually alice: 3 > 2, so it SHOULD pass...
+
+    // Let me create a different scenario that triggers the bug:
+  });
+
+  it('should reproduce the actual bug: vector clock vs lamport time mismatch', () => {
+    // Setup: Concurrent messages from multiple sessions
+    const state: DocumentState = {
+      snapshot: {
+        graph: createEmptyGraph(),
+        vectorClock: {},
+        lamportTime: 0,
+        journalIndex: 0,
+      },
+      journal: [],
+    };
+
+    // Alice creates scene
+    const m1 = makeMsg('m1', 'alice', [nodeInsertOp('', 'scene')], 1, { alice: 1 });
+    // Bob creates cube (concurrent with alice)
+    const m2 = makeMsg('m2', 'bob', [nodeInsertOp('scene', 'cube')], 2, { bob: 1 });
+    // Alice moves cube (has seen bob's message)
+    const m3 = makeMsg('m3', 'alice', [vec3SetOp('cube', 'position', [1, 1, 1])], 3, { alice: 2, bob: 1 });
+
+    state.journal.push({ msg: m1 }, { msg: m2 }, { msg: m3 });
+
+    // Compact
+    serverCompact(state);
+
+    // Snapshot now has:
+    // lamportTime: 3
+    // vectorClock: { alice: 2, bob: 1 }
+    expect(state.snapshot.lamportTime).toBe(3);
+    expect(state.snapshot.vectorClock).toEqual({ alice: 2, bob: 1 });
+
+    // Bob sends another message with OLD local clock (hasn't seen alice's latest)
+    const m4 = makeMsg('m4', 'bob', [vec3SetOp('cube', 'position', [5, 5, 5])], 4, { alice: 1, bob: 2 });
+    state.journal.push({ msg: m4 });
+
+    // Server sends state
+    const transferred = serverGetStateForClient_BUGGY(state);
+
+    // Server includes m4 because lamportTime 4 > 3 ✓
+    expect(transferred.journal.length).toBe(1);
+    expect(transferred.journal[0].id).toBe('m4');
+
+    // Client filtering:
+    // m4.clock = { alice: 1, bob: 2 }
+    // snapshot.vectorClock = { alice: 2, bob: 1 }
+    // alice: 1 > 2? NO
+    // bob: 2 > 1? YES
+    // Result: Message PASSES client filter
+
+    const clientFiltered = clientFilterJournal(transferred.snapshot, transferred.journal);
+    expect(clientFiltered.length).toBe(1);
+
+    // Actually, this scenario also passes. Let me think of another case...
+  });
+
+  it('should demonstrate the core issue: filter criteria mismatch', () => {
+    // The real bug is more subtle. Let's test with a scenario where:
+    // - Message has lamportTime > snapshot.lamportTime (included by server)
+    // - But message's clock is completely dominated by snapshot clock (excluded by client)
+
+    // This can happen when compaction happens at a different lamport time than
+    // the max vector clock component
+
+    const state: DocumentState = {
+      snapshot: {
+        graph: createEmptyGraph(),
+        vectorClock: {},
+        lamportTime: 0,
+        journalIndex: 0,
+      },
+      journal: [],
+    };
+
+    // Three sessions: alice, bob, carol
+    const m1 = makeMsg('m1', 'alice', [nodeInsertOp('', 'scene')], 1, { alice: 1 });
+    const m2 = makeMsg('m2', 'bob', [nodeInsertOp('scene', 'cube')], 2, { bob: 1 });
+    const m3 = makeMsg('m3', 'carol', [vec3SetOp('cube', 'position', [1, 1, 1])], 3, { carol: 1 });
+
+    state.journal.push({ msg: m1 }, { msg: m2 }, { msg: m3 });
+
+    // Compact - this sets:
+    // lamportTime: 3 (max)
+    // vectorClock: { alice: 1, bob: 1, carol: 1 }
+    serverCompact(state);
+
+    expect(state.snapshot.lamportTime).toBe(3);
+    expect(state.snapshot.vectorClock).toEqual({ alice: 1, bob: 1, carol: 1 });
+
+    // Now alice sends a message that was delayed/concurrent
+    // Alice hasn't seen bob or carol's messages yet
+    const m4 = makeMsg('m4', 'alice', [vec3SetOp('cube', 'position', [2, 2, 2])], 4, { alice: 2 });
+    state.journal.push({ msg: m4 });
+
+    // Server filtering: lamportTime 4 > 3 → INCLUDED
+    const transferred = serverGetStateForClient_BUGGY(state);
+    expect(transferred.journal.length).toBe(1);
+    expect(transferred.journal[0].id).toBe('m4');
+
+    // Client filtering:
+    // m4.clock = { alice: 2 }
+    // snapshot.vectorClock = { alice: 1, bob: 1, carol: 1 }
+    // alice: 2 > 1? YES → INCLUDED
+    const clientFiltered = clientFilterJournal(transferred.snapshot, transferred.journal);
+    expect(clientFiltered.length).toBe(1);
+
+    // Still works... Let me try a different approach
+  });
+
+  it('ACTUAL BUG: journalIndex field confusion', () => {
+    // I think the actual bug might be that snapshot.journalIndex is set to lamportTime,
+    // but should be used differently. Or there's confusion between lamportTime filtering
+    // and journalIndex.
+
+    // Looking at the code again:
+    // In compact():
+    // state.snapshot.journalIndex = maxLamport;
+    // In getStateForClient():
+    // .filter((e) => e.msg.lamportTime > state.snapshot.lamportTime)
+    //
+    // This should work correctly IF all messages are ordered by lamportTime
+    // and there are no gaps.
+
+    // But what if there's a gap in lamport times after compaction?
+
+    const state: DocumentState = {
+      snapshot: {
+        graph: createEmptyGraph(),
+        vectorClock: {},
+        lamportTime: 0,
+        journalIndex: 0,
+      },
+      journal: [],
+    };
+
+    // Messages with lamport times 1, 2, 5 (gap at 3, 4)
+    const m1 = makeMsg('m1', 'alice', [nodeInsertOp('', 'scene')], 1, { alice: 1 });
+    const m2 = makeMsg('m2', 'bob', [nodeInsertOp('scene', 'cube')], 2, { bob: 1 });
+    const m5 = makeMsg('m5', 'carol', [vec3SetOp('cube', 'position', [1, 1, 1])], 5, { carol: 1 });
+
+    state.journal.push({ msg: m1 }, { msg: m2 }, { msg: m5 });
+
+    // Compact
+    serverCompact(state);
+
+    // Snapshot: lamportTime = 5
+    expect(state.snapshot.lamportTime).toBe(5);
+
+    // New message with lamportTime 3 (filling the gap, received late)
+    const m3 = makeMsg('m3', 'dave', [vec3SetOp('cube', 'position', [2, 2, 2])], 3, { dave: 1 });
+    state.journal.push({ msg: m3 });
+
+    // Server: lamportTime 3 > 5? NO → EXCLUDED
+    const transferred = serverGetStateForClient_BUGGY(state);
+    expect(transferred.journal.length).toBe(0); // BUG: m3 is excluded!
+
+    // But m3 should be included because it hasn't been applied yet!
+    // This is the bug: after compaction, any message with lamportTime <= snapshot.lamportTime
+    // is assumed to be already applied, even if it arrived late.
+
+    console.log('BUG REPRODUCED: Message m3 with lamportTime 3 is excluded after compaction');
+    console.log('snapshot.lamportTime:', state.snapshot.lamportTime);
+    console.log('m3.lamportTime:', m3.lamportTime);
+    console.log('Server includes m3?:', m3.lamportTime > state.snapshot.lamportTime);
+  });
+
+  it('FIX: Use vector clock comparison to include delayed messages', () => {
+    const state: DocumentState = {
+      snapshot: {
+        graph: createEmptyGraph(),
+        vectorClock: {},
+        lamportTime: 0,
+        journalIndex: 0,
+      },
+      journal: [],
+    };
+
+    // Messages with lamport times 1, 2, 5 (gap at 3, 4)
+    const m1 = makeMsg('m1', 'alice', [nodeInsertOp('', 'scene')], 1, { alice: 1 });
+    const m2 = makeMsg('m2', 'bob', [nodeInsertOp('scene', 'cube')], 2, { bob: 1 });
+    const m5 = makeMsg('m5', 'carol', [vec3SetOp('cube', 'position', [1, 1, 1])], 5, { carol: 1 });
+
+    state.journal.push({ msg: m1 }, { msg: m2 }, { msg: m5 });
+
+    // Compact
+    serverCompact(state);
+
+    // Snapshot: lamportTime = 5, vectorClock = { alice: 1, bob: 1, carol: 1 }
+    expect(state.snapshot.lamportTime).toBe(5);
+    expect(state.snapshot.vectorClock).toEqual({ alice: 1, bob: 1, carol: 1 });
+
+    // New message from dave (new session) with lamportTime 3 (filling the gap)
+    const m3 = makeMsg('m3', 'dave', [vec3SetOp('cube', 'position', [2, 2, 2])], 3, { dave: 1 });
+    state.journal.push({ msg: m3 });
+
+    // Buggy version: EXCLUDES m3 (lamportTime 3 <= 5)
+    const buggyResult = serverGetStateForClient_BUGGY(state);
+    expect(buggyResult.journal.length).toBe(0);
+
+    // Fixed version: INCLUDES m3 (dave: 1 > snapshot.vectorClock.dave: 0)
+    const fixedResult = serverGetStateForClient_FIXED(state);
+    expect(fixedResult.journal.length).toBe(1);
+    expect(fixedResult.journal[0].id).toBe('m3');
+
+    console.log('FIX VERIFIED: Message m3 is now correctly included');
+    console.log('m3.clock:', m3.clock);
+    console.log('snapshot.vectorClock:', state.snapshot.vectorClock);
+  });
+});
package/tests/journal/compaction.test.ts

@@ -410,8 +410,8 @@ describe('JournalService.compact()', () => {
 
     expect(graphAfter.nodes['cube'].position).toEqual(graphBefore.nodes['cube'].position);
    expect(graphAfter.nodes['cube'].opacity).toBe(graphBefore.nodes['cube'].opacity);
-    // sphere should be tombstoned
-    expect(graphAfter.
+    // sphere should be tombstoned (tombstones are at root level, not on node)
+    expect(graphAfter.tombstones['sphere']).toBeDefined();
   });
 
   it('should update snapshot vector clock after compaction', () => {

@@ -645,4 +645,44 @@ describe('Edge Cases', () => {
       expect(snap.vectorClock[s]).toBe(10);
     }
   });
+
+  it('should handle graphs with circular references (parent pointers) during compaction', () => {
+    const DOC_ID = 'doc-circular';
+    svc.createDocument(DOC_ID);
+
+    // Create a scene graph
+    svc.processMessage(DOC_ID, makeMsg('m1', 'a', [nodeInsertOp('', 'scene', { tag: 'Scene' })], 1));
+    svc.processMessage(DOC_ID, makeMsg('m2', 'a', [nodeInsertOp('scene', 'parent', { tag: 'Group' })], 2));
+    svc.processMessage(DOC_ID, makeMsg('m3', 'a', [nodeInsertOp('parent', 'child', { tag: 'Mesh', position: [1, 2, 3] })], 3));
+
+    const graphBefore = svc.computeGraph(svc.loadDocument(DOC_ID)!);
+
+    // Add circular references (parent pointers) that would cause JSON.stringify to fail
+    // This simulates what happens in real scene graphs with bidirectional links
+    const circularGraph = {
+      ...graphBefore,
+      nodes: {
+        ...graphBefore.nodes,
+        parent: { ...graphBefore.nodes['parent'], parent: graphBefore.nodes['scene'] },
+        child: { ...graphBefore.nodes['child'], parent: graphBefore.nodes['parent'] },
+      },
+    };
+
+    // Manually set state to have circular refs (simulating real-world scenario)
+    const state = svc.loadDocument(DOC_ID)!;
+    state.snapshot.graph = circularGraph as any;
+
+    // Compaction should not throw even with circular references
+    // The safeSerialize function in JournalService strips parent pointers
+    expect(() => svc.compact(DOC_ID)).not.toThrow();
+
+    // Verify graph structure is preserved (minus parent pointers)
+    const stateAfter = svc.loadDocument(DOC_ID)!;
+    expect(stateAfter.snapshot.graph.nodes['scene']).toBeDefined();
+    expect(stateAfter.snapshot.graph.nodes['parent']).toBeDefined();
+    expect(stateAfter.snapshot.graph.nodes['child']).toBeDefined();
+
+    // Verify data integrity (non-circular properties should be preserved)
+    expect((stateAfter.snapshot.graph.nodes['child'] as any).position).toEqual([1, 2, 3]);
+  });
 });
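
The new circular-reference test above relies on the comment that JournalService's safeSerialize strips parent pointers before the compacted snapshot is serialized. The actual implementation is not shown in this diff; a minimal sketch of that idea, using hypothetical names, might look like this.

```ts
// Hypothetical sketch only: drop circular 'parent' back-references before JSON
// serialization. The real safeSerialize in JournalService may differ.
function stripParentPointers<T>(graph: T): T {
  return JSON.parse(
    JSON.stringify(graph, (key, value) => (key === 'parent' ? undefined : value)),
  );
}

// Usage: a node graph whose children point back at their parent serializes cleanly.
const scene: any = { key: 'scene', children: [] as any[] };
const child: any = { key: 'child', parent: scene, position: [1, 2, 3] };
scene.children.push(child);

const safe = stripParentPointers(scene);
// safe.children[0].position is preserved; safe.children[0].parent is gone,
// so JSON.stringify no longer throws on the cycle.
```
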