@vuer-ai/vuer-rtc-server 0.2.0 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env +1 -0
- package/S3_COMPRESSION_GUIDE.md +233 -0
- package/dist/archive/ArchivalService.d.ts +117 -0
- package/dist/archive/ArchivalService.d.ts.map +1 -0
- package/dist/archive/ArchivalService.js +181 -0
- package/dist/archive/ArchivalService.js.map +1 -0
- package/dist/broker/InMemoryBroker.d.ts +2 -0
- package/dist/broker/InMemoryBroker.d.ts.map +1 -1
- package/dist/broker/InMemoryBroker.js +4 -0
- package/dist/broker/InMemoryBroker.js.map +1 -1
- package/dist/compression/CompressionUtils.d.ts +57 -0
- package/dist/compression/CompressionUtils.d.ts.map +1 -0
- package/dist/compression/CompressionUtils.js +90 -0
- package/dist/compression/CompressionUtils.js.map +1 -0
- package/dist/compression/index.d.ts +7 -0
- package/dist/compression/index.d.ts.map +1 -0
- package/dist/compression/index.js +7 -0
- package/dist/compression/index.js.map +1 -0
- package/dist/journal/CoalescingService.d.ts +63 -0
- package/dist/journal/CoalescingService.d.ts.map +1 -0
- package/dist/journal/CoalescingService.js +507 -0
- package/dist/journal/CoalescingService.js.map +1 -0
- package/dist/journal/JournalRLE.d.ts +81 -0
- package/dist/journal/JournalRLE.d.ts.map +1 -0
- package/dist/journal/JournalRLE.js +199 -0
- package/dist/journal/JournalRLE.js.map +1 -0
- package/dist/journal/JournalService.d.ts +7 -3
- package/dist/journal/JournalService.d.ts.map +1 -1
- package/dist/journal/JournalService.js +152 -12
- package/dist/journal/JournalService.js.map +1 -1
- package/dist/journal/RLECompression.d.ts +73 -0
- package/dist/journal/RLECompression.d.ts.map +1 -0
- package/dist/journal/RLECompression.js +152 -0
- package/dist/journal/RLECompression.js.map +1 -0
- package/dist/journal/rle-demo.d.ts +8 -0
- package/dist/journal/rle-demo.d.ts.map +1 -0
- package/dist/journal/rle-demo.js +159 -0
- package/dist/journal/rle-demo.js.map +1 -0
- package/dist/persistence/S3ColdStorage.d.ts +62 -0
- package/dist/persistence/S3ColdStorage.d.ts.map +1 -0
- package/dist/persistence/S3ColdStorage.js +88 -0
- package/dist/persistence/S3ColdStorage.js.map +1 -0
- package/dist/persistence/S3ColdStorageIntegration.d.ts +78 -0
- package/dist/persistence/S3ColdStorageIntegration.d.ts.map +1 -0
- package/dist/persistence/S3ColdStorageIntegration.js +93 -0
- package/dist/persistence/S3ColdStorageIntegration.js.map +1 -0
- package/dist/serve.d.ts +2 -0
- package/dist/serve.d.ts.map +1 -1
- package/dist/serve.js +623 -15
- package/dist/serve.js.map +1 -1
- package/docs/RLE_COMPRESSION.md +397 -0
- package/examples/compression-example.ts +259 -0
- package/package.json +14 -14
- package/src/archive/ArchivalService.ts +250 -0
- package/src/broker/InMemoryBroker.ts +5 -0
- package/src/compression/CompressionUtils.ts +113 -0
- package/src/compression/index.ts +14 -0
- package/src/journal/COALESCING.md +267 -0
- package/src/journal/CoalescingService.ts +626 -0
- package/src/journal/JournalRLE.ts +265 -0
- package/src/journal/JournalService.ts +163 -11
- package/src/journal/RLECompression.ts +210 -0
- package/src/journal/rle-demo.ts +193 -0
- package/src/serve.ts +702 -15
- package/tests/benchmark/journal-optimization-benchmark.test.ts +482 -0
- package/tests/compression/compression.test.ts +343 -0
- package/tests/integration/repositories.test.ts +89 -0
- package/tests/journal/compaction-load-bug.test.ts +409 -0
- package/tests/journal/compaction.test.ts +42 -2
- package/tests/journal/journal-rle.test.ts +511 -0
- package/tests/journal/lww-ordering-bug.test.ts +248 -0
- package/tests/journal/multi-session-coalescing.test.ts +871 -0
- package/tests/journal/rle-compression.test.ts +526 -0
- package/tests/journal/text-coalescing.test.ts +210 -0
- package/tests/unit/s3-compression.test.ts +257 -0
- package/PHASE1_SUMMARY.md +0 -94
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Compression Utilities for S3 Cold Storage
|
|
3
|
+
*
|
|
4
|
+
* Provides gzip compression/decompression for journal snapshots
|
|
5
|
+
* when archiving to S3 cold storage. Maintains data integrity
|
|
6
|
+
* through roundtrip verification.
|
|
7
|
+
*/
|
|
8
|
+
import { gzipSync, gunzipSync } from 'zlib';
|
|
9
|
+
/**
 * Compress a JSON snapshot for S3 storage.
 *
 * Serializes the snapshot to compact JSON, gzips it at level 6, and
 * reports size metrics for the operation.
 *
 * @param snapshot The snapshot object to compress
 * @returns {{ compressed: Buffer, original: Buffer, ratio: number,
 *            originalSize: number, compressedSize: number }}
 */
export function compressSnapshot(snapshot) {
    // Compact serialization (no pretty-printing) keeps the payload small.
    const plain = Buffer.from(JSON.stringify(snapshot), 'utf-8');
    // Level 6 trades a little compression ratio for noticeably better speed.
    const packed = gzipSync(plain, { level: 6 });
    const originalSize = plain.byteLength;
    const compressedSize = packed.byteLength;
    return {
        compressed: packed,
        original: plain,
        // Ratio is expressed as a percentage of the original size.
        ratio: (compressedSize / originalSize) * 100,
        originalSize,
        compressedSize,
    };
}
|
|
34
|
+
/**
 * Decompress a snapshot previously stored in S3.
 *
 * @param compressedBuffer The gzipped buffer fetched from S3
 * @returns {{ decompressed: Buffer, verified: boolean }} — `verified` is
 *          true whenever gunzip succeeded; no deeper integrity check is done.
 * @throws {Error} when the buffer is not valid gzip data
 */
export function decompressSnapshot(compressedBuffer) {
    let decompressed;
    try {
        decompressed = gunzipSync(compressedBuffer);
    }
    catch (error) {
        const reason = error instanceof Error ? error.message : String(error);
        throw new Error(`Failed to decompress snapshot: ${reason}`);
    }
    return {
        decompressed,
        verified: true,
    };
}
|
|
52
|
+
/**
 * Verify that compression roundtrip preserves data integrity.
 *
 * Compresses the snapshot, decompresses the result, and checks that the
 * decompressed JSON matches the original serialization. Useful for testing.
 *
 * @param snapshot The snapshot to verify
 * @returns true if the roundtrip preserves the data, false otherwise
 */
export function verifyCompressionRoundtrip(snapshot) {
    try {
        const expected = JSON.stringify(snapshot);
        const { compressed } = compressSnapshot(snapshot);
        const roundtripped = decompressSnapshot(compressed).decompressed.toString('utf-8');
        return roundtripped === expected;
    }
    catch {
        // Any failure anywhere in the pipeline means the roundtrip is unsafe.
        return false;
    }
}
|
|
73
|
+
/**
 * Get compression statistics for a snapshot.
 *
 * Runs a compression pass purely to measure sizes; nothing is stored.
 *
 * @param snapshot The snapshot to analyze
 * @returns {{ originalSize: number, compressedSize: number, ratio: number,
 *            savedBytes: number }}
 */
export function getCompressionStats(snapshot) {
    const { originalSize, compressedSize, ratio } = compressSnapshot(snapshot);
    return {
        originalSize,
        compressedSize,
        ratio,
        savedBytes: originalSize - compressedSize,
    };
}
|
|
90
|
+
//# sourceMappingURL=CompressionUtils.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"CompressionUtils.js","sourceRoot":"","sources":["../../src/compression/CompressionUtils.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AAEH,OAAO,EAAE,QAAQ,EAAE,UAAU,EAAE,MAAM,MAAM,CAAC;AAe5C;;;;;GAKG;AACH,MAAM,UAAU,gBAAgB,CAAC,QAAiB;IAChD,mDAAmD;IACnD,MAAM,UAAU,GAAG,IAAI,CAAC,SAAS,CAAC,QAAQ,CAAC,CAAC;IAC5C,MAAM,cAAc,GAAG,MAAM,CAAC,IAAI,CAAC,UAAU,EAAE,OAAO,CAAC,CAAC;IAExD,sBAAsB;IACtB,MAAM,gBAAgB,GAAG,QAAQ,CAAC,cAAc,EAAE;QAChD,KAAK,EAAE,CAAC,EAAE,8CAA8C;KACzD,CAAC,CAAC;IAEH,MAAM,YAAY,GAAG,cAAc,CAAC,UAAU,CAAC;IAC/C,MAAM,cAAc,GAAG,gBAAgB,CAAC,UAAU,CAAC;IACnD,MAAM,KAAK,GAAG,CAAC,cAAc,GAAG,YAAY,CAAC,GAAG,GAAG,CAAC;IAEpD,OAAO;QACL,UAAU,EAAE,gBAAgB;QAC5B,QAAQ,EAAE,cAAc;QACxB,KAAK;QACL,YAAY;QACZ,cAAc;KACf,CAAC;AACJ,CAAC;AAED;;;;;GAKG;AACH,MAAM,UAAU,kBAAkB,CAAC,gBAAwB;IACzD,IAAI,CAAC;QACH,MAAM,kBAAkB,GAAG,UAAU,CAAC,gBAAgB,CAAC,CAAC;QACxD,OAAO;YACL,YAAY,EAAE,kBAAkB;YAChC,QAAQ,EAAE,IAAI;SACf,CAAC;IACJ,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QACf,MAAM,IAAI,KAAK,CAAC,kCAAkC,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC;IAC9G,CAAC;AACH,CAAC;AAED;;;;;;;;GAQG;AACH,MAAM,UAAU,0BAA0B,CAAC,QAAiB;IAC1D,IAAI,CAAC;QACH,MAAM,QAAQ,GAAG,IAAI,CAAC,SAAS,CAAC,QAAQ,CAAC,CAAC;QAC1C,MAAM,UAAU,GAAG,gBAAgB,CAAC,QAAQ,CAAC,CAAC;QAC9C,MAAM,YAAY,GAAG,kBAAkB,CAAC,UAAU,CAAC,UAAU,CAAC,CAAC;QAC/D,MAAM,kBAAkB,GAAG,YAAY,CAAC,YAAY,CAAC,QAAQ,CAAC,OAAO,CAAC,CAAC;QACvE,OAAO,QAAQ,KAAK,kBAAkB,CAAC;IACzC,CAAC;IAAC,MAAM,CAAC;QACP,OAAO,KAAK,CAAC;IACf,CAAC;AACH,CAAC;AAED;;;;;;;GAOG;AACH,MAAM,UAAU,mBAAmB,CAAC,QAAiB;IAMnD,MAAM,MAAM,GAAG,gBAAgB,CAAC,QAAQ,CAAC,CAAC;IAC1C,OAAO;QACL,YAAY,EAAE,MAAM,CAAC,YAAY;QACjC,cAAc,EAAE,MAAM,CAAC,cAAc;QACrC,KAAK,EAAE,MAAM,CAAC,KAAK;QACnB,UAAU,EAAE,MAAM,CAAC,YAAY,GAAG,MAAM,CAAC,cAAc;KACxD,CAAC;AACJ,CAAC"}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Compression module exports
|
|
3
|
+
*
|
|
4
|
+
* Provides gzip compression utilities for S3 cold storage archival
|
|
5
|
+
*/
|
|
6
|
+
export { compressSnapshot, decompressSnapshot, verifyCompressionRoundtrip, getCompressionStats, type CompressionResult, type DecompressionResult, } from './CompressionUtils.js';
|
|
7
|
+
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/compression/index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,EACL,gBAAgB,EAChB,kBAAkB,EAClB,0BAA0B,EAC1B,mBAAmB,EACnB,KAAK,iBAAiB,EACtB,KAAK,mBAAmB,GACzB,MAAM,uBAAuB,CAAC"}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Compression module exports
|
|
3
|
+
*
|
|
4
|
+
* Provides gzip compression utilities for S3 cold storage archival
|
|
5
|
+
*/
|
|
6
|
+
export { compressSnapshot, decompressSnapshot, verifyCompressionRoundtrip, getCompressionStats, } from './CompressionUtils.js';
|
|
7
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/compression/index.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,EACL,gBAAgB,EAChB,kBAAkB,EAClB,0BAA0B,EAC1B,mBAAmB,GAGpB,MAAM,uBAAuB,CAAC"}
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* CoalescingService - Merges compatible operations in journal batches
|
|
3
|
+
*
|
|
4
|
+
* Reduces journal size by:
|
|
5
|
+
* - Merging consecutive text.insert operations into multi-char inserts
|
|
6
|
+
* - Merging set operations on same (key, path) within time threshold
|
|
7
|
+
* - Merging additive operations (vector3.add, number.add)
|
|
8
|
+
*/
|
|
9
|
+
import type { PrismaClient } from '@prisma/client';
|
|
10
|
+
/**
|
|
11
|
+
* Coalescing strategy for set operations
|
|
12
|
+
*/
|
|
13
|
+
export type SetCoalescingStrategy = 'last-write' | 'throttle';
|
|
14
|
+
/**
|
|
15
|
+
* Per-property coalescing configuration
|
|
16
|
+
*/
|
|
17
|
+
export interface PropertyCoalescingConfig {
|
|
18
|
+
/** Time window for coalescing (ms) */
|
|
19
|
+
thresholdMs?: number;
|
|
20
|
+
/** Coalescing strategy */
|
|
21
|
+
strategy?: SetCoalescingStrategy;
|
|
22
|
+
/** Sampling interval for throttle strategy (ms) */
|
|
23
|
+
samplingIntervalMs?: number;
|
|
24
|
+
/** Delta threshold - only coalesce if value change is below this (for numeric values) */
|
|
25
|
+
deltaThreshold?: number;
|
|
26
|
+
}
|
|
27
|
+
export interface CoalescingConfig {
|
|
28
|
+
/** Default time threshold for set operations (ms) */
|
|
29
|
+
setThresholdMs?: number;
|
|
30
|
+
/** Default coalescing strategy for set operations */
|
|
31
|
+
setStrategy?: SetCoalescingStrategy;
|
|
32
|
+
/** Default sampling interval for throttle strategy (ms) */
|
|
33
|
+
samplingIntervalMs?: number;
|
|
34
|
+
/** Per-property overrides (key format: "key.path" or "*.path" for all keys) */
|
|
35
|
+
propertyConfig?: Record<string, PropertyCoalescingConfig>;
|
|
36
|
+
enableTextCoalesce?: boolean;
|
|
37
|
+
enableSetCoalesce?: boolean;
|
|
38
|
+
enableVectorCoalesce?: boolean;
|
|
39
|
+
}
|
|
40
|
+
export interface CoalescingResult {
|
|
41
|
+
ok: boolean;
|
|
42
|
+
before: {
|
|
43
|
+
journalBatches: number;
|
|
44
|
+
operations: number;
|
|
45
|
+
};
|
|
46
|
+
after: {
|
|
47
|
+
journalBatches: number;
|
|
48
|
+
operations: number;
|
|
49
|
+
};
|
|
50
|
+
reduction: {
|
|
51
|
+
journalBatches: number;
|
|
52
|
+
operations: number;
|
|
53
|
+
};
|
|
54
|
+
}
|
|
55
|
+
export declare class CoalescingService {
|
|
56
|
+
private prisma;
|
|
57
|
+
constructor(prisma: PrismaClient);
|
|
58
|
+
/**
|
|
59
|
+
* Coalesce operations in a document's journal
|
|
60
|
+
*/
|
|
61
|
+
coalesce(docId: string, config?: CoalescingConfig): Promise<CoalescingResult>;
|
|
62
|
+
}
|
|
63
|
+
//# sourceMappingURL=CoalescingService.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"CoalescingService.d.ts","sourceRoot":"","sources":["../../src/journal/CoalescingService.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAEH,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAC;AAOnD;;GAEG;AACH,MAAM,MAAM,qBAAqB,GAC7B,YAAY,GACZ,UAAU,CAAC;AAEf;;GAEG;AACH,MAAM,WAAW,wBAAwB;IACvC,sCAAsC;IACtC,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,0BAA0B;IAC1B,QAAQ,CAAC,EAAE,qBAAqB,CAAC;IACjC,mDAAmD;IACnD,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,yFAAyF;IACzF,cAAc,CAAC,EAAE,MAAM,CAAC;CACzB;AAED,MAAM,WAAW,gBAAgB;IAC/B,qDAAqD;IACrD,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,qDAAqD;IACrD,WAAW,CAAC,EAAE,qBAAqB,CAAC;IACpC,2DAA2D;IAC3D,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,+EAA+E;IAC/E,cAAc,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,wBAAwB,CAAC,CAAC;IAG1D,kBAAkB,CAAC,EAAE,OAAO,CAAC;IAC7B,iBAAiB,CAAC,EAAE,OAAO,CAAC;IAC5B,oBAAoB,CAAC,EAAE,OAAO,CAAC;CAChC;AAED,MAAM,WAAW,gBAAgB;IAC/B,EAAE,EAAE,OAAO,CAAC;IACZ,MAAM,EAAE;QAAE,cAAc,EAAE,MAAM,CAAC;QAAC,UAAU,EAAE,MAAM,CAAA;KAAE,CAAC;IACvD,KAAK,EAAE;QAAE,cAAc,EAAE,MAAM,CAAC;QAAC,UAAU,EAAE,MAAM,CAAA;KAAE,CAAC;IACtD,SAAS,EAAE;QAAE,cAAc,EAAE,MAAM,CAAC;QAAC,UAAU,EAAE,MAAM,CAAA;KAAE,CAAC;CAC3D;AAueD,qBAAa,iBAAiB;IAChB,OAAO,CAAC,MAAM;gBAAN,MAAM,EAAE,YAAY;IAExC;;OAEG;IACG,QAAQ,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,GAAE,gBAAqB,GAAG,OAAO,CAAC,gBAAgB,CAAC;CA0ExF"}
|
|
@@ -0,0 +1,507 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* CoalescingService - Merges compatible operations in journal batches
|
|
3
|
+
*
|
|
4
|
+
* Reduces journal size by:
|
|
5
|
+
* - Merging consecutive text.insert operations into multi-char inserts
|
|
6
|
+
* - Merging set operations on same (key, path) within time threshold
|
|
7
|
+
* - Merging additive operations (vector3.add, number.add)
|
|
8
|
+
*/
|
|
9
|
+
// Default time threshold for set operation coalescing (ms)
const DEFAULT_SET_THRESHOLD_MS = 5000;
// Default sampling interval for the 'throttle' set-coalescing strategy (ms)
const DEFAULT_SAMPLING_INTERVAL_MS = 100;
|
|
12
|
+
/**
 * Check whether an operation is a position-based text insert:
 * otype 'text.insert' with a numeric position and a string value.
 */
function isPositionTextInsertOp(op) {
    if (op.otype !== 'text.insert') {
        return false;
    }
    return typeof op.position === 'number' && typeof op.value === 'string';
}
|
|
20
|
+
/**
 * Check whether an operation is a position-based text delete:
 * otype 'text.delete' with a numeric position and a numeric length.
 */
function isPositionTextDeleteOp(op) {
    if (op.otype !== 'text.delete') {
        return false;
    }
    return typeof op.position === 'number' && typeof op.length === 'number';
}
|
|
28
|
+
/**
 * Check whether an operation is a set operation (any '*.set' otype).
 */
function isSetOp(op) {
    // Equivalent to op.otype.endsWith('.set') for string otypes.
    return op.otype.slice(-4) === '.set';
}
|
|
34
|
+
/**
 * Check whether an operation is additive (safe to merge by summing values).
 *
 * Note: number.multiply is NOT included because multiplying several factors
 * together gives a different result than applying them sequentially.
 */
function isAdditiveOp(op) {
    const additiveTypes = ['vector3.add', 'number.add'];
    return additiveTypes.includes(op.otype);
}
|
|
43
|
+
/**
 * Coalesce consecutive text inserts and deletes (works with timestamped ops)
 *
 * Insert coalescing: Consecutive inserts at adjacent positions
 *   insert(10, "a"), insert(11, "b"), insert(12, "c") → insert(10, "abc")
 *
 * Delete coalescing:
 * - Forward (Delete key): Same position
 *   del(10, 1), del(10, 1), del(10, 1) → del(10, 3)
 * - Backward (Backspace): Position moves left by length
 *   del(10, 1), del(9, 1), del(8, 1) → del(8, 3)
 *
 * @param tOps Array of { op, timestamp } entries in original order
 * @returns New array of { op, timestamp }; non-text ops pass through unchanged
 */
function coalesceTextOps(tOps) {
    if (tOps.length === 0)
        return tOps;
    const result = [];
    // Invariant: at most one of pendingInsert / pendingDelete is non-null at
    // any point, because each branch flushes the other kind before buffering.
    let pendingInsert = null;
    let pendingDelete = null;
    for (const tOp of tOps) {
        const op = tOp.op;
        if (isPositionTextInsertOp(op)) {
            // Flush any pending delete
            if (pendingDelete !== null) {
                result.push({ op: pendingDelete.op, timestamp: pendingDelete.timestamp });
                pendingDelete = null;
            }
            // Coalesce inserts
            if (pendingInsert === null) {
                // Start a fresh pending insert (copy so merging never mutates input ops).
                pendingInsert = {
                    op: {
                        otype: 'text.insert',
                        key: op.key,
                        path: op.path,
                        position: op.position,
                        value: op.value,
                    },
                    timestamp: tOp.timestamp,
                };
            }
            else if (pendingInsert.op.key === op.key &&
                pendingInsert.op.path === op.path &&
                pendingInsert.op.position + pendingInsert.op.value.length === op.position) {
                // Consecutive insert - merge
                // (new op starts exactly where the buffered text ends)
                pendingInsert = {
                    op: {
                        otype: 'text.insert',
                        key: pendingInsert.op.key,
                        path: pendingInsert.op.path,
                        position: pendingInsert.op.position,
                        value: pendingInsert.op.value + op.value,
                    },
                    timestamp: tOp.timestamp,
                };
            }
            else {
                // Non-consecutive - flush and start new
                result.push({ op: pendingInsert.op, timestamp: pendingInsert.timestamp });
                pendingInsert = {
                    op: {
                        otype: 'text.insert',
                        key: op.key,
                        path: op.path,
                        position: op.position,
                        value: op.value,
                    },
                    timestamp: tOp.timestamp,
                };
            }
        }
        else if (isPositionTextDeleteOp(op)) {
            // Flush any pending insert
            if (pendingInsert !== null) {
                result.push({ op: pendingInsert.op, timestamp: pendingInsert.timestamp });
                pendingInsert = null;
            }
            // Coalesce deletes
            if (pendingDelete === null) {
                pendingDelete = {
                    op: {
                        otype: 'text.delete',
                        key: op.key,
                        path: op.path,
                        position: op.position,
                        length: op.length,
                    },
                    timestamp: tOp.timestamp,
                };
            }
            else if (pendingDelete.op.key === op.key &&
                pendingDelete.op.path === op.path) {
                // Check if this is forward deletion (same position) or backward deletion (position moves left)
                const isForwardDelete = pendingDelete.op.position === op.position;
                const isBackwardDelete = op.position + op.length === pendingDelete.op.position;
                if (isForwardDelete) {
                    // Forward delete: accumulate lengths at same position
                    pendingDelete = {
                        op: {
                            otype: 'text.delete',
                            key: pendingDelete.op.key,
                            path: pendingDelete.op.path,
                            position: pendingDelete.op.position,
                            length: pendingDelete.op.length + op.length,
                        },
                        timestamp: tOp.timestamp,
                    };
                }
                else if (isBackwardDelete) {
                    // Backward delete: position moves left, accumulate lengths
                    pendingDelete = {
                        op: {
                            otype: 'text.delete',
                            key: pendingDelete.op.key,
                            path: pendingDelete.op.path,
                            position: op.position, // use new (leftmost) position
                            length: pendingDelete.op.length + op.length,
                        },
                        timestamp: tOp.timestamp,
                    };
                }
                else {
                    // Non-consecutive - flush and start new
                    result.push({ op: pendingDelete.op, timestamp: pendingDelete.timestamp });
                    pendingDelete = {
                        op: {
                            otype: 'text.delete',
                            key: op.key,
                            path: op.path,
                            position: op.position,
                            length: op.length,
                        },
                        timestamp: tOp.timestamp,
                    };
                }
            }
            else {
                // Different key/path - flush and start new
                result.push({ op: pendingDelete.op, timestamp: pendingDelete.timestamp });
                pendingDelete = {
                    op: {
                        otype: 'text.delete',
                        key: op.key,
                        path: op.path,
                        position: op.position,
                        length: op.length,
                    },
                    timestamp: tOp.timestamp,
                };
            }
        }
        else {
            // Other operation type - flush pending
            // (flush order: insert first, then delete — but only one can be set)
            if (pendingInsert !== null) {
                result.push({ op: pendingInsert.op, timestamp: pendingInsert.timestamp });
                pendingInsert = null;
            }
            if (pendingDelete !== null) {
                result.push({ op: pendingDelete.op, timestamp: pendingDelete.timestamp });
                pendingDelete = null;
            }
            result.push(tOp);
        }
    }
    // Flush any remaining pending ops
    if (pendingInsert !== null) {
        result.push({ op: pendingInsert.op, timestamp: pendingInsert.timestamp });
    }
    if (pendingDelete !== null) {
        result.push({ op: pendingDelete.op, timestamp: pendingDelete.timestamp });
    }
    return result;
}
|
|
214
|
+
/**
 * Coalesce set operations with a configurable strategy.
 * Uses embedded timestamps in TimestampedOp for correct tracking.
 *
 * Strategies:
 * - 'last-write': keep only the last write within threshold (default)
 * - 'throttle':   sample at regular intervals (samplingIntervalMs)
 *
 * Unknown strategies pass the ops through unchanged.
 */
function coalesceSetOps(tOps, config) {
    if (tOps.length === 0)
        return tOps;
    const thresholdMs = config.setThresholdMs ?? DEFAULT_SET_THRESHOLD_MS;
    const samplingIntervalMs = config.samplingIntervalMs ?? DEFAULT_SAMPLING_INTERVAL_MS;
    switch (config.setStrategy ?? 'last-write') {
        case 'last-write':
            return coalesceSetOpsLastWrite(tOps, thresholdMs);
        case 'throttle':
            return coalesceSetOpsThrottle(tOps, thresholdMs, samplingIntervalMs);
        default:
            return tOps;
    }
}
|
|
236
|
+
/**
 * Last-write strategy: for each (key, path) pair, a set arriving within
 * `thresholdMs` of the previously kept set overwrites it in place; the
 * window slides to the timestamp of the newly kept op.
 */
function coalesceSetOpsLastWrite(tOps, thresholdMs) {
    const out = [];
    // Per "key:path": the last kept set op and the slot it occupies in `out`.
    const latest = new Map();
    for (const entry of tOps) {
        const op = entry.op;
        if (!isSetOp(op)) {
            out.push(entry);
            continue;
        }
        const slotKey = `${op.key}:${op.path}`;
        const prev = latest.get(slotKey);
        if (prev !== undefined && entry.timestamp - prev.tOp.timestamp <= thresholdMs) {
            // Still inside the coalescing window — overwrite the earlier set.
            out[prev.resultIndex] = entry;
            latest.set(slotKey, { tOp: entry, resultIndex: prev.resultIndex });
        }
        else {
            // First set for this slot, or the window elapsed — emit a new one.
            latest.set(slotKey, { tOp: entry, resultIndex: out.length });
            out.push(entry);
        }
    }
    return out;
}
|
|
266
|
+
/**
 * Throttle strategy: sample set ops at regular intervals.
 * Within one sampling interval only the latest value per (key, path) is
 * kept; crossing into a new interval always emits a fresh sample.
 *
 * Note: `thresholdMs` is accepted for signature parity with the other
 * strategies; since a new interval always produces a fresh sample, it does
 * not change the output here.
 */
function coalesceSetOpsThrottle(tOps, thresholdMs, samplingIntervalMs) {
    const out = [];
    // Per "key:path": the kept op, its slot in `out`, and its interval start.
    const samples = new Map();
    for (const entry of tOps) {
        const { op, timestamp } = entry;
        if (!isSetOp(op)) {
            out.push(entry);
            continue;
        }
        const slotKey = `${op.key}:${op.path}`;
        // Align the timestamp down to the start of its sampling interval.
        const bucket = Math.floor(timestamp / samplingIntervalMs) * samplingIntervalMs;
        const prev = samples.get(slotKey);
        if (prev !== undefined && bucket === prev.intervalStart) {
            // Same interval — replace the kept sample with the latest value.
            out[prev.resultIndex] = entry;
            samples.set(slotKey, { ...prev, tOp: entry });
        }
        else {
            // First sample for this slot, or a new interval — emit fresh.
            samples.set(slotKey, { tOp: entry, resultIndex: out.length, intervalStart: bucket });
            out.push(entry);
        }
    }
    return out;
}
|
|
317
|
+
/**
 * Coalesce additive operations (vector3.add / number.add) by summing their
 * values per (key, path, otype). Any non-additive op acts as a barrier:
 * buffered sums are flushed before it to preserve relative ordering.
 */
function coalesceAdditiveOps(tOps) {
    if (tOps.length === 0)
        return tOps;
    const merged = [];
    const pending = new Map();
    // Emit all buffered sums (in insertion order) and clear the buffer.
    const flushPending = () => {
        for (const buffered of pending.values()) {
            merged.push(buffered);
        }
        pending.clear();
    };
    for (const tOp of tOps) {
        const op = tOp.op;
        if (!isAdditiveOp(op)) {
            flushPending();
            merged.push(tOp);
            continue;
        }
        const bucket = `${op.key}:${op.path}:${op.otype}`;
        const held = pending.get(bucket);
        if (held && held.op.otype === op.otype) {
            // Fold the new value into the buffered op; track latest timestamp.
            if (op.otype === 'vector3.add') {
                const [ax, ay, az] = held.op.value;
                const [bx, by, bz] = op.value;
                held.op.value = [ax + bx, ay + by, az + bz];
                held.timestamp = tOp.timestamp;
            }
            else if (op.otype === 'number.add') {
                held.op.value += op.value;
                held.timestamp = tOp.timestamp;
            }
        }
        else {
            if (held) {
                merged.push(held);
            }
            // Copy the op so accumulation never mutates the caller's object.
            pending.set(bucket, { op: { ...op }, timestamp: tOp.timestamp });
        }
    }
    flushPending();
    return merged;
}
|
|
365
|
+
/**
 * Coalesce operations in a message array.
 *
 * IMPORTANT: Preserves interleaved order to maintain causality. Messages
 * are grouped into maximal runs of consecutive messages from the same
 * session, and each run is coalesced independently, so Lamport's
 * "happened-before" relation is preserved.
 *
 * Example:
 *   Input:  [alice-msg1, alice-msg2, bob-msg3, alice-msg4]
 *   Output: [alice-run1, bob-run2, alice-run3]
 *
 * This prevents the bug where all operations from a session get the last
 * message's lamportTime, which breaks LWW conflict resolution.
 */
function coalesceOperations(messages, config) {
    if (messages.length === 0)
        return [];
    // Partition messages into consecutive same-session runs.
    const runs = [];
    for (const msg of messages) {
        const tail = runs[runs.length - 1];
        if (tail !== undefined && tail[tail.length - 1].sessionId === msg.sessionId) {
            tail.push(msg);
        }
        else {
            runs.push([msg]);
        }
    }
    const coalesced = [];
    for (const run of runs) {
        // Flatten the run's ops, tagging each with its message's time in ms.
        let tOps = run.flatMap((msg) => msg.ops.map((op) => ({ op, timestamp: msg.timestamp * 1000 })));
        // Apply the coalescing passes in order; each defaults to enabled.
        if (config.enableTextCoalesce !== false) {
            tOps = coalesceTextOps(tOps);
        }
        if (config.enableSetCoalesce !== false) {
            tOps = coalesceSetOps(tOps, config);
        }
        if (config.enableVectorCoalesce !== false) {
            tOps = coalesceAdditiveOps(tOps);
        }
        const ops = tOps.map((t) => t.op);
        if (ops.length === 0)
            continue;
        // Rebuild one message per run, carrying the last message's metadata.
        coalesced.push({ ...run[run.length - 1], ops });
    }
    return coalesced;
}
|
|
432
|
+
/**
 * Coalesces a document's persisted journal batches in place.
 *
 * Reads every JournalBatch row for a document, merges compatible
 * operations via `coalesceOperations`, and — only when that actually
 * shrinks the journal — rewrites the batches inside one transaction.
 */
export class CoalescingService {
    // Prisma client used for all journal reads and writes.
    prisma;
    constructor(prisma) {
        this.prisma = prisma;
    }
    /**
     * Coalesce operations in a document's journal
     *
     * @param docId  Document whose journal batches should be coalesced
     * @param config Optional CoalescingConfig tuning thresholds/strategies
     * @returns Before/after batch and operation counts plus the reduction
     */
    async coalesce(docId, config = {}) {
        // Get all journal batches
        const batches = await this.prisma.journalBatch.findMany({
            where: { documentId: docId },
            orderBy: { lamportTime: 'asc' },
        });
        // `operations` is a JSON column, so guard against non-array values.
        const beforeOpsCount = batches.reduce((sum, b) => {
            const ops = Array.isArray(b.operations) ? b.operations : [];
            return sum + ops.length;
        }, 0);
        // Convert batches to messages
        const messages = batches.map(b => ({
            id: b.id,
            sessionId: b.sessionId ?? 'unknown',
            clock: b.vectorClock ?? {},
            lamportTime: b.lamportTime ?? 0,
            // persistedAt is a Date; downstream expects seconds, not ms.
            timestamp: b.persistedAt.getTime() / 1000,
            ops: (Array.isArray(b.operations) ? b.operations : []),
        }));
        // Coalesce
        const coalesced = coalesceOperations(messages, config);
        // Count ops after
        const afterOpsCount = coalesced.reduce((sum, m) => sum + m.ops.length, 0);
        // If no reduction, skip update — avoids a destructive delete/rewrite
        // cycle when coalescing gained nothing.
        if (afterOpsCount >= beforeOpsCount) {
            return {
                ok: true,
                before: { journalBatches: batches.length, operations: beforeOpsCount },
                after: { journalBatches: batches.length, operations: beforeOpsCount },
                reduction: { journalBatches: 0, operations: 0 },
            };
        }
        // Delete old batches and create new ones (one per session)
        const now = new Date();
        await this.prisma.$transaction(async (tx) => {
            // Delete old batches
            await tx.journalBatch.deleteMany({ where: { documentId: docId } });
            // Create coalesced batches - one per session run
            for (let i = 0; i < coalesced.length; i++) {
                const msg = coalesced[i];
                if (msg.ops.length > 0) {
                    await tx.journalBatch.create({
                        data: {
                            documentId: docId,
                            // Synthetic batch id; `i` keeps it unique when Date.now()
                            // collides within the same millisecond.
                            batchId: `coalesced-${msg.sessionId}-${Date.now()}-${i}`,
                            lamportTime: msg.lamportTime,
                            sessionId: msg.sessionId,
                            vectorClock: msg.clock,
                            operations: msg.ops,
                            // NOTE(review): original batch time ranges are collapsed to
                            // the rewrite moment — confirm downstream does not rely on
                            // startTime/endTime of the pre-coalesce batches.
                            startTime: now,
                            endTime: now,
                        },
                    });
                }
            }
        });
        return {
            ok: true,
            before: { journalBatches: batches.length, operations: beforeOpsCount },
            after: { journalBatches: coalesced.length, operations: afterOpsCount },
            reduction: {
                journalBatches: batches.length - coalesced.length,
                operations: beforeOpsCount - afterOpsCount,
            },
        };
    }
}
|
|
507
|
+
//# sourceMappingURL=CoalescingService.js.map
|