@quereus/plugin-sync 0.3.1 → 0.3.3

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (78)
  1. package/README.md +2 -1
  2. package/package.json +4 -4
  3. package/dist/src/clock/hlc.d.ts +0 -105
  4. package/dist/src/clock/hlc.d.ts.map +0 -1
  5. package/dist/src/clock/hlc.js +0 -251
  6. package/dist/src/clock/hlc.js.map +0 -1
  7. package/dist/src/clock/index.d.ts +0 -6
  8. package/dist/src/clock/index.d.ts.map +0 -1
  9. package/dist/src/clock/index.js +0 -6
  10. package/dist/src/clock/index.js.map +0 -1
  11. package/dist/src/clock/site.d.ts +0 -58
  12. package/dist/src/clock/site.d.ts.map +0 -1
  13. package/dist/src/clock/site.js +0 -137
  14. package/dist/src/clock/site.js.map +0 -1
  15. package/dist/src/create-sync-module.d.ts +0 -85
  16. package/dist/src/create-sync-module.d.ts.map +0 -1
  17. package/dist/src/create-sync-module.js +0 -54
  18. package/dist/src/create-sync-module.js.map +0 -1
  19. package/dist/src/index.d.ts +0 -31
  20. package/dist/src/index.d.ts.map +0 -1
  21. package/dist/src/index.js +0 -42
  22. package/dist/src/index.js.map +0 -1
  23. package/dist/src/metadata/change-log.d.ts +0 -67
  24. package/dist/src/metadata/change-log.d.ts.map +0 -1
  25. package/dist/src/metadata/change-log.js +0 -107
  26. package/dist/src/metadata/change-log.js.map +0 -1
  27. package/dist/src/metadata/column-version.d.ts +0 -58
  28. package/dist/src/metadata/column-version.d.ts.map +0 -1
  29. package/dist/src/metadata/column-version.js +0 -100
  30. package/dist/src/metadata/column-version.js.map +0 -1
  31. package/dist/src/metadata/index.d.ts +0 -11
  32. package/dist/src/metadata/index.d.ts.map +0 -1
  33. package/dist/src/metadata/index.js +0 -11
  34. package/dist/src/metadata/index.js.map +0 -1
  35. package/dist/src/metadata/keys.d.ts +0 -180
  36. package/dist/src/metadata/keys.d.ts.map +0 -1
  37. package/dist/src/metadata/keys.js +0 -390
  38. package/dist/src/metadata/keys.js.map +0 -1
  39. package/dist/src/metadata/peer-state.d.ts +0 -52
  40. package/dist/src/metadata/peer-state.d.ts.map +0 -1
  41. package/dist/src/metadata/peer-state.js +0 -87
  42. package/dist/src/metadata/peer-state.js.map +0 -1
  43. package/dist/src/metadata/schema-migration.d.ts +0 -60
  44. package/dist/src/metadata/schema-migration.d.ts.map +0 -1
  45. package/dist/src/metadata/schema-migration.js +0 -126
  46. package/dist/src/metadata/schema-migration.js.map +0 -1
  47. package/dist/src/metadata/schema-version.d.ts +0 -163
  48. package/dist/src/metadata/schema-version.d.ts.map +0 -1
  49. package/dist/src/metadata/schema-version.js +0 -307
  50. package/dist/src/metadata/schema-version.js.map +0 -1
  51. package/dist/src/metadata/tombstones.d.ts +0 -67
  52. package/dist/src/metadata/tombstones.d.ts.map +0 -1
  53. package/dist/src/metadata/tombstones.js +0 -125
  54. package/dist/src/metadata/tombstones.js.map +0 -1
  55. package/dist/src/sync/events.d.ts +0 -117
  56. package/dist/src/sync/events.d.ts.map +0 -1
  57. package/dist/src/sync/events.js +0 -56
  58. package/dist/src/sync/events.js.map +0 -1
  59. package/dist/src/sync/index.d.ts +0 -8
  60. package/dist/src/sync/index.d.ts.map +0 -1
  61. package/dist/src/sync/index.js +0 -8
  62. package/dist/src/sync/index.js.map +0 -1
  63. package/dist/src/sync/manager.d.ts +0 -146
  64. package/dist/src/sync/manager.d.ts.map +0 -1
  65. package/dist/src/sync/manager.js +0 -8
  66. package/dist/src/sync/manager.js.map +0 -1
  67. package/dist/src/sync/protocol.d.ts +0 -282
  68. package/dist/src/sync/protocol.d.ts.map +0 -1
  69. package/dist/src/sync/protocol.js +0 -16
  70. package/dist/src/sync/protocol.js.map +0 -1
  71. package/dist/src/sync/store-adapter.d.ts +0 -42
  72. package/dist/src/sync/store-adapter.d.ts.map +0 -1
  73. package/dist/src/sync/store-adapter.js +0 -232
  74. package/dist/src/sync/store-adapter.js.map +0 -1
  75. package/dist/src/sync/sync-manager-impl.d.ts +0 -91
  76. package/dist/src/sync/sync-manager-impl.d.ts.map +0 -1
  77. package/dist/src/sync/sync-manager-impl.js +0 -1123
  78. package/dist/src/sync/sync-manager-impl.js.map +0 -1
package/dist/src/sync/sync-manager-impl.js
@@ -1,1123 +0,0 @@
- /**
-  * SyncManager implementation.
-  *
-  * Coordinates CRDT metadata tracking and sync operations.
-  */
- import { HLCManager, compareHLC } from '../clock/hlc.js';
- import { generateSiteId, SITE_ID_KEY, serializeSiteIdentity, deserializeSiteIdentity, siteIdEquals, } from '../clock/site.js';
- import { ColumnVersionStore, deserializeColumnVersion } from '../metadata/column-version.js';
- import { TombstoneStore, deserializeTombstone } from '../metadata/tombstones.js';
- import { PeerStateStore } from '../metadata/peer-state.js';
- import { SchemaMigrationStore, deserializeMigration } from '../metadata/schema-migration.js';
- import { ChangeLogStore } from '../metadata/change-log.js';
- import { SYNC_KEY_PREFIX, buildAllColumnVersionsScanBounds, buildAllTombstonesScanBounds, buildAllSchemaMigrationsScanBounds, buildAllChangeLogScanBounds, parseColumnVersionKey, parseTombstoneKey, parseSchemaMigrationKey, encodePK, } from '../metadata/keys.js';
- /** Default chunk size for streaming snapshots. */
- const DEFAULT_SNAPSHOT_CHUNK_SIZE = 1000;
- /** Key prefix for snapshot checkpoints. */
- const CHECKPOINT_PREFIX = 'sc:';
- /**
-  * Implementation of SyncManager.
-  */
- export class SyncManagerImpl {
-     kv;
-     config;
-     hlcManager;
-     columnVersions;
-     tombstones;
-     peerStates;
-     changeLog;
-     schemaMigrations;
-     syncEvents;
-     applyToStore;
-     getTableSchema;
-     // Pending changes for the current transaction
-     pendingChanges = [];
-     currentTransactionId = null;
-     constructor(kv, config, hlcManager, syncEvents, applyToStore, getTableSchema) {
-         this.kv = kv;
-         this.config = config;
-         this.hlcManager = hlcManager;
-         this.syncEvents = syncEvents;
-         this.applyToStore = applyToStore;
-         this.getTableSchema = getTableSchema;
-         this.columnVersions = new ColumnVersionStore(kv);
-         this.tombstones = new TombstoneStore(kv, config.tombstoneTTL);
-         this.peerStates = new PeerStateStore(kv);
-         this.changeLog = new ChangeLogStore(kv);
-         this.schemaMigrations = new SchemaMigrationStore(kv);
-     }
-     /**
-      * Create a new SyncManager, initializing or loading site identity.
-      *
-      * @param kv - KV store for sync metadata
-      * @param storeEvents - Store event emitter to subscribe to local changes
-      * @param config - Sync configuration
-      * @param syncEvents - Sync event emitter for UI integration
-      * @param applyToStore - Optional callback for applying remote changes to the store
-      * @param getTableSchema - Optional callback for getting table schema by name
-      */
-     static async create(kv, storeEvents, config, syncEvents, applyToStore, getTableSchema) {
-         // Load or create site identity
-         const siteIdKey = new TextEncoder().encode(SITE_ID_KEY);
-         let siteId;
-         const existingIdentity = await kv.get(siteIdKey);
-         if (existingIdentity) {
-             const identity = deserializeSiteIdentity(existingIdentity);
-             siteId = identity.siteId;
-         }
-         else if (config.siteId) {
-             siteId = config.siteId;
-             await kv.put(siteIdKey, serializeSiteIdentity({ siteId, createdAt: Date.now() }));
-         }
-         else {
-             siteId = generateSiteId();
-             await kv.put(siteIdKey, serializeSiteIdentity({ siteId, createdAt: Date.now() }));
-         }
-         // Load HLC state
-         const hlcKey = SYNC_KEY_PREFIX.HLC_STATE;
-         const hlcData = await kv.get(hlcKey);
-         let hlcState;
-         if (hlcData) {
-             const view = new DataView(hlcData.buffer, hlcData.byteOffset, hlcData.byteLength);
-             hlcState = {
-                 wallTime: view.getBigUint64(0, false),
-                 counter: view.getUint16(8, false),
-             };
-         }
-         const hlcManager = new HLCManager(siteId, hlcState);
-         const manager = new SyncManagerImpl(kv, config, hlcManager, syncEvents, applyToStore, getTableSchema);
-         // Subscribe to store events
-         storeEvents.onDataChange((event) => manager.handleDataChange(event));
-         storeEvents.onSchemaChange((event) => manager.handleSchemaChange(event));
-         return manager;
-     }
-     getSiteId() {
-         return this.hlcManager.getSiteId();
-     }
-     getCurrentHLC() {
-         return this.hlcManager.now();
-     }
-     /**
-      * Handle a data change event from the store.
-      * Records CRDT metadata for the change.
-      * Skips remote events to prevent duplicate recording.
-      */
-     async handleDataChange(event) {
-         // Skip events from remote sync - metadata already recorded by the originating replica
-         if (event.remote)
-             return;
-         const hlc = this.hlcManager.tick();
-         const { schemaName, tableName, type, oldRow, newRow } = event;
-         // Support both 'key' and 'pk' property names
-         const pk = event.key ?? event.pk;
-         if (!pk) {
-             // Cannot record change without primary key
-             return;
-         }
-         const batch = this.kv.batch();
-         if (type === 'delete') {
-             // Record tombstone
-             this.tombstones.setTombstoneBatch(batch, schemaName, tableName, pk, hlc);
-             // Record in change log for efficient delta queries
-             this.changeLog.recordDeletionBatch(batch, hlc, schemaName, tableName, pk);
-             // Delete column versions for this row
-             await this.columnVersions.deleteRowVersions(schemaName, tableName, pk);
-             const change = {
-                 type: 'delete',
-                 schema: schemaName,
-                 table: tableName,
-                 pk,
-                 hlc,
-             };
-             this.pendingChanges.push(change);
-         }
-         else {
-             // Insert or update: record column versions
-             if (newRow) {
-                 await this.recordColumnVersions(batch, schemaName, tableName, pk, oldRow, newRow, hlc);
-             }
-         }
-         // Persist HLC state in batch
-         const hlcState = this.hlcManager.getState();
-         const hlcBuffer = new Uint8Array(10);
-         const hlcView = new DataView(hlcBuffer.buffer);
-         hlcView.setBigUint64(0, hlcState.wallTime, false);
-         hlcView.setUint16(8, hlcState.counter, false);
-         batch.put(SYNC_KEY_PREFIX.HLC_STATE, hlcBuffer);
-         await batch.write();
-         // Emit local change event with current pending changes, then clear them
-         const changesToEmit = [...this.pendingChanges];
-         this.pendingChanges = [];
-         this.syncEvents.emitLocalChange({
-             transactionId: this.currentTransactionId || crypto.randomUUID(),
-             changes: changesToEmit,
-             pendingSync: true,
-         });
-     }
-     /**
-      * Handle a schema change event from the store.
-      * Records schema migrations for sync.
-      * Skips remote events to prevent duplicate recording.
-      */
-     async handleSchemaChange(event) {
-         // Skip events from remote sync - metadata already recorded by the originating replica
-         if (event.remote)
-             return;
-         const hlc = this.hlcManager.tick();
-         const { type, objectType, schemaName, objectName, ddl } = event;
-         // Map store event type to migration type
-         let migrationType;
-         if (objectType === 'table') {
-             switch (type) {
-                 case 'create':
-                     migrationType = 'create_table';
-                     break;
-                 case 'drop':
-                     migrationType = 'drop_table';
-                     break;
-                 case 'alter':
-                     migrationType = 'alter_column';
-                     break;
-                 default: return; // Unknown type
-             }
-         }
-         else if (objectType === 'index') {
-             switch (type) {
-                 case 'create':
-                     migrationType = 'add_index';
-                     break;
-                 case 'drop':
-                     migrationType = 'drop_index';
-                     break;
-                 default: return; // Unknown type
-             }
-         }
-         else {
-             return; // Unknown object type
-         }
-         // Get next schema version for this table
-         const currentVersion = await this.schemaMigrations.getCurrentVersion(schemaName, objectName);
-         const newVersion = currentVersion + 1;
-         // Record the migration
-         await this.schemaMigrations.recordMigration(schemaName, objectName, {
-             type: migrationType,
-             ddl: ddl || '',
-             hlc,
-             schemaVersion: newVersion,
-         });
-         // Persist HLC state
-         const hlcState = this.hlcManager.getState();
-         const hlcBuffer = new Uint8Array(10);
-         const hlcView = new DataView(hlcBuffer.buffer);
-         hlcView.setBigUint64(0, hlcState.wallTime, false);
-         hlcView.setUint16(8, hlcState.counter, false);
-         await this.kv.put(SYNC_KEY_PREFIX.HLC_STATE, hlcBuffer);
-         // Emit local change event for the schema migration
-         this.syncEvents.emitLocalChange({
-             transactionId: crypto.randomUUID(),
-             changes: [],
-             pendingSync: true,
-         });
-     }
-     async recordColumnVersions(batch, schemaName, tableName, pk, oldRow, newRow, hlc) {
-         // Try to get actual column names from schema
-         const tableSchema = this.getTableSchema?.(schemaName, tableName);
-         const columnNames = tableSchema?.columns?.map(c => c.name);
-         // Debug logging for column name resolution
-         if (!tableSchema) {
-             console.warn(`[Sync] No table schema found for ${schemaName}.${tableName} - using fallback column names`);
-         }
-         // For each column that changed, record the new version
-         for (let i = 0; i < newRow.length; i++) {
-             const oldValue = oldRow?.[i];
-             const newValue = newRow[i];
-             // Only record if value changed (or it's an insert)
-             if (!oldRow || oldValue !== newValue) {
-                 // Use actual column name if available, otherwise fall back to index-based
-                 const column = columnNames?.[i] ?? `col_${i}`;
-                 const version = { hlc, value: newValue };
-                 this.columnVersions.setColumnVersionBatch(batch, schemaName, tableName, pk, column, version);
-                 // Record in change log for efficient delta queries
-                 this.changeLog.recordColumnChangeBatch(batch, hlc, schemaName, tableName, pk, column);
-                 const change = {
-                     type: 'column',
-                     schema: schemaName,
-                     table: tableName,
-                     pk,
-                     column,
-                     value: newValue,
-                     hlc,
-                 };
-                 this.pendingChanges.push(change);
-             }
-         }
-     }
-     async persistHLCState() {
-         const state = this.hlcManager.getState();
-         const buffer = new Uint8Array(10);
-         const view = new DataView(buffer.buffer);
-         view.setBigUint64(0, state.wallTime, false);
-         view.setUint16(8, state.counter, false);
-         await this.kv.put(SYNC_KEY_PREFIX.HLC_STATE, buffer);
-     }
-     async getChangesSince(peerSiteId, sinceHLC) {
-         const changes = [];
-         if (sinceHLC) {
-             // Use change log for efficient delta query
-             for await (const logEntry of this.changeLog.getChangesSince(sinceHLC)) {
-                 // Don't include changes from the requesting peer
-                 if (siteIdEquals(logEntry.hlc.siteId, peerSiteId))
-                     continue;
-                 if (logEntry.entryType === 'column') {
-                     // Look up the column version to get the value
-                     const cv = await this.columnVersions.getColumnVersion(logEntry.schema, logEntry.table, logEntry.pk, logEntry.column);
-                     if (!cv)
-                         continue; // May have been superseded
-                     const columnChange = {
-                         type: 'column',
-                         schema: logEntry.schema,
-                         table: logEntry.table,
-                         pk: logEntry.pk,
-                         column: logEntry.column,
-                         value: cv.value,
-                         hlc: cv.hlc,
-                     };
-                     changes.push(columnChange);
-                 }
-                 else {
-                     // Look up tombstone for the deletion
-                     const tombstone = await this.tombstones.getTombstone(logEntry.schema, logEntry.table, logEntry.pk);
-                     if (!tombstone)
-                         continue; // May have been pruned
-                     const deletion = {
-                         type: 'delete',
-                         schema: logEntry.schema,
-                         table: logEntry.table,
-                         pk: logEntry.pk,
-                         hlc: tombstone.hlc,
-                     };
-                     changes.push(deletion);
-                 }
-             }
-         }
-         else {
-             // No sinceHLC - need all changes (full scan fallback)
-             await this.collectAllChanges(peerSiteId, changes);
-         }
-         // Collect schema migrations since sinceHLC
-         const schemaMigrations = [];
-         const smBounds = buildAllSchemaMigrationsScanBounds();
-         for await (const entry of this.kv.iterate(smBounds)) {
-             const parsed = parseSchemaMigrationKey(entry.key);
-             if (!parsed)
-                 continue;
-             const migration = deserializeMigration(entry.value);
-             // Filter by HLC if provided
-             if (sinceHLC && compareHLC(migration.hlc, sinceHLC) <= 0)
-                 continue;
-             // Don't include changes from the requesting peer
-             if (siteIdEquals(migration.hlc.siteId, peerSiteId))
-                 continue;
-             schemaMigrations.push({
-                 type: migration.type,
-                 schema: parsed.schema,
-                 table: parsed.table,
-                 ddl: migration.ddl,
-                 hlc: migration.hlc,
-                 schemaVersion: migration.schemaVersion,
-             });
-         }
-         // If no changes, return empty array
-         if (changes.length === 0 && schemaMigrations.length === 0) {
-             return [];
-         }
-         // Changes from change log are already in HLC order
-         // Schema migrations need sorting
-         schemaMigrations.sort((a, b) => compareHLC(a.hlc, b.hlc));
-         // Batch changes up to config.batchSize
-         const result = [];
-         for (let i = 0; i < changes.length; i += this.config.batchSize) {
-             const batch = changes.slice(i, i + this.config.batchSize);
-             const maxHLC = batch.reduce((max, c) => compareHLC(c.hlc, max) > 0 ? c.hlc : max, batch[0].hlc);
-             result.push({
-                 siteId: this.getSiteId(),
-                 transactionId: crypto.randomUUID(),
-                 hlc: maxHLC,
-                 changes: batch,
-                 schemaMigrations: i === 0 ? schemaMigrations : [], // Only include schema migrations in first batch
-             });
-         }
-         // If no data changes but we have schema migrations, create a changeset for them
-         if (result.length === 0 && schemaMigrations.length > 0) {
-             const maxHLC = schemaMigrations.reduce((max, m) => compareHLC(m.hlc, max) > 0 ? m.hlc : max, schemaMigrations[0].hlc);
-             result.push({
-                 siteId: this.getSiteId(),
-                 transactionId: crypto.randomUUID(),
-                 hlc: maxHLC,
-                 changes: [],
-                 schemaMigrations,
-             });
-         }
-         return result;
-     }
-     /**
-      * Fallback: collect all changes when no sinceHLC is provided.
-      */
-     async collectAllChanges(peerSiteId, changes) {
-         // Collect all column changes
-         const cvBounds = buildAllColumnVersionsScanBounds();
-         for await (const entry of this.kv.iterate(cvBounds)) {
-             const parsed = parseColumnVersionKey(entry.key);
-             if (!parsed)
-                 continue;
-             const cv = deserializeColumnVersion(entry.value);
-             // Don't include changes from the requesting peer
-             if (siteIdEquals(cv.hlc.siteId, peerSiteId))
-                 continue;
-             const columnChange = {
-                 type: 'column',
-                 schema: parsed.schema,
-                 table: parsed.table,
-                 pk: parsed.pk,
-                 column: parsed.column,
-                 value: cv.value,
-                 hlc: cv.hlc,
-             };
-             changes.push(columnChange);
-         }
-         // Collect all deletions (tombstones)
-         const tbBounds = buildAllTombstonesScanBounds();
-         for await (const entry of this.kv.iterate(tbBounds)) {
-             const parsed = parseTombstoneKey(entry.key);
-             if (!parsed)
-                 continue;
-             const tombstone = deserializeTombstone(entry.value);
-             // Don't include changes from the requesting peer
-             if (siteIdEquals(tombstone.hlc.siteId, peerSiteId))
-                 continue;
-             const deletion = {
-                 type: 'delete',
-                 schema: parsed.schema,
-                 table: parsed.table,
-                 pk: parsed.pk,
-                 hlc: tombstone.hlc,
-             };
-             changes.push(deletion);
-         }
-         // Sort by HLC for consistent ordering
-         changes.sort((a, b) => compareHLC(a.hlc, b.hlc));
-     }
-     async applyChanges(changes) {
-         let applied = 0;
-         let skipped = 0;
-         let conflicts = 0;
-         // Collect changes to apply to the store (grouped by row for column merging)
-         const dataChangesToApply = [];
-         const schemaChangesToApply = [];
-         const appliedChanges = [];
-         for (const changeSet of changes) {
-             // Update our clock with the remote HLC
-             this.hlcManager.receive(changeSet.hlc);
-             // Process schema migrations first (DDL before DML)
-             for (const migration of changeSet.schemaMigrations) {
-                 // Use the incoming schemaVersion if provided, otherwise calculate next version
-                 const schemaVersion = migration.schemaVersion ??
-                     (await this.schemaMigrations.getCurrentVersion(migration.schema, migration.table)) + 1;
-                 // Check if we've already recorded this migration
-                 const existingMigration = await this.schemaMigrations.getMigration(migration.schema, migration.table, schemaVersion);
-                 if (existingMigration) {
-                     // Already have this migration version - skip
-                     // (HLC comparison for first-writer-wins is done via checkConflict if needed)
-                     if (compareHLC(migration.hlc, existingMigration.hlc) <= 0) {
-                         skipped++;
-                         continue;
-                     }
-                 }
-                 // Record the migration in our metadata store so we can forward it to other peers
-                 await this.schemaMigrations.recordMigration(migration.schema, migration.table, {
-                     type: migration.type,
-                     ddl: migration.ddl,
-                     hlc: migration.hlc,
-                     schemaVersion,
-                 });
-                 schemaChangesToApply.push({
-                     type: migration.type,
-                     schema: migration.schema,
-                     table: migration.table,
-                     ddl: migration.ddl,
-                 });
-                 // Count schema migrations as applied changes
-                 applied++;
-             }
-             // Process data changes
-             for (const change of changeSet.changes) {
-                 const result = await this.resolveAndRecordChange(change, changeSet.siteId);
-                 if (result.outcome === 'applied') {
-                     applied++;
-                     appliedChanges.push({ change, siteId: changeSet.siteId });
-                     if (result.dataChange) {
-                         dataChangesToApply.push(result.dataChange);
-                     }
-                 }
-                 else if (result.outcome === 'skipped') {
-                     skipped++;
-                 }
-                 else if (result.outcome === 'conflict') {
-                     conflicts++;
-                 }
-             }
-         }
-         // Apply data and schema changes to the store via callback
-         if (this.applyToStore && (dataChangesToApply.length > 0 || schemaChangesToApply.length > 0)) {
-             await this.applyToStore(dataChangesToApply, schemaChangesToApply, { remote: true });
-         }
-         // Emit a single remote change event with all applied changes for UI reactivity
-         if (appliedChanges.length > 0) {
-             // Group by siteId to emit one event per originating site
-             const changesBySite = new Map();
-             for (const { change, siteId } of appliedChanges) {
-                 const siteKey = Array.from(siteId).join(',');
-                 const siteChanges = changesBySite.get(siteKey);
-                 if (siteChanges) {
-                     siteChanges.push(change);
-                 }
-                 else {
-                     changesBySite.set(siteKey, [change]);
-                 }
-             }
-             const appliedAt = this.hlcManager.now();
-             for (const [siteKey, siteChanges] of changesBySite) {
-                 const siteIdBytes = new Uint8Array(siteKey.split(',').map(Number));
-                 this.syncEvents.emitRemoteChange({
-                     siteId: siteIdBytes,
-                     transactionId: crypto.randomUUID(),
-                     changes: siteChanges,
-                     appliedAt,
-                 });
-             }
-         }
-         await this.persistHLCState();
-         return {
-             applied,
-             skipped,
-             conflicts,
-             transactions: changes.length,
-         };
-     }
-     /**
-      * Resolve CRDT conflicts and record metadata for a change.
-      * Returns outcome and the data change to apply (if any).
-      */
-     async resolveAndRecordChange(change, _remoteSiteId) {
-         // Skip changes that originated from ourselves (echo prevention).
-         // This can happen when a peer re-sends changes it received from us,
-         // or when changes propagate through a coordinator back to the originator.
-         if (siteIdEquals(change.hlc.siteId, this.getSiteId())) {
-             return { outcome: 'skipped' };
-         }
-         if (change.type === 'delete') {
-             // Check if we should apply this deletion
-             const existingTombstone = await this.tombstones.getTombstone(change.schema, change.table, change.pk);
-             if (existingTombstone && compareHLC(change.hlc, existingTombstone.hlc) <= 0) {
-                 return { outcome: 'skipped' };
-             }
-             // Record the tombstone and delete column versions (metadata update)
-             await this.tombstones.setTombstone(change.schema, change.table, change.pk, change.hlc);
-             await this.columnVersions.deleteRowVersions(change.schema, change.table, change.pk);
-             // Return the data change for the store
-             return {
-                 outcome: 'applied',
-                 dataChange: {
-                     type: 'delete',
-                     schema: change.schema,
-                     table: change.table,
-                     pk: change.pk,
-                 },
-             };
-         }
-         else {
-             // Column change
-             const shouldApply = await this.columnVersions.shouldApplyWrite(change.schema, change.table, change.pk, change.column, change.hlc);
-             if (!shouldApply) {
-                 // Local version is newer - this is a conflict where local wins
-                 const localVersion = await this.columnVersions.getColumnVersion(change.schema, change.table, change.pk, change.column);
-                 if (localVersion) {
-                     const conflictEvent = {
-                         table: change.table,
-                         pk: change.pk,
-                         column: change.column,
-                         localValue: localVersion.value,
-                         remoteValue: change.value,
-                         winner: 'local',
-                         winningHLC: localVersion.hlc,
-                     };
-                     this.syncEvents.emitConflictResolved(conflictEvent);
-                 }
-                 return { outcome: 'conflict' };
-             }
-             // Check for tombstone blocking
-             const isBlocked = await this.tombstones.isDeletedAndBlocking(change.schema, change.table, change.pk, change.hlc, this.config.allowResurrection);
-             if (isBlocked) {
-                 return { outcome: 'skipped' };
-             }
-             // Record the column version (metadata update)
-             await this.columnVersions.setColumnVersion(change.schema, change.table, change.pk, change.column, { hlc: change.hlc, value: change.value });
-             // Return the data change for the store
-             return {
-                 outcome: 'applied',
-                 dataChange: {
-                     type: 'update', // Column changes are updates (or inserts handled by store)
-                     schema: change.schema,
-                     table: change.table,
-                     pk: change.pk,
-                     columns: { [change.column]: change.value },
-                 },
-             };
-         }
-     }
-     async canDeltaSync(peerSiteId, sinceHLC) {
-         const peerState = await this.peerStates.getPeerState(peerSiteId);
-         if (!peerState) {
-             // Never synced with this peer - need full snapshot
-             return false;
-         }
-         // Check if the sinceHLC is within tombstone TTL
-         const now = Date.now();
-         const sinceTime = Number(sinceHLC.wallTime);
-         if (now - sinceTime > this.config.tombstoneTTL) {
-             // Too old - tombstones may have been pruned
-             return false;
-         }
-         return true;
-     }
-     async getSnapshot() {
-         const tableData = new Map();
-         const cvBounds = buildAllColumnVersionsScanBounds();
-         for await (const entry of this.kv.iterate(cvBounds)) {
-             const parsed = parseColumnVersionKey(entry.key);
-             if (!parsed)
-                 continue;
-             const cv = deserializeColumnVersion(entry.value);
-             const tableKey = `${parsed.schema}.${parsed.table}`;
-             const rowKey = encodePK(parsed.pk);
-             if (!tableData.has(tableKey)) {
-                 tableData.set(tableKey, new Map());
-             }
-             const tableRows = tableData.get(tableKey);
-             if (!tableRows.has(rowKey)) {
-                 tableRows.set(rowKey, new Map());
-             }
-             const rowVersions = tableRows.get(rowKey);
-             rowVersions.set(parsed.column, cv);
-         }
-         // Build table snapshots
-         const tables = [];
-         for (const [tableKey, rows] of tableData) {
-             const [schema, table] = tableKey.split('.');
-             const columnVersions = new Map();
-             const rowsArray = [];
-             for (const [rowKey, rowVersions] of rows) {
-                 // Convert column map to array (row representation)
-                 const row = Array.from(rowVersions.values()).map(cv => cv.value);
-                 rowsArray.push(row);
-                 // Add column versions with their values
-                 for (const [column, cv] of rowVersions) {
-                     const versionKey = `${rowKey}:${column}`;
-                     columnVersions.set(versionKey, { hlc: cv.hlc, value: cv.value });
-                 }
-             }
-             tables.push({
-                 schema,
-                 table,
-                 rows: rowsArray,
-                 columnVersions,
-             });
-         }
-         // Collect all schema migrations
-         const schemaMigrations = [];
-         const smBounds = buildAllSchemaMigrationsScanBounds();
-         for await (const entry of this.kv.iterate(smBounds)) {
-             const parsed = parseSchemaMigrationKey(entry.key);
-             if (!parsed)
-                 continue;
-             const migration = deserializeMigration(entry.value);
-             schemaMigrations.push({
-                 type: migration.type,
-                 schema: parsed.schema,
-                 table: parsed.table,
-                 ddl: migration.ddl,
-                 hlc: migration.hlc,
-                 schemaVersion: migration.schemaVersion,
-             });
-         }
-         return {
-             siteId: this.getSiteId(),
-             hlc: this.getCurrentHLC(),
-             tables,
-             schemaMigrations,
-         };
-     }
-     async applySnapshot(snapshot) {
-         // Clear existing sync metadata (column versions, tombstones, and change log)
-         const batch = this.kv.batch();
-         // Delete all existing column versions
-         const cvBounds = buildAllColumnVersionsScanBounds();
-         for await (const entry of this.kv.iterate(cvBounds)) {
-             batch.delete(entry.key);
-         }
-         // Delete all existing tombstones
-         const tbBounds = buildAllTombstonesScanBounds();
-         for await (const entry of this.kv.iterate(tbBounds)) {
-             batch.delete(entry.key);
-         }
-         // Delete all existing change log entries
-         const clBounds = buildAllChangeLogScanBounds();
-         for await (const entry of this.kv.iterate(clBounds)) {
-             batch.delete(entry.key);
-         }
-         await batch.write();
-         // Apply the snapshot's column versions and rebuild change log
-         const applyBatch = this.kv.batch();
-         for (const tableSnapshot of snapshot.tables) {
-             for (const [versionKey, cvEntry] of tableSnapshot.columnVersions) {
-                 // versionKey format: {rowKey}:{column}
-                 const lastColon = versionKey.lastIndexOf(':');
-                 if (lastColon === -1)
-                     continue;
-                 const rowKey = versionKey.slice(0, lastColon);
-                 const column = versionKey.slice(lastColon + 1);
-                 const pk = JSON.parse(rowKey);
-                 this.columnVersions.setColumnVersionBatch(applyBatch, tableSnapshot.schema, tableSnapshot.table, pk, column, { hlc: cvEntry.hlc, value: cvEntry.value });
-                 // Rebuild change log entry
-                 this.changeLog.recordColumnChangeBatch(applyBatch, cvEntry.hlc, tableSnapshot.schema, tableSnapshot.table, pk, column);
-             }
-         }
-         await applyBatch.write();
-         // Update our HLC to be at least as high as the snapshot
-         this.hlcManager.receive(snapshot.hlc);
-         await this.persistHLCState();
-         // Emit sync state change
-         this.syncEvents.emitSyncStateChange({ status: 'synced', lastSyncHLC: snapshot.hlc });
-     }
-     async updatePeerSyncState(peerSiteId, hlc) {
-         await this.peerStates.setPeerState(peerSiteId, hlc);
-     }
-     async getPeerSyncState(peerSiteId) {
-         const state = await this.peerStates.getPeerState(peerSiteId);
-         return state?.lastSyncHLC;
-     }
-     async pruneTombstones() {
-         const now = Date.now();
-         let count = 0;
-         const batch = this.kv.batch();
-         const tbBounds = buildAllTombstonesScanBounds();
-         for await (const entry of this.kv.iterate(tbBounds)) {
-             const tombstone = deserializeTombstone(entry.value);
-             if (now - tombstone.createdAt > this.config.tombstoneTTL) {
-                 batch.delete(entry.key);
-                 count++;
-             }
-         }
-         await batch.write();
-         return count;
-     }
-     /**
-      * Get the sync event emitter for UI integration.
-      */
-     getEventEmitter() {
-         return this.syncEvents;
-     }
-     // ============================================================================
-     // Streaming Snapshot API
-     // ============================================================================
-     async *getSnapshotStream(chunkSize = DEFAULT_SNAPSHOT_CHUNK_SIZE) {
-         const snapshotId = crypto.randomUUID();
-         const siteId = this.getSiteId();
-         const hlc = this.getCurrentHLC();
-         // Count tables and migrations for header
-         const tableKeys = new Set();
-         const cvBounds = buildAllColumnVersionsScanBounds();
-         for await (const entry of this.kv.iterate(cvBounds)) {
-             const parsed = parseColumnVersionKey(entry.key);
-             if (parsed)
-                 tableKeys.add(`${parsed.schema}.${parsed.table}`);
-         }
-         let migrationCount = 0;
-         const smBounds = buildAllSchemaMigrationsScanBounds();
-         for await (const _entry of this.kv.iterate(smBounds)) {
-             migrationCount++;
-         }
-         // Yield header
-         const header = {
-             type: 'header',
-             siteId,
-             hlc,
-             tableCount: tableKeys.size,
-             migrationCount,
-             snapshotId,
-         };
-         yield header;
-         // Stream each table
-         let totalEntries = 0;
-         for (const tableKey of tableKeys) {
-             const [schema, table] = tableKey.split('.');
-             // Estimate entries for this table
-             let tableEntryCount = 0;
-             const tableCvBounds = buildAllColumnVersionsScanBounds();
-             for await (const entry of this.kv.iterate(tableCvBounds)) {
-                 const parsed = parseColumnVersionKey(entry.key);
-                 if (parsed && parsed.schema === schema && parsed.table === table) {
-                     tableEntryCount++;
-                 }
-             }
-             // Yield table start
-             const tableStart = {
-                 type: 'table-start',
-                 schema,
-                 table,
-                 estimatedEntries: tableEntryCount,
-             };
-             yield tableStart;
-             // Stream column versions in chunks
-             let entries = [];
-             let entriesWritten = 0;
-             for await (const entry of this.kv.iterate(tableCvBounds)) {
-                 const parsed = parseColumnVersionKey(entry.key);
-                 if (!parsed || parsed.schema !== schema || parsed.table !== table)
-                     continue;
-                 const cv = deserializeColumnVersion(entry.value);
-                 const versionKey = `${encodePK(parsed.pk)}:${parsed.column}`;
-                 entries.push([versionKey, cv.hlc, cv.value]);
-                 entriesWritten++;
-                 if (entries.length >= chunkSize) {
-                     const chunk = {
-                         type: 'column-versions',
-                         schema,
-                         table,
-                         entries,
-                     };
-                     yield chunk;
-                     entries = [];
-                 }
-             }
-             // Yield remaining entries
-             if (entries.length > 0) {
-                 const chunk = {
-                     type: 'column-versions',
-                     schema,
-                     table,
-                     entries,
-                 };
-                 yield chunk;
-             }
-             // Yield table end
-             const tableEnd = {
-                 type: 'table-end',
-                 schema,
-                 table,
-                 entriesWritten,
-             };
-             yield tableEnd;
-             totalEntries += entriesWritten;
-         }
-         // Stream schema migrations
-         for await (const entry of this.kv.iterate(smBounds)) {
-             const parsed = parseSchemaMigrationKey(entry.key);
-             if (!parsed)
-                 continue;
-             const migration = deserializeMigration(entry.value);
-             const migrationChunk = {
-                 type: 'schema-migration',
-                 migration: {
-                     type: migration.type,
-                     schema: parsed.schema,
-                     table: parsed.table,
-                     ddl: migration.ddl,
-                     hlc: migration.hlc,
-                     schemaVersion: migration.schemaVersion,
-                 },
-             };
-             yield migrationChunk;
-         }
-         // Yield footer
-         const footer = {
-             type: 'footer',
-             snapshotId,
-             totalTables: tableKeys.size,
-             totalEntries,
-             totalMigrations: migrationCount,
-         };
-         yield footer;
-     }
-     async applySnapshotStream(chunks, onProgress) {
-         let snapshotId;
-         let snapshotHLC;
-         let totalTables = 0;
-         let totalEntries = 0;
-         let tablesProcessed = 0;
-         let entriesProcessed = 0;
-         let currentTable;
-         const completedTables = [];
-         // Clear existing data before applying
-         const clearBatch = this.kv.batch();
-         for await (const entry of this.kv.iterate(buildAllColumnVersionsScanBounds())) {
-             clearBatch.delete(entry.key);
-         }
-         for await (const entry of this.kv.iterate(buildAllTombstonesScanBounds())) {
-             clearBatch.delete(entry.key);
-         }
-         for await (const entry of this.kv.iterate(buildAllChangeLogScanBounds())) {
-             clearBatch.delete(entry.key);
-         }
-         await clearBatch.write();
-         // Process chunks
-         let batch = this.kv.batch();
-         let batchSize = 0;
-         const BATCH_FLUSH_SIZE = 1000;
-         for await (const chunk of chunks) {
-             switch (chunk.type) {
-                 case 'header':
-                     snapshotId = chunk.snapshotId;
-                     snapshotHLC = chunk.hlc;
-                     totalTables = chunk.tableCount;
-                     break;
-                 case 'table-start':
-                     currentTable = `${chunk.schema}.${chunk.table}`;
-                     totalEntries += chunk.estimatedEntries;
-                     break;
-                 case 'column-versions':
-                     for (const [versionKey, hlc, value] of chunk.entries) {
-                         const lastColon = versionKey.lastIndexOf(':');
-                         if (lastColon === -1)
-                             continue;
-                         const rowKey = versionKey.slice(0, lastColon);
-                         const column = versionKey.slice(lastColon + 1);
-                         const pk = JSON.parse(rowKey);
-                         this.columnVersions.setColumnVersionBatch(batch, chunk.schema, chunk.table, pk, column, { hlc, value });
-                         this.changeLog.recordColumnChangeBatch(batch, hlc, chunk.schema, chunk.table, pk, column);
-                         batchSize++;
-                         entriesProcessed++;
-                         if (batchSize >= BATCH_FLUSH_SIZE) {
-                             await batch.write();
-                             batch = this.kv.batch();
-                             batchSize = 0;
-                             // Save checkpoint with completed tables
-                             if (snapshotId && snapshotHLC) {
-                                 await this.saveSnapshotCheckpoint({
-                                     snapshotId,
-                                     siteId: this.getSiteId(),
-                                     hlc: snapshotHLC,
-                                     lastTableIndex: tablesProcessed,
-                                     lastEntryIndex: entriesProcessed,
-                                     completedTables: [...completedTables],
-                                     entriesProcessed,
-                                     createdAt: Date.now(),
-                                 });
-                             }
-                         }
-                     }
-                     if (onProgress && snapshotId) {
-                         onProgress({
-                             snapshotId,
-                             tablesProcessed,
-                             totalTables,
-                             entriesProcessed,
-                             totalEntries,
-                             currentTable,
-                         });
-                     }
-                     break;
-                 case 'table-end':
-                     tablesProcessed++;
-                     if (currentTable) {
-                         completedTables.push(currentTable);
-                     }
-                     break;
-                 case 'schema-migration':
-                     // TODO: Apply schema migration
-                     break;
-                 case 'footer':
-                     // Flush remaining batch
-                     if (batchSize > 0) {
-                         await batch.write();
-                     }
-                     // Update HLC
-                     if (snapshotHLC) {
-                         this.hlcManager.receive(snapshotHLC);
-                         await this.persistHLCState();
-                     }
-                     // Clear checkpoint
-                     if (snapshotId) {
-                         await this.clearSnapshotCheckpoint(snapshotId);
-                     }
-                     // Emit sync state change
-                     if (snapshotHLC) {
-                         this.syncEvents.emitSyncStateChange({ status: 'synced', lastSyncHLC: snapshotHLC });
-                     }
-                     break;
-             }
-         }
-     }
-     async getSnapshotCheckpoint(snapshotId) {
-         const key = new TextEncoder().encode(`${CHECKPOINT_PREFIX}${snapshotId}`);
-         const data = await this.kv.get(key);
-         if (!data)
-             return undefined;
-         const json = new TextDecoder().decode(data);
-         const obj = JSON.parse(json);
-         // Reconstruct HLC with proper types
-         return {
-             ...obj,
-             hlc: {
-                 wallTime: BigInt(obj.hlc.wallTime),
-                 counter: obj.hlc.counter,
-                 siteId: new Uint8Array(obj.hlc.siteId),
-             },
-             siteId: new Uint8Array(obj.siteId),
-         };
-     }
-     async saveSnapshotCheckpoint(checkpoint) {
-         const key = new TextEncoder().encode(`${CHECKPOINT_PREFIX}${checkpoint.snapshotId}`);
-         const json = JSON.stringify({
-             ...checkpoint,
-             hlc: {
-                 wallTime: checkpoint.hlc.wallTime.toString(),
-                 counter: checkpoint.hlc.counter,
-                 siteId: Array.from(checkpoint.hlc.siteId),
-             },
-             siteId: Array.from(checkpoint.siteId),
-         });
-         await this.kv.put(key, new TextEncoder().encode(json));
-     }
-     async clearSnapshotCheckpoint(snapshotId) {
-         const key = new TextEncoder().encode(`${CHECKPOINT_PREFIX}${snapshotId}`);
-         await this.kv.delete(key);
-     }
-     async *resumeSnapshotStream(checkpoint) {
-         // Resume streaming from checkpoint position
-         // Skip tables that have already been completed
-         const completedSet = new Set(checkpoint.completedTables);
-         const snapshotId = checkpoint.snapshotId;
-         const siteId = checkpoint.siteId;
-         const hlc = checkpoint.hlc;
-         // Count tables and migrations for header
-         const tableKeys = new Set();
-         const cvBounds = buildAllColumnVersionsScanBounds();
-         for await (const entry of this.kv.iterate(cvBounds)) {
-             const parsed = parseColumnVersionKey(entry.key);
-             if (parsed)
-                 tableKeys.add(`${parsed.schema}.${parsed.table}`);
-         }
-         let migrationCount = 0;
-         const smBounds = buildAllSchemaMigrationsScanBounds();
-         for await (const _entry of this.kv.iterate(smBounds)) {
-             migrationCount++;
-         }
-         // Yield header (receiver needs to know this is a resume)
-         const header = {
-             type: 'header',
-             siteId,
-             hlc,
-             tableCount: tableKeys.size,
-             migrationCount,
-             snapshotId,
-         };
-         yield header;
-         // Stream each table, skipping completed ones
-         let totalEntries = checkpoint.entriesProcessed;
-         for (const tableKey of tableKeys) {
-             // Skip already completed tables
-             if (completedSet.has(tableKey))
-                 continue;
-             const [schema, table] = tableKey.split('.');
-             // Count entries for this table
-             let tableEntryCount = 0;
-             const tableCvBounds = buildAllColumnVersionsScanBounds();
-             for await (const entry of this.kv.iterate(tableCvBounds)) {
-                 const parsed = parseColumnVersionKey(entry.key);
-                 if (parsed && parsed.schema === schema && parsed.table === table) {
-                     tableEntryCount++;
-                 }
-             }
-             // Yield table start
-             const tableStart = {
-                 type: 'table-start',
-                 schema,
-                 table,
-                 estimatedEntries: tableEntryCount,
-             };
-             yield tableStart;
-             // Stream column versions in chunks
-             let entries = [];
-             let entriesWritten = 0;
-             const chunkSize = DEFAULT_SNAPSHOT_CHUNK_SIZE;
-             for await (const entry of this.kv.iterate(tableCvBounds)) {
-                 const parsed = parseColumnVersionKey(entry.key);
-                 if (!parsed || parsed.schema !== schema || parsed.table !== table)
-                     continue;
-                 const cv = deserializeColumnVersion(entry.value);
-                 const versionKey = `${encodePK(parsed.pk)}:${parsed.column}`;
-                 entries.push([versionKey, cv.hlc, cv.value]);
-                 entriesWritten++;
-                 if (entries.length >= chunkSize) {
-                     const chunk = {
-                         type: 'column-versions',
-                         schema,
-                         table,
-                         entries,
-                     };
-                     yield chunk;
-                     entries = [];
-                 }
-             }
-             // Yield remaining entries
-             if (entries.length > 0) {
-                 const chunk = {
-                     type: 'column-versions',
-                     schema,
-                     table,
-                     entries,
-                 };
-                 yield chunk;
-             }
-             // Yield table end
-             const tableEnd = {
-                 type: 'table-end',
-                 schema,
-                 table,
-                 entriesWritten,
-             };
-             yield tableEnd;
-             totalEntries += entriesWritten;
-         }
-         // Stream schema migrations
-         for await (const entry of this.kv.iterate(smBounds)) {
-             const parsed = parseSchemaMigrationKey(entry.key);
-             if (!parsed)
-                 continue;
-             const migration = deserializeMigration(entry.value);
-             const migrationChunk = {
-                 type: 'schema-migration',
-                 migration: {
-                     type: migration.type,
-                     schema: parsed.schema,
-                     table: parsed.table,
-                     ddl: migration.ddl,
-                     hlc: migration.hlc,
-                     schemaVersion: migration.schemaVersion,
-                 },
-             };
-             yield migrationChunk;
-         }
-         // Yield footer
-         const footer = {
-             type: 'footer',
-             snapshotId,
-             totalTables: tableKeys.size,
-             totalEntries,
-             totalMigrations: migrationCount,
-         };
-         yield footer;
-     }
- }
- //# sourceMappingURL=sync-manager-impl.js.map
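
For readers tracing what was removed: the deleted sync-manager-impl.js persisted its hybrid logical clock (HLC) as a fixed 10-byte value, a big-endian 64-bit wall time followed by a big-endian 16-bit counter, stored under SYNC_KEY_PREFIX.HLC_STATE. Below is a minimal TypeScript sketch of that encode/decode round trip; the HLCState shape is inferred from the removed code above and is not a published API of this package.

// Sketch only: HLCState is inferred from the removed implementation,
// not an exported type of @quereus/plugin-sync.
interface HLCState {
    wallTime: bigint; // 64-bit wall-clock milliseconds
    counter: number;  // 16-bit logical counter (0..65535)
}

// Encode to the 10-byte big-endian layout the removed code wrote to its KV store.
function encodeHLCState(state: HLCState): Uint8Array {
    const buffer = new Uint8Array(10);
    const view = new DataView(buffer.buffer);
    view.setBigUint64(0, state.wallTime, false); // bytes 0-7: wall time, big-endian
    view.setUint16(8, state.counter, false);     // bytes 8-9: counter, big-endian
    return buffer;
}

// Decode, honoring byteOffset/byteLength as the removed create() did when reading
// a value that may be a view into a larger buffer.
function decodeHLCState(data: Uint8Array): HLCState {
    const view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
        wallTime: view.getBigUint64(0, false),
        counter: view.getUint16(8, false),
    };
}

Round-tripping decodeHLCState(encodeHLCState(s)) yields an equal state, and the big-endian (wallTime, counter) layout has the useful property that the serialized bytes compare in the same order as the clocks themselves.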