@trestleinc/replicate 1.1.1 → 1.1.2-preview.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two published versions.
Files changed (91)
  1. package/README.md +395 -146
  2. package/dist/client/index.d.ts +311 -19
  3. package/dist/client/index.js +4027 -0
  4. package/dist/component/_generated/api.d.ts +13 -17
  5. package/dist/component/_generated/api.js +24 -4
  6. package/dist/component/_generated/component.d.ts +79 -77
  7. package/dist/component/_generated/component.js +1 -0
  8. package/dist/component/_generated/dataModel.d.ts +12 -15
  9. package/dist/component/_generated/dataModel.js +1 -0
  10. package/dist/component/_generated/server.d.ts +19 -22
  11. package/dist/component/_generated/server.js +65 -1
  12. package/dist/component/_virtual/rolldown_runtime.js +18 -0
  13. package/dist/component/convex.config.d.ts +6 -2
  14. package/dist/component/convex.config.js +7 -3
  15. package/dist/component/logger.d.ts +10 -6
  16. package/dist/component/logger.js +25 -28
  17. package/dist/component/public.d.ts +70 -61
  18. package/dist/component/public.js +311 -295
  19. package/dist/component/schema.d.ts +53 -45
  20. package/dist/component/schema.js +26 -32
  21. package/dist/component/shared/types.d.ts +9 -0
  22. package/dist/component/shared/types.js +15 -0
  23. package/dist/server/index.d.ts +134 -13
  24. package/dist/server/index.js +368 -0
  25. package/dist/shared/index.d.ts +27 -3
  26. package/dist/shared/index.js +1 -2
  27. package/package.json +34 -29
  28. package/src/client/collection.ts +339 -306
  29. package/src/client/errors.ts +9 -9
  30. package/src/client/index.ts +13 -32
  31. package/src/client/logger.ts +2 -2
  32. package/src/client/merge.ts +37 -34
  33. package/src/client/persistence/custom.ts +84 -0
  34. package/src/client/persistence/index.ts +9 -46
  35. package/src/client/persistence/indexeddb.ts +111 -84
  36. package/src/client/persistence/memory.ts +3 -3
  37. package/src/client/persistence/sqlite/browser.ts +168 -0
  38. package/src/client/persistence/sqlite/native.ts +29 -0
  39. package/src/client/persistence/sqlite/schema.ts +124 -0
  40. package/src/client/persistence/types.ts +32 -28
  41. package/src/client/prose-schema.ts +55 -0
  42. package/src/client/prose.ts +28 -25
  43. package/src/client/replicate.ts +5 -5
  44. package/src/client/services/cursor.ts +109 -0
  45. package/src/component/_generated/component.ts +31 -29
  46. package/src/component/convex.config.ts +2 -2
  47. package/src/component/logger.ts +7 -7
  48. package/src/component/public.ts +225 -237
  49. package/src/component/schema.ts +18 -15
  50. package/src/server/builder.ts +20 -7
  51. package/src/server/index.ts +3 -5
  52. package/src/server/schema.ts +5 -5
  53. package/src/server/storage.ts +113 -59
  54. package/src/shared/index.ts +5 -5
  55. package/src/shared/types.ts +51 -14
  56. package/dist/client/collection.d.ts +0 -96
  57. package/dist/client/errors.d.ts +0 -59
  58. package/dist/client/logger.d.ts +0 -2
  59. package/dist/client/merge.d.ts +0 -77
  60. package/dist/client/persistence/adapters/index.d.ts +0 -8
  61. package/dist/client/persistence/adapters/opsqlite.d.ts +0 -46
  62. package/dist/client/persistence/adapters/sqljs.d.ts +0 -83
  63. package/dist/client/persistence/index.d.ts +0 -49
  64. package/dist/client/persistence/indexeddb.d.ts +0 -17
  65. package/dist/client/persistence/memory.d.ts +0 -16
  66. package/dist/client/persistence/sqlite-browser.d.ts +0 -51
  67. package/dist/client/persistence/sqlite-level.d.ts +0 -63
  68. package/dist/client/persistence/sqlite-rn.d.ts +0 -36
  69. package/dist/client/persistence/sqlite.d.ts +0 -47
  70. package/dist/client/persistence/types.d.ts +0 -42
  71. package/dist/client/prose.d.ts +0 -56
  72. package/dist/client/replicate.d.ts +0 -40
  73. package/dist/client/services/checkpoint.d.ts +0 -18
  74. package/dist/client/services/reconciliation.d.ts +0 -24
  75. package/dist/index.js +0 -1618
  76. package/dist/server/builder.d.ts +0 -94
  77. package/dist/server/schema.d.ts +0 -27
  78. package/dist/server/storage.d.ts +0 -80
  79. package/dist/server.js +0 -281
  80. package/dist/shared/types.d.ts +0 -50
  81. package/dist/shared/types.js +0 -6
  82. package/dist/shared.js +0 -6
  83. package/src/client/persistence/adapters/index.ts +0 -8
  84. package/src/client/persistence/adapters/opsqlite.ts +0 -54
  85. package/src/client/persistence/adapters/sqljs.ts +0 -128
  86. package/src/client/persistence/sqlite-browser.ts +0 -107
  87. package/src/client/persistence/sqlite-level.ts +0 -407
  88. package/src/client/persistence/sqlite-rn.ts +0 -44
  89. package/src/client/persistence/sqlite.ts +0 -160
  90. package/src/client/services/checkpoint.ts +0 -86
  91. package/src/client/services/reconciliation.ts +0 -108
@@ -1,107 +1,21 @@
-import * as Y from 'yjs';
-import { v } from 'convex/values';
-import { mutation, query } from '$/component/_generated/server';
-import { getLogger } from '$/component/logger';
-import { OperationType } from '$/shared/types.js';
+import * as Y from "yjs";
+import { v } from "convex/values";
+import { mutation, query } from "$/component/_generated/server";
+import { getLogger } from "$/component/logger";
+import { OperationType } from "$/shared/types";
 
 export { OperationType };
 
-// Default size threshold for auto-compaction (5MB)
 const DEFAULT_SIZE_THRESHOLD = 5_000_000;
+const DEFAULT_PEER_TIMEOUT = 5 * 60 * 1000;
 
-/**
- * Auto-compacts a document's deltas into a snapshot when size threshold is exceeded.
- * Returns null if no compaction needed, or the compaction result.
- */
-async function _maybeCompactDocument(
-  ctx: any,
-  collection: string,
-  documentId: string,
-  threshold: number = DEFAULT_SIZE_THRESHOLD
-): Promise<{ deltasCompacted: number; snapshotSize: number } | null> {
-  const logger = getLogger(['compaction']);
-
-  // Get all deltas for this specific document
-  const deltas = await ctx.db
-    .query('documents')
-    .withIndex('by_collection_document_version', (q: any) =>
-      q.eq('collection', collection).eq('documentId', documentId)
-    )
-    .collect();
-
-  // Calculate total size
-  const totalSize = deltas.reduce((sum: number, d: any) => sum + d.crdtBytes.byteLength, 0);
-
-  // Skip if below size threshold
-  if (totalSize < threshold) {
-    return null;
-  }
-
-  logger.info('Auto-compacting document', {
-    collection,
-    documentId,
-    deltaCount: deltas.length,
-    totalSize,
-    threshold,
-  });
-
-  // Merge deltas into snapshot
-  const sorted = deltas.sort((a: any, b: any) => a.timestamp - b.timestamp);
-  const updates = sorted.map((d: any) => new Uint8Array(d.crdtBytes));
-  const compactedState = Y.mergeUpdatesV2(updates);
-
-  // Validate compacted state
-  const testDoc = new Y.Doc({ guid: `${collection}:${documentId}` });
-  try {
-    Y.applyUpdateV2(testDoc, compactedState);
-  } catch (error) {
-    logger.error('Compacted state validation failed', {
-      collection,
-      documentId,
-      error: String(error),
-    });
-    testDoc.destroy();
-    return null;
-  }
-  testDoc.destroy();
-
-  // Delete existing snapshot for this document (keep only 1)
-  const existingSnapshot = await ctx.db
-    .query('snapshots')
-    .withIndex('by_document', (q: any) =>
-      q.eq('collection', collection).eq('documentId', documentId)
-    )
+async function getNextSeq(ctx: any, collection: string): Promise<number> {
+  const latest = await ctx.db
+    .query("documents")
+    .withIndex("by_seq", (q: any) => q.eq("collection", collection))
+    .order("desc")
     .first();
-  if (existingSnapshot) {
-    await ctx.db.delete('snapshots', existingSnapshot._id);
-  }
-
-  // Store new per-document snapshot
-  await ctx.db.insert('snapshots', {
-    collection,
-    documentId,
-    snapshotBytes: compactedState.buffer as ArrayBuffer,
-    latestCompactionTimestamp: sorted[sorted.length - 1].timestamp,
-    createdAt: Date.now(),
-    metadata: {
-      deltaCount: deltas.length,
-      totalSize,
-    },
-  });
-
-  // Delete old deltas
-  for (const delta of sorted) {
-    await ctx.db.delete('documents', delta._id);
-  }
-
-  logger.info('Auto-compaction completed', {
-    collection,
-    documentId,
-    deltasCompacted: deltas.length,
-    snapshotSize: compactedState.length,
-  });
-
-  return { deltasCompacted: deltas.length, snapshotSize: compactedState.length };
+  return (latest?.seq ?? 0) + 1;
 }
 
 export const insertDocument = mutation({
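Note on the hunk above: per-collection ordering now comes from a monotonic seq column instead of Date.now() timestamps, which removes clock skew and tie-breaking from cursor paging. A minimal sketch of the allocation property getNextSeq relies on (an in-memory model, not the package's API; on the server, Convex's transactional mutations are what make the read-then-insert safe under concurrency):

    type Row = { documentId: string; seq: number };

    // Mirrors getNextSeq: read the collection's max seq, hand out max + 1.
    function nextSeq(rows: Row[]): number {
      return rows.reduce((max, r) => Math.max(max, r.seq), 0) + 1;
    }

    const rows: Row[] = [];
    rows.push({ documentId: "a", seq: nextSeq(rows) }); // seq 1
    rows.push({ documentId: "b", seq: nextSeq(rows) }); // seq 2
    // A reader whose cursor is 1 asks for seq > 1 and gets exactly one row:
    console.log(rows.filter((r) => r.seq > 1)); // [{ documentId: "b", seq: 2 }]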
@@ -109,34 +23,22 @@ export const insertDocument = mutation({
     collection: v.string(),
     documentId: v.string(),
     crdtBytes: v.bytes(),
-    version: v.number(),
-    threshold: v.optional(v.number()),
   },
   returns: v.object({
     success: v.boolean(),
-    compacted: v.optional(v.boolean()),
+    seq: v.number(),
   }),
   handler: async (ctx, args) => {
-    await ctx.db.insert('documents', {
+    const seq = await getNextSeq(ctx, args.collection);
+
+    await ctx.db.insert("documents", {
       collection: args.collection,
       documentId: args.documentId,
       crdtBytes: args.crdtBytes,
-      version: args.version,
-      timestamp: Date.now(),
+      seq,
     });
 
-    // Auto-compact if size threshold exceeded
-    const compactionResult = await _maybeCompactDocument(
-      ctx,
-      args.collection,
-      args.documentId,
-      args.threshold ?? DEFAULT_SIZE_THRESHOLD
-    );
-
-    return {
-      success: true,
-      compacted: compactionResult !== null,
-    };
+    return { success: true, seq };
   },
 });
 
@@ -145,34 +47,22 @@ export const updateDocument = mutation({
     collection: v.string(),
     documentId: v.string(),
     crdtBytes: v.bytes(),
-    version: v.number(),
-    threshold: v.optional(v.number()),
   },
   returns: v.object({
     success: v.boolean(),
-    compacted: v.optional(v.boolean()),
+    seq: v.number(),
   }),
   handler: async (ctx, args) => {
-    await ctx.db.insert('documents', {
+    const seq = await getNextSeq(ctx, args.collection);
+
+    await ctx.db.insert("documents", {
       collection: args.collection,
      documentId: args.documentId,
       crdtBytes: args.crdtBytes,
-      version: args.version,
-      timestamp: Date.now(),
+      seq,
     });
 
-    // Auto-compact if size threshold exceeded
-    const compactionResult = await _maybeCompactDocument(
-      ctx,
-      args.collection,
-      args.documentId,
-      args.threshold ?? DEFAULT_SIZE_THRESHOLD
-    );
-
-    return {
-      success: true,
-      compacted: compactionResult !== null,
-    };
+    return { success: true, seq };
   },
 });
 
@@ -181,141 +71,250 @@ export const deleteDocument = mutation({
     collection: v.string(),
     documentId: v.string(),
     crdtBytes: v.bytes(),
-    version: v.number(),
-    threshold: v.optional(v.number()),
   },
   returns: v.object({
     success: v.boolean(),
-    compacted: v.optional(v.boolean()),
+    seq: v.number(),
   }),
   handler: async (ctx, args) => {
-    await ctx.db.insert('documents', {
+    const seq = await getNextSeq(ctx, args.collection);
+
+    await ctx.db.insert("documents", {
       collection: args.collection,
       documentId: args.documentId,
       crdtBytes: args.crdtBytes,
-      version: args.version,
-      timestamp: Date.now(),
+      seq,
     });
 
-    // Auto-compact if size threshold exceeded
-    const compactionResult = await _maybeCompactDocument(
-      ctx,
-      args.collection,
-      args.documentId,
-      args.threshold ?? DEFAULT_SIZE_THRESHOLD
-    );
+    return { success: true, seq };
+  },
+});
 
-    return {
-      success: true,
-      compacted: compactionResult !== null,
-    };
+export const mark = mutation({
+  args: {
+    collection: v.string(),
+    peerId: v.string(),
+    syncedSeq: v.number(),
+  },
+  returns: v.null(),
+  handler: async (ctx, args) => {
+    const existing = await ctx.db
+      .query("peers")
+      .withIndex("by_collection_peer", (q: any) =>
+        q.eq("collection", args.collection).eq("peerId", args.peerId),
+      )
+      .first();
+
+    if (existing) {
+      await ctx.db.patch(existing._id, {
+        lastSyncedSeq: Math.max(existing.lastSyncedSeq, args.syncedSeq),
+        lastSeenAt: Date.now(),
+      });
+    }
+    else {
+      await ctx.db.insert("peers", {
+        collection: args.collection,
+        peerId: args.peerId,
+        lastSyncedSeq: args.syncedSeq,
+        lastSeenAt: Date.now(),
+      });
+    }
+
+    return null;
+  },
+});
+
+export const compact = mutation({
+  args: {
+    collection: v.string(),
+    documentId: v.string(),
+    snapshotBytes: v.bytes(),
+    stateVector: v.bytes(),
+    peerTimeout: v.optional(v.number()),
+  },
+  returns: v.object({
+    success: v.boolean(),
+    removed: v.number(),
+    retained: v.number(),
+  }),
+  handler: async (ctx, args) => {
+    const logger = getLogger(["compaction"]);
+    const now = Date.now();
+    const peerTimeout = args.peerTimeout ?? DEFAULT_PEER_TIMEOUT;
+    const peerCutoff = now - peerTimeout;
+
+    const deltas = await ctx.db
+      .query("documents")
+      .withIndex("by_collection_document", (q: any) =>
+        q.eq("collection", args.collection).eq("documentId", args.documentId),
+      )
+      .collect();
+
+    const activePeers = await ctx.db
+      .query("peers")
+      .withIndex("by_collection", (q: any) => q.eq("collection", args.collection))
+      .filter((q: any) => q.gt(q.field("lastSeenAt"), peerCutoff))
+      .collect();
+
+    const minSyncedSeq = activePeers.length > 0
+      ? Math.min(...activePeers.map((p: any) => p.lastSyncedSeq))
+      : Infinity;
+
+    const existingSnapshot = await ctx.db
+      .query("snapshots")
+      .withIndex("by_document", (q: any) =>
+        q.eq("collection", args.collection).eq("documentId", args.documentId),
+      )
+      .first();
+
+    if (existingSnapshot) {
+      await ctx.db.delete(existingSnapshot._id);
+    }
+
+    const snapshotSeq = deltas.length > 0
+      ? Math.max(...deltas.map((d: any) => d.seq))
+      : 0;
+
+    await ctx.db.insert("snapshots", {
+      collection: args.collection,
+      documentId: args.documentId,
+      snapshotBytes: args.snapshotBytes,
+      stateVector: args.stateVector,
+      snapshotSeq,
+      createdAt: now,
+    });
+
+    let removed = 0;
+    for (const delta of deltas) {
+      if (delta.seq < minSyncedSeq) {
+        await ctx.db.delete(delta._id);
+        removed++;
+      }
+    }
+
+    logger.info("Compaction completed", {
+      collection: args.collection,
+      documentId: args.documentId,
+      removed,
+      retained: deltas.length - removed,
+      activePeers: activePeers.length,
+      minSyncedSeq,
+    });
+
+    return { success: true, removed, retained: deltas.length - removed };
   },
 });
 
 export const stream = query({
   args: {
     collection: v.string(),
-    checkpoint: v.object({
-      lastModified: v.number(),
-    }),
-    vector: v.optional(v.bytes()),
+    cursor: v.number(),
     limit: v.optional(v.number()),
+    sizeThreshold: v.optional(v.number()),
   },
   returns: v.object({
     changes: v.array(
       v.object({
-        documentId: v.optional(v.string()),
+        documentId: v.string(),
         crdtBytes: v.bytes(),
-        version: v.number(),
-        timestamp: v.number(),
+        seq: v.number(),
         operationType: v.string(),
-      })
+      }),
     ),
-    checkpoint: v.object({
-      lastModified: v.number(),
-    }),
+    cursor: v.number(),
     hasMore: v.boolean(),
+    compact: v.optional(v.string()),
   }),
   handler: async (ctx, args) => {
     const limit = args.limit ?? 100;
+    const sizeThreshold = args.sizeThreshold ?? DEFAULT_SIZE_THRESHOLD;
 
-    // Get deltas newer than checkpoint
     const documents = await ctx.db
-      .query('documents')
-      .withIndex('by_timestamp', (q) =>
-        q.eq('collection', args.collection).gt('timestamp', args.checkpoint.lastModified)
+      .query("documents")
+      .withIndex("by_seq", (q: any) =>
+        q.eq("collection", args.collection).gt("seq", args.cursor),
       )
-      .order('asc')
+      .order("asc")
       .take(limit);
 
     if (documents.length > 0) {
-      const changes = documents.map((doc) => ({
+      const changes = documents.map((doc: any) => ({
         documentId: doc.documentId,
         crdtBytes: doc.crdtBytes,
-        version: doc.version,
-        timestamp: doc.timestamp,
+        seq: doc.seq,
         operationType: OperationType.Delta,
       }));
 
-      const newCheckpoint = {
-        lastModified: documents[documents.length - 1]?.timestamp ?? args.checkpoint.lastModified,
-      };
+      const newCursor = documents[documents.length - 1]?.seq ?? args.cursor;
+
+      let compactHint: string | undefined;
+      const allDocs = await ctx.db
+        .query("documents")
+        .withIndex("by_collection", (q: any) => q.eq("collection", args.collection))
+        .collect();
+
+      const sizeByDocument = new Map<string, number>();
+      for (const doc of allDocs) {
+        const current = sizeByDocument.get(doc.documentId) ?? 0;
+        sizeByDocument.set(doc.documentId, current + doc.crdtBytes.byteLength);
+      }
+
+      for (const [docId, size] of sizeByDocument) {
+        if (size > sizeThreshold) {
+          compactHint = docId;
+          break;
+        }
+      }
 
       return {
         changes,
-        checkpoint: newCheckpoint,
+        cursor: newCursor,
         hasMore: documents.length === limit,
+        compact: compactHint,
       };
     }
 
-    // Check for disparity - client checkpoint older than oldest delta
     const oldestDelta = await ctx.db
-      .query('documents')
-      .withIndex('by_timestamp', (q) => q.eq('collection', args.collection))
-      .order('asc')
+      .query("documents")
+      .withIndex("by_seq", (q: any) => q.eq("collection", args.collection))
+      .order("asc")
      .first();
 
-    if (oldestDelta && args.checkpoint.lastModified < oldestDelta.timestamp) {
-      // Disparity detected - need to send all per-document snapshots
-      // Get all snapshots for this collection
+    if (oldestDelta && args.cursor < oldestDelta.seq) {
       const snapshots = await ctx.db
-        .query('snapshots')
-        .withIndex('by_document', (q) => q.eq('collection', args.collection))
+        .query("snapshots")
+        .withIndex("by_document", (q: any) => q.eq("collection", args.collection))
         .collect();
 
       if (snapshots.length === 0) {
         throw new Error(
-          `Disparity detected but no snapshots available for collection: ${args.collection}. ` +
-            `Client checkpoint: ${args.checkpoint.lastModified}, ` +
-            `Oldest delta: ${oldestDelta.timestamp}`
+          `Disparity detected but no snapshots available for collection: ${args.collection}. `
+          + `Client cursor: ${args.cursor}, Oldest delta seq: ${oldestDelta.seq}`,
         );
       }
 
-      // Return all snapshots as changes
-      const changes = snapshots.map((snapshot) => ({
+      const changes = snapshots.map((snapshot: any) => ({
         documentId: snapshot.documentId,
         crdtBytes: snapshot.snapshotBytes,
-        version: 0,
-        timestamp: snapshot.createdAt,
+        seq: snapshot.snapshotSeq,
         operationType: OperationType.Snapshot,
       }));
 
-      // Find the latest compaction timestamp to use as checkpoint
-      const latestTimestamp = Math.max(...snapshots.map((s) => s.latestCompactionTimestamp));
+      const latestSeq = Math.max(...snapshots.map((s: any) => s.snapshotSeq));
 
       return {
         changes,
-        checkpoint: {
-          lastModified: latestTimestamp,
-        },
+        cursor: latestSeq,
         hasMore: false,
+        compact: undefined,
       };
     }
 
     return {
       changes: [],
-      checkpoint: args.checkpoint,
+      cursor: args.cursor,
       hasMore: false,
+      compact: undefined,
     };
   },
 });
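Taken together, stream, mark, and compact form a pull loop: page deltas by cursor, acknowledge progress per peer, and upload a merged snapshot when the server flags an oversized document. A sketch of one iteration, assuming the client keeps one Y.Doc per documentId and reusing the placeholder client/api from the earlier sketch:

    import * as Y from "yjs";

    declare const client: any; // hypothetical ConvexClient, as above
    declare const api: any;    // placeholder path to the mounted component

    async function pullOnce(
      docs: Map<string, Y.Doc>,
      collection: string,
      peerId: string,
      cursor: number,
    ): Promise<{ cursor: number; hasMore: boolean }> {
      const page = await client.query(api.replicate.public.stream, { collection, cursor });

      for (const change of page.changes) {
        const doc = docs.get(change.documentId) ?? new Y.Doc();
        docs.set(change.documentId, doc);
        // Deltas and snapshots are both V2 updates, so one apply path covers both.
        Y.applyUpdateV2(doc, new Uint8Array(change.crdtBytes));
      }

      // Record progress; compact() only deletes deltas below every active peer's mark.
      await client.mutation(api.replicate.public.mark, {
        collection,
        peerId,
        syncedSeq: page.cursor,
      });

      // Respond to the server's "this document is oversized" hint.
      const target = page.compact !== undefined ? docs.get(page.compact) : undefined;
      if (page.compact !== undefined && target !== undefined) {
        await client.mutation(api.replicate.public.compact, {
          collection,
          documentId: page.compact,
          snapshotBytes: Y.encodeStateAsUpdateV2(target).slice().buffer,
          stateVector: Y.encodeStateVector(target).slice().buffer,
        });
      }

      return { cursor: page.cursor, hasMore: page.hasMore };
    }

Note the retention rule in compact: only deltas strictly below the minimum seq acknowledged by every active peer are deleted, so a peer whose mark lags can still replay everything newer than its mark.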
@@ -327,52 +326,45 @@ export const getInitialState = query({
   returns: v.union(
     v.object({
       crdtBytes: v.bytes(),
-      checkpoint: v.object({
-        lastModified: v.number(),
-      }),
+      cursor: v.number(),
     }),
-    v.null()
+    v.null(),
   ),
   handler: async (ctx, args) => {
-    const logger = getLogger(['ssr']);
+    const logger = getLogger(["ssr"]);
 
-    // Get all per-document snapshots for this collection
     const snapshots = await ctx.db
-      .query('snapshots')
-      .withIndex('by_document', (q) => q.eq('collection', args.collection))
+      .query("snapshots")
+      .withIndex("by_document", (q: any) => q.eq("collection", args.collection))
       .collect();
 
-    // Get all deltas for this collection
     const deltas = await ctx.db
-      .query('documents')
-      .withIndex('by_collection', (q) => q.eq('collection', args.collection))
+      .query("documents")
+      .withIndex("by_collection", (q: any) => q.eq("collection", args.collection))
       .collect();
 
     if (snapshots.length === 0 && deltas.length === 0) {
-      logger.info('No initial state available - collection is empty', {
+      logger.info("No initial state available - collection is empty", {
         collection: args.collection,
       });
       return null;
     }
 
-    // Merge all snapshots and deltas together
     const updates: Uint8Array[] = [];
-    let latestTimestamp = 0;
+    let latestSeq = 0;
 
-    // Add all per-document snapshots
     for (const snapshot of snapshots) {
       updates.push(new Uint8Array(snapshot.snapshotBytes));
-      latestTimestamp = Math.max(latestTimestamp, snapshot.latestCompactionTimestamp);
+      latestSeq = Math.max(latestSeq, snapshot.snapshotSeq);
     }
 
-    // Add all deltas
-    const sorted = deltas.sort((a, b) => a.timestamp - b.timestamp);
+    const sorted = deltas.sort((a: any, b: any) => a.seq - b.seq);
     for (const delta of sorted) {
       updates.push(new Uint8Array(delta.crdtBytes));
-      latestTimestamp = Math.max(latestTimestamp, delta.timestamp);
+      latestSeq = Math.max(latestSeq, delta.seq);
     }
 
-    logger.info('Reconstructing initial state', {
+    logger.info("Reconstructing initial state", {
       collection: args.collection,
       snapshotCount: snapshots.length,
       deltaCount: deltas.length,
@@ -380,7 +372,7 @@ export const getInitialState = query({
 
     const merged = Y.mergeUpdatesV2(updates);
 
-    logger.info('Initial state reconstructed', {
+    logger.info("Initial state reconstructed", {
       collection: args.collection,
       originalSize: updates.reduce((sum, u) => sum + u.byteLength, 0),
       mergedSize: merged.byteLength,
388
380
 
389
381
  return {
390
382
  crdtBytes: merged.buffer as ArrayBuffer,
391
- checkpoint: {
392
- lastModified: latestTimestamp,
393
- },
383
+ cursor: latestSeq,
394
384
  };
395
385
  },
396
386
  });
397
387
 
398
- /**
399
- * Recovery query for state vector based sync.
400
- * Client sends its state vector, server computes and returns the diff.
401
- */
402
388
  export const recovery = query({
403
389
  args: {
404
390
  collection: v.string(),
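getInitialState now returns a numeric cursor alongside the merged bytes, so an SSR or cold-start client can hydrate once and resume streaming without a gap. A minimal sketch, again with the placeholder client/api:

    import * as Y from "yjs";

    declare const client: any; // placeholder, as in the earlier sketches
    declare const api: any;

    async function hydrate(collection: string): Promise<{ doc: Y.Doc; cursor: number }> {
      const initial = await client.query(api.replicate.public.getInitialState, { collection });
      const doc = new Y.Doc();
      if (initial === null) return { doc, cursor: 0 }; // empty collection: start at seq 0

      Y.applyUpdateV2(doc, new Uint8Array(initial.crdtBytes));
      // Pass initial.cursor as the first stream() cursor; nothing is replayed twice.
      return { doc, cursor: initial.cursor };
    }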
@@ -407,49 +393,50 @@
   returns: v.object({
     diff: v.optional(v.bytes()),
     serverStateVector: v.bytes(),
+    cursor: v.number(),
   }),
   handler: async (ctx, args) => {
-    const logger = getLogger(['recovery']);
+    const logger = getLogger(["recovery"]);
 
-    // Get all snapshots for this collection
     const snapshots = await ctx.db
-      .query('snapshots')
-      .withIndex('by_document', (q) => q.eq('collection', args.collection))
+      .query("snapshots")
+      .withIndex("by_document", (q: any) => q.eq("collection", args.collection))
       .collect();
 
-    // Get all deltas for this collection
     const deltas = await ctx.db
-      .query('documents')
-      .withIndex('by_collection', (q) => q.eq('collection', args.collection))
+      .query("documents")
+      .withIndex("by_collection", (q: any) => q.eq("collection", args.collection))
       .collect();
 
     if (snapshots.length === 0 && deltas.length === 0) {
-      // Empty collection - return empty state vector
       const emptyDoc = new Y.Doc();
       const emptyVector = Y.encodeStateVector(emptyDoc);
       emptyDoc.destroy();
-      return { serverStateVector: emptyVector.buffer as ArrayBuffer };
+      return {
+        serverStateVector: emptyVector.buffer as ArrayBuffer,
+        cursor: 0,
+      };
     }
 
-    // Merge all snapshots and deltas into full server state
     const updates: Uint8Array[] = [];
+    let latestSeq = 0;
 
     for (const snapshot of snapshots) {
       updates.push(new Uint8Array(snapshot.snapshotBytes));
+      latestSeq = Math.max(latestSeq, snapshot.snapshotSeq);
     }
 
     for (const delta of deltas) {
       updates.push(new Uint8Array(delta.crdtBytes));
+      latestSeq = Math.max(latestSeq, delta.seq);
     }
 
     const mergedState = Y.mergeUpdatesV2(updates);
-
-    // Compute diff relative to client's state vector
     const clientVector = new Uint8Array(args.clientStateVector);
     const diff = Y.diffUpdateV2(mergedState, clientVector);
     const serverVector = Y.encodeStateVectorFromUpdateV2(mergedState);
 
-    logger.info('Recovery sync computed', {
+    logger.info("Recovery sync computed", {
       collection: args.collection,
       snapshotCount: snapshots.length,
       deltaCount: deltas.length,
@@ -460,6 +447,7 @@ export const recovery = query({
     return {
       diff: diff.byteLength > 0 ? (diff.buffer as ArrayBuffer) : undefined,
       serverStateVector: serverVector.buffer as ArrayBuffer,
+      cursor: latestSeq,
     };
   },
 });
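recovery keeps its state-vector contract but now also reports a cursor, so a client that falls back to a full diff can rejoin the incremental stream at the right position. A hedged sketch with the same placeholder client/api:

    import * as Y from "yjs";

    declare const client: any; // placeholder, as above
    declare const api: any;

    async function recover(doc: Y.Doc, collection: string): Promise<number> {
      const result = await client.query(api.replicate.public.recovery, {
        collection,
        clientStateVector: Y.encodeStateVector(doc).slice().buffer,
      });

      if (result.diff !== undefined) {
        // Apply only what the server has that this client is missing.
        Y.applyUpdateV2(doc, new Uint8Array(result.diff));
      }
      return result.cursor; // seed subsequent stream() calls
    }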