@trestleinc/replicate 0.1.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. package/README.md +356 -420
  2. package/dist/client/collection.d.ts +78 -76
  3. package/dist/client/errors.d.ts +59 -0
  4. package/dist/client/index.d.ts +22 -18
  5. package/dist/client/logger.d.ts +0 -1
  6. package/dist/client/merge.d.ts +77 -0
  7. package/dist/client/persistence/adapters/index.d.ts +8 -0
  8. package/dist/client/persistence/adapters/opsqlite.d.ts +46 -0
  9. package/dist/client/persistence/adapters/sqljs.d.ts +83 -0
  10. package/dist/client/persistence/index.d.ts +49 -0
  11. package/dist/client/persistence/indexeddb.d.ts +17 -0
  12. package/dist/client/persistence/memory.d.ts +16 -0
  13. package/dist/client/persistence/sqlite-browser.d.ts +51 -0
  14. package/dist/client/persistence/sqlite-level.d.ts +63 -0
  15. package/dist/client/persistence/sqlite-rn.d.ts +36 -0
  16. package/dist/client/persistence/sqlite.d.ts +47 -0
  17. package/dist/client/persistence/types.d.ts +42 -0
  18. package/dist/client/prose.d.ts +56 -0
  19. package/dist/client/replicate.d.ts +40 -0
  20. package/dist/client/services/checkpoint.d.ts +18 -0
  21. package/dist/client/services/reconciliation.d.ts +24 -0
  22. package/dist/component/_generated/api.d.ts +35 -0
  23. package/dist/component/_generated/api.js +3 -3
  24. package/dist/component/_generated/component.d.ts +89 -0
  25. package/dist/component/_generated/component.js +0 -0
  26. package/dist/component/_generated/dataModel.d.ts +45 -0
  27. package/dist/component/_generated/dataModel.js +0 -0
  28. package/{src → dist}/component/_generated/server.d.ts +9 -38
  29. package/dist/component/convex.config.d.ts +2 -2
  30. package/dist/component/convex.config.js +2 -1
  31. package/dist/component/logger.d.ts +8 -0
  32. package/dist/component/logger.js +30 -0
  33. package/dist/component/public.d.ts +36 -61
  34. package/dist/component/public.js +232 -58
  35. package/dist/component/schema.d.ts +32 -8
  36. package/dist/component/schema.js +19 -6
  37. package/dist/index.js +1553 -308
  38. package/dist/server/builder.d.ts +94 -0
  39. package/dist/server/index.d.ts +14 -17
  40. package/dist/server/schema.d.ts +17 -63
  41. package/dist/server/storage.d.ts +80 -0
  42. package/dist/server.js +268 -83
  43. package/dist/shared/index.d.ts +5 -0
  44. package/dist/shared/index.js +2 -0
  45. package/dist/shared/types.d.ts +50 -0
  46. package/dist/shared/types.js +6 -0
  47. package/dist/shared.js +6 -0
  48. package/package.json +59 -49
  49. package/src/client/collection.ts +877 -450
  50. package/src/client/errors.ts +45 -0
  51. package/src/client/index.ts +52 -26
  52. package/src/client/logger.ts +2 -28
  53. package/src/client/merge.ts +374 -0
  54. package/src/client/persistence/adapters/index.ts +8 -0
  55. package/src/client/persistence/adapters/opsqlite.ts +54 -0
  56. package/src/client/persistence/adapters/sqljs.ts +128 -0
  57. package/src/client/persistence/index.ts +54 -0
  58. package/src/client/persistence/indexeddb.ts +110 -0
  59. package/src/client/persistence/memory.ts +61 -0
  60. package/src/client/persistence/sqlite-browser.ts +107 -0
  61. package/src/client/persistence/sqlite-level.ts +407 -0
  62. package/src/client/persistence/sqlite-rn.ts +44 -0
  63. package/src/client/persistence/sqlite.ts +161 -0
  64. package/src/client/persistence/types.ts +49 -0
  65. package/src/client/prose.ts +369 -0
  66. package/src/client/replicate.ts +80 -0
  67. package/src/client/services/checkpoint.ts +86 -0
  68. package/src/client/services/reconciliation.ts +108 -0
  69. package/src/component/_generated/api.ts +52 -0
  70. package/src/component/_generated/component.ts +103 -0
  71. package/src/component/_generated/{dataModel.d.ts → dataModel.ts} +1 -1
  72. package/src/component/_generated/server.ts +161 -0
  73. package/src/component/convex.config.ts +3 -1
  74. package/src/component/logger.ts +36 -0
  75. package/src/component/public.ts +364 -111
  76. package/src/component/schema.ts +18 -5
  77. package/src/env.d.ts +31 -0
  78. package/src/server/builder.ts +85 -0
  79. package/src/server/index.ts +9 -24
  80. package/src/server/schema.ts +20 -76
  81. package/src/server/storage.ts +313 -0
  82. package/src/shared/index.ts +5 -0
  83. package/src/shared/types.ts +52 -0
  84. package/LICENSE.package +0 -201
  85. package/dist/client/storage.d.ts +0 -143
  86. package/dist/server/replication.d.ts +0 -122
  87. package/dist/server/ssr.d.ts +0 -79
  88. package/dist/ssr.js +0 -19
  89. package/src/client/storage.ts +0 -206
  90. package/src/component/_generated/api.d.ts +0 -95
  91. package/src/component/_generated/api.js +0 -23
  92. package/src/component/_generated/server.js +0 -90
  93. package/src/server/replication.ts +0 -244
  94. package/src/server/ssr.ts +0 -106
@@ -1,176 +1,234 @@
1
+ import * as Y from 'yjs';
1
2
  import { v } from 'convex/values';
2
- import { mutation, query } from './_generated/server';
3
+ import { mutation, query } from '$/component/_generated/server';
4
+ import { getLogger } from '$/component/logger';
5
+ import { OperationType } from '$/shared/types.js';
6
+
7
+ export { OperationType };
8
+
9
+ // Default size threshold for auto-compaction (5MB)
10
+ const DEFAULT_SIZE_THRESHOLD = 5_000_000;
3
11
 
4
12
  /**
5
- * Insert a new document with CRDT bytes (Yjs format).
6
- * Appends delta to event log (event sourcing pattern).
7
- *
8
- * @param collectionName - Collection identifier
9
- * @param documentId - Unique document identifier
10
- * @param crdtBytes - ArrayBuffer containing Yjs CRDT bytes (delta)
11
- * @param version - CRDT version number
13
+ * Auto-compacts a document's deltas into a snapshot when size threshold is exceeded.
14
+ * Returns null if no compaction needed, or the compaction result.
12
15
  */
16
+ async function _maybeCompactDocument(
17
+ ctx: any,
18
+ collection: string,
19
+ documentId: string,
20
+ threshold: number = DEFAULT_SIZE_THRESHOLD
21
+ ): Promise<{ deltasCompacted: number; snapshotSize: number } | null> {
22
+ const logger = getLogger(['compaction']);
23
+
24
+ // Get all deltas for this specific document
25
+ const deltas = await ctx.db
26
+ .query('documents')
27
+ .withIndex('by_collection_document_version', (q: any) =>
28
+ q.eq('collection', collection).eq('documentId', documentId)
29
+ )
30
+ .collect();
31
+
32
+ // Calculate total size
33
+ const totalSize = deltas.reduce((sum: number, d: any) => sum + d.crdtBytes.byteLength, 0);
34
+
35
+ // Skip if below size threshold
36
+ if (totalSize < threshold) {
37
+ return null;
38
+ }
39
+
40
+ logger.info('Auto-compacting document', {
41
+ collection,
42
+ documentId,
43
+ deltaCount: deltas.length,
44
+ totalSize,
45
+ threshold,
46
+ });
47
+
48
+ // Merge deltas into snapshot
49
+ const sorted = deltas.sort((a: any, b: any) => a.timestamp - b.timestamp);
50
+ const updates = sorted.map((d: any) => new Uint8Array(d.crdtBytes));
51
+ const compactedState = Y.mergeUpdatesV2(updates);
52
+
53
+ // Validate compacted state
54
+ const testDoc = new Y.Doc({ guid: `${collection}:${documentId}` });
55
+ try {
56
+ Y.applyUpdateV2(testDoc, compactedState);
57
+ } catch (error) {
58
+ logger.error('Compacted state validation failed', {
59
+ collection,
60
+ documentId,
61
+ error: String(error),
62
+ });
63
+ testDoc.destroy();
64
+ return null;
65
+ }
66
+ testDoc.destroy();
67
+
68
+ // Delete existing snapshot for this document (keep only 1)
69
+ const existingSnapshot = await ctx.db
70
+ .query('snapshots')
71
+ .withIndex('by_document', (q: any) =>
72
+ q.eq('collection', collection).eq('documentId', documentId)
73
+ )
74
+ .first();
75
+ if (existingSnapshot) {
76
+ await ctx.db.delete('snapshots', existingSnapshot._id);
77
+ }
78
+
79
+ // Store new per-document snapshot
80
+ await ctx.db.insert('snapshots', {
81
+ collection,
82
+ documentId,
83
+ snapshotBytes: compactedState.buffer as ArrayBuffer,
84
+ latestCompactionTimestamp: sorted[sorted.length - 1].timestamp,
85
+ createdAt: Date.now(),
86
+ metadata: {
87
+ deltaCount: deltas.length,
88
+ totalSize,
89
+ },
90
+ });
91
+
92
+ // Delete old deltas
93
+ for (const delta of sorted) {
94
+ await ctx.db.delete('documents', delta._id);
95
+ }
96
+
97
+ logger.info('Auto-compaction completed', {
98
+ collection,
99
+ documentId,
100
+ deltasCompacted: deltas.length,
101
+ snapshotSize: compactedState.length,
102
+ });
103
+
104
+ return { deltasCompacted: deltas.length, snapshotSize: compactedState.length };
105
+ }
106
+
13
107
  export const insertDocument = mutation({
14
108
  args: {
15
- collectionName: v.string(),
109
+ collection: v.string(),
16
110
  documentId: v.string(),
17
111
  crdtBytes: v.bytes(),
18
112
  version: v.number(),
113
+ threshold: v.optional(v.number()),
19
114
  },
20
115
  returns: v.object({
21
116
  success: v.boolean(),
117
+ compacted: v.optional(v.boolean()),
22
118
  }),
23
119
  handler: async (ctx, args) => {
24
- // Append delta to event log (no duplicate check - event sourcing!)
25
120
  await ctx.db.insert('documents', {
26
- collectionName: args.collectionName,
121
+ collection: args.collection,
27
122
  documentId: args.documentId,
28
123
  crdtBytes: args.crdtBytes,
29
124
  version: args.version,
30
125
  timestamp: Date.now(),
31
- operationType: 'insert',
32
126
  });
33
127
 
34
- return { success: true };
128
+ // Auto-compact if size threshold exceeded
129
+ const compactionResult = await _maybeCompactDocument(
130
+ ctx,
131
+ args.collection,
132
+ args.documentId,
133
+ args.threshold ?? DEFAULT_SIZE_THRESHOLD
134
+ );
135
+
136
+ return {
137
+ success: true,
138
+ compacted: compactionResult !== null,
139
+ };
35
140
  },
36
141
  });
37
142
 
38
- /**
39
- * Update an existing document with new CRDT bytes (Yjs format).
40
- * Appends delta to event log (event sourcing pattern).
41
- *
42
- * @param collectionName - Collection identifier
43
- * @param documentId - Unique document identifier
44
- * @param crdtBytes - ArrayBuffer containing Yjs CRDT bytes (delta)
45
- * @param version - CRDT version number
46
- */
47
143
  export const updateDocument = mutation({
48
144
  args: {
49
- collectionName: v.string(),
145
+ collection: v.string(),
50
146
  documentId: v.string(),
51
147
  crdtBytes: v.bytes(),
52
148
  version: v.number(),
149
+ threshold: v.optional(v.number()),
53
150
  },
54
151
  returns: v.object({
55
152
  success: v.boolean(),
153
+ compacted: v.optional(v.boolean()),
56
154
  }),
57
155
  handler: async (ctx, args) => {
58
- // Append delta to event log (no check - event sourcing!)
59
156
  await ctx.db.insert('documents', {
60
- collectionName: args.collectionName,
157
+ collection: args.collection,
61
158
  documentId: args.documentId,
62
159
  crdtBytes: args.crdtBytes,
63
160
  version: args.version,
64
161
  timestamp: Date.now(),
65
- operationType: 'update',
66
162
  });
67
163
 
68
- return { success: true };
164
+ // Auto-compact if size threshold exceeded
165
+ const compactionResult = await _maybeCompactDocument(
166
+ ctx,
167
+ args.collection,
168
+ args.documentId,
169
+ args.threshold ?? DEFAULT_SIZE_THRESHOLD
170
+ );
171
+
172
+ return {
173
+ success: true,
174
+ compacted: compactionResult !== null,
175
+ };
69
176
  },
70
177
  });
71
178
 
72
- /**
73
- * Delete a document from CRDT storage.
74
- * Appends deletion delta to event log (preserves history).
75
- *
76
- * @param collectionName - Collection identifier
77
- * @param documentId - Unique document identifier
78
- * @param crdtBytes - ArrayBuffer containing Yjs deletion delta
79
- * @param version - CRDT version number
80
- */
81
179
  export const deleteDocument = mutation({
82
180
  args: {
83
- collectionName: v.string(),
181
+ collection: v.string(),
84
182
  documentId: v.string(),
85
183
  crdtBytes: v.bytes(),
86
184
  version: v.number(),
185
+ threshold: v.optional(v.number()),
87
186
  },
88
187
  returns: v.object({
89
188
  success: v.boolean(),
189
+ compacted: v.optional(v.boolean()),
90
190
  }),
91
191
  handler: async (ctx, args) => {
92
- // Append deletion delta to event log (preserve history!)
93
192
  await ctx.db.insert('documents', {
94
- collectionName: args.collectionName,
193
+ collection: args.collection,
95
194
  documentId: args.documentId,
96
195
  crdtBytes: args.crdtBytes,
97
196
  version: args.version,
98
197
  timestamp: Date.now(),
99
- operationType: 'delete',
100
198
  });
101
199
 
102
- return { success: true };
103
- },
104
- });
200
+ // Auto-compact if size threshold exceeded
201
+ const compactionResult = await _maybeCompactDocument(
202
+ ctx,
203
+ args.collection,
204
+ args.documentId,
205
+ args.threshold ?? DEFAULT_SIZE_THRESHOLD
206
+ );
105
207
 
106
- /**
107
- * Get complete event history for a document.
108
- * Returns all CRDT deltas in chronological order.
109
- *
110
- * Used for:
111
- * - Future recovery features (client-side)
112
- * - Audit trails
113
- * - Debugging
114
- *
115
- * @param collectionName - Collection identifier
116
- * @param documentId - Unique document identifier
117
- */
118
- export const getDocumentHistory = query({
119
- args: {
120
- collectionName: v.string(),
121
- documentId: v.string(),
122
- },
123
- returns: v.array(
124
- v.object({
125
- crdtBytes: v.bytes(),
126
- version: v.number(),
127
- timestamp: v.number(),
128
- operationType: v.string(),
129
- })
130
- ),
131
- handler: async (ctx, args) => {
132
- // Fetch ALL deltas for this document in chronological order
133
- const deltas = await ctx.db
134
- .query('documents')
135
- .withIndex('by_collection_document_version', (q) =>
136
- q.eq('collectionName', args.collectionName).eq('documentId', args.documentId)
137
- )
138
- .order('asc')
139
- .collect();
140
-
141
- return deltas.map((d) => ({
142
- crdtBytes: d.crdtBytes,
143
- version: d.version,
144
- timestamp: d.timestamp,
145
- operationType: d.operationType,
146
- }));
208
+ return {
209
+ success: true,
210
+ compacted: compactionResult !== null,
211
+ };
147
212
  },
148
213
  });
149
214
 
150
- /**
151
- * Stream CRDT changes for incremental replication.
152
- * Returns Yjs CRDT bytes for documents modified since the checkpoint.
153
- * Can be used for both polling (awaitReplication) and subscriptions (live updates).
154
- *
155
- * @param collectionName - Collection identifier
156
- * @param checkpoint - Last replication checkpoint
157
- * @param limit - Maximum number of changes to return (default: 100)
158
- */
159
215
  export const stream = query({
160
216
  args: {
161
- collectionName: v.string(),
217
+ collection: v.string(),
162
218
  checkpoint: v.object({
163
219
  lastModified: v.number(),
164
220
  }),
221
+ vector: v.optional(v.bytes()),
165
222
  limit: v.optional(v.number()),
166
223
  },
167
224
  returns: v.object({
168
225
  changes: v.array(
169
226
  v.object({
170
- documentId: v.string(),
227
+ documentId: v.optional(v.string()),
171
228
  crdtBytes: v.bytes(),
172
229
  version: v.number(),
173
230
  timestamp: v.number(),
231
+ operationType: v.string(),
174
232
  })
175
233
  ),
176
234
  checkpoint: v.object({
@@ -181,32 +239,227 @@ export const stream = query({
181
239
  handler: async (ctx, args) => {
182
240
  const limit = args.limit ?? 100;
183
241
 
242
+ // Get deltas newer than checkpoint
184
243
  const documents = await ctx.db
185
244
  .query('documents')
186
245
  .withIndex('by_timestamp', (q) =>
187
- q.eq('collectionName', args.collectionName).gt('timestamp', args.checkpoint.lastModified)
246
+ q.eq('collection', args.collection).gt('timestamp', args.checkpoint.lastModified)
188
247
  )
189
248
  .order('asc')
190
249
  .take(limit);
191
250
 
192
- const changes = documents.map((doc) => ({
193
- documentId: doc.documentId,
194
- crdtBytes: doc.crdtBytes,
195
- version: doc.version,
196
- timestamp: doc.timestamp,
197
- }));
198
-
199
- const newCheckpoint = {
200
- lastModified:
201
- documents.length > 0
202
- ? (documents[documents.length - 1]?.timestamp ?? args.checkpoint.lastModified)
203
- : args.checkpoint.lastModified,
251
+ if (documents.length > 0) {
252
+ const changes = documents.map((doc) => ({
253
+ documentId: doc.documentId,
254
+ crdtBytes: doc.crdtBytes,
255
+ version: doc.version,
256
+ timestamp: doc.timestamp,
257
+ operationType: OperationType.Delta,
258
+ }));
259
+
260
+ const newCheckpoint = {
261
+ lastModified: documents[documents.length - 1]?.timestamp ?? args.checkpoint.lastModified,
262
+ };
263
+
264
+ return {
265
+ changes,
266
+ checkpoint: newCheckpoint,
267
+ hasMore: documents.length === limit,
268
+ };
269
+ }
270
+
271
+ // Check for disparity - client checkpoint older than oldest delta
272
+ const oldestDelta = await ctx.db
273
+ .query('documents')
274
+ .withIndex('by_timestamp', (q) => q.eq('collection', args.collection))
275
+ .order('asc')
276
+ .first();
277
+
278
+ if (oldestDelta && args.checkpoint.lastModified < oldestDelta.timestamp) {
279
+ // Disparity detected - need to send all per-document snapshots
280
+ // Get all snapshots for this collection
281
+ const snapshots = await ctx.db
282
+ .query('snapshots')
283
+ .withIndex('by_document', (q) => q.eq('collection', args.collection))
284
+ .collect();
285
+
286
+ if (snapshots.length === 0) {
287
+ throw new Error(
288
+ `Disparity detected but no snapshots available for collection: ${args.collection}. ` +
289
+ `Client checkpoint: ${args.checkpoint.lastModified}, ` +
290
+ `Oldest delta: ${oldestDelta.timestamp}`
291
+ );
292
+ }
293
+
294
+ // Return all snapshots as changes
295
+ const changes = snapshots.map((snapshot) => ({
296
+ documentId: snapshot.documentId,
297
+ crdtBytes: snapshot.snapshotBytes,
298
+ version: 0,
299
+ timestamp: snapshot.createdAt,
300
+ operationType: OperationType.Snapshot,
301
+ }));
302
+
303
+ // Find the latest compaction timestamp to use as checkpoint
304
+ const latestTimestamp = Math.max(...snapshots.map((s) => s.latestCompactionTimestamp));
305
+
306
+ return {
307
+ changes,
308
+ checkpoint: {
309
+ lastModified: latestTimestamp,
310
+ },
311
+ hasMore: false,
312
+ };
313
+ }
314
+
315
+ return {
316
+ changes: [],
317
+ checkpoint: args.checkpoint,
318
+ hasMore: false,
204
319
  };
320
+ },
321
+ });
322
+
323
+ export const getInitialState = query({
324
+ args: {
325
+ collection: v.string(),
326
+ },
327
+ returns: v.union(
328
+ v.object({
329
+ crdtBytes: v.bytes(),
330
+ checkpoint: v.object({
331
+ lastModified: v.number(),
332
+ }),
333
+ }),
334
+ v.null()
335
+ ),
336
+ handler: async (ctx, args) => {
337
+ const logger = getLogger(['ssr']);
338
+
339
+ // Get all per-document snapshots for this collection
340
+ const snapshots = await ctx.db
341
+ .query('snapshots')
342
+ .withIndex('by_document', (q) => q.eq('collection', args.collection))
343
+ .collect();
344
+
345
+ // Get all deltas for this collection
346
+ const deltas = await ctx.db
347
+ .query('documents')
348
+ .withIndex('by_collection', (q) => q.eq('collection', args.collection))
349
+ .collect();
350
+
351
+ if (snapshots.length === 0 && deltas.length === 0) {
352
+ logger.info('No initial state available - collection is empty', {
353
+ collection: args.collection,
354
+ });
355
+ return null;
356
+ }
357
+
358
+ // Merge all snapshots and deltas together
359
+ const updates: Uint8Array[] = [];
360
+ let latestTimestamp = 0;
361
+
362
+ // Add all per-document snapshots
363
+ for (const snapshot of snapshots) {
364
+ updates.push(new Uint8Array(snapshot.snapshotBytes));
365
+ latestTimestamp = Math.max(latestTimestamp, snapshot.latestCompactionTimestamp);
366
+ }
367
+
368
+ // Add all deltas
369
+ const sorted = deltas.sort((a, b) => a.timestamp - b.timestamp);
370
+ for (const delta of sorted) {
371
+ updates.push(new Uint8Array(delta.crdtBytes));
372
+ latestTimestamp = Math.max(latestTimestamp, delta.timestamp);
373
+ }
374
+
375
+ logger.info('Reconstructing initial state', {
376
+ collection: args.collection,
377
+ snapshotCount: snapshots.length,
378
+ deltaCount: deltas.length,
379
+ });
380
+
381
+ const merged = Y.mergeUpdatesV2(updates);
382
+
383
+ logger.info('Initial state reconstructed', {
384
+ collection: args.collection,
385
+ originalSize: updates.reduce((sum, u) => sum + u.byteLength, 0),
386
+ mergedSize: merged.byteLength,
387
+ });
388
+
389
+ return {
390
+ crdtBytes: merged.buffer as ArrayBuffer,
391
+ checkpoint: {
392
+ lastModified: latestTimestamp,
393
+ },
394
+ };
395
+ },
396
+ });
397
+
398
+ /**
399
+ * Recovery query for state vector based sync.
400
+ * Client sends its state vector, server computes and returns the diff.
401
+ */
402
+ export const recovery = query({
403
+ args: {
404
+ collection: v.string(),
405
+ clientStateVector: v.bytes(),
406
+ },
407
+ returns: v.object({
408
+ diff: v.optional(v.bytes()),
409
+ serverStateVector: v.bytes(),
410
+ }),
411
+ handler: async (ctx, args) => {
412
+ const logger = getLogger(['recovery']);
413
+
414
+ // Get all snapshots for this collection
415
+ const snapshots = await ctx.db
416
+ .query('snapshots')
417
+ .withIndex('by_document', (q) => q.eq('collection', args.collection))
418
+ .collect();
419
+
420
+ // Get all deltas for this collection
421
+ const deltas = await ctx.db
422
+ .query('documents')
423
+ .withIndex('by_collection', (q) => q.eq('collection', args.collection))
424
+ .collect();
425
+
426
+ if (snapshots.length === 0 && deltas.length === 0) {
427
+ // Empty collection - return empty state vector
428
+ const emptyDoc = new Y.Doc();
429
+ const emptyVector = Y.encodeStateVector(emptyDoc);
430
+ emptyDoc.destroy();
431
+ return { serverStateVector: emptyVector.buffer as ArrayBuffer };
432
+ }
433
+
434
+ // Merge all snapshots and deltas into full server state
435
+ const updates: Uint8Array[] = [];
436
+
437
+ for (const snapshot of snapshots) {
438
+ updates.push(new Uint8Array(snapshot.snapshotBytes));
439
+ }
440
+
441
+ for (const delta of deltas) {
442
+ updates.push(new Uint8Array(delta.crdtBytes));
443
+ }
444
+
445
+ const mergedState = Y.mergeUpdatesV2(updates);
446
+
447
+ // Compute diff relative to client's state vector
448
+ const clientVector = new Uint8Array(args.clientStateVector);
449
+ const diff = Y.diffUpdateV2(mergedState, clientVector);
450
+ const serverVector = Y.encodeStateVectorFromUpdateV2(mergedState);
451
+
452
+ logger.info('Recovery sync computed', {
453
+ collection: args.collection,
454
+ snapshotCount: snapshots.length,
455
+ deltaCount: deltas.length,
456
+ diffSize: diff.byteLength,
457
+ hasDiff: diff.byteLength > 0,
458
+ });
205
459
 
206
460
  return {
207
- changes,
208
- checkpoint: newCheckpoint,
209
- hasMore: documents.length === limit,
461
+ diff: diff.byteLength > 0 ? (diff.buffer as ArrayBuffer) : undefined,
462
+ serverStateVector: serverVector.buffer as ArrayBuffer,
210
463
  };
211
464
  },
212
465
  });
@@ -3,14 +3,27 @@ import { v } from 'convex/values';
3
3
 
4
4
  export default defineSchema({
5
5
  documents: defineTable({
6
- collectionName: v.string(),
6
+ collection: v.string(),
7
7
  documentId: v.string(),
8
8
  crdtBytes: v.bytes(),
9
9
  version: v.number(),
10
10
  timestamp: v.number(),
11
- operationType: v.string(), // 'insert' | 'update' | 'delete'
12
11
  })
13
- .index('by_collection', ['collectionName'])
14
- .index('by_collection_document_version', ['collectionName', 'documentId', 'version'])
15
- .index('by_timestamp', ['collectionName', 'timestamp']),
12
+ .index('by_collection', ['collection'])
13
+ .index('by_collection_document_version', ['collection', 'documentId', 'version'])
14
+ .index('by_timestamp', ['collection', 'timestamp']),
15
+
16
+ snapshots: defineTable({
17
+ collection: v.string(),
18
+ documentId: v.string(),
19
+ snapshotBytes: v.bytes(),
20
+ latestCompactionTimestamp: v.number(),
21
+ createdAt: v.number(),
22
+ metadata: v.optional(
23
+ v.object({
24
+ deltaCount: v.number(),
25
+ totalSize: v.number(),
26
+ })
27
+ ),
28
+ }).index('by_document', ['collection', 'documentId']),
16
29
  });
package/src/env.d.ts ADDED
@@ -0,0 +1,31 @@
1
+ /**
2
+ * Type declarations for import.meta.env
3
+ *
4
+ * This provides TypeScript support for environment variables accessed via import.meta.env
5
+ * Used by both browser tests (via vitest.browser.config.ts define) and potentially Vite apps.
6
+ */
7
+
8
+ interface ImportMetaEnv {
9
+ readonly VITE_CONVEX_URL?: string;
10
+ // Add other VITE_ prefixed env vars as needed
11
+ }
12
+
13
+ interface ImportMeta {
14
+ readonly env: ImportMetaEnv;
15
+ }
16
+
17
+ /**
18
+ * Module declaration for y-leveldb.
19
+ * The package has broken package.json exports, so we declare the module manually.
20
+ */
21
+ declare module 'y-leveldb' {
22
+ import type { AbstractLevel } from 'abstract-level';
23
+ import type * as Y from 'yjs';
24
+
25
+ export class LeveldbPersistence {
26
+ constructor(location: string, options?: { level?: AbstractLevel<unknown, unknown> });
27
+ getYDoc(docName: string): Promise<Y.Doc>;
28
+ storeUpdate(docName: string, update: Uint8Array): Promise<void>;
29
+ destroy(): void;
30
+ }
31
+ }