@trestleinc/replicate 0.1.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. package/README.md +356 -420
  2. package/dist/client/collection.d.ts +78 -76
  3. package/dist/client/errors.d.ts +59 -0
  4. package/dist/client/index.d.ts +22 -18
  5. package/dist/client/logger.d.ts +0 -1
  6. package/dist/client/merge.d.ts +77 -0
  7. package/dist/client/persistence/adapters/index.d.ts +8 -0
  8. package/dist/client/persistence/adapters/opsqlite.d.ts +46 -0
  9. package/dist/client/persistence/adapters/sqljs.d.ts +83 -0
  10. package/dist/client/persistence/index.d.ts +49 -0
  11. package/dist/client/persistence/indexeddb.d.ts +17 -0
  12. package/dist/client/persistence/memory.d.ts +16 -0
  13. package/dist/client/persistence/sqlite-browser.d.ts +51 -0
  14. package/dist/client/persistence/sqlite-level.d.ts +63 -0
  15. package/dist/client/persistence/sqlite-rn.d.ts +36 -0
  16. package/dist/client/persistence/sqlite.d.ts +47 -0
  17. package/dist/client/persistence/types.d.ts +42 -0
  18. package/dist/client/prose.d.ts +56 -0
  19. package/dist/client/replicate.d.ts +40 -0
  20. package/dist/client/services/checkpoint.d.ts +18 -0
  21. package/dist/client/services/reconciliation.d.ts +24 -0
  22. package/dist/component/_generated/api.d.ts +35 -0
  23. package/dist/component/_generated/api.js +3 -3
  24. package/dist/component/_generated/component.d.ts +89 -0
  25. package/dist/component/_generated/component.js +0 -0
  26. package/dist/component/_generated/dataModel.d.ts +45 -0
  27. package/dist/component/_generated/dataModel.js +0 -0
  28. package/{src → dist}/component/_generated/server.d.ts +9 -38
  29. package/dist/component/convex.config.d.ts +2 -2
  30. package/dist/component/convex.config.js +2 -1
  31. package/dist/component/logger.d.ts +8 -0
  32. package/dist/component/logger.js +30 -0
  33. package/dist/component/public.d.ts +36 -61
  34. package/dist/component/public.js +232 -58
  35. package/dist/component/schema.d.ts +32 -8
  36. package/dist/component/schema.js +19 -6
  37. package/dist/index.js +1553 -308
  38. package/dist/server/builder.d.ts +94 -0
  39. package/dist/server/index.d.ts +14 -17
  40. package/dist/server/schema.d.ts +17 -63
  41. package/dist/server/storage.d.ts +80 -0
  42. package/dist/server.js +268 -83
  43. package/dist/shared/index.d.ts +5 -0
  44. package/dist/shared/index.js +2 -0
  45. package/dist/shared/types.d.ts +50 -0
  46. package/dist/shared/types.js +6 -0
  47. package/dist/shared.js +6 -0
  48. package/package.json +59 -49
  49. package/src/client/collection.ts +877 -450
  50. package/src/client/errors.ts +45 -0
  51. package/src/client/index.ts +52 -26
  52. package/src/client/logger.ts +2 -28
  53. package/src/client/merge.ts +374 -0
  54. package/src/client/persistence/adapters/index.ts +8 -0
  55. package/src/client/persistence/adapters/opsqlite.ts +54 -0
  56. package/src/client/persistence/adapters/sqljs.ts +128 -0
  57. package/src/client/persistence/index.ts +54 -0
  58. package/src/client/persistence/indexeddb.ts +110 -0
  59. package/src/client/persistence/memory.ts +61 -0
  60. package/src/client/persistence/sqlite-browser.ts +107 -0
  61. package/src/client/persistence/sqlite-level.ts +407 -0
  62. package/src/client/persistence/sqlite-rn.ts +44 -0
  63. package/src/client/persistence/sqlite.ts +161 -0
  64. package/src/client/persistence/types.ts +49 -0
  65. package/src/client/prose.ts +369 -0
  66. package/src/client/replicate.ts +80 -0
  67. package/src/client/services/checkpoint.ts +86 -0
  68. package/src/client/services/reconciliation.ts +108 -0
  69. package/src/component/_generated/api.ts +52 -0
  70. package/src/component/_generated/component.ts +103 -0
  71. package/src/component/_generated/{dataModel.d.ts → dataModel.ts} +1 -1
  72. package/src/component/_generated/server.ts +161 -0
  73. package/src/component/convex.config.ts +3 -1
  74. package/src/component/logger.ts +36 -0
  75. package/src/component/public.ts +364 -111
  76. package/src/component/schema.ts +18 -5
  77. package/src/env.d.ts +31 -0
  78. package/src/server/builder.ts +85 -0
  79. package/src/server/index.ts +9 -24
  80. package/src/server/schema.ts +20 -76
  81. package/src/server/storage.ts +313 -0
  82. package/src/shared/index.ts +5 -0
  83. package/src/shared/types.ts +52 -0
  84. package/LICENSE.package +0 -201
  85. package/dist/client/storage.d.ts +0 -143
  86. package/dist/server/replication.d.ts +0 -122
  87. package/dist/server/ssr.d.ts +0 -79
  88. package/dist/ssr.js +0 -19
  89. package/src/client/storage.ts +0 -206
  90. package/src/component/_generated/api.d.ts +0 -95
  91. package/src/component/_generated/api.js +0 -23
  92. package/src/component/_generated/server.js +0 -90
  93. package/src/server/replication.ts +0 -244
  94. package/src/server/ssr.ts +0 -106
@@ -0,0 +1,30 @@
+ class ComponentLogger {
+     category;
+     constructor(category){
+         this.category = category;
+     }
+     format(level, message, context) {
+         const prefix = `[${this.category.join(':')}]`;
+         const contextStr = context ? ` ${JSON.stringify(context)}` : '';
+         return `${prefix} ${level}: ${message}${contextStr}`;
+     }
+     debug(message, context) {
+         console.log(this.format('DEBUG', message, context));
+     }
+     info(message, context) {
+         console.log(this.format('INFO', message, context));
+     }
+     warn(message, context) {
+         console.warn(this.format('WARN', message, context));
+     }
+     error(message, context) {
+         console.error(this.format('ERROR', message, context));
+     }
+ }
+ function getLogger(category) {
+     return new ComponentLogger([
+         'component',
+         ...category
+     ]);
+ }
+ export { getLogger };
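
For orientation, the getLogger export added above is what the other new component modules in this release (compaction, recovery) use for logging. A minimal usage sketch, with illustrative category and message values:

import { getLogger } from "./logger.js";

const logger = getLogger(['compaction']); // prefix becomes [component:compaction]
logger.info('Auto-compacting document', { documentId: 'doc-1', deltaCount: 42 });
// logs: [component:compaction] INFO: Auto-compacting document {"documentId":"doc-1","deltaCount":42}
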
@@ -1,87 +1,39 @@
- /**
-  * Insert a new document with CRDT bytes (Yjs format).
-  * Appends delta to event log (event sourcing pattern).
-  *
-  * @param collectionName - Collection identifier
-  * @param documentId - Unique document identifier
-  * @param crdtBytes - ArrayBuffer containing Yjs CRDT bytes (delta)
-  * @param version - CRDT version number
-  */
+ import { OperationType } from '$/shared/types.js';
+ export { OperationType };
  export declare const insertDocument: import("convex/server").RegisteredMutation<"public", {
-     collectionName: string;
+     threshold?: number | undefined;
+     collection: string;
      documentId: string;
      crdtBytes: ArrayBuffer;
      version: number;
  }, Promise<{
      success: boolean;
+     compacted: boolean;
  }>>;
- /**
-  * Update an existing document with new CRDT bytes (Yjs format).
-  * Appends delta to event log (event sourcing pattern).
-  *
-  * @param collectionName - Collection identifier
-  * @param documentId - Unique document identifier
-  * @param crdtBytes - ArrayBuffer containing Yjs CRDT bytes (delta)
-  * @param version - CRDT version number
-  */
  export declare const updateDocument: import("convex/server").RegisteredMutation<"public", {
-     collectionName: string;
+     threshold?: number | undefined;
+     collection: string;
      documentId: string;
      crdtBytes: ArrayBuffer;
      version: number;
  }, Promise<{
      success: boolean;
+     compacted: boolean;
  }>>;
- /**
-  * Delete a document from CRDT storage.
-  * Appends deletion delta to event log (preserves history).
-  *
-  * @param collectionName - Collection identifier
-  * @param documentId - Unique document identifier
-  * @param crdtBytes - ArrayBuffer containing Yjs deletion delta
-  * @param version - CRDT version number
-  */
  export declare const deleteDocument: import("convex/server").RegisteredMutation<"public", {
-     collectionName: string;
+     threshold?: number | undefined;
+     collection: string;
      documentId: string;
      crdtBytes: ArrayBuffer;
      version: number;
  }, Promise<{
      success: boolean;
+     compacted: boolean;
  }>>;
- /**
-  * Get complete event history for a document.
-  * Returns all CRDT deltas in chronological order.
-  *
-  * Used for:
-  * - Future recovery features (client-side)
-  * - Audit trails
-  * - Debugging
-  *
-  * @param collectionName - Collection identifier
-  * @param documentId - Unique document identifier
-  */
- export declare const getDocumentHistory: import("convex/server").RegisteredQuery<"public", {
-     collectionName: string;
-     documentId: string;
- }, Promise<{
-     crdtBytes: ArrayBuffer;
-     version: number;
-     timestamp: number;
-     operationType: string;
- }[]>>;
- /**
-  * Stream CRDT changes for incremental replication.
-  * Returns Yjs CRDT bytes for documents modified since the checkpoint.
-  * Can be used for both polling (awaitReplication) and subscriptions (live updates).
-  *
-  * @param collectionName - Collection identifier
-  * @param checkpoint - Last replication checkpoint
-  * @param limit - Maximum number of changes to return (default: 100)
-  */
  export declare const stream: import("convex/server").RegisteredQuery<"public", {
      limit?: number | undefined;
-     collectionName: string;
+     vector?: ArrayBuffer | undefined;
+     collection: string;
      checkpoint: {
          lastModified: number;
      };
@@ -91,9 +43,32 @@ export declare const stream: import("convex/server").RegisteredQuery<"public", {
          crdtBytes: ArrayBuffer;
          version: number;
          timestamp: number;
+         operationType: OperationType;
      }[];
      checkpoint: {
          lastModified: number;
      };
      hasMore: boolean;
  }>>;
+ export declare const getInitialState: import("convex/server").RegisteredQuery<"public", {
+     collection: string;
+ }, Promise<{
+     crdtBytes: ArrayBuffer;
+     checkpoint: {
+         lastModified: number;
+     };
+ } | null>>;
+ /**
+  * Recovery query for state vector based sync.
+  * Client sends its state vector, server computes and returns the diff.
+  */
+ export declare const recovery: import("convex/server").RegisteredQuery<"public", {
+     collection: string;
+     clientStateVector: ArrayBuffer;
+ }, Promise<{
+     serverStateVector: ArrayBuffer;
+     diff?: undefined;
+ } | {
+     diff: ArrayBuffer | undefined;
+     serverStateVector: ArrayBuffer;
+ }>>;
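
Net effect of the declaration changes above: collectionName becomes collection, the three mutations take an optional threshold and report a compacted flag, stream gains an optional vector argument and an operationType on each change, and two queries are new (getInitialState for bootstrapping full state plus a checkpoint, recovery for state-vector catch-up). A rough sketch of calling the new queries from an app-side Convex function follows; the components.replicate mount point and the "tasks" collection are illustrative assumptions, not part of this diff:

import { query } from "./_generated/server";
import { components } from "./_generated/api";

export const bootstrapTasks = query({
  args: {},
  handler: async (ctx) => {
    // Full reconstructed state plus a checkpoint to resume streaming from.
    const initial = await ctx.runQuery(components.replicate.public.getInitialState, {
      collection: "tasks",
    });
    if (initial === null) return null; // empty collection
    // Incremental changes recorded after that checkpoint.
    return await ctx.runQuery(components.replicate.public.stream, {
      collection: "tasks",
      checkpoint: initial.checkpoint,
      limit: 100,
    });
  },
});
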
@@ -1,112 +1,163 @@
+ import { Doc, applyUpdateV2, diffUpdateV2, encodeStateVector, encodeStateVectorFromUpdateV2, mergeUpdatesV2 } from "yjs";
  import { v } from "convex/values";
  import { mutation, query } from "./_generated/server.js";
+ import { getLogger } from "./logger.js";
+ import { OperationType } from "../shared/types.js";
+ const DEFAULT_SIZE_THRESHOLD = 5000000;
+ async function _maybeCompactDocument(ctx, collection, documentId, threshold = DEFAULT_SIZE_THRESHOLD) {
+     const logger = getLogger([
+         'compaction'
+     ]);
+     const deltas = await ctx.db.query('documents').withIndex('by_collection_document_version', (q)=>q.eq('collection', collection).eq('documentId', documentId)).collect();
+     const totalSize = deltas.reduce((sum, d)=>sum + d.crdtBytes.byteLength, 0);
+     if (totalSize < threshold) return null;
+     logger.info('Auto-compacting document', {
+         collection,
+         documentId,
+         deltaCount: deltas.length,
+         totalSize,
+         threshold
+     });
+     const sorted = deltas.sort((a, b)=>a.timestamp - b.timestamp);
+     const updates = sorted.map((d)=>new Uint8Array(d.crdtBytes));
+     const compactedState = mergeUpdatesV2(updates);
+     const testDoc = new Doc({
+         guid: `${collection}:${documentId}`
+     });
+     try {
+         applyUpdateV2(testDoc, compactedState);
+     } catch (error) {
+         logger.error('Compacted state validation failed', {
+             collection,
+             documentId,
+             error: String(error)
+         });
+         testDoc.destroy();
+         return null;
+     }
+     testDoc.destroy();
+     const existingSnapshot = await ctx.db.query('snapshots').withIndex('by_document', (q)=>q.eq('collection', collection).eq('documentId', documentId)).first();
+     if (existingSnapshot) await ctx.db.delete('snapshots', existingSnapshot._id);
+     await ctx.db.insert('snapshots', {
+         collection,
+         documentId,
+         snapshotBytes: compactedState.buffer,
+         latestCompactionTimestamp: sorted[sorted.length - 1].timestamp,
+         createdAt: Date.now(),
+         metadata: {
+             deltaCount: deltas.length,
+             totalSize
+         }
+     });
+     for (const delta of sorted)await ctx.db.delete('documents', delta._id);
+     logger.info('Auto-compaction completed', {
+         collection,
+         documentId,
+         deltasCompacted: deltas.length,
+         snapshotSize: compactedState.length
+     });
+     return {
+         deltasCompacted: deltas.length,
+         snapshotSize: compactedState.length
+     };
+ }
  const insertDocument = mutation({
      args: {
-         collectionName: v.string(),
+         collection: v.string(),
          documentId: v.string(),
          crdtBytes: v.bytes(),
-         version: v.number()
+         version: v.number(),
+         threshold: v.optional(v.number())
      },
      returns: v.object({
-         success: v.boolean()
+         success: v.boolean(),
+         compacted: v.optional(v.boolean())
      }),
      handler: async (ctx, args)=>{
          await ctx.db.insert('documents', {
-             collectionName: args.collectionName,
+             collection: args.collection,
              documentId: args.documentId,
              crdtBytes: args.crdtBytes,
              version: args.version,
-             timestamp: Date.now(),
-             operationType: 'insert'
+             timestamp: Date.now()
          });
+         const compactionResult = await _maybeCompactDocument(ctx, args.collection, args.documentId, args.threshold ?? DEFAULT_SIZE_THRESHOLD);
          return {
-             success: true
+             success: true,
+             compacted: null !== compactionResult
          };
      }
  });
  const updateDocument = mutation({
      args: {
-         collectionName: v.string(),
+         collection: v.string(),
          documentId: v.string(),
          crdtBytes: v.bytes(),
-         version: v.number()
+         version: v.number(),
+         threshold: v.optional(v.number())
      },
      returns: v.object({
-         success: v.boolean()
+         success: v.boolean(),
+         compacted: v.optional(v.boolean())
      }),
      handler: async (ctx, args)=>{
          await ctx.db.insert('documents', {
-             collectionName: args.collectionName,
+             collection: args.collection,
              documentId: args.documentId,
              crdtBytes: args.crdtBytes,
              version: args.version,
-             timestamp: Date.now(),
-             operationType: 'update'
+             timestamp: Date.now()
          });
+         const compactionResult = await _maybeCompactDocument(ctx, args.collection, args.documentId, args.threshold ?? DEFAULT_SIZE_THRESHOLD);
          return {
-             success: true
+             success: true,
+             compacted: null !== compactionResult
          };
      }
  });
  const deleteDocument = mutation({
      args: {
-         collectionName: v.string(),
+         collection: v.string(),
          documentId: v.string(),
          crdtBytes: v.bytes(),
-         version: v.number()
+         version: v.number(),
+         threshold: v.optional(v.number())
      },
      returns: v.object({
-         success: v.boolean()
+         success: v.boolean(),
+         compacted: v.optional(v.boolean())
      }),
      handler: async (ctx, args)=>{
          await ctx.db.insert('documents', {
-             collectionName: args.collectionName,
+             collection: args.collection,
              documentId: args.documentId,
              crdtBytes: args.crdtBytes,
              version: args.version,
-             timestamp: Date.now(),
-             operationType: 'delete'
+             timestamp: Date.now()
          });
+         const compactionResult = await _maybeCompactDocument(ctx, args.collection, args.documentId, args.threshold ?? DEFAULT_SIZE_THRESHOLD);
          return {
-             success: true
+             success: true,
+             compacted: null !== compactionResult
          };
      }
  });
- const getDocumentHistory = query({
-     args: {
-         collectionName: v.string(),
-         documentId: v.string()
-     },
-     returns: v.array(v.object({
-         crdtBytes: v.bytes(),
-         version: v.number(),
-         timestamp: v.number(),
-         operationType: v.string()
-     })),
-     handler: async (ctx, args)=>{
-         const deltas = await ctx.db.query('documents').withIndex('by_collection_document_version', (q)=>q.eq('collectionName', args.collectionName).eq('documentId', args.documentId)).order('asc').collect();
-         return deltas.map((d)=>({
-             crdtBytes: d.crdtBytes,
-             version: d.version,
-             timestamp: d.timestamp,
-             operationType: d.operationType
-         }));
-     }
- });
  const stream = query({
      args: {
-         collectionName: v.string(),
+         collection: v.string(),
          checkpoint: v.object({
              lastModified: v.number()
          }),
+         vector: v.optional(v.bytes()),
          limit: v.optional(v.number())
      },
      returns: v.object({
          changes: v.array(v.object({
-             documentId: v.string(),
+             documentId: v.optional(v.string()),
              crdtBytes: v.bytes(),
              version: v.number(),
-             timestamp: v.number()
+             timestamp: v.number(),
+             operationType: v.string()
          })),
          checkpoint: v.object({
              lastModified: v.number()
@@ -115,21 +166,144 @@ const stream = query({
      }),
      handler: async (ctx, args)=>{
          const limit = args.limit ?? 100;
-         const documents = await ctx.db.query('documents').withIndex('by_timestamp', (q)=>q.eq('collectionName', args.collectionName).gt('timestamp', args.checkpoint.lastModified)).order('asc').take(limit);
-         const changes = documents.map((doc)=>({
-             documentId: doc.documentId,
-             crdtBytes: doc.crdtBytes,
-             version: doc.version,
-             timestamp: doc.timestamp
-         }));
-         const newCheckpoint = {
-             lastModified: documents.length > 0 ? documents[documents.length - 1]?.timestamp ?? args.checkpoint.lastModified : args.checkpoint.lastModified
+         const documents = await ctx.db.query('documents').withIndex('by_timestamp', (q)=>q.eq('collection', args.collection).gt('timestamp', args.checkpoint.lastModified)).order('asc').take(limit);
+         if (documents.length > 0) {
+             const changes = documents.map((doc)=>({
+                 documentId: doc.documentId,
+                 crdtBytes: doc.crdtBytes,
+                 version: doc.version,
+                 timestamp: doc.timestamp,
+                 operationType: OperationType.Delta
+             }));
+             const newCheckpoint = {
+                 lastModified: documents[documents.length - 1]?.timestamp ?? args.checkpoint.lastModified
+             };
+             return {
+                 changes,
+                 checkpoint: newCheckpoint,
+                 hasMore: documents.length === limit
+             };
+         }
+         const oldestDelta = await ctx.db.query('documents').withIndex('by_timestamp', (q)=>q.eq('collection', args.collection)).order('asc').first();
+         if (oldestDelta && args.checkpoint.lastModified < oldestDelta.timestamp) {
+             const snapshots = await ctx.db.query('snapshots').withIndex('by_document', (q)=>q.eq('collection', args.collection)).collect();
+             if (0 === snapshots.length) throw new Error(`Disparity detected but no snapshots available for collection: ${args.collection}. Client checkpoint: ${args.checkpoint.lastModified}, Oldest delta: ${oldestDelta.timestamp}`);
+             const changes = snapshots.map((snapshot)=>({
+                 documentId: snapshot.documentId,
+                 crdtBytes: snapshot.snapshotBytes,
+                 version: 0,
+                 timestamp: snapshot.createdAt,
+                 operationType: OperationType.Snapshot
+             }));
+             const latestTimestamp = Math.max(...snapshots.map((s)=>s.latestCompactionTimestamp));
+             return {
+                 changes,
+                 checkpoint: {
+                     lastModified: latestTimestamp
+                 },
+                 hasMore: false
+             };
+         }
+         return {
+             changes: [],
+             checkpoint: args.checkpoint,
+             hasMore: false
          };
+     }
+ });
+ const getInitialState = query({
+     args: {
+         collection: v.string()
+     },
+     returns: v.union(v.object({
+         crdtBytes: v.bytes(),
+         checkpoint: v.object({
+             lastModified: v.number()
+         })
+     }), v["null"]()),
+     handler: async (ctx, args)=>{
+         const logger = getLogger([
+             'ssr'
+         ]);
+         const snapshots = await ctx.db.query('snapshots').withIndex('by_document', (q)=>q.eq('collection', args.collection)).collect();
+         const deltas = await ctx.db.query('documents').withIndex('by_collection', (q)=>q.eq('collection', args.collection)).collect();
+         if (0 === snapshots.length && 0 === deltas.length) {
+             logger.info('No initial state available - collection is empty', {
+                 collection: args.collection
+             });
+             return null;
+         }
+         const updates = [];
+         let latestTimestamp = 0;
+         for (const snapshot of snapshots){
+             updates.push(new Uint8Array(snapshot.snapshotBytes));
+             latestTimestamp = Math.max(latestTimestamp, snapshot.latestCompactionTimestamp);
+         }
+         const sorted = deltas.sort((a, b)=>a.timestamp - b.timestamp);
+         for (const delta of sorted){
+             updates.push(new Uint8Array(delta.crdtBytes));
+             latestTimestamp = Math.max(latestTimestamp, delta.timestamp);
+         }
+         logger.info('Reconstructing initial state', {
+             collection: args.collection,
+             snapshotCount: snapshots.length,
+             deltaCount: deltas.length
+         });
+         const merged = mergeUpdatesV2(updates);
+         logger.info('Initial state reconstructed', {
+             collection: args.collection,
+             originalSize: updates.reduce((sum, u)=>sum + u.byteLength, 0),
+             mergedSize: merged.byteLength
+         });
+         return {
+             crdtBytes: merged.buffer,
+             checkpoint: {
+                 lastModified: latestTimestamp
+             }
+         };
+     }
+ });
+ const recovery = query({
+     args: {
+         collection: v.string(),
+         clientStateVector: v.bytes()
+     },
+     returns: v.object({
+         diff: v.optional(v.bytes()),
+         serverStateVector: v.bytes()
+     }),
+     handler: async (ctx, args)=>{
+         const logger = getLogger([
+             'recovery'
+         ]);
+         const snapshots = await ctx.db.query('snapshots').withIndex('by_document', (q)=>q.eq('collection', args.collection)).collect();
+         const deltas = await ctx.db.query('documents').withIndex('by_collection', (q)=>q.eq('collection', args.collection)).collect();
+         if (0 === snapshots.length && 0 === deltas.length) {
+             const emptyDoc = new Doc();
+             const emptyVector = encodeStateVector(emptyDoc);
+             emptyDoc.destroy();
+             return {
+                 serverStateVector: emptyVector.buffer
+             };
+         }
+         const updates = [];
+         for (const snapshot of snapshots)updates.push(new Uint8Array(snapshot.snapshotBytes));
+         for (const delta of deltas)updates.push(new Uint8Array(delta.crdtBytes));
+         const mergedState = mergeUpdatesV2(updates);
+         const clientVector = new Uint8Array(args.clientStateVector);
+         const diff = diffUpdateV2(mergedState, clientVector);
+         const serverVector = encodeStateVectorFromUpdateV2(mergedState);
+         logger.info('Recovery sync computed', {
+             collection: args.collection,
+             snapshotCount: snapshots.length,
+             deltaCount: deltas.length,
+             diffSize: diff.byteLength,
+             hasDiff: diff.byteLength > 0
+         });
          return {
-             changes,
-             checkpoint: newCheckpoint,
-             hasMore: documents.length === limit
+             diff: diff.byteLength > 0 ? diff.buffer : void 0,
+             serverStateVector: serverVector.buffer
          };
      }
  });
- export { deleteDocument, getDocumentHistory, insertDocument, stream, updateDocument };
+ export { OperationType, deleteDocument, getInitialState, insertDocument, recovery, stream, updateDocument };
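
The recovery handler above merges all snapshots and deltas with mergeUpdatesV2 and then runs diffUpdateV2 against the caller's state vector, so a client only receives what it is missing. The client half of that exchange would look roughly like the sketch below; callRecoveryQuery is a placeholder for however the app invokes the query, but the Yjs calls mirror the ones in the handler:

import * as Y from "yjs";

// Client doc that may be missing updates after being offline.
const doc = new Y.Doc();

// Send our state vector; the server answers with a diff (if any) and its own vector.
const clientStateVector = Y.encodeStateVector(doc);
const result = await callRecoveryQuery({
  collection: "tasks",
  clientStateVector: clientStateVector.buffer, // Convex v.bytes() expects an ArrayBuffer
});

if (result.diff) {
  // Apply only the missing portion; the server diffs V2-encoded updates.
  Y.applyUpdateV2(doc, new Uint8Array(result.diff));
}
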
@@ -1,22 +1,46 @@
  declare const _default: import("convex/server").SchemaDefinition<{
      documents: import("convex/server").TableDefinition<import("convex/values").VObject<{
-         collectionName: string;
+         collection: string;
          documentId: string;
          crdtBytes: ArrayBuffer;
          version: number;
          timestamp: number;
-         operationType: string;
      }, {
-         collectionName: import("convex/values").VString<string, "required">;
+         collection: import("convex/values").VString<string, "required">;
          documentId: import("convex/values").VString<string, "required">;
          crdtBytes: import("convex/values").VBytes<ArrayBuffer, "required">;
          version: import("convex/values").VFloat64<number, "required">;
          timestamp: import("convex/values").VFloat64<number, "required">;
-         operationType: import("convex/values").VString<string, "required">;
-     }, "required", "collectionName" | "documentId" | "crdtBytes" | "version" | "timestamp" | "operationType">, {
-         by_collection: ["collectionName", "_creationTime"];
-         by_collection_document_version: ["collectionName", "documentId", "version", "_creationTime"];
-         by_timestamp: ["collectionName", "timestamp", "_creationTime"];
+     }, "required", "collection" | "documentId" | "crdtBytes" | "version" | "timestamp">, {
+         by_collection: ["collection", "_creationTime"];
+         by_collection_document_version: ["collection", "documentId", "version", "_creationTime"];
+         by_timestamp: ["collection", "timestamp", "_creationTime"];
+     }, {}, {}>;
+     snapshots: import("convex/server").TableDefinition<import("convex/values").VObject<{
+         metadata?: {
+             deltaCount: number;
+             totalSize: number;
+         } | undefined;
+         collection: string;
+         documentId: string;
+         createdAt: number;
+         snapshotBytes: ArrayBuffer;
+         latestCompactionTimestamp: number;
+     }, {
+         collection: import("convex/values").VString<string, "required">;
+         documentId: import("convex/values").VString<string, "required">;
+         snapshotBytes: import("convex/values").VBytes<ArrayBuffer, "required">;
+         latestCompactionTimestamp: import("convex/values").VFloat64<number, "required">;
+         createdAt: import("convex/values").VFloat64<number, "required">;
+         metadata: import("convex/values").VObject<{
+             deltaCount: number;
+             totalSize: number;
+         } | undefined, {
+             deltaCount: import("convex/values").VFloat64<number, "required">;
+             totalSize: import("convex/values").VFloat64<number, "required">;
+         }, "optional", "deltaCount" | "totalSize">;
+     }, "required", "collection" | "documentId" | "createdAt" | "metadata" | "snapshotBytes" | "latestCompactionTimestamp" | "metadata.deltaCount" | "metadata.totalSize">, {
+         by_document: ["collection", "documentId", "_creationTime"];
      }, {}, {}>;
  }, true>;
  export default _default;
@@ -2,21 +2,34 @@ import { defineSchema, defineTable } from "convex/server";
  import { v } from "convex/values";
  const schema = defineSchema({
      documents: defineTable({
-         collectionName: v.string(),
+         collection: v.string(),
          documentId: v.string(),
          crdtBytes: v.bytes(),
          version: v.number(),
-         timestamp: v.number(),
-         operationType: v.string()
+         timestamp: v.number()
      }).index('by_collection', [
-         'collectionName'
+         'collection'
      ]).index('by_collection_document_version', [
-         'collectionName',
+         'collection',
          'documentId',
          'version'
      ]).index('by_timestamp', [
-         'collectionName',
+         'collection',
          'timestamp'
+     ]),
+     snapshots: defineTable({
+         collection: v.string(),
+         documentId: v.string(),
+         snapshotBytes: v.bytes(),
+         latestCompactionTimestamp: v.number(),
+         createdAt: v.number(),
+         metadata: v.optional(v.object({
+             deltaCount: v.number(),
+             totalSize: v.number()
+         }))
+     }).index('by_document', [
+         'collection',
+         'documentId'
      ])
  });
  export { schema as default };
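
The snapshots table added above backs the auto-compaction introduced in the public module: once a document's accumulated deltas reach the 5,000,000-byte default threshold, they are merged with mergeUpdatesV2 into a single snapshot row and the delta rows are deleted, and stream falls back to serving whole snapshots (operationType Snapshot) when a client's checkpoint predates the oldest remaining delta. Callers can lower the trigger per write via the optional threshold argument. A hedged sketch, inside an app mutation with an assumed components.replicate mount point and illustrative values:

// Opt into earlier compaction (1 MB instead of the 5 MB default) for a single write.
const result = await ctx.runMutation(components.replicate.public.insertDocument, {
  collection: "tasks",
  documentId: "doc-1",
  crdtBytes: update.buffer, // Yjs V2 update bytes for this change
  version: 1,
  threshold: 1_000_000,
});
// result.compacted is true when this write pushed the accumulated deltas past the threshold.
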