@trestleinc/replicate 1.1.2 → 1.2.0-preview.0
This diff shows the published contents of the two package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +40 -41
- package/package.json +3 -1
- package/src/client/collection.ts +334 -523
- package/src/client/errors.ts +1 -1
- package/src/client/index.ts +4 -7
- package/src/client/merge.ts +2 -2
- package/src/client/persistence/indexeddb.ts +10 -14
- package/src/client/prose.ts +147 -203
- package/src/client/services/awareness.ts +373 -0
- package/src/client/services/context.ts +114 -0
- package/src/client/services/seq.ts +78 -0
- package/src/client/services/session.ts +20 -0
- package/src/client/services/sync.ts +122 -0
- package/src/client/subdocs.ts +263 -0
- package/src/component/_generated/api.ts +2 -2
- package/src/component/_generated/component.ts +73 -28
- package/src/component/mutations.ts +734 -0
- package/src/component/schema.ts +31 -14
- package/src/server/collection.ts +98 -0
- package/src/server/index.ts +2 -2
- package/src/server/{storage.ts → replicate.ts} +214 -75
- package/dist/client/index.d.ts +0 -314
- package/dist/client/index.js +0 -4027
- package/dist/component/_generated/api.d.ts +0 -31
- package/dist/component/_generated/api.js +0 -25
- package/dist/component/_generated/component.d.ts +0 -91
- package/dist/component/_generated/component.js +0 -1
- package/dist/component/_generated/dataModel.d.ts +0 -42
- package/dist/component/_generated/dataModel.js +0 -1
- package/dist/component/_generated/server.d.ts +0 -117
- package/dist/component/_generated/server.js +0 -73
- package/dist/component/_virtual/rolldown_runtime.js +0 -18
- package/dist/component/convex.config.d.ts +0 -6
- package/dist/component/convex.config.js +0 -8
- package/dist/component/logger.d.ts +0 -12
- package/dist/component/logger.js +0 -27
- package/dist/component/public.d.ts +0 -83
- package/dist/component/public.js +0 -325
- package/dist/component/schema.d.ts +0 -54
- package/dist/component/schema.js +0 -29
- package/dist/component/shared/types.d.ts +0 -9
- package/dist/component/shared/types.js +0 -15
- package/dist/server/index.d.ts +0 -135
- package/dist/server/index.js +0 -368
- package/dist/shared/index.d.ts +0 -29
- package/dist/shared/index.js +0 -1
- package/src/client/prose-schema.ts +0 -55
- package/src/client/services/cursor.ts +0 -109
- package/src/component/public.ts +0 -453
- package/src/server/builder.ts +0 -98
- package/src/client/{replicate.ts → ops.ts} +0 -0
package/dist/component/public.js
DELETED
@@ -1,325 +0,0 @@
-import { getLogger } from "./logger.js";
-import { mutation, query } from "./_generated/server.js";
-import { OperationType } from "./shared/types.js";
-import * as Y from "yjs";
-import { v } from "convex/values";
-
-//#region src/component/public.ts
-const DEFAULT_SIZE_THRESHOLD = 5e6;
-const DEFAULT_PEER_TIMEOUT = 300 * 1e3;
-async function getNextSeq(ctx, collection) {
-	return ((await ctx.db.query("documents").withIndex("by_seq", (q) => q.eq("collection", collection)).order("desc").first())?.seq ?? 0) + 1;
-}
-const insertDocument = mutation({
-	args: {
-		collection: v.string(),
-		documentId: v.string(),
-		crdtBytes: v.bytes()
-	},
-	returns: v.object({
-		success: v.boolean(),
-		seq: v.number()
-	}),
-	handler: async (ctx, args) => {
-		const seq = await getNextSeq(ctx, args.collection);
-		await ctx.db.insert("documents", {
-			collection: args.collection,
-			documentId: args.documentId,
-			crdtBytes: args.crdtBytes,
-			seq
-		});
-		return {
-			success: true,
-			seq
-		};
-	}
-});
-const updateDocument = mutation({
-	args: {
-		collection: v.string(),
-		documentId: v.string(),
-		crdtBytes: v.bytes()
-	},
-	returns: v.object({
-		success: v.boolean(),
-		seq: v.number()
-	}),
-	handler: async (ctx, args) => {
-		const seq = await getNextSeq(ctx, args.collection);
-		await ctx.db.insert("documents", {
-			collection: args.collection,
-			documentId: args.documentId,
-			crdtBytes: args.crdtBytes,
-			seq
-		});
-		return {
-			success: true,
-			seq
-		};
-	}
-});
-const deleteDocument = mutation({
-	args: {
-		collection: v.string(),
-		documentId: v.string(),
-		crdtBytes: v.bytes()
-	},
-	returns: v.object({
-		success: v.boolean(),
-		seq: v.number()
-	}),
-	handler: async (ctx, args) => {
-		const seq = await getNextSeq(ctx, args.collection);
-		await ctx.db.insert("documents", {
-			collection: args.collection,
-			documentId: args.documentId,
-			crdtBytes: args.crdtBytes,
-			seq
-		});
-		return {
-			success: true,
-			seq
-		};
-	}
-});
-const mark = mutation({
-	args: {
-		collection: v.string(),
-		peerId: v.string(),
-		syncedSeq: v.number()
-	},
-	returns: v.null(),
-	handler: async (ctx, args) => {
-		const existing = await ctx.db.query("peers").withIndex("by_collection_peer", (q) => q.eq("collection", args.collection).eq("peerId", args.peerId)).first();
-		if (existing) await ctx.db.patch(existing._id, {
-			lastSyncedSeq: Math.max(existing.lastSyncedSeq, args.syncedSeq),
-			lastSeenAt: Date.now()
-		});
-		else await ctx.db.insert("peers", {
-			collection: args.collection,
-			peerId: args.peerId,
-			lastSyncedSeq: args.syncedSeq,
-			lastSeenAt: Date.now()
-		});
-		return null;
-	}
-});
-const compact = mutation({
-	args: {
-		collection: v.string(),
-		documentId: v.string(),
-		snapshotBytes: v.bytes(),
-		stateVector: v.bytes(),
-		peerTimeout: v.optional(v.number())
-	},
-	returns: v.object({
-		success: v.boolean(),
-		removed: v.number(),
-		retained: v.number()
-	}),
-	handler: async (ctx, args) => {
-		const logger = getLogger(["compaction"]);
-		const now = Date.now();
-		const peerCutoff = now - (args.peerTimeout ?? DEFAULT_PEER_TIMEOUT);
-		const deltas = await ctx.db.query("documents").withIndex("by_collection_document", (q) => q.eq("collection", args.collection).eq("documentId", args.documentId)).collect();
-		const activePeers = await ctx.db.query("peers").withIndex("by_collection", (q) => q.eq("collection", args.collection)).filter((q) => q.gt(q.field("lastSeenAt"), peerCutoff)).collect();
-		const minSyncedSeq = activePeers.length > 0 ? Math.min(...activePeers.map((p) => p.lastSyncedSeq)) : Infinity;
-		const existingSnapshot = await ctx.db.query("snapshots").withIndex("by_document", (q) => q.eq("collection", args.collection).eq("documentId", args.documentId)).first();
-		if (existingSnapshot) await ctx.db.delete(existingSnapshot._id);
-		const snapshotSeq = deltas.length > 0 ? Math.max(...deltas.map((d) => d.seq)) : 0;
-		await ctx.db.insert("snapshots", {
-			collection: args.collection,
-			documentId: args.documentId,
-			snapshotBytes: args.snapshotBytes,
-			stateVector: args.stateVector,
-			snapshotSeq,
-			createdAt: now
-		});
-		let removed = 0;
-		for (const delta of deltas) if (delta.seq < minSyncedSeq) {
-			await ctx.db.delete(delta._id);
-			removed++;
-		}
-		logger.info("Compaction completed", {
-			collection: args.collection,
-			documentId: args.documentId,
-			removed,
-			retained: deltas.length - removed,
-			activePeers: activePeers.length,
-			minSyncedSeq
-		});
-		return {
-			success: true,
-			removed,
-			retained: deltas.length - removed
-		};
-	}
-});
-const stream = query({
-	args: {
-		collection: v.string(),
-		cursor: v.number(),
-		limit: v.optional(v.number()),
-		sizeThreshold: v.optional(v.number())
-	},
-	returns: v.object({
-		changes: v.array(v.object({
-			documentId: v.string(),
-			crdtBytes: v.bytes(),
-			seq: v.number(),
-			operationType: v.string()
-		})),
-		cursor: v.number(),
-		hasMore: v.boolean(),
-		compact: v.optional(v.string())
-	}),
-	handler: async (ctx, args) => {
-		const limit = args.limit ?? 100;
-		const sizeThreshold = args.sizeThreshold ?? DEFAULT_SIZE_THRESHOLD;
-		const documents = await ctx.db.query("documents").withIndex("by_seq", (q) => q.eq("collection", args.collection).gt("seq", args.cursor)).order("asc").take(limit);
-		if (documents.length > 0) {
-			const changes = documents.map((doc) => ({
-				documentId: doc.documentId,
-				crdtBytes: doc.crdtBytes,
-				seq: doc.seq,
-				operationType: OperationType.Delta
-			}));
-			const newCursor = documents[documents.length - 1]?.seq ?? args.cursor;
-			let compactHint;
-			const allDocs = await ctx.db.query("documents").withIndex("by_collection", (q) => q.eq("collection", args.collection)).collect();
-			const sizeByDocument = /* @__PURE__ */ new Map();
-			for (const doc of allDocs) {
-				const current = sizeByDocument.get(doc.documentId) ?? 0;
-				sizeByDocument.set(doc.documentId, current + doc.crdtBytes.byteLength);
-			}
-			for (const [docId, size] of sizeByDocument) if (size > sizeThreshold) {
-				compactHint = docId;
-				break;
-			}
-			return {
-				changes,
-				cursor: newCursor,
-				hasMore: documents.length === limit,
-				compact: compactHint
-			};
-		}
-		const oldestDelta = await ctx.db.query("documents").withIndex("by_seq", (q) => q.eq("collection", args.collection)).order("asc").first();
-		if (oldestDelta && args.cursor < oldestDelta.seq) {
-			const snapshots = await ctx.db.query("snapshots").withIndex("by_document", (q) => q.eq("collection", args.collection)).collect();
-			if (snapshots.length === 0) throw new Error(`Disparity detected but no snapshots available for collection: ${args.collection}. Client cursor: ${args.cursor}, Oldest delta seq: ${oldestDelta.seq}`);
-			return {
-				changes: snapshots.map((snapshot) => ({
-					documentId: snapshot.documentId,
-					crdtBytes: snapshot.snapshotBytes,
-					seq: snapshot.snapshotSeq,
-					operationType: OperationType.Snapshot
-				})),
-				cursor: Math.max(...snapshots.map((s) => s.snapshotSeq)),
-				hasMore: false,
-				compact: void 0
-			};
-		}
-		return {
-			changes: [],
-			cursor: args.cursor,
-			hasMore: false,
-			compact: void 0
-		};
-	}
-});
-const getInitialState = query({
-	args: { collection: v.string() },
-	returns: v.union(v.object({
-		crdtBytes: v.bytes(),
-		cursor: v.number()
-	}), v.null()),
-	handler: async (ctx, args) => {
-		const logger = getLogger(["ssr"]);
-		const snapshots = await ctx.db.query("snapshots").withIndex("by_document", (q) => q.eq("collection", args.collection)).collect();
-		const deltas = await ctx.db.query("documents").withIndex("by_collection", (q) => q.eq("collection", args.collection)).collect();
-		if (snapshots.length === 0 && deltas.length === 0) {
-			logger.info("No initial state available - collection is empty", { collection: args.collection });
-			return null;
-		}
-		const updates = [];
-		let latestSeq = 0;
-		for (const snapshot of snapshots) {
-			updates.push(new Uint8Array(snapshot.snapshotBytes));
-			latestSeq = Math.max(latestSeq, snapshot.snapshotSeq);
-		}
-		const sorted = deltas.sort((a, b) => a.seq - b.seq);
-		for (const delta of sorted) {
-			updates.push(new Uint8Array(delta.crdtBytes));
-			latestSeq = Math.max(latestSeq, delta.seq);
-		}
-		logger.info("Reconstructing initial state", {
-			collection: args.collection,
-			snapshotCount: snapshots.length,
-			deltaCount: deltas.length
-		});
-		const merged = Y.mergeUpdatesV2(updates);
-		logger.info("Initial state reconstructed", {
-			collection: args.collection,
-			originalSize: updates.reduce((sum, u) => sum + u.byteLength, 0),
-			mergedSize: merged.byteLength
-		});
-		return {
-			crdtBytes: merged.buffer,
-			cursor: latestSeq
-		};
-	}
-});
-const recovery = query({
-	args: {
-		collection: v.string(),
-		clientStateVector: v.bytes()
-	},
-	returns: v.object({
-		diff: v.optional(v.bytes()),
-		serverStateVector: v.bytes(),
-		cursor: v.number()
-	}),
-	handler: async (ctx, args) => {
-		const logger = getLogger(["recovery"]);
-		const snapshots = await ctx.db.query("snapshots").withIndex("by_document", (q) => q.eq("collection", args.collection)).collect();
-		const deltas = await ctx.db.query("documents").withIndex("by_collection", (q) => q.eq("collection", args.collection)).collect();
-		if (snapshots.length === 0 && deltas.length === 0) {
-			const emptyDoc = new Y.Doc();
-			const emptyVector = Y.encodeStateVector(emptyDoc);
-			emptyDoc.destroy();
-			return {
-				serverStateVector: emptyVector.buffer,
-				cursor: 0
-			};
-		}
-		const updates = [];
-		let latestSeq = 0;
-		for (const snapshot of snapshots) {
-			updates.push(new Uint8Array(snapshot.snapshotBytes));
-			latestSeq = Math.max(latestSeq, snapshot.snapshotSeq);
-		}
-		for (const delta of deltas) {
-			updates.push(new Uint8Array(delta.crdtBytes));
-			latestSeq = Math.max(latestSeq, delta.seq);
-		}
-		const mergedState = Y.mergeUpdatesV2(updates);
-		const clientVector = new Uint8Array(args.clientStateVector);
-		const diff = Y.diffUpdateV2(mergedState, clientVector);
-		const serverVector = Y.encodeStateVectorFromUpdateV2(mergedState);
-		logger.info("Recovery sync computed", {
-			collection: args.collection,
-			snapshotCount: snapshots.length,
-			deltaCount: deltas.length,
-			diffSize: diff.byteLength,
-			hasDiff: diff.byteLength > 0
-		});
-		return {
-			diff: diff.byteLength > 0 ? diff.buffer : void 0,
-			serverStateVector: serverVector.buffer,
-			cursor: latestSeq
-		};
-	}
-});
-
-//#endregion
-export { OperationType, compact, deleteDocument, getInitialState, insertDocument, mark, recovery, stream, updateDocument };
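The deleted `stream` query above defines the wire contract the 1.2.0 client services (`services/sync.ts`, `services/seq.ts`) still have to honor: page deltas forward by `seq`, and fall back to snapshots when the client's cursor predates the oldest retained delta. A minimal consumption sketch, assuming a Convex client and a `tasks` collection exported through your app's generated `api` (both names are illustrative, not part of this package):

```typescript
import { ConvexClient } from "convex/browser";
// Hypothetical generated API reference — any collection exported via
// replicate() exposes the same stream shape.
import { api } from "./_generated/api";

async function pullChanges(client: ConvexClient, cursor: number) {
  const changes: Array<{ documentId: string; crdtBytes: ArrayBuffer; seq: number; operationType: string }> = [];
  let hasMore = true;
  while (hasMore) {
    // Resume from the last applied seq; a full page (length === limit) sets hasMore.
    const page = await client.query(api.tasks.stream, { cursor });
    changes.push(...page.changes);
    cursor = page.cursor; // highest seq in the page, or the snapshot seq after a fallback
    hasMore = page.hasMore;
  }
  return { changes, cursor };
}
```

The optional `compact` field on each page names a document whose accumulated delta bytes exceed the size threshold; a client can use it as a cue to run the `compact` mutation.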
package/dist/component/schema.d.ts
DELETED
@@ -1,54 +0,0 @@
-import * as convex_server7 from "convex/server";
-import * as convex_values0 from "convex/values";
-
-//#region src/component/schema.d.ts
-declare const _default: convex_server7.SchemaDefinition<{
-	documents: convex_server7.TableDefinition<convex_values0.VObject<{
-		collection: string;
-		documentId: string;
-		crdtBytes: ArrayBuffer;
-		seq: number;
-	}, {
-		collection: convex_values0.VString<string, "required">;
-		documentId: convex_values0.VString<string, "required">;
-		crdtBytes: convex_values0.VBytes<ArrayBuffer, "required">;
-		seq: convex_values0.VFloat64<number, "required">;
-	}, "required", "collection" | "documentId" | "crdtBytes" | "seq">, {
-		by_collection: ["collection", "_creationTime"];
-		by_collection_document: ["collection", "documentId", "_creationTime"];
-		by_seq: ["collection", "seq", "_creationTime"];
-	}, {}, {}>;
-	snapshots: convex_server7.TableDefinition<convex_values0.VObject<{
-		collection: string;
-		documentId: string;
-		snapshotBytes: ArrayBuffer;
-		stateVector: ArrayBuffer;
-		snapshotSeq: number;
-		createdAt: number;
-	}, {
-		collection: convex_values0.VString<string, "required">;
-		documentId: convex_values0.VString<string, "required">;
-		snapshotBytes: convex_values0.VBytes<ArrayBuffer, "required">;
-		stateVector: convex_values0.VBytes<ArrayBuffer, "required">;
-		snapshotSeq: convex_values0.VFloat64<number, "required">;
-		createdAt: convex_values0.VFloat64<number, "required">;
-	}, "required", "collection" | "documentId" | "snapshotBytes" | "stateVector" | "snapshotSeq" | "createdAt">, {
-		by_document: ["collection", "documentId", "_creationTime"];
-	}, {}, {}>;
-	peers: convex_server7.TableDefinition<convex_values0.VObject<{
-		collection: string;
-		peerId: string;
-		lastSyncedSeq: number;
-		lastSeenAt: number;
-	}, {
-		collection: convex_values0.VString<string, "required">;
-		peerId: convex_values0.VString<string, "required">;
-		lastSyncedSeq: convex_values0.VFloat64<number, "required">;
-		lastSeenAt: convex_values0.VFloat64<number, "required">;
-	}, "required", "collection" | "peerId" | "lastSyncedSeq" | "lastSeenAt">, {
-		by_collection: ["collection", "_creationTime"];
-		by_collection_peer: ["collection", "peerId", "_creationTime"];
-	}, {}, {}>;
-}, true>;
-//#endregion
-export { _default as default };
package/dist/component/schema.js
DELETED
@@ -1,29 +0,0 @@
-import { defineSchema, defineTable } from "convex/server";
-import { v } from "convex/values";
-
-//#region src/component/schema.ts
-var schema_default = defineSchema({
-	documents: defineTable({
-		collection: v.string(),
-		documentId: v.string(),
-		crdtBytes: v.bytes(),
-		seq: v.number()
-	}).index("by_collection", ["collection"]).index("by_collection_document", ["collection", "documentId"]).index("by_seq", ["collection", "seq"]),
-	snapshots: defineTable({
-		collection: v.string(),
-		documentId: v.string(),
-		snapshotBytes: v.bytes(),
-		stateVector: v.bytes(),
-		snapshotSeq: v.number(),
-		createdAt: v.number()
-	}).index("by_document", ["collection", "documentId"]),
-	peers: defineTable({
-		collection: v.string(),
-		peerId: v.string(),
-		lastSyncedSeq: v.number(),
-		lastSeenAt: v.number()
-	}).index("by_collection", ["collection"]).index("by_collection_peer", ["collection", "peerId"])
-});
-
-//#endregion
-export { schema_default as default };
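The `peers` table above is what makes delta deletion safe in the deleted `compact` mutation: a delta may only be dropped once every recently-seen peer has acknowledged a `lastSyncedSeq` at or past it. Restated standalone, as a sketch mirroring the handler's logic (the `Peer` type is illustrative, not package code):

```typescript
// Watermark rule from compact(): only deltas every active peer has synced
// past can be removed after the snapshot is written.
interface Peer {
  lastSyncedSeq: number;
  lastSeenAt: number;
}

function compactionWatermark(peers: Peer[], now: number, peerTimeout = 300_000): number {
  const active = peers.filter((p) => p.lastSeenAt > now - peerTimeout);
  // No active peers: the snapshot alone suffices, so every delta falls below the watermark.
  return active.length > 0 ? Math.min(...active.map((p) => p.lastSyncedSeq)) : Infinity;
}

// A delta is deleted only when delta.seq < compactionWatermark(peers, Date.now()).
```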
package/dist/component/shared/types.js
DELETED
@@ -1,15 +0,0 @@
-//#region src/shared/types.ts
-/** Operation type for streaming changes */
-let OperationType = /* @__PURE__ */ function(OperationType$1) {
-	OperationType$1["Delta"] = "delta";
-	OperationType$1["Snapshot"] = "snapshot";
-	return OperationType$1;
-}({});
-const SIZE_MULTIPLIERS = {
-	kb: 1024,
-	mb: 1024 ** 2,
-	gb: 1024 ** 3
-};
-
-//#endregion
-export { OperationType };
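`SIZE_MULTIPLIERS` pairs with the `Size` template type (`"5mb"`, `"1gb"`) declared in `dist/server/index.d.ts` below. A hypothetical parser over those multipliers — the package's actual parsing helper is not shown in this diff:

```typescript
// Published shapes from shared/types; sizeToBytes is an illustrative helper.
type SizeUnit = "kb" | "mb" | "gb";
type Size = `${number}${SizeUnit}`;

const SIZE_MULTIPLIERS: Record<SizeUnit, number> = {
  kb: 1024,
  mb: 1024 ** 2,
  gb: 1024 ** 3,
};

function sizeToBytes(size: Size): number {
  const unit = size.slice(-2) as SizeUnit; // all units are two characters
  return Number(size.slice(0, -2)) * SIZE_MULTIPLIERS[unit];
}

sizeToBytes("5mb"); // 5242880 — e.g. a compaction sizeThreshold in bytes
```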
package/dist/server/index.d.ts
DELETED
@@ -1,135 +0,0 @@
-import * as convex_values0 from "convex/values";
-import * as convex_server0 from "convex/server";
-import { GenericDataModel, GenericMutationCtx, GenericQueryCtx } from "convex/server";
-
-//#region src/shared/types.d.ts
-
-type SizeUnit = "kb" | "mb" | "gb";
-type Size = `${number}${SizeUnit}`;
-type DurationUnit = "m" | "h" | "d";
-type Duration = `${number}${DurationUnit}`;
-interface CompactionConfig {
-	sizeThreshold: Size;
-	peerTimeout: Duration;
-}
-//#endregion
-//#region src/server/builder.d.ts
-/**
- * Configuration for replicate handlers (without component - used with factory pattern).
- */
-interface ReplicateConfig<T extends object> {
-	collection: string;
-	compaction?: Partial<CompactionConfig>;
-	hooks?: {
-		evalRead?: (ctx: GenericQueryCtx<GenericDataModel>, collection: string) => void | Promise<void>;
-		evalWrite?: (ctx: GenericMutationCtx<GenericDataModel>, doc: T) => void | Promise<void>;
-		evalRemove?: (ctx: GenericMutationCtx<GenericDataModel>, docId: string) => void | Promise<void>;
-		evalMark?: (ctx: GenericMutationCtx<GenericDataModel>, peerId: string) => void | Promise<void>;
-		evalCompact?: (ctx: GenericMutationCtx<GenericDataModel>, documentId: string) => void | Promise<void>;
-		onStream?: (ctx: GenericQueryCtx<GenericDataModel>, result: any) => void | Promise<void>;
-		onInsert?: (ctx: GenericMutationCtx<GenericDataModel>, doc: T) => void | Promise<void>;
-		onUpdate?: (ctx: GenericMutationCtx<GenericDataModel>, doc: T) => void | Promise<void>;
-		onRemove?: (ctx: GenericMutationCtx<GenericDataModel>, docId: string) => void | Promise<void>;
-		transform?: (docs: T[]) => T[] | Promise<T[]>;
-	};
-}
-/**
- * Create a replicate function bound to your component. Call this once in your
- * convex/replicate.ts file, then use the returned function for all collections.
- *
- * @example
- * ```typescript
- * // convex/replicate.ts (create once)
- * import { replicate } from '@trestleinc/replicate/server';
- * import { components } from './_generated/api';
- *
- * export const tasks = replicate(components.replicate)<Task>({ collection: 'tasks' });
- *
- * // Or bind once and reuse:
- * const r = replicate(components.replicate);
- * export const tasks = r<Task>({ collection: 'tasks' });
- * export const notebooks = r<Notebook>({ collection: 'notebooks' });
- * ```
- */
-declare function replicate(component: any): <T extends object>(config: ReplicateConfig<T>) => {
-	__collection: string;
-	stream: convex_server0.RegisteredQuery<"public", {
-		limit?: number | undefined;
-		sizeThreshold?: number | undefined;
-		cursor: number;
-	}, Promise<any>>;
-	material: convex_server0.RegisteredQuery<"public", {}, Promise<{
-		documents: T[];
-		cursor?: number;
-		count: number;
-		crdtBytes?: ArrayBuffer;
-	}>>;
-	recovery: convex_server0.RegisteredQuery<"public", {
-		clientStateVector: ArrayBuffer;
-	}, Promise<any>>;
-	insert: convex_server0.RegisteredMutation<"public", {
-		documentId: string;
-		crdtBytes: ArrayBuffer;
-		materializedDoc: any;
-	}, Promise<{
-		success: boolean;
-		seq: any;
-	}>>;
-	update: convex_server0.RegisteredMutation<"public", {
-		documentId: string;
-		crdtBytes: ArrayBuffer;
-		materializedDoc: any;
-	}, Promise<{
-		success: boolean;
-		seq: any;
-	}>>;
-	remove: convex_server0.RegisteredMutation<"public", {
-		documentId: string;
-		crdtBytes: ArrayBuffer;
-	}, Promise<{
-		success: boolean;
-		seq: any;
-	}>>;
-	mark: convex_server0.RegisteredMutation<"public", {
-		peerId: string;
-		syncedSeq: number;
-	}, Promise<null>>;
-	compact: convex_server0.RegisteredMutation<"public", {
-		peerTimeout?: number | undefined;
-		documentId: string;
-		snapshotBytes: ArrayBuffer;
-		stateVector: ArrayBuffer;
-	}, Promise<any>>;
-};
-//#endregion
-//#region src/server/schema.d.ts
-/**
- * Define a table with automatic timestamp field for replication.
- * All replicated tables must have an `id` field and define a `by_doc_id` index.
- *
- * @example
- * ```typescript
- * // convex/schema.ts
- * export default defineSchema({
- *   tasks: table(
- *     { id: v.string(), text: v.string(), isCompleted: v.boolean() },
- *     (t) => t.index('by_doc_id', ['id']).index('by_completed', ['isCompleted'])
- *   ),
- * });
- * ```
- */
-declare function table(userFields: Record<string, any>, applyIndexes?: (table: any) => any): any;
-//#endregion
-//#region src/server/index.d.ts
-declare const schema: {
-	readonly table: typeof table;
-	readonly prose: () => convex_values0.VObject<{
-		content?: any[] | undefined;
-		type: "doc";
-	}, {
-		type: convex_values0.VLiteral<"doc", "required">;
-		content: convex_values0.VArray<any[] | undefined, convex_values0.VAny<any, "required", string>, "optional">;
-	}, "required", "type" | "content">;
-};
-//#endregion
-export { type ReplicateConfig, replicate, schema };
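The `recovery` query deleted above (and re-exposed per collection by the `replicate` builder) returns `{ diff?, serverStateVector, cursor }`. The client half of that round-trip might look like this sketch, again assuming a Convex client and an illustrative `api.tasks` collection:

```typescript
import * as Y from "yjs";
import { ConvexClient } from "convex/browser";
// Hypothetical generated API reference, as in the earlier stream sketch.
import { api } from "./_generated/api";

async function recover(client: ConvexClient, doc: Y.Doc): Promise<number> {
  // Send our state vector; the server diffs its merged state against it.
  const stateVector = Y.encodeStateVector(doc);
  const res = await client.query(api.tasks.recovery, { clientStateVector: stateVector.buffer });
  // The server merges with Y.mergeUpdatesV2, so any diff arrives in the V2 update format.
  if (res.diff) Y.applyUpdateV2(doc, new Uint8Array(res.diff));
  return res.cursor; // resume streaming from this seq
}
```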