@trestleinc/replicate 1.1.2 → 1.2.0-preview.1
This diff shows the published contents of two package versions as released to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
- package/README.md +40 -41
- package/dist/client/index.d.ts +34 -26
- package/dist/client/index.js +904 -732
- package/dist/component/_generated/api.d.ts +2 -2
- package/dist/component/_generated/component.d.ts +84 -27
- package/dist/component/convex.config.d.ts +2 -2
- package/dist/component/mutations.d.ts +131 -0
- package/dist/component/mutations.js +493 -0
- package/dist/component/schema.d.ts +71 -31
- package/dist/component/schema.js +37 -14
- package/dist/server/index.d.ts +58 -47
- package/dist/server/index.js +227 -132
- package/package.json +3 -1
- package/src/client/collection.ts +334 -523
- package/src/client/errors.ts +1 -1
- package/src/client/index.ts +4 -7
- package/src/client/merge.ts +2 -2
- package/src/client/persistence/indexeddb.ts +10 -14
- package/src/client/prose.ts +147 -203
- package/src/client/services/awareness.ts +373 -0
- package/src/client/services/context.ts +114 -0
- package/src/client/services/seq.ts +78 -0
- package/src/client/services/session.ts +20 -0
- package/src/client/services/sync.ts +122 -0
- package/src/client/subdocs.ts +263 -0
- package/src/component/_generated/api.ts +2 -2
- package/src/component/_generated/component.ts +73 -28
- package/src/component/mutations.ts +734 -0
- package/src/component/schema.ts +31 -14
- package/src/server/collection.ts +98 -0
- package/src/server/index.ts +2 -2
- package/src/server/{storage.ts → replicate.ts} +214 -75
- package/dist/component/public.d.ts +0 -83
- package/dist/component/public.js +0 -325
- package/src/client/prose-schema.ts +0 -55
- package/src/client/services/cursor.ts +0 -109
- package/src/component/public.ts +0 -453
- package/src/server/builder.ts +0 -98
- package/src/client/{replicate.ts → ops.ts} +0 -0
package/dist/component/public.d.ts
DELETED
@@ -1,83 +0,0 @@
-import { OperationType } from "./shared/types.js";
-import * as convex_server0 from "convex/server";
-
-//#region src/component/public.d.ts
-declare namespace public_d_exports {
-  export { OperationType, compact, deleteDocument, getInitialState, insertDocument, mark, recovery, stream, updateDocument };
-}
-declare const insertDocument: convex_server0.RegisteredMutation<"public", {
-  collection: string;
-  documentId: string;
-  crdtBytes: ArrayBuffer;
-}, Promise<{
-  success: boolean;
-  seq: number;
-}>>;
-declare const updateDocument: convex_server0.RegisteredMutation<"public", {
-  collection: string;
-  documentId: string;
-  crdtBytes: ArrayBuffer;
-}, Promise<{
-  success: boolean;
-  seq: number;
-}>>;
-declare const deleteDocument: convex_server0.RegisteredMutation<"public", {
-  collection: string;
-  documentId: string;
-  crdtBytes: ArrayBuffer;
-}, Promise<{
-  success: boolean;
-  seq: number;
-}>>;
-declare const mark: convex_server0.RegisteredMutation<"public", {
-  collection: string;
-  peerId: string;
-  syncedSeq: number;
-}, Promise<null>>;
-declare const compact: convex_server0.RegisteredMutation<"public", {
-  peerTimeout?: number | undefined;
-  collection: string;
-  documentId: string;
-  snapshotBytes: ArrayBuffer;
-  stateVector: ArrayBuffer;
-}, Promise<{
-  success: boolean;
-  removed: number;
-  retained: number;
-}>>;
-declare const stream: convex_server0.RegisteredQuery<"public", {
-  limit?: number | undefined;
-  sizeThreshold?: number | undefined;
-  collection: string;
-  cursor: number;
-}, Promise<{
-  changes: {
-    documentId: any;
-    crdtBytes: any;
-    seq: any;
-    operationType: OperationType;
-  }[];
-  cursor: number;
-  hasMore: boolean;
-  compact: string | undefined;
-}>>;
-declare const getInitialState: convex_server0.RegisteredQuery<"public", {
-  collection: string;
-}, Promise<{
-  crdtBytes: ArrayBuffer;
-  cursor: number;
-} | null>>;
-declare const recovery: convex_server0.RegisteredQuery<"public", {
-  collection: string;
-  clientStateVector: ArrayBuffer;
-}, Promise<{
-  serverStateVector: ArrayBuffer;
-  cursor: number;
-  diff?: undefined;
-} | {
-  diff: ArrayBuffer | undefined;
-  serverStateVector: ArrayBuffer;
-  cursor: number;
-}>>;
-//#endregion
-export { OperationType, compact, deleteDocument, getInitialState, insertDocument, mark, public_d_exports, recovery, stream, updateDocument };
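
These declarations were the component's public wire surface in 1.1.x; the 1.2.0 preview replaces them with the new mutations module listed above. For orientation, a hedged sketch of how an app might have forwarded to this API through Convex component calls — the components.replicate mount name, the "tasks" collection, and the saveDoc wrapper are illustrative assumptions, not part of this package:

// Hypothetical app-side wrapper around the removed 1.1.x component API.
import { mutation } from "./_generated/server";
import { components } from "./_generated/api";
import { v } from "convex/values";

export const saveDoc = mutation({
  args: { documentId: v.string(), crdtBytes: v.bytes() },
  handler: async (ctx, args) => {
    // Per the declaration above, insertDocument resolves to { success, seq }.
    return await ctx.runMutation(components.replicate.public.insertDocument, {
      collection: "tasks",
      documentId: args.documentId,
      crdtBytes: args.crdtBytes,
    });
  },
});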
package/dist/component/public.js
DELETED
@@ -1,325 +0,0 @@
-import { getLogger } from "./logger.js";
-import { mutation, query } from "./_generated/server.js";
-import { OperationType } from "./shared/types.js";
-import * as Y from "yjs";
-import { v } from "convex/values";
-
-//#region src/component/public.ts
-const DEFAULT_SIZE_THRESHOLD = 5e6;
-const DEFAULT_PEER_TIMEOUT = 300 * 1e3;
-async function getNextSeq(ctx, collection) {
-  return ((await ctx.db.query("documents").withIndex("by_seq", (q) => q.eq("collection", collection)).order("desc").first())?.seq ?? 0) + 1;
-}
-const insertDocument = mutation({
-  args: {
-    collection: v.string(),
-    documentId: v.string(),
-    crdtBytes: v.bytes()
-  },
-  returns: v.object({
-    success: v.boolean(),
-    seq: v.number()
-  }),
-  handler: async (ctx, args) => {
-    const seq = await getNextSeq(ctx, args.collection);
-    await ctx.db.insert("documents", {
-      collection: args.collection,
-      documentId: args.documentId,
-      crdtBytes: args.crdtBytes,
-      seq
-    });
-    return {
-      success: true,
-      seq
-    };
-  }
-});
-const updateDocument = mutation({
-  args: {
-    collection: v.string(),
-    documentId: v.string(),
-    crdtBytes: v.bytes()
-  },
-  returns: v.object({
-    success: v.boolean(),
-    seq: v.number()
-  }),
-  handler: async (ctx, args) => {
-    const seq = await getNextSeq(ctx, args.collection);
-    await ctx.db.insert("documents", {
-      collection: args.collection,
-      documentId: args.documentId,
-      crdtBytes: args.crdtBytes,
-      seq
-    });
-    return {
-      success: true,
-      seq
-    };
-  }
-});
-const deleteDocument = mutation({
-  args: {
-    collection: v.string(),
-    documentId: v.string(),
-    crdtBytes: v.bytes()
-  },
-  returns: v.object({
-    success: v.boolean(),
-    seq: v.number()
-  }),
-  handler: async (ctx, args) => {
-    const seq = await getNextSeq(ctx, args.collection);
-    await ctx.db.insert("documents", {
-      collection: args.collection,
-      documentId: args.documentId,
-      crdtBytes: args.crdtBytes,
-      seq
-    });
-    return {
-      success: true,
-      seq
-    };
-  }
-});
-const mark = mutation({
-  args: {
-    collection: v.string(),
-    peerId: v.string(),
-    syncedSeq: v.number()
-  },
-  returns: v.null(),
-  handler: async (ctx, args) => {
-    const existing = await ctx.db.query("peers").withIndex("by_collection_peer", (q) => q.eq("collection", args.collection).eq("peerId", args.peerId)).first();
-    if (existing) await ctx.db.patch(existing._id, {
-      lastSyncedSeq: Math.max(existing.lastSyncedSeq, args.syncedSeq),
-      lastSeenAt: Date.now()
-    });
-    else await ctx.db.insert("peers", {
-      collection: args.collection,
-      peerId: args.peerId,
-      lastSyncedSeq: args.syncedSeq,
-      lastSeenAt: Date.now()
-    });
-    return null;
-  }
-});
-const compact = mutation({
-  args: {
-    collection: v.string(),
-    documentId: v.string(),
-    snapshotBytes: v.bytes(),
-    stateVector: v.bytes(),
-    peerTimeout: v.optional(v.number())
-  },
-  returns: v.object({
-    success: v.boolean(),
-    removed: v.number(),
-    retained: v.number()
-  }),
-  handler: async (ctx, args) => {
-    const logger = getLogger(["compaction"]);
-    const now = Date.now();
-    const peerCutoff = now - (args.peerTimeout ?? DEFAULT_PEER_TIMEOUT);
-    const deltas = await ctx.db.query("documents").withIndex("by_collection_document", (q) => q.eq("collection", args.collection).eq("documentId", args.documentId)).collect();
-    const activePeers = await ctx.db.query("peers").withIndex("by_collection", (q) => q.eq("collection", args.collection)).filter((q) => q.gt(q.field("lastSeenAt"), peerCutoff)).collect();
-    const minSyncedSeq = activePeers.length > 0 ? Math.min(...activePeers.map((p) => p.lastSyncedSeq)) : Infinity;
-    const existingSnapshot = await ctx.db.query("snapshots").withIndex("by_document", (q) => q.eq("collection", args.collection).eq("documentId", args.documentId)).first();
-    if (existingSnapshot) await ctx.db.delete(existingSnapshot._id);
-    const snapshotSeq = deltas.length > 0 ? Math.max(...deltas.map((d) => d.seq)) : 0;
-    await ctx.db.insert("snapshots", {
-      collection: args.collection,
-      documentId: args.documentId,
-      snapshotBytes: args.snapshotBytes,
-      stateVector: args.stateVector,
-      snapshotSeq,
-      createdAt: now
-    });
-    let removed = 0;
-    for (const delta of deltas) if (delta.seq < minSyncedSeq) {
-      await ctx.db.delete(delta._id);
-      removed++;
-    }
-    logger.info("Compaction completed", {
-      collection: args.collection,
-      documentId: args.documentId,
-      removed,
-      retained: deltas.length - removed,
-      activePeers: activePeers.length,
-      minSyncedSeq
-    });
-    return {
-      success: true,
-      removed,
-      retained: deltas.length - removed
-    };
-  }
-});
-const stream = query({
-  args: {
-    collection: v.string(),
-    cursor: v.number(),
-    limit: v.optional(v.number()),
-    sizeThreshold: v.optional(v.number())
-  },
-  returns: v.object({
-    changes: v.array(v.object({
-      documentId: v.string(),
-      crdtBytes: v.bytes(),
-      seq: v.number(),
-      operationType: v.string()
-    })),
-    cursor: v.number(),
-    hasMore: v.boolean(),
-    compact: v.optional(v.string())
-  }),
-  handler: async (ctx, args) => {
-    const limit = args.limit ?? 100;
-    const sizeThreshold = args.sizeThreshold ?? DEFAULT_SIZE_THRESHOLD;
-    const documents = await ctx.db.query("documents").withIndex("by_seq", (q) => q.eq("collection", args.collection).gt("seq", args.cursor)).order("asc").take(limit);
-    if (documents.length > 0) {
-      const changes = documents.map((doc) => ({
-        documentId: doc.documentId,
-        crdtBytes: doc.crdtBytes,
-        seq: doc.seq,
-        operationType: OperationType.Delta
-      }));
-      const newCursor = documents[documents.length - 1]?.seq ?? args.cursor;
-      let compactHint;
-      const allDocs = await ctx.db.query("documents").withIndex("by_collection", (q) => q.eq("collection", args.collection)).collect();
-      const sizeByDocument = /* @__PURE__ */ new Map();
-      for (const doc of allDocs) {
-        const current = sizeByDocument.get(doc.documentId) ?? 0;
-        sizeByDocument.set(doc.documentId, current + doc.crdtBytes.byteLength);
-      }
-      for (const [docId, size] of sizeByDocument) if (size > sizeThreshold) {
-        compactHint = docId;
-        break;
-      }
-      return {
-        changes,
-        cursor: newCursor,
-        hasMore: documents.length === limit,
-        compact: compactHint
-      };
-    }
-    const oldestDelta = await ctx.db.query("documents").withIndex("by_seq", (q) => q.eq("collection", args.collection)).order("asc").first();
-    if (oldestDelta && args.cursor < oldestDelta.seq) {
-      const snapshots = await ctx.db.query("snapshots").withIndex("by_document", (q) => q.eq("collection", args.collection)).collect();
-      if (snapshots.length === 0) throw new Error(`Disparity detected but no snapshots available for collection: ${args.collection}. Client cursor: ${args.cursor}, Oldest delta seq: ${oldestDelta.seq}`);
-      return {
-        changes: snapshots.map((snapshot) => ({
-          documentId: snapshot.documentId,
-          crdtBytes: snapshot.snapshotBytes,
-          seq: snapshot.snapshotSeq,
-          operationType: OperationType.Snapshot
-        })),
-        cursor: Math.max(...snapshots.map((s) => s.snapshotSeq)),
-        hasMore: false,
-        compact: void 0
-      };
-    }
-    return {
-      changes: [],
-      cursor: args.cursor,
-      hasMore: false,
-      compact: void 0
-    };
-  }
-});
-const getInitialState = query({
-  args: { collection: v.string() },
-  returns: v.union(v.object({
-    crdtBytes: v.bytes(),
-    cursor: v.number()
-  }), v.null()),
-  handler: async (ctx, args) => {
-    const logger = getLogger(["ssr"]);
-    const snapshots = await ctx.db.query("snapshots").withIndex("by_document", (q) => q.eq("collection", args.collection)).collect();
-    const deltas = await ctx.db.query("documents").withIndex("by_collection", (q) => q.eq("collection", args.collection)).collect();
-    if (snapshots.length === 0 && deltas.length === 0) {
-      logger.info("No initial state available - collection is empty", { collection: args.collection });
-      return null;
-    }
-    const updates = [];
-    let latestSeq = 0;
-    for (const snapshot of snapshots) {
-      updates.push(new Uint8Array(snapshot.snapshotBytes));
-      latestSeq = Math.max(latestSeq, snapshot.snapshotSeq);
-    }
-    const sorted = deltas.sort((a, b) => a.seq - b.seq);
-    for (const delta of sorted) {
-      updates.push(new Uint8Array(delta.crdtBytes));
-      latestSeq = Math.max(latestSeq, delta.seq);
-    }
-    logger.info("Reconstructing initial state", {
-      collection: args.collection,
-      snapshotCount: snapshots.length,
-      deltaCount: deltas.length
-    });
-    const merged = Y.mergeUpdatesV2(updates);
-    logger.info("Initial state reconstructed", {
-      collection: args.collection,
-      originalSize: updates.reduce((sum, u) => sum + u.byteLength, 0),
-      mergedSize: merged.byteLength
-    });
-    return {
-      crdtBytes: merged.buffer,
-      cursor: latestSeq
-    };
-  }
-});
-const recovery = query({
-  args: {
-    collection: v.string(),
-    clientStateVector: v.bytes()
-  },
-  returns: v.object({
-    diff: v.optional(v.bytes()),
-    serverStateVector: v.bytes(),
-    cursor: v.number()
-  }),
-  handler: async (ctx, args) => {
-    const logger = getLogger(["recovery"]);
-    const snapshots = await ctx.db.query("snapshots").withIndex("by_document", (q) => q.eq("collection", args.collection)).collect();
-    const deltas = await ctx.db.query("documents").withIndex("by_collection", (q) => q.eq("collection", args.collection)).collect();
-    if (snapshots.length === 0 && deltas.length === 0) {
-      const emptyDoc = new Y.Doc();
-      const emptyVector = Y.encodeStateVector(emptyDoc);
-      emptyDoc.destroy();
-      return {
-        serverStateVector: emptyVector.buffer,
-        cursor: 0
-      };
-    }
-    const updates = [];
-    let latestSeq = 0;
-    for (const snapshot of snapshots) {
-      updates.push(new Uint8Array(snapshot.snapshotBytes));
-      latestSeq = Math.max(latestSeq, snapshot.snapshotSeq);
-    }
-    for (const delta of deltas) {
-      updates.push(new Uint8Array(delta.crdtBytes));
-      latestSeq = Math.max(latestSeq, delta.seq);
-    }
-    const mergedState = Y.mergeUpdatesV2(updates);
-    const clientVector = new Uint8Array(args.clientStateVector);
-    const diff = Y.diffUpdateV2(mergedState, clientVector);
-    const serverVector = Y.encodeStateVectorFromUpdateV2(mergedState);
-    logger.info("Recovery sync computed", {
-      collection: args.collection,
-      snapshotCount: snapshots.length,
-      deltaCount: deltas.length,
-      diffSize: diff.byteLength,
-      hasDiff: diff.byteLength > 0
-    });
-    return {
-      diff: diff.byteLength > 0 ? diff.buffer : void 0,
-      serverStateVector: serverVector.buffer,
-      cursor: latestSeq
-    };
-  }
-});
-
-//#endregion
-export { OperationType, compact, deleteDocument, getInitialState, insertDocument, mark, recovery, stream, updateDocument };
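
The removed implementation spells out the 1.1.x sync protocol: each write appends a delta under a monotonically increasing per-collection seq, clients page forward from a cursor via stream, acknowledge progress with mark so compact knows which deltas every active peer has applied, and a cursor that has fallen behind the oldest retained delta is answered with snapshots instead. A minimal client-side polling sketch — the client transport and the applyUpdate callback are assumptions for illustration, not part of this package:

// Hypothetical poll loop against the removed 1.1.x protocol.
declare function applyUpdate(documentId: string, update: Uint8Array): void; // app-defined Yjs apply step

async function syncOnce(
  client: { query: Function; mutation: Function }, // stand-in transport
  collection: string,
  peerId: string,
  cursor: number,
): Promise<number> {
  let current = cursor;
  let hasMore = true;
  while (hasMore) {
    const page = await client.query("stream", { collection, cursor: current });
    for (const change of page.changes) {
      // Deltas and snapshots are both Yjs V2 updates, so one apply path handles both.
      applyUpdate(change.documentId, new Uint8Array(change.crdtBytes));
    }
    current = page.cursor;
    hasMore = page.hasMore;
  }
  // Acknowledge the reached seq so compaction can safely drop older deltas.
  await client.mutation("mark", { collection, peerId, syncedSeq: current });
  return current;
}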
package/src/client/prose-schema.ts
DELETED
@@ -1,55 +0,0 @@
-import { z } from "zod";
-import type { ProseValue } from "$/shared/types";
-
-const PROSE_MARKER = Symbol.for("replicate:prose");
-
-function createProseSchema(): z.ZodType<ProseValue> {
-  const schema = z.custom<ProseValue>(
-    (val) => {
-      if (val == null) return true;
-      if (typeof val !== "object") return false;
-      return (val as { type?: string }).type === "doc";
-    },
-    { message: "Expected prose document with type \"doc\"" },
-  );
-
-  Object.defineProperty(schema, PROSE_MARKER, { value: true, writable: false });
-
-  return schema;
-}
-
-function emptyProse(): ProseValue {
-  return { type: "doc", content: [] } as unknown as ProseValue;
-}
-
-export function prose(): z.ZodType<ProseValue> {
-  return createProseSchema();
-}
-
-prose.empty = emptyProse;
-
-export function isProseSchema(schema: unknown): boolean {
-  return (
-    schema != null
-    && typeof schema === "object"
-    && PROSE_MARKER in schema
-    && (schema as Record<symbol, unknown>)[PROSE_MARKER] === true
-  );
-}
-
-export function extractProseFields(schema: z.ZodObject<z.ZodRawShape>): string[] {
-  const fields: string[] = [];
-
-  for (const [key, fieldSchema] of Object.entries(schema.shape)) {
-    let unwrapped = fieldSchema;
-    while (unwrapped instanceof z.ZodOptional || unwrapped instanceof z.ZodNullable) {
-      unwrapped = unwrapped.unwrap();
-    }
-
-    if (isProseSchema(unwrapped)) {
-      fields.push(key);
-    }
-  }
-
-  return fields;
-}
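
These helpers implement a brand-marker pattern: prose() returns an ordinary zod schema tagged with a well-known Symbol, and extractProseFields walks an object shape, unwrapping .optional() and .nullable(), to find the tagged fields. A hedged usage sketch; the taskSchema below is invented for illustration:

// Hypothetical usage of the removed helpers.
import { z } from "zod";
import { prose, extractProseFields } from "$/client/prose-schema";

const taskSchema = z.object({
  title: z.string(),
  body: prose().optional(), // rich-text field, still detected through the .optional() wrapper
});

const proseFields = extractProseFields(taskSchema); // => ["body"]
const blank = prose.empty(); // => { type: "doc", content: [] }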
package/src/client/services/cursor.ts
DELETED
@@ -1,109 +0,0 @@
-import { Effect, Context, Layer } from "effect";
-import { IDBError, IDBWriteError } from "$/client/errors";
-import type { KeyValueStore } from "$/client/persistence/types";
-
-export type Cursor = number;
-
-export class CursorService extends Context.Tag("CursorService")<
-  CursorService,
-  {
-    readonly loadCursor: (collection: string) => Effect.Effect<Cursor, IDBError>;
-    readonly saveCursor: (collection: string, cursor: Cursor) => Effect.Effect<void, IDBWriteError>;
-    readonly clearCursor: (collection: string) => Effect.Effect<void, IDBError>;
-    readonly loadPeerId: (collection: string) => Effect.Effect<string, IDBError | IDBWriteError>;
-  }
->() {}
-
-function generatePeerId(): string {
-  return crypto.randomUUID();
-}
-
-export function createCursorLayer(kv: KeyValueStore) {
-  return Layer.succeed(
-    CursorService,
-    CursorService.of({
-      loadCursor: collection =>
-        Effect.gen(function* (_) {
-          const key = `cursor:${collection}`;
-          const stored = yield* _(
-            Effect.tryPromise({
-              try: () => kv.get<Cursor>(key),
-              catch: cause => new IDBError({ operation: "get", key, cause }),
-            }),
-          );
-
-          if (stored !== undefined) {
-            yield* _(
-              Effect.logDebug("Loaded cursor from storage", {
-                collection,
-                cursor: stored,
-              }),
-            );
-            return stored;
-          }
-
-          yield* _(
-            Effect.logDebug("No stored cursor, using default", {
-              collection,
-            }),
-          );
-          return 0;
-        }),
-
-      saveCursor: (collection, cursor) =>
-        Effect.gen(function* (_) {
-          const key = `cursor:${collection}`;
-          yield* _(
-            Effect.tryPromise({
-              try: () => kv.set(key, cursor),
-              catch: cause => new IDBWriteError({ key, value: cursor, cause }),
-            }),
-          );
-          yield* _(
-            Effect.logDebug("Cursor saved", {
-              collection,
-              cursor,
-            }),
-          );
-        }),
-
-      clearCursor: collection =>
-        Effect.gen(function* (_) {
-          const key = `cursor:${collection}`;
-          yield* _(
-            Effect.tryPromise({
-              try: () => kv.del(key),
-              catch: cause => new IDBError({ operation: "delete", key, cause }),
-            }),
-          );
-          yield* _(Effect.logDebug("Cursor cleared", { collection }));
-        }),
-
-      loadPeerId: collection =>
-        Effect.gen(function* (_) {
-          const key = `peerId:${collection}`;
-          const stored = yield* _(
-            Effect.tryPromise({
-              try: () => kv.get<string>(key),
-              catch: cause => new IDBError({ operation: "get", key, cause }),
-            }),
-          );
-
-          if (stored) {
-            yield* _(Effect.logDebug("Loaded peerId from storage", { collection, peerId: stored }));
-            return stored;
-          }
-
-          const newPeerId = generatePeerId();
-          yield* _(
-            Effect.tryPromise({
-              try: () => kv.set(key, newPeerId),
-              catch: cause => new IDBWriteError({ key, value: newPeerId, cause }),
-            }),
-          );
-          yield* _(Effect.logDebug("Generated new peerId", { collection, peerId: newPeerId }));
-          return newPeerId;
-        }),
-    }),
-  );
-}