@automerge/automerge-repo 2.0.0-alpha.2 → 2.0.0-alpha.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -6
- package/dist/AutomergeUrl.d.ts +17 -5
- package/dist/AutomergeUrl.d.ts.map +1 -1
- package/dist/AutomergeUrl.js +71 -24
- package/dist/DocHandle.d.ts +89 -20
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +189 -28
- package/dist/FindProgress.d.ts +30 -0
- package/dist/FindProgress.d.ts.map +1 -0
- package/dist/FindProgress.js +1 -0
- package/dist/RemoteHeadsSubscriptions.d.ts +4 -5
- package/dist/RemoteHeadsSubscriptions.d.ts.map +1 -1
- package/dist/RemoteHeadsSubscriptions.js +4 -1
- package/dist/Repo.d.ts +44 -6
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +226 -87
- package/dist/entrypoints/fullfat.d.ts +1 -0
- package/dist/entrypoints/fullfat.d.ts.map +1 -1
- package/dist/entrypoints/fullfat.js +1 -2
- package/dist/helpers/abortable.d.ts +39 -0
- package/dist/helpers/abortable.d.ts.map +1 -0
- package/dist/helpers/abortable.js +45 -0
- package/dist/helpers/bufferFromHex.d.ts +3 -0
- package/dist/helpers/bufferFromHex.d.ts.map +1 -0
- package/dist/helpers/bufferFromHex.js +13 -0
- package/dist/helpers/headsAreSame.d.ts +2 -2
- package/dist/helpers/headsAreSame.d.ts.map +1 -1
- package/dist/helpers/mergeArrays.d.ts +1 -1
- package/dist/helpers/mergeArrays.d.ts.map +1 -1
- package/dist/helpers/tests/network-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/network-adapter-tests.js +13 -13
- package/dist/helpers/tests/storage-adapter-tests.d.ts +2 -2
- package/dist/helpers/tests/storage-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/storage-adapter-tests.js +25 -48
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -1
- package/dist/storage/StorageSubsystem.d.ts +11 -1
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +20 -4
- package/dist/synchronizer/CollectionSynchronizer.d.ts +17 -3
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.js +43 -18
- package/dist/synchronizer/DocSynchronizer.d.ts +10 -2
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +30 -8
- package/dist/synchronizer/Synchronizer.d.ts +11 -0
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
- package/dist/types.d.ts +4 -1
- package/dist/types.d.ts.map +1 -1
- package/fuzz/fuzz.ts +3 -3
- package/package.json +3 -3
- package/src/AutomergeUrl.ts +101 -26
- package/src/DocHandle.ts +256 -38
- package/src/FindProgress.ts +48 -0
- package/src/RemoteHeadsSubscriptions.ts +11 -9
- package/src/Repo.ts +310 -95
- package/src/entrypoints/fullfat.ts +1 -2
- package/src/helpers/abortable.ts +61 -0
- package/src/helpers/bufferFromHex.ts +14 -0
- package/src/helpers/headsAreSame.ts +2 -2
- package/src/helpers/tests/network-adapter-tests.ts +14 -13
- package/src/helpers/tests/storage-adapter-tests.ts +44 -86
- package/src/index.ts +2 -0
- package/src/storage/StorageSubsystem.ts +29 -4
- package/src/synchronizer/CollectionSynchronizer.ts +56 -19
- package/src/synchronizer/DocSynchronizer.ts +34 -9
- package/src/synchronizer/Synchronizer.ts +14 -0
- package/src/types.ts +4 -1
- package/test/AutomergeUrl.test.ts +130 -0
- package/test/CollectionSynchronizer.test.ts +4 -4
- package/test/DocHandle.test.ts +189 -29
- package/test/DocSynchronizer.test.ts +10 -3
- package/test/Repo.test.ts +377 -191
- package/test/StorageSubsystem.test.ts +17 -0
- package/test/remoteHeads.test.ts +27 -12
package/dist/Repo.js
CHANGED
@@ -1,14 +1,15 @@
  import { next as Automerge } from "@automerge/automerge/slim";
  import debug from "debug";
  import { EventEmitter } from "eventemitter3";
- import { generateAutomergeUrl, interpretAsDocumentId, parseAutomergeUrl, } from "./AutomergeUrl.js";
- import { DocHandle } from "./DocHandle.js";
+ import { encodeHeads, generateAutomergeUrl, interpretAsDocumentId, isValidAutomergeUrl, parseAutomergeUrl, } from "./AutomergeUrl.js";
+ import { DELETED, DocHandle, READY, UNAVAILABLE, UNLOADED, } from "./DocHandle.js";
  import { RemoteHeadsSubscriptions } from "./RemoteHeadsSubscriptions.js";
  import { headsAreSame } from "./helpers/headsAreSame.js";
  import { throttle } from "./helpers/throttle.js";
  import { NetworkSubsystem } from "./network/NetworkSubsystem.js";
  import { StorageSubsystem } from "./storage/StorageSubsystem.js";
  import { CollectionSynchronizer } from "./synchronizer/CollectionSynchronizer.js";
+ import { abortable } from "./helpers/abortable.js";
  function randomPeerId() {
      return ("peer-" + Math.random().toString(36).slice(4));
  }
@@ -30,7 +31,8 @@ export class Repo extends EventEmitter {
      /** @hidden */
      saveDebounceRate = 100;
      #handleCache = {};
-
+     /** @hidden */
+     synchronizer;
      /** By default, we share generously with all peers. */
      /** @hidden */
      sharePolicy = async () => true;
@@ -39,31 +41,11 @@
      peerMetadataByPeerId = {};
      #remoteHeadsSubscriptions = new RemoteHeadsSubscriptions();
      #remoteHeadsGossipingEnabled = false;
-     constructor({ storage, network = [], peerId = randomPeerId(), sharePolicy, isEphemeral = storage === undefined, enableRemoteHeadsGossiping = false, } = {}) {
+     constructor({ storage, network = [], peerId = randomPeerId(), sharePolicy, isEphemeral = storage === undefined, enableRemoteHeadsGossiping = false, denylist = [], } = {}) {
          super();
          this.#remoteHeadsGossipingEnabled = enableRemoteHeadsGossiping;
          this.#log = debug(`automerge-repo:repo`);
          this.sharePolicy = sharePolicy ?? this.sharePolicy;
-         // DOC COLLECTION
-         // The `document` event is fired by the DocCollection any time we create a new document or look
-         // up a document by ID. We listen for it in order to wire up storage and network synchronization.
-         this.on("document", async ({ handle }) => {
-             if (storageSubsystem) {
-                 // Save when the document changes, but no more often than saveDebounceRate.
-                 const saveFn = ({ handle, doc, }) => {
-                     void storageSubsystem.saveDoc(handle.documentId, doc);
-                 };
-                 handle.on("heads-changed", throttle(saveFn, this.saveDebounceRate));
-             }
-             handle.on("unavailable", () => {
-                 this.#log("document unavailable", { documentId: handle.documentId });
-                 this.emit("unavailable-document", {
-                     documentId: handle.documentId,
-                 });
-             });
-             // Register the document with the synchronizer. This advertises our interest in the document.
-             this.#synchronizer.addDocument(handle.documentId);
-         });
          this.on("delete-document", ({ documentId }) => {
              // TODO Pass the delete on to the network
              // synchronizer.removeDocument(documentId)
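The constructor hunk above adds a `denylist` option that is handed straight to the CollectionSynchronizer. A minimal sketch of how a consumer might pass it — the option's exact semantics are not shown in this diff, so the assumption that it holds document URLs the synchronizer should refuse to sync, and the URL itself, are hypothetical:

```typescript
import { Repo } from "@automerge/automerge-repo"

// Sketch: construct a repo with the new denylist option from this diff.
// Assumption: denylist takes Automerge URLs of documents this repo should
// never load or sync. The URL below is a placeholder, not a real document.
const repo = new Repo({
  network: [],
  denylist: ["automerge:hypotheticalDocumentUrl"],
})
```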
@@ -75,20 +57,25 @@
          });
          // SYNCHRONIZER
          // The synchronizer uses the network subsystem to keep documents in sync with peers.
-         this.#synchronizer = new CollectionSynchronizer(this);
+         this.synchronizer = new CollectionSynchronizer(this, denylist);
          // When the synchronizer emits messages, send them to peers
-         this.#synchronizer.on("message", message => {
+         this.synchronizer.on("message", message => {
              this.#log(`sending ${message.type} message to ${message.targetId}`);
              networkSubsystem.send(message);
          });
+         // Forward metrics from doc synchronizers
+         this.synchronizer.on("metrics", event => this.emit("doc-metrics", event));
          if (this.#remoteHeadsGossipingEnabled) {
-             this.#synchronizer.on("open-doc", ({ peerId, documentId }) => {
+             this.synchronizer.on("open-doc", ({ peerId, documentId }) => {
                  this.#remoteHeadsSubscriptions.subscribePeerToDoc(peerId, documentId);
              });
          }
          // STORAGE
          // The storage subsystem has access to some form of persistence, and deals with save and loading documents.
          const storageSubsystem = storage ? new StorageSubsystem(storage) : undefined;
+         if (storageSubsystem) {
+             storageSubsystem.on("document-loaded", event => this.emit("doc-metrics", { type: "doc-loaded", ...event }));
+         }
          this.storageSubsystem = storageSubsystem;
          // NETWORK
          // The network subsystem deals with sending and receiving messages to and from peers.
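The two hunks above forward synchronizer metrics and storage load events through a new repo-level "doc-metrics" event. A sketch of listening for it — the payload shape beyond the `type` discriminant is not shown in this diff:

```typescript
// Sketch: observe the new "doc-metrics" events forwarded by the Repo.
// "doc-loaded" events originate in the storage subsystem; other event
// types come from the doc synchronizers. Fields besides `type` are an
// unknown here, so we just log the whole event.
repo.on("doc-metrics", event => {
  if (event.type === "doc-loaded") {
    console.log("loaded from storage:", event)
  }
})
```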
@@ -113,18 +100,18 @@
              .catch(err => {
                  console.log("error in share policy", { err });
              });
-             this.#synchronizer.addPeer(peerId);
+             this.synchronizer.addPeer(peerId);
          });
          // When a peer disconnects, remove it from the synchronizer
          networkSubsystem.on("peer-disconnected", ({ peerId }) => {
-             this.#synchronizer.removePeer(peerId);
+             this.synchronizer.removePeer(peerId);
              this.#remoteHeadsSubscriptions.removePeer(peerId);
          });
          // Handle incoming messages
          networkSubsystem.on("message", async (msg) => {
              this.#receiveMessage(msg);
          });
-         this.#synchronizer.on("sync-state", message => {
+         this.synchronizer.on("sync-state", message => {
              this.#saveSyncState(message);
              const handle = this.#handleCache[message.documentId];
              const { storageId } = this.peerMetadataByPeerId[message.peerId] || {};
@@ -133,11 +120,12 @@
              }
              const heads = handle.getRemoteHeads(storageId);
              const haveHeadsChanged = message.syncState.theirHeads &&
-                 (!heads || !headsAreSame(heads, message.syncState.theirHeads));
+                 (!heads ||
+                     !headsAreSame(heads, encodeHeads(message.syncState.theirHeads)));
              if (haveHeadsChanged && message.syncState.theirHeads) {
-                 handle.setRemoteHeads(storageId, message.syncState.theirHeads);
+                 handle.setRemoteHeads(storageId, encodeHeads(message.syncState.theirHeads));
                  if (storageId && this.#remoteHeadsGossipingEnabled) {
-                     this.#remoteHeadsSubscriptions.handleImmediateRemoteHeadsChanged(message.documentId, storageId, message.syncState.theirHeads);
+                     this.#remoteHeadsSubscriptions.handleImmediateRemoteHeadsChanged(message.documentId, storageId, encodeHeads(message.syncState.theirHeads));
                  }
              }
          });
@@ -172,6 +160,20 @@
              });
          }
      }
+     // The `document` event is fired by the DocCollection any time we create a new document or look
+     // up a document by ID. We listen for it in order to wire up storage and network synchronization.
+     #registerHandleWithSubsystems(handle) {
+         const { storageSubsystem } = this;
+         if (storageSubsystem) {
+             // Save when the document changes, but no more often than saveDebounceRate.
+             const saveFn = ({ handle, doc }) => {
+                 void storageSubsystem.saveDoc(handle.documentId, doc);
+             };
+             handle.on("heads-changed", throttle(saveFn, this.saveDebounceRate));
+         }
+         // Register the document with the synchronizer. This advertises our interest in the document.
+         this.synchronizer.addDocument(handle);
+     }
      #receiveMessage(message) {
          switch (message.type) {
              case "remote-subscription-change":
@@ -188,7 +190,7 @@
              case "request":
              case "ephemeral":
              case "doc-unavailable":
-                 this.#synchronizer.receiveMessage(message).catch(err => {
+                 this.synchronizer.receiveMessage(message).catch(err => {
                      console.log("error receiving message", { err });
                  });
          }
@@ -229,7 +231,7 @@
      }
      /** Returns a list of all connected peer ids */
      get peers() {
-         return this.#synchronizer.peers;
+         return this.synchronizer.peers;
      }
      getStorageIdOfPeer(peerId) {
          return this.peerMetadataByPeerId[peerId]?.storageId;
@@ -245,7 +247,7 @@
          const handle = this.#getHandle({
              documentId,
          });
-         this.emit("document", { handle });
+         this.#registerHandleWithSubsystems(handle);
          handle.update(() => {
              let nextDoc;
              if (initialValue) {
@@ -271,18 +273,13 @@
       * Any peers this `Repo` is connected to for whom `sharePolicy` returns `true` will
       * be notified of the newly created DocHandle.
       *
-      * @throws if the cloned handle is not yet ready or if
-      * `clonedHandle.docSync()` returns `undefined` (i.e. the handle is unavailable).
       */
      clone(clonedHandle) {
          if (!clonedHandle.isReady()) {
              throw new Error(`Cloned handle is not yet in ready state.
-         (Try await handle.whenReady() first.)`);
-         }
-         const sourceDoc = clonedHandle.docSync();
-         if (!sourceDoc) {
-             throw new Error("Cloned handle doesn't have a document.");
+         (Try await handle.whenReady() first.)`);
          }
+         const sourceDoc = clonedHandle.doc();
          const handle = this.create();
          handle.update(() => {
              // we replace the document with the new cloned one
@@ -290,57 +287,172 @@
          });
          return handle;
      }
-     /**
-      * Retrieves a document by id. It gets data from the local system, but also emits a `document`
-      * event to advertise interest in the document.
-      */
-     async find(
-     /** The url or documentId of the handle to retrieve */
-     id) {
-         const documentId = interpretAsDocumentId(id);
-         // If we have the handle cached, return it
+     findWithProgress(id, options = {}) {
+         const { signal } = options;
+         const abortPromise = abortable(signal);
+         const { documentId, heads } = isValidAutomergeUrl(id)
+             ? parseAutomergeUrl(id)
+             : { documentId: interpretAsDocumentId(id), heads: undefined };
+         // Check cache first - return plain FindStep for terminal states
          if (this.#handleCache[documentId]) {
-
-
-
-
-
-
-             }
+             const handle = this.#handleCache[documentId];
+             if (handle.state === UNAVAILABLE) {
+                 const result = {
+                     state: "unavailable",
+                     error: new Error(`Document ${id} is unavailable`),
+                     handle,
+                 };
+                 return result;
+             }
+             if (handle.state === DELETED) {
+                 return {
+                     state: "failed",
+                     error: new Error(`Document ${id} was deleted`),
+                     handle,
+                 };
+             }
+             if (handle.state === READY) {
+                 // If we already have the handle, return it immediately (or a view of the handle if heads are specified)
+                 return {
+                     state: "ready",
+                     // TODO: this handle needs to be cached (or at least avoid running clone)
+                     handle: heads ? handle.view(heads) : handle,
+                 };
              }
-             return this.#handleCache[documentId];
          }
-         //
-
-
-
-
-
-
+         // the generator takes over `this`, so we need an alias to the repo this
+         // eslint-disable-next-line @typescript-eslint/no-this-alias
+         const that = this;
+         async function* progressGenerator() {
+             try {
+                 const handle = that.#getHandle({ documentId });
+                 yield { state: "loading", progress: 25, handle };
+                 const loadingPromise = await (that.storageSubsystem
+                     ? that.storageSubsystem.loadDoc(handle.documentId)
+                     : Promise.resolve(null));
+                 const loadedDoc = await Promise.race([loadingPromise, abortPromise]);
                  if (loadedDoc) {
-                     // uhhhh, sorry if you're reading this because we were lying to the type system
                      handle.update(() => loadedDoc);
                      handle.doneLoading();
+                     yield { state: "loading", progress: 50, handle };
                  }
                  else {
-
-
-
-                     handle.request();
-                 })
-                     .catch(err => {
-                     this.#log("error waiting for network", { err });
-                 });
-                 this.emit("document", { handle });
+                     await Promise.race([that.networkSubsystem.whenReady(), abortPromise]);
+                     handle.request();
+                     yield { state: "loading", progress: 75, handle };
                  }
-
+                 that.#registerHandleWithSubsystems(handle);
+                 await Promise.race([
+                     handle.whenReady([READY, UNAVAILABLE]),
+                     abortPromise,
+                 ]);
+                 if (handle.state === UNAVAILABLE) {
+                     yield { state: "unavailable", handle };
+                 }
+                 if (handle.state === DELETED) {
+                     throw new Error(`Document ${id} was deleted`);
+                 }
+                 yield { state: "ready", handle };
+             }
+             catch (error) {
+                 yield {
+                     state: "failed",
+                     error: error instanceof Error ? error : new Error(String(error)),
+                     handle,
+                 };
+             }
+         }
+         const iterator = progressGenerator();
+         const next = async () => {
+             const result = await iterator.next();
+             return { ...result.value, next };
+         };
+         const untilReady = async (allowableStates) => {
+             for await (const state of iterator) {
+                 if (allowableStates.includes(state.handle.state)) {
+                     return state.handle;
+                 }
+                 if (state.state === "unavailable") {
+                     throw new Error(`Document ${id} is unavailable`);
+                 }
+                 if (state.state === "ready")
+                     return state.handle;
+                 if (state.state === "failed")
+                     throw state.error;
+             }
+             throw new Error("Iterator completed without reaching ready state");
+         };
+         const handle = this.#getHandle({ documentId });
+         const initial = { state: "loading", progress: 0, handle };
+         return { ...initial, next, untilReady };
+     }
+     async find(id, options = {}) {
+         const { allowableStates = ["ready"], signal } = options;
+         const progress = this.findWithProgress(id, { signal });
+         /*if (allowableStates.includes(progress.state)) {
+           console.log("returning early")
+           return progress.handle
+         }*/
+         if ("untilReady" in progress) {
+             this.#registerHandleWithSubsystems(progress.handle);
+             return progress.untilReady(allowableStates);
          }
          else {
+             return progress.handle;
+         }
+     }
+     /**
+      * Loads a document without waiting for ready state
+      */
+     async #loadDocument(documentId) {
+         // If we have the handle cached, return it
+         if (this.#handleCache[documentId]) {
+             return this.#handleCache[documentId];
+         }
+         // If we don't already have the handle, make an empty one and try loading it
+         const handle = this.#getHandle({ documentId });
+         const loadedDoc = await (this.storageSubsystem
+             ? this.storageSubsystem.loadDoc(handle.documentId)
+             : Promise.resolve(null));
+         if (loadedDoc) {
+             // We need to cast this to <T> because loadDoc operates in <unknowns>.
+             // This is really where we ought to be validating the input matches <T>.
+             handle.update(() => loadedDoc);
+             handle.doneLoading();
+         }
+         else {
+             // Because the network subsystem might still be booting up, we wait
+             // here so that we don't immediately give up loading because we're still
+             // making our initial connection to a sync server.
+             await this.networkSubsystem.whenReady();
              handle.request();
-             this.emit("document", { handle });
          }
+         this.#registerHandleWithSubsystems(handle);
          return handle;
      }
+     /**
+      * Retrieves a document by id. It gets data from the local system, but also emits a `document`
+      * event to advertise interest in the document.
+      */
+     async findClassic(
+     /** The url or documentId of the handle to retrieve */
+     id, options = {}) {
+         const documentId = interpretAsDocumentId(id);
+         const { allowableStates, signal } = options;
+         return Promise.race([
+             (async () => {
+                 const handle = await this.#loadDocument(documentId);
+                 if (!allowableStates) {
+                     await handle.whenReady([READY, UNAVAILABLE]);
+                     if (handle.state === UNAVAILABLE && !signal?.aborted) {
+                         throw new Error(`Document ${id} is unavailable`);
+                     }
+                 }
+                 return handle;
+             })(),
+             abortable(signal),
+         ]);
+     }
      delete(
      /** The url or documentId of the handle to delete */
      id) {
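The hunk above replaces the old promise-returning find with an abortable options-based API plus a progress-reporting variant. A sketch of using both, assuming `url` holds a valid automerge URL string: find() rejects with an AbortError when the signal fires, and findWithProgress() returns step objects carrying next() and untilReady().

```typescript
const controller = new AbortController()
const url = "automerge:..." // hypothetical document URL

// Abortable lookup: rejects with an AbortError if the controller fires first.
const handle = await repo.find(url, { signal: controller.signal })

// Progress-based lookup: step through the loading states by hand.
let step = repo.findWithProgress(url)
while (step.state === "loading") {
  step = await step.next() // each step spreads in the next() continuation
}
if (step.state === "ready") {
  console.log("document ready:", step.handle.documentId)
}
```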
@@ -360,9 +472,7 @@
      async export(id) {
          const documentId = interpretAsDocumentId(id);
          const handle = this.#getHandle({ documentId });
-         const doc = await handle.doc();
-         if (!doc)
-             return undefined;
+         const doc = handle.doc();
          return Automerge.save(doc);
      }
      /**
@@ -408,17 +518,46 @@
              ? documents.map(id => this.#handleCache[id])
              : Object.values(this.#handleCache);
          await Promise.all(handles.map(async (handle) => {
-             const doc = handle.docSync();
-             if (!doc) {
-                 return;
-             }
-             return this.storageSubsystem.saveDoc(handle.documentId, doc);
+             return this.storageSubsystem.saveDoc(handle.documentId, handle.doc());
          }));
      }
+     /**
+      * Removes a DocHandle from the handleCache.
+      * @hidden this API is experimental and may change.
+      * @param documentId - documentId of the DocHandle to remove from handleCache, if present in cache.
+      * @returns Promise<void>
+      */
+     async removeFromCache(documentId) {
+         if (!this.#handleCache[documentId]) {
+             this.#log(`WARN: removeFromCache called but handle not found in handleCache for documentId: ${documentId}`);
+             return;
+         }
+         const handle = this.#getHandle({ documentId });
+         await handle.whenReady([READY, UNLOADED, DELETED, UNAVAILABLE]);
+         const doc = handle.doc();
+         // because this is an internal-ish function, we'll be extra careful about undefined docs here
+         if (doc) {
+             if (handle.isReady()) {
+                 handle.unload();
+             }
+             else {
+                 this.#log(`WARN: removeFromCache called but handle for documentId: ${documentId} in unexpected state: ${handle.state}`);
+             }
+             delete this.#handleCache[documentId];
+             // TODO: remove document from synchronizer when removeDocument is implemented
+             // this.synchronizer.removeDocument(documentId)
+         }
+         else {
+             this.#log(`WARN: removeFromCache called but doc undefined for documentId: ${documentId}`);
+         }
+     }
      shutdown() {
          this.networkSubsystem.adapters.forEach(adapter => {
              adapter.disconnect();
          });
          return this.flush();
      }
+     metrics() {
+         return { documents: this.synchronizer.metrics() };
+     }
  }
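The final Repo.js hunk adds cache eviction and a metrics accessor. A sketch of the calling pattern as it appears above — removeFromCache() waits for the handle to settle, unloads a ready handle, and drops it from the handle cache, while metrics() wraps the synchronizer's per-document stats (whose shape is not shown in this diff):

```typescript
// Sketch: evict a cached handle and read synchronizer metrics.
await repo.removeFromCache(handle.documentId)
const { documents } = repo.metrics() // per-document synchronizer data
```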
package/dist/entrypoints/fullfat.d.ts.map
CHANGED
@@ -1 +1 @@
- {"version":3,"file":"fullfat.d.ts","sourceRoot":"","sources":["../../src/entrypoints/fullfat.ts"],"names":[],"mappings":"AAAA,cAAc,aAAa,CAAA"}
+ {"version":3,"file":"fullfat.d.ts","sourceRoot":"","sources":["../../src/entrypoints/fullfat.ts"],"names":[],"mappings":"AAAA,cAAc,aAAa,CAAA;AAS3B,OAAO,sBAAsB,CAAA"}
package/dist/helpers/abortable.d.ts
ADDED
@@ -0,0 +1,39 @@
+ /**
+  * Creates a promise that rejects when the signal is aborted.
+  *
+  * @remarks
+  * This utility creates a promise that rejects when the provided AbortSignal is aborted.
+  * It's designed to be used with Promise.race() to make operations abortable.
+  *
+  * @example
+  * ```typescript
+  * const controller = new AbortController();
+  *
+  * try {
+  *   const result = await Promise.race([
+  *     fetch('https://api.example.com/data'),
+  *     abortable(controller.signal)
+  *   ]);
+  * } catch (err) {
+  *   if (err.name === 'AbortError') {
+  *     console.log('The operation was aborted');
+  *   }
+  * }
+  *
+  * // Later, to abort:
+  * controller.abort();
+  * ```
+  *
+  * @param signal - An AbortSignal that can be used to abort the operation
+  * @param cleanup - Optional cleanup function that will be called if aborted
+  * @returns A promise that rejects with AbortError when the signal is aborted
+  * @throws {DOMException} With name "AbortError" when aborted
+  */
+ export declare function abortable(signal?: AbortSignal, cleanup?: () => void): Promise<never>;
+ /**
+  * Include this type in an options object to pass an AbortSignal to a function.
+  */
+ export interface AbortOptions {
+     signal?: AbortSignal;
+ }
+ //# sourceMappingURL=abortable.d.ts.map
package/dist/helpers/abortable.d.ts.map
ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"abortable.d.ts","sourceRoot":"","sources":["../../src/helpers/abortable.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,wBAAgB,SAAS,CACvB,MAAM,CAAC,EAAE,WAAW,EACpB,OAAO,CAAC,EAAE,MAAM,IAAI,GACnB,OAAO,CAAC,KAAK,CAAC,CAmBhB;AAED;;GAEG;AACH,MAAM,WAAW,YAAY;IAC3B,MAAM,CAAC,EAAE,WAAW,CAAA;CACrB"}
package/dist/helpers/abortable.js
ADDED
@@ -0,0 +1,45 @@
+ /**
+  * Creates a promise that rejects when the signal is aborted.
+  *
+  * @remarks
+  * This utility creates a promise that rejects when the provided AbortSignal is aborted.
+  * It's designed to be used with Promise.race() to make operations abortable.
+  *
+  * @example
+  * ```typescript
+  * const controller = new AbortController();
+  *
+  * try {
+  *   const result = await Promise.race([
+  *     fetch('https://api.example.com/data'),
+  *     abortable(controller.signal)
+  *   ]);
+  * } catch (err) {
+  *   if (err.name === 'AbortError') {
+  *     console.log('The operation was aborted');
+  *   }
+  * }
+  *
+  * // Later, to abort:
+  * controller.abort();
+  * ```
+  *
+  * @param signal - An AbortSignal that can be used to abort the operation
+  * @param cleanup - Optional cleanup function that will be called if aborted
+  * @returns A promise that rejects with AbortError when the signal is aborted
+  * @throws {DOMException} With name "AbortError" when aborted
+  */
+ export function abortable(signal, cleanup) {
+     if (signal?.aborted) {
+         throw new DOMException("Operation aborted", "AbortError");
+     }
+     if (!signal) {
+         return new Promise(() => { }); // Never resolves
+     }
+     return new Promise((_, reject) => {
+         signal.addEventListener("abort", () => {
+             cleanup?.();
+             reject(new DOMException("Operation aborted", "AbortError"));
+         }, { once: true });
+     });
+ }
package/dist/helpers/bufferFromHex.d.ts.map
ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"bufferFromHex.d.ts","sourceRoot":"","sources":["../../src/helpers/bufferFromHex.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,uBAAuB,cAAe,MAAM,KAAG,UAS3D,CAAA;AAED,eAAO,MAAM,qBAAqB,SAAU,UAAU,KAAG,MAExD,CAAA"}
package/dist/helpers/bufferFromHex.js
ADDED
@@ -0,0 +1,13 @@
+ export const uint8ArrayFromHexString = (hexString) => {
+     if (hexString.length % 2 !== 0) {
+         throw new Error("Hex string must have an even length");
+     }
+     const bytes = new Uint8Array(hexString.length / 2);
+     for (let i = 0; i < hexString.length; i += 2) {
+         bytes[i >> 1] = parseInt(hexString.slice(i, i + 2), 16);
+     }
+     return bytes;
+ };
+ export const uint8ArrayToHexString = (data) => {
+     return Array.from(data, byte => byte.toString(16).padStart(2, "0")).join("");
+ };
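A sketch of round-tripping bytes through the two new hex helpers added above. The deep import path is illustrative only; in-package code imports them from "./helpers/bufferFromHex.js", and the package's export map may not expose this subpath:

```typescript
// Sketch: hex <-> Uint8Array round trip with the new helpers.
import {
  uint8ArrayFromHexString,
  uint8ArrayToHexString,
} from "@automerge/automerge-repo/dist/helpers/bufferFromHex.js" // assumed path

const bytes = uint8ArrayFromHexString("00ff10") // Uint8Array(3) [0, 255, 16]
const hex = uint8ArrayToHexString(bytes)        // "00ff10"
```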
package/dist/helpers/headsAreSame.d.ts
CHANGED
@@ -1,3 +1,3 @@
- import {
- export declare const headsAreSame: (a:
+ import type { UrlHeads } from "../types.js";
+ export declare const headsAreSame: (a: UrlHeads, b: UrlHeads) => boolean;
  //# sourceMappingURL=headsAreSame.d.ts.map
package/dist/helpers/headsAreSame.d.ts.map
CHANGED
@@ -1 +1 @@
- {"version":3,"file":"headsAreSame.d.ts","sourceRoot":"","sources":["../../src/helpers/headsAreSame.ts"],"names":[],"mappings":"
+ {"version":3,"file":"headsAreSame.d.ts","sourceRoot":"","sources":["../../src/helpers/headsAreSame.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,MAAM,aAAa,CAAA;AAE3C,eAAO,MAAM,YAAY,MAAO,QAAQ,KAAK,QAAQ,YAEpD,CAAA"}
package/dist/helpers/mergeArrays.d.ts
CHANGED
@@ -1,2 +1,2 @@
- export declare function mergeArrays(myArrays: Uint8Array[]): Uint8Array
+ export declare function mergeArrays(myArrays: Uint8Array[]): Uint8Array<ArrayBuffer>;
  //# sourceMappingURL=mergeArrays.d.ts.map
package/dist/helpers/mergeArrays.d.ts.map
CHANGED
@@ -1 +1 @@
- {"version":3,"file":"mergeArrays.d.ts","sourceRoot":"","sources":["../../src/helpers/mergeArrays.ts"],"names":[],"mappings":"AAAA,wBAAgB,WAAW,CAAC,QAAQ,EAAE,UAAU,EAAE,
+ {"version":3,"file":"mergeArrays.d.ts","sourceRoot":"","sources":["../../src/helpers/mergeArrays.ts"],"names":[],"mappings":"AAAA,wBAAgB,WAAW,CAAC,QAAQ,EAAE,UAAU,EAAE,2BAgBjD"}
package/dist/helpers/tests/network-adapter-tests.d.ts.map
CHANGED
@@ -1 +1 @@
- {"version":3,"file":"network-adapter-tests.d.ts","sourceRoot":"","sources":["../../../src/helpers/tests/network-adapter-tests.ts"],"names":[],"mappings":"AAUA,OAAO,KAAK,EAAE,uBAAuB,EAAE,MAAM,0CAA0C,CAAA;AAIvF;;;;;;;;;;;GAWG;AACH,wBAAgB,sBAAsB,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,CAAC,EAAE,MAAM,GAAG,IAAI,
+ {"version":3,"file":"network-adapter-tests.d.ts","sourceRoot":"","sources":["../../../src/helpers/tests/network-adapter-tests.ts"],"names":[],"mappings":"AAUA,OAAO,KAAK,EAAE,uBAAuB,EAAE,MAAM,0CAA0C,CAAA;AAIvF;;;;;;;;;;;GAWG;AACH,wBAAgB,sBAAsB,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,CAAC,EAAE,MAAM,GAAG,IAAI,CA2Q5E;AAID,KAAK,OAAO,GAAG,uBAAuB,GAAG,uBAAuB,EAAE,CAAA;AAElE,MAAM,MAAM,OAAO,GAAG,MAAM,OAAO,CAAC;IAClC,QAAQ,EAAE,CAAC,OAAO,EAAE,OAAO,EAAE,OAAO,CAAC,CAAA;IACrC,QAAQ,CAAC,EAAE,MAAM,IAAI,CAAA;CACtB,CAAC,CAAA"}