@automerge/automerge-repo 2.0.0-collectionsync-alpha.1 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -8
- package/dist/AutomergeUrl.d.ts +17 -5
- package/dist/AutomergeUrl.d.ts.map +1 -1
- package/dist/AutomergeUrl.js +71 -24
- package/dist/DocHandle.d.ts +33 -41
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +105 -66
- package/dist/FindProgress.d.ts +30 -0
- package/dist/FindProgress.d.ts.map +1 -0
- package/dist/FindProgress.js +1 -0
- package/dist/RemoteHeadsSubscriptions.d.ts +4 -5
- package/dist/RemoteHeadsSubscriptions.d.ts.map +1 -1
- package/dist/RemoteHeadsSubscriptions.js +4 -1
- package/dist/Repo.d.ts +24 -5
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +355 -169
- package/dist/helpers/abortable.d.ts +36 -0
- package/dist/helpers/abortable.d.ts.map +1 -0
- package/dist/helpers/abortable.js +47 -0
- package/dist/helpers/arraysAreEqual.d.ts.map +1 -1
- package/dist/helpers/bufferFromHex.d.ts +3 -0
- package/dist/helpers/bufferFromHex.d.ts.map +1 -0
- package/dist/helpers/bufferFromHex.js +13 -0
- package/dist/helpers/debounce.d.ts.map +1 -1
- package/dist/helpers/eventPromise.d.ts.map +1 -1
- package/dist/helpers/headsAreSame.d.ts +2 -2
- package/dist/helpers/headsAreSame.d.ts.map +1 -1
- package/dist/helpers/mergeArrays.d.ts +1 -1
- package/dist/helpers/mergeArrays.d.ts.map +1 -1
- package/dist/helpers/pause.d.ts.map +1 -1
- package/dist/helpers/tests/network-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/network-adapter-tests.js +13 -13
- package/dist/helpers/tests/storage-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/storage-adapter-tests.js +6 -9
- package/dist/helpers/throttle.d.ts.map +1 -1
- package/dist/helpers/withTimeout.d.ts.map +1 -1
- package/dist/index.d.ts +35 -7
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +37 -6
- package/dist/network/NetworkSubsystem.d.ts +0 -1
- package/dist/network/NetworkSubsystem.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.js +0 -3
- package/dist/network/messages.d.ts +1 -7
- package/dist/network/messages.d.ts.map +1 -1
- package/dist/network/messages.js +1 -2
- package/dist/storage/StorageAdapter.d.ts +0 -9
- package/dist/storage/StorageAdapter.d.ts.map +1 -1
- package/dist/storage/StorageAdapter.js +0 -33
- package/dist/storage/StorageSubsystem.d.ts +6 -2
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +131 -37
- package/dist/storage/keyHash.d.ts +1 -1
- package/dist/storage/keyHash.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.d.ts +3 -4
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.js +32 -26
- package/dist/synchronizer/DocSynchronizer.d.ts +8 -8
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +205 -79
- package/dist/types.d.ts +4 -1
- package/dist/types.d.ts.map +1 -1
- package/fuzz/fuzz.ts +3 -3
- package/package.json +4 -5
- package/src/AutomergeUrl.ts +101 -26
- package/src/DocHandle.ts +158 -77
- package/src/FindProgress.ts +48 -0
- package/src/RemoteHeadsSubscriptions.ts +11 -9
- package/src/Repo.ts +465 -180
- package/src/helpers/abortable.ts +62 -0
- package/src/helpers/bufferFromHex.ts +14 -0
- package/src/helpers/headsAreSame.ts +2 -2
- package/src/helpers/tests/network-adapter-tests.ts +14 -13
- package/src/helpers/tests/storage-adapter-tests.ts +13 -24
- package/src/index.ts +57 -38
- package/src/network/NetworkSubsystem.ts +0 -4
- package/src/network/messages.ts +2 -11
- package/src/storage/StorageAdapter.ts +0 -42
- package/src/storage/StorageSubsystem.ts +155 -45
- package/src/storage/keyHash.ts +1 -1
- package/src/synchronizer/CollectionSynchronizer.ts +42 -29
- package/src/synchronizer/DocSynchronizer.ts +263 -89
- package/src/types.ts +4 -1
- package/test/AutomergeUrl.test.ts +130 -0
- package/test/CollectionSynchronizer.test.ts +6 -8
- package/test/DocHandle.test.ts +161 -77
- package/test/DocSynchronizer.test.ts +11 -9
- package/test/RemoteHeadsSubscriptions.test.ts +1 -1
- package/test/Repo.test.ts +406 -341
- package/test/StorageSubsystem.test.ts +95 -20
- package/test/remoteHeads.test.ts +28 -13
- package/dist/CollectionHandle.d.ts +0 -14
- package/dist/CollectionHandle.d.ts.map +0 -1
- package/dist/CollectionHandle.js +0 -37
- package/dist/DocUrl.d.ts +0 -47
- package/dist/DocUrl.d.ts.map +0 -1
- package/dist/DocUrl.js +0 -72
- package/dist/EphemeralData.d.ts +0 -20
- package/dist/EphemeralData.d.ts.map +0 -1
- package/dist/EphemeralData.js +0 -1
- package/dist/ferigan.d.ts +0 -51
- package/dist/ferigan.d.ts.map +0 -1
- package/dist/ferigan.js +0 -98
- package/dist/src/DocHandle.d.ts +0 -182
- package/dist/src/DocHandle.d.ts.map +0 -1
- package/dist/src/DocHandle.js +0 -405
- package/dist/src/DocUrl.d.ts +0 -49
- package/dist/src/DocUrl.d.ts.map +0 -1
- package/dist/src/DocUrl.js +0 -72
- package/dist/src/EphemeralData.d.ts +0 -19
- package/dist/src/EphemeralData.d.ts.map +0 -1
- package/dist/src/EphemeralData.js +0 -1
- package/dist/src/Repo.d.ts +0 -74
- package/dist/src/Repo.d.ts.map +0 -1
- package/dist/src/Repo.js +0 -208
- package/dist/src/helpers/arraysAreEqual.d.ts +0 -2
- package/dist/src/helpers/arraysAreEqual.d.ts.map +0 -1
- package/dist/src/helpers/arraysAreEqual.js +0 -2
- package/dist/src/helpers/cbor.d.ts +0 -4
- package/dist/src/helpers/cbor.d.ts.map +0 -1
- package/dist/src/helpers/cbor.js +0 -8
- package/dist/src/helpers/eventPromise.d.ts +0 -11
- package/dist/src/helpers/eventPromise.d.ts.map +0 -1
- package/dist/src/helpers/eventPromise.js +0 -7
- package/dist/src/helpers/headsAreSame.d.ts +0 -2
- package/dist/src/helpers/headsAreSame.d.ts.map +0 -1
- package/dist/src/helpers/headsAreSame.js +0 -4
- package/dist/src/helpers/mergeArrays.d.ts +0 -2
- package/dist/src/helpers/mergeArrays.d.ts.map +0 -1
- package/dist/src/helpers/mergeArrays.js +0 -15
- package/dist/src/helpers/pause.d.ts +0 -6
- package/dist/src/helpers/pause.d.ts.map +0 -1
- package/dist/src/helpers/pause.js +0 -10
- package/dist/src/helpers/tests/network-adapter-tests.d.ts +0 -21
- package/dist/src/helpers/tests/network-adapter-tests.d.ts.map +0 -1
- package/dist/src/helpers/tests/network-adapter-tests.js +0 -122
- package/dist/src/helpers/withTimeout.d.ts +0 -12
- package/dist/src/helpers/withTimeout.d.ts.map +0 -1
- package/dist/src/helpers/withTimeout.js +0 -24
- package/dist/src/index.d.ts +0 -53
- package/dist/src/index.d.ts.map +0 -1
- package/dist/src/index.js +0 -40
- package/dist/src/network/NetworkAdapter.d.ts +0 -26
- package/dist/src/network/NetworkAdapter.d.ts.map +0 -1
- package/dist/src/network/NetworkAdapter.js +0 -4
- package/dist/src/network/NetworkSubsystem.d.ts +0 -23
- package/dist/src/network/NetworkSubsystem.d.ts.map +0 -1
- package/dist/src/network/NetworkSubsystem.js +0 -120
- package/dist/src/network/messages.d.ts +0 -85
- package/dist/src/network/messages.d.ts.map +0 -1
- package/dist/src/network/messages.js +0 -23
- package/dist/src/storage/StorageAdapter.d.ts +0 -14
- package/dist/src/storage/StorageAdapter.d.ts.map +0 -1
- package/dist/src/storage/StorageAdapter.js +0 -1
- package/dist/src/storage/StorageSubsystem.d.ts +0 -12
- package/dist/src/storage/StorageSubsystem.d.ts.map +0 -1
- package/dist/src/storage/StorageSubsystem.js +0 -145
- package/dist/src/synchronizer/CollectionSynchronizer.d.ts +0 -25
- package/dist/src/synchronizer/CollectionSynchronizer.d.ts.map +0 -1
- package/dist/src/synchronizer/CollectionSynchronizer.js +0 -106
- package/dist/src/synchronizer/DocSynchronizer.d.ts +0 -29
- package/dist/src/synchronizer/DocSynchronizer.d.ts.map +0 -1
- package/dist/src/synchronizer/DocSynchronizer.js +0 -263
- package/dist/src/synchronizer/Synchronizer.d.ts +0 -9
- package/dist/src/synchronizer/Synchronizer.d.ts.map +0 -1
- package/dist/src/synchronizer/Synchronizer.js +0 -2
- package/dist/src/types.d.ts +0 -16
- package/dist/src/types.d.ts.map +0 -1
- package/dist/src/types.js +0 -1
- package/dist/test/CollectionSynchronizer.test.d.ts +0 -2
- package/dist/test/CollectionSynchronizer.test.d.ts.map +0 -1
- package/dist/test/CollectionSynchronizer.test.js +0 -57
- package/dist/test/DocHandle.test.d.ts +0 -2
- package/dist/test/DocHandle.test.d.ts.map +0 -1
- package/dist/test/DocHandle.test.js +0 -238
- package/dist/test/DocSynchronizer.test.d.ts +0 -2
- package/dist/test/DocSynchronizer.test.d.ts.map +0 -1
- package/dist/test/DocSynchronizer.test.js +0 -111
- package/dist/test/Network.test.d.ts +0 -2
- package/dist/test/Network.test.d.ts.map +0 -1
- package/dist/test/Network.test.js +0 -11
- package/dist/test/Repo.test.d.ts +0 -2
- package/dist/test/Repo.test.d.ts.map +0 -1
- package/dist/test/Repo.test.js +0 -568
- package/dist/test/StorageSubsystem.test.d.ts +0 -2
- package/dist/test/StorageSubsystem.test.d.ts.map +0 -1
- package/dist/test/StorageSubsystem.test.js +0 -56
- package/dist/test/helpers/DummyNetworkAdapter.d.ts +0 -9
- package/dist/test/helpers/DummyNetworkAdapter.d.ts.map +0 -1
- package/dist/test/helpers/DummyNetworkAdapter.js +0 -15
- package/dist/test/helpers/DummyStorageAdapter.d.ts +0 -16
- package/dist/test/helpers/DummyStorageAdapter.d.ts.map +0 -1
- package/dist/test/helpers/DummyStorageAdapter.js +0 -33
- package/dist/test/helpers/generate-large-object.d.ts +0 -5
- package/dist/test/helpers/generate-large-object.d.ts.map +0 -1
- package/dist/test/helpers/generate-large-object.js +0 -9
- package/dist/test/helpers/getRandomItem.d.ts +0 -2
- package/dist/test/helpers/getRandomItem.d.ts.map +0 -1
- package/dist/test/helpers/getRandomItem.js +0 -4
- package/dist/test/types.d.ts +0 -4
- package/dist/test/types.d.ts.map +0 -1
- package/dist/test/types.js +0 -1
- package/src/CollectionHandle.ts +0 -54
- package/src/ferigan.ts +0 -184
package/dist/src/storage/StorageSubsystem.js
DELETED
@@ -1,145 +0,0 @@
-import * as A from "@automerge/automerge/next"
-import * as sha256 from "fast-sha256"
-import { mergeArrays } from "../helpers/mergeArrays.js"
-import debug from "debug"
-import { headsAreSame } from "../helpers/headsAreSame.js"
-function keyHash(binary) {
-  const hash = sha256.hash(binary)
-  const hashArray = Array.from(new Uint8Array(hash)) // convert buffer to byte array
-  const hashHex = hashArray.map(b => ("00" + b.toString(16)).slice(-2)).join("") // convert bytes to hex string
-  return hashHex
-}
-function headsHash(heads) {
-  let encoder = new TextEncoder()
-  let headsbinary = mergeArrays(heads.map(h => encoder.encode(h)))
-  return keyHash(headsbinary)
-}
-export class StorageSubsystem {
-  #storageAdapter
-  #chunkInfos = new Map()
-  #storedHeads = new Map()
-  #log = debug(`automerge-repo:storage-subsystem`)
-  #snapshotting = false
-  constructor(storageAdapter) {
-    this.#storageAdapter = storageAdapter
-  }
-  async #saveIncremental(documentId, doc) {
-    const binary = A.saveSince(doc, this.#storedHeads.get(documentId) ?? [])
-    if (binary && binary.length > 0) {
-      const key = [documentId, "incremental", keyHash(binary)]
-      this.#log(`Saving incremental ${key} for document ${documentId}`)
-      await this.#storageAdapter.save(key, binary)
-      if (!this.#chunkInfos.has(documentId)) {
-        this.#chunkInfos.set(documentId, [])
-      }
-      this.#chunkInfos.get(documentId).push({
-        key,
-        type: "incremental",
-        size: binary.length,
-      })
-      this.#storedHeads.set(documentId, A.getHeads(doc))
-    } else {
-      return Promise.resolve()
-    }
-  }
-  async #saveTotal(documentId, doc, sourceChunks) {
-    this.#snapshotting = true
-    const binary = A.save(doc)
-    const snapshotHash = headsHash(A.getHeads(doc))
-    const key = [documentId, "snapshot", snapshotHash]
-    const oldKeys = new Set(
-      sourceChunks.map(c => c.key).filter(k => k[2] !== snapshotHash)
-    )
-    this.#log(`Saving snapshot ${key} for document ${documentId}`)
-    this.#log(`deleting old chunks ${Array.from(oldKeys)}`)
-    await this.#storageAdapter.save(key, binary)
-    for (const key of oldKeys) {
-      await this.#storageAdapter.remove(key)
-    }
-    const newChunkInfos =
-      this.#chunkInfos.get(documentId)?.filter(c => !oldKeys.has(c.key)) ?? []
-    newChunkInfos.push({ key, type: "snapshot", size: binary.length })
-    this.#chunkInfos.set(documentId, newChunkInfos)
-    this.#snapshotting = false
-  }
-  async loadDoc(documentId) {
-    const loaded = await this.#storageAdapter.loadRange([documentId])
-    const binaries = []
-    const chunkInfos = []
-    for (const chunk of loaded) {
-      const chunkType = chunkTypeFromKey(chunk.key)
-      if (chunkType == null) {
-        continue
-      }
-      chunkInfos.push({
-        key: chunk.key,
-        type: chunkType,
-        size: chunk.data.length,
-      })
-      binaries.push(chunk.data)
-    }
-    this.#chunkInfos.set(documentId, chunkInfos)
-    const binary = mergeArrays(binaries)
-    if (binary.length === 0) {
-      return null
-    }
-    const newDoc = A.loadIncremental(A.init(), binary)
-    this.#storedHeads.set(documentId, A.getHeads(newDoc))
-    return newDoc
-  }
-  async saveDoc(documentId, doc) {
-    if (!this.#shouldSave(documentId, doc)) {
-      return
-    }
-    let sourceChunks = this.#chunkInfos.get(documentId) ?? []
-    if (this.#shouldCompact(sourceChunks)) {
-      this.#saveTotal(documentId, doc, sourceChunks)
-    } else {
-      this.#saveIncremental(documentId, doc)
-    }
-    this.#storedHeads.set(documentId, A.getHeads(doc))
-  }
-  async remove(documentId) {
-    this.#storageAdapter.removeRange([documentId, "snapshot"])
-    this.#storageAdapter.removeRange([documentId, "incremental"])
-  }
-  #shouldSave(documentId, doc) {
-    const oldHeads = this.#storedHeads.get(documentId)
-    if (!oldHeads) {
-      return true
-    }
-    const newHeads = A.getHeads(doc)
-    if (headsAreSame(newHeads, oldHeads)) {
-      return false
-    }
-    return true
-  }
-  #shouldCompact(sourceChunks) {
-    if (this.#snapshotting) {
-      return false
-    }
-    // compact if the incremental size is greater than the snapshot size
-    let snapshotSize = 0
-    let incrementalSize = 0
-    for (const chunk of sourceChunks) {
-      if (chunk.type === "snapshot") {
-        snapshotSize += chunk.size
-      } else {
-        incrementalSize += chunk.size
-      }
-    }
-    return incrementalSize >= snapshotSize
-  }
-}
-function chunkTypeFromKey(key) {
-  if (key.length < 2) {
-    return null
-  }
-  const chunkTypeStr = key[key.length - 2]
-  if (chunkTypeStr === "snapshot" || chunkTypeStr === "incremental") {
-    const chunkType = chunkTypeStr
-    return chunkType
-  } else {
-    return null
-  }
-}
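The key layout is the load-bearing detail of the deleted subsystem: every chunk is written under a composite key `[documentId, "incremental" | "snapshot", hash]`, and `#shouldCompact` takes a full snapshot once the accumulated incremental chunks are at least as large as the snapshot chunks. A minimal sketch of that scheme (TypeScript; the in-memory adapter and the `node:crypto` hashing are stand-ins for illustration, not the package's API):

```ts
import { createHash } from "node:crypto"

type StorageKey = string[] // e.g. [documentId, "incremental" | "snapshot", hash]

// Hypothetical in-memory adapter mirroring the save/loadRange surface
// the deleted StorageSubsystem expects.
class MemoryAdapter {
  #data = new Map<string, Uint8Array>()
  save(key: StorageKey, value: Uint8Array) {
    this.#data.set(key.join("/"), value)
  }
  loadRange(prefix: StorageKey) {
    const p = prefix.join("/")
    return [...this.#data.entries()]
      .filter(([k]) => k.startsWith(p))
      .map(([k, data]) => ({ key: k.split("/"), data }))
  }
}

// Same hex-digest idea as the deleted keyHash, via node:crypto
// instead of fast-sha256.
const keyHash = (binary: Uint8Array) =>
  createHash("sha256").update(binary).digest("hex")

// The compaction rule from #shouldCompact: snapshot once incremental
// chunks are at least as large as snapshot chunks.
const shouldCompact = (chunks: { type: string; size: number }[]) => {
  let snapshot = 0
  let incremental = 0
  for (const c of chunks) {
    if (c.type === "snapshot") snapshot += c.size
    else incremental += c.size
  }
  return incremental >= snapshot
}

const adapter = new MemoryAdapter()
const docId = "doc-123" // hypothetical id, for the demo only
const change = new TextEncoder().encode("some change bytes")
adapter.save([docId, "incremental", keyHash(change)], change)
console.log(adapter.loadRange([docId]).length) // 1
console.log(shouldCompact([{ type: "incremental", size: 17 }])) // true
```

Because every key begins with the document id, `loadRange([documentId])` fetches all of a document's chunks in one prefix query, which is how `loadDoc` above reassembles them with `mergeArrays`.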
package/dist/src/synchronizer/CollectionSynchronizer.d.ts
DELETED
@@ -1,25 +0,0 @@
-import { Repo } from "../Repo.js"
-import { PeerId, DocumentId } from "../types.js"
-import { Synchronizer } from "./Synchronizer.js"
-import { SynchronizerMessage } from "../network/messages.js"
-/** A CollectionSynchronizer is responsible for synchronizing a DocCollection with peers. */
-export declare class CollectionSynchronizer extends Synchronizer {
-  #private
-  private repo
-  constructor(repo: Repo)
-  /**
-   * When we receive a sync message for a document we haven't got in memory, we
-   * register it with the repo and start synchronizing
-   */
-  receiveMessage(message: SynchronizerMessage): Promise<void>
-  /**
-   * Starts synchronizing the given document with all peers that we share it generously with.
-   */
-  addDocument(documentId: DocumentId): void
-  removeDocument(documentId: DocumentId): void
-  /** Adds a peer and maybe starts synchronizing with them */
-  addPeer(peerId: PeerId): void
-  /** Removes a peer and stops synchronizing with them */
-  removePeer(peerId: PeerId): void
-}
-//# sourceMappingURL=CollectionSynchronizer.d.ts.map
package/dist/src/synchronizer/CollectionSynchronizer.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"CollectionSynchronizer.d.ts","sourceRoot":"","sources":["../../../src/synchronizer/CollectionSynchronizer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,IAAI,EAAE,MAAM,YAAY,CAAA;AAOjC,OAAO,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,aAAa,CAAA;AAEhD,OAAO,EAAE,YAAY,EAAE,MAAM,mBAAmB,CAAA;AAGhD,OAAO,EAGL,mBAAmB,EAEpB,MAAM,wBAAwB,CAAA;AAG/B,4FAA4F;AAC5F,qBAAa,sBAAuB,SAAQ,YAAY;;IAU1C,OAAO,CAAC,IAAI;gBAAJ,IAAI,EAAE,IAAI;IAiC9B;;;OAGG;IACG,cAAc,CAAC,OAAO,EAAE,mBAAmB;IAyBjD;;OAEG;IACH,WAAW,CAAC,UAAU,EAAE,UAAU;IAYlC,cAAc,CAAC,UAAU,EAAE,UAAU;IAIrC,2DAA2D;IAC3D,OAAO,CAAC,MAAM,EAAE,MAAM;IAgBtB,uDAAuD;IACvD,UAAU,CAAC,MAAM,EAAE,MAAM;CAQ1B"}
package/dist/src/synchronizer/CollectionSynchronizer.js
DELETED
@@ -1,106 +0,0 @@
-import { stringifyAutomergeUrl } from "../DocUrl.js"
-import { DocSynchronizer } from "./DocSynchronizer.js"
-import { Synchronizer } from "./Synchronizer.js"
-import debug from "debug"
-const log = debug("automerge-repo:collectionsync")
-/** A CollectionSynchronizer is responsible for synchronizing a DocCollection with peers. */
-export class CollectionSynchronizer extends Synchronizer {
-  repo
-  /** The set of peers we are connected with */
-  #peers = new Set()
-  /** A map of documentIds to their synchronizers */
-  #docSynchronizers = {}
-  /** Used to determine if the document is know to the Collection and a synchronizer exists or is being set up */
-  #docSetUp = {}
-  constructor(repo) {
-    super()
-    this.repo = repo
-  }
-  /** Returns a synchronizer for the given document, creating one if it doesn't already exist. */
-  #fetchDocSynchronizer(documentId) {
-    if (!this.#docSynchronizers[documentId]) {
-      const handle = this.repo.find(stringifyAutomergeUrl({ documentId }))
-      this.#docSynchronizers[documentId] = this.#initDocSynchronizer(handle)
-    }
-    return this.#docSynchronizers[documentId]
-  }
-  /** Creates a new docSynchronizer and sets it up to propagate messages */
-  #initDocSynchronizer(handle) {
-    const docSynchronizer = new DocSynchronizer(handle)
-    docSynchronizer.on("message", event => this.emit("message", event))
-    return docSynchronizer
-  }
-  /** returns an array of peerIds that we share this document generously with */
-  async #documentGenerousPeers(documentId) {
-    const peers = Array.from(this.#peers)
-    const generousPeers = []
-    for (const peerId of peers) {
-      const okToShare = await this.repo.sharePolicy(peerId, documentId)
-      if (okToShare) generousPeers.push(peerId)
-    }
-    return generousPeers
-  }
-  // PUBLIC
-  /**
-   * When we receive a sync message for a document we haven't got in memory, we
-   * register it with the repo and start synchronizing
-   */
-  async receiveMessage(message) {
-    log(
-      `onSyncMessage: ${message.senderId}, ${message.documentId}, ${
-        "data" in message ? message.data.byteLength + "bytes" : ""
-      }`
-    )
-    const documentId = message.documentId
-    if (!documentId) {
-      throw new Error("received a message with an invalid documentId")
-    }
-    this.#docSetUp[documentId] = true
-    const docSynchronizer = this.#fetchDocSynchronizer(documentId)
-    docSynchronizer.receiveMessage(message)
-    // Initiate sync with any new peers
-    const peers = await this.#documentGenerousPeers(documentId)
-    docSynchronizer.beginSync(
-      peers.filter(peerId => !docSynchronizer.hasPeer(peerId))
-    )
-  }
-  /**
-   * Starts synchronizing the given document with all peers that we share it generously with.
-   */
-  addDocument(documentId) {
-    // HACK: this is a hack to prevent us from adding the same document twice
-    if (this.#docSetUp[documentId]) {
-      return
-    }
-    const docSynchronizer = this.#fetchDocSynchronizer(documentId)
-    void this.#documentGenerousPeers(documentId).then(peers => {
-      docSynchronizer.beginSync(peers)
-    })
-  }
-  // TODO: implement this
-  removeDocument(documentId) {
-    throw new Error("not implemented")
-  }
-  /** Adds a peer and maybe starts synchronizing with them */
-  addPeer(peerId) {
-    log(`adding ${peerId} & synchronizing with them`)
-    if (this.#peers.has(peerId)) {
-      return
-    }
-    this.#peers.add(peerId)
-    for (const docSynchronizer of Object.values(this.#docSynchronizers)) {
-      const { documentId } = docSynchronizer
-      this.repo.sharePolicy(peerId, documentId).then(okToShare => {
-        if (okToShare) docSynchronizer.beginSync([peerId])
-      })
-    }
-  }
-  /** Removes a peer and stops synchronizing with them */
-  removePeer(peerId) {
-    log(`removing peer ${peerId}`)
-    this.#peers.delete(peerId)
-    for (const docSynchronizer of Object.values(this.#docSynchronizers)) {
-      docSynchronizer.endSync(peerId)
-    }
-  }
-}
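Note how both `addDocument` and `addPeer` consult `repo.sharePolicy` before proactively ("generously") announcing a document to a peer. A rough usage sketch, assuming the `(peerId, documentId) => Promise<boolean>` signature used in the calls above (the peer-id prefix is purely illustrative):

```ts
import { Repo } from "@automerge/automerge-repo"
import type { PeerId, DocumentId } from "@automerge/automerge-repo"

// Only announce documents to peers whose id marks them as a sync server;
// other peers can still fetch a document they explicitly request.
const repo = new Repo({
  network: [],
  sharePolicy: async (peerId: PeerId, documentId?: DocumentId) =>
    peerId.startsWith("sync-server-"),
})
```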
package/dist/src/synchronizer/DocSynchronizer.d.ts
DELETED
@@ -1,29 +0,0 @@
-import { DocHandle } from "../DocHandle.js"
-import { PeerId } from "../types.js"
-import { Synchronizer } from "./Synchronizer.js"
-import {
-  EphemeralMessage,
-  RequestMessage,
-  SynchronizerMessage,
-  SyncMessage,
-} from "../network/messages.js"
-type PeerDocumentStatus = "unknown" | "has" | "unavailable" | "wants"
-/**
- * DocSynchronizer takes a handle to an Automerge document, and receives & dispatches sync messages
- * to bring it inline with all other peers' versions.
- */
-export declare class DocSynchronizer extends Synchronizer {
-  #private
-  private handle
-  constructor(handle: DocHandle<any>)
-  get peerStates(): Record<PeerId, PeerDocumentStatus>
-  get documentId(): import("../types.js").DocumentId
-  hasPeer(peerId: PeerId): boolean
-  beginSync(peerIds: PeerId[]): void
-  endSync(peerId: PeerId): void
-  receiveMessage(message: SynchronizerMessage): void
-  receiveEphemeralMessage(message: EphemeralMessage): void
-  receiveSyncMessage(message: SyncMessage | RequestMessage): void
-}
-export {}
-//# sourceMappingURL=DocSynchronizer.d.ts.map
package/dist/src/synchronizer/DocSynchronizer.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"DocSynchronizer.d.ts","sourceRoot":"","sources":["../../../src/synchronizer/DocSynchronizer.ts"],"names":[],"mappings":"AACA,OAAO,EAEL,SAAS,EAKV,MAAM,iBAAiB,CAAA;AACxB,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAA;AACpC,OAAO,EAAE,YAAY,EAAE,MAAM,mBAAmB,CAAA;AAGhD,OAAO,EACL,gBAAgB,EAIhB,cAAc,EACd,mBAAmB,EACnB,WAAW,EACZ,MAAM,wBAAwB,CAAA;AAE/B,KAAK,kBAAkB,GAAG,SAAS,GAAG,KAAK,GAAG,aAAa,GAAG,OAAO,CAAA;AAGrE;;;GAGG;AACH,qBAAa,eAAgB,SAAQ,YAAY;;IAiBnC,OAAO,CAAC,MAAM;gBAAN,MAAM,EAAE,SAAS,CAAC,GAAG,CAAC;IAoB1C,IAAI,UAAU,uCAEb;IAED,IAAI,UAAU,qCAEb;IAiHD,OAAO,CAAC,MAAM,EAAE,MAAM;IAItB,SAAS,CAAC,OAAO,EAAE,MAAM,EAAE;IA6B3B,OAAO,CAAC,MAAM,EAAE,MAAM;IAKtB,cAAc,CAAC,OAAO,EAAE,mBAAmB;IAkB3C,uBAAuB,CAAC,OAAO,EAAE,gBAAgB;IAuBjD,kBAAkB,CAAC,OAAO,EAAE,WAAW,GAAG,cAAc;CA2EzD"}
package/dist/src/synchronizer/DocSynchronizer.js
DELETED
@@ -1,263 +0,0 @@
-import * as A from "@automerge/automerge/next"
-import { READY, REQUESTING, UNAVAILABLE } from "../DocHandle.js"
-import { Synchronizer } from "./Synchronizer.js"
-import debug from "debug"
-import { isRequestMessage } from "../network/messages.js"
-import { decode } from "cbor-x"
-/**
- * DocSynchronizer takes a handle to an Automerge document, and receives & dispatches sync messages
- * to bring it inline with all other peers' versions.
- */
-export class DocSynchronizer extends Synchronizer {
-  handle
-  #log
-  #conciseLog
-  #opsLog
-  /** Active peers */
-  #peers = []
-  #peerDocumentStatuses = {}
-  /** Sync state for each peer we've communicated with (including inactive peers) */
-  #syncStates = {}
-  #pendingSyncMessages = []
-  #syncStarted = false
-  constructor(handle) {
-    super()
-    this.handle = handle
-    const docId = handle.documentId.slice(0, 5)
-    this.#conciseLog = debug(`automerge-repo:concise:docsync:${docId}`) // Only logs one line per receive/send
-    this.#log = debug(`automerge-repo:docsync:${docId}`)
-    this.#opsLog = debug(`automerge-repo:ops:docsync:${docId}`) // Log list of ops of each message
-    handle.on("change", () => this.#syncWithPeers())
-    handle.on("ephemeral-message-outbound", payload =>
-      this.#broadcastToPeers(payload)
-    )
-    // Process pending sync messages immediately after the handle becomes ready.
-    void (async () => {
-      await handle.doc([READY, REQUESTING])
-      this.#processAllPendingSyncMessages()
-    })()
-  }
-  get peerStates() {
-    return this.#peerDocumentStatuses
-  }
-  get documentId() {
-    return this.handle.documentId
-  }
-  /// PRIVATE
-  async #syncWithPeers() {
-    this.#log(`syncWithPeers`)
-    const doc = await this.handle.doc()
-    if (doc === undefined) return
-    this.#peers.forEach(peerId => this.#sendSyncMessage(peerId, doc))
-  }
-  async #broadcastToPeers({ data }) {
-    this.#log(`broadcastToPeers`, this.#peers)
-    this.#peers.forEach(peerId => this.#sendEphemeralMessage(peerId, data))
-  }
-  #sendEphemeralMessage(peerId, data) {
-    this.#log(`sendEphemeralMessage ->${peerId}`)
-    this.emit("message", {
-      type: "ephemeral",
-      targetId: peerId,
-      documentId: this.handle.documentId,
-      data,
-    })
-  }
-  #getSyncState(peerId) {
-    if (!this.#peers.includes(peerId)) {
-      this.#log("adding a new peer", peerId)
-      this.#peers.push(peerId)
-    }
-    // when a peer is added, we don't know if it has the document or not
-    if (!(peerId in this.#peerDocumentStatuses)) {
-      this.#peerDocumentStatuses[peerId] = "unknown"
-    }
-    return this.#syncStates[peerId] ?? A.initSyncState()
-  }
-  #setSyncState(peerId, syncState) {
-    // TODO: we maybe should be persisting sync states. But we want to be careful about how often we
-    // do that, because it can generate a lot of disk activity.
-    // TODO: we only need to do this on reconnect
-    this.#syncStates[peerId] = syncState
-  }
-  #sendSyncMessage(peerId, doc) {
-    this.#log(`sendSyncMessage ->${peerId}`)
-    const syncState = this.#getSyncState(peerId)
-    const [newSyncState, message] = A.generateSyncMessage(doc, syncState)
-    this.#setSyncState(peerId, newSyncState)
-    if (message) {
-      this.#logMessage(`sendSyncMessage 🡒 ${peerId}`, message)
-      const decoded = A.decodeSyncMessage(message)
-      if (
-        !this.handle.isReady() &&
-        decoded.heads.length === 0 &&
-        newSyncState.sharedHeads.length === 0 &&
-        !Object.values(this.#peerDocumentStatuses).includes("has") &&
-        this.#peerDocumentStatuses[peerId] === "unknown"
-      ) {
-        // we don't have the document (or access to it), so we request it
-        this.emit("message", {
-          type: "request",
-          targetId: peerId,
-          documentId: this.handle.documentId,
-          data: message,
-        })
-      } else {
-        this.emit("message", {
-          type: "sync",
-          targetId: peerId,
-          data: message,
-          documentId: this.handle.documentId,
-        })
-      }
-      // if we have sent heads, then the peer now has or will have the document
-      if (decoded.heads.length > 0) {
-        this.#peerDocumentStatuses[peerId] = "has"
-      }
-    }
-  }
-  #logMessage = (label, message) => {
-    // This is real expensive...
-    return
-    const size = message.byteLength
-    const logText = `${label} ${size}b`
-    const decoded = A.decodeSyncMessage(message)
-    this.#conciseLog(logText)
-    this.#log(logText, decoded)
-    // expanding is expensive, so only do it if we're logging at this level
-    const expanded = this.#opsLog.enabled
-      ? decoded.changes.flatMap(change =>
-          A.decodeChange(change).ops.map(op => JSON.stringify(op))
-        )
-      : null
-    this.#opsLog(logText, expanded)
-  }
-  /// PUBLIC
-  hasPeer(peerId) {
-    return this.#peers.includes(peerId)
-  }
-  beginSync(peerIds) {
-    this.#log(`beginSync: ${peerIds.join(", ")}`)
-    // HACK: if we have a sync state already, we round-trip it through the encoding system to make
-    // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
-    // messages during disconnection.
-    // TODO: cover that case with a test and remove this hack
-    peerIds.forEach(peerId => {
-      const syncStateRaw = this.#getSyncState(peerId)
-      const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw))
-      this.#setSyncState(peerId, syncState)
-    })
-    // At this point if we don't have anything in our storage, we need to use an empty doc to sync
-    // with; but we don't want to surface that state to the front end
-    void this.handle.doc([READY, REQUESTING, UNAVAILABLE]).then(doc => {
-      // we register out peers first, then say that sync has started
-      this.#syncStarted = true
-      this.#checkDocUnavailable()
-      if (doc === undefined) return
-      peerIds.forEach(peerId => {
-        this.#sendSyncMessage(peerId, doc)
-      })
-    })
-  }
-  endSync(peerId) {
-    this.#log(`removing peer ${peerId}`)
-    this.#peers = this.#peers.filter(p => p !== peerId)
-  }
-  receiveMessage(message) {
-    switch (message.type) {
-      case "sync":
-      case "request":
-        this.receiveSyncMessage(message)
-        break
-      case "ephemeral":
-        this.receiveEphemeralMessage(message)
-        break
-      case "doc-unavailable":
-        this.#peerDocumentStatuses[message.senderId] = "unavailable"
-        this.#checkDocUnavailable()
-        break
-      default:
-        throw new Error(`unknown message type: ${message}`)
-    }
-  }
-  receiveEphemeralMessage(message) {
-    if (message.documentId !== this.handle.documentId)
-      throw new Error(`channelId doesn't match documentId`)
-    const { senderId, data } = message
-    const contents = decode(data)
-    this.handle.emit("ephemeral-message", {
-      handle: this.handle,
-      senderId,
-      message: contents,
-    })
-    this.#peers.forEach(peerId => {
-      if (peerId === senderId) return
-      this.emit("message", {
-        ...message,
-        targetId: peerId,
-      })
-    })
-  }
-  receiveSyncMessage(message) {
-    if (message.documentId !== this.handle.documentId)
-      throw new Error(`channelId doesn't match documentId`)
-    // We need to block receiving the syncMessages until we've checked local storage
-    if (!this.handle.inState([READY, REQUESTING, UNAVAILABLE])) {
-      this.#pendingSyncMessages.push(message)
-      return
-    }
-    this.#processAllPendingSyncMessages()
-    this.#processSyncMessage(message)
-  }
-  #processSyncMessage(message) {
-    if (isRequestMessage(message)) {
-      this.#peerDocumentStatuses[message.senderId] = "wants"
-    }
-    this.#checkDocUnavailable()
-    // if the message has heads, then the peer has the document
-    if (A.decodeSyncMessage(message.data).heads.length > 0) {
-      this.#peerDocumentStatuses[message.senderId] = "has"
-    }
-    this.handle.update(doc => {
-      const [newDoc, newSyncState] = A.receiveSyncMessage(
-        doc,
-        this.#getSyncState(message.senderId),
-        message.data
-      )
-      this.#setSyncState(message.senderId, newSyncState)
-      // respond to just this peer (as required)
-      this.#sendSyncMessage(message.senderId, doc)
-      return newDoc
-    })
-    this.#checkDocUnavailable()
-  }
-  #checkDocUnavailable() {
-    // if we know none of the peers have the document, tell all our peers that we don't either
-    if (
-      this.#syncStarted &&
-      this.handle.inState([REQUESTING]) &&
-      this.#peers.every(
-        peerId =>
-          this.#peerDocumentStatuses[peerId] === "unavailable" ||
-          this.#peerDocumentStatuses[peerId] === "wants"
-      )
-    ) {
-      this.#peers
-        .filter(peerId => this.#peerDocumentStatuses[peerId] === "wants")
-        .forEach(peerId => {
-          this.emit("message", {
-            type: "doc-unavailable",
-            documentId: this.handle.documentId,
-            targetId: peerId,
-          })
-        })
-      this.handle.unavailable()
-    }
-  }
-  #processAllPendingSyncMessages() {
-    for (const message of this.#pendingSyncMessages) {
-      this.#processSyncMessage(message)
-    }
-    this.#pendingSyncMessages = []
-  }
-}
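The heavy lifting above is Automerge's standard sync-state protocol: one `SyncState` per peer, `A.generateSyncMessage` producing the next outbound message (or `null` once both sides are in sync), and `A.receiveSyncMessage` folding inbound messages into the document. A self-contained two-peer exchange using those same calls, with no repo or networking involved:

```ts
import * as A from "@automerge/automerge/next"

type Doc = { text?: string }

let alice = A.change(A.init<Doc>(), d => (d.text = "hello"))
let bob = A.init<Doc>()
let aliceState = A.initSyncState()
let bobState = A.initSyncState()

// Ping-pong until both sides report nothing left to send.
for (;;) {
  let aliceMsg: Uint8Array | null
  ;[aliceState, aliceMsg] = A.generateSyncMessage(alice, aliceState)
  if (aliceMsg) [bob, bobState] = A.receiveSyncMessage(bob, bobState, aliceMsg)

  let bobMsg: Uint8Array | null
  ;[bobState, bobMsg] = A.generateSyncMessage(bob, bobState)
  if (bobMsg) [alice, aliceState] = A.receiveSyncMessage(alice, aliceState, bobMsg)

  if (!aliceMsg && !bobMsg) break
}

console.log(bob.text) // "hello"
```

The `beginSync` round-trip through `encodeSyncState`/`decodeSyncState` above exists precisely because this per-peer state is mutable and delicate: re-encoding resets the in-flight bookkeeping without losing the shared heads.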
package/dist/src/synchronizer/Synchronizer.d.ts
DELETED
@@ -1,9 +0,0 @@
-import { EventEmitter } from "eventemitter3"
-import { Message, MessageContents } from "../network/messages.js"
-export declare abstract class Synchronizer extends EventEmitter<SynchronizerEvents> {
-  abstract receiveMessage(message: Message): void
-}
-export interface SynchronizerEvents {
-  message: (arg: MessageContents) => void
-}
-//# sourceMappingURL=Synchronizer.d.ts.map
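The contract here is deliberately small: a synchronizer is just an `eventemitter3` emitter that accepts inbound messages and emits `"message"` events for the network layer to deliver. A hypothetical minimal subclass with simplified message shapes, just to show the shape of the contract (not part of the package):

```ts
import { EventEmitter } from "eventemitter3"

// Simplified stand-in for the real Message types in network/messages.
type Message = { type: string; senderId: string; targetId: string }

interface SynchronizerEvents {
  message: (arg: Message) => void
}

// A do-nothing synchronizer that bounces every message back to its sender.
class EchoSynchronizer extends EventEmitter<SynchronizerEvents> {
  receiveMessage(message: Message): void {
    this.emit("message", { ...message, targetId: message.senderId })
  }
}
```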
package/dist/src/synchronizer/Synchronizer.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"Synchronizer.d.ts","sourceRoot":"","sources":["../../../src/synchronizer/Synchronizer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAC5C,OAAO,EAAE,OAAO,EAAE,eAAe,EAAE,MAAM,wBAAwB,CAAA;AAEjE,8BAAsB,YAAa,SAAQ,YAAY,CAAC,kBAAkB,CAAC;IACzE,QAAQ,CAAC,cAAc,CAAC,OAAO,EAAE,OAAO,GAAG,IAAI;CAChD;AAED,MAAM,WAAW,kBAAkB;IACjC,OAAO,EAAE,CAAC,GAAG,EAAE,eAAe,KAAK,IAAI,CAAA;CACxC"}
package/dist/src/types.d.ts
DELETED
@@ -1,16 +0,0 @@
-export type DocumentId = string & {
-  __documentId: true
-}
-export type AutomergeUrl = string & {
-  __documentUrl: true
-}
-export type BinaryDocumentId = Uint8Array & {
-  __binaryDocumentId: true
-}
-export type PeerId = string & {
-  __peerId: false
-}
-export type DistributiveOmit<T, K extends keyof any> = T extends any
-  ? Omit<T, K>
-  : never
-//# sourceMappingURL=types.d.ts.map
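These are "branded" string types: intersecting `string` with a phantom marker property means a plain string won't type-check where a `DocumentId` or `PeerId` is expected, even though both are ordinary strings at runtime. A quick illustration (the cast helper and the id literal are ad hoc, for the demo only):

```ts
type DocumentId = string & { __documentId: true }
type PeerId = string & { __peerId: false }

// Ad hoc cast for the demo; the real package derives ids for you.
const asDocumentId = (s: string) => s as DocumentId

declare function addDocument(documentId: DocumentId): void

addDocument(asDocumentId("some-document-id")) // ok
// addDocument("some-document-id")            // type error: plain string
// addDocument("peer-1" as PeerId)            // type error: wrong brand
```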
package/dist/src/types.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG;IAAE,YAAY,EAAE,IAAI,CAAA;CAAE,CAAA;AACxD,MAAM,MAAM,YAAY,GAAG,MAAM,GAAG;IAAE,aAAa,EAAE,IAAI,CAAA;CAAE,CAAA;AAC3D,MAAM,MAAM,gBAAgB,GAAG,UAAU,GAAG;IAAE,kBAAkB,EAAE,IAAI,CAAA;CAAE,CAAA;AAExE,MAAM,MAAM,MAAM,GAAG,MAAM,GAAG;IAAE,QAAQ,EAAE,KAAK,CAAA;CAAE,CAAA;AAEjD,MAAM,MAAM,gBAAgB,CAAC,CAAC,EAAE,CAAC,SAAS,MAAM,GAAG,IAAI,CAAC,SAAS,GAAG,GAChE,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,GACV,KAAK,CAAA"}
package/dist/src/types.js
DELETED
@@ -1 +0,0 @@
-export {}
package/dist/test/CollectionSynchronizer.test.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"CollectionSynchronizer.test.d.ts","sourceRoot":"","sources":["../../test/CollectionSynchronizer.test.ts"],"names":[],"mappings":""}
package/dist/test/CollectionSynchronizer.test.js
DELETED
@@ -1,57 +0,0 @@
-import assert from "assert"
-import { beforeEach } from "mocha"
-import { Repo } from "../src/index.js"
-import { CollectionSynchronizer } from "../src/synchronizer/CollectionSynchronizer.js"
-describe("CollectionSynchronizer", () => {
-  let repo
-  let synchronizer
-  beforeEach(() => {
-    repo = new Repo({
-      network: [],
-    })
-    synchronizer = new CollectionSynchronizer(repo)
-  })
-  it("is not null", async () => {
-    assert(synchronizer !== null)
-  })
-  it("starts synchronizing a document to peers when added", done => {
-    const handle = repo.create()
-    synchronizer.addPeer("peer1")
-    synchronizer.once("message", event => {
-      assert(event.targetId === "peer1")
-      assert(event.documentId === handle.documentId)
-      done()
-    })
-    synchronizer.addDocument(handle.documentId)
-  })
-  it("starts synchronizing existing documents when a peer is added", done => {
-    const handle = repo.create()
-    synchronizer.addDocument(handle.documentId)
-    synchronizer.once("message", event => {
-      assert(event.targetId === "peer1")
-      assert(event.documentId === handle.documentId)
-      done()
-    })
-    synchronizer.addPeer("peer1")
-  })
-  it("should not synchronize to a peer which is excluded from the share policy", done => {
-    const handle = repo.create()
-    repo.sharePolicy = async peerId => peerId !== "peer1"
-    synchronizer.addDocument(handle.documentId)
-    synchronizer.once("message", () => {
-      done(new Error("Should not have sent a message"))
-    })
-    synchronizer.addPeer("peer1")
-    setTimeout(done)
-  })
-  it("should not synchronize a document which is excluded from the share policy", done => {
-    const handle = repo.create()
-    repo.sharePolicy = async (_, documentId) => documentId !== handle.documentId
-    synchronizer.addPeer("peer2")
-    synchronizer.once("message", () => {
-      done(new Error("Should not have sent a message"))
-    })
-    synchronizer.addDocument(handle.documentId)
-    setTimeout(done)
-  })
-})