@automerge/automerge-repo 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc +28 -0
- package/.mocharc.json +5 -0
- package/README.md +298 -0
- package/TODO.md +54 -0
- package/dist/DocCollection.d.ts +44 -0
- package/dist/DocCollection.d.ts.map +1 -0
- package/dist/DocCollection.js +85 -0
- package/dist/DocHandle.d.ts +78 -0
- package/dist/DocHandle.d.ts.map +1 -0
- package/dist/DocHandle.js +227 -0
- package/dist/EphemeralData.d.ts +27 -0
- package/dist/EphemeralData.d.ts.map +1 -0
- package/dist/EphemeralData.js +28 -0
- package/dist/Repo.d.ts +30 -0
- package/dist/Repo.d.ts.map +1 -0
- package/dist/Repo.js +97 -0
- package/dist/helpers/arraysAreEqual.d.ts +2 -0
- package/dist/helpers/arraysAreEqual.d.ts.map +1 -0
- package/dist/helpers/arraysAreEqual.js +1 -0
- package/dist/helpers/eventPromise.d.ts +5 -0
- package/dist/helpers/eventPromise.d.ts.map +1 -0
- package/dist/helpers/eventPromise.js +6 -0
- package/dist/helpers/headsAreSame.d.ts +3 -0
- package/dist/helpers/headsAreSame.d.ts.map +1 -0
- package/dist/helpers/headsAreSame.js +7 -0
- package/dist/helpers/mergeArrays.d.ts +2 -0
- package/dist/helpers/mergeArrays.d.ts.map +1 -0
- package/dist/helpers/mergeArrays.js +15 -0
- package/dist/helpers/pause.d.ts +3 -0
- package/dist/helpers/pause.d.ts.map +1 -0
- package/dist/helpers/pause.js +7 -0
- package/dist/helpers/withTimeout.d.ts +9 -0
- package/dist/helpers/withTimeout.d.ts.map +1 -0
- package/dist/helpers/withTimeout.js +22 -0
- package/dist/index.d.ts +13 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +10 -0
- package/dist/network/NetworkAdapter.d.ts +37 -0
- package/dist/network/NetworkAdapter.d.ts.map +1 -0
- package/dist/network/NetworkAdapter.js +4 -0
- package/dist/network/NetworkSubsystem.d.ts +23 -0
- package/dist/network/NetworkSubsystem.d.ts.map +1 -0
- package/dist/network/NetworkSubsystem.js +89 -0
- package/dist/storage/StorageAdapter.d.ts +6 -0
- package/dist/storage/StorageAdapter.d.ts.map +1 -0
- package/dist/storage/StorageAdapter.js +2 -0
- package/dist/storage/StorageSubsystem.d.ts +12 -0
- package/dist/storage/StorageSubsystem.d.ts.map +1 -0
- package/dist/storage/StorageSubsystem.js +65 -0
- package/dist/synchronizer/CollectionSynchronizer.d.ts +24 -0
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -0
- package/dist/synchronizer/CollectionSynchronizer.js +92 -0
- package/dist/synchronizer/DocSynchronizer.d.ts +18 -0
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -0
- package/dist/synchronizer/DocSynchronizer.js +136 -0
- package/dist/synchronizer/Synchronizer.d.ts +10 -0
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -0
- package/dist/synchronizer/Synchronizer.js +3 -0
- package/dist/test-utilities/adapter-tests.d.ts +21 -0
- package/dist/test-utilities/adapter-tests.d.ts.map +1 -0
- package/dist/test-utilities/adapter-tests.js +117 -0
- package/dist/types.d.ts +10 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +1 -0
- package/fuzz/fuzz.ts +129 -0
- package/package.json +65 -0
- package/src/DocCollection.ts +123 -0
- package/src/DocHandle.ts +386 -0
- package/src/EphemeralData.ts +46 -0
- package/src/Repo.ts +155 -0
- package/src/helpers/arraysAreEqual.ts +2 -0
- package/src/helpers/eventPromise.ts +10 -0
- package/src/helpers/headsAreSame.ts +8 -0
- package/src/helpers/mergeArrays.ts +17 -0
- package/src/helpers/pause.ts +9 -0
- package/src/helpers/withTimeout.ts +28 -0
- package/src/index.ts +22 -0
- package/src/network/NetworkAdapter.ts +54 -0
- package/src/network/NetworkSubsystem.ts +130 -0
- package/src/storage/StorageAdapter.ts +5 -0
- package/src/storage/StorageSubsystem.ts +91 -0
- package/src/synchronizer/CollectionSynchronizer.ts +112 -0
- package/src/synchronizer/DocSynchronizer.ts +182 -0
- package/src/synchronizer/Synchronizer.ts +15 -0
- package/src/test-utilities/adapter-tests.ts +163 -0
- package/src/types.ts +3 -0
- package/test/CollectionSynchronizer.test.ts +73 -0
- package/test/DocCollection.test.ts +19 -0
- package/test/DocHandle.test.ts +281 -0
- package/test/DocSynchronizer.test.ts +68 -0
- package/test/EphemeralData.test.ts +44 -0
- package/test/Network.test.ts +13 -0
- package/test/Repo.test.ts +367 -0
- package/test/StorageSubsystem.test.ts +78 -0
- package/test/helpers/DummyNetworkAdapter.ts +8 -0
- package/test/helpers/DummyStorageAdapter.ts +23 -0
- package/test/helpers/getRandomItem.ts +4 -0
- package/test/types.ts +3 -0
- package/tsconfig.json +16 -0
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import EventEmitter from "eventemitter3"
|
|
2
|
+
import { PeerId, ChannelId } from "../types.js"
|
|
3
|
+
|
|
4
|
+
/**
 * Base class for network transports.
 *
 * A concrete adapter connects to some medium (websocket, message channel, etc.),
 * announces peers it discovers via "peer-candidate", surfaces inbound bytes via
 * "message", and delivers outbound bytes in `sendMessage`. Instances are driven
 * by the NetworkSubsystem, which wires up these events and replays channel joins.
 */
export abstract class NetworkAdapter extends EventEmitter<NetworkAdapterEvents> {
  // The local peer's id, if the adapter tracks one.
  peerId?: PeerId // hmmm, maybe not

  // NOTE(review): NetworkSubsystem invokes this as `connect(this.peerId)`, so
  // the `url` parameter name looks stale — confirm what implementations expect.
  abstract connect(url?: string): void

  /**
   * Delivers `message` to `peerId` on `channelId`. When `broadcast` is true the
   * adapter is presumably expected to fan the message out to every reachable
   * peer on that channel — confirm against concrete adapter implementations.
   */
  abstract sendMessage(
    peerId: PeerId,
    channelId: ChannelId,
    message: Uint8Array,
    broadcast: boolean
  ): void

  /** Starts participating in the given channel. */
  abstract join(channelId: ChannelId): void

  /** Stops participating in the given channel. */
  abstract leave(channelId: ChannelId): void
}
|
|
20
|
+
|
|
21
|
+
// events & payloads

/** Events a NetworkAdapter implementation emits to its NetworkSubsystem. */
export interface NetworkAdapterEvents {
  /** The adapter's underlying transport is ready. */
  open: (payload: OpenPayload) => void
  /** The adapter's underlying transport has shut down. */
  close: () => void
  /** A remote peer was discovered on a channel. */
  "peer-candidate": (payload: PeerCandidatePayload) => void
  /** A previously known peer went away. */
  "peer-disconnected": (payload: PeerDisconnectedPayload) => void
  /** Raw bytes arrived from a remote peer. */
  message: (payload: InboundMessagePayload) => void
}

export interface OpenPayload {
  /** The adapter that just opened. */
  network: NetworkAdapter
}

export interface PeerCandidatePayload {
  peerId: PeerId
  channelId: ChannelId
}

/** An outbound message: who it's for, on what channel, and the payload. */
export interface MessagePayload {
  targetId: PeerId
  channelId: ChannelId
  message: Uint8Array
  broadcast: boolean
}

/** An inbound message; additionally carries who sent it. */
export interface InboundMessagePayload extends MessagePayload {
  type?: string
  senderId: PeerId
}

export interface PeerDisconnectedPayload {
  peerId: PeerId
}
|
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
import EventEmitter from "eventemitter3"
|
|
2
|
+
import {
|
|
3
|
+
InboundMessagePayload,
|
|
4
|
+
NetworkAdapter,
|
|
5
|
+
PeerDisconnectedPayload,
|
|
6
|
+
} from "./NetworkAdapter.js"
|
|
7
|
+
import { ChannelId, PeerId } from "../types.js"
|
|
8
|
+
|
|
9
|
+
import debug from "debug"
|
|
10
|
+
|
|
11
|
+
export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
|
|
12
|
+
#log: debug.Debugger
|
|
13
|
+
#adaptersByPeer: Record<PeerId, NetworkAdapter> = {}
|
|
14
|
+
#channels: ChannelId[]
|
|
15
|
+
|
|
16
|
+
constructor(
|
|
17
|
+
private adapters: NetworkAdapter[],
|
|
18
|
+
public peerId = randomPeerId()
|
|
19
|
+
) {
|
|
20
|
+
super()
|
|
21
|
+
this.#log = debug(`automerge-repo:network:${this.peerId}`)
|
|
22
|
+
this.#channels = []
|
|
23
|
+
this.adapters.forEach(a => this.addNetworkAdapter(a))
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
addNetworkAdapter(networkAdapter: NetworkAdapter) {
|
|
27
|
+
networkAdapter.connect(this.peerId)
|
|
28
|
+
|
|
29
|
+
networkAdapter.on("peer-candidate", ({ peerId, channelId }) => {
|
|
30
|
+
this.#log(`peer candidate: ${peerId} `)
|
|
31
|
+
|
|
32
|
+
// TODO: This is where authentication would happen
|
|
33
|
+
|
|
34
|
+
if (!this.#adaptersByPeer[peerId]) {
|
|
35
|
+
// TODO: handle losing a server here
|
|
36
|
+
this.#adaptersByPeer[peerId] = networkAdapter
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
this.emit("peer", { peerId, channelId })
|
|
40
|
+
})
|
|
41
|
+
|
|
42
|
+
networkAdapter.on("peer-disconnected", ({ peerId }) => {
|
|
43
|
+
this.#log(`peer disconnected: ${peerId} `)
|
|
44
|
+
delete this.#adaptersByPeer[peerId]
|
|
45
|
+
this.emit("peer-disconnected", { peerId })
|
|
46
|
+
})
|
|
47
|
+
|
|
48
|
+
networkAdapter.on("message", msg => {
|
|
49
|
+
const { senderId, channelId, broadcast, message } = msg
|
|
50
|
+
this.#log(`message from ${senderId}`)
|
|
51
|
+
|
|
52
|
+
// If we receive a broadcast message from a network adapter we need to re-broadcast it to all
|
|
53
|
+
// our other peers. This is the world's worst gossip protocol.
|
|
54
|
+
|
|
55
|
+
// TODO: This relies on the network forming a tree! If there are cycles, this approach will
|
|
56
|
+
// loop messages around forever.
|
|
57
|
+
if (broadcast) {
|
|
58
|
+
Object.entries(this.#adaptersByPeer)
|
|
59
|
+
.filter(([id]) => id !== senderId)
|
|
60
|
+
.forEach(([id, peer]) => {
|
|
61
|
+
peer.sendMessage(id as PeerId, channelId, message, broadcast)
|
|
62
|
+
})
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
this.emit("message", msg)
|
|
66
|
+
})
|
|
67
|
+
|
|
68
|
+
networkAdapter.on("close", () => {
|
|
69
|
+
this.#log("adapter closed")
|
|
70
|
+
Object.entries(this.#adaptersByPeer).forEach(([peerId, other]) => {
|
|
71
|
+
if (other === networkAdapter) {
|
|
72
|
+
delete this.#adaptersByPeer[peerId as PeerId]
|
|
73
|
+
}
|
|
74
|
+
})
|
|
75
|
+
})
|
|
76
|
+
|
|
77
|
+
this.#channels.forEach(c => networkAdapter.join(c))
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
sendMessage(
|
|
81
|
+
peerId: PeerId,
|
|
82
|
+
channelId: ChannelId,
|
|
83
|
+
message: Uint8Array,
|
|
84
|
+
broadcast: boolean
|
|
85
|
+
) {
|
|
86
|
+
if (broadcast) {
|
|
87
|
+
Object.entries(this.#adaptersByPeer).forEach(([id, peer]) => {
|
|
88
|
+
this.#log(`sending broadcast to ${id}`)
|
|
89
|
+
peer.sendMessage(id as PeerId, channelId, message, true)
|
|
90
|
+
})
|
|
91
|
+
} else {
|
|
92
|
+
const peer = this.#adaptersByPeer[peerId]
|
|
93
|
+
if (!peer) {
|
|
94
|
+
this.#log(`Tried to send message but peer not found: ${peerId}`)
|
|
95
|
+
return
|
|
96
|
+
}
|
|
97
|
+
this.#log(`Sending message to ${peerId}`)
|
|
98
|
+
peer.sendMessage(peerId, channelId, message, false)
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
join(channelId: ChannelId) {
|
|
103
|
+
this.#log(`Joining channel ${channelId}`)
|
|
104
|
+
this.#channels.push(channelId)
|
|
105
|
+
this.adapters.forEach(a => a.join(channelId))
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
leave(channelId: ChannelId) {
|
|
109
|
+
this.#log(`Leaving channel ${channelId}`)
|
|
110
|
+
this.#channels = this.#channels.filter(c => c !== channelId)
|
|
111
|
+
this.adapters.forEach(a => a.leave(channelId))
|
|
112
|
+
}
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
function randomPeerId() {
|
|
116
|
+
return `user-${Math.round(Math.random() * 100000)}` as PeerId
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
// events & payloads

/** Events the NetworkSubsystem re-emits toward the Repo. */
export interface NetworkSubsystemEvents {
  /** A new peer became reachable on some channel. */
  peer: (payload: PeerPayload) => void
  /** A known peer went away. */
  "peer-disconnected": (payload: PeerDisconnectedPayload) => void
  /** Raw bytes arrived from a remote peer. */
  message: (payload: InboundMessagePayload) => void
}

export interface PeerPayload {
  peerId: PeerId
  channelId: ChannelId
}
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
import * as A from "@automerge/automerge"
|
|
2
|
+
import { DocumentId } from "../types.js"
|
|
3
|
+
import { mergeArrays } from "../helpers/mergeArrays.js"
|
|
4
|
+
import { StorageAdapter } from "./StorageAdapter.js"
|
|
5
|
+
|
|
6
|
+
export class StorageSubsystem {
|
|
7
|
+
#storageAdapter: StorageAdapter
|
|
8
|
+
#changeCount: Record<DocumentId, number> = {}
|
|
9
|
+
|
|
10
|
+
constructor(storageAdapter: StorageAdapter) {
|
|
11
|
+
this.#storageAdapter = storageAdapter
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
#saveIncremental(documentId: DocumentId, doc: A.Doc<unknown>) {
|
|
15
|
+
const binary = A.saveIncremental(doc)
|
|
16
|
+
if (binary && binary.length > 0) {
|
|
17
|
+
if (!this.#changeCount[documentId]) {
|
|
18
|
+
this.#changeCount[documentId] = 0
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
this.#storageAdapter.save(
|
|
22
|
+
`${documentId}.incremental.${this.#changeCount[documentId]}`,
|
|
23
|
+
binary
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
this.#changeCount[documentId]++
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
#saveTotal(documentId: DocumentId, doc: A.Doc<unknown>) {
|
|
31
|
+
const binary = A.save(doc)
|
|
32
|
+
this.#storageAdapter.save(`${documentId}.snapshot`, binary)
|
|
33
|
+
|
|
34
|
+
for (let i = 0; i < this.#changeCount[documentId]; i++) {
|
|
35
|
+
this.#storageAdapter.remove(`${documentId}.incremental.${i}`)
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
this.#changeCount[documentId] = 0
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
async loadBinary(documentId: DocumentId): Promise<Uint8Array> {
|
|
42
|
+
const result = []
|
|
43
|
+
let binary = await this.#storageAdapter.load(`${documentId}.snapshot`)
|
|
44
|
+
if (binary && binary.length > 0) {
|
|
45
|
+
result.push(binary)
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
let index = 0
|
|
49
|
+
while (
|
|
50
|
+
(binary = await this.#storageAdapter.load(
|
|
51
|
+
`${documentId}.incremental.${index}`
|
|
52
|
+
))
|
|
53
|
+
) {
|
|
54
|
+
this.#changeCount[documentId] = index + 1
|
|
55
|
+
if (binary && binary.length > 0) result.push(binary)
|
|
56
|
+
index += 1
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
return mergeArrays(result)
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
async load<T>(
|
|
63
|
+
documentId: DocumentId,
|
|
64
|
+
prevDoc: A.Doc<T> = A.init<T>()
|
|
65
|
+
): Promise<A.Doc<T>> {
|
|
66
|
+
const doc = A.loadIncremental(prevDoc, await this.loadBinary(documentId))
|
|
67
|
+
A.saveIncremental(doc)
|
|
68
|
+
return doc
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
save(documentId: DocumentId, doc: A.Doc<unknown>) {
|
|
72
|
+
if (this.#shouldCompact(documentId)) {
|
|
73
|
+
this.#saveTotal(documentId, doc)
|
|
74
|
+
} else {
|
|
75
|
+
this.#saveIncremental(documentId, doc)
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
remove(documentId: DocumentId) {
|
|
80
|
+
this.#storageAdapter.remove(`${documentId}.snapshot`)
|
|
81
|
+
|
|
82
|
+
for (let i = 0; i < this.#changeCount[documentId]; i++) {
|
|
83
|
+
this.#storageAdapter.remove(`${documentId}.incremental.${i}`)
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
// TODO: make this, you know, good.
|
|
88
|
+
#shouldCompact(documentId: DocumentId) {
|
|
89
|
+
return this.#changeCount[documentId] >= 20
|
|
90
|
+
}
|
|
91
|
+
}
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
import { DocCollection } from "../DocCollection.js"
|
|
2
|
+
import { DocHandle } from "../DocHandle.js"
|
|
3
|
+
import { ChannelId, DocumentId, PeerId } from "../types.js"
|
|
4
|
+
import { DocSynchronizer } from "./DocSynchronizer.js"
|
|
5
|
+
import { Synchronizer } from "./Synchronizer.js"
|
|
6
|
+
|
|
7
|
+
import debug from "debug"
|
|
8
|
+
const log = debug("automerge-repo:collectionsync")
|
|
9
|
+
|
|
10
|
+
/** A CollectionSynchronizer is responsible for synchronizing a DocCollection with peers. */
|
|
11
|
+
export class CollectionSynchronizer extends Synchronizer {
|
|
12
|
+
/** The set of peers we are connected with */
|
|
13
|
+
#peers: Set<PeerId> = new Set()
|
|
14
|
+
|
|
15
|
+
/** A map of documentIds to their synchronizers */
|
|
16
|
+
#docSynchronizers: Record<DocumentId, DocSynchronizer> = {}
|
|
17
|
+
|
|
18
|
+
constructor(private repo: DocCollection) {
|
|
19
|
+
super()
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
/** Returns a synchronizer for the given document, creating one if it doesn't already exist. */
|
|
23
|
+
#fetchDocSynchronizer(documentId: DocumentId) {
|
|
24
|
+
if (!this.#docSynchronizers[documentId]) {
|
|
25
|
+
const handle = this.repo.find(documentId)
|
|
26
|
+
this.#docSynchronizers[documentId] = this.#initDocSynchronizer(handle)
|
|
27
|
+
}
|
|
28
|
+
return this.#docSynchronizers[documentId]
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
/** Creates a new docSynchronizer and sets it up to propagate messages */
|
|
32
|
+
#initDocSynchronizer(handle: DocHandle<unknown>): DocSynchronizer {
|
|
33
|
+
const docSynchronizer = new DocSynchronizer(handle)
|
|
34
|
+
docSynchronizer.on("message", event => this.emit("message", event))
|
|
35
|
+
return docSynchronizer
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
/** returns an array of peerIds that we share this document generously with */
|
|
39
|
+
async #documentGenerousPeers(documentId: DocumentId): Promise<PeerId[]> {
|
|
40
|
+
const peers = Array.from(this.#peers)
|
|
41
|
+
const generousPeers: PeerId[] = []
|
|
42
|
+
for (const peerId of peers) {
|
|
43
|
+
const okToShare = await this.repo.sharePolicy(peerId, documentId)
|
|
44
|
+
if (okToShare) generousPeers.push(peerId)
|
|
45
|
+
}
|
|
46
|
+
return generousPeers
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
// PUBLIC
|
|
50
|
+
|
|
51
|
+
/**
|
|
52
|
+
* When we receive a sync message for a document we haven't got in memory, we
|
|
53
|
+
* register it with the repo and start synchronizing
|
|
54
|
+
*/
|
|
55
|
+
async receiveSyncMessage(
|
|
56
|
+
peerId: PeerId,
|
|
57
|
+
channelId: ChannelId,
|
|
58
|
+
message: Uint8Array
|
|
59
|
+
) {
|
|
60
|
+
log(`onSyncMessage: ${peerId}, ${channelId}, ${message.byteLength}bytes`)
|
|
61
|
+
|
|
62
|
+
const documentId = channelId as unknown as DocumentId
|
|
63
|
+
const docSynchronizer = await this.#fetchDocSynchronizer(documentId)
|
|
64
|
+
|
|
65
|
+
await docSynchronizer.receiveSyncMessage(peerId, channelId, message)
|
|
66
|
+
|
|
67
|
+
// Initiate sync with any new peers
|
|
68
|
+
const peers = await this.#documentGenerousPeers(documentId)
|
|
69
|
+
peers
|
|
70
|
+
.filter(peerId => !docSynchronizer.hasPeer(peerId))
|
|
71
|
+
.forEach(peerId => docSynchronizer.beginSync(peerId))
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
/**
|
|
75
|
+
* Starts synchronizing the given document with all peers that we share it generously with.
|
|
76
|
+
*/
|
|
77
|
+
addDocument(documentId: DocumentId) {
|
|
78
|
+
const docSynchronizer = this.#fetchDocSynchronizer(documentId)
|
|
79
|
+
void this.#documentGenerousPeers(documentId).then(peers => {
|
|
80
|
+
peers.forEach(peerId => {
|
|
81
|
+
docSynchronizer.beginSync(peerId)
|
|
82
|
+
})
|
|
83
|
+
})
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
// TODO: implement this
|
|
87
|
+
removeDocument(documentId: DocumentId) {
|
|
88
|
+
throw new Error("not implemented")
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
/** Adds a peer and maybe starts synchronizing with them */
|
|
92
|
+
addPeer(peerId: PeerId) {
|
|
93
|
+
log(`adding ${peerId} & synchronizing with them`)
|
|
94
|
+
this.#peers.add(peerId)
|
|
95
|
+
for (const docSynchronizer of Object.values(this.#docSynchronizers)) {
|
|
96
|
+
const { documentId } = docSynchronizer
|
|
97
|
+
void this.repo.sharePolicy(peerId, documentId).then(okToShare => {
|
|
98
|
+
if (okToShare) docSynchronizer.beginSync(peerId)
|
|
99
|
+
})
|
|
100
|
+
}
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
/** Removes a peer and stops synchronizing with them */
|
|
104
|
+
removePeer(peerId: PeerId) {
|
|
105
|
+
log(`removing peer ${peerId}`)
|
|
106
|
+
this.#peers.delete(peerId)
|
|
107
|
+
|
|
108
|
+
for (const docSynchronizer of Object.values(this.#docSynchronizers)) {
|
|
109
|
+
docSynchronizer.endSync(peerId)
|
|
110
|
+
}
|
|
111
|
+
}
|
|
112
|
+
}
|
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
import * as A from "@automerge/automerge"
|
|
2
|
+
import { DocHandle } from "../DocHandle.js"
|
|
3
|
+
import { ChannelId, PeerId } from "../types.js"
|
|
4
|
+
import { Synchronizer } from "./Synchronizer.js"
|
|
5
|
+
|
|
6
|
+
import debug from "debug"
|
|
7
|
+
|
|
8
|
+
/**
 * DocSynchronizer takes a handle to an Automerge document, and receives & dispatches sync messages
 * to bring it inline with all other peers' versions.
 */
export class DocSynchronizer extends Synchronizer {
  #log: debug.Debugger
  #conciseLog: debug.Debugger
  #opsLog: debug.Debugger

  /** Active peers */
  #peers: PeerId[] = []

  /** Sync state for each peer we've communicated with (including inactive peers) */
  #syncStates: Record<PeerId, A.SyncState> = {}

  // Messages that arrived before the handle finished loading from storage;
  // replayed once it's ready (see receiveSyncMessage and the constructor).
  #pendingSyncMessages: Array<{ peerId: PeerId; message: Uint8Array }> = []

  constructor(private handle: DocHandle<any>) {
    super()
    // Short prefix keeps the log namespaces readable.
    const docId = handle.documentId.slice(0, 5)
    this.#conciseLog = debug(`automerge-repo:concise:docsync:${docId}`) // Only logs one line per receive/send
    this.#log = debug(`automerge-repo:docsync:${docId}`)
    this.#opsLog = debug(`automerge-repo:ops:docsync:${docId}`) // Log list of ops of each message

    // Any local change should be offered to all active peers.
    handle.on("change", () => this.#syncWithPeers())

    // Process pending sync messages immediately after the handle becomes ready.
    void (async () => {
      await handle.loadAttemptedValue()
      this.#processAllPendingSyncMessages()
    })()
  }

  get documentId() {
    return this.handle.documentId
  }

  /// PRIVATE

  // Generates and emits a sync message for every active peer.
  async #syncWithPeers() {
    this.#log(`syncWithPeers`)
    const doc = await this.handle.value()
    this.#peers.forEach(peerId => this.#sendSyncMessage(peerId, doc))
  }

  // Returns the sync state for a peer (fresh if we have none). Side effect:
  // also registers the peer as active if it wasn't already.
  #getSyncState(peerId: PeerId) {
    if (!this.#peers.includes(peerId)) {
      this.#log("adding a new peer", peerId)
      this.#peers.push(peerId)
    }

    return this.#syncStates[peerId] ?? A.initSyncState()
  }

  #setSyncState(peerId: PeerId, syncState: A.SyncState) {
    // TODO: we maybe should be persisting sync states. But we want to be careful about how often we
    // do that, because it can generate a lot of disk activity.

    // TODO: we only need to do this on reconnect

    this.#syncStates[peerId] = syncState
  }

  // Generates a sync message for `peerId` from `doc` and emits it; updates the
  // stored sync state either way. No-op (beyond state update) if Automerge
  // decides no message is needed.
  #sendSyncMessage(peerId: PeerId, doc: A.Doc<unknown>) {
    this.#log(`sendSyncMessage ->${peerId}`)

    const syncState = this.#getSyncState(peerId)
    const [newSyncState, message] = A.generateSyncMessage(doc, syncState)
    this.#setSyncState(peerId, newSyncState)
    if (message) {
      this.#logMessage(`sendSyncMessage 🡒 ${peerId}`, message)

      // The document's channel is named after its documentId.
      const channelId = this.handle.documentId as string as ChannelId
      this.emit("message", {
        targetId: peerId,
        channelId,
        message,
        broadcast: false,
      })
    } else {
      this.#log(`sendSyncMessage ->${peerId} [no message generated]`)
    }
  }

  // NOTE(review): deliberately disabled — everything after the bare `return`
  // below is unreachable. Decoding every message for logging was deemed too
  // expensive; consider deleting the dead tail or gating it on `.enabled`.
  #logMessage = (label: string, message: Uint8Array) => {
    // This is real expensive...
    return

    const size = message.byteLength
    const logText = `${label} ${size}b`
    const decoded = A.decodeSyncMessage(message)

    this.#conciseLog(logText)
    this.#log(logText, decoded)

    // expanding is expensive, so only do it if we're logging at this level
    const expanded = this.#opsLog.enabled
      ? decoded.changes.flatMap(change =>
          A.decodeChange(change).ops.map(op => JSON.stringify(op))
        )
      : null
    this.#opsLog(logText, expanded)
  }

  /// PUBLIC

  /** True if we are actively syncing with this peer. */
  hasPeer(peerId: PeerId) {
    return this.#peers.includes(peerId)
  }

  /** Kicks off a sync conversation with a peer once the handle has loaded. */
  beginSync(peerId: PeerId) {
    this.#log(`beginSync: ${peerId}`)

    // At this point if we don't have anything in our storage, we need to use an empty doc to sync
    // with; but we don't want to surface that state to the front end
    void this.handle.loadAttemptedValue().then(doc => {
      // HACK: if we have a sync state already, we round-trip it through the encoding system to make
      // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
      // messages during disconnection.
      // TODO: cover that case with a test and remove this hack
      const syncStateRaw = this.#getSyncState(peerId)
      const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw))
      this.#setSyncState(peerId, syncState)

      this.#sendSyncMessage(peerId, doc)
    })
  }

  /** Deactivates a peer. Its sync state is retained for possible reconnection. */
  endSync(peerId: PeerId) {
    this.#log(`removing peer ${peerId}`)
    this.#peers = this.#peers.filter(p => p !== peerId)
  }

  /**
   * Ingests a sync message from a peer. Messages arriving before the handle
   * has checked local storage are queued and replayed once it's ready.
   * @throws if `channelId` does not match this synchronizer's documentId
   */
  receiveSyncMessage(
    peerId: PeerId,
    channelId: ChannelId,
    message: Uint8Array
  ) {
    if ((channelId as string) !== (this.documentId as string))
      throw new Error(`channelId doesn't match documentId`)

    // We need to block receiving the syncMessages until we've checked local storage
    if (!this.handle.isReadyOrRequesting()) {
      this.#pendingSyncMessages.push({ peerId, message })
      return
    }

    // Drain any queued messages first so they apply in arrival order.
    this.#processAllPendingSyncMessages()
    this.#processSyncMessage(peerId, message)
  }

  // Applies one sync message to the document and replies to the sender.
  #processSyncMessage(peerId: PeerId, message: Uint8Array) {
    this.handle.update(doc => {
      const [newDoc, newSyncState] = A.receiveSyncMessage(
        doc,
        this.#getSyncState(peerId),
        message
      )

      this.#setSyncState(peerId, newSyncState)

      // respond to just this peer (as required)
      // NOTE(review): the reply is generated from `doc` (the pre-merge
      // document) rather than `newDoc` — confirm this is intentional.
      this.#sendSyncMessage(peerId, doc)
      return newDoc
    })
  }

  // Replays queued messages in FIFO order, then clears the queue.
  #processAllPendingSyncMessages() {
    for (const { peerId, message } of this.#pendingSyncMessages) {
      this.#processSyncMessage(peerId, message)
    }

    this.#pendingSyncMessages = []
  }
}
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import EventEmitter from "eventemitter3"
|
|
2
|
+
import { ChannelId, PeerId } from "../types.js"
|
|
3
|
+
import { MessagePayload } from "../network/NetworkAdapter.js"
|
|
4
|
+
|
|
5
|
+
/**
 * Base class for sync participants. A Synchronizer ingests inbound sync
 * messages via `receiveSyncMessage` and emits outbound ones via the
 * "message" event.
 */
export abstract class Synchronizer extends EventEmitter<SynchronizerEvents> {
  /** Handles a sync message received from `peerId` on `channelId`. */
  abstract receiveSyncMessage(
    peerId: PeerId,
    channelId: ChannelId,
    message: Uint8Array
  ): void
}

/** Events emitted by every Synchronizer. */
export interface SynchronizerEvents {
  /** An outbound sync message to be delivered over the network. */
  message: (arg: MessagePayload) => void
}
|