@automerge/automerge-repo 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc +28 -0
- package/.mocharc.json +5 -0
- package/README.md +298 -0
- package/TODO.md +54 -0
- package/dist/DocCollection.d.ts +44 -0
- package/dist/DocCollection.d.ts.map +1 -0
- package/dist/DocCollection.js +85 -0
- package/dist/DocHandle.d.ts +78 -0
- package/dist/DocHandle.d.ts.map +1 -0
- package/dist/DocHandle.js +227 -0
- package/dist/EphemeralData.d.ts +27 -0
- package/dist/EphemeralData.d.ts.map +1 -0
- package/dist/EphemeralData.js +28 -0
- package/dist/Repo.d.ts +30 -0
- package/dist/Repo.d.ts.map +1 -0
- package/dist/Repo.js +97 -0
- package/dist/helpers/arraysAreEqual.d.ts +2 -0
- package/dist/helpers/arraysAreEqual.d.ts.map +1 -0
- package/dist/helpers/arraysAreEqual.js +1 -0
- package/dist/helpers/eventPromise.d.ts +5 -0
- package/dist/helpers/eventPromise.d.ts.map +1 -0
- package/dist/helpers/eventPromise.js +6 -0
- package/dist/helpers/headsAreSame.d.ts +3 -0
- package/dist/helpers/headsAreSame.d.ts.map +1 -0
- package/dist/helpers/headsAreSame.js +7 -0
- package/dist/helpers/mergeArrays.d.ts +2 -0
- package/dist/helpers/mergeArrays.d.ts.map +1 -0
- package/dist/helpers/mergeArrays.js +15 -0
- package/dist/helpers/pause.d.ts +3 -0
- package/dist/helpers/pause.d.ts.map +1 -0
- package/dist/helpers/pause.js +7 -0
- package/dist/helpers/withTimeout.d.ts +9 -0
- package/dist/helpers/withTimeout.d.ts.map +1 -0
- package/dist/helpers/withTimeout.js +22 -0
- package/dist/index.d.ts +13 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +10 -0
- package/dist/network/NetworkAdapter.d.ts +37 -0
- package/dist/network/NetworkAdapter.d.ts.map +1 -0
- package/dist/network/NetworkAdapter.js +4 -0
- package/dist/network/NetworkSubsystem.d.ts +23 -0
- package/dist/network/NetworkSubsystem.d.ts.map +1 -0
- package/dist/network/NetworkSubsystem.js +89 -0
- package/dist/storage/StorageAdapter.d.ts +6 -0
- package/dist/storage/StorageAdapter.d.ts.map +1 -0
- package/dist/storage/StorageAdapter.js +2 -0
- package/dist/storage/StorageSubsystem.d.ts +12 -0
- package/dist/storage/StorageSubsystem.d.ts.map +1 -0
- package/dist/storage/StorageSubsystem.js +65 -0
- package/dist/synchronizer/CollectionSynchronizer.d.ts +24 -0
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -0
- package/dist/synchronizer/CollectionSynchronizer.js +92 -0
- package/dist/synchronizer/DocSynchronizer.d.ts +18 -0
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -0
- package/dist/synchronizer/DocSynchronizer.js +136 -0
- package/dist/synchronizer/Synchronizer.d.ts +10 -0
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -0
- package/dist/synchronizer/Synchronizer.js +3 -0
- package/dist/test-utilities/adapter-tests.d.ts +21 -0
- package/dist/test-utilities/adapter-tests.d.ts.map +1 -0
- package/dist/test-utilities/adapter-tests.js +117 -0
- package/dist/types.d.ts +10 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +1 -0
- package/fuzz/fuzz.ts +129 -0
- package/package.json +65 -0
- package/src/DocCollection.ts +123 -0
- package/src/DocHandle.ts +386 -0
- package/src/EphemeralData.ts +46 -0
- package/src/Repo.ts +155 -0
- package/src/helpers/arraysAreEqual.ts +2 -0
- package/src/helpers/eventPromise.ts +10 -0
- package/src/helpers/headsAreSame.ts +8 -0
- package/src/helpers/mergeArrays.ts +17 -0
- package/src/helpers/pause.ts +9 -0
- package/src/helpers/withTimeout.ts +28 -0
- package/src/index.ts +22 -0
- package/src/network/NetworkAdapter.ts +54 -0
- package/src/network/NetworkSubsystem.ts +130 -0
- package/src/storage/StorageAdapter.ts +5 -0
- package/src/storage/StorageSubsystem.ts +91 -0
- package/src/synchronizer/CollectionSynchronizer.ts +112 -0
- package/src/synchronizer/DocSynchronizer.ts +182 -0
- package/src/synchronizer/Synchronizer.ts +15 -0
- package/src/test-utilities/adapter-tests.ts +163 -0
- package/src/types.ts +3 -0
- package/test/CollectionSynchronizer.test.ts +73 -0
- package/test/DocCollection.test.ts +19 -0
- package/test/DocHandle.test.ts +281 -0
- package/test/DocSynchronizer.test.ts +68 -0
- package/test/EphemeralData.test.ts +44 -0
- package/test/Network.test.ts +13 -0
- package/test/Repo.test.ts +367 -0
- package/test/StorageSubsystem.test.ts +78 -0
- package/test/helpers/DummyNetworkAdapter.ts +8 -0
- package/test/helpers/DummyStorageAdapter.ts +23 -0
- package/test/helpers/getRandomItem.ts +4 -0
- package/test/types.ts +3 -0
- package/tsconfig.json +16 -0
package/.eslintrc
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
{
|
|
2
|
+
"env": {
|
|
3
|
+
"browser": true,
|
|
4
|
+
"es2021": true
|
|
5
|
+
},
|
|
6
|
+
"extends": [
|
|
7
|
+
"eslint:recommended",
|
|
8
|
+
"plugin:@typescript-eslint/eslint-recommended",
|
|
9
|
+
"plugin:@typescript-eslint/recommended"
|
|
10
|
+
],
|
|
11
|
+
"ignorePatterns": ["dist/**"],
|
|
12
|
+
"parser": "@typescript-eslint/parser",
|
|
13
|
+
"plugins": ["@typescript-eslint", "mocha"],
|
|
14
|
+
"parserOptions": {
|
|
15
|
+
"project": "./tsconfig.json",
|
|
16
|
+
"ecmaVersion": "latest",
|
|
17
|
+
"sourceType": "module"
|
|
18
|
+
},
|
|
19
|
+
"rules": {
|
|
20
|
+
"semi": ["error", "never"],
|
|
21
|
+
"import/extensions": 0,
|
|
22
|
+
"lines-between-class-members": 0,
|
|
23
|
+
"@typescript-eslint/no-floating-promises": "error",
|
|
24
|
+
"@typescript-eslint/no-empty-function": ["warn", { "allow": ["methods"]}],
|
|
25
|
+
"no-param-reassign": 0,
|
|
26
|
+
"no-use-before-define": 0
|
|
27
|
+
}
|
|
28
|
+
}
|
package/.mocharc.json
ADDED
package/README.md
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
1
|
+
# Automerge Repo
|
|
2
|
+
|
|
3
|
+
This is a wrapper for the [Automerge](https://github.com/automerge/automerge) CRDT library which
|
|
4
|
+
provides facilities to support working with many documents at once, as well as pluggable networking
|
|
5
|
+
and storage.
|
|
6
|
+
|
|
7
|
+
This is the core library. It handles dispatch of events and provides shared functionality such as
|
|
8
|
+
deciding which peers to connect to or when to write data out to storage.
|
|
9
|
+
|
|
10
|
+
Other packages in this monorepo include:
|
|
11
|
+
|
|
12
|
+
- [@automerge/automerge-repo-demo-counter](/packages/@automerge/automerge-repo-demo-counter/): A React-based demonstration
|
|
13
|
+
application.
|
|
14
|
+
- [@automerge/automerge-repo-react-hooks](/packages/@automerge/automerge-repo-react-hooks/): Example hooks for use with
|
|
15
|
+
React.
|
|
16
|
+
- [@automerge/automerge-repo-sync-server](/packages/@automerge/automerge-repo-sync-server/): A small synchronization
|
|
17
|
+
server that facilitates asynchronous communication between peers
|
|
18
|
+
|
|
19
|
+
#### Storage adapters
|
|
20
|
+
|
|
21
|
+
- [automerge-repo-storage-localforage](/packages/automerge-repo-storage-localforage/): A storage
|
|
22
|
+
adapter to persist data in a browser
|
|
23
|
+
- [automerge-repo-storage-nodefs](/packages/automerge-repo-storage-nodefs/): A storage adapter to
|
|
24
|
+
write changes to the filesystem
|
|
25
|
+
|
|
26
|
+
#### Network adapters
|
|
27
|
+
|
|
28
|
+
- [automerge-repo-network-websocket](/packages/automerge-repo-network-websocket/): Network adapters
|
|
29
|
+
for both sides of a client/server configuration over websocket
|
|
30
|
+
- [automerge-repo-network-localfirstrelay](/packages/automerge-repo-network-localfirstrelay/): A
|
|
31
|
+
network client that uses [@localfirst/relay](https://github.com/local-first-web/relay) to relay
|
|
32
|
+
traffic between peers
|
|
33
|
+
- [automerge-repo-network-messagechannel](/packages/automerge-repo-network-messagechannel/): A
|
|
34
|
+
network adapter that uses the [MessageChannel
|
|
35
|
+
API](https://developer.mozilla.org/en-US/docs/Web/API/MessageChannel) to communicate between tabs
|
|
36
|
+
- [automerge-repo-network-broadcastchannel](/packages/automerge-repo-network-broadcastchannel/):
|
|
37
|
+
Likely only useful for experimentation, but allows simple (inefficient) tab-to-tab data
|
|
38
|
+
synchronization
|
|
39
|
+
|
|
40
|
+
## Usage
|
|
41
|
+
|
|
42
|
+
This library provides two main components: the `Repo` itself, and the `DocHandle`s it contains.
|
|
43
|
+
|
|
44
|
+
A `Repo` exposes these methods:
|
|
45
|
+
|
|
46
|
+
- `create<T>()`
|
|
47
|
+
Creates a new, empty `Automerge.Doc` and returns a `DocHandle` for it.
|
|
48
|
+
- `find<T>(docId: DocumentId)`
|
|
49
|
+
Looks up a given document either on the local machine or (if necessary) over any configured
|
|
50
|
+
networks.
|
|
51
|
+
- `delete(docId: DocumentId)`
|
|
52
|
+
Deletes the local copy of a document from the local cache and local storage. _This does not currently delete the document from any other peers_.
|
|
53
|
+
- `.on("document", ({handle: DocHandle}) => void)`
|
|
54
|
+
Registers a callback to be fired each time a new document is loaded or created.
|
|
55
|
+
- `.on("delete-document", ({handle: DocHandle}) => void)`
|
|
56
|
+
Registers a callback to be fired each time a document is deleted from the collection.
|
|
57
|
+
|
|
58
|
+
A `DocHandle` is a wrapper around an `Automerge.Doc`. Its primary function is to dispatch changes to
|
|
59
|
+
the document.
|
|
60
|
+
|
|
61
|
+
- `handle.change((doc: T) => void)`
|
|
62
|
+
Calls the provided callback with an instrumented mutable object
|
|
63
|
+
representing the document. Any changes made to the document will be recorded and distributed to
|
|
64
|
+
other nodes.
|
|
65
|
+
- `handle.value()`
|
|
66
|
+
Returns a `Promise<Doc<T>>` that will contain the current value of the document.
|
|
67
|
+
it waits until the document has finished loading and/or synchronizing over the network before
|
|
68
|
+
returning a value.
|
|
69
|
+
|
|
70
|
+
When required, you can also access the underlying document directly, but only after the handle is ready:
|
|
71
|
+
|
|
72
|
+
```ts
|
|
73
|
+
if (handle.ready()) {
|
|
74
|
+
doc = handle.doc
|
|
75
|
+
} else {
|
|
76
|
+
handle.value().then(d => {
|
|
77
|
+
doc = d
|
|
78
|
+
})
|
|
79
|
+
}
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
A `DocHandle` also emits these events:
|
|
83
|
+
|
|
84
|
+
- `change({handle: DocHandle, doc: Doc<T>})`
|
|
85
|
+
Called any time changes are created or received on the document. Request the `value()` from the
|
|
86
|
+
handle.
|
|
87
|
+
- `patch({handle: DocHandle, before: Doc, after: Doc, patches: Patch[]})`
|
|
88
|
+
Useful for incremental maintenance of a view, most notably for text editors.
|
|
89
|
+
- `delete`
|
|
90
|
+
Called when the document is deleted locally.
|
|
91
|
+
|
|
92
|
+
## Creating a repo
|
|
93
|
+
|
|
94
|
+
The repo needs to be configured with storage and network adapters. If you give it neither, it will
|
|
95
|
+
still work, but you won't be able to find any data and data created won't outlast the process.
|
|
96
|
+
|
|
97
|
+
Multiple network adapters (even of the same type) can be added to a repo, even after it is created.
|
|
98
|
+
|
|
99
|
+
A repo currently only supports a single storage adapter, and it must be provided at creation.
|
|
100
|
+
|
|
101
|
+
Here is an example of creating a repo with a localforage storage adapter and a broadcast channel
|
|
102
|
+
network adapter:
|
|
103
|
+
|
|
104
|
+
```ts
|
|
105
|
+
const repo = new Repo({
|
|
106
|
+
network: [new BroadcastChannelNetworkAdapter()],
|
|
107
|
+
storage: new LocalForageStorageAdapter(),
|
|
108
|
+
sharePolicy: async (peerId: PeerId, documentId: DocumentId) => true // this is the default
|
|
109
|
+
})
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
### Share Policy
|
|
113
|
+
The share policy is used to determine which document in your repo should be _automatically_ shared with other peers. **The default setting is to share all documents with all peers.**
|
|
114
|
+
|
|
115
|
+
> **Warning**
|
|
116
|
+
> If your local repo has deleted a document, a connecting peer with the default share policy will still share that document with you.
|
|
117
|
+
|
|
118
|
+
You can override this by providing a custom share policy. The function should return a promise resolving to a boolean value indicating whether the document should be shared with the peer.
|
|
119
|
+
|
|
120
|
+
The share policy will not stop a document being _requested_ by another peer by its `DocumentId`.
|
|
121
|
+
|
|
122
|
+
```ts
// Example: only share documents automatically with a known sync server
const sharePolicy = async (peerId: PeerId, documentId: DocumentId) =>
  peerId === "sync-server"
```
|
|
123
|
+
## Starting the demo app
|
|
124
|
+
|
|
125
|
+
```bash
|
|
126
|
+
yarn
|
|
127
|
+
yarn dev
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
## Quickstart
|
|
131
|
+
|
|
132
|
+
The following instructions will get you a working React app running in a browser.
|
|
133
|
+
|
|
134
|
+
```bash
|
|
135
|
+
yarn create vite
|
|
136
|
+
# Project name: hello-automerge-repo
|
|
137
|
+
# Select a framework: React
|
|
138
|
+
# Select a variant: TypeScript
|
|
139
|
+
|
|
140
|
+
cd hello-automerge-repo
|
|
141
|
+
yarn
|
|
142
|
+
yarn add @automerge/automerge @automerge/automerge-repo @automerge/automerge-repo-react-hooks @automerge/automerge-repo-network-broadcastchannel @automerge/automerge-repo-storage-localforage vite-plugin-wasm vite-plugin-top-level-await
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
Edit the `vite.config.ts`. (This is all needed to work around packaging hiccups due to WASM. We look
|
|
146
|
+
forward to the day that we can delete this step entirely.)
|
|
147
|
+
|
|
148
|
+
```ts
|
|
149
|
+
// vite.config.ts
|
|
150
|
+
import { defineConfig } from "vite"
|
|
151
|
+
import react from "@vitejs/plugin-react"
|
|
152
|
+
import wasm from "vite-plugin-wasm"
|
|
153
|
+
import topLevelAwait from "vite-plugin-top-level-await"
|
|
154
|
+
|
|
155
|
+
export default defineConfig({
|
|
156
|
+
plugins: [wasm(), topLevelAwait(), react()],
|
|
157
|
+
|
|
158
|
+
worker: {
|
|
159
|
+
format: "es",
|
|
160
|
+
plugins: [wasm(), topLevelAwait()],
|
|
161
|
+
},
|
|
162
|
+
|
|
163
|
+
optimizeDeps: {
|
|
164
|
+
// This is necessary because otherwise `vite dev` includes two separate
|
|
165
|
+
// versions of the JS wrapper. This causes problems because the JS
|
|
166
|
+
// wrapper has a module level variable to track JS side heap
|
|
167
|
+
// allocations, and initializing this twice causes horrible breakage
|
|
168
|
+
exclude: [
|
|
169
|
+
"@automerge/automerge-wasm",
|
|
170
|
+
"@automerge/automerge-wasm/bundler/bindgen_bg.wasm",
|
|
171
|
+
"@syntect/wasm",
|
|
172
|
+
],
|
|
173
|
+
},
|
|
174
|
+
|
|
175
|
+
server: {
|
|
176
|
+
fs: {
|
|
177
|
+
strict: false,
|
|
178
|
+
},
|
|
179
|
+
},
|
|
180
|
+
})
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
Now set up the repo in `src/main.tsx` by importing the bits, creating the repo, and passing down a
|
|
184
|
+
RepoContext. We also create a document and store its `documentId` in localStorage.
|
|
185
|
+
|
|
186
|
+
```tsx
|
|
187
|
+
// src/main.tsx
|
|
188
|
+
import React from "react"
|
|
189
|
+
import ReactDOM from "react-dom/client"
|
|
190
|
+
import App from "./App.js"
|
|
191
|
+
import { Repo } from "@automerge/automerge-repo"
|
|
192
|
+
import { BroadcastChannelNetworkAdapter } from "@automerge/automerge-repo-network-broadcastchannel"
|
|
193
|
+
import { LocalForageStorageAdapter } from "@automerge/automerge-repo-storage-localforage"
|
|
194
|
+
import { RepoContext } from "@automerge/automerge-repo-react-hooks"
|
|
195
|
+
|
|
196
|
+
const repo = new Repo({
|
|
197
|
+
network: [new BroadcastChannelNetworkAdapter()],
|
|
198
|
+
storage: new LocalForageStorageAdapter(),
|
|
199
|
+
})
|
|
200
|
+
|
|
201
|
+
let rootDocId = localStorage.rootDocId
|
|
202
|
+
if (!rootDocId) {
|
|
203
|
+
const handle = repo.create()
|
|
204
|
+
localStorage.rootDocId = rootDocId = handle.documentId
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
ReactDOM.createRoot(document.getElementById("root") as HTMLElement).render(
|
|
208
|
+
<RepoContext.Provider value={repo}>
|
|
209
|
+
<React.StrictMode>
|
|
210
|
+
<App documentId={rootDocId} />
|
|
211
|
+
</React.StrictMode>
|
|
212
|
+
</RepoContext.Provider>
|
|
213
|
+
)
|
|
214
|
+
```
|
|
215
|
+
|
|
216
|
+
Now update `App.tsx` to load the document from the Repo based on the documentId passed in. Then, use
|
|
217
|
+
the document to render a button that increments the count.
|
|
218
|
+
|
|
219
|
+
```tsx
|
|
220
|
+
// App.tsx
|
|
221
|
+
import { useDocument } from "@automerge/automerge-repo-react-hooks"
|
|
222
|
+
import { DocumentId } from "@automerge/automerge-repo"
|
|
223
|
+
|
|
224
|
+
interface Doc {
|
|
225
|
+
count: number
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
export default function App(props: { documentId: DocumentId }) {
|
|
229
|
+
const [doc, changeDoc] = useDocument<Doc>(props.documentId)
|
|
230
|
+
|
|
231
|
+
return (
|
|
232
|
+
<button
|
|
233
|
+
onClick={() => {
|
|
234
|
+
changeDoc((d: any) => {
|
|
235
|
+
d.count = (d.count || 0) + 1
|
|
236
|
+
})
|
|
237
|
+
}}
|
|
238
|
+
>
|
|
239
|
+
count is: {doc?.count ?? 0}
|
|
240
|
+
</button>
|
|
241
|
+
)
|
|
242
|
+
}
|
|
243
|
+
```
|
|
244
|
+
|
|
245
|
+
You should now have a working React application using Automerge. Try running it with `yarn dev`, and
|
|
246
|
+
open it in two browser windows. You should see the count increment in both windows.
|
|
247
|
+
|
|
248
|
+

|
|
249
|
+
|
|
250
|
+
This application is also available as a package in this repo in
|
|
251
|
+
[automerge-repo-demo-counter](/packages/automerge-repo-demo-counter). You can run it with `yarn
|
|
252
|
+
dev:demo`.
|
|
253
|
+
|
|
254
|
+
### Adding a sync server
|
|
255
|
+
|
|
256
|
+
First, get a sync-server running locally, following the instructions for the
|
|
257
|
+
[automerge-repo-sync-server](/packages/automerge-repo-sync-server/) package.
|
|
258
|
+
|
|
259
|
+
Next, update your application to synchronize with it:
|
|
260
|
+
|
|
261
|
+
Install the websocket network adapter:
|
|
262
|
+
|
|
263
|
+
```bash
|
|
264
|
+
yarn add @automerge/automerge-repo-network-websocket
|
|
265
|
+
```
|
|
266
|
+
|
|
267
|
+
Now import it and add it to your list of network adapters:
|
|
268
|
+
|
|
269
|
+
```ts
|
|
270
|
+
// main.tsx
|
|
271
|
+
import { BrowserWebSocketClientAdapter } from "@automerge/automerge-repo-network-websocket" // <-- add this line
|
|
272
|
+
|
|
273
|
+
// ...
|
|
274
|
+
|
|
275
|
+
const repo = new Repo({
|
|
276
|
+
network: [
|
|
277
|
+
new BroadcastChannelNetworkAdapter(),
|
|
278
|
+
new BrowserWebSocketClientAdapter("ws://localhost:3030"), // <-- add this line
|
|
279
|
+
],
|
|
280
|
+
storage: new LocalForageStorageAdapter(),
|
|
281
|
+
})
|
|
282
|
+
|
|
283
|
+
// ...
|
|
284
|
+
```
|
|
285
|
+
|
|
286
|
+
And you're finished! You can test that your sync server is opening the same document in two
|
|
287
|
+
different browsers (e.g. Chrome and Firefox). (Note that with our current trivial implementation
|
|
288
|
+
you'll need to manually copy the `rootDocId` value between the browsers.)
|
|
289
|
+
|
|
290
|
+
## Acknowledgements
|
|
291
|
+
|
|
292
|
+
Originally authored by Peter van Hardenberg.
|
|
293
|
+
|
|
294
|
+
With gratitude for contributions by:
|
|
295
|
+
- Herb Caudill
|
|
296
|
+
- Jeremy Rose
|
|
297
|
+
- Alex Currie-Clark
|
|
298
|
+
- Dylan Mackenzie
|
package/TODO.md
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
## TODO
|
|
2
|
+
|
|
3
|
+
cursor sharing (multi channel?)
|
|
4
|
+
repo should be a class
|
|
5
|
+
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
peer authentication
|
|
9
|
+
E2E encryption
|
|
10
|
+
write more tests
|
|
11
|
+
file-handle based storage
|
|
12
|
+
|
|
13
|
+
// TODO:
|
|
14
|
+
// efficient sharing of sets of documents
|
|
15
|
+
|
|
16
|
+
# Future Work and Known Issues
|
|
17
|
+
|
|
18
|
+
There are a number of problems with the current design which I will briefly enumerate here:
|
|
19
|
+
|
|
20
|
+
- NetworkSubsystem
|
|
21
|
+
- peer candidate selection -> do we trust this peer? (see Network.js peer-candidate)
|
|
22
|
+
- handle disconnections -> try another protocol
|
|
23
|
+
- syncstates aren't persisted... but neither are client-ids. should they be?
|
|
24
|
+
|
|
25
|
+
- StorageSubsystem
|
|
26
|
+
- customizable save intervals / manual-only saving
|
|
27
|
+
- separate backends for incremental vs. full document saves
|
|
28
|
+
- S3/redis store for a node storage peer
|
|
29
|
+
|
|
30
|
+
- Repo Design Problems
|
|
31
|
+
- sending cursors / ephemeral data
|
|
32
|
+
- we should decide what to sync with a peer based on the peer, not just the docId
|
|
33
|
+
- no way of discovering documents as a batch or requesting synchronization for multiple documents.
|
|
34
|
+
|
|
35
|
+
- SyncProtocol work
|
|
36
|
+
- multi-document syncprotocol
|
|
37
|
+
- non-peer-specific broadcast SyncMessages
|
|
38
|
+
- syncing large repos without having to do expensive loads into memory
|
|
39
|
+
- how to decide what documents to sync with a peer
|
|
40
|
+
- one-way sync support -> i want to receive but not send changes
|
|
41
|
+
- peer-oriented instead of document-oriented sync
|
|
42
|
+
- encrypt contents but not structure, allowing syncing with a semi-trusted peer instead of all the peers
|
|
43
|
+
- change.hash & change.deps but with a consistently salted hash?
|
|
44
|
+
- RLE encode block of changes
|
|
45
|
+
|
|
46
|
+
- Synchronizer & network needs improved handling of disconnection & reconnection of peers
|
|
47
|
+
- TODO: preserving syncState in localStorage would be a good optimization
|
|
48
|
+
StorageSubsystem:
|
|
49
|
+
// TODO: can we do incremental save that takes advantage of the last binary?
|
|
50
|
+
/\* TODO: we probably want to be able to distinguish between
|
|
51
|
+
- incremental & compacted writes due to cost & frequency -> give the option for two storage engines
|
|
52
|
+
- we probably also want to have compaction callbacks. count / timeout / manual calls...
|
|
53
|
+
\*/
|
|
54
|
+
- figure out a compaction callback system (and an option for explicit saves only)
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import EventEmitter from "eventemitter3";
|
|
2
|
+
import { DocHandle } from "./DocHandle.js";
|
|
3
|
+
import { type DocumentId } from "./types.js";
|
|
4
|
+
import { type SharePolicy } from "./Repo.js";
|
|
5
|
+
/**
|
|
6
|
+
* A DocCollection is a collection of DocHandles. It supports creating new documents and finding
|
|
7
|
+
* documents by ID.
|
|
8
|
+
* */
|
|
9
|
+
export declare class DocCollection extends EventEmitter<DocCollectionEvents> {
|
|
10
|
+
#private;
|
|
11
|
+
/** By default, we share generously with all peers. */
|
|
12
|
+
sharePolicy: SharePolicy;
|
|
13
|
+
constructor();
|
|
14
|
+
/** Returns all the handles we have cached. */
|
|
15
|
+
get handles(): Record<DocumentId, DocHandle<any>>;
|
|
16
|
+
/**
|
|
17
|
+
* Creates a new document and returns a handle to it. The initial value of the document is
|
|
18
|
+
* an empty object `{}`. Its documentId is generated by the system. we emit a `document` event
|
|
19
|
+
* to advertise interest in the document.
|
|
20
|
+
*/
|
|
21
|
+
create<T>(): DocHandle<T>;
|
|
22
|
+
/**
|
|
23
|
+
* Retrieves a document by id. It gets data from the local system, but also emits a `document`
|
|
24
|
+
* event to advertise interest in the document.
|
|
25
|
+
*/
|
|
26
|
+
find<T>(
|
|
27
|
+
/** The documentId of the handle to retrieve */
|
|
28
|
+
documentId: DocumentId): DocHandle<T>;
|
|
29
|
+
delete(
|
|
30
|
+
/** The documentId of the handle to delete */
|
|
31
|
+
documentId: DocumentId): void;
|
|
32
|
+
}
|
|
33
|
+
interface DocCollectionEvents {
|
|
34
|
+
document: (arg: DocumentPayload) => void;
|
|
35
|
+
"delete-document": (arg: DeleteDocumentPayload) => void;
|
|
36
|
+
}
|
|
37
|
+
interface DocumentPayload {
|
|
38
|
+
handle: DocHandle<any>;
|
|
39
|
+
}
|
|
40
|
+
interface DeleteDocumentPayload {
|
|
41
|
+
documentId: DocumentId;
|
|
42
|
+
}
|
|
43
|
+
export {};
|
|
44
|
+
//# sourceMappingURL=DocCollection.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"DocCollection.d.ts","sourceRoot":"","sources":["../src/DocCollection.ts"],"names":[],"mappings":"AAAA,OAAO,YAAY,MAAM,eAAe,CAAA;AAExC,OAAO,EAAE,SAAS,EAAE,MAAM,gBAAgB,CAAA;AAC1C,OAAO,EAAE,KAAK,UAAU,EAAE,MAAM,YAAY,CAAA;AAC5C,OAAO,EAAE,KAAK,WAAW,EAAE,MAAM,WAAW,CAAA;AAE5C;;;KAGK;AACL,qBAAa,aAAc,SAAQ,YAAY,CAAC,mBAAmB,CAAC;;IAGlE,sDAAsD;IACtD,WAAW,EAAE,WAAW,CAAmB;;IAuB3C,8CAA8C;IAC9C,IAAI,OAAO,uCAEV;IAED;;;;OAIG;IACH,MAAM,CAAC,CAAC,KAAK,SAAS,CAAC,CAAC,CAAC;IAyBzB;;;OAGG;IACH,IAAI,CAAC,CAAC;IACJ,+CAA+C;IAC/C,UAAU,EAAE,UAAU,GACrB,SAAS,CAAC,CAAC,CAAC;IAmBf,MAAM;IACJ,6CAA6C;IAC7C,UAAU,EAAE,UAAU;CAQzB;AAGD,UAAU,mBAAmB;IAC3B,QAAQ,EAAE,CAAC,GAAG,EAAE,eAAe,KAAK,IAAI,CAAA;IACxC,iBAAiB,EAAE,CAAC,GAAG,EAAE,qBAAqB,KAAK,IAAI,CAAA;CACxD;AAED,UAAU,eAAe;IACvB,MAAM,EAAE,SAAS,CAAC,GAAG,CAAC,CAAA;CACvB;AAED,UAAU,qBAAqB;IAC7B,UAAU,EAAE,UAAU,CAAA;CACvB"}
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import EventEmitter from "eventemitter3";
|
|
2
|
+
import { v4 as uuid } from "uuid";
|
|
3
|
+
import { DocHandle } from "./DocHandle.js";
|
|
4
|
+
/**
|
|
5
|
+
* A DocCollection is a collection of DocHandles. It supports creating new documents and finding
|
|
6
|
+
* documents by ID.
|
|
7
|
+
* */
|
|
8
|
+
export class DocCollection extends EventEmitter {
|
|
9
|
+
#handleCache = {};
|
|
10
|
+
/** By default, we share generously with all peers. */
|
|
11
|
+
sharePolicy = async () => true;
|
|
12
|
+
constructor() {
|
|
13
|
+
super();
|
|
14
|
+
}
|
|
15
|
+
/** Returns an existing handle if we have it; creates one otherwise. */
|
|
16
|
+
#getHandle(
|
|
17
|
+
/** The documentId of the handle to look up or create */
|
|
18
|
+
documentId,
|
|
19
|
+
/** If we know we're creating a new document, specify this so we can have access to it immediately */
|
|
20
|
+
isNew) {
|
|
21
|
+
// If we have the handle cached, return it
|
|
22
|
+
if (this.#handleCache[documentId])
|
|
23
|
+
return this.#handleCache[documentId];
|
|
24
|
+
// If not, create a new handle, cache it, and return it
|
|
25
|
+
const handle = new DocHandle(documentId, { isNew });
|
|
26
|
+
this.#handleCache[documentId] = handle;
|
|
27
|
+
return handle;
|
|
28
|
+
}
|
|
29
|
+
/** Returns all the handles we have cached. */
|
|
30
|
+
get handles() {
|
|
31
|
+
return this.#handleCache;
|
|
32
|
+
}
|
|
33
|
+
/**
|
|
34
|
+
* Creates a new document and returns a handle to it. The initial value of the document is
|
|
35
|
+
* an empty object `{}`. Its documentId is generated by the system. we emit a `document` event
|
|
36
|
+
* to advertise interest in the document.
|
|
37
|
+
*/
|
|
38
|
+
create() {
|
|
39
|
+
// TODO:
|
|
40
|
+
// either
|
|
41
|
+
// - pass an initial value and do something like this to ensure that you get a valid initial value
|
|
42
|
+
// const myInitialValue = {
|
|
43
|
+
// tasks: [],
|
|
44
|
+
// filter: "all",
|
|
45
|
+
//
|
|
46
|
+
// const guaranteeInitialValue = (doc: any) => {
|
|
47
|
+
// if (!doc.tasks) doc.tasks = []
|
|
48
|
+
// if (!doc.filter) doc.filter = "all"
|
|
49
|
+
// return { ...myInitialValue, ...doc }
|
|
50
|
+
// }
|
|
51
|
+
// or
|
|
52
|
+
// - pass a "reify" function that takes a `<any>` and returns `<T>`
|
|
53
|
+
const documentId = uuid();
|
|
54
|
+
const handle = this.#getHandle(documentId, true);
|
|
55
|
+
this.emit("document", { handle });
|
|
56
|
+
return handle;
|
|
57
|
+
}
|
|
58
|
+
/**
|
|
59
|
+
* Retrieves a document by id. It gets data from the local system, but also emits a `document`
|
|
60
|
+
* event to advertise interest in the document.
|
|
61
|
+
*/
|
|
62
|
+
find(
|
|
63
|
+
/** The documentId of the handle to retrieve */
|
|
64
|
+
documentId) {
|
|
65
|
+
// TODO: we want a way to make sure we don't yield intermediate document states during initial synchronization
|
|
66
|
+
// If we already have a handle, return it
|
|
67
|
+
if (this.#handleCache[documentId])
|
|
68
|
+
return this.#handleCache[documentId];
|
|
69
|
+
// Otherwise, create a new handle
|
|
70
|
+
const handle = this.#getHandle(documentId, false);
|
|
71
|
+
// we don't directly initialize a value here because the StorageSubsystem and Synchronizers go
|
|
72
|
+
// and get the data asynchronously and block on read instead of on create
|
|
73
|
+
// emit a document event to advertise interest in this document
|
|
74
|
+
this.emit("document", { handle });
|
|
75
|
+
return handle;
|
|
76
|
+
}
|
|
77
|
+
delete(
|
|
78
|
+
/** The documentId of the handle to delete */
|
|
79
|
+
documentId) {
|
|
80
|
+
const handle = this.#getHandle(documentId, false);
|
|
81
|
+
handle.delete();
|
|
82
|
+
delete this.#handleCache[documentId];
|
|
83
|
+
this.emit("delete-document", { documentId });
|
|
84
|
+
}
|
|
85
|
+
}
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import * as A from "@automerge/automerge";
|
|
2
|
+
import EventEmitter from "eventemitter3";
|
|
3
|
+
import type { ChannelId, DocumentId, PeerId } from "./types.js";
|
|
4
|
+
/** DocHandle is a wrapper around a single Automerge document that lets us listen for changes. */
|
|
5
|
+
export declare class DocHandle<T>//
|
|
6
|
+
extends EventEmitter<DocHandleEvents<T>> {
|
|
7
|
+
#private;
|
|
8
|
+
documentId: DocumentId;
|
|
9
|
+
constructor(documentId: DocumentId, { isNew, timeoutDelay }?: DocHandleOptions);
|
|
10
|
+
get doc(): A.unstable.Doc<T>;
|
|
11
|
+
isReady: () => boolean;
|
|
12
|
+
isReadyOrRequesting: () => boolean;
|
|
13
|
+
isDeleted: () => boolean;
|
|
14
|
+
/**
|
|
15
|
+
* Returns the current document, waiting for the handle to be ready if necessary.
|
|
16
|
+
*/
|
|
17
|
+
value(awaitStates?: HandleState[]): Promise<A.unstable.Doc<T>>;
|
|
18
|
+
loadAttemptedValue(): Promise<A.unstable.Doc<T>>;
|
|
19
|
+
/** `load` is called by the repo when the document is found in storage */
|
|
20
|
+
load(binary: Uint8Array): void;
|
|
21
|
+
/** `update` is called by the repo when we receive changes from the network */
|
|
22
|
+
update(callback: (doc: A.Doc<T>) => A.Doc<T>): void;
|
|
23
|
+
/** `change` is called by the repo when the document is changed locally */
|
|
24
|
+
change(callback: A.ChangeFn<T>, options?: A.ChangeOptions<T>): void;
|
|
25
|
+
changeAt(heads: A.Heads, callback: A.ChangeFn<T>, options?: A.ChangeOptions<T>): void;
|
|
26
|
+
/** `request` is called by the repo when the document is not found in storage */
|
|
27
|
+
request(): void;
|
|
28
|
+
/** `delete` is called by the repo when the document is deleted */
|
|
29
|
+
delete(): void;
|
|
30
|
+
}
|
|
31
|
+
interface DocHandleOptions {
|
|
32
|
+
isNew?: boolean;
|
|
33
|
+
timeoutDelay?: number;
|
|
34
|
+
}
|
|
35
|
+
export interface DocHandleMessagePayload {
|
|
36
|
+
destinationId: PeerId;
|
|
37
|
+
channelId: ChannelId;
|
|
38
|
+
data: Uint8Array;
|
|
39
|
+
}
|
|
40
|
+
export interface DocHandleChangePayload<T> {
|
|
41
|
+
handle: DocHandle<T>;
|
|
42
|
+
doc: A.Doc<T>;
|
|
43
|
+
}
|
|
44
|
+
export interface DocHandleDeletePayload<T> {
|
|
45
|
+
handle: DocHandle<T>;
|
|
46
|
+
}
|
|
47
|
+
export interface DocHandlePatchPayload<T> {
|
|
48
|
+
handle: DocHandle<T>;
|
|
49
|
+
patches: A.Patch[];
|
|
50
|
+
before: A.Doc<T>;
|
|
51
|
+
after: A.Doc<T>;
|
|
52
|
+
}
|
|
53
|
+
export interface DocHandleEvents<T> {
|
|
54
|
+
change: (payload: DocHandleChangePayload<T>) => void;
|
|
55
|
+
patch: (payload: DocHandlePatchPayload<T>) => void;
|
|
56
|
+
delete: (payload: DocHandleDeletePayload<T>) => void;
|
|
57
|
+
}
|
|
58
|
+
export declare const HandleState: {
|
|
59
|
+
readonly IDLE: "idle";
|
|
60
|
+
readonly LOADING: "loading";
|
|
61
|
+
readonly REQUESTING: "requesting";
|
|
62
|
+
readonly READY: "ready";
|
|
63
|
+
readonly ERROR: "error";
|
|
64
|
+
readonly DELETED: "deleted";
|
|
65
|
+
};
|
|
66
|
+
export type HandleState = (typeof HandleState)[keyof typeof HandleState];
|
|
67
|
+
export declare const Event: {
|
|
68
|
+
readonly CREATE: "CREATE";
|
|
69
|
+
readonly LOAD: "LOAD";
|
|
70
|
+
readonly FIND: "FIND";
|
|
71
|
+
readonly REQUEST: "REQUEST";
|
|
72
|
+
readonly REQUEST_COMPLETE: "REQUEST_COMPLETE";
|
|
73
|
+
readonly UPDATE: "UPDATE";
|
|
74
|
+
readonly TIMEOUT: "TIMEOUT";
|
|
75
|
+
readonly DELETE: "DELETE";
|
|
76
|
+
};
|
|
77
|
+
export {};
|
|
78
|
+
//# sourceMappingURL=DocHandle.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"DocHandle.d.ts","sourceRoot":"","sources":["../src/DocHandle.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,sBAAsB,CAAA;AAEzC,OAAO,YAAY,MAAM,eAAe,CAAA;AAgBxC,OAAO,KAAK,EAAE,SAAS,EAAE,UAAU,EAAE,MAAM,EAAE,MAAM,YAAY,CAAA;AAG/D,iGAAiG;AACjG,qBAAa,SAAS,CAAC,CAAC,CAAE,EAAE;AAC1B,SAAQ,YAAY,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC;;IAQ/B,UAAU,EAAE,UAAU;gBAAtB,UAAU,EAAE,UAAU,EAC7B,EAAE,KAAa,EAAE,YAAqB,EAAE,GAAE,gBAAqB;IAkHjE,IAAI,GAAG,sBAQN;IAwBD,OAAO,gBAA8B;IACrC,mBAAmB,gBACkC;IACrD,SAAS,gBAAgC;IAEzC;;OAEG;IACG,KAAK,CAAC,WAAW,GAAE,WAAW,EAAY;IAc1C,kBAAkB;IAIxB,yEAAyE;IACzE,IAAI,CAAC,MAAM,EAAE,UAAU;IAMvB,8EAA8E;IAC9E,MAAM,CAAC,QAAQ,EAAE,CAAC,GAAG,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;IAI5C,2EAA2E;IAC3E,MAAM,CAAC,QAAQ,EAAE,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,OAAO,GAAE,CAAC,CAAC,aAAa,CAAC,CAAC,CAAM;IAehE,QAAQ,CAAC,KAAK,EAAE,CAAC,CAAC,KAAK,EAAE,QAAQ,EAAE,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,OAAO,GAAE,CAAC,CAAC,aAAa,CAAC,CAAC,CAAM;IAelF,gFAAgF;IAChF,OAAO;IAIP,kEAAkE;IAClE,MAAM;CAGP;AAID,UAAU,gBAAgB;IACxB,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,YAAY,CAAC,EAAE,MAAM,CAAA;CACtB;AAED,MAAM,WAAW,uBAAuB;IACtC,aAAa,EAAE,MAAM,CAAA;IACrB,SAAS,EAAE,SAAS,CAAA;IACpB,IAAI,EAAE,UAAU,CAAA;CACjB;AAED,MAAM,WAAW,sBAAsB,CAAC,CAAC;IACvC,MAAM,EAAE,SAAS,CAAC,CAAC,CAAC,CAAA;IACpB,GAAG,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;CACd;AAED,MAAM,WAAW,sBAAsB,CAAC,CAAC;IACvC,MAAM,EAAE,SAAS,CAAC,CAAC,CAAC,CAAA;CACrB;AAED,MAAM,WAAW,qBAAqB,CAAC,CAAC;IACtC,MAAM,EAAE,SAAS,CAAC,CAAC,CAAC,CAAA;IACpB,OAAO,EAAE,CAAC,CAAC,KAAK,EAAE,CAAA;IAClB,MAAM,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;IAChB,KAAK,EAAE,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;CAChB;AAED,MAAM,WAAW,eAAe,CAAC,CAAC;IAChC,MAAM,EAAE,CAAC,OAAO,EAAE,sBAAsB,CAAC,CAAC,CAAC,KAAK,IAAI,CAAA;IACpD,KAAK,EAAE,CAAC,OAAO,EAAE,qBAAqB,CAAC,CAAC,CAAC,KAAK,IAAI,CAAA;IAClD,MAAM,EAAE,CAAC,OAAO,EAAE,sBAAsB,CAAC,CAAC,CAAC,KAAK,IAAI,CAAA;CACrD;AAMD,eAAO,MAAM,WAAW;;;;;;;CAOd,CAAA;AACV,MAAM,MAAM,WAAW,GAAG,CAAC,OAAO,WAAW,CAAC,CAAC,MAAM,OAAO,WAAW,CAAC,CAAA;A
AkBxE,eAAO,MAAM,KAAK;;;;;;;;;CASR,CAAA"}
|