@automerge/automerge-repo 0.2.1 → 1.0.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +7 -24
  2. package/dist/DocCollection.d.ts +4 -4
  3. package/dist/DocCollection.d.ts.map +1 -1
  4. package/dist/DocCollection.js +25 -17
  5. package/dist/DocHandle.d.ts +46 -13
  6. package/dist/DocHandle.d.ts.map +1 -1
  7. package/dist/DocHandle.js +104 -53
  8. package/dist/DocUrl.d.ts +38 -18
  9. package/dist/DocUrl.d.ts.map +1 -1
  10. package/dist/DocUrl.js +63 -24
  11. package/dist/Repo.d.ts.map +1 -1
  12. package/dist/Repo.js +9 -9
  13. package/dist/helpers/headsAreSame.d.ts +2 -2
  14. package/dist/helpers/headsAreSame.d.ts.map +1 -1
  15. package/dist/helpers/headsAreSame.js +1 -4
  16. package/dist/helpers/tests/network-adapter-tests.js +10 -10
  17. package/dist/index.d.ts +3 -2
  18. package/dist/index.d.ts.map +1 -1
  19. package/dist/index.js +1 -0
  20. package/dist/network/NetworkAdapter.d.ts +2 -3
  21. package/dist/network/NetworkAdapter.d.ts.map +1 -1
  22. package/dist/network/NetworkSubsystem.d.ts +2 -3
  23. package/dist/network/NetworkSubsystem.d.ts.map +1 -1
  24. package/dist/network/NetworkSubsystem.js +9 -13
  25. package/dist/storage/StorageAdapter.d.ts +9 -5
  26. package/dist/storage/StorageAdapter.d.ts.map +1 -1
  27. package/dist/storage/StorageSubsystem.d.ts +4 -4
  28. package/dist/storage/StorageSubsystem.d.ts.map +1 -1
  29. package/dist/storage/StorageSubsystem.js +109 -31
  30. package/dist/synchronizer/CollectionSynchronizer.d.ts +1 -1
  31. package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
  32. package/dist/synchronizer/CollectionSynchronizer.js +5 -1
  33. package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
  34. package/dist/synchronizer/DocSynchronizer.js +6 -5
  35. package/dist/types.d.ts +6 -0
  36. package/dist/types.d.ts.map +1 -1
  37. package/package.json +8 -5
  38. package/src/DocCollection.ts +32 -22
  39. package/src/DocHandle.ts +119 -77
  40. package/src/DocUrl.ts +90 -0
  41. package/src/Repo.ts +10 -11
  42. package/src/helpers/headsAreSame.ts +3 -5
  43. package/src/helpers/tests/network-adapter-tests.ts +10 -10
  44. package/src/index.ts +7 -5
  45. package/src/network/NetworkAdapter.ts +2 -3
  46. package/src/network/NetworkSubsystem.ts +9 -14
  47. package/src/storage/StorageAdapter.ts +7 -5
  48. package/src/storage/StorageSubsystem.ts +133 -36
  49. package/src/synchronizer/CollectionSynchronizer.ts +10 -2
  50. package/src/synchronizer/DocSynchronizer.ts +7 -6
  51. package/src/types.ts +4 -1
  52. package/test/CollectionSynchronizer.test.ts +1 -1
  53. package/test/DocCollection.test.ts +3 -2
  54. package/test/DocHandle.test.ts +40 -35
  55. package/test/DocSynchronizer.test.ts +3 -2
  56. package/test/Repo.test.ts +134 -27
  57. package/test/StorageSubsystem.test.ts +13 -10
  58. package/test/helpers/DummyNetworkAdapter.ts +2 -2
  59. package/test/helpers/DummyStorageAdapter.ts +8 -4
package/src/DocUrl.ts ADDED
@@ -0,0 +1,90 @@
+ import {
+   type AutomergeUrl,
+   type BinaryDocumentId,
+   type DocumentId,
+ } from "./types"
+ import { v4 as uuid } from "uuid"
+ import bs58check from "bs58check"
+
+ export const urlPrefix = "automerge:"
+
+ /**
+  * given an Automerge URL, return a decoded DocumentId (and the encoded DocumentId)
+  *
+  * @param url
+  * @returns { documentId: Uint8Array(16), encodedDocumentId: bs58check.encode(documentId) }
+  */
+ export const parseAutomergeUrl = (url: AutomergeUrl) => {
+   const { binaryDocumentId: binaryDocumentId, encodedDocumentId } = parts(url)
+   if (!binaryDocumentId) throw new Error("Invalid document URL: " + url)
+   return { binaryDocumentId, encodedDocumentId }
+ }
+
+ interface StringifyAutomergeUrlOptions {
+   documentId: DocumentId | BinaryDocumentId
+ }
+
+ /**
+  * Given a documentId in either canonical form, return an Automerge URL
+  * Throws on invalid input.
+  * Note: this is an object because we anticipate adding fields in the future.
+  * @param { documentId: EncodedDocumentId | DocumentId }
+  * @returns AutomergeUrl
+  */
+ export const stringifyAutomergeUrl = ({
+   documentId,
+ }: StringifyAutomergeUrlOptions): AutomergeUrl => {
+   if (documentId instanceof Uint8Array)
+     return (urlPrefix +
+       binaryToDocumentId(documentId as BinaryDocumentId)) as AutomergeUrl
+   else if (typeof documentId === "string") {
+     return (urlPrefix + documentId) as AutomergeUrl
+   }
+   throw new Error("Invalid documentId: " + documentId)
+ }
+
+ /**
+  * Given a string, return true if it is a valid Automerge URL
+  * also acts as a type discriminator in Typescript.
+  * @param str: URL candidate
+  * @returns boolean
+  */
+ export const isValidAutomergeUrl = (str: string): str is AutomergeUrl => {
+   if (!str.startsWith(urlPrefix)) return false
+
+   const { binaryDocumentId: documentId } = parts(str)
+   return documentId ? true : false
+ }
+
+ /**
+  * generateAutomergeUrl produces a new AutomergeUrl.
+  * generally only called by create(), but used in tests as well.
+  * @returns a new Automerge URL with a random UUID documentId
+  */
+ export const generateAutomergeUrl = (): AutomergeUrl =>
+   stringifyAutomergeUrl({
+     documentId: uuid(null, new Uint8Array(16)) as BinaryDocumentId,
+   })
+
+ export const documentIdToBinary = (
+   docId: DocumentId
+ ): BinaryDocumentId | undefined =>
+   bs58check.decodeUnsafe(docId) as BinaryDocumentId | undefined
+
+ export const binaryToDocumentId = (docId: BinaryDocumentId): DocumentId =>
+   bs58check.encode(docId) as DocumentId
+
+ /**
+  * parts breaks up the URL into constituent pieces,
+  * eventually this could include things like heads, so we use this structure
+  * we return both a binary & string-encoded version of the document ID
+  * @param str
+  * @returns { binaryDocumentId, encodedDocumentId }
+  */
+ const parts = (str: string) => {
+   const regex = new RegExp(`^${urlPrefix}(\\w+)$`)
+   const [m, docMatch] = str.match(regex) || []
+   const encodedDocumentId = docMatch as DocumentId
+   const binaryDocumentId = documentIdToBinary(encodedDocumentId)
+   return { binaryDocumentId, encodedDocumentId }
+ }
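The new DocUrl module makes bs58check-encoded `automerge:` URLs the public way to address documents, replacing bare string documentIds. A minimal usage sketch, importing straight from the module (the package index in this release re-exports `stringifyAutomergeUrl` under the name `generateAutomergeUrl`); variable names here are illustrative:

```ts
import {
  generateAutomergeUrl,
  parseAutomergeUrl,
  isValidAutomergeUrl,
} from "./DocUrl.js"

// Mint a URL wrapping a fresh random 16-byte document id.
const url = generateAutomergeUrl() // "automerge:" + bs58check-encoded id

// Strings from user input can be validated and narrowed before use.
const candidate: string = url
if (isValidAutomergeUrl(candidate)) {
  // `candidate` is now typed as AutomergeUrl.
  const { binaryDocumentId, encodedDocumentId } = parseAutomergeUrl(candidate)
  console.log(encodedDocumentId) // the bs58check string, usable as a DocumentId
  console.log(binaryDocumentId.byteLength) // 16
}
```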
package/src/Repo.ts CHANGED
@@ -5,12 +5,10 @@ import { NetworkSubsystem } from "./network/NetworkSubsystem.js"
  import { StorageAdapter } from "./storage/StorageAdapter.js"
  import { StorageSubsystem } from "./storage/StorageSubsystem.js"
  import { CollectionSynchronizer } from "./synchronizer/CollectionSynchronizer.js"
- import { ChannelId, DocumentId, PeerId } from "./types.js"
+ import { DocumentId, PeerId } from "./types.js"

  import debug from "debug"

- const SYNC_CHANNEL = "sync_channel" as ChannelId
-
  /** A Repo is a DocCollection with networking, syncing, and storage capabilities. */
  export class Repo extends DocCollection {
    #log: debug.Debugger
@@ -31,14 +29,15 @@ export class Repo extends DocCollection {
      this.on("document", async ({ handle }) => {
        if (storageSubsystem) {
          // Save when the document changes
-         handle.on("change", async ({ handle }) => {
-           const doc = await handle.value()
-           await storageSubsystem.save(handle.documentId, doc)
+         handle.on("heads-changed", async ({ handle, doc }) => {
+           await storageSubsystem.saveDoc(handle.documentId, doc)
          })

          // Try to load from disk
-         const binary = await storageSubsystem.loadBinary(handle.documentId)
-         handle.load(binary)
+         const loadedDoc = await storageSubsystem.loadDoc(handle.documentId)
+         if (loadedDoc) {
+           handle.update(() => loadedDoc)
+         }
        }

        handle.request()
@@ -47,12 +46,12 @@ export class Repo extends DocCollection {
        synchronizer.addDocument(handle.documentId)
      })

-     this.on("delete-document", ({ documentId }) => {
+     this.on("delete-document", ({ encodedDocumentId }) => {
        // TODO Pass the delete on to the network
        // synchronizer.removeDocument(documentId)

        if (storageSubsystem) {
-         storageSubsystem.remove(documentId)
+         storageSubsystem.remove(encodedDocumentId)
        }
      })
@@ -112,7 +111,7 @@ export class Repo extends DocCollection {
      })

      // We establish a special channel for sync messages
-     networkSubsystem.join(SYNC_CHANNEL)
+     networkSubsystem.join()

      // EPHEMERAL DATA
      // The ephemeral data subsystem uses the network to send and receive messages that are not
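For applications, the practical effect of these Repo changes is that persistence is driven entirely by the handle's `heads-changed` event: every change is saved through `storageSubsystem.saveDoc()`, and on startup a stored document is loaded with `loadDoc()` and applied via `handle.update()`. A rough sketch of how that looks from the outside, assuming the usual Repo config shape with `network` and `storage` fields; the adapter classes are placeholders, not part of this package:

```ts
import { Repo } from "@automerge/automerge-repo"
// Placeholder adapters; substitute any concrete StorageAdapter / NetworkAdapter.
import { MyStorageAdapter, MyNetworkAdapter } from "./my-adapters.js"

const repo = new Repo({
  storage: new MyStorageAdapter(),
  network: [new MyNetworkAdapter()],
})

const handle = repo.create<{ count: number }>()
handle.change(d => {
  d.count = 1 // the "heads-changed" listener persists this automatically
})
```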
package/src/helpers/headsAreSame.ts CHANGED
@@ -1,8 +1,6 @@
- import * as A from "@automerge/automerge"
+ import {Heads} from "@automerge/automerge"
  import { arraysAreEqual } from "./arraysAreEqual.js"

- export const headsAreSame = <T>(a: A.Doc<T>, b: A.Doc<T>) => {
-   const aHeads = A.getHeads(a)
-   const bHeads = A.getHeads(b)
-   return arraysAreEqual(aHeads, bHeads)
+ export const headsAreSame = (a: Heads, b: Heads) => {
+   return arraysAreEqual(a, b)
  }
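`headsAreSame` now compares two `Heads` arrays instead of two documents, so callers such as the storage subsystem's `#shouldSave` can compare against previously recorded heads without keeping an old copy of the document around. A quick illustration of the new call shape:

```ts
import * as A from "@automerge/automerge"
import { headsAreSame } from "./helpers/headsAreSame.js"

const docA = A.change(A.init<{ n: number }>(), d => {
  d.n = 1
})
const docB = A.clone(docA)

// The old signature took the documents themselves; now the caller extracts heads.
headsAreSame(A.getHeads(docA), A.getHeads(docB)) // true until either doc changes
```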
package/src/helpers/tests/network-adapter-tests.ts CHANGED
@@ -37,7 +37,7 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

    // Bob receives the document
    await eventPromise(bobRepo, "document")
-   const bobHandle = bobRepo.find<TestDoc>(aliceHandle.documentId)
+   const bobHandle = bobRepo.find<TestDoc>(aliceHandle.url)

    // Alice changes the document
    aliceHandle.change(d => {
@@ -46,7 +46,7 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

    // Bob receives the change
    await eventPromise(bobHandle, "change")
-   assert.equal((await bobHandle.value()).foo, "bar")
+   assert.equal((await bobHandle.doc()).foo, "bar")

    // Bob changes the document
    bobHandle.change(d => {
@@ -55,7 +55,7 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

    // Alice receives the change
    await eventPromise(aliceHandle, "change")
-   assert.equal((await aliceHandle.value()).foo, "baz")
+   assert.equal((await aliceHandle.doc()).foo, "baz")
  }

  // Run the test in both directions, in case they're different types of adapters
@@ -83,12 +83,12 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

    // Alice creates a document
    const aliceHandle = aliceRepo.create<TestDoc>()
-   const documentId = aliceHandle.documentId
+   const docUrl = aliceHandle.url

    // Bob and Charlie receive the document
    await eventPromises([bobRepo, charlieRepo], "document")
-   const bobHandle = bobRepo.find<TestDoc>(documentId)
-   const charlieHandle = charlieRepo.find<TestDoc>(documentId)
+   const bobHandle = bobRepo.find<TestDoc>(docUrl)
+   const charlieHandle = charlieRepo.find<TestDoc>(docUrl)

    // Alice changes the document
    aliceHandle.change(d => {
@@ -97,8 +97,8 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

    // Bob and Charlie receive the change
    await eventPromises([bobHandle, charlieHandle], "change")
-   assert.equal((await bobHandle.value()).foo, "bar")
-   assert.equal((await charlieHandle.value()).foo, "bar")
+   assert.equal((await bobHandle.doc()).foo, "bar")
+   assert.equal((await charlieHandle.doc()).foo, "bar")

    // Charlie changes the document
    charlieHandle.change(d => {
@@ -107,8 +107,8 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

    // Alice and Bob receive the change
    await eventPromises([aliceHandle, bobHandle], "change")
-   assert.equal((await bobHandle.value()).foo, "baz")
-   assert.equal((await charlieHandle.value()).foo, "baz")
+   assert.equal((await bobHandle.doc()).foo, "baz")
+   assert.equal((await charlieHandle.doc()).foo, "baz")

    teardown()
  })
package/src/index.ts CHANGED
@@ -1,9 +1,6 @@
  export { DocCollection } from "./DocCollection.js"
  export { DocHandle, HandleState } from "./DocHandle.js"
- export type {
-   DocHandleChangePayload,
-   DocHandlePatchPayload,
- } from "./DocHandle.js"
+ export type { DocHandleChangePayload } from "./DocHandle.js"
  export { NetworkAdapter } from "./network/NetworkAdapter.js"
  export type {
    InboundMessagePayload,
@@ -14,7 +11,12 @@ export type {
  } from "./network/NetworkAdapter.js"
  export { NetworkSubsystem } from "./network/NetworkSubsystem.js"
  export { Repo, type SharePolicy } from "./Repo.js"
- export { StorageAdapter } from "./storage/StorageAdapter.js"
+ export { StorageAdapter, type StorageKey } from "./storage/StorageAdapter.js"
  export { StorageSubsystem } from "./storage/StorageSubsystem.js"
  export { CollectionSynchronizer } from "./synchronizer/CollectionSynchronizer.js"
+ export {
+   parseAutomergeUrl,
+   isValidAutomergeUrl,
+   stringifyAutomergeUrl as generateAutomergeUrl,
+ } from "./DocUrl.js"
  export * from "./types.js"
package/src/network/NetworkAdapter.ts CHANGED
@@ -13,9 +13,9 @@ export abstract class NetworkAdapter extends EventEmitter<NetworkAdapterEvents>
      broadcast: boolean
    ): void

-   abstract join(channelId: ChannelId): void
+   abstract join(): void

-   abstract leave(channelId: ChannelId): void
+   abstract leave(): void
  }

  // events & payloads
@@ -34,7 +34,6 @@ export interface OpenPayload {

  export interface PeerCandidatePayload {
    peerId: PeerId
-   channelId: ChannelId
  }

  export interface MessagePayload {
package/src/network/NetworkSubsystem.ts CHANGED
@@ -11,7 +11,6 @@ import debug from "debug"
  export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
    #log: debug.Debugger
    #adaptersByPeer: Record<PeerId, NetworkAdapter> = {}
-   #channels: ChannelId[]

    constructor(
      private adapters: NetworkAdapter[],
@@ -19,14 +18,13 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
    ) {
      super()
      this.#log = debug(`automerge-repo:network:${this.peerId}`)
-     this.#channels = []
      this.adapters.forEach(a => this.addNetworkAdapter(a))
    }

    addNetworkAdapter(networkAdapter: NetworkAdapter) {
      networkAdapter.connect(this.peerId)

-     networkAdapter.on("peer-candidate", ({ peerId, channelId }) => {
+     networkAdapter.on("peer-candidate", ({ peerId }) => {
        this.#log(`peer candidate: ${peerId} `)

        // TODO: This is where authentication would happen
@@ -36,7 +34,7 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
          this.#adaptersByPeer[peerId] = networkAdapter
        }

-       this.emit("peer", { peerId, channelId })
+       this.emit("peer", { peerId })
      })

      networkAdapter.on("peer-disconnected", ({ peerId }) => {
@@ -74,7 +72,7 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
        })
      })

-     this.#channels.forEach(c => networkAdapter.join(c))
+     networkAdapter.join()
    }

    sendMessage(
@@ -99,16 +97,14 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
      }
    }

-   join(channelId: ChannelId) {
-     this.#log(`Joining channel ${channelId}`)
-     this.#channels.push(channelId)
-     this.adapters.forEach(a => a.join(channelId))
+   join() {
+     this.#log(`Joining network`)
+     this.adapters.forEach(a => a.join())
    }

-   leave(channelId: ChannelId) {
-     this.#log(`Leaving channel ${channelId}`)
-     this.#channels = this.#channels.filter(c => c !== channelId)
-     this.adapters.forEach(a => a.leave(channelId))
+   leave() {
+     this.#log(`Leaving network`)
+     this.adapters.forEach(a => a.leave())
    }
  }

@@ -126,5 +122,4 @@

  export interface PeerPayload {
    peerId: PeerId
-   channelId: ChannelId
  }
package/src/storage/StorageAdapter.ts CHANGED
@@ -4,15 +4,17 @@ export abstract class StorageAdapter {
    // [documentId, "snapshot"] or [documentId, "incremental", "0"]
    // but the storage adapter is agnostic to the meaning of the key
    // and we expect to store other data in the future such as syncstates
-   abstract load(key: string[]): Promise<Uint8Array | undefined>
-   abstract save(key: string[], data: Uint8Array): Promise<void>
-   abstract remove(key: string[]): Promise<void>
+   abstract load(key: StorageKey): Promise<Uint8Array | undefined>
+   abstract save(key: StorageKey, data: Uint8Array): Promise<void>
+   abstract remove(key: StorageKey): Promise<void>

    // the keyprefix will match any key that starts with the given array
    // for example, [documentId, "incremental"] will match all incremental saves
    // or [documentId] will match all data for a given document
    // be careful! this will also match [documentId, "syncState"]!
    // (we aren't using this yet but keep it in mind.)
-   abstract loadRange(keyPrefix: string[]): Promise<Uint8Array[]>
-   abstract removeRange(keyPrefix: string[]): Promise<void>
+   abstract loadRange(keyPrefix: StorageKey): Promise<{key: StorageKey, data: Uint8Array}[]>
+   abstract removeRange(keyPrefix: StorageKey): Promise<void>
  }
+
+ export type StorageKey = string[]
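`StorageKey` is now a first-class type, and `loadRange` returns each chunk's key alongside its bytes, which is what lets the storage subsystem classify chunks on load. A minimal in-memory adapter sketch against the new surface (the class name and key-joining scheme are illustrative, not part of the package):

```ts
import { StorageAdapter, StorageKey } from "@automerge/automerge-repo"

class InMemoryStorageAdapter extends StorageAdapter {
  #chunks = new Map<string, { key: StorageKey; data: Uint8Array }>()

  // Keys are string arrays; join them only for Map lookup.
  #id(key: StorageKey) {
    return key.join("\u0000")
  }

  async load(key: StorageKey): Promise<Uint8Array | undefined> {
    return this.#chunks.get(this.#id(key))?.data
  }

  async save(key: StorageKey, data: Uint8Array): Promise<void> {
    this.#chunks.set(this.#id(key), { key, data })
  }

  async remove(key: StorageKey): Promise<void> {
    this.#chunks.delete(this.#id(key))
  }

  // New shape: return the matching keys as well as the data.
  async loadRange(
    keyPrefix: StorageKey
  ): Promise<{ key: StorageKey; data: Uint8Array }[]> {
    return [...this.#chunks.values()].filter(({ key }) =>
      keyPrefix.every((part, i) => key[i] === part)
    )
  }

  async removeRange(keyPrefix: StorageKey): Promise<void> {
    for (const [id, { key }] of this.#chunks) {
      if (keyPrefix.every((part, i) => key[i] === part)) this.#chunks.delete(id)
    }
  }
}
```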
package/src/storage/StorageSubsystem.ts CHANGED
@@ -1,8 +1,20 @@
  import * as A from "@automerge/automerge"
- import { DocumentId } from "../types.js"
- import { StorageAdapter } from "./StorageAdapter.js"
- import { mergeArrays } from "../helpers/mergeArrays.js"
+ import { StorageAdapter, StorageKey } from "./StorageAdapter.js"
  import * as sha256 from "fast-sha256"
+ import { type DocumentId } from "../types.js"
+ import { mergeArrays } from "../helpers/mergeArrays.js"
+ import debug from "debug"
+ import { headsAreSame } from "../helpers/headsAreSame.js"
+
+ // Metadata about a chunk of data loaded from storage. This is stored on the
+ // StorageSubsystem so when we are compacting we know what chunks we can safely delete
+ type StorageChunkInfo = {
+   key: StorageKey
+   type: ChunkType
+   size: number
+ }
+
+ export type ChunkType = "snapshot" | "incremental"

  function keyHash(binary: Uint8Array) {
    const hash = sha256.hash(binary)
@@ -11,58 +23,108 @@ function keyHash(binary: Uint8Array) {
    return hashHex
  }

+ function headsHash(heads: A.Heads): string {
+   let encoder = new TextEncoder()
+   let headsbinary = mergeArrays(heads.map(h => encoder.encode(h)))
+   return keyHash(headsbinary)
+ }
+
  export class StorageSubsystem {
    #storageAdapter: StorageAdapter
+   #chunkInfos: Map<DocumentId, StorageChunkInfo[]> = new Map()
+   #storedHeads: Map<DocumentId, A.Heads> = new Map()
+   #log = debug(`automerge-repo:storage-subsystem`)

    constructor(storageAdapter: StorageAdapter) {
      this.#storageAdapter = storageAdapter
    }

-   async #saveIncremental(documentId: DocumentId, doc: A.Doc<unknown>) {
-     const binary = A.saveIncremental(doc)
+   async #saveIncremental(
+     documentId: DocumentId,
+     doc: A.Doc<unknown>
+   ): Promise<void> {
+     const binary = A.saveSince(doc, this.#storedHeads.get(documentId) ?? [])
      if (binary && binary.length > 0) {
        const key = [documentId, "incremental", keyHash(binary)]
-       return await this.#storageAdapter.save(key, binary)
+       this.#log(`Saving incremental ${key} for document ${documentId}`)
+       await this.#storageAdapter.save(key, binary)
+       if (!this.#chunkInfos.has(documentId)) {
+         this.#chunkInfos.set(documentId, [])
+       }
+       this.#chunkInfos.get(documentId)!!.push({
+         key,
+         type: "incremental",
+         size: binary.length,
+       })
+       this.#storedHeads.set(documentId, A.getHeads(doc))
      } else {
-       Promise.resolve()
+       return Promise.resolve()
      }
    }

-   async #saveTotal(documentId: DocumentId, doc: A.Doc<unknown>) {
+   async #saveTotal(
+     documentId: DocumentId,
+     doc: A.Doc<unknown>,
+     sourceChunks: StorageChunkInfo[]
+   ): Promise<void> {
      const binary = A.save(doc)
+     const snapshotHash = headsHash(A.getHeads(doc))
+     const key = [documentId, "snapshot", snapshotHash]
+     const oldKeys = new Set(
+       sourceChunks.map(c => c.key).filter(k => k[2] !== snapshotHash)
+     )

-     // TODO: this is still racy if two nodes are both writing to the store
-     await this.#storageAdapter.save([documentId, "snapshot"], binary)
-
-     // don't start deleting the incremental keys until save is done!
-     return this.#storageAdapter.removeRange([documentId, "incremental"])
-   }
+     this.#log(`Saving snapshot ${key} for document ${documentId}`)
+     this.#log(`deleting old chunks ${Array.from(oldKeys)}`)

-   async loadBinary(documentId: DocumentId): Promise<Uint8Array> {
-     // it would probably be best to ensure .snapshot comes back first
-     // prevent the race condition with saveIncremental
-     const binaries: Uint8Array[] = await this.#storageAdapter.loadRange([
-       documentId,
-     ])
+     await this.#storageAdapter.save(key, binary)

-     return mergeArrays(binaries)
+     for (const key of oldKeys) {
+       await this.#storageAdapter.remove(key)
+     }
+     const newChunkInfos =
+       this.#chunkInfos.get(documentId)?.filter(c => !oldKeys.has(c.key)) ?? []
+     newChunkInfos.push({ key, type: "snapshot", size: binary.length })
+     this.#chunkInfos.set(documentId, newChunkInfos)
    }

-   async load<T>(
-     documentId: DocumentId,
-     prevDoc: A.Doc<T> = A.init<T>()
-   ): Promise<A.Doc<T>> {
-     const doc = A.loadIncremental(prevDoc, await this.loadBinary(documentId))
-     A.saveIncremental(doc)
-     return doc
+   async loadDoc(documentId: DocumentId): Promise<A.Doc<unknown> | null> {
+     const loaded = await this.#storageAdapter.loadRange([documentId])
+     const binaries = []
+     const chunkInfos: StorageChunkInfo[] = []
+     for (const chunk of loaded) {
+       const chunkType = chunkTypeFromKey(chunk.key)
+       if (chunkType == null) {
+         continue
+       }
+       chunkInfos.push({
+         key: chunk.key,
+         type: chunkType,
+         size: chunk.data.length,
+       })
+       binaries.push(chunk.data)
+     }
+     this.#chunkInfos.set(documentId, chunkInfos)
+     const binary = mergeArrays(binaries)
+     if (binary.length === 0) {
+       return null
+     }
+     const newDoc = A.loadIncremental(A.init(), binary)
+     this.#storedHeads.set(documentId, A.getHeads(newDoc))
+     return newDoc
    }

-   async save(documentId: DocumentId, doc: A.Doc<unknown>) {
-     if (this.#shouldCompact(documentId)) {
-       return this.#saveTotal(documentId, doc)
+   async saveDoc(documentId: DocumentId, doc: A.Doc<unknown>): Promise<void> {
+     if (!this.#shouldSave(documentId, doc)) {
+       return
+     }
+     let sourceChunks = this.#chunkInfos.get(documentId) ?? []
+     if (this.#shouldCompact(sourceChunks)) {
+       this.#saveTotal(documentId, doc, sourceChunks)
      } else {
-       return this.#saveIncremental(documentId, doc)
+       this.#saveIncremental(documentId, doc)
      }
+     this.#storedHeads.set(documentId, A.getHeads(doc))
    }

    async remove(documentId: DocumentId) {
@@ -70,9 +132,44 @@ export class StorageSubsystem {
      this.#storageAdapter.removeRange([documentId, "incremental"])
    }

-   // TODO: make this, you know, good.
-   // this is probably fine
-   #shouldCompact(documentId: DocumentId) {
-     return Math.random() < 0.05 // this.#changeCount[documentId] >= 20
+   #shouldSave(documentId: DocumentId, doc: A.Doc<unknown>): boolean {
+     const oldHeads = this.#storedHeads.get(documentId)
+     if (!oldHeads) {
+       return true
+     }
+
+     const newHeads = A.getHeads(doc)
+     if (headsAreSame(newHeads, oldHeads)) {
+       return false
+     }
+
+     return true
+   }
+
+   #shouldCompact(sourceChunks: StorageChunkInfo[]) {
+     // compact if the incremental size is greater than the snapshot size
+     let snapshotSize = 0
+     let incrementalSize = 0
+     for (const chunk of sourceChunks) {
+       if (chunk.type === "snapshot") {
+         snapshotSize += chunk.size
+       } else {
+         incrementalSize += chunk.size
+       }
+     }
+     return incrementalSize > snapshotSize
+   }
+ }
+
+ function chunkTypeFromKey(key: StorageKey): ChunkType | null {
+   if (key.length < 2) {
+     return null
+   }
+   const chunkTypeStr = key[key.length - 2]
+   if (chunkTypeStr === "snapshot" || chunkTypeStr === "incremental") {
+     const chunkType: ChunkType = chunkTypeStr
+     return chunkType
+   } else {
+     return null
    }
  }
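The compaction policy is now purely size-based: `#saveIncremental` appends a chunk produced by `A.saveSince` against the last stored heads, and once the accumulated incremental chunks outweigh the snapshot, the next `saveDoc` writes a full snapshot (keyed by a hash of the document's heads) and deletes the superseded chunks. A worked example of the decision with made-up sizes:

```ts
// Hypothetical chunk bookkeeping for one document; sizes are illustrative.
const chunks = [
  { type: "snapshot", size: 4000 },
  { type: "incremental", size: 1500 },
  { type: "incremental", size: 3000 },
]

const snapshotSize = chunks
  .filter(c => c.type === "snapshot")
  .reduce((sum, c) => sum + c.size, 0) // 4000
const incrementalSize = chunks
  .filter(c => c.type === "incremental")
  .reduce((sum, c) => sum + c.size, 0) // 4500

// 4500 > 4000, so the next saveDoc() compacts rather than appending again.
console.log(incrementalSize > snapshotSize) // true
```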
package/src/synchronizer/CollectionSynchronizer.ts CHANGED
@@ -1,6 +1,11 @@
  import { DocCollection } from "../DocCollection.js"
  import { DocHandle } from "../DocHandle.js"
- import { ChannelId, DocumentId, PeerId } from "../types.js"
+ import {
+   documentIdToBinary,
+   binaryToDocumentId,
+   stringifyAutomergeUrl,
+ } from "../DocUrl.js"
+ import { ChannelId, BinaryDocumentId, PeerId, DocumentId } from "../types.js"
  import { DocSynchronizer } from "./DocSynchronizer.js"
  import { Synchronizer } from "./Synchronizer.js"

@@ -22,7 +27,7 @@ export class CollectionSynchronizer extends Synchronizer {
    /** Returns a synchronizer for the given document, creating one if it doesn't already exist. */
    #fetchDocSynchronizer(documentId: DocumentId) {
      if (!this.#docSynchronizers[documentId]) {
-       const handle = this.repo.find(documentId)
+       const handle = this.repo.find(stringifyAutomergeUrl({ documentId }))
        this.#docSynchronizers[documentId] = this.#initDocSynchronizer(handle)
      }
      return this.#docSynchronizers[documentId]
@@ -60,6 +65,9 @@ export class CollectionSynchronizer extends Synchronizer {
      log(`onSyncMessage: ${peerId}, ${channelId}, ${message.byteLength}bytes`)

      const documentId = channelId as unknown as DocumentId
+     if (!documentId) {
+       throw new Error("received a message with an invalid documentId")
+     }
      const docSynchronizer = await this.#fetchDocSynchronizer(documentId)

      await docSynchronizer.receiveSyncMessage(peerId, channelId, message)
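Since `repo.find()` now takes an AutomergeUrl, internal code that still holds a bare DocumentId (here, one recovered from the sync channel id) wraps it with `stringifyAutomergeUrl` first. The same pattern works anywhere a plain id needs to become a handle; a small sketch using the in-package imports (the helper name is hypothetical):

```ts
import { stringifyAutomergeUrl } from "./DocUrl.js"
import type { Repo } from "./Repo.js"
import type { DocumentId } from "./types.js"

// Hypothetical helper: look up a handle when all you have is a bare DocumentId.
function findByDocumentId<T>(repo: Repo, documentId: DocumentId) {
  return repo.find<T>(stringifyAutomergeUrl({ documentId }))
}
```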
package/src/synchronizer/DocSynchronizer.ts CHANGED
@@ -1,5 +1,5 @@
  import * as A from "@automerge/automerge"
- import { DocHandle } from "../DocHandle.js"
+ import { DocHandle, READY, REQUESTING } from "../DocHandle.js"
  import { ChannelId, PeerId } from "../types.js"
  import { Synchronizer } from "./Synchronizer.js"

@@ -33,7 +33,7 @@ export class DocSynchronizer extends Synchronizer {

    // Process pending sync messages immediately after the handle becomes ready.
    void (async () => {
-     await handle.loadAttemptedValue()
+     await handle.doc([READY, REQUESTING])
      this.#processAllPendingSyncMessages()
    })()
  }
@@ -46,7 +46,7 @@ export class DocSynchronizer extends Synchronizer {

  async #syncWithPeers() {
    this.#log(`syncWithPeers`)
-   const doc = await this.handle.value()
+   const doc = await this.handle.doc()
    this.#peers.forEach(peerId => this.#sendSyncMessage(peerId, doc))
  }

@@ -78,6 +78,7 @@ export class DocSynchronizer extends Synchronizer {
    this.#logMessage(`sendSyncMessage 🡒 ${peerId}`, message)

    const channelId = this.handle.documentId as string as ChannelId
+
    this.emit("message", {
      targetId: peerId,
      channelId,
@@ -120,7 +121,7 @@ export class DocSynchronizer extends Synchronizer {

    // At this point if we don't have anything in our storage, we need to use an empty doc to sync
    // with; but we don't want to surface that state to the front end
-   void this.handle.loadAttemptedValue().then(doc => {
+   void this.handle.doc([READY, REQUESTING]).then(doc => {
      // HACK: if we have a sync state already, we round-trip it through the encoding system to make
      // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
      // messages during disconnection.
@@ -143,11 +144,11 @@ export class DocSynchronizer extends Synchronizer {
    channelId: ChannelId,
    message: Uint8Array
  ) {
-   if ((channelId as string) !== (this.documentId as string))
+   if ((channelId as string) !== (this.handle.documentId as string))
      throw new Error(`channelId doesn't match documentId`)

    // We need to block receiving the syncMessages until we've checked local storage
-   if (!this.handle.isReadyOrRequesting()) {
+   if (!this.handle.inState([READY, REQUESTING])) {
      this.#pendingSyncMessages.push({ peerId, message })
      return
    }
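`DocHandle.doc()` can now be awaited with an explicit set of acceptable states; the synchronizer passes `[READY, REQUESTING]` so it can start exchanging sync messages as soon as local storage has been checked, without waiting for peers. A small sketch of the same call outside the synchronizer, assuming a handle obtained from a Repo (the function name is hypothetical):

```ts
import { DocHandle, READY, REQUESTING } from "./DocHandle.js"

async function docWithoutWaitingForPeers<T>(handle: DocHandle<T>) {
  // Resolves once the handle is READY, or still REQUESTING from the network;
  // in the latter case the returned doc may simply be empty so far.
  return handle.doc([READY, REQUESTING])
}
```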
package/src/types.ts CHANGED
@@ -1,3 +1,6 @@
- export type DocumentId = string & { __documentId: true }
+ export type DocumentId = string & { __documentId: true } // for logging
+ export type AutomergeUrl = string & { __documentUrl: true } // for opening / linking
+ export type BinaryDocumentId = Uint8Array & { __binaryDocumentId: true } // for storing / syncing
+
  export type PeerId = string & { __peerId: false }
  export type ChannelId = string & { __channelId: false }
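The three aliases are branded types: at runtime they are plain strings or byte arrays, but the phantom property keeps one kind of identifier from being passed where another is expected. A small sketch of how the brand interacts with the `isValidAutomergeUrl` guard (the `openDoc` function is hypothetical):

```ts
import { isValidAutomergeUrl } from "@automerge/automerge-repo"
import type { AutomergeUrl } from "@automerge/automerge-repo"

// Hypothetical application code that only accepts the branded URL type.
declare function openDoc(url: AutomergeUrl): void

function openFromUserInput(input: string) {
  // openDoc(input)          // type error: a plain string carries no brand
  if (isValidAutomergeUrl(input)) {
    openDoc(input) // OK: the guard narrows string to AutomergeUrl
  }
}
```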