@automerge/automerge-repo 1.0.12 → 1.0.14

Files changed (58)
  1. package/dist/AutomergeUrl.d.ts +45 -0
  2. package/dist/AutomergeUrl.d.ts.map +1 -0
  3. package/dist/AutomergeUrl.js +108 -0
  4. package/dist/DocHandle.js +1 -1
  5. package/dist/Repo.d.ts +5 -5
  6. package/dist/Repo.d.ts.map +1 -1
  7. package/dist/Repo.js +10 -21
  8. package/dist/helpers/cbor.js +1 -1
  9. package/dist/index.d.ts +3 -3
  10. package/dist/index.d.ts.map +1 -1
  11. package/dist/index.js +1 -1
  12. package/dist/network/NetworkAdapter.d.ts +3 -3
  13. package/dist/network/NetworkAdapter.d.ts.map +1 -1
  14. package/dist/network/messages.d.ts +7 -18
  15. package/dist/network/messages.d.ts.map +1 -1
  16. package/dist/storage/StorageAdapter.d.ts +19 -22
  17. package/dist/storage/StorageAdapter.d.ts.map +1 -1
  18. package/dist/storage/StorageAdapter.js +2 -2
  19. package/dist/storage/StorageSubsystem.d.ts +39 -3
  20. package/dist/storage/StorageSubsystem.d.ts.map +1 -1
  21. package/dist/storage/StorageSubsystem.js +128 -75
  22. package/dist/storage/chunkTypeFromKey.d.ts +13 -0
  23. package/dist/storage/chunkTypeFromKey.d.ts.map +1 -0
  24. package/dist/storage/chunkTypeFromKey.js +18 -0
  25. package/dist/storage/keyHash.d.ts +4 -0
  26. package/dist/storage/keyHash.d.ts.map +1 -0
  27. package/dist/storage/keyHash.js +15 -0
  28. package/dist/storage/types.d.ts +37 -0
  29. package/dist/storage/types.d.ts.map +1 -0
  30. package/dist/storage/types.js +1 -0
  31. package/dist/synchronizer/CollectionSynchronizer.js +1 -1
  32. package/dist/types.d.ts +20 -12
  33. package/dist/types.d.ts.map +1 -1
  34. package/package.json +2 -2
  35. package/src/AutomergeUrl.ts +144 -0
  36. package/src/DocHandle.ts +1 -1
  37. package/src/Repo.ts +14 -26
  38. package/src/helpers/cbor.ts +1 -1
  39. package/src/index.ts +12 -4
  40. package/src/network/NetworkAdapter.ts +3 -3
  41. package/src/network/messages.ts +8 -21
  42. package/src/storage/StorageAdapter.ts +23 -30
  43. package/src/storage/StorageSubsystem.ts +159 -93
  44. package/src/storage/chunkTypeFromKey.ts +22 -0
  45. package/src/storage/keyHash.ts +17 -0
  46. package/src/storage/types.ts +39 -0
  47. package/src/synchronizer/CollectionSynchronizer.ts +1 -1
  48. package/src/types.ts +23 -11
  49. package/test/AutomergeUrl.test.ts +100 -0
  50. package/test/DocHandle.test.ts +1 -1
  51. package/test/DocSynchronizer.test.ts +1 -1
  52. package/test/Repo.test.ts +22 -6
  53. package/test/StorageSubsystem.test.ts +144 -36
  54. package/test/helpers/DummyStorageAdapter.ts +2 -4
  55. package/dist/DocUrl.d.ts +0 -39
  56. package/dist/DocUrl.d.ts.map +0 -1
  57. package/dist/DocUrl.js +0 -74
  58. package/src/DocUrl.ts +0 -96
package/src/helpers/cbor.ts CHANGED
@@ -1,7 +1,7 @@
  import { Encoder, decode as cborXdecode } from "cbor-x"
 
  export function encode(obj: unknown): Buffer {
-   const encoder = new Encoder({ tagUint8Array: false })
+   const encoder = new Encoder({ tagUint8Array: false, useRecords: false })
    return encoder.encode(obj)
  }
 
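For context: with cbor-x's record extension enabled (its default), the encoder emits record-structure tags that decoders other than cbor-x may not understand; passing `useRecords: false` keeps messages as plain, interoperable CBOR maps. A minimal round-trip sketch with the same options (illustrative, not code from the package):

```ts
import { Encoder, decode } from "cbor-x"

// Same options as the package's encoder: no custom Uint8Array tag,
// no record structures, so the output is plain CBOR.
const encoder = new Encoder({ tagUint8Array: false, useRecords: false })

const message = { type: "sync", data: new Uint8Array([1, 2, 3]) }
const encoded = encoder.encode(message) // Buffer of CBOR bytes
const decoded = decode(encoded) // back to a plain object
```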
package/src/index.ts CHANGED
@@ -31,7 +31,7 @@ export {
    isValidAutomergeUrl,
    parseAutomergeUrl,
    stringifyAutomergeUrl,
- } from "./DocUrl.js"
+ } from "./AutomergeUrl.js"
  export { Repo } from "./Repo.js"
  export { NetworkAdapter } from "./network/NetworkAdapter.js"
  export { isValidRepoMessage } from "./network/messages.js"
@@ -52,6 +52,7 @@ export type {
    DocHandleOutboundEphemeralMessagePayload,
    HandleState,
  } from "./DocHandle.js"
+
  export type {
    DeleteDocumentPayload,
    DocumentPayload,
@@ -59,21 +60,28 @@ export type {
    RepoEvents,
    SharePolicy,
  } from "./Repo.js"
+
  export type {
    NetworkAdapterEvents,
    OpenPayload,
    PeerCandidatePayload,
    PeerDisconnectedPayload,
  } from "./network/NetworkAdapter.js"
+
  export type {
-   ArriveMessage,
    DocumentUnavailableMessage,
    EphemeralMessage,
    Message,
    RepoMessage,
    RequestMessage,
    SyncMessage,
-   WelcomeMessage,
  } from "./network/messages.js"
- export type { StorageKey } from "./storage/StorageAdapter.js"
+
+ export type {
+   Chunk,
+   ChunkInfo,
+   ChunkType,
+   StorageKey,
+ } from "./storage/types.js"
+
  export * from "./types.js"
package/src/network/NetworkAdapter.ts CHANGED
@@ -1,6 +1,6 @@
  import { EventEmitter } from "eventemitter3"
  import { PeerId } from "../types.js"
- import { RepoMessage } from "./messages.js"
+ import { Message } from "./messages.js"
 
  /** An interface representing some way to connect to other peers
   *
@@ -22,7 +22,7 @@ export abstract class NetworkAdapter extends EventEmitter<NetworkAdapterEvents>
   *
   * @argument message - the message to send
   */
- abstract send(message: RepoMessage): void
+ abstract send(message: Message): void
 
  /** Called by the {@link Repo} to disconnect from the network */
  abstract disconnect(): void
@@ -44,7 +44,7 @@ export interface NetworkAdapterEvents {
  "peer-disconnected": (payload: PeerDisconnectedPayload) => void
 
  /** Emitted when the network adapter receives a message from a peer */
- message: (payload: RepoMessage) => void
+ message: (payload: Message) => void
  }
 
  export interface OpenPayload {
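Since `send` and the `message` event now use the wider `Message` type, adapters are expected to carry `auth` messages like any other traffic. A hedged sketch of a custom adapter against this interface (the BroadcastChannel transport and the connection handling are illustrative, not part of the package):

```ts
import { NetworkAdapter, type Message, type PeerId } from "@automerge/automerge-repo"

// Illustrative adapter relaying messages over a BroadcastChannel.
class BroadcastAdapter extends NetworkAdapter {
  #channel = new BroadcastChannel("automerge-repo-demo")
  #peerId?: PeerId

  connect(peerId: PeerId) {
    this.#peerId = peerId
    // Surface every incoming message (RepoMessage or AuthMessage) to the Repo
    this.#channel.onmessage = e => this.emit("message", e.data as Message)
  }

  send(message: Message) {
    this.#channel.postMessage(message)
  }

  disconnect() {
    this.#channel.close()
  }
}
```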
package/src/network/messages.ts CHANGED
@@ -87,26 +87,18 @@ export type RequestMessage = {
    documentId: DocumentId
  }
 
- /** Notify the network that we have arrived so everyone knows our peer ID */
- export type ArriveMessage = {
-   type: "arrive"
+ /** (anticipating work in progress) */
+ export type AuthMessage<TPayload = any> = {
+   type: "auth"
 
    /** The peer ID of the sender of this message */
    senderId: PeerId
 
-   /** Arrive messages don't have a targetId */
-   targetId: never
- }
-
- /** Respond to an arriving peer with our peer ID */
- export type WelcomeMessage = {
-   type: "welcome"
-
-   /** The peer ID of the recipient sender this message */
-   senderId: PeerId
-
    /** The peer ID of the recipient of this message */
    targetId: PeerId
+
+   /** The payload of the auth message (up to the specific auth provider) */
+   payload: TPayload
  }
 
  /** These are message types that a {@link NetworkAdapter} surfaces to a {@link Repo}. */
@@ -116,13 +108,8 @@ export type RepoMessage =
    | RequestMessage
    | DocumentUnavailableMessage
 
- /** These are all the message types that a {@link NetworkAdapter} might see.
-  *
-  * @remarks
-  * It is not _required_ that a {@link NetworkAdapter} use these types: They are free to use
-  * whatever message type makes sense for their transport. However, this type is a useful default.
-  * */
- export type Message = RepoMessage | ArriveMessage | WelcomeMessage
+ /** These are all the message types that a {@link NetworkAdapter} might see. */
+ export type Message = RepoMessage | AuthMessage
 
  /**
   * The contents of a message, without the sender ID or other properties added by the {@link NetworkSubsystem})
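Because every variant carries a literal `type` field, `Message` remains a discriminated union, so consumers can peel off `auth` traffic before handling repo messages. A small sketch (the handler parameter is hypothetical):

```ts
import type { Message, RepoMessage } from "@automerge/automerge-repo"

function onMessage(message: Message, handleRepo: (m: RepoMessage) => void) {
  if (message.type === "auth") {
    // Narrowed to AuthMessage; payload shape is up to the auth provider
    console.log("auth payload from", message.senderId, message.payload)
  } else {
    // Everything else narrows to RepoMessage (sync, request, ephemeral, doc-unavailable)
    handleRepo(message)
  }
}
```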
package/src/storage/StorageAdapter.ts CHANGED
@@ -1,41 +1,34 @@
+ import { StorageKey, Chunk } from "./types.js"
+
  /** A storage adapter represents some way of storing binary data for a {@link Repo}
   *
   * @remarks
-  * `StorageAdapter`s are a little like a key/value store. The keys are arrays
-  * of strings ({@link StorageKey}) and the values are binary blobs.
+  * `StorageAdapter`s provide a key/value storage interface. The keys are arrays of strings
+  * ({@link StorageKey}) and the values are binary blobs.
   */
  export abstract class StorageAdapter {
-   // load, store, or remove a single binary blob based on an array key
-   // automerge-repo mostly uses keys in the following form:
-   // [documentId, "snapshot"] or [documentId, "incremental", "0"]
-   // but the storage adapter is agnostic to the meaning of the key
-   // and we expect to store other data in the future such as syncstates
-   /** Load the single blob correspongind to `key` */
+   /** Load the single value corresponding to `key` */
    abstract load(key: StorageKey): Promise<Uint8Array | undefined>
-   /** save the blod `data` to the key `key` */
+
+   /** Save the value `data` to the key `key` */
    abstract save(key: StorageKey, data: Uint8Array): Promise<void>
-   /** remove the blob corresponding to `key` */
+
+   /** Remove the value corresponding to `key` */
    abstract remove(key: StorageKey): Promise<void>
 
-   // the keyprefix will match any key that starts with the given array
-   // for example, [documentId, "incremental"] will match all incremental saves
-   // or [documentId] will match all data for a given document
-   // be careful! this will also match [documentId, "syncState"]!
-   // (we aren't using this yet but keep it in mind.)
-   /** Load all blobs with keys that start with `keyPrefix` */
-   abstract loadRange(keyPrefix: StorageKey): Promise<{key: StorageKey, data: Uint8Array}[]>
-   /** Remove all blobs with keys that start with `keyPrefix` */
+   /**
+    * Load all values with keys that start with `keyPrefix`.
+    *
+    * @remarks
+    * The `keyprefix` will match any key that starts with the given array. For example:
+    * - `[documentId, "incremental"]` will match all incremental saves
+    * - `[documentId]` will match all data for a given document.
+    *
+    * Be careful! `[documentId]` would also match something like `[documentId, "syncState"]`! (We
+    * aren't using this yet but keep it in mind.)
+    */
+   abstract loadRange(keyPrefix: StorageKey): Promise<Chunk[]>
+
+   /** Remove all values with keys that start with `keyPrefix` */
    abstract removeRange(keyPrefix: StorageKey): Promise<void>
  }
-
- /** The type of keys for a {@link StorageAdapter}
-  *
-  * @remarks
-  * Storage keys are arrays because they are hierarchical and the storage
-  * subsystem will need to be able to do range queries for all keys that
-  * have a particular prefix. For example, incremental changes for a given
-  * document might be stored under `[<documentId>, "incremental", <SHA256>]`.
-  * `StorageAdapter` implementations should not assume any particular structure
-  * though.
-  **/
- export type StorageKey = string[]
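To make the revised contract concrete, here is a minimal in-memory adapter sketch (illustrative only, assuming `StorageAdapter` and the new types are importable from the package root; note that `loadRange` now returns the exported `Chunk[]` type instead of an inline object type):

```ts
import { StorageAdapter, type StorageKey, type Chunk } from "@automerge/automerge-repo"

// Illustrative adapter: blobs live in a Map keyed by the joined StorageKey.
// A real adapter would persist to IndexedDB, the filesystem, etc.
class InMemoryStorageAdapter extends StorageAdapter {
  #data = new Map<string, Uint8Array>()

  async load(key: StorageKey) {
    return this.#data.get(key.join("/"))
  }

  async save(key: StorageKey, data: Uint8Array) {
    this.#data.set(key.join("/"), data)
  }

  async remove(key: StorageKey) {
    this.#data.delete(key.join("/"))
  }

  async loadRange(keyPrefix: StorageKey): Promise<Chunk[]> {
    const prefix = keyPrefix.join("/")
    return [...this.#data.entries()]
      .filter(([k]) => k === prefix || k.startsWith(prefix + "/"))
      .map(([k, data]) => ({ key: k.split("/"), data }))
  }

  async removeRange(keyPrefix: StorageKey) {
    for (const { key } of await this.loadRange(keyPrefix)) {
      await this.remove(key)
    }
  }
}
```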
package/src/storage/StorageSubsystem.ts CHANGED
@@ -1,46 +1,156 @@
  import * as A from "@automerge/automerge/next"
  import debug from "debug"
- import * as sha256 from "fast-sha256"
  import { headsAreSame } from "../helpers/headsAreSame.js"
  import { mergeArrays } from "../helpers/mergeArrays.js"
  import { type DocumentId } from "../types.js"
- import { StorageAdapter, StorageKey } from "./StorageAdapter.js"
-
- // Metadata about a chunk of data loaded from storage. This is stored on the
- // StorageSubsystem so when we are compacting we know what chunks we can safely delete
- type StorageChunkInfo = {
-   key: StorageKey
-   type: ChunkType
-   size: number
- }
+ import { StorageAdapter } from "./StorageAdapter.js"
+ import { ChunkInfo, StorageKey } from "./types.js"
+ import { keyHash, headsHash } from "./keyHash.js"
+ import { chunkTypeFromKey } from "./chunkTypeFromKey.js"
+
+ /**
+  * The storage subsystem is responsible for saving and loading Automerge documents to and from
+  * a storage adapter. It also provides a generic key/value storage interface for other uses.
+  */
+ export class StorageSubsystem {
+   /** The storage adapter to use for saving and loading documents */
+   #storageAdapter: StorageAdapter
 
- export type ChunkType = "snapshot" | "incremental"
+   /** Record of the latest heads we've loaded or saved for each document */
+   #storedHeads: Map<DocumentId, A.Heads> = new Map()
 
- function keyHash(binary: Uint8Array) {
-   const hash = sha256.hash(binary)
-   const hashArray = Array.from(new Uint8Array(hash)) // convert buffer to byte array
-   const hashHex = hashArray.map(b => ("00" + b.toString(16)).slice(-2)).join("") // convert bytes to hex string
-   return hashHex
- }
+   /** Metadata on the chunks we've already loaded for each document */
+   #chunkInfos: Map<DocumentId, ChunkInfo[]> = new Map()
 
- function headsHash(heads: A.Heads): string {
-   const encoder = new TextEncoder()
-   const headsbinary = mergeArrays(heads.map((h: string) => encoder.encode(h)))
-   return keyHash(headsbinary)
- }
+   /** Flag to avoid compacting when a compaction is already underway */
+   #compacting = false
 
- export class StorageSubsystem {
-   #storageAdapter: StorageAdapter
-   #chunkInfos: Map<DocumentId, StorageChunkInfo[]> = new Map()
-   #storedHeads: Map<DocumentId, A.Heads> = new Map()
    #log = debug(`automerge-repo:storage-subsystem`)
 
-   #snapshotting = false
-
    constructor(storageAdapter: StorageAdapter) {
      this.#storageAdapter = storageAdapter
    }
 
+   // ARBITRARY KEY/VALUE STORAGE
+
+   // The `load`, `save`, and `remove` methods are for generic key/value storage, as opposed to
+   // Automerge documents. For example, they're used by the LocalFirstAuthProvider to persist the
+   // encrypted team graph that encodes group membership and permissions.
+   //
+   // The namespace parameter is to prevent collisions with other users of the storage subsystem.
+   // Typically this will be the name of the plug-in, adapter, or other system that is using it. For
+   // example, the LocalFirstAuthProvider uses the namespace `LocalFirstAuthProvider`.
+
+   /** Loads a value from storage. */
+   async load(
+     /** Namespace to prevent collisions with other users of the storage subsystem. */
+     namespace: string,
+
+     /** Key to load. Typically a UUID or other unique identifier, but could be any string. */
+     key: string
+   ): Promise<Uint8Array | undefined> {
+     const storageKey = [namespace, key] as StorageKey
+     return await this.#storageAdapter.load(storageKey)
+   }
+
+   /** Saves a value in storage. */
+   async save(
+     /** Namespace to prevent collisions with other users of the storage subsystem. */
+     namespace: string,
+
+     /** Key to save. Typically a UUID or other unique identifier, but could be any string. */
+     key: string,
+
+     /** Data to save, as a binary blob. */
+     data: Uint8Array
+   ): Promise<void> {
+     const storageKey = [namespace, key] as StorageKey
+     await this.#storageAdapter.save(storageKey, data)
+   }
+
+   /** Removes a value from storage. */
+   async remove(
+     /** Namespace to prevent collisions with other users of the storage subsystem. */
+     namespace: string,
+
+     /** Key to remove. Typically a UUID or other unique identifier, but could be any string. */
+     key: string
+   ): Promise<void> {
+     const storageKey = [namespace, key] as StorageKey
+     await this.#storageAdapter.remove(storageKey)
+   }
+
+   // AUTOMERGE DOCUMENT STORAGE
+
+   /**
+    * Loads the Automerge document with the given ID from storage.
+    */
+   async loadDoc<T>(documentId: DocumentId): Promise<A.Doc<T> | null> {
+     // Load all the chunks for this document
+     const chunks = await this.#storageAdapter.loadRange([documentId])
+     const binaries = []
+     const chunkInfos: ChunkInfo[] = []
+
+     for (const chunk of chunks) {
+       // chunks might have been deleted in the interim
+       if (chunk.data === undefined) continue
+
+       const chunkType = chunkTypeFromKey(chunk.key)
+       if (chunkType == null) continue
+
+       chunkInfos.push({
+         key: chunk.key,
+         type: chunkType,
+         size: chunk.data.length,
+       })
+       binaries.push(chunk.data)
+     }
+     this.#chunkInfos.set(documentId, chunkInfos)
+
+     // Merge the chunks into a single binary
+     const binary = mergeArrays(binaries)
+     if (binary.length === 0) return null
+
+     // Load into an Automerge document
+     const newDoc = A.loadIncremental(A.init(), binary) as A.Doc<T>
+
+     // Record the latest heads for the document
+     this.#storedHeads.set(documentId, A.getHeads(newDoc))
+
+     return newDoc
+   }
+
+   /**
+    * Saves the provided Automerge document to storage.
+    *
+    * @remarks
+    * Under the hood this makes incremental saves until the incremental size is greater than the
+    * snapshot size, at which point the document is compacted into a single snapshot.
+    */
+   async saveDoc(documentId: DocumentId, doc: A.Doc<unknown>): Promise<void> {
+     // Don't bother saving if the document hasn't changed
+     if (!this.#shouldSave(documentId, doc)) return
+
+     const sourceChunks = this.#chunkInfos.get(documentId) ?? []
+     if (this.#shouldCompact(sourceChunks)) {
+       await this.#saveTotal(documentId, doc, sourceChunks)
+     } else {
+       await this.#saveIncremental(documentId, doc)
+     }
+     this.#storedHeads.set(documentId, A.getHeads(doc))
+   }
+
+   /**
+    * Removes the Automerge document with the given ID from storage
+    */
+   async removeDoc(documentId: DocumentId) {
+     await this.#storageAdapter.removeRange([documentId, "snapshot"])
+     await this.#storageAdapter.removeRange([documentId, "incremental"])
+   }
+
+   /**
+    * Saves just the incremental changes since the last save.
+    */
    async #saveIncremental(
      documentId: DocumentId,
      doc: A.Doc<unknown>
@@ -64,12 +174,16 @@ export class StorageSubsystem {
      }
    }
 
+   /**
+    * Compacts the document storage into a single snapshot.
+    */
    async #saveTotal(
      documentId: DocumentId,
      doc: A.Doc<unknown>,
-     sourceChunks: StorageChunkInfo[]
+     sourceChunks: ChunkInfo[]
    ): Promise<void> {
-     this.#snapshotting = true
+     this.#compacting = true
+
      const binary = A.save(doc)
      const snapshotHash = headsHash(A.getHeads(doc))
      const key = [documentId, "snapshot", snapshotHash]
@@ -85,76 +199,41 @@ export class StorageSubsystem {
      for (const key of oldKeys) {
        await this.#storageAdapter.remove(key)
      }
+
      const newChunkInfos =
        this.#chunkInfos.get(documentId)?.filter(c => !oldKeys.has(c.key)) ?? []
      newChunkInfos.push({ key, type: "snapshot", size: binary.length })
-     this.#chunkInfos.set(documentId, newChunkInfos)
-     this.#snapshotting = false
-   }
-
-   async loadDoc(documentId: DocumentId): Promise<A.Doc<unknown> | null> {
-     const loaded = await this.#storageAdapter.loadRange([documentId])
-     const binaries = []
-     const chunkInfos: StorageChunkInfo[] = []
-     for (const chunk of loaded) {
-       const chunkType = chunkTypeFromKey(chunk.key)
-       if (chunkType == null) {
-         continue
-       }
-       chunkInfos.push({
-         key: chunk.key,
-         type: chunkType,
-         size: chunk.data.length,
-       })
-       binaries.push(chunk.data)
-     }
-     this.#chunkInfos.set(documentId, chunkInfos)
-     const binary = mergeArrays(binaries)
-     if (binary.length === 0) {
-       return null
-     }
-     const newDoc = A.loadIncremental(A.init(), binary)
-     this.#storedHeads.set(documentId, A.getHeads(newDoc))
-     return newDoc
-   }
 
-   async saveDoc(documentId: DocumentId, doc: A.Doc<unknown>): Promise<void> {
-     if (!this.#shouldSave(documentId, doc)) {
-       return
-     }
-     const sourceChunks = this.#chunkInfos.get(documentId) ?? []
-     if (this.#shouldCompact(sourceChunks)) {
-       void this.#saveTotal(documentId, doc, sourceChunks)
-     } else {
-       void this.#saveIncremental(documentId, doc)
-     }
-     this.#storedHeads.set(documentId, A.getHeads(doc))
-   }
+     this.#chunkInfos.set(documentId, newChunkInfos)
 
-   async remove(documentId: DocumentId) {
-     void this.#storageAdapter.removeRange([documentId, "snapshot"])
-     void this.#storageAdapter.removeRange([documentId, "incremental"])
+     this.#compacting = false
    }
 
+   /**
+    * Returns true if the document has changed since the last time it was saved.
+    */
    #shouldSave(documentId: DocumentId, doc: A.Doc<unknown>): boolean {
      const oldHeads = this.#storedHeads.get(documentId)
      if (!oldHeads) {
+       // we haven't saved this document before
        return true
      }
 
      const newHeads = A.getHeads(doc)
      if (headsAreSame(newHeads, oldHeads)) {
+       // the document hasn't changed
       return false
      }
 
-     return true
+     return true // the document has changed
    }
 
-   #shouldCompact(sourceChunks: StorageChunkInfo[]) {
-     if (this.#snapshotting) {
-       return false
-     }
-     // compact if the incremental size is greater than the snapshot size
+   /**
+    * We only compact if the incremental size is greater than the snapshot size.
+    */
+   #shouldCompact(sourceChunks: ChunkInfo[]) {
+     if (this.#compacting) return false
+
      let snapshotSize = 0
      let incrementalSize = 0
      for (const chunk of sourceChunks) {
@@ -167,16 +246,3 @@ export class StorageSubsystem {
      return incrementalSize >= snapshotSize
    }
  }
-
- function chunkTypeFromKey(key: StorageKey): ChunkType | null {
-   if (key.length < 2) {
-     return null
-   }
-   const chunkTypeStr = key[key.length - 2]
-   if (chunkTypeStr === "snapshot" || chunkTypeStr === "incremental") {
-     const chunkType: ChunkType = chunkTypeStr
-     return chunkType
-   } else {
-     return null
-   }
- }
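A hedged usage sketch of the new generic key/value surface (`StorageSubsystem` is ordinarily constructed internally by `Repo`; the namespace and key strings are made up, and `DummyStorageAdapter` is the package's own test helper):

```ts
import { StorageSubsystem } from "./storage/StorageSubsystem.js"
import { DummyStorageAdapter } from "../test/helpers/DummyStorageAdapter.js"

const storage = new StorageSubsystem(new DummyStorageAdapter())

// Generic values are namespaced to avoid collisions, e.g. by plug-in name
await storage.save("MyAuthProvider", "team-graph", new Uint8Array([1, 2, 3]))
const bytes = await storage.load("MyAuthProvider", "team-graph") // Uint8Array | undefined
await storage.remove("MyAuthProvider", "team-graph")
```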
package/src/storage/chunkTypeFromKey.ts ADDED
@@ -0,0 +1,22 @@
+ import { StorageKey } from "./types.js"
+ import { ChunkType } from "./types.js"
+
+ /**
+  * Keys for storing Automerge documents are of the form:
+  * ```ts
+  * [documentId, "snapshot", hash] // OR
+  * [documentId, "incremental", hash]
+  * ```
+  * This function returns the chunk type ("snapshot" or "incremental") if the key is in one of these
+  * forms.
+  */
+ export function chunkTypeFromKey(key: StorageKey): ChunkType | null {
+   if (key.length < 2) return null
+
+   const chunkTypeStr = key[key.length - 2] // next-to-last element in key
+   if (chunkTypeStr === "snapshot" || chunkTypeStr === "incremental") {
+     return chunkTypeStr as ChunkType
+   }
+
+   return null
+ }
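A quick illustration of how keys are classified (the hash segments are placeholders):

```ts
import { chunkTypeFromKey } from "./chunkTypeFromKey.js"

chunkTypeFromKey(["someDocId", "snapshot", "0123abcd"]) // "snapshot"
chunkTypeFromKey(["someDocId", "incremental", "0123abcd"]) // "incremental"
chunkTypeFromKey(["someDocId"]) // null: too short
chunkTypeFromKey(["MyAuthProvider", "team-graph"]) // null: not a document chunk
```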
package/src/storage/keyHash.ts ADDED
@@ -0,0 +1,17 @@
+ import * as A from "@automerge/automerge/next"
+ import * as sha256 from "fast-sha256"
+ import { mergeArrays } from "../helpers/mergeArrays.js"
+
+ export function keyHash(binary: Uint8Array) {
+   // calculate hash
+   const hash = sha256.hash(binary)
+   return bufferToHexString(hash)
+ }
+ export function headsHash(heads: A.Heads): string {
+   const encoder = new TextEncoder()
+   const headsbinary = mergeArrays(heads.map((h: string) => encoder.encode(h)))
+   return keyHash(headsbinary)
+ }
+ function bufferToHexString(data: Uint8Array) {
+   return Array.from(data, byte => byte.toString(16).padStart(2, "0")).join("")
+ }
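For reference, `keyHash` is just SHA-256 rendered as lowercase hex:

```ts
import { keyHash } from "./keyHash.js"

// SHA-256("hello") as a hex string (well-known test vector)
keyHash(new TextEncoder().encode("hello"))
// "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
```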
package/src/storage/types.ts ADDED
@@ -0,0 +1,39 @@
+ /**
+  * A chunk is a snapshot or incremental change that is stored in a {@link StorageAdapter}.
+  */
+ export type Chunk = {
+   key: StorageKey
+   data: Uint8Array | undefined
+ }
+
+ /**
+  * Metadata about a chunk of data loaded from storage. This is stored on the StorageSubsystem so
+  * when we are compacting we know what chunks we can safely delete.
+  */
+ export type ChunkInfo = {
+   key: StorageKey
+   type: ChunkType
+   size: number
+ }
+
+ export type ChunkType = "snapshot" | "incremental"
+
+ /**
+  * A storage key is an array of strings that represents a path to a value in a
+  * {@link StorageAdapter}.
+  *
+  * @remarks
+  * Storage keys are arrays because they are hierarchical and they allow the storage subsystem to do
+  * range queries for all keys that have a particular prefix. For example, incremental changes for a
+  * given document might be stored under `[<documentId>, "incremental", <SHA256>]`.
+  *
+  * automerge-repo mostly uses keys in the following form:
+  * ```ts
+  * [documentId, "snapshot", hash] // OR
+  * [documentId, "incremental", hash]
+  * ```
+  *
+  * However, the storage adapter implementation should be agnostic to the meaning of the key and
+  * should not assume any particular structure.
+  **/
+ export type StorageKey = string[]
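The prefix semantics described in the remarks reduce to element-wise array comparison. A sketch of how an adapter might test prefix matches (not part of the package API):

```ts
import type { StorageKey } from "@automerge/automerge-repo"

// True if `key` begins with every segment of `prefix`, in order
function matchesPrefix(key: StorageKey, prefix: StorageKey): boolean {
  return prefix.every((segment, i) => key[i] === segment)
}

matchesPrefix(["doc1", "incremental", "abc"], ["doc1"]) // true
matchesPrefix(["doc1", "snapshot", "abc"], ["doc1", "incremental"]) // false
```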
package/src/synchronizer/CollectionSynchronizer.ts CHANGED
@@ -1,6 +1,6 @@
  import debug from "debug"
  import { DocHandle } from "../DocHandle.js"
- import { stringifyAutomergeUrl } from "../DocUrl.js"
+ import { stringifyAutomergeUrl } from "../AutomergeUrl.js"
  import { Repo } from "../Repo.js"
  import { RepoMessage } from "../network/messages.js"
  import { DocumentId, PeerId } from "../types.js"
package/src/types.ts CHANGED
@@ -1,20 +1,32 @@
- /** The ID of a document. Typically you should use a {@link AutomergeUrl} instead.
-  */
- export type DocumentId = string & { __documentId: true } // for logging
-
- /** A branded string representing a URL for a document
-  *
-  * @remarks
-  * An automerge URL has the form `automerge:<base58 encoded string>`. This
-  * type is returned from various routines which validate a url.
-  *
+ /**
+  * A branded string representing a URL for a document, in the form `automerge:<base58check encoded
+  * string>`; for example, `automerge:4NMNnkMhL8jXrdJ9jamS58PAVdXu`.
   */
  export type AutomergeUrl = string & { __documentUrl: true } // for opening / linking
 
- /** A document ID as a Uint8Array instead of a bas58 encoded string. Typically you should use a {@link AutomergeUrl} instead.
+ /**
+  * The base58check-encoded UUID of a document. This is the string following the `automerge:`
+  * protocol prefix in an AutomergeUrl; for example, `4NMNnkMhL8jXrdJ9jamS58PAVdXu`. When recording
+  * links to an Automerge document in another Automerge document, you should store a
+  * {@link AutomergeUrl} instead.
   */
+ export type DocumentId = string & { __documentId: true } // for logging
+
+ /** The unencoded UUID of a document. Typically you should use a {@link AutomergeUrl} instead. */
  export type BinaryDocumentId = Uint8Array & { __binaryDocumentId: true } // for storing / syncing
 
+ /**
+  * A UUID encoded as a hex string. As of v1.0, a {@link DocumentId} is stored as a base58-encoded string with a checksum.
+  * Support for this format will be removed in a future version.
+  */
+ export type LegacyDocumentId = string & { __legacyDocumentId: true }
+
+ export type AnyDocumentId =
+   | AutomergeUrl
+   | DocumentId
+   | BinaryDocumentId
+   | LegacyDocumentId
+
  /** A branded type for peer IDs */
  export type PeerId = string & { __peerId: true }
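For reference, a sketch of how the branded forms relate (the literal values are illustrative; in practice they come from helpers like `parseAutomergeUrl` and `stringifyAutomergeUrl`):

```ts
import type { AutomergeUrl, DocumentId, AnyDocumentId } from "@automerge/automerge-repo"

// The same document in two of the forms covered by AnyDocumentId
const url = "automerge:4NMNnkMhL8jXrdJ9jamS58PAVdXu" as AutomergeUrl
const documentId = "4NMNnkMhL8jXrdJ9jamS58PAVdXu" as DocumentId

// APIs accepting AnyDocumentId take either, plus the binary and legacy forms
const ids: AnyDocumentId[] = [url, documentId]
```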