@automerge/automerge-repo 2.0.0-alpha.2 → 2.0.0-alpha.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/AutomergeUrl.d.ts +17 -5
- package/dist/AutomergeUrl.d.ts.map +1 -1
- package/dist/AutomergeUrl.js +71 -24
- package/dist/DocHandle.d.ts +80 -8
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +181 -10
- package/dist/RemoteHeadsSubscriptions.d.ts +4 -5
- package/dist/RemoteHeadsSubscriptions.d.ts.map +1 -1
- package/dist/RemoteHeadsSubscriptions.js +4 -1
- package/dist/Repo.d.ts +35 -2
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +112 -70
- package/dist/entrypoints/fullfat.d.ts +1 -0
- package/dist/entrypoints/fullfat.d.ts.map +1 -1
- package/dist/entrypoints/fullfat.js +1 -2
- package/dist/helpers/bufferFromHex.d.ts +3 -0
- package/dist/helpers/bufferFromHex.d.ts.map +1 -0
- package/dist/helpers/bufferFromHex.js +13 -0
- package/dist/helpers/headsAreSame.d.ts +2 -2
- package/dist/helpers/headsAreSame.d.ts.map +1 -1
- package/dist/helpers/mergeArrays.d.ts +1 -1
- package/dist/helpers/mergeArrays.d.ts.map +1 -1
- package/dist/helpers/tests/storage-adapter-tests.d.ts +2 -2
- package/dist/helpers/tests/storage-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/storage-adapter-tests.js +25 -48
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -1
- package/dist/storage/StorageSubsystem.d.ts +11 -1
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +20 -4
- package/dist/synchronizer/CollectionSynchronizer.d.ts +15 -2
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.js +29 -8
- package/dist/synchronizer/DocSynchronizer.d.ts +7 -0
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +14 -0
- package/dist/synchronizer/Synchronizer.d.ts +11 -0
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
- package/dist/types.d.ts +4 -1
- package/dist/types.d.ts.map +1 -1
- package/package.json +3 -3
- package/src/AutomergeUrl.ts +101 -26
- package/src/DocHandle.ts +245 -20
- package/src/RemoteHeadsSubscriptions.ts +11 -9
- package/src/Repo.ts +163 -68
- package/src/entrypoints/fullfat.ts +1 -2
- package/src/helpers/bufferFromHex.ts +14 -0
- package/src/helpers/headsAreSame.ts +2 -2
- package/src/helpers/tests/storage-adapter-tests.ts +44 -86
- package/src/index.ts +2 -0
- package/src/storage/StorageSubsystem.ts +29 -4
- package/src/synchronizer/CollectionSynchronizer.ts +42 -9
- package/src/synchronizer/DocSynchronizer.ts +15 -0
- package/src/synchronizer/Synchronizer.ts +14 -0
- package/src/types.ts +4 -1
- package/test/AutomergeUrl.test.ts +130 -0
- package/test/DocHandle.test.ts +209 -2
- package/test/DocSynchronizer.test.ts +10 -3
- package/test/Repo.test.ts +228 -3
- package/test/StorageSubsystem.test.ts +17 -0
package/test/Repo.test.ts
CHANGED
@@ -3,7 +3,13 @@ import { MessageChannelNetworkAdapter } from "../../automerge-repo-network-messa
 import assert from "assert"
 import * as Uuid from "uuid"
 import { describe, expect, it } from "vitest"
-import {
+import {
+  encodeHeads,
+  getHeadsFromUrl,
+  isValidAutomergeUrl,
+  parseAutomergeUrl,
+  UrlHeads,
+} from "../src/AutomergeUrl.js"
 import {
   generateAutomergeUrl,
   stringifyAutomergeUrl,
@@ -486,6 +492,39 @@ describe("Repo", () => {
       const doc = await handle.doc()
       expect(doc).toEqual({})
     })
+
+    describe("handle cache", () => {
+      it("contains doc handle", async () => {
+        const { repo } = setup()
+        const handle = repo.create({ foo: "bar" })
+        await handle.doc()
+        assert(repo.handles[handle.documentId])
+      })
+
+      it("delete removes doc handle", async () => {
+        const { repo } = setup()
+        const handle = repo.create({ foo: "bar" })
+        await handle.doc()
+        await repo.delete(handle.documentId)
+        assert(repo.handles[handle.documentId] === undefined)
+      })
+
+      it("removeFromCache removes doc handle", async () => {
+        const { repo } = setup()
+        const handle = repo.create({ foo: "bar" })
+        await handle.doc()
+        await repo.removeFromCache(handle.documentId)
+        assert(repo.handles[handle.documentId] === undefined)
+      })
+
+      it("removeFromCache for documentId not found", async () => {
+        const { repo } = setup()
+        const badDocumentId = "badbadbad" as DocumentId
+        const handleCacheSize = Object.keys(repo.handles).length
+        await repo.removeFromCache(badDocumentId)
+        assert(Object.keys(repo.handles).length === handleCacheSize)
+      })
+    })
   })
 
   describe("flush behaviour", () => {
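The handle-cache tests above double as documentation for a small public surface on Repo: a handles map keyed by document ID, plus delete() and removeFromCache(). A minimal sketch of how that surface might be used, inferred only from these tests (the name removeFromCache suggests it evicts the in-memory handle while leaving stored data alone, but the diff only demonstrates the cache effect):

    import { Repo } from "@automerge/automerge-repo"

    const repo = new Repo({ network: [] })
    const handle = repo.create({ foo: "bar" })
    await handle.doc()

    // The repo keeps a cache of live handles, keyed by document ID.
    console.log(handle.documentId in repo.handles) // true

    // Evict the in-memory handle; delete(documentId) is the heavier option
    // that removes the document from the repo altogether.
    await repo.removeFromCache(handle.documentId)
    console.log(handle.documentId in repo.handles) // false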
@@ -832,6 +871,46 @@ describe("Repo", () => {
       teardown()
     })
 
+    it("synchronizes changes from bobRepo to charlieRepo when loading from storage", async () => {
+      const { bobRepo, bobStorage, teardown } = await setup()
+
+      // We create a repo that uses bobStorage to put a document into its imaginary disk
+      // without it knowing about it
+      const bobRepo2 = new Repo({
+        storage: bobStorage,
+      })
+      const inStorageHandle = bobRepo2.create<TestDoc>({
+        foo: "foundOnFakeDisk",
+      })
+      await bobRepo2.flush()
+
+      // Now, let's load it on the original bob repo (which shares a "disk")
+      const bobFoundIt = bobRepo.find<TestDoc>(inStorageHandle.url)
+      await bobFoundIt.whenReady()
+
+      // Before checking if it syncs, make sure we have it!
+      // (This behaviour is mostly test-validation, we are already testing load/save elsewhere.)
+      assert.deepStrictEqual(await bobFoundIt.doc(), { foo: "foundOnFakeDisk" })
+
+      await pause(10)
+
+      // We should have a docSynchronizer and its peers should be alice and charlie
+      assert.strictEqual(
+        bobRepo.synchronizer.docSynchronizers[bobFoundIt.documentId]?.hasPeer(
+          "alice" as PeerId
+        ),
+        true
+      )
+      assert.strictEqual(
+        bobRepo.synchronizer.docSynchronizers[bobFoundIt.documentId]?.hasPeer(
+          "charlie" as PeerId
+        ),
+        true
+      )
+
+      teardown()
+    })
+
     it("charlieRepo doesn't have a document it's not supposed to have", async () => {
       const { aliceRepo, bobRepo, charlieRepo, notForCharlie, teardown } =
         await setup()
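The test above depends on two behaviours that the rest of the suite only implies: flush() resolves once pending writes have reached the storage adapter, and a second Repo sharing that adapter can then find() the document and join sync with connected peers. A rough sketch of the storage-sharing half, with the adapter left abstract (the tests use their in-memory DummyStorageAdapter):

    import { Repo } from "@automerge/automerge-repo"

    // Stand-in for any storage adapter instance shared by both repos;
    // construction is omitted here.
    declare const storage: any

    const writer = new Repo({ storage })
    const handle = writer.create({ foo: "foundOnFakeDisk" })
    await writer.flush() // wait for the document to reach "disk"

    // A second repo over the same storage finds the document without any network.
    const reader = new Repo({ storage })
    const found = reader.find<{ foo: string }>(handle.url)
    await found.whenReady()
    console.log(await found.doc()) // { foo: "foundOnFakeDisk" }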
@@ -1102,7 +1181,10 @@ describe("Repo", () => {
         bobHandle.documentId,
         await charlieRepo!.storageSubsystem.id()
       )
-      assert.deepStrictEqual(
+      assert.deepStrictEqual(
+        encodeHeads(storedSyncState.sharedHeads),
+        bobHandle.heads()
+      )
 
       teardown()
     })
@@ -1202,7 +1284,7 @@ describe("Repo", () => {
 
       const nextRemoteHeadsPromise = new Promise<{
         storageId: StorageId
-        heads:
+        heads: UrlHeads
       }>(resolve => {
         handle.on("remote-heads", ({ storageId, heads }) => {
           resolve({ storageId, heads })
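These two hunks reflect the same underlying change: heads are now handled as the string-encoded UrlHeads type (the form that can be embedded in a URL), which is what handle.heads() and the remote-heads event deliver, while stored sync states still hold raw Automerge heads that must go through encodeHeads before comparison. A hedged sketch of the comparison the test makes; the helpers are imported here from the package root, though the test reaches into src/AutomergeUrl.js directly:

    import { encodeHeads, UrlHeads } from "@automerge/automerge-repo"

    declare const storedSyncState: { sharedHeads: string[] } // assumed shape of a loaded sync state
    declare const handleHeads: UrlHeads // e.g. the result of handle.heads()

    // Encode the raw heads before comparing them with the URL-encoded heads
    // reported by a DocHandle.
    const upToDate =
      JSON.stringify(encodeHeads(storedSyncState.sharedHeads)) ===
      JSON.stringify(handleHeads)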
@@ -1413,6 +1495,149 @@ describe("Repo", () => {
       teardown()
     })
   })
+
+  describe("the denylist", () => {
+    it("should immediately return an unavailable message in response to a request for a denylisted document", async () => {
+      const storage = new DummyStorageAdapter()
+
+      // first create the document in storage
+      const dummyRepo = new Repo({ network: [], storage })
+      const doc = dummyRepo.create({ foo: "bar" })
+      await dummyRepo.flush()
+
+      // Check that the document actually is in storage
+      let docId = doc.documentId
+      assert(storage.keys().some((k: string) => k.includes(docId)))
+
+      const channel = new MessageChannel()
+      const { port1: clientToServer, port2: serverToClient } = channel
+      const server = new Repo({
+        network: [new MessageChannelNetworkAdapter(serverToClient)],
+        storage,
+        denylist: [doc.url],
+      })
+      const client = new Repo({
+        network: [new MessageChannelNetworkAdapter(clientToServer)],
+      })
+
+      await Promise.all([
+        eventPromise(server.networkSubsystem, "peer"),
+        eventPromise(client.networkSubsystem, "peer"),
+      ])
+
+      const clientDoc = client.find(doc.url)
+      await pause(100)
+      assert.strictEqual(clientDoc.docSync(), undefined)
+
+      const openDocs = Object.keys(server.metrics().documents).length
+      assert.deepEqual(openDocs, 0)
+    })
+  })
+})
+
+describe("Repo heads-in-URLs functionality", () => {
+  const setup = () => {
+    const repo = new Repo({})
+    const handle = repo.create()
+    handle.change((doc: any) => (doc.title = "Hello World"))
+    return { repo, handle }
+  }
+
+  it("finds a document view by URL with heads", async () => {
+    const { repo, handle } = setup()
+    const heads = handle.heads()!
+    const url = stringifyAutomergeUrl({ documentId: handle.documentId, heads })
+    const view = repo.find(url)
+    expect(view.docSync()).toEqual({ title: "Hello World" })
+  })
+
+  it("returns a view, not the actual handle, when finding by URL with heads", async () => {
+    const { repo, handle } = setup()
+    const heads = handle.heads()!
+    await handle.change((doc: any) => (doc.title = "Changed"))
+    const url = stringifyAutomergeUrl({ documentId: handle.documentId, heads })
+    const view = repo.find(url)
+    expect(view.docSync()).toEqual({ title: "Hello World" })
+    expect(handle.docSync()).toEqual({ title: "Changed" })
+  })
+
+  it("changes to a document view do not affect the original", async () => {
+    const { repo, handle } = setup()
+    const heads = handle.heads()!
+    const url = stringifyAutomergeUrl({ documentId: handle.documentId, heads })
+    const view = repo.find(url)
+    expect(() =>
+      view.change((doc: any) => (doc.title = "Changed in View"))
+    ).toThrow()
+    expect(handle.docSync()).toEqual({ title: "Hello World" })
+  })
+
+  it("document views are read-only", async () => {
+    const { repo, handle } = setup()
+    const heads = handle.heads()!
+    const url = stringifyAutomergeUrl({ documentId: handle.documentId, heads })
+    const view = repo.find(url)
+    expect(() => view.change((doc: any) => (doc.title = "Changed"))).toThrow()
+  })
+
+  it("finds the latest document when given a URL without heads", async () => {
+    const { repo, handle } = setup()
+    await handle.change((doc: any) => (doc.title = "Changed"))
+    const found = repo.find(handle.url)
+    expect(found.docSync()).toEqual({ title: "Changed" })
+  })
+
+  it("getHeadsFromUrl returns heads array if present or undefined", () => {
+    const { repo, handle } = setup()
+    const heads = handle.heads()!
+    const url = stringifyAutomergeUrl({ documentId: handle.documentId, heads })
+    expect(getHeadsFromUrl(url)).toEqual(heads)
+
+    const urlWithoutHeads = generateAutomergeUrl()
+    expect(getHeadsFromUrl(urlWithoutHeads)).toBeUndefined()
+  })
+
+  it("isValidAutomergeUrl returns true for valid URLs", () => {
+    const { repo, handle } = setup()
+    const url = generateAutomergeUrl()
+    expect(isValidAutomergeUrl(url)).toBe(true)
+
+    const urlWithHeads = stringifyAutomergeUrl({
+      documentId: handle.documentId,
+      heads: handle.heads()!,
+    })
+    expect(isValidAutomergeUrl(urlWithHeads)).toBe(true)
+  })
+
+  it("isValidAutomergeUrl returns false for invalid URLs", () => {
+    const { repo, handle } = setup()
+    expect(isValidAutomergeUrl("not a url")).toBe(false)
+    expect(isValidAutomergeUrl("automerge:invalidid")).toBe(false)
+    expect(isValidAutomergeUrl("automerge:validid#invalidhead")).toBe(false)
+  })
+
+  it("parseAutomergeUrl extracts documentId and heads", () => {
+    const { repo, handle } = setup()
+    const url = stringifyAutomergeUrl({
+      documentId: handle.documentId,
+      heads: handle.heads()!,
+    })
+    const parsed = parseAutomergeUrl(url)
+    expect(parsed.documentId).toBe(handle.documentId)
+    expect(parsed.heads).toEqual(handle.heads())
+  })
+
+  it("stringifyAutomergeUrl creates valid URL", () => {
+    const { repo, handle } = setup()
+    const url = stringifyAutomergeUrl({
+      documentId: handle.documentId,
+      heads: handle.heads()!,
+    })
+    expect(isValidAutomergeUrl(url)).toBe(true)
+    const parsed = parseAutomergeUrl(url)
+    expect(parsed.documentId).toBe(handle.documentId)
+    expect(parsed.heads).toEqual(handle.heads())
+  })
 })
 
 const warn = console.warn
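The two new describe blocks above cover the headline behaviour changes in this release: a Repo can be constructed with a denylist of document URLs it refuses to serve (new Repo({ ..., denylist: [doc.url] })), and an Automerge URL can now carry heads, in which case repo.find() returns a read-only view of the document at that point in its history. A minimal sketch of the heads-in-URLs flow, using only calls exercised by these tests:

    import { Repo, stringifyAutomergeUrl } from "@automerge/automerge-repo"

    const repo = new Repo({})
    const handle = repo.create<{ title?: string }>()
    handle.change(doc => (doc.title = "Hello World"))

    // Capture the current heads and bake them into a URL.
    const heads = handle.heads()!
    const urlAtHeads = stringifyAutomergeUrl({ documentId: handle.documentId, heads })

    handle.change(doc => (doc.title = "Changed"))

    // A URL with heads yields a frozen, read-only view...
    const view = repo.find(urlAtHeads)
    console.log(view.docSync()) // { title: "Hello World" }
    // view.change(...) throws: views at historical heads are read-only.

    // ...while the plain URL still resolves to the live document.
    console.log(repo.find(handle.url).docSync()) // { title: "Changed" }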
package/test/StorageSubsystem.test.ts
CHANGED
@@ -211,6 +211,23 @@ describe("StorageSubsystem", () => {
       )
       assert.strictEqual(loadedSyncState, undefined)
     })
+
+    it("returns a undefined if loading an existing sync state fails", async () => {
+      const storage = new StorageSubsystem(adapter)
+
+      const { documentId } = parseAutomergeUrl(generateAutomergeUrl())
+      const bobStorageId = Uuid.v4() as StorageId
+
+      const syncStateKey = [documentId, "sync-state", bobStorageId]
+      // Save garbage data to simulate a corrupted sync state
+      await adapter.save(syncStateKey, Buffer.from("invalid data"))
+
+      const loadedSyncState = await storage.loadSyncState(
+        documentId,
+        bobStorageId
+      )
+      assert.strictEqual(loadedSyncState, undefined)
+    })
   })
 
   describe("storage id", () => {
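The new StorageSubsystem test pins down failure behaviour rather than new API: when the bytes stored under a document's sync-state key cannot be decoded, loadSyncState resolves to undefined instead of throwing, so synchronization simply starts over. A sketch of that contract using the same calls as the test; StorageSubsystem is an internal layer, so treat the import and the adapter here as illustrative:

    import { StorageSubsystem } from "@automerge/automerge-repo"

    declare const adapter: any // the storage adapter under test
    declare const documentId: any // a DocumentId
    declare const remoteStorageId: any // the peer's StorageId

    const storage = new StorageSubsystem(adapter)

    // Garbage bytes under the sync-state key...
    await adapter.save(
      [documentId, "sync-state", remoteStorageId],
      Buffer.from("invalid data")
    )

    // ...are treated as "no sync state" rather than an error.
    const loaded = await storage.loadSyncState(documentId, remoteStorageId)
    console.log(loaded) // undefined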