@automerge/automerge-repo 1.0.0-alpha.4 → 1.0.0-alpha.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/DocHandle.js +2 -2
- package/dist/network/NetworkAdapter.d.ts +2 -3
- package/dist/network/NetworkAdapter.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.d.ts +1 -3
- package/dist/network/NetworkSubsystem.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.js +0 -9
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +8 -2
- package/package.json +3 -3
- package/src/DocHandle.ts +3 -3
- package/src/network/NetworkAdapter.ts +2 -4
- package/src/network/NetworkSubsystem.ts +14 -23
- package/src/storage/StorageSubsystem.ts +9 -2
- package/test/Repo.test.ts +25 -5
- package/test/helpers/DummyNetworkAdapter.ts +2 -3
- package/test/helpers/generate-large-object.ts +13 -0
package/dist/DocHandle.js
CHANGED

@@ -77,7 +77,7 @@ export class DocHandle//
             awaitingNetwork: {
                 on: {
                     NETWORK_READY: { target: REQUESTING },
-                }
+                },
             },
             requesting: {
                 on: {
@@ -341,4 +341,4 @@ export const Event = {
 };
 // CONSTANTS
 export const { IDLE, LOADING, AWAITING_NETWORK, REQUESTING, READY, FAILED, DELETED, UNAVAILABLE, } = HandleState;
-const { CREATE, FIND, REQUEST, UPDATE, TIMEOUT, DELETE, REQUEST_COMPLETE, MARK_UNAVAILABLE, AWAIT_NETWORK, NETWORK_READY } = Event;
+const { CREATE, FIND, REQUEST, UPDATE, TIMEOUT, DELETE, REQUEST_COMPLETE, MARK_UNAVAILABLE, AWAIT_NETWORK, NETWORK_READY, } = Event;

package/dist/network/NetworkAdapter.d.ts
CHANGED

@@ -3,10 +3,9 @@ import { PeerId } from "../types.js";
 import { Message } from "./messages.js";
 export declare abstract class NetworkAdapter extends EventEmitter<NetworkAdapterEvents> {
     peerId?: PeerId;
-    abstract connect(
+    abstract connect(peerId: PeerId): void;
     abstract send(message: Message): void;
-    abstract join(): void;
-    abstract leave(): void;
+    abstract disconnect(): void;
 }
 export interface NetworkAdapterEvents {
     ready: (payload: OpenPayload) => void;

package/dist/network/NetworkAdapter.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"NetworkAdapter.d.ts","sourceRoot":"","sources":["../../src/network/NetworkAdapter.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAC5C,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAA;AACpC,OAAO,EAAE,OAAO,EAAE,MAAM,eAAe,CAAA;AAEvC,8BAAsB,cAAe,SAAQ,YAAY,CAAC,oBAAoB,CAAC;IAC7E,MAAM,CAAC,EAAE,MAAM,CAAA;IAEf,QAAQ,CAAC,OAAO,CAAC,
+{"version":3,"file":"NetworkAdapter.d.ts","sourceRoot":"","sources":["../../src/network/NetworkAdapter.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAC5C,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAA;AACpC,OAAO,EAAE,OAAO,EAAE,MAAM,eAAe,CAAA;AAEvC,8BAAsB,cAAe,SAAQ,YAAY,CAAC,oBAAoB,CAAC;IAC7E,MAAM,CAAC,EAAE,MAAM,CAAA;IAEf,QAAQ,CAAC,OAAO,CAAC,MAAM,EAAE,MAAM,GAAG,IAAI;IAEtC,QAAQ,CAAC,IAAI,CAAC,OAAO,EAAE,OAAO,GAAG,IAAI;IAErC,QAAQ,CAAC,UAAU,IAAI,IAAI;CAC5B;AAID,MAAM,WAAW,oBAAoB;IACnC,KAAK,EAAE,CAAC,OAAO,EAAE,WAAW,KAAK,IAAI,CAAA;IACrC,KAAK,EAAE,MAAM,IAAI,CAAA;IACjB,gBAAgB,EAAE,CAAC,OAAO,EAAE,oBAAoB,KAAK,IAAI,CAAA;IACzD,mBAAmB,EAAE,CAAC,OAAO,EAAE,uBAAuB,KAAK,IAAI,CAAA;IAC/D,OAAO,EAAE,CAAC,OAAO,EAAE,OAAO,KAAK,IAAI,CAAA;CACpC;AAED,MAAM,WAAW,WAAW;IAC1B,OAAO,EAAE,cAAc,CAAA;CACxB;AAED,MAAM,WAAW,oBAAoB;IACnC,MAAM,EAAE,MAAM,CAAA;CACf;AAED,MAAM,WAAW,uBAAuB;IACtC,MAAM,EAAE,MAAM,CAAA;CACf"}

package/dist/network/NetworkSubsystem.d.ts
CHANGED

@@ -8,8 +8,6 @@ export declare class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
     constructor(adapters: NetworkAdapter[], peerId?: PeerId);
     addNetworkAdapter(networkAdapter: NetworkAdapter): void;
     send(message: MessageContents): void;
-    join(): void;
-    leave(): void;
     isReady: () => boolean;
     whenReady: () => Promise<void>;
 }
@@ -17,7 +15,7 @@ export interface NetworkSubsystemEvents {
     peer: (payload: PeerPayload) => void;
     "peer-disconnected": (payload: PeerDisconnectedPayload) => void;
     message: (payload: Message) => void;
-
+    ready: () => void;
 }
 export interface PeerPayload {
     peerId: PeerId;

package/dist/network/NetworkSubsystem.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"NetworkSubsystem.d.ts","sourceRoot":"","sources":["../../src/network/NetworkSubsystem.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAC5C,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAA;AACpC,OAAO,EAAE,cAAc,EAAE,uBAAuB,EAAE,MAAM,qBAAqB,CAAA;AAE7E,OAAO,EAIL,OAAO,EACP,eAAe,EAChB,MAAM,eAAe,CAAA;AAUtB,qBAAa,gBAAiB,SAAQ,YAAY,CAAC,sBAAsB,CAAC;;
+{"version":3,"file":"NetworkSubsystem.d.ts","sourceRoot":"","sources":["../../src/network/NetworkSubsystem.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAC5C,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAA;AACpC,OAAO,EAAE,cAAc,EAAE,uBAAuB,EAAE,MAAM,qBAAqB,CAAA;AAE7E,OAAO,EAIL,OAAO,EACP,eAAe,EAChB,MAAM,eAAe,CAAA;AAUtB,qBAAa,gBAAiB,SAAQ,YAAY,CAAC,sBAAsB,CAAC;;IAUzB,MAAM;gBAAzC,QAAQ,EAAE,cAAc,EAAE,EAAS,MAAM,SAAiB;IAMtE,iBAAiB,CAAC,cAAc,EAAE,cAAc;IAsEhD,IAAI,CAAC,OAAO,EAAE,eAAe;IA2B7B,OAAO,gBAEN;IAED,SAAS,sBAUR;CACF;AAQD,MAAM,WAAW,sBAAsB;IACrC,IAAI,EAAE,CAAC,OAAO,EAAE,WAAW,KAAK,IAAI,CAAA;IACpC,mBAAmB,EAAE,CAAC,OAAO,EAAE,uBAAuB,KAAK,IAAI,CAAA;IAC/D,OAAO,EAAE,CAAC,OAAO,EAAE,OAAO,KAAK,IAAI,CAAA;IACnC,KAAK,EAAE,MAAM,IAAI,CAAA;CAClB;AAED,MAAM,WAAW,WAAW;IAC1B,MAAM,EAAE,MAAM,CAAA;CACf"}

package/dist/network/NetworkSubsystem.js
CHANGED

@@ -66,7 +66,6 @@ export class NetworkSubsystem extends EventEmitter {
         });
         });
         networkAdapter.connect(this.peerId);
-        networkAdapter.join();
     }
     send(message) {
         const peer = this.#adaptersByPeer[message.targetId];
@@ -93,14 +92,6 @@ export class NetworkSubsystem extends EventEmitter {
             peer.send(outbound);
         }
     }
-    join() {
-        this.#log(`Joining network`);
-        this.#adapters.forEach(a => a.join());
-    }
-    leave() {
-        this.#log(`Leaving network`);
-        this.#adapters.forEach(a => a.leave());
-    }
     isReady = () => {
         return this.#readyAdapterCount === this.#adapters.length;
     };

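With join() and leave() removed, the subsystem's readiness is driven entirely by counting each adapter's one-time "ready" event, as the #readyAdapterCount bookkeeping above shows. The toy model below is not the shipped class; ReadinessTracker is a hypothetical name used only to make that counting logic concrete.

import { EventEmitter } from "events"

// Toy model of the readiness bookkeeping: each adapter reports "ready" once,
// and the tracker emits its own "ready" when every adapter has done so.
class ReadinessTracker extends EventEmitter {
  #readyCount = 0

  constructor(private adapterCount: number) {
    super()
  }

  adapterReady() {
    this.#readyCount++
    if (this.#readyCount === this.adapterCount) this.emit("ready")
  }

  isReady() {
    return this.#readyCount === this.adapterCount
  }

  whenReady(): Promise<void> {
    return this.isReady()
      ? Promise.resolve()
      : new Promise<void>(resolve => this.once("ready", () => resolve()))
  }
}
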
package/dist/storage/StorageSubsystem.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"StorageSubsystem.d.ts","sourceRoot":"","sources":["../../src/storage/StorageSubsystem.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,2BAA2B,CAAA;AAC9C,OAAO,EAAE,cAAc,EAAc,MAAM,qBAAqB,CAAA;AAEhE,OAAO,EAAE,KAAK,UAAU,EAAE,MAAM,aAAa,CAAA;AAa7C,MAAM,MAAM,SAAS,GAAG,UAAU,GAAG,aAAa,CAAA;AAelD,qBAAa,gBAAgB;;
+{"version":3,"file":"StorageSubsystem.d.ts","sourceRoot":"","sources":["../../src/storage/StorageSubsystem.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,2BAA2B,CAAA;AAC9C,OAAO,EAAE,cAAc,EAAc,MAAM,qBAAqB,CAAA;AAEhE,OAAO,EAAE,KAAK,UAAU,EAAE,MAAM,aAAa,CAAA;AAa7C,MAAM,MAAM,SAAS,GAAG,UAAU,GAAG,aAAa,CAAA;AAelD,qBAAa,gBAAgB;;gBAQf,cAAc,EAAE,cAAc;IAuDpC,OAAO,CAAC,UAAU,EAAE,UAAU,GAAG,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,IAAI,CAAC;IA0B/D,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,GAAG,EAAE,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,OAAO,CAAC,IAAI,CAAC;IAanE,MAAM,CAAC,UAAU,EAAE,UAAU;CAmCpC"}

package/dist/storage/StorageSubsystem.js
CHANGED

@@ -19,6 +19,7 @@ export class StorageSubsystem {
     #chunkInfos = new Map();
     #storedHeads = new Map();
     #log = debug(`automerge-repo:storage-subsystem`);
+    #snapshotting = false;
     constructor(storageAdapter) {
         this.#storageAdapter = storageAdapter;
     }
@@ -43,6 +44,7 @@ export class StorageSubsystem {
         }
     }
     async #saveTotal(documentId, doc, sourceChunks) {
+        this.#snapshotting = true;
        const binary = A.save(doc);
        const snapshotHash = headsHash(A.getHeads(doc));
        const key = [documentId, "snapshot", snapshotHash];
@@ -56,6 +58,7 @@ export class StorageSubsystem {
        const newChunkInfos = this.#chunkInfos.get(documentId)?.filter(c => !oldKeys.has(c.key)) ?? [];
        newChunkInfos.push({ key, type: "snapshot", size: binary.length });
        this.#chunkInfos.set(documentId, newChunkInfos);
+        this.#snapshotting = false;
     }
     async loadDoc(documentId) {
        const loaded = await this.#storageAdapter.loadRange([documentId]);
@@ -96,7 +99,7 @@ export class StorageSubsystem {
        this.#storedHeads.set(documentId, A.getHeads(doc));
     }
     async remove(documentId) {
-        this.#storageAdapter.
+        this.#storageAdapter.removeRange([documentId, "snapshot"]);
        this.#storageAdapter.removeRange([documentId, "incremental"]);
     }
     #shouldSave(documentId, doc) {
@@ -111,6 +114,9 @@ export class StorageSubsystem {
        return true;
     }
     #shouldCompact(sourceChunks) {
+        if (this.#snapshotting) {
+            return false;
+        }
        // compact if the incremental size is greater than the snapshot size
        let snapshotSize = 0;
        let incrementalSize = 0;
@@ -122,7 +128,7 @@ export class StorageSubsystem {
             incrementalSize += chunk.size;
         }
     }
-        return incrementalSize
+        return incrementalSize >= snapshotSize;
     }
 }
 function chunkTypeFromKey(key) {

package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@automerge/automerge-repo",
-  "version": "1.0.0-alpha.4",
+  "version": "1.0.0-alpha.5",
   "description": "A repository object to manage a collection of automerge documents",
   "repository": "https://github.com/automerge/automerge-repo",
   "author": "Peter van Hardenberg <pvh@pvh.ca>",
@@ -31,7 +31,7 @@
     "typescript": "^5.1.6"
   },
   "peerDependencies": {
-    "@automerge/automerge": "^2.1.0-alpha.
+    "@automerge/automerge": "^2.1.0-alpha.13"
   },
   "dependencies": {
     "bs58check": "^3.0.1",
@@ -65,5 +65,5 @@
   "publishConfig": {
     "access": "public"
   },
-  "gitHead": "
+  "gitHead": "9cd9be160ebda37c8f0d70f2d5cadea5b951a3c3"
 }

package/src/DocHandle.ts
CHANGED

@@ -101,7 +101,7 @@ export class DocHandle<T> //
         awaitingNetwork: {
           on: {
             NETWORK_READY: { target: REQUESTING },
-          }
+          },
         },
         requesting: {
           on: {
@@ -501,7 +501,7 @@ type DocHandleEvent<T> =
   | TimeoutEvent
   | DeleteEvent
   | MarkUnavailableEvent
-  | AwaitNetworkEvent
+  | AwaitNetworkEvent
   | NetworkReadyEvent

 type DocHandleXstateMachine<T> = Interpreter<
@@ -541,5 +541,5 @@ const {
   REQUEST_COMPLETE,
   MARK_UNAVAILABLE,
   AWAIT_NETWORK,
-  NETWORK_READY
+  NETWORK_READY,
 } = Event

package/src/network/NetworkAdapter.ts
CHANGED

@@ -5,13 +5,11 @@ import { Message } from "./messages.js"
 export abstract class NetworkAdapter extends EventEmitter<NetworkAdapterEvents> {
   peerId?: PeerId // hmmm, maybe not

-  abstract connect(
+  abstract connect(peerId: PeerId): void

   abstract send(message: Message): void

-  abstract join(): void
-
-  abstract leave(): void
+  abstract disconnect(): void
 }

 // events & payloads

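The abstract adapter surface is now just connect(peerId), send(message), and disconnect(); join() and leave() are gone. A minimal conforming adapter might look like the sketch below. It is modeled on the package's own DummyNetworkAdapter test helper rather than any real transport; LoopbackAdapter is a hypothetical name and the import specifiers are assumed.

import { NetworkAdapter, type Message, type PeerId } from "@automerge/automerge-repo"

// A no-op adapter that satisfies the new abstract members (illustrative only).
class LoopbackAdapter extends NetworkAdapter {
  connect(peerId: PeerId) {
    this.peerId = peerId
    // a real transport would open its connection here before announcing readiness
    this.emit("ready", { network: this })
  }

  send(_message: Message) {
    // a real transport would serialize and deliver the message to its targetId
  }

  disconnect() {
    // a real transport would tear down its connection here
  }
}
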
package/src/network/NetworkSubsystem.ts
CHANGED

@@ -25,13 +25,10 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
   #count = 0
   #sessionId: SessionId = Math.random().toString(36).slice(2) as SessionId
   #ephemeralSessionCounts: Record<EphemeralMessageSource, number> = {}
-  #readyAdapterCount = 0
+  #readyAdapterCount = 0
   #adapters: NetworkAdapter[] = []

-  constructor(
-    adapters: NetworkAdapter[],
-    public peerId = randomPeerId()
-  ) {
+  constructor(adapters: NetworkAdapter[], public peerId = randomPeerId()) {
     super()
     this.#log = debug(`automerge-repo:network:${this.peerId}`)
     adapters.forEach(a => this.addNetworkAdapter(a))
@@ -41,7 +38,12 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
     this.#adapters.push(networkAdapter)
     networkAdapter.once("ready", () => {
       this.#readyAdapterCount++
-      this.#log(
+      this.#log(
+        "Adapters ready: ",
+        this.#readyAdapterCount,
+        "/",
+        this.#adapters.length
+      )
       if (this.#readyAdapterCount === this.#adapters.length) {
         this.emit("ready")
       }
@@ -100,7 +102,6 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
     })

     networkAdapter.connect(this.peerId)
-    networkAdapter.join()
   }

   send(message: MessageContents) {
@@ -116,11 +117,11 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
           "count" in message
             ? message
             : {
-
-
-
-
-
+                ...message,
+                count: ++this.#count,
+                sessionId: this.#sessionId,
+                senderId: this.peerId,
+              }
       this.#log("Ephemeral message", outbound)
       peer.send(outbound)
     } else {
@@ -130,16 +131,6 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
     }
   }

-  join() {
-    this.#log(`Joining network`)
-    this.#adapters.forEach(a => a.join())
-  }
-
-  leave() {
-    this.#log(`Leaving network`)
-    this.#adapters.forEach(a => a.leave())
-  }
-
   isReady = () => {
     return this.#readyAdapterCount === this.#adapters.length
   }
@@ -167,7 +158,7 @@ export interface NetworkSubsystemEvents {
   peer: (payload: PeerPayload) => void
   "peer-disconnected": (payload: PeerDisconnectedPayload) => void
   message: (payload: Message) => void
-
+  ready: () => void
 }

 export interface PeerPayload {

package/src/storage/StorageSubsystem.ts
CHANGED

@@ -35,6 +35,8 @@ export class StorageSubsystem {
   #storedHeads: Map<DocumentId, A.Heads> = new Map()
   #log = debug(`automerge-repo:storage-subsystem`)

+  #snapshotting = false
+
   constructor(storageAdapter: StorageAdapter) {
     this.#storageAdapter = storageAdapter
   }
@@ -67,6 +69,7 @@ export class StorageSubsystem {
     doc: A.Doc<unknown>,
     sourceChunks: StorageChunkInfo[]
   ): Promise<void> {
+    this.#snapshotting = true
     const binary = A.save(doc)
     const snapshotHash = headsHash(A.getHeads(doc))
     const key = [documentId, "snapshot", snapshotHash]
@@ -86,6 +89,7 @@ export class StorageSubsystem {
       this.#chunkInfos.get(documentId)?.filter(c => !oldKeys.has(c.key)) ?? []
     newChunkInfos.push({ key, type: "snapshot", size: binary.length })
     this.#chunkInfos.set(documentId, newChunkInfos)
+    this.#snapshotting = false
   }

   async loadDoc(documentId: DocumentId): Promise<A.Doc<unknown> | null> {
@@ -128,7 +132,7 @@ export class StorageSubsystem {
   }

   async remove(documentId: DocumentId) {
-    this.#storageAdapter.
+    this.#storageAdapter.removeRange([documentId, "snapshot"])
     this.#storageAdapter.removeRange([documentId, "incremental"])
   }

@@ -147,6 +151,9 @@ export class StorageSubsystem {
   }

   #shouldCompact(sourceChunks: StorageChunkInfo[]) {
+    if (this.#snapshotting) {
+      return false
+    }
     // compact if the incremental size is greater than the snapshot size
     let snapshotSize = 0
     let incrementalSize = 0
@@ -157,7 +164,7 @@ export class StorageSubsystem {
       incrementalSize += chunk.size
      }
    }
-    return incrementalSize
+    return incrementalSize >= snapshotSize
  }
 }

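The new #snapshotting flag makes #shouldCompact answer "no" while a snapshot write started by #saveTotal is still in flight, which is what keeps a burst of large changes from producing several snapshots — the behavior the new Repo test asserts. Below is a stripped-down sketch of just that guard; CompactionGuard is a hypothetical class for illustration, not the shipped StorageSubsystem.

// Simplified model of the compaction guard: while a snapshot is being written,
// further compaction requests are declined.
class CompactionGuard {
  #snapshotting = false

  async saveTotal(writeSnapshot: () => Promise<void>) {
    this.#snapshotting = true
    await writeSnapshot() // the real code saves the snapshot chunk and prunes old ones here
    this.#snapshotting = false
  }

  shouldCompact(snapshotSize: number, incrementalSize: number) {
    if (this.#snapshotting) return false // a snapshot is already in progress
    return incrementalSize >= snapshotSize // same threshold as the corrected return above
  }
}
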
package/test/Repo.test.ts
CHANGED

@@ -18,6 +18,10 @@ import { getRandomItem } from "./helpers/getRandomItem.js"
 import { TestDoc } from "./types.js"
 import { generateAutomergeUrl, stringifyAutomergeUrl } from "../src/DocUrl.js"
 import { READY, AWAITING_NETWORK } from "../src/DocHandle.js"
+import {
+  generateLargeObject,
+  LargeObject,
+} from "./helpers/generate-large-object.js"

 describe("Repo", () => {
   describe("single repo", () => {
@@ -98,7 +102,6 @@ describe("Repo", () => {

       networkAdapter.emit("ready", { network: networkAdapter })
       await eventPromise(handle, "unavailable")
-
     })

     it("can find a created document", async () => {
@@ -130,7 +133,6 @@ describe("Repo", () => {
       const bobHandle = repo2.find<TestDoc>(handle.url)
       await bobHandle.whenReady()
       assert.equal(bobHandle.isReady(), true)
-
     })

     it("saves the document when changed and can find it again", async () => {
@@ -162,6 +164,7 @@ describe("Repo", () => {
       handle.change(d => {
         d.foo = "bar"
       })
+      // we now have a snapshot and an incremental change in storage
       assert.equal(handle.isReady(), true)
       await handle.doc()
       repo.delete(handle.documentId)
@@ -274,6 +277,21 @@ describe("Repo", () => {
         assert(storage.keys().length !== 0)
       }
     })
+
+    it("doesn't create multiple snapshots in storage when a series of large changes are made in succession", async () => {
+      const { repo, storageAdapter } = setup()
+      const handle = repo.create<{ objects: LargeObject[] }>()
+
+      for (let i = 0; i < 5; i++) {
+        handle.change(d => {
+          d.objects = []
+          d.objects.push(generateLargeObject(100))
+        })
+      }
+
+      const storageKeyTypes = storageAdapter.keys().map(k => k.split(".")[1])
+      assert(storageKeyTypes.filter(k => k === "snapshot").length === 1)
+    })
   })

   describe("sync", async () => {
@@ -331,7 +349,9 @@ describe("Repo", () => {
     }

     function doConnectAlice() {
-      aliceRepo.networkSubsystem.addNetworkAdapter(
+      aliceRepo.networkSubsystem.addNetworkAdapter(
+        new MessageChannelNetworkAdapter(aliceToBob)
+      )
       //bobRepo.networkSubsystem.addNetworkAdapter(new MessageChannelNetworkAdapter(bobToAlice))
     }

@@ -580,9 +600,9 @@ describe("Repo", () => {
         const doc =
           Math.random() < 0.5
             ? // heads, create a new doc
-
+              repo.create<TestDoc>()
             : // tails, pick a random doc
-
+              (getRandomItem(docs) as DocHandle<TestDoc>)

         // make sure the doc is ready
         if (!doc.isReady()) {

package/test/helpers/DummyNetworkAdapter.ts
CHANGED

@@ -6,12 +6,11 @@ export class DummyNetworkAdapter extends NetworkAdapter {
     super()
     this.#startReady = startReady
   }
-  send() {}
+  send() { }
   connect(_: string) {
     if (this.#startReady) {
       this.emit("ready", { network: this })
     }
   }
-
-  leave() {}
+  disconnect() { }
 }

package/test/helpers/generate-large-object.ts
ADDED

@@ -0,0 +1,13 @@
+export type LargeObject = { [key: string]: number }
+
+export function generateLargeObject(size: number): LargeObject {
+  const largeObject: LargeObject = {}
+
+  for (let i = 0; i < size; i++) {
+    const key = `key${i}`
+    const value = Math.random()
+    largeObject[key] = value
+  }
+
+  return largeObject
+}