@peerbit/shared-log 9.0.10 → 9.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/benchmark/index.js +2 -2
- package/dist/benchmark/index.js.map +1 -1
- package/dist/benchmark/replication.js +3 -3
- package/dist/benchmark/replication.js.map +1 -1
- package/dist/src/index.d.ts +46 -31
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js +426 -229
- package/dist/src/index.js.map +1 -1
- package/dist/src/pid.d.ts.map +1 -1
- package/dist/src/pid.js +20 -19
- package/dist/src/pid.js.map +1 -1
- package/dist/src/ranges.d.ts +13 -3
- package/dist/src/ranges.d.ts.map +1 -1
- package/dist/src/ranges.js +207 -335
- package/dist/src/ranges.js.map +1 -1
- package/dist/src/replication-domain-hash.d.ts +5 -0
- package/dist/src/replication-domain-hash.d.ts.map +1 -0
- package/dist/src/replication-domain-hash.js +30 -0
- package/dist/src/replication-domain-hash.js.map +1 -0
- package/dist/src/replication-domain-time.d.ts +14 -0
- package/dist/src/replication-domain-time.d.ts.map +1 -0
- package/dist/src/replication-domain-time.js +59 -0
- package/dist/src/replication-domain-time.js.map +1 -0
- package/dist/src/replication-domain.d.ts +33 -0
- package/dist/src/replication-domain.d.ts.map +1 -0
- package/dist/src/replication-domain.js +6 -0
- package/dist/src/replication-domain.js.map +1 -0
- package/dist/src/replication.d.ts +10 -8
- package/dist/src/replication.d.ts.map +1 -1
- package/dist/src/replication.js +64 -46
- package/dist/src/replication.js.map +1 -1
- package/dist/src/role.d.ts +2 -1
- package/dist/src/role.d.ts.map +1 -1
- package/dist/src/role.js +6 -5
- package/dist/src/role.js.map +1 -1
- package/package.json +2 -2
- package/src/index.ts +604 -310
- package/src/pid.ts +20 -19
- package/src/ranges.ts +291 -371
- package/src/replication-domain-hash.ts +43 -0
- package/src/replication-domain-time.ts +85 -0
- package/src/replication-domain.ts +50 -0
- package/src/replication.ts +50 -46
- package/src/role.ts +6 -5
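
Notable in this release: two new replication-domain modules (replication-domain-hash, replication-domain-time) whose factories createReplicationDomainHash and createReplicationDomainTime are re-exported from the package root, plus a reworked replicate()/unreplicate() API on SharedLog. A minimal usage sketch, inferred only from the exports and call signatures visible in the diff below; the log variable, the entry, and their setup are hypothetical placeholders, not API shown here:

import { createReplicationDomainHash, createReplicationDomainTime } from "@peerbit/shared-log";

// The domain defaults to createReplicationDomainHash() when none is passed
// (see "this.domain = options?.domain ?? createReplicationDomainHash();" in the diff).
// replicate() accepts a range object, a bare factor, a boolean, an Entry, or an array of these:
await log.replicate({ offset: 0.25, factor: 0.5 }); // add a fixed, normalized range
await log.replicate(0.5);                           // factor only; offset is randomized and existing ranges are reset
await log.replicate(entry);                         // claim a single entry's coordinate via domain.fromEntry
await log.unreplicate();                            // stop replicating entirely
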
package/dist/src/index.js
CHANGED
@@ -7,12 +7,12 @@ var __decorate = (this && this.__decorate) || function (decorators, target, key,
 var __metadata = (this && this.__metadata) || function (k, v) {
 if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
 };
-import {
+import { BorshError, field, variant } from "@dao-xyz/borsh";
 import { CustomEvent } from "@libp2p/interface";
 import { AnyBlockStore, RemoteBlocks } from "@peerbit/blocks";
 import { Cache } from "@peerbit/cache";
-import { AccessError, PublicSignKey,
-import { And, ByteMatchQuery, CountRequest, DeleteRequest,
+import { AccessError, PublicSignKey, sha256Base64Sync, sha256Sync, } from "@peerbit/crypto";
+import { And, ByteMatchQuery, CountRequest, DeleteRequest, Or, SearchRequest, Sort, StringMatch, SumRequest, } from "@peerbit/indexer-interface";
 import { Entry, Log, ShallowEntry, } from "@peerbit/log";
 import { logger as loggerFn } from "@peerbit/logger";
 import { ClosedError, Program } from "@peerbit/program";
@@ -23,17 +23,33 @@ import { AbortError, delay, waitFor } from "@peerbit/time";
 import debounce from "p-debounce";
 import pDefer, {} from "p-defer";
 import PQueue from "p-queue";
+import { concat } from "uint8arrays";
 import { BlocksMessage } from "./blocks.js";
 import { CPUUsageIntervalLag } from "./cpu.js";
 import { EntryWithRefs, ExchangeHeadsMessage, RequestIPrune, RequestMaybeSync, ResponseIPrune, ResponseMaybeSync, createExchangeHeadsMessages, } from "./exchange-heads.js";
 import { TransportMessage } from "./message.js";
 import { PIDReplicationController } from "./pid.js";
-import { getCoverSet, getSamples, isMatured } from "./ranges.js";
-import {
-import {
-
+import { getCoverSet, getSamples, hasCoveringRange, isMatured, minimumWidthToCover, } from "./ranges.js";
+import { createReplicationDomainHash, hashToU32, } from "./replication-domain-hash.js";
+import { createReplicationDomainTime, } from "./replication-domain-time.js";
+import {} from "./replication-domain.js";
+import { AbsoluteReplicas, AddedReplicationSegmentMessage, AllReplicatingSegmentsMessage, ReplicationError, ReplicationIntent, ReplicationRange, ReplicationRangeIndexable, RequestReplicationInfoMessage, ResponseRoleMessage, StoppedReplicating, decodeReplicas, encodeReplicas, maxReplicas, } from "./replication.js";
+import { MAX_U32, Observer, Replicator, scaleToU32 } from "./role.js";
+export { createReplicationDomainHash, createReplicationDomainTime, };
 export { CPUUsageIntervalLag };
+export * from "./replication.js";
 export const logger = loggerFn({ module: "shared-log" });
+const getLatestEntry = (entries) => {
+let latest = undefined;
+for (const element of entries) {
+let entry = element instanceof EntryWithRefs ? element.entry : element;
+if (!latest ||
+entry.meta.clock.timestamp.compare(latest.meta.clock.timestamp) > 0) {
+latest = entry;
+}
+}
+return latest;
+};
 const groupByGid = async (entries) => {
 const groupByGid = new Map();
 for (const head of entries) {
@@ -61,8 +77,15 @@ const isAdaptiveReplicatorOption = (options) => {
 if (options.factor != null) {
 return false;
 }
+if (Array.isArray(options)) {
+return false;
+}
 return true;
 };
+const isUnreplicationOptions = (options) => options === false ||
+options === 0 ||
+(options?.offset === undefined &&
+options?.factor === 0);
 export const DEFAULT_MIN_REPLICAS = 2;
 export const WAIT_FOR_REPLICATOR_TIMEOUT = 9000;
 export const WAIT_FOR_ROLE_MATURITY = 5000;
@@ -72,7 +95,8 @@ let SharedLog = class SharedLog extends Program {
 log;
 rpc;
 // options
-
+_isReplicating;
+_isAdaptiveReplicating;
 _replicationRangeIndex;
 /* private _totalParticipation!: number; */
 _gidPeersHistory;
@@ -85,7 +109,7 @@ let SharedLog = class SharedLog extends Program {
 _respondToIHaveTimeout;
 _pendingDeletes;
 _pendingIHave;
-
+latestReplicationInfoMessage;
 remoteBlocks;
 openTime;
 oldestOpenTime;
@@ -109,18 +133,13 @@ let SharedLog = class SharedLog extends Program {
 distributionDebounceTime;
 replicationController;
 history;
+domain;
 pq;
 constructor(properties) {
 super();
 this.log = new Log(properties);
 this.rpc = new RPC();
 }
-/**
-* Return the
-*/
-get replicationSettings() {
-return this._replicationSettings;
-}
 get compatibility() {
 return this._logProperties?.compatibility;
 }
@@ -128,30 +147,33 @@ let SharedLog = class SharedLog extends Program {
 return (this.compatibility ?? Number.MAX_VALUE) < 9;
 }
 // @deprecated
-getRole() {
-
-
-
-
-
+async getRole() {
+const segments = await this.getMyReplicationSegments();
+if (segments.length > 1) {
+throw new Error("More than one replication segment found. Can only use one segment for compatbility with v8");
+}
+if (segments.length > 0) {
+const segment = segments[0].toReplicationRange();
 return new Replicator({
-factor:
-offset:
+factor: segment.factor / MAX_U32,
+offset: segment.offset / MAX_U32,
 });
 }
 // TODO this is not accurate but might be good enough
 return new Observer();
 }
 async isReplicating() {
-if (!this.
+if (!this._isReplicating) {
 return false;
 }
+/*
 if (isAdaptiveReplicatorOption(this._replicationSettings)) {
 return true;
 }
-
+
+if ((this.replicationSettings as FixedReplicationOptions).factor !== 0) {
 return true;
-}
+} */
 return (await this.countReplicationSegments()) > 0;
 }
 /* get totalParticipation(): number {
@@ -159,7 +181,7 @@ let SharedLog = class SharedLog extends Program {
 } */
 async calculateTotalParticipation() {
 const sum = await this.replicationIndex.sum(new SumRequest({ key: "width" }));
-return Number(sum) /
+return Number(sum) / MAX_U32;
 }
 async countReplicationSegments() {
 const count = await this.replicationIndex.count(new CountRequest({
@@ -171,6 +193,7 @@ let SharedLog = class SharedLog extends Program {
 return count;
 }
 setupRebalanceDebounceFunction() {
+this.rebalanceParticipationDebounced = undefined;
 this.rebalanceParticipationDebounced = debounce(() => this.rebalanceParticipation(),
 /* Math.max(
 REBALANCE_DEBOUNCE_INTERVAL,
@@ -181,107 +204,194 @@ let SharedLog = class SharedLog extends Program {
 ) */
 REBALANCE_DEBOUNCE_INTERVAL);
 }
-async
-
-
-this.
-
-
-
-
-
-
-
-
-
-
-});
-this.cpuUsage =
-options?.limits?.cpu && typeof options?.limits?.cpu === "object"
-? options?.limits?.cpu?.monitor || new CPUUsageIntervalLag()
-: new CPUUsageIntervalLag();
-this.cpuUsage?.start?.();
-this.setupRebalanceDebounceFunction();
-};
-if (options) {
+async _replicate(options, { reset, checkDuplicates, announce, } = {}) {
+let offsetWasProvided = false;
+if (isUnreplicationOptions(options)) {
+await this.unreplicate();
+}
+else {
+let ranges = [];
+if (options == null) {
+options = {};
+}
+else if (options === true) {
+options = {};
+}
+this._isReplicating = true;
+this._isAdaptiveReplicating = false;
 if (isAdaptiveReplicatorOption(options)) {
-this.
-setupDebouncedRebalancing(
+this._isAdaptiveReplicating = true;
+this.setupDebouncedRebalancing(options);
+// initial role in a dynamic setup
+const maybeRange = await this.getDynamicRange();
+if (!maybeRange) {
+// not allowed
+return;
+}
+ranges = [maybeRange];
+offsetWasProvided = true;
 }
-else if (options
-
-
-
+else if (options instanceof ReplicationRange) {
+ranges = [
+options.toReplicationRangeIndexable(this.node.identity.publicKey),
+];
+offsetWasProvided = true;
 }
 else {
+let rangeArgs;
 if (typeof options === "number") {
-
-
-
+rangeArgs = [
+{
+factor: options,
+},
+];
 }
 else {
-
+rangeArgs = (Array.isArray(options) ? options : [{ ...options }]);
+}
+if (rangeArgs.length === 0) {
+// nothing to do
+return;
 }
+for (const rangeArg of rangeArgs) {
+const normalized = rangeArg.normalized ?? true;
+offsetWasProvided = rangeArg.offset != null;
+const offset = rangeArg.offset ??
+(normalized ? Math.random() : scaleToU32(Math.random()));
+let factor = rangeArg.factor;
+let width = normalized ? 1 : scaleToU32(1);
+ranges.push(new ReplicationRangeIndexable({
+normalized,
+offset: offset,
+length: typeof factor === "number"
+? factor
+: factor === "all"
+? width
+: width - offset,
+publicKeyHash: this.node.identity.publicKey.hashcode(),
+mode: rangeArg.strict
+? ReplicationIntent.Strict
+: ReplicationIntent.NonStrict, // automatic means that this range might be reused later for dynamic replication behaviour
+timestamp: BigInt(+new Date()),
+}));
+}
+}
+for (const range of ranges) {
+this.oldestOpenTime = Math.min(Number(range.timestamp), this.oldestOpenTime);
 }
+let resetRanges = reset;
+if (!resetRanges && !offsetWasProvided) {
+resetRanges = true;
+// because if we do something like replicate ({ factor: 0.5 }) it means that we want to replicate 50%
+// but ({ replicate: 0.5, offset: 0.5 }) means that we want to add a range
+// TODO make behaviour more clear
+}
+await this.startAnnounceReplicating(ranges, {
+reset: resetRanges ?? false,
+checkDuplicates,
+announce,
+});
 }
-
-
+}
+setupDebouncedRebalancing(options) {
+this.cpuUsage?.stop?.();
+this.replicationController = new PIDReplicationController(this.node.identity.publicKey.hashcode(), {
+storage: options?.limits?.storage != null
+? { max: options?.limits?.storage }
+: undefined,
+cpu: options?.limits?.cpu != null
+? {
+max: typeof options?.limits?.cpu === "object"
+? options.limits.cpu.max
+: options?.limits?.cpu,
+}
+: undefined,
+});
+this.cpuUsage =
+options?.limits?.cpu && typeof options?.limits?.cpu === "object"
+? options?.limits?.cpu?.monitor || new CPUUsageIntervalLag()
+: new CPUUsageIntervalLag();
+this.cpuUsage?.start?.();
+this.setupRebalanceDebounceFunction();
+}
+async replicate(rangeOrEntry, options) {
+let range = undefined;
+if (rangeOrEntry instanceof ReplicationRange) {
+range = rangeOrEntry;
 }
-if (
-
-
+else if (rangeOrEntry instanceof Entry) {
+range = {
+factor: 1,
+offset: await this.domain.fromEntry(rangeOrEntry),
+normalized: false,
+};
+}
+else if (Array.isArray(rangeOrEntry)) {
+let ranges = [];
+for (const entry of rangeOrEntry) {
+if (entry instanceof Entry) {
+ranges.push({
+factor: 1,
+offset: await this.domain.fromEntry(entry),
+normalized: false,
+});
+}
+else {
+ranges.push(entry);
+}
+}
+range = ranges;
 }
 else {
-
-const range = new ReplicationRangeIndexable({
-offset: this._replicationSettings.offset ??
-Math.random(),
-length: this._replicationSettings.factor,
-publicKeyHash: this.node.identity.publicKey.hashcode(),
-replicationIntent: ReplicationIntent.Explicit, // automatic means that this range might be reused later for dynamic replication behaviour
-timestamp: BigInt(+new Date()),
-id: sha256Sync(this.node.identity.publicKey.bytes),
-});
-await this.startAnnounceReplicating(range);
+range = rangeOrEntry ?? true;
 }
+const newRanges = await this._replicate(range, options);
+// assume new role
+await this.distribute();
+return newRanges;
 }
-async
-
-
-
+async unreplicate(rangeOrEntry) {
+let range;
+if (rangeOrEntry instanceof Entry) {
+range = {
+factor: 1,
+offset: await this.domain.fromEntry(rangeOrEntry),
+};
+}
+else if (rangeOrEntry instanceof ReplicationRange) {
+range = rangeOrEntry;
 }
 else {
-
-
-
-
-}
-else {
-await this.setupReplicationSettings(range ?? true);
-}
+this._isReplicating = false;
+this._isAdaptiveReplicating = false;
+await this.removeReplicator(this.node.identity.publicKey);
+return;
 }
-
-
+if (this._isAdaptiveReplicating) {
+// we can not unreplicate individual ranges when dynamically replicating (yet)
+// TODO support this by never deleting the range with the segment id that is generated by the dynamic replication method
+throw new Error("Unsupported when adaptive replicating");
+}
+const indexed = await this.replicationIndex.query(new SearchRequest({
+query: {
+width: 1,
+start1: range.offset,
+},
+}));
+const segmentIds = indexed.results.map((x) => x.id.key);
+await this.removeReplicationRange(segmentIds, this.node.identity.publicKey);
+await this.rpc.send(new StoppedReplicating({ segmentIds }), {
+priority: 1,
+});
 }
 async removeReplicator(key) {
 const fn = async () => {
-
-query: { hash: key.hashcode() },
-fetch: 0xffffffff,
-}), { reference: true });
-if (prev.results.length === 0) {
-return;
-}
-/* let sumWidth = prev.results.reduce(
-(acc, x) => acc + x.value.widthNormalized,
-0,
-);
-this._totalParticipation -= sumWidth;
-*/
-let idMatcher = new Or(prev.results.map((x) => new ByteMatchQuery({ key: "id", value: x.value.id })));
-await this.replicationIndex.del(new DeleteRequest({ query: idMatcher }));
+await this.replicationIndex.del(new DeleteRequest({ query: { hash: key.hashcode() } }));
 await this.updateOldestTimestampFromIndex();
+if (this.node.identity.publicKey.equals(key)) {
+// announce that we are no longer replicating
+await this.rpc.send(new AllReplicatingSegmentsMessage({ segments: [] }), { priority: 1 });
+}
 this.events.dispatchEvent(new CustomEvent("replication:change", {
 detail: { publicKey: key },
 }));
@@ -310,11 +420,6 @@ let SharedLog = class SharedLog extends Program {
 value: from.hashcode(),
 });
 let query = new And([idMatcher, identityMatcher]);
-/* const prevSum = await this.replicationIndex.sum(
-new SumRequest({ query, key: "width" }),
-);
-const prevSumNormalized = Number(prevSum) / SEGMENT_COORDINATE_SCALE;
-this._totalParticipation -= prevSumNormalized; */
 await this.replicationIndex.del(new DeleteRequest({ query }));
 await this.updateOldestTimestampFromIndex();
 this.events.dispatchEvent(new CustomEvent("replication:change", {
@@ -326,27 +431,38 @@ let SharedLog = class SharedLog extends Program {
 };
 return this.pq.add(fn);
 }
-async addReplicationRange(
+async addReplicationRange(ranges, from, { reset, checkDuplicates, } = {}) {
 const fn = async () => {
 if (this._isTrustedReplicator &&
 !(await this._isTrustedReplicator(from))) {
 return false;
 }
-range.id = new Uint8Array(range.id);
 let prevCount = await this.replicationIndex.count(new CountRequest({
 query: new StringMatch({ key: "hash", value: from.hashcode() }),
 }));
 const isNewReplicator = prevCount === 0;
-
-
-
-
+if (reset) {
+await this.replicationIndex.del(new DeleteRequest({ query: { hash: from.hashcode() } }));
+}
+else if (checkDuplicates) {
+let deduplicated = [];
+// TODO also deduplicate/de-overlap among the ranges that ought to be inserted?
+for (const range of ranges) {
+if (!(await hasCoveringRange(this.replicationIndex, range))) {
+deduplicated.push(range);
+}
+}
+ranges = deduplicated;
+}
+for (const range of ranges) {
+await this.replicationIndex.put(range);
+if (!reset) {
+this.oldestOpenTime = Math.min(Number(range.timestamp), this.oldestOpenTime);
 }
-/* this._totalParticipation -= prev.value.widthNormalized; */
 }
-
-
-
+if (reset) {
+await this.updateOldestTimestampFromIndex();
+}
 this.events.dispatchEvent(new CustomEvent("replication:change", {
 detail: { publicKey: from },
 }));
@@ -360,17 +476,35 @@ let SharedLog = class SharedLog extends Program {
 }
 return true;
 };
+// we sequialize this because we are going to queries to check wether to add or not
+// if two processes do the same this both process might add a range while only one in practice should
 return this.pq.add(fn);
 }
-async startAnnounceReplicating(range) {
-const added = await this.addReplicationRange(range, this.node.identity.publicKey);
+async startAnnounceReplicating(range, options = {}) {
+const added = await this.addReplicationRange(range, this.node.identity.publicKey, options);
 if (!added) {
 logger.warn("Not allowed to replicate by canReplicate");
 }
+let message;
 if (added) {
-
-
-
+if (options.reset) {
+message = new AllReplicatingSegmentsMessage({
+segments: range.map((x) => x.toReplicationRange()),
+});
+}
+else {
+message = new AddedReplicationSegmentMessage({
+segments: range.map((x) => x.toReplicationRange()),
+});
+}
+if (options.announce) {
+return options.announce(message);
+}
+else {
+await this.rpc.send(message, {
+priority: 1,
+});
+}
 }
 }
 async append(data, options) {
@@ -404,16 +538,22 @@ let SharedLog = class SharedLog extends Program {
 }
 const result = await this.log.append(data, appendOptions);
 let mode = undefined;
+if (options?.replicate) {
+await this.replicate(result.entry, { checkDuplicates: true });
+}
 for (const message of await createExchangeHeadsMessages(this.log, [result.entry], this._gidParentCache)) {
 if (options?.target === "replicators" || !options?.target) {
 const minReplicas = decodeReplicas(result.entry).getValue(this);
-let leaders = await this.findLeaders(result.entry
+let leaders = await this.findLeaders(result.entry, minReplicas);
 const isLeader = leaders.includes(this.node.identity.publicKey.hashcode());
 if (message.heads[0].gidRefrences.length > 0) {
 const newAndOldLeaders = new Set(leaders);
 for (const ref of message.heads[0].gidRefrences) {
-
-
+const entryFromGid = this.log.entryIndex.getHeads(ref, false);
+for (const entry of await entryFromGid.all()) {
+for (const hash of await this.findLeaders(entry, minReplicas)) {
+newAndOldLeaders.add(hash);
+}
 }
 }
 leaders = newAndOldLeaders;
@@ -453,10 +593,11 @@ let SharedLog = class SharedLog extends Program {
 : options.replicas.max
 : undefined,
 };
+this.domain = options?.domain ?? createReplicationDomainHash();
 this._respondToIHaveTimeout = options?.respondToIHaveTimeout ?? 10 * 1000; // TODO make into arg
 this._pendingDeletes = new Map();
 this._pendingIHave = new Map();
-this.
+this.latestReplicationInfoMessage = new Map();
 this.syncInFlightQueue = new Map();
 this.syncInFlightQueueInverted = new Map();
 this.syncInFlight = new Map();
@@ -495,6 +636,7 @@ let SharedLog = class SharedLog extends Program {
 });
 const logIndex = await logScope.scope("log");
 await this.node.indexer.start(); // TODO why do we need to start the indexer here?
+const hasIndexedReplicationInfo = (await this.replicationIndex.getSize()) > 0;
 /* this._totalParticipation = await this.calculateTotalParticipation(); */
 this._gidPeersHistory = new Map();
 await this.log.open(this.remoteBlocks, this.node.identity, {
@@ -528,6 +670,7 @@ let SharedLog = class SharedLog extends Program {
 this._onUnsubscriptionFn =
 this._onUnsubscriptionFn || this._onUnsubscription.bind(this);
 await this.node.services.pubsub.addEventListener("unsubscribe", this._onUnsubscriptionFn);
+await this.rpc.subscribe();
 // await this.log.load();
 // TODO (do better)
 // we do this distribution interval to eliminate the sideeffects arriving from updating roles and joining entries continously.
@@ -579,7 +722,13 @@ let SharedLog = class SharedLog extends Program {
 this.syncMoreInterval = setTimeout(requestSync, 1e4);
 });
 };
-
+// if we had a previous session with replication info, and new replication info dictates that we unreplicate
+// we should do that. Otherwise if options is a unreplication we dont need to do anything because
+// we are already unreplicated (as we are just opening)
+if (hasIndexedReplicationInfo ||
+isUnreplicationOptions(options?.replicate) === false) {
+await this.replicate(options?.replicate, { checkDuplicates: true });
+}
 requestSync();
 }
 async afterOpen() {
@@ -607,7 +756,7 @@ let SharedLog = class SharedLog extends Program {
 }
 async onChange(change) {
 for (const added of change.added) {
-this.onEntryAdded(added);
+this.onEntryAdded(added.entry);
 }
 for (const removed of change.removed) {
 this.onEntryRemoved(removed.hash);
@@ -637,6 +786,17 @@ let SharedLog = class SharedLog extends Program {
 throw error;
 }
 }
+async getCover(args, roleAge) {
+roleAge = roleAge ?? (await this.getDefaultMinRoleAge());
+const range = await this.domain.fromArgs(args, this);
+const set = await getCoverSet(this.replicationIndex, roleAge, range.offset, range.length ??
+(await minimumWidthToCover(this.replicas.min.getValue(this))), MAX_U32);
+// add all in flight
+for (const [key, _] of this.syncInFlight) {
+set.add(key);
+}
+return [...set];
+}
 async _close() {
 clearTimeout(this.syncMoreInterval);
 clearInterval(this.distributeInterval);
@@ -658,7 +818,7 @@ let SharedLog = class SharedLog extends Program {
 this.syncInFlightQueue.clear();
 this.syncInFlightQueueInverted.clear();
 this.syncInFlight.clear();
-this.
+this.latestReplicationInfoMessage.clear();
 this._gidPeersHistory.clear();
 this._replicationRangeIndex = undefined;
 this.cpuUsage?.stop?.();
@@ -727,6 +887,7 @@ let SharedLog = class SharedLog extends Program {
 const headsWithGid = await this.log.entryIndex
 .getHeads(gid)
 .all();
+const latestEntry = getLatestEntry(entries);
 const maxReplicasFromHead = headsWithGid && headsWithGid.length > 0
 ? maxReplicas(this, [...headsWithGid.values()])
 : this.replicas.min.getValue(this);
@@ -734,10 +895,10 @@ let SharedLog = class SharedLog extends Program {
 const isReplicating = await this.isReplicating();
 let isLeader;
 if (isReplicating) {
-isLeader = await this.waitForIsLeader(
+isLeader = await this.waitForIsLeader(latestEntry, Math.max(maxReplicasFromHead, maxReplicasFromNewEntries));
 }
 else {
-isLeader = await this.findLeaders(
+isLeader = await this.findLeaders(latestEntry, Math.max(maxReplicasFromHead, maxReplicasFromNewEntries));
 isLeader = isLeader.includes(this.node.identity.publicKey.hashcode())
 ? isLeader
 : false;
@@ -803,7 +964,7 @@ let SharedLog = class SharedLog extends Program {
 .all();
 if (headsWithGid && headsWithGid.length > 0) {
 const minReplicas = maxReplicas(this, headsWithGid.values());
-const isLeader = await this.isLeader(entries[0].entry
+const isLeader = await this.isLeader(entries[0].entry, minReplicas);
 if (!isLeader) {
 Promise.all(this.prune(entries.map((x) => x.entry))).catch((e) => {
 logger.info(e.toString());
@@ -819,7 +980,7 @@ let SharedLog = class SharedLog extends Program {
 for (const hash of msg.hashes) {
 const indexedEntry = await this.log.entryIndex.getShallow(hash);
 if (indexedEntry &&
-(await this.isLeader(indexedEntry.value
+(await this.isLeader(indexedEntry.value, decodeReplicas(indexedEntry.value).getValue(this)))) {
 this._gidPeersHistory
 .get(indexedEntry.value.meta.gid)
 ?.delete(context.from.hashcode());
@@ -833,7 +994,7 @@ let SharedLog = class SharedLog extends Program {
 prevPendingIHave?.clear();
 },
 callback: async (entry) => {
-if (await this.isLeader(entry
+if (await this.isLeader(entry, decodeReplicas(entry).getValue(this))) {
 this._gidPeersHistory
 .get(entry.meta.gid)
 ?.delete(context.from.hashcode());
@@ -907,7 +1068,7 @@ let SharedLog = class SharedLog extends Program {
 if (context.from.equals(this.node.identity.publicKey)) {
 return;
 }
-await this.rpc.send(new
+await this.rpc.send(new AllReplicatingSegmentsMessage({
 segments: (await this.getMyReplicationSegments()).map((x) => x.toReplicationRange()),
 }), {
 mode: new SilentDelivery({ to: [context.from], redundancy: 1 }),
@@ -916,9 +1077,8 @@ let SharedLog = class SharedLog extends Program {
 if (this.v8Behaviour) {
 const role = this.getRole();
 if (role instanceof Replicator) {
-const fixedSettings = this
-
-if (fixedSettings.factor === 1) {
+const fixedSettings = !this._isAdaptiveReplicating;
+if (fixedSettings) {
 await this.rpc.send(new ResponseRoleMessage({
 role,
 }), {
@@ -931,8 +1091,8 @@ let SharedLog = class SharedLog extends Program {
 }
 }
 }
-else if (msg instanceof
-msg instanceof
+else if (msg instanceof AllReplicatingSegmentsMessage ||
+msg instanceof AddedReplicationSegmentMessage) {
 if (context.from.equals(this.node.identity.publicKey)) {
 return;
 }
@@ -944,23 +1104,16 @@ let SharedLog = class SharedLog extends Program {
 timeout: this.waitForReplicatorTimeout,
 })
 .then(async () => {
-//
-
+// do use an operation log here, because we want to make sure that we don't miss any updates
+// and do them in the right order
+const prev = this.latestReplicationInfoMessage.get(context.from.hashcode());
 if (prev && prev > context.timestamp) {
 return;
 }
-this.
-
-
-
-let addedOnce = false;
-for (const segment of replicationInfoMessage.segments) {
-const added = await this.addReplicationRange(segment.toReplicationRangeIndexable(context.from), context.from);
-if (typeof added === "boolean") {
-addedOnce = addedOnce || added;
-}
-}
-addedOnce && (await this.distribute());
+this.latestReplicationInfoMessage.set(context.from.hashcode(), context.timestamp);
+let reset = msg instanceof AllReplicatingSegmentsMessage;
+const added = await this.addReplicationRange(replicationInfoMessage.segments.map((x) => x.toReplicationRangeIndexable(context.from)), context.from, { reset, checkDuplicates: true });
+added && (await this.distribute());
 /* await this._modifyReplicators(msg.role, context.from!); */
 })
 .catch((e) => {
@@ -1011,7 +1164,7 @@ let SharedLog = class SharedLog extends Program {
 }));
 return ranges.results.map((x) => x.value);
 }
-async
+async getMyTotalParticipation() {
 // sum all of my replicator rects
 return (await this.getMyReplicationSegments()).reduce((acc, { widthNormalized }) => acc + widthNormalized, 0);
 }
@@ -1056,11 +1209,86 @@ let SharedLog = class SharedLog extends Program {
 throw e;
 });
 }
-async
-
+async join(entries, options) {
+let messageToSend = undefined;
+if (options?.replicate) {
+// TODO this block should perhaps be called from a callback on the this.log.join method on all the ignored element because already joined, like "onAlreadyJoined"
+// check which entrise we already have but not are replicating, and replicate them
+let alreadyJoined = [];
+for (const element of entries) {
+if (typeof element === "string") {
+const entry = await this.log.get(element);
+if (entry) {
+alreadyJoined.push(entry);
+}
+}
+else if (element instanceof Entry) {
+if (await this.log.has(element.hash)) {
+alreadyJoined.push(element);
+}
+}
+else {
+const entry = await this.log.get(element.hash);
+if (entry) {
+alreadyJoined.push(entry);
+}
+}
+}
+// assume is heads
+await this.replicate(alreadyJoined, {
+checkDuplicates: true,
+announce: (msg) => {
+if (msg instanceof AllReplicatingSegmentsMessage) {
+throw new Error("Unexpected");
+}
+messageToSend = msg;
+},
+});
+}
+let joinOptions = options?.replicate
+? {
+...options,
+onChange: async (change) => {
+if (change.added) {
+for (const entry of change.added) {
+if (entry.head) {
+await this.replicate(entry.entry, {
+checkDuplicates: true,
+// we override the announce step here to make sure we announce all new replication info
+// in one large message instead
+announce: (msg) => {
+if (msg instanceof AllReplicatingSegmentsMessage) {
+throw new Error("Unexpected");
+}
+if (messageToSend) {
+// merge segments to make it into one messages
+for (const segment of msg.segments) {
+messageToSend.segments.push(segment);
+}
+}
+else {
+messageToSend = msg;
+}
+},
+});
+}
+}
+}
+},
+}
+: options;
+await this.log.join(entries, joinOptions);
+if (messageToSend) {
+await this.rpc.send(messageToSend, {
+priority: 1,
+});
+}
+}
+async isLeader(entry, numberOfLeaders, options) {
+const isLeader = (await this.findLeaders(entry, numberOfLeaders, options)).find((l) => l === this.node.identity.publicKey.hashcode());
 return !!isLeader;
 }
-async waitForIsLeader(
+async waitForIsLeader(entry, numberOfLeaders, timeout = this.waitForReplicatorTimeout) {
 return new Promise((resolve, reject) => {
 const removeListeners = () => {
 this.events.removeEventListener("replication:change", roleListener);
@@ -1075,7 +1303,7 @@ let SharedLog = class SharedLog extends Program {
 removeListeners();
 resolve(false);
 }, timeout);
-const check = () => this.findLeaders(
+const check = () => this.findLeaders(entry, numberOfLeaders).then((leaders) => {
 const isLeader = leaders.find((l) => l === this.node.identity.publicKey.hashcode());
 if (isLeader) {
 removeListeners();
@@ -1091,20 +1319,12 @@ let SharedLog = class SharedLog extends Program {
 check();
 });
 }
-async findLeaders(
+async findLeaders(entry, numberOfLeaders, options) {
 if (this.closed) {
 return [this.node.identity.publicKey.hashcode()]; // Assumption: if the store is closed, always assume we have responsibility over the data
 }
-
-
-// Convert this thing we wan't to distribute to 8 bytes so we get can convert it into a u64
-// modulus into an index
-const utf8writer = new BinaryWriter();
-utf8writer.string(subject.toString());
-const seed = await sha256(utf8writer.finalize());
-// convert hash of slot to a number
-const cursor = hashToUniformNumber(seed); // bounded between 0 and 1
-return this.findLeadersFromUniformNumber(cursor, numberOfLeaders, options);
+const cursor = await this.domain.fromEntry(entry);
+return this.findLeadersFromU32(cursor, numberOfLeaders, options);
 }
 async getDefaultMinRoleAge() {
 if ((await this.isReplicating()) === false) {
@@ -1115,39 +1335,12 @@ let SharedLog = class SharedLog extends Program {
 const diffToOldest = replLength > 1 ? now - this.oldestOpenTime - 1 : Number.MAX_SAFE_INTEGER;
 return Math.min(this.timeUntilRoleMaturity, diffToOldest, Math.round((this.timeUntilRoleMaturity * Math.log(replLength + 1)) / 3)); // / 3 so that if 2 replicators and timeUntilRoleMaturity = 1e4 the result will be 1
 }
-async
+async findLeadersFromU32(cursor, numberOfLeaders, options) {
 const roleAge = options?.roleAge ?? (await this.getDefaultMinRoleAge()); // TODO -500 as is added so that i f someone else is just as new as us, then we treat them as mature as us. without -500 we might be slower syncing if two nodes starts almost at the same time
-
-return samples;
-}
-/**
-*
-* @returns groups where at least one in any group will have the entry you are looking for
-*/
-async getReplicatorUnion(roleAge) {
-roleAge = roleAge ?? (await this.getDefaultMinRoleAge());
-if (this.closed === true) {
-throw new ClosedError();
-}
-// Total replication "width"
-const width = 1;
-// How much width you need to "query" to
-const peers = this.replicationIndex; // TODO types
-const minReplicas = Math.min(await peers.getSize(), this.replicas.min.getValue(this));
-// If min replicas = 2
-// then we need to make sure we cover 0.5 of the total 'width' of the replication space
-// to make sure we reach sufficient amount of nodes such that at least one one has
-// the entry we are looking for
-const coveringWidth = width / minReplicas;
-const set = await getCoverSet(coveringWidth, peers, roleAge, this.node.identity.publicKey);
-// add all in flight
-for (const [key, _] of this.syncInFlight) {
-set.add(key);
-}
-return [...set];
+return getSamples(cursor, this.replicationIndex, numberOfLeaders, roleAge);
 }
 async isReplicator(entry, options) {
-return this.isLeader(entry
+return this.isLeader(entry, decodeReplicas(entry).getValue(this), options);
 }
 async handleSubscriptionChange(publicKey, topics, subscribed) {
 for (const topic of topics) {
@@ -1178,7 +1371,7 @@ let SharedLog = class SharedLog extends Program {
 const replicationSegments = await this.getMyReplicationSegments();
 if (replicationSegments.length > 0) {
 this.rpc
-.send(new
+.send(new AllReplicatingSegmentsMessage({
 segments: replicationSegments.map((x) => x.toReplicationRange()),
 }), {
 mode: new SilentDelivery({ redundancy: 1, to: [publicKey] }),
@@ -1187,7 +1380,7 @@ let SharedLog = class SharedLog extends Program {
 if (this.v8Behaviour) {
 // for backwards compatibility
 this.rpc
-.send(new ResponseRoleMessage({ role: this.getRole() }), {
+.send(new ResponseRoleMessage({ role: await this.getRole() }), {
 mode: new SilentDelivery({ redundancy: 1, to: [publicKey] }),
 })
 .catch((e) => logger.error(e.toString()));
@@ -1255,7 +1448,7 @@ let SharedLog = class SharedLog extends Program {
 const minMinReplicasValue = this.replicas.max
 ? Math.min(minReplicasValue, this.replicas.max.getValue(this))
 : minReplicasValue;
-const leaders = await this.findLeaders(entry
+const leaders = await this.findLeaders(entry, minMinReplicasValue);
 if (leaders.find((x) => x === this.node.identity.publicKey.hashcode())) {
 reject(new Error("Failed to delete, is leader"));
 return;
@@ -1338,7 +1531,7 @@ let SharedLog = class SharedLog extends Program {
 continue; // TODO maybe close store?
 }
 const oldPeersSet = this._gidPeersHistory.get(gid);
-const currentPeers = await this.findLeaders(
+const currentPeers = await this.findLeaders(getLatestEntry(entries), maxReplicas(this, entries));
 const isLeader = currentPeers.find((x) => x === this.node.identity.publicKey.hashcode());
 const currentPeersSet = new Set(currentPeers);
 this._gidPeersHistory.set(gid, currentPeersSet);
@@ -1408,7 +1601,8 @@ let SharedLog = class SharedLog extends Program {
 }
 async _onUnsubscription(evt) {
 logger.debug(`Peer disconnected '${evt.detail.from.hashcode()}' from '${JSON.stringify(evt.detail.unsubscriptions.map((x) => x))}'`);
-this.
+this.latestReplicationInfoMessage.delete(evt.detail.from.hashcode());
+// TODO only emit this if the peer is actually replicating anything
 this.events.dispatchEvent(new CustomEvent("replicator:leave", {
 detail: { publicKey: evt.detail.from },
 }));
@@ -1450,10 +1644,10 @@ let SharedLog = class SharedLog extends Program {
 return false;
 }
 // The role is fixed (no changes depending on memory usage or peer count etc)
-if (!this.
+if (!this._isReplicating) {
 return false;
 }
-if (
+if (this._isAdaptiveReplicating) {
 const peers = this.replicationIndex;
 const usedMemory = await this.getMemoryUsage();
 let dynamicRange = await this.getDynamicRange();
@@ -1461,10 +1655,11 @@ let SharedLog = class SharedLog extends Program {
 return; // not allowed to replicate
 }
 const peersSize = (await peers.getSize()) || 1;
+const totalParticipation = await this.calculateTotalParticipation();
 const newFactor = this.replicationController.step({
 memoryUsage: usedMemory,
 currentFactor: dynamicRange.widthNormalized,
-totalFactor:
+totalFactor: totalParticipation, // TODO use this._totalParticipation when flakiness is fixed
 peerCount: peersSize,
 cpuUsage: this.cpuUsage?.value(),
 });
@@ -1473,11 +1668,11 @@ let SharedLog = class SharedLog extends Program {
 if (relativeDifference > 0.0001) {
 // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects
 dynamicRange = new ReplicationRangeIndexable({
-offset:
-length: newFactor,
+offset: hashToU32(this.node.identity.publicKey.bytes),
+length: scaleToU32(newFactor),
 publicKeyHash: dynamicRange.hash,
 id: dynamicRange.id,
-
+mode: dynamicRange.mode,
 timestamp: dynamicRange.timestamp,
 });
 const canReplicate = !this._isTrustedReplicator ||
@@ -1485,7 +1680,10 @@ let SharedLog = class SharedLog extends Program {
 if (!canReplicate) {
 return false;
 }
-await this.startAnnounceReplicating(dynamicRange
+await this.startAnnounceReplicating([dynamicRange], {
+checkDuplicates: false,
+reset: false,
+});
 /* await this._updateRole(newRole, onRoleChange); */
 this.rebalanceParticipationDebounced?.();
 return true;
@@ -1498,31 +1696,30 @@ let SharedLog = class SharedLog extends Program {
 return false;
 }
 async getDynamicRange() {
+let dynamicRangeId = sha256Sync(concat([
+this.node.identity.publicKey.bytes,
+new TextEncoder().encode("dynamic"),
+]));
 let range = (await this.replicationIndex.query(new SearchRequest({
 query: [
-new
-key: "
-value:
-}),
-new IntegerCompare({
-key: "replicationIntent",
-value: ReplicationIntent.Automatic,
-compare: "eq",
+new ByteMatchQuery({
+key: "id",
+value: dynamicRangeId,
 }),
 ],
 fetch: 1,
 })))?.results[0]?.value;
 if (!range) {
-let seed = Math.random();
 range = new ReplicationRangeIndexable({
-
+normalized: true,
+offset: Math.random(),
 length: 0,
 publicKeyHash: this.node.identity.publicKey.hashcode(),
-
+mode: ReplicationIntent.NonStrict,
 timestamp: BigInt(+new Date()),
-id:
+id: dynamicRangeId,
 });
-const added = await this.addReplicationRange(range, this.node.identity.publicKey);
+const added = await this.addReplicationRange([range], this.node.identity.publicKey, { reset: false, checkDuplicates: false });
 if (!added) {
 logger.warn("Not allowed to replicate by canReplicate");
 return;