@peerbit/shared-log 12.2.0-10dfe9b → 12.2.0-3333888
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/benchmark/pid-convergence.d.ts +2 -0
- package/dist/benchmark/pid-convergence.d.ts.map +1 -0
- package/dist/benchmark/pid-convergence.js +138 -0
- package/dist/benchmark/pid-convergence.js.map +1 -0
- package/dist/benchmark/sync-catchup.d.ts +3 -0
- package/dist/benchmark/sync-catchup.d.ts.map +1 -0
- package/dist/benchmark/sync-catchup.js +109 -0
- package/dist/benchmark/sync-catchup.js.map +1 -0
- package/dist/src/index.d.ts +7 -2
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js +63 -32
- package/dist/src/index.js.map +1 -1
- package/package.json +18 -18
- package/src/index.ts +80 -36
package/src/index.ts
CHANGED
@@ -376,11 +376,18 @@ export type SharedLogOptions<
 export const DEFAULT_MIN_REPLICAS = 2;
 export const WAIT_FOR_REPLICATOR_TIMEOUT = 9000;
 export const WAIT_FOR_ROLE_MATURITY = 5000;
-
+// TODO(prune): Investigate if/when a non-zero prune delay is required for correctness
+// (e.g. responsibility/replication-info message reordering in multi-peer scenarios).
+// Prefer making pruning robust without timing-based heuristics.
+export const WAIT_FOR_PRUNE_DELAY = 0;
 const PRUNE_DEBOUNCE_INTERVAL = 500;
 
 // DONT SET THIS ANY LOWER, because it will make the pid controller unstable as the system responses are not fast enough to updates from the pid controller
 const RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL = 1000;
+const RECALCULATE_PARTICIPATION_MIN_RELATIVE_CHANGE = 0.01;
+const RECALCULATE_PARTICIPATION_MIN_RELATIVE_CHANGE_WITH_CPU_LIMIT = 0.005;
+const RECALCULATE_PARTICIPATION_MIN_RELATIVE_CHANGE_WITH_MEMORY_LIMIT = 0.001;
+const RECALCULATE_PARTICIPATION_RELATIVE_DENOMINATOR_FLOOR = 1e-3;
 
 const DEFAULT_DISTRIBUTION_DEBOUNCE_TIME = 500;
 
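The four new RECALCULATE_PARTICIPATION_* constants define a relative-change deadband for participation updates (see the hunk at line 4311 below, where they are used). A minimal sketch of why the denominator floor matters; the helper below is illustrative and not part of the package, only the constant values come from this diff:

const floor = 1e-3; // RECALCULATE_PARTICIPATION_RELATIVE_DENOMINATOR_FLOOR

function relativeChange(current: number, next: number): number {
    // Without the floor, a near-zero current width would make any update look huge.
    return Math.abs(current - next) / Math.max(current, floor);
}

relativeChange(1e-9, 1e-6); // ~0.001 with the floor; ~999 without it
relativeChange(0.5, 0.51);  // 0.02 -> exceeds the default 0.01 deadband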
@@ -617,15 +624,6 @@ export class SharedLog<
     ) {
         this.rebalanceParticipationDebounced = undefined;
 
-        // make the rebalancing to respect warmup time
-        let intervalTime = interval * 2;
-        let timeout = setTimeout(() => {
-            intervalTime = interval;
-        }, this.timeUntilRoleMaturity);
-        this._closeController.signal.addEventListener("abort", () => {
-            clearTimeout(timeout);
-        });
-
         this.rebalanceParticipationDebounced = debounceFixedInterval(
             () => this.rebalanceParticipation(),
             /* Math.max(
@@ -635,7 +633,7 @@ export class SharedLog<
                 REBALANCE_DEBOUNCE_INTERVAL
             )
             ) */
-
+            interval, // TODO make this dynamic on the number of replicators
         );
     }
 
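These two hunks drop the warmup special case (a doubled interval until role maturity, with its own timer and abort handling) and pass a fixed interval straight to debounceFixedInterval. For readers unfamiliar with that helper, a standalone sketch of the behavior such a helper provides; this is illustrative only, the real debounceFixedInterval ships with Peerbit's utilities and may differ in detail:

// Calls are coalesced so `fn` runs at most once per `interval` ms,
// but a call arriving mid-interval is never dropped.
function debounceFixedIntervalSketch(fn: () => unknown, interval: number) {
    let timer: ReturnType<typeof setTimeout> | undefined;
    let pending = false;
    const run = () => {
        pending = false;
        timer = setTimeout(() => {
            timer = undefined;
            if (pending) run(); // a call arrived during the interval: fire again
        }, interval);
        fn();
    };
    return {
        call: () => {
            if (timer) {
                pending = true; // coalesce with the in-flight interval
            } else {
                run();
            }
        },
        close: () => timer && clearTimeout(timer),
    };
}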
@@ -1198,16 +1196,20 @@ export class SharedLog<
             }
         }
 
+        let prevCountForOwner: number | undefined = undefined;
         if (existing.length === 0) {
-
+            prevCountForOwner = await this.replicationIndex.count({
                 query: new StringMatch({ key: "hash", value: from.hashcode() }),
             });
-            isNewReplicator =
+            isNewReplicator = prevCountForOwner === 0;
         } else {
             isNewReplicator = false;
         }
 
-        if (
+        if (
+            checkDuplicates &&
+            (existing.length > 0 || (prevCountForOwner ?? 0) > 0)
+        ) {
             let deduplicated: ReplicationRangeIndexable<any>[] = [];
 
             // TODO also deduplicate/de-overlap among the ranges that ought to be inserted?
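The rewritten block makes the "new replicator" decision explicit: a sender counts as new only when no matching ranges exist and the index holds no prior ranges for it, and deduplication now runs only when some prior state for the sender exists. The same decision as a pure function; names here are illustrative, not the package's API:

function classifySender(existingMatches: number, priorRangesForOwner: number) {
    return {
        isNewReplicator: existingMatches === 0 && priorRangesForOwner === 0,
        needsDeduplication: existingMatches > 0 || priorRangesForOwner > 0,
    };
}

classifySender(0, 0); // first time we hear from this peer
classifySender(0, 3); // known peer announcing new ranges -> deduplicate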
@@ -1883,8 +1885,8 @@ export class SharedLog<
         this.timeUntilRoleMaturity =
             options?.timeUntilRoleMaturity ?? WAIT_FOR_ROLE_MATURITY;
         this.waitForReplicatorTimeout =
-            options?.waitForReplicatorTimeout
-        this.waitForPruneDelay = options?.waitForPruneDelay
+            options?.waitForReplicatorTimeout ?? WAIT_FOR_REPLICATOR_TIMEOUT;
+        this.waitForPruneDelay = options?.waitForPruneDelay ?? WAIT_FOR_PRUNE_DELAY;
 
         if (this.waitForReplicatorTimeout < this.timeUntilRoleMaturity) {
             this.waitForReplicatorTimeout = this.timeUntilRoleMaturity; // does not makes sense to expect a replicator to mature faster than it is reachable
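Both options now fall back to the module constants, and the existing clamp preserves the invariant waitForReplicatorTimeout >= timeUntilRoleMaturity. A self-contained sketch of the resolved behavior, inlining the constant values (9000 and 5000) from this diff:

function resolveTimeouts(options?: {
    timeUntilRoleMaturity?: number;
    waitForReplicatorTimeout?: number;
}) {
    const timeUntilRoleMaturity = options?.timeUntilRoleMaturity ?? 5000;
    let waitForReplicatorTimeout = options?.waitForReplicatorTimeout ?? 9000;
    // A replicator cannot mature before it is even reachable, so clamp.
    if (waitForReplicatorTimeout < timeUntilRoleMaturity) {
        waitForReplicatorTimeout = timeUntilRoleMaturity;
    }
    return { timeUntilRoleMaturity, waitForReplicatorTimeout };
}

resolveTimeouts({ waitForReplicatorTimeout: 1000, timeUntilRoleMaturity: 5000 });
// -> waitForReplicatorTimeout is raised to 5000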
@@ -2392,15 +2394,35 @@ export class SharedLog<
                 set.add(key);
             }
 
-
-
-
-
-
+            if (options?.reachableOnly) {
+                // Prefer the live pubsub subscriber set when filtering reachability.
+                // `uniqueReplicators` is primarily driven by replication messages and can lag during
+                // joins/restarts; using subscribers prevents excluding peers that are reachable but
+                // whose replication ranges were loaded from disk or haven't been processed yet.
+                const subscribers =
+                    (await this.node.services.pubsub.getSubscribers(this.topic)) ??
+                    undefined;
+                const subscriberHashcodes = subscribers
+                    ? new Set(subscribers.map((key) => key.hashcode()))
+                    : undefined;
+
+                const reachable: string[] = [];
+                const selfHash = this.node.identity.publicKey.hashcode();
+                for (const peer of set) {
+                    if (peer === selfHash) {
+                        reachable.push(peer);
+                        continue;
+                    }
+                    if (
+                        subscriberHashcodes
+                            ? subscriberHashcodes.has(peer)
+                            : this.uniqueReplicators.has(peer)
+                    ) {
+                        reachable.push(peer);
+                    }
                 }
+                return reachable;
             }
-            return reachableSet;
-        }
 
             return [...set];
         } catch (error) {
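Extracted as a standalone function (illustrative types, not the package's API), the new reachability filter behaves like this:

function filterReachable(
    peers: Set<string>,
    selfHash: string,
    subscriberHashcodes: Set<string> | undefined,
    uniqueReplicators: Set<string>,
): string[] {
    const reachable: string[] = [];
    for (const peer of peers) {
        if (
            peer === selfHash || // we are always reachable to ourselves
            (subscriberHashcodes
                ? subscriberHashcodes.has(peer) // live subscriber info available
                : uniqueReplicators.has(peer)) // fallback: known replicators
        ) {
            reachable.push(peer);
        }
    }
    return reachable;
}

filterReachable(new Set(["me", "a", "b"]), "me", undefined, new Set(["a"]));
// -> ["me", "a"]  (no subscriber info: fall back to known replicators)
filterReachable(new Set(["me", "a", "b"]), "me", new Set(["b"]), new Set(["a"]));
// -> ["me", "b"]  (live subscriber set wins over lagging replicator info)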
@@ -3140,25 +3162,29 @@ export class SharedLog<
         },
     ): Promise<void> {
         let entriesToReplicate: Entry<T>[] = [];
-        if (options?.replicate) {
+        if (options?.replicate && this.log.length > 0) {
             // TODO this block should perhaps be called from a callback on the this.log.join method on all the ignored element because already joined, like "onAlreadyJoined"
 
             // check which entrise we already have but not are replicating, and replicate them
             // we can not just do the 'join' call because it will ignore the already joined entries
             for (const element of entries) {
                 if (typeof element === "string") {
-
-
-
+                    if (await this.log.has(element)) {
+                        const entry = await this.log.get(element);
+                        if (entry) {
+                            entriesToReplicate.push(entry);
+                        }
                     }
                 } else if (element instanceof Entry) {
                     if (await this.log.has(element.hash)) {
                         entriesToReplicate.push(element);
                     }
                 } else {
-
-
-
+                    if (await this.log.has(element.hash)) {
+                        const entry = await this.log.get(element.hash);
+                        if (entry) {
+                            entriesToReplicate.push(entry);
+                        }
                     }
                 }
             }
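The join path now resolves string hashes and shallow entry references to full Entry objects before replicating, instead of leaving those branches empty, and short-circuits entirely when the local log is empty. A compact sketch of the same normalization; the LogLike shape and import path are assumptions, and the package's actual Log API may differ:

import { Entry } from "@peerbit/log"; // import path assumed

interface LogLike<T> {
    has(hash: string): Promise<boolean>;
    get(hash: string): Promise<Entry<T> | undefined>;
}

// Keep only elements the local log already stores, resolving each to a full Entry.
async function collectAlreadyJoined<T>(
    log: LogLike<T>,
    elements: (string | Entry<T> | { hash: string })[],
): Promise<Entry<T>[]> {
    const out: Entry<T>[] = [];
    for (const element of elements) {
        if (typeof element === "string") {
            if (await log.has(element)) {
                const entry = await log.get(element);
                if (entry) out.push(entry);
            }
        } else if (await log.has(element.hash)) {
            // Full entries can be used as-is; shallow references are resolved.
            const entry =
                element instanceof Entry ? element : await log.get(element.hash);
            if (entry) out.push(entry);
        }
    }
    return out;
}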
@@ -3750,7 +3776,7 @@ export class SharedLog<
         for (const [k, v] of this._requestIPruneResponseReplicatorSet) {
             v.delete(publicKey.hashcode());
             if (v.size === 0) {
-                this.
+                this._requestIPruneResponseReplicatorSet.delete(k);
             }
         }
 
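The map cleanup now deletes emptied sets by key. An aside on why this is safe: deleting the current key while iterating a JavaScript Map, as the loop above does, is well-defined; removed entries are simply not revisited. A tiny demo:

const pending = new Map<string, Set<string>>([
    ["entryA", new Set(["peer1"])],
    ["entryB", new Set(["peer1", "peer2"])],
]);
for (const [k, v] of pending) {
    v.delete("peer1");
    if (v.size === 0) pending.delete(k); // safe: removes "entryA" mid-iteration
}
// pending is now Map { "entryB" => Set { "peer2" } }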
@@ -4100,8 +4126,13 @@ export class SharedLog<
         );
     }
 
-    async waitForPruned(
-
+    async waitForPruned(options?: {
+        timeout?: number;
+        signal?: AbortSignal;
+        delayInterval?: number;
+        timeoutMessage?: string;
+    }) {
+        await waitFor(() => this._pendingDeletes.size === 0, options);
     }
 
     async onReplicationChange(
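waitForPruned now accepts the standard polling options and delegates to waitFor. An illustrative stand-in for that helper; the real one ships with Peerbit's time utilities, and the signature here is an assumption matching the options object above:

async function waitForSketch(
    condition: () => boolean,
    options?: {
        timeout?: number;
        signal?: AbortSignal;
        delayInterval?: number;
        timeoutMessage?: string;
    },
): Promise<void> {
    const timeout = options?.timeout ?? 10_000;
    const delayInterval = options?.delayInterval ?? 50;
    const deadline = Date.now() + timeout;
    while (!condition()) {
        if (options?.signal?.aborted) throw new Error("Aborted");
        if (Date.now() > deadline) {
            throw new Error(options?.timeoutMessage ?? "Timed out");
        }
        await new Promise((r) => setTimeout(r, delayInterval)); // poll
    }
}

// Usage mirroring the method above:
// await waitForSketch(() => pendingDeletes.size === 0, { timeout: 5000 });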
@@ -4280,11 +4311,24 @@ export class SharedLog<
                 cpuUsage: this.cpuUsage?.value(),
             });
 
+            const absoluteDifference = Math.abs(dynamicRange.widthNormalized - newFactor);
             const relativeDifference =
-
-
+                absoluteDifference /
+                Math.max(
+                    dynamicRange.widthNormalized,
+                    RECALCULATE_PARTICIPATION_RELATIVE_DENOMINATOR_FLOOR,
+                );
+
+            let minRelativeChange = RECALCULATE_PARTICIPATION_MIN_RELATIVE_CHANGE;
+            if (this.replicationController.maxMemoryLimit != null) {
+                minRelativeChange =
+                    RECALCULATE_PARTICIPATION_MIN_RELATIVE_CHANGE_WITH_MEMORY_LIMIT;
+            } else if (this.replicationController.maxCPUUsage != null) {
+                minRelativeChange =
+                    RECALCULATE_PARTICIPATION_MIN_RELATIVE_CHANGE_WITH_CPU_LIMIT;
+            }
 
-            if (relativeDifference >
+            if (relativeDifference > minRelativeChange) {
                 // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects
                 dynamicRange = new this.indexableDomain.constructorRange({
                     offset: dynamicRange.start1,