@peerbit/shared-log 12.3.1 → 12.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/benchmark/pid-convergence.js.map +1 -1
- package/dist/benchmark/rateless-iblt-sender-startsync.js.map +1 -1
- package/dist/benchmark/rateless-iblt-startsync-cache.js.map +1 -1
- package/dist/benchmark/sync-catchup.d.ts.map +1 -1
- package/dist/benchmark/sync-catchup.js +2 -2
- package/dist/benchmark/sync-catchup.js.map +1 -1
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js.map +1 -1
- package/dist/src/ranges.d.ts.map +1 -1
- package/dist/src/ranges.js +3 -2
- package/dist/src/ranges.js.map +1 -1
- package/dist/src/sync/rateless-iblt.d.ts.map +1 -1
- package/dist/src/sync/rateless-iblt.js +3 -2
- package/dist/src/sync/rateless-iblt.js.map +1 -1
- package/dist/src/sync/simple.d.ts +1 -1
- package/dist/src/sync/simple.d.ts.map +1 -1
- package/dist/src/sync/simple.js.map +1 -1
- package/package.json +7 -7
- package/src/index.ts +79 -74
- package/src/ranges.ts +22 -19
- package/src/sync/rateless-iblt.ts +6 -8
- package/src/sync/simple.ts +5 -8
package/src/index.ts
CHANGED
@@ -2445,35 +2445,35 @@ export class SharedLog<
     set.add(key);
   }

-
-
-
-
-
-
-
-
-
-
-
+  if (options?.reachableOnly) {
+    // Prefer the live pubsub subscriber set when filtering reachability.
+    // `uniqueReplicators` is primarily driven by replication messages and can lag during
+    // joins/restarts; using subscribers prevents excluding peers that are reachable but
+    // whose replication ranges were loaded from disk or haven't been processed yet.
+    const subscribers =
+      (await this.node.services.pubsub.getSubscribers(this.topic)) ??
+      undefined;
+    const subscriberHashcodes = subscribers
+      ? new Set(subscribers.map((key) => key.hashcode()))
+      : undefined;

-
-
-
-
-
-
-
-
-
-
-
-
-
-      }
+    const reachable: string[] = [];
+    const selfHash = this.node.identity.publicKey.hashcode();
+    for (const peer of set) {
+      if (peer === selfHash) {
+        reachable.push(peer);
+        continue;
+      }
+      if (
+        subscriberHashcodes
+          ? subscriberHashcodes.has(peer)
+          : this.uniqueReplicators.has(peer)
+      ) {
+        reachable.push(peer);
       }
-      return reachable;
     }
+    return reachable;
+  }

   return [...set];
 } catch (error) {
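In short, the new `reachableOnly` branch reduces to a filter over the collected peer set: always keep the local peer, and otherwise admit a peer only if it appears in the live subscriber list, falling back to the `uniqueReplicators` index when no subscriber list could be fetched. A minimal TypeScript sketch of that predicate as a standalone function (the function name and plain string hashcodes are illustrative, not part of the package's API):

const filterReachable = (
  peers: Iterable<string>,
  selfHash: string,
  subscriberHashcodes: Set<string> | undefined,
  uniqueReplicators: Set<string>,
): string[] =>
  // Keep self; otherwise prefer live subscribers and fall back to known replicators.
  [...peers].filter(
    (peer) =>
      peer === selfHash ||
      (subscriberHashcodes
        ? subscriberHashcodes.has(peer)
        : uniqueReplicators.has(peer)),
  );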
@@ -2952,60 +2952,63 @@ export class SharedLog<
     }
   }
 } else if (
-
-
-
-
+  msg instanceof AllReplicatingSegmentsMessage ||
+  msg instanceof AddedReplicationSegmentMessage
+) {
+  if (context.from.equals(this.node.identity.publicKey)) {
+    return;
+  }
+
+  const replicationInfoMessage = msg as
+    | AllReplicatingSegmentsMessage
+    | AddedReplicationSegmentMessage;
+
+  // Process replication updates even if the sender isn't yet considered "ready" by
+  // `Program.waitFor()`. Dropping these messages can lead to missing replicator info
+  // (and downstream `waitForReplicator()` timeouts) under timing-sensitive joins.
+  const from = context.from!;
+  const messageTimestamp = context.message.header.timestamp;
+  (async () => {
+    const prev = this.latestReplicationInfoMessage.get(from.hashcode());
+    if (prev && prev > messageTimestamp) {
       return;
     }

-
-
-
-
-  // Process replication updates even if the sender isn't yet considered "ready" by
-  // `Program.waitFor()`. Dropping these messages can lead to missing replicator info
-  // (and downstream `waitForReplicator()` timeouts) under timing-sensitive joins.
-  const from = context.from!;
-  const messageTimestamp = context.message.header.timestamp;
-  (async () => {
-    const prev = this.latestReplicationInfoMessage.get(from.hashcode());
-    if (prev && prev > messageTimestamp) {
-      return;
-    }
-
-    this.latestReplicationInfoMessage.set(from.hashcode(), messageTimestamp);
+    this.latestReplicationInfoMessage.set(
+      from.hashcode(),
+      messageTimestamp,
+    );

-
-
-
+    if (this.closed) {
+      return;
+    }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      return;
-    }
-    logger.error(
-      `Failed to apply replication settings from '${from.hashcode()}': ${
-        e?.message ?? e
-      }`,
-    );
-  });
-} else if (msg instanceof StoppedReplicating) {
-  if (context.from.equals(this.node.identity.publicKey)) {
+    const reset = msg instanceof AllReplicatingSegmentsMessage;
+    await this.addReplicationRange(
+      replicationInfoMessage.segments.map((x) =>
+        x.toReplicationRangeIndexable(from),
+      ),
+      from,
+      {
+        reset,
+        checkDuplicates: true,
+        timestamp: Number(messageTimestamp),
+      },
+    );
+  })().catch((e) => {
+    if (isNotStartedError(e)) {
       return;
     }
+    logger.error(
+      `Failed to apply replication settings from '${from.hashcode()}': ${
+        e?.message ?? e
+      }`,
+    );
+  });
+} else if (msg instanceof StoppedReplicating) {
+  if (context.from.equals(this.node.identity.publicKey)) {
+    return;
+  }

   const rangesToRemove = await this.resolveReplicationRangesFromIdsAndKey(
     msg.segmentIds,
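Alongside moving the handling earlier, the rewritten branch drops out-of-order updates by remembering the newest replication-info timestamp seen per sender. A small sketch of that guard in isolation (a hypothetical helper; bigint timestamps are assumed, matching the message header timestamps used above):

// Remember the newest replication-info timestamp per peer hashcode and
// skip messages that are older than one already applied.
const latestReplicationInfoMessage = new Map<string, bigint>();

const shouldApply = (peerHashcode: string, timestamp: bigint): boolean => {
  const prev = latestReplicationInfoMessage.get(peerHashcode);
  if (prev !== undefined && prev > timestamp) {
    return false; // stale: a newer update from this peer was already processed
  }
  latestReplicationInfoMessage.set(peerHashcode, timestamp);
  return true;
};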
@@ -4466,7 +4469,9 @@ export class SharedLog<
   cpuUsage: this.cpuUsage?.value(),
 });

-const absoluteDifference = Math.abs(
+const absoluteDifference = Math.abs(
+  dynamicRange.widthNormalized - newFactor,
+);
 const relativeDifference =
   absoluteDifference /
   Math.max(
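For intuition, the surrounding logic compares the dynamic range's current normalized width against the newly computed factor and converts the gap into a relative difference. A worked example (the `Math.max(...)` arguments are truncated in this diff, so the denominator here is an assumption):

const widthNormalized = 0.5; // current normalized width of the dynamic range
const newFactor = 0.45; // newly computed target factor
const absoluteDifference = Math.abs(widthNormalized - newFactor); // 0.05
// Assuming the denominator is the larger of the two values:
const relativeDifference =
  absoluteDifference / Math.max(widthNormalized, newFactor); // 0.1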
package/src/ranges.ts
CHANGED
@@ -2255,22 +2255,22 @@ export const getCoverSet = async <R extends "u32" | "u64">(properties: {
   return next;
 };

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+const resolveNextAbove = async (
+  nextLocation: NumberFromType<R>,
+  roleAge: number,
+) => {
+  // if not get closest from above
+  const next = await fetchOne<undefined, R>(
+    getClosest("above", peers, nextLocation, true, properties.numbers, {
+      time: {
+        matured: true,
+        roleAgeLimit: roleAge,
+        now,
+      },
+    }),
+  );
+  return next;
+};

 const resolveNext = async (
   nextLocation: NumberFromType<R>,
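Conceptually, the new `resolveNextAbove` helper asks the range index for the closest matured range that starts strictly above a given location, where "matured" means the range has existed for at least the role-age limit. A self-contained sketch over an in-memory array (hypothetical types and field names; the real helper queries the index through `getClosest` and `fetchOne`):

type IndexedRange = { start: number; timestamp: number; hash: string };

// Return the closest range starting above `location` whose age (now - timestamp)
// meets the role-age limit; undefined when no such range exists.
const resolveNextAboveSketch = (
  ranges: IndexedRange[],
  location: number,
  roleAgeLimit: number,
  now: number,
): IndexedRange | undefined =>
  ranges
    .filter((r) => r.start > location && now - r.timestamp >= roleAgeLimit)
    .sort((a, b) => a.start - b.start)[0];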
@@ -2366,7 +2366,8 @@ export const getCoverSet = async <R extends "u32" | "u64">(properties: {
 current = nextCandidate[0];

 const isLast =
-  distanceBefore < widthToCoverScaled &&
+  distanceBefore < widthToCoverScaled &&
+  coveredLength >= widthToCoverScaled;

 const lastDistanceToEndLocation = properties.numbers.min(
   getDistance(
@@ -2642,8 +2643,10 @@ export const mergeReplicationChanges = <R extends NumericType>(
 const newer = v[j];
 if (newer.type === "added" && !newer.matured) {
   adjusted = true;
-  const {
-
+  const {
+    rangesFromA: updatedRemoved,
+    rangesFromB: updatedNewer,
+  } = symmetricDifferenceRanges(v[i].range, newer.range);

   for (const diff of updatedRemoved) {
     results.push({
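For context, the destructured `rangesFromA`/`rangesFromB` values are the two halves of a symmetric difference between an existing range and a newer, not-yet-matured one: the parts of each range not covered by the other. A hedged sketch on plain one-dimensional intervals (a hypothetical helper, not the package's `symmetricDifferenceRanges`):

type Interval = { start: number; end: number }; // half-open [start, end)

const symmetricDifferenceSketch = (a: Interval, b: Interval) => {
  const rangesFromA: Interval[] = []; // parts of `a` not covered by `b`
  const rangesFromB: Interval[] = []; // parts of `b` not covered by `a`
  if (a.end <= b.start || b.end <= a.start) {
    // Disjoint: both intervals survive unchanged.
    rangesFromA.push(a);
    rangesFromB.push(b);
  } else {
    if (a.start < b.start) rangesFromA.push({ start: a.start, end: b.start });
    if (a.end > b.end) rangesFromA.push({ start: b.end, end: a.end });
    if (b.start < a.start) rangesFromB.push({ start: b.start, end: a.start });
    if (b.end > a.end) rangesFromB.push({ start: a.end, end: b.end });
  }
  return { rangesFromA, rangesFromB };
};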
package/src/sync/rateless-iblt.ts
CHANGED
@@ -13,10 +13,7 @@ import type { RPC, RequestContext } from "@peerbit/rpc";
 import { SilentDelivery } from "@peerbit/stream-interface";
 import { type EntryWithRefs } from "../exchange-heads.js";
 import { TransportMessage } from "../message.js";
-import {
-  type EntryReplicated,
-  matchEntriesInRangeQuery,
-} from "../ranges.js";
+import { type EntryReplicated, matchEntriesInRangeQuery } from "../ranges.js";
 import type {
   SyncableKey,
   SynchronizerComponents,
@@ -220,9 +217,7 @@ export class RatelessIBLTSynchronizer<D extends "u32" | "u64">
   }
 >;

-constructor(
-  readonly properties: SynchronizerComponents<D>,
-) {
+constructor(readonly properties: SynchronizerComponents<D>) {
   this.simple = new SimpleSyncronizer(properties);
   this.outgoingSyncProcesses = new Map();
   this.ingoingSyncProcesses = new Map();
@@ -472,7 +467,10 @@ export class RatelessIBLTSynchronizer<D extends "u32" | "u64">

 const startSync = new StartSync({ from: start, to: end, symbols: [] });
 const encoder = new EncoderWrapper();
-if (
+if (
+  typeof BigUint64Array !== "undefined" &&
+  sortedEntries instanceof BigUint64Array
+) {
   encoder.add_symbols(sortedEntries);
 } else {
   for (const entry of sortedEntries) {
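The added guard makes the bulk path conditional on `BigUint64Array` actually being available in the runtime and on the entries already being packed into one; otherwise the loop adds symbols one at a time. A minimal sketch of the pattern with an illustrative encoder interface (the `add_symbol` fallback method is an assumption for the sketch, not the package's encoder API):

interface SymbolEncoder {
  add_symbols(symbols: BigUint64Array): void; // bulk path
  add_symbol(symbol: bigint): void; // per-entry fallback (illustrative)
}

const addEntries = (
  encoder: SymbolEncoder,
  sortedEntries: BigUint64Array | Iterable<bigint>,
): void => {
  if (
    typeof BigUint64Array !== "undefined" &&
    sortedEntries instanceof BigUint64Array
  ) {
    // Hand the packed symbols to the encoder in one call.
    encoder.add_symbols(sortedEntries);
  } else {
    // Environment lacks BigUint64Array or entries are not packed: add one by one.
    for (const entry of sortedEntries) {
      encoder.add_symbol(entry);
    }
  }
};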
package/src/sync/simple.ts
CHANGED
@@ -16,7 +16,7 @@ import {
 } from "../exchange-heads.js";
 import { TransportMessage } from "../message.js";
 import type { EntryReplicated } from "../ranges.js";
-import type {
+import type { SyncOptions, SyncableKey, Syncronizer } from "./index.js";

 @variant([0, 1])
 export class RequestMaybeSync extends TransportMessage {
@@ -157,13 +157,10 @@ export class SimpleSyncronizer<R extends "u32" | "u64">
     hashes = [...properties.entries.keys()];
   }

-  return this.rpc.send(
-
-    {
-
-      mode: new SilentDelivery({ to: properties.targets, redundancy: 1 }),
-    },
-  );
+  return this.rpc.send(new RequestMaybeSync({ hashes }), {
+    priority: 1,
+    mode: new SilentDelivery({ to: properties.targets, redundancy: 1 }),
+  });
 }

 async onMessage(