@peerbit/shared-log 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +202 -0
- package/README.md +3 -0
- package/lib/esm/exchange-heads.d.ts +42 -0
- package/lib/esm/exchange-heads.js +123 -0
- package/lib/esm/exchange-heads.js.map +1 -0
- package/lib/esm/exchange-replication.d.ts +14 -0
- package/lib/esm/exchange-replication.js +215 -0
- package/lib/esm/exchange-replication.js.map +1 -0
- package/lib/esm/index.d.ts +68 -0
- package/lib/esm/index.js +411 -0
- package/lib/esm/index.js.map +1 -0
- package/lib/esm/message.d.ts +2 -0
- package/lib/esm/message.js +13 -0
- package/lib/esm/message.js.map +1 -0
- package/lib/esm/package.json +3 -0
- package/lib/esm/role.d.ts +13 -0
- package/lib/esm/role.js +42 -0
- package/lib/esm/role.js.map +1 -0
- package/package.json +45 -0
- package/src/exchange-heads.ts +109 -0
- package/src/exchange-replication.ts +206 -0
- package/src/index.ts +596 -0
- package/src/message.ts +4 -0
- package/src/role.ts +26 -0
package/src/index.ts
ADDED
|
@@ -0,0 +1,596 @@
|
|
|
1
|
+
import { QueryContext, RPC } from "@peerbit/rpc";
|
|
2
|
+
import { TransportMessage } from "./message.js";
|
|
3
|
+
import {
|
|
4
|
+
AppendOptions,
|
|
5
|
+
Entry,
|
|
6
|
+
Log,
|
|
7
|
+
LogEvents,
|
|
8
|
+
LogProperties,
|
|
9
|
+
} from "@peerbit/log";
|
|
10
|
+
import {
|
|
11
|
+
AbstractProgram,
|
|
12
|
+
Address,
|
|
13
|
+
ComposableProgram,
|
|
14
|
+
ProgramInitializationOptions,
|
|
15
|
+
} from "@peerbit/program";
|
|
16
|
+
import {
|
|
17
|
+
BinaryReader,
|
|
18
|
+
BinaryWriter,
|
|
19
|
+
BorshError,
|
|
20
|
+
deserialize,
|
|
21
|
+
field,
|
|
22
|
+
serialize,
|
|
23
|
+
variant,
|
|
24
|
+
} from "@dao-xyz/borsh";
|
|
25
|
+
import {
|
|
26
|
+
AccessError,
|
|
27
|
+
getKeypairFromPeerId,
|
|
28
|
+
getPublicKeyFromPeerId,
|
|
29
|
+
sha256,
|
|
30
|
+
sha256Base64Sync,
|
|
31
|
+
} from "@peerbit/crypto";
|
|
32
|
+
import { logger as loggerFn } from "@peerbit/logger";
|
|
33
|
+
import {
|
|
34
|
+
AbsolutMinReplicas,
|
|
35
|
+
EntryWithRefs,
|
|
36
|
+
ExchangeHeadsMessage,
|
|
37
|
+
MinReplicas,
|
|
38
|
+
createExchangeHeadsMessage,
|
|
39
|
+
} from "./exchange-heads.js";
|
|
40
|
+
import {
|
|
41
|
+
Subscription,
|
|
42
|
+
SubscriptionEvent,
|
|
43
|
+
UnsubcriptionEvent,
|
|
44
|
+
} from "@peerbit/pubsub-interface";
|
|
45
|
+
import { startsWith } from "@peerbit/uint8arrays";
|
|
46
|
+
import { TimeoutError } from "@peerbit/time";
|
|
47
|
+
import {
|
|
48
|
+
REPLICATOR_TYPE_VARIANT,
|
|
49
|
+
Observer,
|
|
50
|
+
Replicator,
|
|
51
|
+
SubscriptionType,
|
|
52
|
+
} from "./role.js";
|
|
53
|
+
|
|
54
|
+
// Re-export the role types so consumers can configure a SharedLog
// without importing ./role.js directly.
export { Observer, Replicator, SubscriptionType };

// Shared logger instance for this module.
export const logger = loggerFn({ module: "peer" });
|
|
57
|
+
|
|
58
|
+
const groupByGid = async <T extends Entry<any> | EntryWithRefs<any>>(
|
|
59
|
+
entries: T[]
|
|
60
|
+
): Promise<Map<string, T[]>> => {
|
|
61
|
+
const groupByGid: Map<string, T[]> = new Map();
|
|
62
|
+
for (const head of entries) {
|
|
63
|
+
const gid = await (head instanceof Entry
|
|
64
|
+
? head.getGid()
|
|
65
|
+
: head.entry.getGid());
|
|
66
|
+
let value = groupByGid.get(gid);
|
|
67
|
+
if (!value) {
|
|
68
|
+
value = [];
|
|
69
|
+
groupByGid.set(gid, value);
|
|
70
|
+
}
|
|
71
|
+
value.push(head);
|
|
72
|
+
}
|
|
73
|
+
return groupByGid;
|
|
74
|
+
};
|
|
75
|
+
|
|
76
|
+
// Predicate deciding whether a received entry should be kept locally,
// overriding the default leader-based replication decision.
export type SyncFilter = (entries: Entry<any>) => Promise<boolean> | boolean;

export interface SharedLogOptions {
	// Desired number of replicas per entry; defaults to DEFAULT_MIN_REPLICAS.
	minReplicas?: number;
	// Optional filter that bypasses leader selection for incoming heads.
	sync?: SyncFilter;
	// Role this node announces on the topic; defaults to Replicator.
	role?: SubscriptionType;
}

export const DEFAULT_MIN_REPLICAS = 2;

// Options accepted by SharedLog.open: underlying log configuration plus
// shared-log specific replication settings.
export type Args<T> = LogProperties<T> & LogEvents<T> & SharedLogOptions;
|
|
87
|
+
@variant("shared_log")
|
|
88
|
+
export class SharedLog<T> extends ComposableProgram<Args<T>> {
|
|
89
|
+
@field({ type: Log })
|
|
90
|
+
log: Log<T>;
|
|
91
|
+
|
|
92
|
+
@field({ type: RPC })
|
|
93
|
+
rpc: RPC<TransportMessage, TransportMessage>;
|
|
94
|
+
|
|
95
|
+
// options
|
|
96
|
+
private _minReplicas: MinReplicas;
|
|
97
|
+
private _sync?: SyncFilter;
|
|
98
|
+
private _role: SubscriptionType;
|
|
99
|
+
|
|
100
|
+
private _sortedPeersCache: string[] | undefined;
|
|
101
|
+
private _lastSubscriptionMessageId: number;
|
|
102
|
+
private _gidPeersHistory: Map<string, Set<string>>;
|
|
103
|
+
|
|
104
|
+
private _onSubscriptionFn: (arg: any) => any;
|
|
105
|
+
private _onUnsubscriptionFn: (arg: any) => any;
|
|
106
|
+
private _logProperties?: LogProperties<T> & LogEvents<T>;
|
|
107
|
+
|
|
108
|
+
constructor(properties?: { id?: Uint8Array }) {
|
|
109
|
+
super();
|
|
110
|
+
this.log = new Log(properties);
|
|
111
|
+
this.rpc = new RPC();
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
get minReplicas() {
|
|
115
|
+
return this._minReplicas;
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
set minReplicas(minReplicas: MinReplicas) {
|
|
119
|
+
this._minReplicas = minReplicas;
|
|
120
|
+
}
|
|
121
|
+
get role(): SubscriptionType {
|
|
122
|
+
return this._role;
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
async append(
|
|
126
|
+
data: T,
|
|
127
|
+
options?: AppendOptions<T> | undefined
|
|
128
|
+
): Promise<{
|
|
129
|
+
entry: Entry<T>;
|
|
130
|
+
removed: Entry<T>[];
|
|
131
|
+
}> {
|
|
132
|
+
const result = await this.log.append(data, options);
|
|
133
|
+
await this.rpc.send(
|
|
134
|
+
await createExchangeHeadsMessage(this.log, [result.entry], true)
|
|
135
|
+
);
|
|
136
|
+
return result;
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
async open(options?: Args<T>): Promise<void> {
|
|
140
|
+
this._minReplicas = new AbsolutMinReplicas(options?.minReplicas || 2);
|
|
141
|
+
this._sync = options?.sync;
|
|
142
|
+
this._role = options?.role || new Replicator();
|
|
143
|
+
this._logProperties = options;
|
|
144
|
+
|
|
145
|
+
this._lastSubscriptionMessageId = 0;
|
|
146
|
+
this._onSubscriptionFn = this._onSubscription.bind(this);
|
|
147
|
+
|
|
148
|
+
this._sortedPeersCache = [];
|
|
149
|
+
this._gidPeersHistory = new Map();
|
|
150
|
+
|
|
151
|
+
this.node.services.pubsub.addEventListener(
|
|
152
|
+
"subscribe",
|
|
153
|
+
this._onSubscriptionFn
|
|
154
|
+
);
|
|
155
|
+
|
|
156
|
+
this._onUnsubscriptionFn = this._onUnsubscription.bind(this);
|
|
157
|
+
this.node.services.pubsub.addEventListener(
|
|
158
|
+
"unsubscribe",
|
|
159
|
+
this._onUnsubscriptionFn
|
|
160
|
+
);
|
|
161
|
+
|
|
162
|
+
await this.log.open(this.node.services.blocks, this.node.identity, {
|
|
163
|
+
keychain: this.node.keychain,
|
|
164
|
+
|
|
165
|
+
...this._logProperties,
|
|
166
|
+
trim: this._logProperties?.trim && {
|
|
167
|
+
...this._logProperties?.trim,
|
|
168
|
+
filter: {
|
|
169
|
+
canTrim: async (gid) => !(await this.isLeader(gid)), // TODO types
|
|
170
|
+
cacheId: () => this._lastSubscriptionMessageId,
|
|
171
|
+
},
|
|
172
|
+
},
|
|
173
|
+
cache:
|
|
174
|
+
this.node.memory &&
|
|
175
|
+
(await this.node.memory.sublevel(sha256Base64Sync(this.log.id))),
|
|
176
|
+
});
|
|
177
|
+
|
|
178
|
+
try {
|
|
179
|
+
if (this._role instanceof Replicator) {
|
|
180
|
+
this.modifySortedSubscriptionCache(
|
|
181
|
+
true,
|
|
182
|
+
getPublicKeyFromPeerId(this.node.peerId).hashcode()
|
|
183
|
+
);
|
|
184
|
+
await this.log.load();
|
|
185
|
+
} else {
|
|
186
|
+
await this.log.load({ heads: true, reload: true });
|
|
187
|
+
}
|
|
188
|
+
} catch (error) {
|
|
189
|
+
if (error instanceof AccessError) {
|
|
190
|
+
logger.error(
|
|
191
|
+
"Failed to load all entries due to access error, make sure you are opening the program with approate keychain configuration"
|
|
192
|
+
);
|
|
193
|
+
} else {
|
|
194
|
+
throw error;
|
|
195
|
+
}
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
// Take into account existing subscription
|
|
199
|
+
this.node.services.pubsub.getSubscribers(this.topic)?.forEach((v, k) => {
|
|
200
|
+
this.handleSubscriptionChange(
|
|
201
|
+
k,
|
|
202
|
+
[{ topic: this.topic, data: v.data }],
|
|
203
|
+
true
|
|
204
|
+
);
|
|
205
|
+
});
|
|
206
|
+
|
|
207
|
+
// Open for communcation
|
|
208
|
+
await this.rpc.open({
|
|
209
|
+
queryType: TransportMessage,
|
|
210
|
+
responseType: TransportMessage,
|
|
211
|
+
responseHandler: this._onMessage.bind(this),
|
|
212
|
+
topic: this.topic,
|
|
213
|
+
subscriptionData: serialize(this.role),
|
|
214
|
+
});
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
get topic() {
|
|
218
|
+
return this.log.idString;
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
private async _close() {
|
|
222
|
+
this._gidPeersHistory = new Map();
|
|
223
|
+
this._sortedPeersCache = undefined;
|
|
224
|
+
|
|
225
|
+
this.node.services.pubsub.removeEventListener(
|
|
226
|
+
"subscribe",
|
|
227
|
+
this._onSubscriptionFn
|
|
228
|
+
);
|
|
229
|
+
|
|
230
|
+
this._onUnsubscriptionFn = this._onUnsubscription.bind(this);
|
|
231
|
+
this.node.services.pubsub.removeEventListener(
|
|
232
|
+
"unsubscribe",
|
|
233
|
+
this._onUnsubscriptionFn
|
|
234
|
+
);
|
|
235
|
+
}
|
|
236
|
+
async close(from?: AbstractProgram): Promise<boolean> {
|
|
237
|
+
const superClosed = await super.close(from);
|
|
238
|
+
|
|
239
|
+
if (!superClosed) {
|
|
240
|
+
return superClosed;
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
await this._close();
|
|
244
|
+
await this.log.close();
|
|
245
|
+
return superClosed;
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
async drop(): Promise<void> {
|
|
249
|
+
await this._close();
|
|
250
|
+
await this.log.drop();
|
|
251
|
+
return super.drop();
|
|
252
|
+
}
|
|
253
|
+
|
|
254
|
+
// Callback for receiving a message from the network
|
|
255
|
+
async _onMessage(
|
|
256
|
+
msg: TransportMessage,
|
|
257
|
+
context: QueryContext
|
|
258
|
+
): Promise<TransportMessage | undefined> {
|
|
259
|
+
try {
|
|
260
|
+
if (msg instanceof ExchangeHeadsMessage) {
|
|
261
|
+
/**
|
|
262
|
+
* I have recieved heads from someone else.
|
|
263
|
+
* I can use them to load associated logs and join/sync them with the data stores I own
|
|
264
|
+
*/
|
|
265
|
+
|
|
266
|
+
const { heads } = msg;
|
|
267
|
+
// replication topic === trustedNetwork address
|
|
268
|
+
|
|
269
|
+
logger.debug(
|
|
270
|
+
`${this.node.identity.publicKey.hashcode()}: Recieved heads: ${
|
|
271
|
+
heads.length === 1 ? heads[0].entry.hash : "#" + heads.length
|
|
272
|
+
}, logId: ${this.log.idString}`
|
|
273
|
+
);
|
|
274
|
+
if (heads) {
|
|
275
|
+
const filteredHeads: EntryWithRefs<any>[] = [];
|
|
276
|
+
for (const head of heads) {
|
|
277
|
+
if (!this.log.has(head.entry.hash)) {
|
|
278
|
+
head.entry.init({
|
|
279
|
+
// we need to init because we perhaps need to decrypt gid
|
|
280
|
+
keychain: this.log.keychain,
|
|
281
|
+
encoding: this.log.encoding,
|
|
282
|
+
});
|
|
283
|
+
filteredHeads.push(head);
|
|
284
|
+
}
|
|
285
|
+
}
|
|
286
|
+
|
|
287
|
+
let toMerge: EntryWithRefs<any>[];
|
|
288
|
+
if (!this._sync) {
|
|
289
|
+
toMerge = [];
|
|
290
|
+
for (const [gid, value] of await groupByGid(filteredHeads)) {
|
|
291
|
+
if (!(await this.isLeader(gid, this._minReplicas.value))) {
|
|
292
|
+
logger.debug(
|
|
293
|
+
`${this.node.identity.publicKey.hashcode()}: Dropping heads with gid: ${gid}. Because not leader`
|
|
294
|
+
);
|
|
295
|
+
continue;
|
|
296
|
+
}
|
|
297
|
+
for (const head of value) {
|
|
298
|
+
toMerge.push(head);
|
|
299
|
+
}
|
|
300
|
+
}
|
|
301
|
+
} else {
|
|
302
|
+
toMerge = await Promise.all(
|
|
303
|
+
filteredHeads.map((x) => this._sync!(x.entry))
|
|
304
|
+
).then((filter) => filteredHeads.filter((v, ix) => filter[ix]));
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
if (toMerge.length > 0) {
|
|
308
|
+
await this.log.join(toMerge);
|
|
309
|
+
}
|
|
310
|
+
}
|
|
311
|
+
} else {
|
|
312
|
+
throw new Error("Unexpected message");
|
|
313
|
+
}
|
|
314
|
+
} catch (e: any) {
|
|
315
|
+
if (e instanceof BorshError) {
|
|
316
|
+
logger.trace(
|
|
317
|
+
`${this.node.identity.publicKey.hashcode()}: Failed to handle message on topic: ${JSON.stringify(
|
|
318
|
+
this.log.idString
|
|
319
|
+
)}: Got message for a different namespace`
|
|
320
|
+
);
|
|
321
|
+
return;
|
|
322
|
+
}
|
|
323
|
+
if (e instanceof AccessError) {
|
|
324
|
+
logger.trace(
|
|
325
|
+
`${this.node.identity.publicKey.hashcode()}: Failed to handle message for log: ${JSON.stringify(
|
|
326
|
+
this.log.idString
|
|
327
|
+
)}: Do not have permissions`
|
|
328
|
+
);
|
|
329
|
+
return;
|
|
330
|
+
}
|
|
331
|
+
logger.error(e);
|
|
332
|
+
}
|
|
333
|
+
}
|
|
334
|
+
|
|
335
|
+
getReplicatorsSorted(): string[] | undefined {
|
|
336
|
+
return this._sortedPeersCache;
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
async isLeader(
|
|
340
|
+
slot: { toString(): string },
|
|
341
|
+
numberOfLeaders: number = this.minReplicas.value
|
|
342
|
+
): Promise<boolean> {
|
|
343
|
+
const isLeader = (await this.findLeaders(slot, numberOfLeaders)).find(
|
|
344
|
+
(l) => l === this.node.identity.publicKey.hashcode()
|
|
345
|
+
);
|
|
346
|
+
return !!isLeader;
|
|
347
|
+
}
|
|
348
|
+
|
|
349
|
+
async findLeaders(
|
|
350
|
+
subject: { toString(): string },
|
|
351
|
+
numberOfLeaders: number = this.minReplicas.value
|
|
352
|
+
): Promise<string[]> {
|
|
353
|
+
// For a fixed set or members, the choosen leaders will always be the same (address invariant)
|
|
354
|
+
// This allows for that same content is always chosen to be distributed to same peers, to remove unecessary copies
|
|
355
|
+
const peers: string[] = this.getReplicatorsSorted() || [];
|
|
356
|
+
|
|
357
|
+
if (peers.length === 0) {
|
|
358
|
+
return [];
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
numberOfLeaders = Math.min(numberOfLeaders, peers.length);
|
|
362
|
+
|
|
363
|
+
// Convert this thing we wan't to distribute to 8 bytes so we get can convert it into a u64
|
|
364
|
+
// modulus into an index
|
|
365
|
+
const utf8writer = new BinaryWriter();
|
|
366
|
+
utf8writer.string(subject.toString());
|
|
367
|
+
const seed = await sha256(utf8writer.finalize());
|
|
368
|
+
|
|
369
|
+
// convert hash of slot to a number
|
|
370
|
+
const seedNumber = new BinaryReader(
|
|
371
|
+
seed.subarray(seed.length - 8, seed.length)
|
|
372
|
+
).u64();
|
|
373
|
+
const startIndex = Number(seedNumber % BigInt(peers.length));
|
|
374
|
+
|
|
375
|
+
// we only step forward 1 step (ignoring that step backward 1 could be 'closer')
|
|
376
|
+
// This does not matter, we only have to make sure all nodes running the code comes to somewhat the
|
|
377
|
+
// same conclusion (are running the same leader selection algorithm)
|
|
378
|
+
const leaders = new Array(numberOfLeaders);
|
|
379
|
+
for (let i = 0; i < numberOfLeaders; i++) {
|
|
380
|
+
leaders[i] = peers[(i + startIndex) % peers.length];
|
|
381
|
+
}
|
|
382
|
+
return leaders;
|
|
383
|
+
}
|
|
384
|
+
|
|
385
|
+
private modifySortedSubscriptionCache(subscribed: boolean, fromHash: string) {
|
|
386
|
+
const sortedPeer = this._sortedPeersCache;
|
|
387
|
+
if (!sortedPeer) {
|
|
388
|
+
if (this.closed === false) {
|
|
389
|
+
throw new Error("Unexpected, sortedPeersCache is undefined");
|
|
390
|
+
}
|
|
391
|
+
return;
|
|
392
|
+
}
|
|
393
|
+
const code = fromHash;
|
|
394
|
+
if (subscribed) {
|
|
395
|
+
// TODO use Set + list for fast lookup
|
|
396
|
+
if (!sortedPeer.find((x) => x === code)) {
|
|
397
|
+
sortedPeer.push(code);
|
|
398
|
+
sortedPeer.sort((a, b) => a.localeCompare(b));
|
|
399
|
+
}
|
|
400
|
+
} else {
|
|
401
|
+
const deleteIndex = sortedPeer.findIndex((x) => x === code);
|
|
402
|
+
sortedPeer.splice(deleteIndex, 1);
|
|
403
|
+
}
|
|
404
|
+
}
|
|
405
|
+
|
|
406
|
+
async handleSubscriptionChange(
|
|
407
|
+
fromHash: string,
|
|
408
|
+
changes: { topic: string; data?: Uint8Array }[],
|
|
409
|
+
subscribed: boolean
|
|
410
|
+
) {
|
|
411
|
+
// TODO why are we doing two loops?
|
|
412
|
+
for (const subscription of changes) {
|
|
413
|
+
if (this.log.idString !== subscription.topic) {
|
|
414
|
+
continue;
|
|
415
|
+
}
|
|
416
|
+
|
|
417
|
+
if (
|
|
418
|
+
!subscription.data ||
|
|
419
|
+
!startsWith(subscription.data, REPLICATOR_TYPE_VARIANT)
|
|
420
|
+
) {
|
|
421
|
+
continue;
|
|
422
|
+
}
|
|
423
|
+
this._lastSubscriptionMessageId += 1;
|
|
424
|
+
this.modifySortedSubscriptionCache(subscribed, fromHash);
|
|
425
|
+
}
|
|
426
|
+
|
|
427
|
+
for (const subscription of changes) {
|
|
428
|
+
if (this.log.idString !== subscription.topic) {
|
|
429
|
+
continue;
|
|
430
|
+
}
|
|
431
|
+
if (subscription.data) {
|
|
432
|
+
try {
|
|
433
|
+
const type = deserialize(subscription.data, SubscriptionType);
|
|
434
|
+
if (type instanceof Replicator) {
|
|
435
|
+
await this.replicationReorganization();
|
|
436
|
+
}
|
|
437
|
+
} catch (error: any) {
|
|
438
|
+
logger.warn(
|
|
439
|
+
"Recieved subscription with invalid data on topic: " +
|
|
440
|
+
subscription.topic +
|
|
441
|
+
". Error: " +
|
|
442
|
+
error?.message
|
|
443
|
+
);
|
|
444
|
+
}
|
|
445
|
+
}
|
|
446
|
+
}
|
|
447
|
+
}
|
|
448
|
+
|
|
449
|
+
/**
|
|
450
|
+
* When a peers join the networkk and want to participate the leaders for particular log subgraphs might change, hence some might start replicating, might some stop
|
|
451
|
+
* This method will go through my owned entries, and see whether I should share them with a new leader, and/or I should stop care about specific entries
|
|
452
|
+
* @param channel
|
|
453
|
+
*/
|
|
454
|
+
async replicationReorganization() {
|
|
455
|
+
const changed = false;
|
|
456
|
+
const heads = await this.log.getHeads();
|
|
457
|
+
const groupedByGid = await groupByGid(heads);
|
|
458
|
+
let storeChanged = false;
|
|
459
|
+
for (const [gid, entries] of groupedByGid) {
|
|
460
|
+
const toSend: Map<string, Entry<any>> = new Map();
|
|
461
|
+
const newPeers: string[] = [];
|
|
462
|
+
|
|
463
|
+
if (entries.length === 0) {
|
|
464
|
+
continue; // TODO maybe close store?
|
|
465
|
+
}
|
|
466
|
+
|
|
467
|
+
const oldPeersSet = this._gidPeersHistory.get(gid);
|
|
468
|
+
const currentPeers = await this.findLeaders(gid);
|
|
469
|
+
for (const currentPeer of currentPeers) {
|
|
470
|
+
if (
|
|
471
|
+
!oldPeersSet?.has(currentPeer) &&
|
|
472
|
+
currentPeer !== this.node.identity.publicKey.hashcode()
|
|
473
|
+
) {
|
|
474
|
+
storeChanged = true;
|
|
475
|
+
// second condition means that if the new peer is us, we should not do anything, since we are expecting to recieve heads, not send
|
|
476
|
+
newPeers.push(currentPeer);
|
|
477
|
+
|
|
478
|
+
// send heads to the new peer
|
|
479
|
+
// console.log('new gid for peer', newPeers.length, this.id.toString(), newPeer, gid, entries.length, newPeers)
|
|
480
|
+
try {
|
|
481
|
+
logger.debug(
|
|
482
|
+
`${this.node.identity.publicKey.hashcode()}: Exchange heads ${
|
|
483
|
+
entries.length === 1 ? entries[0].hash : "#" + entries.length
|
|
484
|
+
} on rebalance`
|
|
485
|
+
);
|
|
486
|
+
for (const entry of entries) {
|
|
487
|
+
toSend.set(entry.hash, entry);
|
|
488
|
+
}
|
|
489
|
+
} catch (error) {
|
|
490
|
+
if (error instanceof TimeoutError) {
|
|
491
|
+
logger.error(
|
|
492
|
+
"Missing channel when reorg to peer: " + currentPeer.toString()
|
|
493
|
+
);
|
|
494
|
+
continue;
|
|
495
|
+
}
|
|
496
|
+
throw error;
|
|
497
|
+
}
|
|
498
|
+
}
|
|
499
|
+
}
|
|
500
|
+
|
|
501
|
+
// We don't need this clause anymore because we got the trim option!
|
|
502
|
+
if (
|
|
503
|
+
!currentPeers.find((x) => x === this.node.identity.publicKey.hashcode())
|
|
504
|
+
) {
|
|
505
|
+
let entriesToDelete = entries.filter((e) => !e.createdLocally);
|
|
506
|
+
|
|
507
|
+
if (this._sync) {
|
|
508
|
+
// dont delete entries which we wish to keep
|
|
509
|
+
entriesToDelete = await Promise.all(
|
|
510
|
+
entriesToDelete.map((x) => this._sync!(x))
|
|
511
|
+
).then((filter) => entriesToDelete.filter((v, ix) => !filter[ix]));
|
|
512
|
+
}
|
|
513
|
+
|
|
514
|
+
// delete entries since we are not suppose to replicate this anymore
|
|
515
|
+
// TODO add delay? freeze time? (to ensure resiliance for bad io)
|
|
516
|
+
if (entriesToDelete.length > 0) {
|
|
517
|
+
await this.log.remove(entriesToDelete, {
|
|
518
|
+
recursively: true,
|
|
519
|
+
});
|
|
520
|
+
}
|
|
521
|
+
|
|
522
|
+
// TODO if length === 0 maybe close store?
|
|
523
|
+
}
|
|
524
|
+
this._gidPeersHistory.set(gid, new Set(currentPeers));
|
|
525
|
+
|
|
526
|
+
if (toSend.size === 0) {
|
|
527
|
+
continue;
|
|
528
|
+
}
|
|
529
|
+
const message = await createExchangeHeadsMessage(
|
|
530
|
+
this.log,
|
|
531
|
+
[...toSend.values()], // TODO send to peers directly
|
|
532
|
+
true
|
|
533
|
+
);
|
|
534
|
+
|
|
535
|
+
// TODO perhaps send less messages to more recievers for performance reasons?
|
|
536
|
+
await this.rpc.send(message, {
|
|
537
|
+
to: newPeers,
|
|
538
|
+
strict: true,
|
|
539
|
+
});
|
|
540
|
+
}
|
|
541
|
+
if (storeChanged) {
|
|
542
|
+
await this.log.trim(); // because for entries createdLocally,we can have trim options that still allow us to delete them
|
|
543
|
+
}
|
|
544
|
+
return storeChanged || changed;
|
|
545
|
+
}
|
|
546
|
+
|
|
547
|
+
replicators() {
|
|
548
|
+
// TODO Optimize this so we don't have to recreate the array all the time!
|
|
549
|
+
const minReplicas = this.minReplicas.value;
|
|
550
|
+
const replicators = this.getReplicatorsSorted();
|
|
551
|
+
if (!replicators) {
|
|
552
|
+
return []; // No subscribers and we are not replicating
|
|
553
|
+
}
|
|
554
|
+
const numberOfGroups = Math.min(
|
|
555
|
+
Math.ceil(replicators!.length / minReplicas)
|
|
556
|
+
);
|
|
557
|
+
const groups = new Array<string[]>(numberOfGroups);
|
|
558
|
+
for (let i = 0; i < groups.length; i++) {
|
|
559
|
+
groups[i] = [];
|
|
560
|
+
}
|
|
561
|
+
for (let i = 0; i < replicators!.length; i++) {
|
|
562
|
+
groups[i % numberOfGroups].push(replicators![i]);
|
|
563
|
+
}
|
|
564
|
+
return groups;
|
|
565
|
+
}
|
|
566
|
+
async replicator(gid) {
|
|
567
|
+
return this.isLeader(gid);
|
|
568
|
+
}
|
|
569
|
+
|
|
570
|
+
async _onUnsubscription(evt: CustomEvent<UnsubcriptionEvent>) {
|
|
571
|
+
logger.debug(
|
|
572
|
+
`Peer disconnected '${evt.detail.from.hashcode()}' from '${JSON.stringify(
|
|
573
|
+
evt.detail.unsubscriptions.map((x) => x.topic)
|
|
574
|
+
)}'`
|
|
575
|
+
);
|
|
576
|
+
|
|
577
|
+
return this.handleSubscriptionChange(
|
|
578
|
+
evt.detail.from.hashcode(),
|
|
579
|
+
evt.detail.unsubscriptions,
|
|
580
|
+
false
|
|
581
|
+
);
|
|
582
|
+
}
|
|
583
|
+
|
|
584
|
+
async _onSubscription(evt: CustomEvent<SubscriptionEvent>) {
|
|
585
|
+
logger.debug(
|
|
586
|
+
`New peer '${evt.detail.from.hashcode()}' connected to '${JSON.stringify(
|
|
587
|
+
evt.detail.subscriptions.map((x) => x.topic)
|
|
588
|
+
)}'`
|
|
589
|
+
);
|
|
590
|
+
return this.handleSubscriptionChange(
|
|
591
|
+
evt.detail.from.hashcode(),
|
|
592
|
+
evt.detail.subscriptions,
|
|
593
|
+
true
|
|
594
|
+
);
|
|
595
|
+
}
|
|
596
|
+
}
|
package/src/message.ts
ADDED
package/src/role.ts
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import { field, variant } from "@dao-xyz/borsh";
|
|
2
|
+
|
|
3
|
+
/**
 * Base class for the role a peer announces when subscribing to a shared-log
 * topic. Serialized with borsh into the pubsub subscription data; the 1-byte
 * variant tag identifies the concrete role.
 */
export abstract class SubscriptionType {}

// Serialized variant prefix for NoType (variant 0).
export const NO_TYPE_VARIANT = new Uint8Array([0]);

// Placeholder role: the peer declared no particular role.
@variant(0)
export class NoType extends SubscriptionType {}

// Serialized variant prefix for Observer (variant 1).
export const OBSERVER_TYPE_VARIANT = new Uint8Array([1]);
// An observer follows the log without taking responsibility for replicating it.
@variant(1)
export class Observer extends SubscriptionType {}

// Serialized variant prefix for Replicator (variant 2). Lets callers check
// the role from raw subscription bytes without a full deserialization.
export const REPLICATOR_TYPE_VARIANT = new Uint8Array([2]);
|
|
15
|
+
|
|
16
|
+
/**
 * Role announcing that this peer takes part in storing and replicating the
 * log. NOTE(review): `multiplier` is serialized but the constructor always
 * sets it to 1 — weighted replication appears unimplemented; confirm before
 * relying on it.
 */
@variant(2)
export class Replicator extends SubscriptionType {
	@field({ type: "u32" })
	multiplier: number; // 1 means I do the same amount of work as anyone else, 2 means double

	constructor() {
		// multiplier is unsupported for now, so constructor is empty
		super();
		this.multiplier = 1;
	}
}
|