@waku/core 0.0.27 → 0.0.28-070b625.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bundle/{base_protocol-LhsIWF3-.js → base_protocol-D0Zdzb-v.js} +2 -5
- package/bundle/{browser-BQyFvtq6.js → browser-DoQRY-an.js} +18 -13
- package/bundle/{index-8YyfzF9R.js → index-BJwgMx4y.js} +35 -62
- package/bundle/index.js +258 -342
- package/bundle/lib/base_protocol.js +3 -3
- package/bundle/lib/message/version_0.js +3 -3
- package/bundle/lib/predefined_bootstrap_nodes.js +17 -17
- package/bundle/{version_0-FXfzO8Km.js → version_0-C6o0DvNW.js} +566 -321
- package/dist/.tsbuildinfo +1 -1
- package/dist/index.d.ts +3 -2
- package/dist/index.js +3 -2
- package/dist/index.js.map +1 -1
- package/dist/lib/base_protocol.d.ts +4 -5
- package/dist/lib/base_protocol.js +0 -3
- package/dist/lib/base_protocol.js.map +1 -1
- package/dist/lib/filter/index.js +4 -0
- package/dist/lib/filter/index.js.map +1 -1
- package/dist/lib/light_push/index.d.ts +12 -2
- package/dist/lib/light_push/index.js +79 -76
- package/dist/lib/light_push/index.js.map +1 -1
- package/dist/lib/message/version_0.js +1 -1
- package/dist/lib/message/version_0.js.map +1 -1
- package/dist/lib/metadata/index.d.ts +1 -1
- package/dist/lib/metadata/index.js +42 -14
- package/dist/lib/metadata/index.js.map +1 -1
- package/dist/lib/predefined_bootstrap_nodes.d.ts +11 -11
- package/dist/lib/predefined_bootstrap_nodes.js +16 -16
- package/dist/lib/predefined_bootstrap_nodes.js.map +1 -1
- package/dist/lib/store/history_rpc.js +1 -1
- package/dist/lib/store/history_rpc.js.map +1 -1
- package/dist/lib/store/index.d.ts +14 -6
- package/dist/lib/store/index.js +50 -232
- package/dist/lib/store/index.js.map +1 -1
- package/dist/lib/wait_for_remote_peer.js +4 -2
- package/dist/lib/wait_for_remote_peer.js.map +1 -1
- package/package.json +1 -129
- package/src/index.ts +3 -2
- package/src/lib/base_protocol.ts +4 -9
- package/src/lib/filter/index.ts +7 -0
- package/src/lib/light_push/index.ts +97 -118
- package/src/lib/metadata/index.ts +56 -26
- package/src/lib/predefined_bootstrap_nodes.ts +22 -22
- package/src/lib/store/index.ts +77 -339
- package/src/lib/wait_for_remote_peer.ts +14 -4
package/src/lib/store/index.ts
CHANGED
@@ -1,17 +1,14 @@
-import type {
-import { sha256 } from "@noble/hashes/sha256";
+import type { Peer } from "@libp2p/interface";
 import {
   Cursor,
   IDecodedMessage,
   IDecoder,
-
+  IStoreCore,
   Libp2p,
   ProtocolCreateOptions
 } from "@waku/interfaces";
 import { proto_store as proto } from "@waku/proto";
-import { ensurePubsubTopicIsConfigured, isDefined } from "@waku/utils";
 import { Logger } from "@waku/utils";
-import { concat, utf8ToBytes } from "@waku/utils/bytes";
 import all from "it-all";
 import * as lp from "it-length-prefixed";
 import { pipe } from "it-pipe";
@@ -28,9 +25,7 @@ const log = new Logger("store");
 
 export const StoreCodec = "/vac/waku/store/2.0.0-beta4";
 
-export
-
-export { PageDirection };
+export { PageDirection, Params };
 
 export interface TimeFilter {
   startTime: Date;
@@ -72,361 +67,104 @@ export interface QueryOptions {
  *
  * The Waku Store protocol can be used to retrieved historical messages.
  */
-class
-  private readonly NUM_PEERS_PROTOCOL = 1;
-
+export class StoreCore extends BaseProtocol implements IStoreCore {
   constructor(libp2p: Libp2p, options?: ProtocolCreateOptions) {
     super(StoreCodec, libp2p.components, log, options!.pubsubTopics!, options);
   }
 
-
-
-
-
-  private async processMessages<T extends IDecodedMessage>(
-    messages: Promise<T | undefined>[],
-    callback: (message: T) => Promise<void | boolean> | boolean | void,
-    options?: QueryOptions
-  ): Promise<boolean> {
-    let abort = false;
-    const messagesOrUndef: Array<T | undefined> = await Promise.all(messages);
-    let processedMessages: Array<T> = messagesOrUndef.filter(isDefined);
-
-    if (this.shouldReverseOrder(options)) {
-      processedMessages = processedMessages.reverse();
-    }
-
-    await Promise.all(
-      processedMessages.map(async (msg) => {
-        if (msg && !abort) {
-          abort = Boolean(await callback(msg));
-        }
-      })
-    );
-
-    return abort;
-  }
-
-  /**
-   * Determines whether to reverse the order of messages based on the provided options.
-   *
-   * Messages in pages are ordered from oldest (first) to most recent (last).
-   * https://github.com/vacp2p/rfc/issues/533
-   *
-   * @private
-   */
-  private shouldReverseOrder(options?: QueryOptions): boolean {
-    return (
-      typeof options?.pageDirection === "undefined" ||
-      options?.pageDirection === PageDirection.BACKWARD
-    );
-  }
-
-  /**
-   * @deprecated Use `queryWithOrderedCallback` instead
-   **/
-  queryOrderedCallback = this.queryWithOrderedCallback;
-
-  /**
-   * Do a query to a Waku Store to retrieve historical/missed messages.
-   *
-   * The callback function takes a `WakuMessage` in input,
-   * messages are processed in order:
-   * - oldest to latest if `options.pageDirection` == { @link PageDirection.FORWARD }
-   * - latest to oldest if `options.pageDirection` == { @link PageDirection.BACKWARD }
-   *
-   * The ordering may affect performance.
-   * The ordering depends on the behavior of the remote store node.
-   * If strong ordering is needed, you may need to handle this at application level
-   * and set your own timestamps too (the WakuMessage timestamps are not certified).
-   *
-   * @throws If not able to reach a Waku Store peer to query,
-   * or if an error is encountered when processing the reply,
-   * or if two decoders with the same content topic are passed.
-   */
-  async queryWithOrderedCallback<T extends IDecodedMessage>(
-    decoders: IDecoder<T>[],
-    callback: (message: T) => Promise<void | boolean> | boolean | void,
-    options?: QueryOptions
-  ): Promise<void> {
-    for await (const promises of this.queryGenerator(decoders, options)) {
-      if (await this.processMessages(promises, callback, options)) break;
-    }
-  }
-
-  /**
-   * Do a query to a Waku Store to retrieve historical/missed messages.
-   * The callback function takes a `Promise<WakuMessage>` in input,
-   * useful if messages need to be decrypted and performance matters.
-   *
-   * The order of the messages passed to the callback is as follows:
-   * - within a page, messages are expected to be ordered from oldest to most recent
-   * - pages direction depends on { @link QueryOptions.pageDirection }
-   *
-   * Do note that the resolution of the `Promise<WakuMessage | undefined` may
-   * break the order as it may rely on the browser decryption API, which in turn,
-   * may have a different speed depending on the type of decryption.
-   *
-   * @throws If not able to reach a Waku Store peer to query,
-   * or if an error is encountered when processing the reply,
-   * or if two decoders with the same content topic are passed.
-   */
-  async queryWithPromiseCallback<T extends IDecodedMessage>(
-    decoders: IDecoder<T>[],
-    callback: (
-      message: Promise<T | undefined>
-    ) => Promise<void | boolean> | boolean | void,
-    options?: QueryOptions
-  ): Promise<void> {
-    let abort = false;
-    for await (const page of this.queryGenerator(decoders, options)) {
-      const _promises = page.map(async (msgPromise) => {
-        if (abort) return;
-        abort = Boolean(await callback(msgPromise));
-      });
-
-      await Promise.all(_promises);
-      if (abort) break;
-    }
-  }
-
-  /**
-   * Do a query to a Waku Store to retrieve historical/missed messages.
-   *
-   * This is a generator, useful if you want most control on how messages
-   * are processed.
-   *
-   * The order of the messages returned by the remote Waku node SHOULD BE
-   * as follows:
-   * - within a page, messages SHOULD be ordered from oldest to most recent
-   * - pages direction depends on { @link QueryOptions.pageDirection }
-   * @throws If not able to reach a Waku Store peer to query,
-   * or if an error is encountered when processing the reply,
-   * or if two decoders with the same content topic are passed.
-   *
-   * This API only supports querying a single pubsub topic at a time.
-   * If multiple decoders are provided, they must all have the same pubsub topic.
-   * @throws If multiple decoders with different pubsub topics are provided.
-   * @throws If no decoders are provided.
-   * @throws If no decoders are found for the provided pubsub topic.
-   */
-  async *queryGenerator<T extends IDecodedMessage>(
-    decoders: IDecoder<T>[],
-    options?: QueryOptions
+  async *queryPerPage<T extends IDecodedMessage>(
+    queryOpts: Params,
+    decoders: Map<string, IDecoder<T>>,
+    peer: Peer
   ): AsyncGenerator<Promise<T | undefined>[]> {
-    if (decoders.length === 0) {
-      throw new Error("No decoders provided");
-    }
-
-    let startTime, endTime;
-
-    if (options?.timeFilter) {
-      startTime = options.timeFilter.startTime;
-      endTime = options.timeFilter.endTime;
-    }
-
-    // convert array to set to remove duplicates
-    const uniquePubsubTopicsInQuery = Array.from(
-      new Set(decoders.map((decoder) => decoder.pubsubTopic))
-    );
-
-    // If multiple pubsub topics are provided, throw an error
-    if (uniquePubsubTopicsInQuery.length > 1) {
-      throw new Error(
-        "API does not support querying multiple pubsub topics at once"
-      );
-    }
-
-    // we can be certain that there is only one pubsub topic in the query
-    const pubsubTopicForQuery = uniquePubsubTopicsInQuery[0];
-
-    ensurePubsubTopicIsConfigured(pubsubTopicForQuery, this.pubsubTopics);
-
-    // check that the pubsubTopic from the Cursor and Decoder match
     if (
-
-
+      queryOpts.contentTopics.toString() !==
+      Array.from(decoders.keys()).toString()
     ) {
       throw new Error(
-
+        "Internal error, the decoders should match the query's content topics"
       );
     }
 
-
-
-
-        throw new Error(
-          "API does not support different decoder per content topic"
-        );
-      }
-      decodersAsMap.set(dec.contentTopic, dec);
-    });
+    let currentCursor = queryOpts.cursor;
+    while (true) {
+      queryOpts.cursor = currentCursor;
 
-
-      .filter((decoder) => decoder.pubsubTopic === pubsubTopicForQuery)
-      .map((dec) => dec.contentTopic);
+      const historyRpcQuery = HistoryRpc.createQuery(queryOpts);
 
-
-      throw new Error("No decoders found for topic " + pubsubTopicForQuery);
-    }
-
-    const queryOpts = Object.assign(
-      {
-        pubsubTopic: pubsubTopicForQuery,
-        pageDirection: PageDirection.BACKWARD,
-        pageSize: DefaultPageSize
-      },
-      options,
-      { contentTopics, startTime, endTime }
-    );
-
-    const peer = (
-      await this.getPeers({
-        numPeers: this.NUM_PEERS_PROTOCOL,
-        maxBootstrapPeers: 1
-      })
-    )[0];
-
-    for await (const messages of paginate<T>(
-      this.getStream.bind(this, peer),
-      queryOpts,
-      decodersAsMap,
-      options?.cursor
-    )) {
-      yield messages;
-    }
-  }
-}
+      const stream = await this.getStream(peer);
 
-
-
-
-
-
-    )
-
-      queryOpts.contentTopics.toString() !==
-      Array.from(decoders.keys()).toString()
-    ) {
-      throw new Error(
-        "Internal error, the decoders should match the query's content topics"
-      );
-    }
-
-    let currentCursor = cursor;
-    while (true) {
-      queryOpts.cursor = currentCursor;
-
-      const historyRpcQuery = HistoryRpc.createQuery(queryOpts);
-
-      log.info(
-        "Querying store peer",
-        `for (${queryOpts.pubsubTopic})`,
-        queryOpts.contentTopics
-      );
-
-      const stream = await streamFactory();
-
-      const res = await pipe(
-        [historyRpcQuery.encode()],
-        lp.encode,
-        stream,
-        lp.decode,
-        async (source) => await all(source)
-      );
-
-      const bytes = new Uint8ArrayList();
-      res.forEach((chunk) => {
-        bytes.append(chunk);
-      });
-
-      const reply = historyRpcQuery.decode(bytes);
+      const res = await pipe(
+        [historyRpcQuery.encode()],
+        lp.encode,
+        stream,
+        lp.decode,
+        async (source) => await all(source)
+      );
 
-
-
-
-
+      const bytes = new Uint8ArrayList();
+      res.forEach((chunk) => {
+        bytes.append(chunk);
+      });
 
-
+      const reply = historyRpcQuery.decode(bytes);
 
-
-
-
+      if (!reply.response) {
+        log.warn("Stopping pagination due to store `response` field missing");
+        break;
+      }
 
-
-        log.warn(
-          "Stopping pagination due to store `response.messages` field missing or empty"
-        );
-        break;
-      }
+      const response = reply.response as proto.HistoryResponse;
 
-
+      if (response.error && response.error !== HistoryError.NONE) {
+        throw "History response contains an Error: " + response.error;
+      }
 
-
-
-
-
-
-            return decoder.fromProtoObj(
-              queryOpts.pubsubTopic,
-              toProtoMessage(protoMsg)
-            );
-          }
+      if (!response.messages || !response.messages.length) {
+        log.warn(
+          "Stopping pagination due to store `response.messages` field missing or empty"
+        );
+        break;
       }
-        return Promise.resolve(undefined);
-      });
 
-
-
-
-
-
-
-
-
-
+      log.error(`${response.messages.length} messages retrieved from store`);
+
+      yield response.messages.map((protoMsg) => {
+        const contentTopic = protoMsg.contentTopic;
+        if (typeof contentTopic !== "undefined") {
+          const decoder = decoders.get(contentTopic);
+          if (decoder) {
+            return decoder.fromProtoObj(
+              queryOpts.pubsubTopic,
+              toProtoMessage(protoMsg)
+            );
+          }
+        }
+        return Promise.resolve(undefined);
+      });
 
-
+      const nextCursor = response.pagingInfo?.cursor;
+      if (typeof nextCursor === "undefined") {
+        // If the server does not return cursor then there is an issue,
+        // Need to abort, or we end up in an infinite loop
+        log.warn(
+          "Stopping pagination due to `response.pagingInfo.cursor` missing from store response"
+        );
+        break;
+      }
 
-
-
-
-
-
-
-
-
-
+      currentCursor = nextCursor;
+
+      const responsePageSize = response.pagingInfo?.pageSize;
+      const queryPageSize = historyRpcQuery.query?.pagingInfo?.pageSize;
+      if (
+        // Response page size smaller than query, meaning this is the last page
+        responsePageSize &&
+        queryPageSize &&
+        responsePageSize < queryPageSize
+      ) {
+        break;
+      }
     }
   }
 }
-
-export async function createCursor(message: IDecodedMessage): Promise<Cursor> {
-  if (
-    !message ||
-    !message.timestamp ||
-    !message.payload ||
-    !message.contentTopic
-  ) {
-    throw new Error("Message is missing required fields");
-  }
-
-  const contentTopicBytes = utf8ToBytes(message.contentTopic);
-
-  const digest = sha256(concat([contentTopicBytes, message.payload]));
-
-  const messageTime = BigInt(message.timestamp.getTime()) * BigInt(1000000);
-
-  return {
-    digest,
-    pubsubTopic: message.pubsubTopic,
-    senderTime: messageTime,
-    receiverTime: messageTime
-  };
-}
-
-export function wakuStore(
-  init: Partial<ProtocolCreateOptions> = {}
-): (libp2p: Libp2p) => IStore {
-  return (libp2p: Libp2p) => new Store(libp2p, init);
-}
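The net effect of this change is that the store protocol in @waku/core is reduced to the low-level StoreCore class: queryPerPage drives cursor pagination against a single peer, while peer selection, decoder bookkeeping, and the higher-level query helpers removed above now live with the caller. A minimal sketch of how a caller might consume the new generator; the import path, the store/params/decoder/peer wiring, and the collectAll helper are assumptions, only the queryPerPage signature comes from the diff above:

import type { Peer } from "@libp2p/interface";
import type { IDecodedMessage, IDecoder } from "@waku/interfaces";
// Import specifier is an assumption; StoreCore and Params are the exports added in this diff.
import { StoreCore } from "@waku/core/lib/store";
import type { Params } from "@waku/core/lib/store";

// Hypothetical helper: drain every page returned by StoreCore.queryPerPage.
async function collectAll<T extends IDecodedMessage>(
  store: StoreCore,
  params: Params,
  decoder: IDecoder<T>,
  peer: Peer
): Promise<T[]> {
  // queryPerPage expects decoders keyed by content topic.
  const decoders = new Map([[decoder.contentTopic, decoder]]);
  const collected: T[] = [];

  for await (const page of store.queryPerPage(params, decoders, peer)) {
    // Each page is an array of Promise<T | undefined>, one per proto message.
    const messages = await Promise.all(page);
    for (const msg of messages) {
      if (msg) collected.push(msg);
    }
  }
  return collected;
}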
package/src/lib/wait_for_remote_peer.ts
CHANGED
@@ -1,10 +1,16 @@
 import type { IdentifyResult } from "@libp2p/interface";
-import type {
+import type {
+  IBaseProtocolCore,
+  IMetadata,
+  IRelay,
+  Waku
+} from "@waku/interfaces";
 import { Protocols } from "@waku/interfaces";
 import { Logger } from "@waku/utils";
 import { pEvent } from "p-event";
 const log = new Logger("wait-for-remote-peer");
 
+//TODO: move this function within the Waku class: https://github.com/waku-org/js-waku/issues/1761
 /**
  * Wait for a remote peer to be ready given the passed protocols.
  * Must be used after attempting to connect to nodes, using
@@ -45,7 +51,7 @@ export async function waitForRemotePeer(
     if (!waku.store)
       throw new Error("Cannot wait for Store peer: protocol not mounted");
     promises.push(
-      waitForConnectedPeer(waku.store, waku.libp2p.services.metadata)
+      waitForConnectedPeer(waku.store.protocol, waku.libp2p.services.metadata)
     );
   }
 
@@ -53,7 +59,10 @@ export async function waitForRemotePeer(
     if (!waku.lightPush)
       throw new Error("Cannot wait for LightPush peer: protocol not mounted");
     promises.push(
-      waitForConnectedPeer(
+      waitForConnectedPeer(
+        waku.lightPush.protocol,
+        waku.libp2p.services.metadata
+      )
     );
   }
 
@@ -76,12 +85,13 @@ export async function waitForRemotePeer(
   }
 }
 
+//TODO: move this function within protocol SDK class: https://github.com/waku-org/js-waku/issues/1761
 /**
  * Wait for a peer with the given protocol to be connected.
  * If sharding is enabled on the node, it will also wait for the peer to be confirmed by the metadata service.
 */
 async function waitForConnectedPeer(
-  protocol:
+  protocol: IBaseProtocolCore,
   metadataService?: IMetadata
 ): Promise<void> {
   const codec = protocol.multicodec;