@waku/core 0.0.27 → 0.0.28-17a8640.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bundle/{base_protocol-LhsIWF3-.js → base_protocol-D0Zdzb-v.js} +2 -5
- package/bundle/{browser-BQyFvtq6.js → browser-DoQRY-an.js} +18 -13
- package/bundle/{index-8YyfzF9R.js → index-BJwgMx4y.js} +35 -62
- package/bundle/index.js +258 -342
- package/bundle/lib/base_protocol.js +3 -3
- package/bundle/lib/message/version_0.js +3 -3
- package/bundle/lib/predefined_bootstrap_nodes.js +17 -17
- package/bundle/{version_0-FXfzO8Km.js → version_0-C6o0DvNW.js} +566 -321
- package/dist/.tsbuildinfo +1 -1
- package/dist/index.d.ts +3 -2
- package/dist/index.js +3 -2
- package/dist/index.js.map +1 -1
- package/dist/lib/base_protocol.d.ts +4 -5
- package/dist/lib/base_protocol.js +0 -3
- package/dist/lib/base_protocol.js.map +1 -1
- package/dist/lib/filter/index.js +4 -0
- package/dist/lib/filter/index.js.map +1 -1
- package/dist/lib/light_push/index.d.ts +12 -2
- package/dist/lib/light_push/index.js +79 -76
- package/dist/lib/light_push/index.js.map +1 -1
- package/dist/lib/message/version_0.js +1 -1
- package/dist/lib/message/version_0.js.map +1 -1
- package/dist/lib/metadata/index.d.ts +1 -1
- package/dist/lib/metadata/index.js +42 -14
- package/dist/lib/metadata/index.js.map +1 -1
- package/dist/lib/predefined_bootstrap_nodes.d.ts +11 -11
- package/dist/lib/predefined_bootstrap_nodes.js +16 -16
- package/dist/lib/predefined_bootstrap_nodes.js.map +1 -1
- package/dist/lib/store/history_rpc.js +1 -1
- package/dist/lib/store/history_rpc.js.map +1 -1
- package/dist/lib/store/index.d.ts +14 -6
- package/dist/lib/store/index.js +50 -232
- package/dist/lib/store/index.js.map +1 -1
- package/dist/lib/wait_for_remote_peer.js +4 -2
- package/dist/lib/wait_for_remote_peer.js.map +1 -1
- package/package.json +1 -129
- package/src/index.ts +3 -2
- package/src/lib/base_protocol.ts +4 -9
- package/src/lib/filter/index.ts +7 -0
- package/src/lib/light_push/index.ts +97 -118
- package/src/lib/metadata/index.ts +56 -26
- package/src/lib/predefined_bootstrap_nodes.ts +22 -22
- package/src/lib/store/index.ts +77 -339
- package/src/lib/wait_for_remote_peer.ts +14 -4
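Reviewer note: the headline change in package/bundle/index.js below is the split of protocol classes into "core" variants. LightPush becomes LightPushCore, its send() now targets one explicit peer and returns a { success, failure } pair instead of { recipients, error }, and error codes move from SendError to the ProtocolError enum. A minimal caller-side sketch in TypeScript; the { success, failure } shape and ProtocolError mirror the bundled code in this diff, while lightPush, encoder, message, and peer are hypothetical handles assumed to be obtained elsewhere:

    // Hedged sketch: adapting a caller to the single-peer send API.
    const { success, failure } = await lightPush.send(encoder, message, peer);
    if (failure) {
        // failure.error is a ProtocolError code; failure.peerId is the peer that failed
        console.error(`light push failed for ${failure.peerId.toString()}: ${failure.error}`);
    } else {
        // success is the id of the peer that accepted the message
        console.log(`light push accepted by ${success.toString()}`);
    }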
package/bundle/index.js
CHANGED
@@ -1,13 +1,9 @@
-import { v as version_0, e as encodingLength, a as encode$1, d as decode$1, M as MessagePush, F as FilterSubscribeRequest, b as FilterSubscribeResponse$1, P as PushRpc$1, c as PushResponse, H as HistoryRpc$1, f as PagingInfo, g as HistoryResponse, h as createEncoder, W as WakuMetadataResponse, i as WakuMetadataRequest } from './version_0-FXfzO8Km.js';
-export { j as createDecoder } from './version_0-FXfzO8Km.js';
-import { g as getDefaultExportFromCjs,
-import { b as bytesToUtf8, u as utf8ToBytes, c as concat$1, s as sha256, a as
-import { B as BaseProtocol, d as decodeRelayShard, e as encodeRelayShard } from './base_protocol-LhsIWF3-.js';
-export { S as StreamManager } from './base_protocol-LhsIWF3-.js';
-
-function isDefined(value) {
-    return Boolean(value);
-}
+import { v as version_0, e as encodingLength, a as encode$1, d as decode$1, M as MessagePush, F as FilterSubscribeRequest, b as FilterSubscribeResponse$1, P as PushRpc$1, c as PushResponse, H as HistoryRpc$1, f as PagingInfo, g as HistoryResponse, h as createEncoder, W as WakuMetadataResponse, i as WakuMetadataRequest } from './version_0-C6o0DvNW.js';
+export { j as createDecoder } from './version_0-C6o0DvNW.js';
+import { g as getDefaultExportFromCjs, P as ProtocolError, a as Protocols, E as EConnectionStateEvents, T as Tags, b as EPeersByDiscoveryEvents } from './browser-DoQRY-an.js';
+import { b as bytesToUtf8, u as utf8ToBytes, c as concat$1, s as sha256, a as allocUnsafe, d as alloc, L as Logger, e as singleShardInfoToPubsubTopic, f as ensurePubsubTopicIsConfigured, D as DefaultPubsubTopic, p as pubsubTopicToSingleShardInfo, g as shardInfoToPubsubTopics } from './index-BJwgMx4y.js';
+import { B as BaseProtocol, d as decodeRelayShard, e as encodeRelayShard } from './base_protocol-D0Zdzb-v.js';
+export { S as StreamManager } from './base_protocol-D0Zdzb-v.js';
 
 function groupByContentTopic(values) {
     const groupedDecoders = new Map();
@@ -174,13 +170,18 @@ function all(source) {
     return arr;
 }
 
+/**
+ * To guarantee Uint8Array semantics, convert nodejs Buffers
+ * into vanilla Uint8Arrays
+ */
+function asUint8Array(buf) {
+    return buf;
+}
+
 /**
  * Returns a new Uint8Array created by concatenating the passed Uint8Arrays
  */
 function concat(arrays, length) {
-    if (globalThis.Buffer != null) {
-        return asUint8Array(globalThis.Buffer.concat(arrays, length));
-    }
     if (length == null) {
         length = arrays.reduce((acc, curr) => acc + curr.length, 0);
     }
@@ -1358,6 +1359,47 @@ function _pushable(getNext, options) {
     return pushable;
 }
 
+/**
+ * @packageDocumentation
+ *
+ * Merge several (async)iterables into one, yield values as they arrive.
+ *
+ * Nb. sources are iterated over in parallel so the order of emitted items is not guaranteed.
+ *
+ * @example
+ *
+ * ```javascript
+ * import merge from 'it-merge'
+ * import all from 'it-all'
+ *
+ * // This can also be an iterator, generator, etc
+ * const values1 = [0, 1, 2, 3, 4]
+ * const values2 = [5, 6, 7, 8, 9]
+ *
+ * const arr = all(merge(values1, values2))
+ *
+ * console.info(arr) // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
+ * ```
+ *
+ * Async sources must be awaited:
+ *
+ * ```javascript
+ * import merge from 'it-merge'
+ * import all from 'it-all'
+ *
+ * // This can also be an iterator, async iterator, generator, etc
+ * const values1 = async function * () {
+ *   yield * [0, 1, 2, 3, 4]
+ * }
+ * const values2 = async function * () {
+ *   yield * [5, 6, 7, 8, 9]
+ * }
+ *
+ * const arr = await all(merge(values1(), values2()))
+ *
+ * console.info(arr) // 0, 1, 5, 6, 2, 3, 4, 7, 8, 9 <- nb. order is not guaranteed
+ * ```
+ */
 function isAsyncIterable$1(thing) {
     return thing[Symbol.asyncIterator] != null;
 }
@@ -1827,6 +1869,7 @@ class Subscription {
         }
     }
 }
+const DEFAULT_NUM_PEERS = 3;
 class Filter extends BaseProtocol {
     activeSubscriptions = new Map();
     getActiveSubscription(pubsubTopic) {
@@ -1836,8 +1879,11 @@ class Filter extends BaseProtocol {
         this.activeSubscriptions.set(pubsubTopic, subscription);
         return subscription;
     }
+    //TODO: Remove when FilterCore and FilterSDK are introduced
+    numPeersToUse;
     constructor(libp2p, options) {
         super(FilterCodecs.SUBSCRIBE, libp2p.components, log$6, options.pubsubTopics, options);
+        this.numPeersToUse = options?.numPeersToUse ?? DEFAULT_NUM_PEERS;
         libp2p.handle(FilterCodecs.PUSH, this.onRequest.bind(this)).catch((e) => {
             log$6.error("Failed to register ", FilterCodecs.PUSH, e);
         });
@@ -1990,122 +2036,125 @@ const LightPushCodec = "/vac/waku/lightpush/2.0.0-beta1";
 /**
  * Implements the [Waku v2 Light Push protocol](https://rfc.vac.dev/spec/19/).
  */
-class LightPush extends BaseProtocol {
+class LightPushCore extends BaseProtocol {
     constructor(libp2p, options) {
         super(LightPushCodec, libp2p.components, log$5, options.pubsubTopics, options);
     }
-    async preparePushMessage(encoder, message, pubsubTopic) {
+    async preparePushMessage(encoder, message) {
         try {
             if (!message.payload || message.payload.length === 0) {
                 log$5.error("Failed to send waku light push: payload is empty");
-                return { query: null, error:
+                return { query: null, error: ProtocolError.EMPTY_PAYLOAD };
             }
             if (!(await isMessageSizeUnderCap(encoder, message))) {
                 log$5.error("Failed to send waku light push: message is bigger than 1MB");
-                return { query: null, error:
+                return { query: null, error: ProtocolError.SIZE_TOO_BIG };
             }
             const protoMessage = await encoder.toProtoObj(message);
             if (!protoMessage) {
                 log$5.error("Failed to encode to protoMessage, aborting push");
                 return {
                     query: null,
-                    error:
+                    error: ProtocolError.ENCODE_FAILED
                 };
             }
-            const query = PushRpc.createRequest(protoMessage, pubsubTopic);
+            const query = PushRpc.createRequest(protoMessage, encoder.pubsubTopic);
             return { query, error: null };
         }
         catch (error) {
             log$5.error("Failed to prepare push message", error);
             return {
                 query: null,
-                error:
+                error: ProtocolError.GENERIC_FAIL
             };
         }
     }
-    async send(encoder, message) {
-        const {
-        ensurePubsubTopicIsConfigured(pubsubTopic, this.pubsubTopics);
-        const recipients = [];
-        const { query, error: preparationError } = await this.preparePushMessage(encoder, message, pubsubTopic);
+    async send(encoder, message, peer) {
+        const { query, error: preparationError } = await this.preparePushMessage(encoder, message);
         if (preparationError || !query) {
             return {
-
-
+                success: null,
+                failure: {
+                    error: preparationError,
+                    peerId: peer.id
+                }
             };
         }
-
-
-
-        }
-
+        let stream;
+        try {
+            stream = await this.getStream(peer);
+        }
+        catch (err) {
+            log$5.error(`Failed to get a stream for remote peer${peer.id.toString()}`, err);
             return {
-
-
+                success: null,
+                failure: {
+                    error: ProtocolError.REMOTE_PEER_FAULT,
+                    peerId: peer.id
+                }
             };
         }
-
-
-
-
-
-
-
-
-
-
-
-
-        }
-
-
-
-
-        const bytes = new Uint8ArrayList();
-        res.forEach((chunk) => {
-            bytes.append(chunk);
-        });
-        let response;
-        try {
-            response = PushRpc.decode(bytes).response;
-        }
-        catch (err) {
-            log$5.error("Failed to decode push reply", err);
-            return { recipients, error: SendError.DECODE_FAILED };
-        }
-        if (!response) {
-            log$5.error("Remote peer fault: No response in PushRPC");
-            return { recipients, error: SendError.REMOTE_PEER_FAULT };
-        }
-        if (!response.isSuccess) {
-            log$5.error("Remote peer rejected the message: ", response.info);
-            return { recipients, error: SendError.REMOTE_PEER_REJECTED };
-        }
-        recipients.some((recipient) => recipient.equals(peer.id)) ||
-            recipients.push(peer.id);
-        return { recipients };
+        let res;
+        try {
+            res = await pipe([query.encode()], encode, stream, decode, async (source) => await all(source));
+        }
+        catch (err) {
+            log$5.error("Failed to send waku light push request", err);
+            return {
+                success: null,
+                failure: {
+                    error: ProtocolError.GENERIC_FAIL,
+                    peerId: peer.id
+                }
+            };
+        }
+        const bytes = new Uint8ArrayList();
+        res.forEach((chunk) => {
+            bytes.append(chunk);
         });
-
-
-
-
-
-        .
-
-
-
-
+        let response;
+        try {
+            response = PushRpc.decode(bytes).response;
+        }
+        catch (err) {
+            log$5.error("Failed to decode push reply", err);
+            return {
+                success: null,
+                failure: {
+                    error: ProtocolError.DECODE_FAILED,
+                    peerId: peer.id
+                }
+            };
+        }
+        if (!response) {
+            log$5.error("Remote peer fault: No response in PushRPC");
+            return {
+                success: null,
+                failure: {
+                    error: ProtocolError.REMOTE_PEER_FAULT,
+                    peerId: peer.id
+                }
+            };
+        }
+        if (!response.isSuccess) {
+            log$5.error("Remote peer rejected the message: ", response.info);
+            return {
+                success: null,
+                failure: {
+                    error: ProtocolError.REMOTE_PEER_REJECTED,
+                    peerId: peer.id
+                }
+            };
+        }
+        return { success: peer.id, failure: null };
     }
 }
-function wakuLightPush(init = {}) {
-    return (libp2p) => new LightPush(libp2p, init);
-}
 
 var index$1 = /*#__PURE__*/Object.freeze({
     __proto__: null,
     LightPushCodec: LightPushCodec,
-
-
+    LightPushCore: LightPushCore,
+    get PushResponse () { return PushResponse; }
 });
 
 const EmptyMessage = {
@@ -2121,7 +2170,7 @@ function toProtoMessage(wire) {
     return { ...EmptyMessage, ...wire };
 }
 
-const OneMillion = BigInt(
+const OneMillion = BigInt(1_000_000);
 var PageDirection;
 (function (PageDirection) {
     PageDirection["BACKWARD"] = "backward";
@@ -2194,261 +2243,80 @@ function directionToProto(pageDirection) {
 var HistoryError = HistoryResponse.HistoryError;
 const log$4 = new Logger("store");
 const StoreCodec = "/vac/waku/store/2.0.0-beta4";
-const DefaultPageSize = 10;
 /**
  * Implements the [Waku v2 Store protocol](https://rfc.vac.dev/spec/13/).
  *
  * The Waku Store protocol can be used to retrieved historical messages.
  */
-class Store extends BaseProtocol {
-    NUM_PEERS_PROTOCOL = 1;
+class StoreCore extends BaseProtocol {
     constructor(libp2p, options) {
         super(StoreCodec, libp2p.components, log$4, options.pubsubTopics, options);
     }
-
-
-
-
-    async processMessages(messages, callback, options) {
-        let abort = false;
-        const messagesOrUndef = await Promise.all(messages);
-        let processedMessages = messagesOrUndef.filter(isDefined);
-        if (this.shouldReverseOrder(options)) {
-            processedMessages = processedMessages.reverse();
+    async *queryPerPage(queryOpts, decoders, peer) {
+        if (queryOpts.contentTopics.toString() !==
+            Array.from(decoders.keys()).toString()) {
+            throw new Error("Internal error, the decoders should match the query's content topics");
         }
-
-
-
+        let currentCursor = queryOpts.cursor;
+        while (true) {
+            queryOpts.cursor = currentCursor;
+            const historyRpcQuery = HistoryRpc.createQuery(queryOpts);
+            const stream = await this.getStream(peer);
+            const res = await pipe([historyRpcQuery.encode()], encode, stream, decode, async (source) => await all(source));
+            const bytes = new Uint8ArrayList();
+            res.forEach((chunk) => {
+                bytes.append(chunk);
+            });
+            const reply = historyRpcQuery.decode(bytes);
+            if (!reply.response) {
+                log$4.warn("Stopping pagination due to store `response` field missing");
+                break;
            }
-
-
-
-
-
-
-     * Messages in pages are ordered from oldest (first) to most recent (last).
-     * https://github.com/vacp2p/rfc/issues/533
-     *
-     * @private
-     */
-    shouldReverseOrder(options) {
-        return (typeof options?.pageDirection === "undefined" ||
-            options?.pageDirection === PageDirection.BACKWARD);
-    }
-    /**
-     * @deprecated Use `queryWithOrderedCallback` instead
-     **/
-    queryOrderedCallback = this.queryWithOrderedCallback;
-    /**
-     * Do a query to a Waku Store to retrieve historical/missed messages.
-     *
-     * The callback function takes a `WakuMessage` in input,
-     * messages are processed in order:
-     * - oldest to latest if `options.pageDirection` == { @link PageDirection.FORWARD }
-     * - latest to oldest if `options.pageDirection` == { @link PageDirection.BACKWARD }
-     *
-     * The ordering may affect performance.
-     * The ordering depends on the behavior of the remote store node.
-     * If strong ordering is needed, you may need to handle this at application level
-     * and set your own timestamps too (the WakuMessage timestamps are not certified).
-     *
-     * @throws If not able to reach a Waku Store peer to query,
-     * or if an error is encountered when processing the reply,
-     * or if two decoders with the same content topic are passed.
-     */
-    async queryWithOrderedCallback(decoders, callback, options) {
-        for await (const promises of this.queryGenerator(decoders, options)) {
-            if (await this.processMessages(promises, callback, options))
+            const response = reply.response;
+            if (response.error && response.error !== HistoryError.NONE) {
+                throw "History response contains an Error: " + response.error;
+            }
+            if (!response.messages || !response.messages.length) {
+                log$4.warn("Stopping pagination due to store `response.messages` field missing or empty");
                 break;
-
-
-
-
-
-
-
-
-
-
-
-     * Do note that the resolution of the `Promise<WakuMessage | undefined` may
-     * break the order as it may rely on the browser decryption API, which in turn,
-     * may have a different speed depending on the type of decryption.
-     *
-     * @throws If not able to reach a Waku Store peer to query,
-     * or if an error is encountered when processing the reply,
-     * or if two decoders with the same content topic are passed.
-     */
-    async queryWithPromiseCallback(decoders, callback, options) {
-        let abort = false;
-        for await (const page of this.queryGenerator(decoders, options)) {
-            const _promises = page.map(async (msgPromise) => {
-                if (abort)
-                    return;
-                abort = Boolean(await callback(msgPromise));
+            }
+            log$4.error(`${response.messages.length} messages retrieved from store`);
+            yield response.messages.map((protoMsg) => {
+                const contentTopic = protoMsg.contentTopic;
+                if (typeof contentTopic !== "undefined") {
+                    const decoder = decoders.get(contentTopic);
+                    if (decoder) {
+                        return decoder.fromProtoObj(queryOpts.pubsubTopic, toProtoMessage(protoMsg));
+                    }
+                }
+                return Promise.resolve(undefined);
            });
-
-            if (
+            const nextCursor = response.pagingInfo?.cursor;
+            if (typeof nextCursor === "undefined") {
+                // If the server does not return cursor then there is an issue,
+                // Need to abort, or we end up in an infinite loop
+                log$4.warn("Stopping pagination due to `response.pagingInfo.cursor` missing from store response");
+                break;
+            }
+            currentCursor = nextCursor;
+            const responsePageSize = response.pagingInfo?.pageSize;
+            const queryPageSize = historyRpcQuery.query?.pagingInfo?.pageSize;
+            if (
+            // Response page size smaller than query, meaning this is the last page
+            responsePageSize &&
+                queryPageSize &&
+                responsePageSize < queryPageSize) {
                break;
-            }
-        }
-    /**
-     * Do a query to a Waku Store to retrieve historical/missed messages.
-     *
-     * This is a generator, useful if you want most control on how messages
-     * are processed.
-     *
-     * The order of the messages returned by the remote Waku node SHOULD BE
-     * as follows:
-     * - within a page, messages SHOULD be ordered from oldest to most recent
-     * - pages direction depends on { @link QueryOptions.pageDirection }
-     * @throws If not able to reach a Waku Store peer to query,
-     * or if an error is encountered when processing the reply,
-     * or if two decoders with the same content topic are passed.
-     *
-     * This API only supports querying a single pubsub topic at a time.
-     * If multiple decoders are provided, they must all have the same pubsub topic.
-     * @throws If multiple decoders with different pubsub topics are provided.
-     * @throws If no decoders are provided.
-     * @throws If no decoders are found for the provided pubsub topic.
-     */
-    async *queryGenerator(decoders, options) {
-        if (decoders.length === 0) {
-            throw new Error("No decoders provided");
-        }
-        let startTime, endTime;
-        if (options?.timeFilter) {
-            startTime = options.timeFilter.startTime;
-            endTime = options.timeFilter.endTime;
-        }
-        // convert array to set to remove duplicates
-        const uniquePubsubTopicsInQuery = Array.from(new Set(decoders.map((decoder) => decoder.pubsubTopic)));
-        // If multiple pubsub topics are provided, throw an error
-        if (uniquePubsubTopicsInQuery.length > 1) {
-            throw new Error("API does not support querying multiple pubsub topics at once");
-        }
-        // we can be certain that there is only one pubsub topic in the query
-        const pubsubTopicForQuery = uniquePubsubTopicsInQuery[0];
-        ensurePubsubTopicIsConfigured(pubsubTopicForQuery, this.pubsubTopics);
-        // check that the pubsubTopic from the Cursor and Decoder match
-        if (options?.cursor?.pubsubTopic &&
-            options.cursor.pubsubTopic !== pubsubTopicForQuery) {
-            throw new Error(`Cursor pubsub topic (${options?.cursor?.pubsubTopic}) does not match decoder pubsub topic (${pubsubTopicForQuery})`);
-        }
-        const decodersAsMap = new Map();
-        decoders.forEach((dec) => {
-            if (decodersAsMap.has(dec.contentTopic)) {
-                throw new Error("API does not support different decoder per content topic");
-            }
-            decodersAsMap.set(dec.contentTopic, dec);
-        });
-        const contentTopics = decoders
-            .filter((decoder) => decoder.pubsubTopic === pubsubTopicForQuery)
-            .map((dec) => dec.contentTopic);
-        if (contentTopics.length === 0) {
-            throw new Error("No decoders found for topic " + pubsubTopicForQuery);
-        }
-        const queryOpts = Object.assign({
-            pubsubTopic: pubsubTopicForQuery,
-            pageDirection: PageDirection.BACKWARD,
-            pageSize: DefaultPageSize
-        }, options, { contentTopics, startTime, endTime });
-        const peer = (await this.getPeers({
-            numPeers: this.NUM_PEERS_PROTOCOL,
-            maxBootstrapPeers: 1
-        }))[0];
-        for await (const messages of paginate(this.getStream.bind(this, peer), queryOpts, decodersAsMap, options?.cursor)) {
-            yield messages;
-        }
-    }
-}
-async function* paginate(streamFactory, queryOpts, decoders, cursor) {
-    if (queryOpts.contentTopics.toString() !==
-        Array.from(decoders.keys()).toString()) {
-        throw new Error("Internal error, the decoders should match the query's content topics");
-    }
-    let currentCursor = cursor;
-    while (true) {
-        queryOpts.cursor = currentCursor;
-        const historyRpcQuery = HistoryRpc.createQuery(queryOpts);
-        log$4.info("Querying store peer", `for (${queryOpts.pubsubTopic})`, queryOpts.contentTopics);
-        const stream = await streamFactory();
-        const res = await pipe([historyRpcQuery.encode()], encode, stream, decode, async (source) => await all(source));
-        const bytes = new Uint8ArrayList();
-        res.forEach((chunk) => {
-            bytes.append(chunk);
-        });
-        const reply = historyRpcQuery.decode(bytes);
-        if (!reply.response) {
-            log$4.warn("Stopping pagination due to store `response` field missing");
-            break;
-        }
-        const response = reply.response;
-        if (response.error && response.error !== HistoryError.NONE) {
-            throw "History response contains an Error: " + response.error;
-        }
-        if (!response.messages || !response.messages.length) {
-            log$4.warn("Stopping pagination due to store `response.messages` field missing or empty");
-            break;
-        }
-        log$4.error(`${response.messages.length} messages retrieved from store`);
-        yield response.messages.map((protoMsg) => {
-            const contentTopic = protoMsg.contentTopic;
-            if (typeof contentTopic !== "undefined") {
-                const decoder = decoders.get(contentTopic);
-                if (decoder) {
-                    return decoder.fromProtoObj(queryOpts.pubsubTopic, toProtoMessage(protoMsg));
-                }
                }
-        return Promise.resolve(undefined);
-        });
-        const nextCursor = response.pagingInfo?.cursor;
-        if (typeof nextCursor === "undefined") {
-            // If the server does not return cursor then there is an issue,
-            // Need to abort, or we end up in an infinite loop
-            log$4.warn("Stopping pagination due to `response.pagingInfo.cursor` missing from store response");
-            break;
-        }
-        currentCursor = nextCursor;
-        const responsePageSize = response.pagingInfo?.pageSize;
-        const queryPageSize = historyRpcQuery.query?.pagingInfo?.pageSize;
-        if (
-        // Response page size smaller than query, meaning this is the last page
-        responsePageSize &&
-            queryPageSize &&
-            responsePageSize < queryPageSize) {
-            break;
         }
     }
 }
-async function createCursor(message) {
-    if (!message ||
-        !message.timestamp ||
-        !message.payload ||
-        !message.contentTopic) {
-        throw new Error("Message is missing required fields");
-    }
-    const contentTopicBytes = utf8ToBytes(message.contentTopic);
-    const digest = sha256(concat$1([contentTopicBytes, message.payload]));
-    const messageTime = BigInt(message.timestamp.getTime()) * BigInt(1000000);
-    return {
-        digest,
-        pubsubTopic: message.pubsubTopic,
-        senderTime: messageTime,
-        receiverTime: messageTime
-    };
-}
-function wakuStore(init = {}) {
-    return (libp2p) => new Store(libp2p, init);
-}
 
 var index = /*#__PURE__*/Object.freeze({
     __proto__: null,
-    DefaultPageSize: DefaultPageSize,
     get PageDirection () { return PageDirection; },
     StoreCodec: StoreCodec,
-
-    wakuStore: wakuStore
+    StoreCore: StoreCore
 });
 
 class TimeoutError extends Error {
@@ -2569,8 +2437,8 @@ function pTimeout(promise, options) {
 }
 
 const normalizeEmitter = emitter => {
-    const addListener = emitter.
-    const removeListener = emitter.
+    const addListener = emitter.addEventListener || emitter.on || emitter.addListener;
+    const removeListener = emitter.removeEventListener || emitter.off || emitter.removeListener;
 
     if (!addListener || !removeListener) {
         throw new TypeError('Emitter is not compatible');
@@ -2684,6 +2552,7 @@ function pEvent(emitter, event, options) {
 }
 
 const log$3 = new Logger("wait-for-remote-peer");
+//TODO: move this function within the Waku class: https://github.com/waku-org/js-waku/issues/1761
 /**
  * Wait for a remote peer to be ready given the passed protocols.
  * Must be used after attempting to connect to nodes, using
@@ -2716,12 +2585,12 @@ async function waitForRemotePeer(waku, protocols, timeoutMs) {
     if (protocols.includes(Protocols.Store)) {
         if (!waku.store)
             throw new Error("Cannot wait for Store peer: protocol not mounted");
-        promises.push(waitForConnectedPeer(waku.store, waku.libp2p.services.metadata));
+        promises.push(waitForConnectedPeer(waku.store.protocol, waku.libp2p.services.metadata));
     }
     if (protocols.includes(Protocols.LightPush)) {
         if (!waku.lightPush)
            throw new Error("Cannot wait for LightPush peer: protocol not mounted");
-        promises.push(waitForConnectedPeer(waku.lightPush, waku.libp2p.services.metadata));
+        promises.push(waitForConnectedPeer(waku.lightPush.protocol, waku.libp2p.services.metadata));
     }
     if (protocols.includes(Protocols.Filter)) {
         if (!waku.filter)
@@ -2735,6 +2604,7 @@ async function waitForRemotePeer(waku, protocols, timeoutMs) {
         await Promise.all(promises);
     }
 }
+//TODO: move this function within protocol SDK class: https://github.com/waku-org/js-waku/issues/1761
 /**
  * Wait for a peer with the given protocol to be connected.
  * If sharding is enabled on the node, it will also wait for the peer to be confirmed by the metadata service.
@@ -2820,12 +2690,31 @@ function getEnabledProtocols(waku) {
     return protocols;
 }
 
+/** Noop for browser compatibility */
+function setMaxListeners$1() { }
+
+// create a setMaxListeners that doesn't break browser usage
+const setMaxListeners = (n, ...eventTargets) => {
+    try {
+        setMaxListeners$1(n, ...eventTargets);
+    }
+    catch {
+        // swallow error, gulp
+    }
+};
+
 /**
  * An implementation of a typed event target
  * etc
  */
 class TypedEventEmitter extends EventTarget {
     #listeners = new Map();
+    constructor() {
+        super();
+        // silence MaxListenersExceededWarning warning on Node.js, this is a red
+        // herring almost all of the time
+        setMaxListeners(Infinity, this);
+    }
     listenerCount(type) {
         const listeners = this.#listeners.get(type);
         if (listeners == null) {
@@ -3400,7 +3289,7 @@ const MetadataCodec = "/vac/waku/metadata/1.0.0";
 class Metadata extends BaseProtocol {
     shardInfo;
     libp2pComponents;
-    handshakesConfirmed = new
+    handshakesConfirmed = new Map();
     constructor(shardInfo, libp2p) {
         super(MetadataCodec, libp2p.components, log, shardInfoToPubsubTopics(shardInfo));
         this.shardInfo = shardInfo;
@@ -3417,8 +3306,11 @@ class Metadata extends BaseProtocol {
             const { stream, connection } = streamData;
             const encodedShardInfo = WakuMetadataResponse.encode(this.shardInfo);
             const encodedResponse = await pipe([encodedShardInfo], encode, stream, decode, async (source) => await all(source));
-            const
-
+            const { error, shardInfo } = this.decodeMetadataResponse(encodedResponse);
+            if (error) {
+                return;
+            }
+            await this.savePeerShardInfo(connection.remotePeer, shardInfo);
         }
         catch (error) {
             log.error("Error handling metadata request", error);
@@ -3431,19 +3323,35 @@ class Metadata extends BaseProtocol {
         const request = WakuMetadataRequest.encode(this.shardInfo);
         const peer = await this.peerStore.get(peerId);
         if (!peer) {
-
+            return {
+                shardInfo: null,
+                error: ProtocolError.NO_PEER_AVAILABLE
+            };
         }
         const stream = await this.getStream(peer);
         const encodedResponse = await pipe([request], encode, stream, decode, async (source) => await all(source));
-        const
-
-
+        const { error, shardInfo } = this.decodeMetadataResponse(encodedResponse);
+        if (error) {
+            return {
+                shardInfo: null,
+                error
+            };
+        }
+        await this.savePeerShardInfo(peerId, shardInfo);
+        return {
+            shardInfo,
+            error: null
+        };
     }
     async confirmOrAttemptHandshake(peerId) {
-
-
-
-
+        const shardInfo = this.handshakesConfirmed.get(peerId.toString());
+        if (shardInfo) {
+            return {
+                shardInfo,
+                error: null
+            };
+        }
+        return await this.query(peerId);
     }
     decodeMetadataResponse(encodedResponse) {
         const bytes = new Uint8ArrayList();
@@ -3451,9 +3359,17 @@ class Metadata extends BaseProtocol {
             bytes.append(chunk);
         });
         const response = WakuMetadataResponse.decode(bytes);
-        if (!response)
+        if (!response) {
             log.error("Error decoding metadata response");
-
+            return {
+                shardInfo: null,
+                error: ProtocolError.DECODE_FAILED
+            };
+        }
+        return {
+            shardInfo: response,
+            error: null
+        };
     }
     async savePeerShardInfo(peerId, shardInfo) {
         // add or update the shardInfo to peer store
@@ -3462,11 +3378,11 @@ class Metadata extends BaseProtocol {
                 shardInfo: encodeRelayShard(shardInfo)
             }
         });
-        this.handshakesConfirmed.
+        this.handshakesConfirmed.set(peerId.toString(), shardInfo);
     }
 }
 function wakuMetadata(shardInfo) {
     return (components) => new Metadata(shardInfo, components);
 }
 
-export { ConnectionManager, FilterCodecs, KeepAliveManager, LightPushCodec, MetadataCodec, PageDirection,
+export { ConnectionManager, FilterCodecs, KeepAliveManager, LightPushCodec, LightPushCore, MetadataCodec, PageDirection, StoreCore, createEncoder, index$3 as message, waitForRemotePeer, wakuFilter, wakuMetadata, index$2 as waku_filter, index$1 as waku_light_push, index as waku_store };