@waku/core 0.0.33-d3301ff.0 → 0.0.33-e84eb62.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bundle/base_protocol-BDjsZTsQ.js +275 -0
- package/bundle/{index-tdQNdKHx.js → index-CeEH6b9b.js} +24 -450
- package/bundle/index.js +100 -417
- package/bundle/lib/base_protocol.js +2 -2
- package/bundle/lib/message/version_0.js +2 -2
- package/bundle/{version_0-BrbNEwD-.js → version_0-BYg0O3M-.js} +451 -2
- package/dist/.tsbuildinfo +1 -1
- package/dist/index.d.ts +1 -2
- package/dist/index.js +1 -2
- package/dist/index.js.map +1 -1
- package/dist/lib/base_protocol.d.ts +4 -6
- package/dist/lib/base_protocol.js +10 -14
- package/dist/lib/base_protocol.js.map +1 -1
- package/dist/lib/filter/index.js +2 -2
- package/dist/lib/filter/index.js.map +1 -1
- package/dist/lib/metadata/index.js +1 -1
- package/dist/lib/metadata/index.js.map +1 -1
- package/dist/lib/stream_manager/stream_manager.d.ts +12 -9
- package/dist/lib/stream_manager/stream_manager.js +87 -56
- package/dist/lib/stream_manager/stream_manager.js.map +1 -1
- package/dist/lib/stream_manager/utils.d.ts +1 -1
- package/dist/lib/stream_manager/utils.js +5 -17
- package/dist/lib/stream_manager/utils.js.map +1 -1
- package/package.json +1 -1
- package/src/index.ts +1 -3
- package/src/lib/base_protocol.ts +14 -28
- package/src/lib/filter/index.ts +11 -2
- package/src/lib/metadata/index.ts +1 -1
- package/src/lib/stream_manager/stream_manager.ts +124 -66
- package/src/lib/stream_manager/utils.ts +5 -17
- package/bundle/base_protocol-C47QkJ2o.js +0 -335
- package/dist/lib/wait_for_remote_peer.d.ts +0 -22
- package/dist/lib/wait_for_remote_peer.js +0 -142
- package/dist/lib/wait_for_remote_peer.js.map +0 -1
- package/src/lib/wait_for_remote_peer.ts +0 -200
package/bundle/index.js
CHANGED
@@ -1,8 +1,8 @@
-import { v as version_0, e as encodingLength, a as encode$1, d as decode$1, M as MessagePush, F as FilterSubscribeRequest, b as FilterSubscribeResponse$1, P as PushRpc$1, c as PushResponse, S as StoreQueryRequest$1, f as StoreQueryResponse$1, g as createEncoder, W as WakuMetadataRequest, h as WakuMetadataResponse } from './version_0-
-export {
-import { a as allocUnsafe, b as alloc, L as Logger, P as ProtocolError,
-import { B as BaseProtocol
-export { S as StreamManager } from './base_protocol-
+import { v as version_0, e as encodingLength, a as encode$1, d as decode$1, M as MessagePush, F as FilterSubscribeRequest, b as FilterSubscribeResponse$1, P as PushRpc$1, c as PushResponse, S as StoreQueryRequest$1, f as StoreQueryResponse$1, g as createEncoder, p as pubsubTopicToSingleShardInfo, s as shardInfoToPubsubTopics, W as WakuMetadataRequest, h as pubsubTopicsToShardInfo, i as WakuMetadataResponse } from './version_0-BYg0O3M-.js';
+export { j as createDecoder } from './version_0-BYg0O3M-.js';
+import { a as allocUnsafe, b as alloc, L as Logger, P as ProtocolError, u as utf8ToBytes, T as Tags, E as EPeersByDiscoveryEvents, c as EConnectionStateEvents, H as HealthStatus, d as Protocols } from './index-CeEH6b9b.js';
+import { B as BaseProtocol } from './base_protocol-BDjsZTsQ.js';
+export { S as StreamManager } from './base_protocol-BDjsZTsQ.js';

 const MB = 1024 ** 2;
 const SIZE_CAP_IN_MB = 1;
@@ -23,6 +23,60 @@ const isWireSizeUnderCap = (buf) => buf.length / MB <= SIZE_CAP_IN_MB;

 const DNS_DISCOVERY_TAG = "@waku/bootstrap";

+const decodeRelayShard = (bytes) => {
+// explicitly converting to Uint8Array to avoid Buffer
+// https://github.com/libp2p/js-libp2p/issues/2146
+bytes = new Uint8Array(bytes);
+if (bytes.length < 3)
+throw new Error("Insufficient data");
+const view = new DataView(bytes.buffer);
+const clusterId = view.getUint16(0);
+const shards = [];
+if (bytes.length === 130) {
+// rsv format (Bit Vector)
+for (let i = 0; i < 1024; i++) {
+const byteIndex = Math.floor(i / 8) + 2; // Adjusted for the 2-byte cluster field
+const bitIndex = 7 - (i % 8);
+if (view.getUint8(byteIndex) & (1 << bitIndex)) {
+shards.push(i);
+}
+}
+}
+else {
+// rs format (Index List)
+const numIndices = view.getUint8(2);
+for (let i = 0, offset = 3; i < numIndices; i++, offset += 2) {
+if (offset + 1 >= bytes.length)
+throw new Error("Unexpected end of data");
+shards.push(view.getUint16(offset));
+}
+}
+return { clusterId, shards };
+};
+const encodeRelayShard = (shardInfo) => {
+const { clusterId, shards } = shardInfo;
+const totalLength = shards.length >= 64 ? 130 : 3 + 2 * shards.length;
+const buffer = new ArrayBuffer(totalLength);
+const view = new DataView(buffer);
+view.setUint16(0, clusterId);
+if (shards.length >= 64) {
+// rsv format (Bit Vector)
+for (const index of shards) {
+const byteIndex = Math.floor(index / 8) + 2; // Adjusted for the 2-byte cluster field
+const bitIndex = 7 - (index % 8);
+view.setUint8(byteIndex, view.getUint8(byteIndex) | (1 << bitIndex));
+}
+}
+else {
+// rs format (Index List)
+view.setUint8(2, shards.length);
+for (let i = 0, offset = 3; i < shards.length; i++, offset += 2) {
+view.setUint16(offset, shards[i]);
+}
+}
+return new Uint8Array(buffer);
+};
+
 var index$3 = /*#__PURE__*/Object.freeze({
 __proto__: null,
 version_0: version_0
@@ -1040,7 +1094,7 @@ class FIFO {
 * // [ [1, 2, 3] ]
 * ```
 */
-let AbortError$1 = class AbortError extends Error {
+class AbortError extends Error {
 type;
 code;
 constructor(message, code) {
@@ -1048,7 +1102,7 @@ let AbortError$1 = class AbortError extends Error {
 this.type = 'aborted';
 this.code = code ?? 'ABORT_ERR';
 }
-}
+}
 function pushable(options = {}) {
 const getNext = (buffer) => {
 const next = buffer.shift();
@@ -1168,7 +1222,7 @@ function _pushable(getNext, options) {
 if (signal != null) {
 cancel = new Promise((resolve, reject) => {
 listener = () => {
-reject(new AbortError
+reject(new AbortError());
 };
 signal.addEventListener('abort', listener);
 });
@@ -1557,7 +1611,7 @@ class FilterSubscribeResponse {
 }
 }

-const log$
+const log$5 = new Logger("filter:v2");
 const FilterCodecs = {
 SUBSCRIBE: "/vac/waku/filter-subscribe/2.0.0-beta1",
 PUSH: "/vac/waku/filter-push/2.0.0-beta1"
@@ -1566,7 +1620,7 @@ class FilterCore extends BaseProtocol {
 handleIncomingMessage;
 pubsubTopics;
 constructor(handleIncomingMessage, pubsubTopics, libp2p) {
-super(FilterCodecs.SUBSCRIBE, libp2p.components, log$
+super(FilterCodecs.SUBSCRIBE, libp2p.components, log$5, pubsubTopics);
 this.handleIncomingMessage = handleIncomingMessage;
 this.pubsubTopics = pubsubTopics;
 libp2p
@@ -1574,7 +1628,7 @@ class FilterCore extends BaseProtocol {
 maxInboundStreams: 100
 })
 .catch((e) => {
-log$
+log$5.error("Failed to register ", FilterCodecs.PUSH, e);
 });
 }
 async subscribe(pubsubTopic, peer, contentTopics) {
@@ -1585,7 +1639,7 @@ class FilterCore extends BaseProtocol {
 res = await pipe([request.encode()], encode, stream, decode, async (source) => await all(source));
 }
 catch (error) {
-log$
+log$5.error("Failed to send subscribe request", error);
 return {
 success: null,
 failure: {
@@ -1596,7 +1650,7 @@ class FilterCore extends BaseProtocol {
 }
 const { statusCode, requestId, statusDesc } = FilterSubscribeResponse.decode(res[0].slice());
 if (statusCode < 200 || statusCode >= 300) {
-log$
+log$5.error(`Filter subscribe request ${requestId} failed with status code ${statusCode}: ${statusDesc}`);
 return {
 failure: {
 error: ProtocolError.REMOTE_PEER_REJECTED,
@@ -1616,7 +1670,7 @@ class FilterCore extends BaseProtocol {
 stream = await this.getStream(peer);
 }
 catch (error) {
-log$
+log$5.error(`Failed to get a stream for remote peer${peer.id.toString()}`, error);
 return {
 success: null,
 failure: {
@@ -1630,7 +1684,7 @@ class FilterCore extends BaseProtocol {
 await pipe([unsubscribeRequest.encode()], encode, stream.sink);
 }
 catch (error) {
-log$
+log$5.error("Failed to send unsubscribe request", error);
 return {
 success: null,
 failure: {
@@ -1659,7 +1713,7 @@ class FilterCore extends BaseProtocol {
 }
 const { statusCode, requestId, statusDesc } = FilterSubscribeResponse.decode(res[0].slice());
 if (statusCode < 200 || statusCode >= 300) {
-log$
+log$5.error(`Filter unsubscribe all request ${requestId} failed with status code ${statusCode}: ${statusDesc}`);
 return {
 failure: {
 error: ProtocolError.REMOTE_PEER_REJECTED,
@@ -1679,7 +1733,7 @@ class FilterCore extends BaseProtocol {
 stream = await this.getStream(peer);
 }
 catch (error) {
-log$
+log$5.error(`Failed to get a stream for remote peer${peer.id.toString()}`, error);
 return {
 success: null,
 failure: {
@@ -1694,7 +1748,7 @@ class FilterCore extends BaseProtocol {
 res = await pipe([request.encode()], encode, stream, decode, async (source) => await all(source));
 }
 catch (error) {
-log$
+log$5.error("Failed to send ping request", error);
 return {
 success: null,
 failure: {
@@ -1714,7 +1768,7 @@ class FilterCore extends BaseProtocol {
 }
 const { statusCode, requestId, statusDesc } = FilterSubscribeResponse.decode(res[0].slice());
 if (statusCode < 200 || statusCode >= 300) {
-log$
+log$5.error(`Filter ping request ${requestId} failed with status code ${statusCode}: ${statusDesc}`);
 return {
 success: null,
 failure: {
@@ -1731,30 +1785,30 @@ class FilterCore extends BaseProtocol {
 onRequest(streamData) {
 const { connection, stream } = streamData;
 const { remotePeer } = connection;
-log$
+log$5.info(`Received message from ${remotePeer.toString()}`);
 try {
 pipe(stream, decode, async (source) => {
 for await (const bytes of source) {
 const response = FilterPushRpc.decode(bytes.slice());
 const { pubsubTopic, wakuMessage } = response;
 if (!wakuMessage) {
-log$
+log$5.error("Received empty message");
 return;
 }
 if (!pubsubTopic) {
-log$
+log$5.error("Pubsub topic missing from push message");
 return;
 }
 await this.handleIncomingMessage(pubsubTopic, wakuMessage, connection.remotePeer.toString());
 }
 }).then(() => {
-log$
-}, (e) => {
-log$
+log$5.info("Receiving pipe closed.");
+}, async (e) => {
+log$5.error("Error with receiving pipe", e, " -- ", "on peer ", connection.remotePeer.toString(), " -- ", "stream ", stream);
 });
 }
 catch (e) {
-log$
+log$5.error("Error decoding message", e);
 }
 }
 }
@@ -1820,7 +1874,7 @@ const matchRLNErrorMessage = (info) => {
 return ProtocolError.RLN_PROOF_GENERATION;
 };

-const log$
+const log$4 = new Logger("light-push");
 const LightPushCodec = "/vac/waku/lightpush/2.0.0-beta1";
 /**
 * Implements the [Waku v2 Light Push protocol](https://rfc.vac.dev/spec/19/).
@@ -1828,22 +1882,22 @@ const LightPushCodec = "/vac/waku/lightpush/2.0.0-beta1";
 class LightPushCore extends BaseProtocol {
 pubsubTopics;
 constructor(pubsubTopics, libp2p) {
-super(LightPushCodec, libp2p.components, log$
+super(LightPushCodec, libp2p.components, log$4, pubsubTopics);
 this.pubsubTopics = pubsubTopics;
 }
 async preparePushMessage(encoder, message) {
 try {
 if (!message.payload || message.payload.length === 0) {
-log$
+log$4.error("Failed to send waku light push: payload is empty");
 return { query: null, error: ProtocolError.EMPTY_PAYLOAD };
 }
 if (!(await isMessageSizeUnderCap(encoder, message))) {
-log$
+log$4.error("Failed to send waku light push: message is bigger than 1MB");
 return { query: null, error: ProtocolError.SIZE_TOO_BIG };
 }
 const protoMessage = await encoder.toProtoObj(message);
 if (!protoMessage) {
-log$
+log$4.error("Failed to encode to protoMessage, aborting push");
 return {
 query: null,
 error: ProtocolError.ENCODE_FAILED
@@ -1853,7 +1907,7 @@ class LightPushCore extends BaseProtocol {
 return { query, error: null };
 }
 catch (error) {
-log$
+log$4.error("Failed to prepare push message", error);
 return {
 query: null,
 error: ProtocolError.GENERIC_FAIL
@@ -1876,7 +1930,7 @@ class LightPushCore extends BaseProtocol {
 stream = await this.getStream(peer);
 }
 catch (error) {
-log$
+log$4.error("Failed to get stream", error);
 return {
 success: null,
 failure: {
@@ -1890,7 +1944,7 @@ class LightPushCore extends BaseProtocol {
 res = await pipe([query.encode()], encode, stream, decode, async (source) => await all(source));
 }
 catch (err) {
-log$
+log$4.error("Failed to send waku light push request", err);
 return {
 success: null,
 failure: {
@@ -1908,7 +1962,7 @@ class LightPushCore extends BaseProtocol {
 response = PushRpc.decode(bytes).response;
 }
 catch (err) {
-log$
+log$4.error("Failed to decode push reply", err);
 return {
 success: null,
 failure: {
@@ -1918,7 +1972,7 @@ class LightPushCore extends BaseProtocol {
 };
 }
 if (!response) {
-log$
+log$4.error("Remote peer fault: No response in PushRPC");
 return {
 success: null,
 failure: {
@@ -1929,7 +1983,7 @@ class LightPushCore extends BaseProtocol {
 }
 if (isRLNResponseError(response.info)) {
 const rlnErrorCase = matchRLNErrorMessage(response.info);
-log$
+log$4.error("Remote peer rejected the message: ", rlnErrorCase);
 return {
 success: null,
 failure: {
@@ -1939,7 +1993,7 @@ class LightPushCore extends BaseProtocol {
 };
 }
 if (!response.isSuccess) {
-log$
+log$4.error("Remote peer rejected the message: ", response.info);
 return {
 success: null,
 failure: {
@@ -2044,12 +2098,12 @@ class StoreQueryResponse {
 }
 }

-const log$
+const log$3 = new Logger("store");
 const StoreCodec = "/vac/waku/store-query/3.0.0";
 class StoreCore extends BaseProtocol {
 pubsubTopics;
 constructor(pubsubTopics, libp2p) {
-super(StoreCodec, libp2p.components, log$
+super(StoreCodec, libp2p.components, log$3, pubsubTopics);
 this.pubsubTopics = pubsubTopics;
 }
 async *queryPerPage(queryOpts, decoders, peer) {
@@ -2068,7 +2122,7 @@ class StoreCore extends BaseProtocol {
 stream = await this.getStream(peer);
 }
 catch (e) {
-log$
+log$3.error("Failed to get stream", e);
 break;
 }
 const res = await pipe([storeQueryRequest.encode()], encode, stream, decode, async (source) => await all(source));
@@ -2080,14 +2134,14 @@ class StoreCore extends BaseProtocol {
 if (!storeQueryResponse.statusCode ||
 storeQueryResponse.statusCode >= 300) {
 const errorMessage = `Store query failed with status code: ${storeQueryResponse.statusCode}, description: ${storeQueryResponse.statusDesc}`;
-log$
+log$3.error(errorMessage);
 throw new Error(errorMessage);
 }
 if (!storeQueryResponse.messages || !storeQueryResponse.messages.length) {
-log$
+log$3.warn("Stopping pagination due to empty messages in response");
 break;
 }
-log$
+log$3.info(`${storeQueryResponse.messages.length} messages retrieved from store`);
 const decodedMessages = storeQueryResponse.messages.map((protoMsg) => {
 if (!protoMsg.message) {
 return Promise.resolve(undefined);
@@ -2125,377 +2179,6 @@ var index = /*#__PURE__*/Object.freeze({
 StoreCore: StoreCore
 });

-class TimeoutError extends Error {
-constructor(message) {
-super(message);
-this.name = 'TimeoutError';
-}
-}
-
-/**
-An error to be thrown when the request is aborted by AbortController.
-DOMException is thrown instead of this Error when DOMException is available.
-*/
-class AbortError extends Error {
-constructor(message) {
-super();
-this.name = 'AbortError';
-this.message = message;
-}
-}
-
-/**
-TODO: Remove AbortError and just throw DOMException when targeting Node 18.
-*/
-const getDOMException = errorMessage => globalThis.DOMException === undefined
-? new AbortError(errorMessage)
-: new DOMException(errorMessage);
-
-/**
-TODO: Remove below function and just 'reject(signal.reason)' when targeting Node 18.
-*/
-const getAbortedReason = signal => {
-const reason = signal.reason === undefined
-? getDOMException('This operation was aborted.')
-: signal.reason;
-
-return reason instanceof Error ? reason : getDOMException(reason);
-};
-
-function pTimeout(promise, options) {
-const {
-milliseconds,
-fallback,
-message,
-customTimers = {setTimeout, clearTimeout},
-} = options;
-
-let timer;
-
-const wrappedPromise = new Promise((resolve, reject) => {
-if (typeof milliseconds !== 'number' || Math.sign(milliseconds) !== 1) {
-throw new TypeError(`Expected \`milliseconds\` to be a positive number, got \`${milliseconds}\``);
-}
-
-if (options.signal) {
-const {signal} = options;
-if (signal.aborted) {
-reject(getAbortedReason(signal));
-}
-
-signal.addEventListener('abort', () => {
-reject(getAbortedReason(signal));
-});
-}
-
-if (milliseconds === Number.POSITIVE_INFINITY) {
-promise.then(resolve, reject);
-return;
-}
-
-// We create the error outside of `setTimeout` to preserve the stack trace.
-const timeoutError = new TimeoutError();
-
-timer = customTimers.setTimeout.call(undefined, () => {
-if (fallback) {
-try {
-resolve(fallback());
-} catch (error) {
-reject(error);
-}
-
-return;
-}
-
-if (typeof promise.cancel === 'function') {
-promise.cancel();
-}
-
-if (message === false) {
-resolve();
-} else if (message instanceof Error) {
-reject(message);
-} else {
-timeoutError.message = message ?? `Promise timed out after ${milliseconds} milliseconds`;
-reject(timeoutError);
-}
-}, milliseconds);
-
-(async () => {
-try {
-resolve(await promise);
-} catch (error) {
-reject(error);
-}
-})();
-});
-
-const cancelablePromise = wrappedPromise.finally(() => {
-cancelablePromise.clear();
-});
-
-cancelablePromise.clear = () => {
-customTimers.clearTimeout.call(undefined, timer);
-timer = undefined;
-};
-
-return cancelablePromise;
-}
-
-const normalizeEmitter = emitter => {
-const addListener = emitter.addEventListener || emitter.on || emitter.addListener;
-const removeListener = emitter.removeEventListener || emitter.off || emitter.removeListener;
-
-if (!addListener || !removeListener) {
-throw new TypeError('Emitter is not compatible');
-}
-
-return {
-addListener: addListener.bind(emitter),
-removeListener: removeListener.bind(emitter),
-};
-};
-
-function pEventMultiple(emitter, event, options) {
-let cancel;
-const returnValue = new Promise((resolve, reject) => {
-options = {
-rejectionEvents: ['error'],
-multiArgs: false,
-resolveImmediately: false,
-...options,
-};
-
-if (!(options.count >= 0 && (options.count === Number.POSITIVE_INFINITY || Number.isInteger(options.count)))) {
-throw new TypeError('The `count` option should be at least 0 or more');
-}
-
-options.signal?.throwIfAborted();
-
-// Allow multiple events
-const events = [event].flat();
-
-const items = [];
-const {addListener, removeListener} = normalizeEmitter(emitter);
-
-const onItem = (...arguments_) => {
-const value = options.multiArgs ? arguments_ : arguments_[0];
-
-// eslint-disable-next-line unicorn/no-array-callback-reference
-if (options.filter && !options.filter(value)) {
-return;
-}
-
-items.push(value);
-
-if (options.count === items.length) {
-cancel();
-resolve(items);
-}
-};
-
-const rejectHandler = error => {
-cancel();
-reject(error);
-};
-
-cancel = () => {
-for (const event of events) {
-removeListener(event, onItem);
-}
-
-for (const rejectionEvent of options.rejectionEvents) {
-removeListener(rejectionEvent, rejectHandler);
-}
-};
-
-for (const event of events) {
-addListener(event, onItem);
-}
-
-for (const rejectionEvent of options.rejectionEvents) {
-addListener(rejectionEvent, rejectHandler);
-}
-
-if (options.signal) {
-options.signal.addEventListener('abort', () => {
-rejectHandler(options.signal.reason);
-}, {once: true});
-}
-
-if (options.resolveImmediately) {
-resolve(items);
-}
-});
-
-returnValue.cancel = cancel;
-
-if (typeof options.timeout === 'number') {
-const timeout = pTimeout(returnValue, {milliseconds: options.timeout});
-timeout.cancel = cancel;
-return timeout;
-}
-
-return returnValue;
-}
-
-function pEvent(emitter, event, options) {
-if (typeof options === 'function') {
-options = {filter: options};
-}
-
-options = {
-...options,
-count: 1,
-resolveImmediately: false,
-};
-
-const arrayPromise = pEventMultiple(emitter, event, options);
-const promise = arrayPromise.then(array => array[0]);
-promise.cancel = arrayPromise.cancel;
-
-return promise;
-}
-
-const log$3 = new Logger("wait-for-remote-peer");
-//TODO: move this function within the Waku class: https://github.com/waku-org/js-waku/issues/1761
-/**
-* Wait for a remote peer to be ready given the passed protocols.
-* Must be used after attempting to connect to nodes, using
-* {@link @waku/sdk!WakuNode.dial} or a bootstrap method with
-* {@link @waku/sdk!createLightNode}.
-*
-* If the passed protocols is a GossipSub protocol, then it resolves only once
-* a peer is in a mesh, to help ensure that other peers will send and receive
-* message to us.
-*
-* @param waku The Waku Node
-* @param protocols The protocols that need to be enabled by remote peers.
-* @param timeoutMs A timeout value in milliseconds..
-*
-* @returns A promise that **resolves** if all desired protocols are fulfilled by
-* remote nodes, **rejects** if the timeoutMs is reached.
-* @throws If passing a protocol that is not mounted
-* @default Wait for remote peers with protocols enabled locally and no time out is applied.
-*/
-async function waitForRemotePeer(waku, protocols, timeoutMs) {
-protocols = protocols ?? getEnabledProtocols(waku);
-if (!waku.isStarted())
-return Promise.reject("Waku node is not started");
-const promises = [];
-if (protocols.includes(Protocols.Relay)) {
-if (!waku.relay)
-throw new Error("Cannot wait for Relay peer: protocol not mounted");
-promises.push(waitForGossipSubPeerInMesh(waku.relay));
-}
-if (protocols.includes(Protocols.Store)) {
-if (!waku.store)
-throw new Error("Cannot wait for Store peer: protocol not mounted");
-promises.push(waitForConnectedPeer(waku.store.protocol, waku.libp2p.services.metadata));
-}
-if (protocols.includes(Protocols.LightPush)) {
-if (!waku.lightPush)
-throw new Error("Cannot wait for LightPush peer: protocol not mounted");
-promises.push(waitForConnectedPeer(waku.lightPush.protocol, waku.libp2p.services.metadata));
-}
-if (protocols.includes(Protocols.Filter)) {
-if (!waku.filter)
-throw new Error("Cannot wait for Filter peer: protocol not mounted");
-promises.push(waitForConnectedPeer(waku.filter.protocol, waku.libp2p.services.metadata));
-}
-if (timeoutMs) {
-await rejectOnTimeout(Promise.all(promises), timeoutMs, "Timed out waiting for a remote peer.");
-}
-else {
-await Promise.all(promises);
-}
-}
-//TODO: move this function within protocol SDK class: https://github.com/waku-org/js-waku/issues/1761
-/**
-* Wait for a peer with the given protocol to be connected.
-* If sharding is enabled on the node, it will also wait for the peer to be confirmed by the metadata service.
-*/
-async function waitForConnectedPeer(protocol, metadataService) {
-const codec = protocol.multicodec;
-const peers = await protocol.connectedPeers();
-if (peers.length) {
-if (!metadataService) {
-log$3.info(`${codec} peer found: `, peers[0].id.toString());
-return;
-}
-// once a peer is connected, we need to confirm the metadata handshake with at least one of those peers if sharding is enabled
-try {
-await Promise.any(peers.map((peer) => metadataService.confirmOrAttemptHandshake(peer.id)));
-return;
-}
-catch (e) {
-if (e.code === "ERR_CONNECTION_BEING_CLOSED")
-log$3.error(`Connection with the peer was closed and possibly because it's on a different shard. Error: ${e}`);
-log$3.error(`Error waiting for handshake confirmation: ${e}`);
-}
-}
-log$3.info(`Waiting for ${codec} peer`);
-// else we'll just wait for the next peer to connect
-await new Promise((resolve) => {
-const cb = (evt) => {
-if (evt.detail?.protocols?.includes(codec)) {
-if (metadataService) {
-metadataService
-.confirmOrAttemptHandshake(evt.detail.peerId)
-.then(() => {
-protocol.removeLibp2pEventListener("peer:identify", cb);
-resolve();
-})
-.catch((e) => {
-if (e.code === "ERR_CONNECTION_BEING_CLOSED")
-log$3.error(`Connection with the peer was closed and possibly because it's on a different shard. Error: ${e}`);
-log$3.error(`Error waiting for handshake confirmation: ${e}`);
-});
-}
-else {
-protocol.removeLibp2pEventListener("peer:identify", cb);
-resolve();
-}
-}
-};
-protocol.addLibp2pEventListener("peer:identify", cb);
-});
-}
-/**
-* Wait for at least one peer with the given protocol to be connected and in the gossipsub
-* mesh for all pubsubTopics.
-*/
-async function waitForGossipSubPeerInMesh(waku) {
-let peers = waku.getMeshPeers();
-const pubsubTopics = waku.pubsubTopics;
-for (const topic of pubsubTopics) {
-while (peers.length == 0) {
-await pEvent(waku.gossipSub, "gossipsub:heartbeat");
-peers = waku.getMeshPeers(topic);
-}
-}
-}
-const awaitTimeout = (ms, rejectReason) => new Promise((_resolve, reject) => setTimeout(() => reject(rejectReason), ms));
-async function rejectOnTimeout(promise, timeoutMs, rejectReason) {
-await Promise.race([promise, awaitTimeout(timeoutMs, rejectReason)]);
-}
-function getEnabledProtocols(waku) {
-const protocols = [];
-if (waku.relay) {
-protocols.push(Protocols.Relay);
-}
-if (waku.filter) {
-protocols.push(Protocols.Filter);
-}
-if (waku.store) {
-protocols.push(Protocols.Store);
-}
-if (waku.lightPush) {
-protocols.push(Protocols.LightPush);
-}
-return protocols;
-}
-
 /** Noop for browser compatibility */
 function setMaxListeners$1() { }

@@ -3205,7 +2888,7 @@ class Metadata extends BaseProtocol {
 */
 async query(peerId) {
 const request = WakuMetadataRequest.encode(pubsubTopicsToShardInfo(this.pubsubTopics));
-const peer = await this.peerStore.get(peerId);
+const peer = await this.libp2pComponents.peerStore.get(peerId);
 if (!peer) {
 return {
 shardInfo: null,
@@ -3297,4 +2980,4 @@ function wakuMetadata(pubsubTopics) {
 return (components) => new Metadata(pubsubTopics, components);
 }

-export { ConnectionManager, FilterCodecs, FilterCore, KeepAliveManager, LightPushCodec, LightPushCore, MetadataCodec, StoreCore, createEncoder, getHealthManager, index$3 as message,
+export { ConnectionManager, FilterCodecs, FilterCore, KeepAliveManager, LightPushCodec, LightPushCore, MetadataCodec, StoreCodec, StoreCore, createEncoder, getHealthManager, index$3 as message, wakuMetadata, index$2 as waku_filter, index$1 as waku_light_push, index as waku_store };