@libp2p/gossipsub 14.1.1-6059227cb
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +85 -0
- package/dist/index.min.js +19 -0
- package/dist/index.min.js.map +7 -0
- package/dist/src/config.d.ts +32 -0
- package/dist/src/config.d.ts.map +1 -0
- package/dist/src/config.js +2 -0
- package/dist/src/config.js.map +1 -0
- package/dist/src/constants.d.ts +213 -0
- package/dist/src/constants.d.ts.map +1 -0
- package/dist/src/constants.js +217 -0
- package/dist/src/constants.js.map +1 -0
- package/dist/src/errors.d.ts +9 -0
- package/dist/src/errors.d.ts.map +1 -0
- package/dist/src/errors.js +15 -0
- package/dist/src/errors.js.map +1 -0
- package/dist/src/gossipsub.d.ts +419 -0
- package/dist/src/gossipsub.d.ts.map +1 -0
- package/dist/src/gossipsub.js +2520 -0
- package/dist/src/gossipsub.js.map +1 -0
- package/dist/src/index.d.ts +344 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/src/index.js +43 -0
- package/dist/src/index.js.map +1 -0
- package/dist/src/message/decodeRpc.d.ts +11 -0
- package/dist/src/message/decodeRpc.d.ts.map +1 -0
- package/dist/src/message/decodeRpc.js +10 -0
- package/dist/src/message/decodeRpc.js.map +1 -0
- package/dist/src/message/index.d.ts +2 -0
- package/dist/src/message/index.d.ts.map +1 -0
- package/dist/src/message/index.js +2 -0
- package/dist/src/message/index.js.map +1 -0
- package/dist/src/message/rpc.d.ts +99 -0
- package/dist/src/message/rpc.d.ts.map +1 -0
- package/dist/src/message/rpc.js +663 -0
- package/dist/src/message/rpc.js.map +1 -0
- package/dist/src/message-cache.d.ts +80 -0
- package/dist/src/message-cache.d.ts.map +1 -0
- package/dist/src/message-cache.js +144 -0
- package/dist/src/message-cache.js.map +1 -0
- package/dist/src/metrics.d.ts +467 -0
- package/dist/src/metrics.d.ts.map +1 -0
- package/dist/src/metrics.js +896 -0
- package/dist/src/metrics.js.map +1 -0
- package/dist/src/score/compute-score.d.ts +4 -0
- package/dist/src/score/compute-score.d.ts.map +1 -0
- package/dist/src/score/compute-score.js +75 -0
- package/dist/src/score/compute-score.js.map +1 -0
- package/dist/src/score/index.d.ts +4 -0
- package/dist/src/score/index.d.ts.map +1 -0
- package/dist/src/score/index.js +4 -0
- package/dist/src/score/index.js.map +1 -0
- package/dist/src/score/message-deliveries.d.ts +45 -0
- package/dist/src/score/message-deliveries.d.ts.map +1 -0
- package/dist/src/score/message-deliveries.js +75 -0
- package/dist/src/score/message-deliveries.js.map +1 -0
- package/dist/src/score/peer-score-params.d.ts +125 -0
- package/dist/src/score/peer-score-params.d.ts.map +1 -0
- package/dist/src/score/peer-score-params.js +159 -0
- package/dist/src/score/peer-score-params.js.map +1 -0
- package/dist/src/score/peer-score-thresholds.d.ts +31 -0
- package/dist/src/score/peer-score-thresholds.d.ts.map +1 -0
- package/dist/src/score/peer-score-thresholds.js +32 -0
- package/dist/src/score/peer-score-thresholds.js.map +1 -0
- package/dist/src/score/peer-score.d.ts +119 -0
- package/dist/src/score/peer-score.d.ts.map +1 -0
- package/dist/src/score/peer-score.js +459 -0
- package/dist/src/score/peer-score.js.map +1 -0
- package/dist/src/score/peer-stats.d.ts +32 -0
- package/dist/src/score/peer-stats.d.ts.map +1 -0
- package/dist/src/score/peer-stats.js +2 -0
- package/dist/src/score/peer-stats.js.map +1 -0
- package/dist/src/score/scoreMetrics.d.ts +23 -0
- package/dist/src/score/scoreMetrics.d.ts.map +1 -0
- package/dist/src/score/scoreMetrics.js +155 -0
- package/dist/src/score/scoreMetrics.js.map +1 -0
- package/dist/src/stream.d.ts +30 -0
- package/dist/src/stream.d.ts.map +1 -0
- package/dist/src/stream.js +55 -0
- package/dist/src/stream.js.map +1 -0
- package/dist/src/tracer.d.ts +53 -0
- package/dist/src/tracer.d.ts.map +1 -0
- package/dist/src/tracer.js +155 -0
- package/dist/src/tracer.js.map +1 -0
- package/dist/src/types.d.ts +148 -0
- package/dist/src/types.d.ts.map +1 -0
- package/dist/src/types.js +90 -0
- package/dist/src/types.js.map +1 -0
- package/dist/src/utils/buildRawMessage.d.ts +20 -0
- package/dist/src/utils/buildRawMessage.d.ts.map +1 -0
- package/dist/src/utils/buildRawMessage.js +151 -0
- package/dist/src/utils/buildRawMessage.js.map +1 -0
- package/dist/src/utils/create-gossip-rpc.d.ts +7 -0
- package/dist/src/utils/create-gossip-rpc.d.ts.map +1 -0
- package/dist/src/utils/create-gossip-rpc.js +31 -0
- package/dist/src/utils/create-gossip-rpc.js.map +1 -0
- package/dist/src/utils/index.d.ts +4 -0
- package/dist/src/utils/index.d.ts.map +1 -0
- package/dist/src/utils/index.js +4 -0
- package/dist/src/utils/index.js.map +1 -0
- package/dist/src/utils/messageIdToString.d.ts +5 -0
- package/dist/src/utils/messageIdToString.d.ts.map +1 -0
- package/dist/src/utils/messageIdToString.js +8 -0
- package/dist/src/utils/messageIdToString.js.map +1 -0
- package/dist/src/utils/msgIdFn.d.ts +10 -0
- package/dist/src/utils/msgIdFn.d.ts.map +1 -0
- package/dist/src/utils/msgIdFn.js +23 -0
- package/dist/src/utils/msgIdFn.js.map +1 -0
- package/dist/src/utils/multiaddr.d.ts +3 -0
- package/dist/src/utils/multiaddr.d.ts.map +1 -0
- package/dist/src/utils/multiaddr.js +15 -0
- package/dist/src/utils/multiaddr.js.map +1 -0
- package/dist/src/utils/publishConfig.d.ts +8 -0
- package/dist/src/utils/publishConfig.d.ts.map +1 -0
- package/dist/src/utils/publishConfig.js +25 -0
- package/dist/src/utils/publishConfig.js.map +1 -0
- package/dist/src/utils/set.d.ts +14 -0
- package/dist/src/utils/set.d.ts.map +1 -0
- package/dist/src/utils/set.js +41 -0
- package/dist/src/utils/set.js.map +1 -0
- package/dist/src/utils/shuffle.d.ts +7 -0
- package/dist/src/utils/shuffle.d.ts.map +1 -0
- package/dist/src/utils/shuffle.js +21 -0
- package/dist/src/utils/shuffle.js.map +1 -0
- package/dist/src/utils/time-cache.d.ts +22 -0
- package/dist/src/utils/time-cache.d.ts.map +1 -0
- package/dist/src/utils/time-cache.js +54 -0
- package/dist/src/utils/time-cache.js.map +1 -0
- package/package.json +142 -0
- package/src/config.ts +31 -0
- package/src/constants.ts +261 -0
- package/src/errors.ts +17 -0
- package/src/gossipsub.ts +3061 -0
- package/src/index.ts +404 -0
- package/src/message/decodeRpc.ts +19 -0
- package/src/message/index.ts +1 -0
- package/src/message/rpc.proto +58 -0
- package/src/message/rpc.ts +848 -0
- package/src/message-cache.ts +196 -0
- package/src/metrics.ts +1014 -0
- package/src/score/compute-score.ts +98 -0
- package/src/score/index.ts +3 -0
- package/src/score/message-deliveries.ts +95 -0
- package/src/score/peer-score-params.ts +316 -0
- package/src/score/peer-score-thresholds.ts +70 -0
- package/src/score/peer-score.ts +565 -0
- package/src/score/peer-stats.ts +33 -0
- package/src/score/scoreMetrics.ts +215 -0
- package/src/stream.ts +79 -0
- package/src/tracer.ts +177 -0
- package/src/types.ts +178 -0
- package/src/utils/buildRawMessage.ts +174 -0
- package/src/utils/create-gossip-rpc.ts +34 -0
- package/src/utils/index.ts +3 -0
- package/src/utils/messageIdToString.ts +8 -0
- package/src/utils/msgIdFn.ts +24 -0
- package/src/utils/multiaddr.ts +19 -0
- package/src/utils/publishConfig.ts +33 -0
- package/src/utils/set.ts +43 -0
- package/src/utils/shuffle.ts +21 -0
- package/src/utils/time-cache.ts +71 -0
|
@@ -0,0 +1,2520 @@
|
|
|
1
|
+
import { TypedEventEmitter, serviceCapabilities, serviceDependencies } from '@libp2p/interface';
|
|
2
|
+
import { peerIdFromMultihash, peerIdFromString } from '@libp2p/peer-id';
|
|
3
|
+
import { encode } from 'it-length-prefixed';
|
|
4
|
+
import { pipe } from 'it-pipe';
|
|
5
|
+
import { pushable } from 'it-pushable';
|
|
6
|
+
import * as Digest from 'multiformats/hashes/digest';
|
|
7
|
+
import * as constants from './constants.js';
|
|
8
|
+
import { ACCEPT_FROM_WHITELIST_DURATION_MS, ACCEPT_FROM_WHITELIST_MAX_MESSAGES, ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE, BACKOFF_SLACK } from './constants.js';
|
|
9
|
+
import { StrictNoSign, StrictSign, TopicValidatorResult } from "./index.js";
|
|
10
|
+
import { defaultDecodeRpcLimits } from './message/decodeRpc.js';
|
|
11
|
+
import { RPC } from './message/rpc.js';
|
|
12
|
+
import { MessageCache } from './message-cache.js';
|
|
13
|
+
import { ChurnReason, getMetrics, IHaveIgnoreReason, InclusionReason, ScorePenalty } from './metrics.js';
|
|
14
|
+
import { PeerScore, createPeerScoreParams, createPeerScoreThresholds } from './score/index.js';
|
|
15
|
+
import { computeAllPeersScoreWeights } from './score/scoreMetrics.js';
|
|
16
|
+
import { InboundStream, OutboundStream } from './stream.js';
|
|
17
|
+
import { IWantTracer } from './tracer.js';
|
|
18
|
+
import { ValidateError, MessageStatus, RejectReason, rejectReasonFromAcceptance } from './types.js';
|
|
19
|
+
import { buildRawMessage, validateToRawMessage } from './utils/buildRawMessage.js';
|
|
20
|
+
import { createGossipRpc, ensureControl } from './utils/create-gossip-rpc.js';
|
|
21
|
+
import { shuffle, messageIdToString } from './utils/index.js';
|
|
22
|
+
import { msgIdFnStrictNoSign, msgIdFnStrictSign } from './utils/msgIdFn.js';
|
|
23
|
+
import { multiaddrToIPStr } from './utils/multiaddr.js';
|
|
24
|
+
import { getPublishConfigFromPeerId } from './utils/publishConfig.js';
|
|
25
|
+
import { removeFirstNItemsFromSet, removeItemsFromSet } from './utils/set.js';
|
|
26
|
+
import { SimpleTimeCache } from './utils/time-cache.js';
|
|
27
|
+
// Lifecycle states for the GossipSub service. This is the compiled-TypeScript
// enum shape: one object carrying both the forward (name -> number) and the
// reverse (number -> name) mappings.
var GossipStatusCode;
(function (codes) {
    codes[codes['started'] = 0] = 'started';
    codes[codes['stopped'] = 1] = 'stopped';
})(GossipStatusCode || (GossipStatusCode = {}));
|
|
32
|
+
export class GossipSub extends TypedEventEmitter {
    /**
     * The signature policy to follow by default
     */
    globalSignaturePolicy;
    // Protocols spoken by this router, newest first; FloodsubID may be appended
    // by the constructor when `fallbackToFloodsub` is enabled
    protocols = [constants.GossipsubIDv12, constants.GossipsubIDv11, constants.GossipsubIDv10];
    // Signing configuration derived from the local peer id; set in start()
    publishConfig;
    // Optional user-supplied transform applied to message data; set in constructor
    dataTransform;
    // State
    // peer id string => PeerId of every connected gossipsub peer
    peers = new Map();
    // peer id string => InboundStream
    streamsInbound = new Map();
    // peer id string => OutboundStream
    streamsOutbound = new Map();
    /** Ensures outbound streams are created sequentially */
    outboundInflightQueue = pushable({ objectMode: true });
    /** Direct peers */
    direct = new Set();
    /** Floodsub peers */
    floodsubPeers = new Set();
    /** Cache of seen messages */
    seenCache;
    /**
     * Map of peer id and AcceptRequestWhileListEntry
     */
    acceptFromWhitelist = new Map();
    /**
     * Map of topics to which peers are subscribed to
     */
    topics = new Map();
    /**
     * List of our subscriptions
     */
    subscriptions = new Set();
    /**
     * Map of topic meshes
     * topic => peer id set
     */
    mesh = new Map();
    /**
     * Map of topics to set of peers. These mesh peers are the ones to which we are publishing without a topic membership
     * topic => peer id set
     */
    fanout = new Map();
    /**
     * Map of last publish time for fanout topics
     * topic => last publish time
     */
    fanoutLastpub = new Map();
    /**
     * Map of pending messages to gossip
     * peer id => control messages
     */
    gossip = new Map();
    /**
     * Map of control messages
     * peer id => control message
     */
    control = new Map();
    /**
     * Number of IHAVEs received from peer in the last heartbeat
     */
    peerhave = new Map();
    /** Number of messages we have asked from peer in the last heartbeat */
    iasked = new Map();
    /** Prune backoff map */
    backoff = new Map();
    /**
     * Connection direction cache, marks peers with outbound connections
     * peer id => direction
     */
    outbound = new Map();
    // Canonical message-id function; chosen in constructor from options or signature policy
    msgIdFn;
    /**
     * A fast message id function used for internal message de-duplication
     */
    fastMsgIdFn;
    // Converts a Uint8Array message id to a string cache key
    msgIdToStrFn;
    /** Maps fast message-id to canonical message-id */
    fastMsgIdCache;
    /**
     * Short term cache for published message ids. This is used for penalizing peers sending
     * our own messages back if the messages are anonymous or use a random author.
     */
    publishedMessageIds;
    /**
     * A message cache that contains the messages for last few heartbeat ticks
     */
    mcache;
    /** Peer score tracking */
    score;
    /**
     * Custom validator function per topic.
     * Must return or resolve quickly (< 100ms) to prevent causing penalties for late messages.
     * If you need to apply validation that may require longer times use `asyncValidation` option and callback the
     * validation result through `Gossipsub.reportValidationResult`
     */
    topicValidators = new Map();
    /**
     * Make this protected so child class may want to redirect to its own log.
     */
    log;
    /**
     * Number of heartbeats since the beginning of time
     * This allows us to amortize some resource cleanup -- eg: backoff cleanup
     */
    heartbeatTicks = 0;
    /**
     * Tracks IHAVE/IWANT promises broken by peers
     */
    gossipTracer;
    /**
     * Tracks IDONTWANT messages received by peers in the current heartbeat
     */
    idontwantCounts = new Map();
    /**
     * Tracks IDONTWANT messages received by peers and the heartbeat they were received in
     *
     * idontwants are stored for `mcacheLength` heartbeats before being pruned,
     * so this map is bounded by peerCount * idontwantMaxMessages * mcacheLength
     */
    idontwants = new Map();
    // libp2p components injected via the constructor (registrar, peerStore, logger, ...)
    components;
    // Timer handle for the delayed initial connection to direct peers; set in start()
    directPeerInitial = null;
    static multicodec = constants.GossipsubIDv12;
    // Options
    // Merged options (defaults + user overrides); frozen after construction
    opts;
    // Limits applied when decoding inbound RPC messages
    decodeRpcLimits;
    // Metrics collector, or null when no metricsRegister option was given
    metrics;
    // Lifecycle state; carries registrar/topology/timer handles while started
    status = { code: GossipStatusCode.stopped };
    maxInboundStreams;
    maxOutboundStreams;
    runOnLimitedConnection;
    // Set of topics messages are accepted for, or null to accept all topics
    allowedTopics;
    heartbeatTimer = null;
|
|
165
|
+
/**
 * Builds a GossipSub router.
 *
 * @param components - libp2p components (registrar, peerStore, logger, peerId, privateKey, ...)
 * @param options - user overrides; merged over library defaults below. Note that
 *   `scoreParams`/`scoreThresholds` are always normalized through their factory
 *   functions, so they win over any value spread in from `options`.
 * @throws if `globalSignaturePolicy` is unrecognized, or if `metricsRegister`
 *   is set without `metricsTopicStrToLabel`.
 */
constructor(components, options = {}) {
    super();
    const opts = {
        fallbackToFloodsub: true,
        floodPublish: true,
        batchPublish: false,
        tagMeshPeers: true,
        doPX: false,
        directPeers: [],
        D: constants.GossipsubD,
        Dlo: constants.GossipsubDlo,
        Dhi: constants.GossipsubDhi,
        Dscore: constants.GossipsubDscore,
        Dout: constants.GossipsubDout,
        Dlazy: constants.GossipsubDlazy,
        heartbeatInterval: constants.GossipsubHeartbeatInterval,
        fanoutTTL: constants.GossipsubFanoutTTL,
        mcacheLength: constants.GossipsubHistoryLength,
        mcacheGossip: constants.GossipsubHistoryGossip,
        seenTTL: constants.GossipsubSeenTTL,
        gossipsubIWantFollowupMs: constants.GossipsubIWantFollowupTime,
        prunePeers: constants.GossipsubPrunePeers,
        pruneBackoff: constants.GossipsubPruneBackoff,
        // NOTE: 'unsubcribeBackoff' (sic) is the published option name; renaming it
        // would break the public interface
        unsubcribeBackoff: constants.GossipsubUnsubscribeBackoff,
        graftFloodThreshold: constants.GossipsubGraftFloodThreshold,
        opportunisticGraftPeers: constants.GossipsubOpportunisticGraftPeers,
        opportunisticGraftTicks: constants.GossipsubOpportunisticGraftTicks,
        directConnectTicks: constants.GossipsubDirectConnectTicks,
        gossipFactor: constants.GossipsubGossipFactor,
        idontwantMinDataSize: constants.GossipsubIdontwantMinDataSize,
        idontwantMaxMessages: constants.GossipsubIdontwantMaxMessages,
        ...options,
        // normalized last so they cannot be clobbered by the spread above
        scoreParams: createPeerScoreParams(options.scoreParams),
        scoreThresholds: createPeerScoreThresholds(options.scoreThresholds)
    };
    this.components = components;
    this.decodeRpcLimits = opts.decodeRpcLimits ?? defaultDecodeRpcLimits;
    this.globalSignaturePolicy = opts.globalSignaturePolicy ?? StrictSign;
    // Also wants to get notified of peers connected using floodsub
    if (opts.fallbackToFloodsub) {
        this.protocols.push(constants.FloodsubID);
    }
    // From pubsub
    this.log = components.logger.forComponent(opts.debugName ?? 'libp2p:gossipsub');
    // Gossipsub
    this.opts = opts;
    this.direct = new Set(opts.directPeers.map((p) => p.id.toString()));
    this.seenCache = new SimpleTimeCache({ validityMs: opts.seenTTL });
    this.publishedMessageIds = new SimpleTimeCache({ validityMs: opts.seenTTL });
    if (options.msgIdFn != null) {
        // Use custom function
        this.msgIdFn = options.msgIdFn;
    }
    else {
        // Derive the message-id function from the signature policy
        switch (this.globalSignaturePolicy) {
            case StrictSign:
                this.msgIdFn = msgIdFnStrictSign;
                break;
            case StrictNoSign:
                this.msgIdFn = msgIdFnStrictNoSign;
                break;
            default:
                throw new Error(`Invalid globalSignaturePolicy: ${this.globalSignaturePolicy}`);
        }
    }
    // The fast-id cache only exists when a fast-id function is supplied
    if (options.fastMsgIdFn != null) {
        this.fastMsgIdFn = options.fastMsgIdFn;
        this.fastMsgIdCache = new SimpleTimeCache({ validityMs: opts.seenTTL });
    }
    // By default, gossipsub only provide a browser friendly function to convert Uint8Array message id to string.
    this.msgIdToStrFn = options.msgIdToStrFn ?? messageIdToString;
    this.mcache = options.messageCache ?? new MessageCache(opts.mcacheGossip, opts.mcacheLength, this.msgIdToStrFn);
    if (options.dataTransform != null) {
        this.dataTransform = options.dataTransform;
    }
    if (options.metricsRegister != null) {
        if (options.metricsTopicStrToLabel == null) {
            throw Error('Must set metricsTopicStrToLabel with metrics');
        }
        // in theory, each topic has its own meshMessageDeliveriesWindow param
        // however in lodestar, we configure it mostly the same so just pick the max of positive ones
        // (some topics have meshMessageDeliveriesWindow as 0)
        const maxMeshMessageDeliveriesWindowMs = Math.max(...Object.values(opts.scoreParams.topics).map((topicParam) => topicParam.meshMessageDeliveriesWindow), constants.DEFAULT_METRIC_MESH_MESSAGE_DELIVERIES_WINDOWS);
        const metrics = getMetrics(options.metricsRegister, options.metricsTopicStrToLabel, {
            gossipPromiseExpireSec: this.opts.gossipsubIWantFollowupMs / 1000,
            behaviourPenaltyThreshold: opts.scoreParams.behaviourPenaltyThreshold,
            maxMeshMessageDeliveriesWindowSec: maxMeshMessageDeliveriesWindowMs / 1000
        });
        // Piggyback on the mcacheSize scrape to refresh all gauge-style metrics
        metrics.mcacheSize.addCollect(() => { this.onScrapeMetrics(metrics); });
        for (const protocol of this.protocols) {
            metrics.protocolsEnabled.set({ protocol }, 1);
        }
        this.metrics = metrics;
    }
    else {
        this.metrics = null;
    }
    this.gossipTracer = new IWantTracer(this.opts.gossipsubIWantFollowupMs, this.msgIdToStrFn, this.metrics);
    /**
     * libp2p
     */
    this.score = new PeerScore(this.opts.scoreParams, this.metrics, this.components.logger, {
        scoreCacheValidityMs: opts.heartbeatInterval
    });
    this.maxInboundStreams = options.maxInboundStreams;
    this.maxOutboundStreams = options.maxOutboundStreams;
    this.runOnLimitedConnection = options.runOnLimitedConnection;
    this.allowedTopics = (opts.allowedTopics != null) ? new Set(opts.allowedTopics) : null;
}
|
|
274
|
+
// Identifies this service instance in debug/inspection output
[Symbol.toStringTag] = '@chainsafe/libp2p-gossipsub';
// Capability advertised to the libp2p service registry
[serviceCapabilities] = [
    '@libp2p/pubsub'
];
// Services this implementation expects libp2p to provide
[serviceDependencies] = [
    '@libp2p/identify'
];
|
|
281
|
+
getPeers() {
|
|
282
|
+
return [...this.peers.values()];
|
|
283
|
+
}
|
|
284
|
+
isStarted() {
|
|
285
|
+
return this.status.code === GossipStatusCode.started;
|
|
286
|
+
}
|
|
287
|
+
// LIFECYCLE METHODS
/**
 * Mounts the gossipsub protocol onto the libp2p node and sends our
 * subscriptions to every peer connected
 */
async start() {
    // From pubsub
    if (this.isStarted()) {
        return;
    }
    this.log('starting');
    this.publishConfig = getPublishConfigFromPeerId(this.globalSignaturePolicy, this.components.peerId, this.components.privateKey);
    // Create the outbound inflight queue
    // This ensures that outbound stream creation happens sequentially
    this.outboundInflightQueue = pushable({ objectMode: true });
    pipe(this.outboundInflightQueue, async (source) => {
        for await (const { peerId, connection } of source) {
            await this.createOutboundStream(peerId, connection);
        }
    }).catch((e) => { this.log.error('outbound inflight queue error', e); });
    // set direct peer addresses in the address book
    await Promise.all(this.opts.directPeers.map(async (p) => {
        await this.components.peerStore.merge(p.id, {
            multiaddrs: p.addrs
        });
    }));
    const registrar = this.components.registrar;
    // Incoming streams
    // Called after a peer dials us
    await Promise.all(this.protocols.map(async (protocol) => registrar.handle(protocol, this.onIncomingStream.bind(this), {
        maxInboundStreams: this.maxInboundStreams,
        maxOutboundStreams: this.maxOutboundStreams,
        runOnLimitedConnection: this.runOnLimitedConnection
    })));
    // # How does Gossipsub interact with libp2p? Rough guide from Mar 2022
    //
    // ## Setup:
    // Gossipsub requests libp2p to callback, TBD
    //
    // `this.libp2p.handle()` registers a handler for `/meshsub/1.1.0` and other Gossipsub protocols
    // The handler callback is registered in libp2p Upgrader.protocols map.
    //
    // Upgrader receives an inbound connection from some transport and (`Upgrader.upgradeInbound`):
    // - Adds encryption (NOISE in our case)
    // - Multiplex stream
    // - Create a muxer and register that for each new stream call Upgrader.protocols handler
    //
    // ## Topology
    // - new instance of Topology (unlinked to libp2p) with handlers
    // - registar.register(topology)
    //     register protocol with topology
    //     Topology callbacks called on connection manager changes
    const topology = {
        onConnect: this.onPeerConnected.bind(this),
        onDisconnect: this.onPeerDisconnected.bind(this),
        notifyOnLimitedConnection: this.runOnLimitedConnection
    };
    const registrarTopologyIds = await Promise.all(this.protocols.map(async (protocol) => registrar.register(protocol, topology)));
    // Schedule to start heartbeat after `GossipsubHeartbeatInitialDelay`
    // NOTE(review): `this.runHeartbeat` is passed unbound; presumably it is an
    // arrow-function property defined elsewhere in this class — confirm
    const heartbeatTimeout = setTimeout(this.runHeartbeat, constants.GossipsubHeartbeatInitialDelay);
    // Then, run heartbeat every `heartbeatInterval` offset by `GossipsubHeartbeatInitialDelay`
    this.status = {
        code: GossipStatusCode.started,
        registrarTopologyIds,
        heartbeatTimeout,
        // NOTE: 'hearbeatStartMs' (sic) is the field name read elsewhere; keep the typo
        hearbeatStartMs: Date.now() + constants.GossipsubHeartbeatInitialDelay
    };
    this.score.start();
    // connect to direct peers
    this.directPeerInitial = setTimeout(() => {
        Promise.resolve()
            .then(async () => {
            await Promise.all(Array.from(this.direct).map(async (id) => this.connect(id)));
        })
            .catch((err) => {
            this.log(err);
        });
    }, constants.GossipsubDirectConnectInitialDelay);
    if (this.opts.tagMeshPeers) {
        this.addEventListener('gossipsub:graft', this.tagMeshPeer);
        this.addEventListener('gossipsub:prune', this.untagMeshPeer);
    }
    this.log('started');
}
|
|
371
|
+
/**
|
|
372
|
+
* Unmounts the gossipsub protocol and shuts down every connection
|
|
373
|
+
*/
|
|
374
|
+
async stop() {
|
|
375
|
+
this.log('stopping');
|
|
376
|
+
// From pubsub
|
|
377
|
+
if (this.status.code !== GossipStatusCode.started) {
|
|
378
|
+
return;
|
|
379
|
+
}
|
|
380
|
+
const { registrarTopologyIds } = this.status;
|
|
381
|
+
this.status = { code: GossipStatusCode.stopped };
|
|
382
|
+
if (this.opts.tagMeshPeers) {
|
|
383
|
+
this.removeEventListener('gossipsub:graft', this.tagMeshPeer);
|
|
384
|
+
this.removeEventListener('gossipsub:prune', this.untagMeshPeer);
|
|
385
|
+
}
|
|
386
|
+
// unregister protocol and handlers
|
|
387
|
+
const registrar = this.components.registrar;
|
|
388
|
+
await Promise.all(this.protocols.map(async (protocol) => registrar.unhandle(protocol)));
|
|
389
|
+
registrarTopologyIds.forEach((id) => { registrar.unregister(id); });
|
|
390
|
+
this.outboundInflightQueue.end();
|
|
391
|
+
const closePromises = [];
|
|
392
|
+
for (const outboundStream of this.streamsOutbound.values()) {
|
|
393
|
+
closePromises.push(outboundStream.close());
|
|
394
|
+
}
|
|
395
|
+
this.streamsOutbound.clear();
|
|
396
|
+
for (const inboundStream of this.streamsInbound.values()) {
|
|
397
|
+
closePromises.push(inboundStream.close());
|
|
398
|
+
}
|
|
399
|
+
this.streamsInbound.clear();
|
|
400
|
+
await Promise.all(closePromises);
|
|
401
|
+
this.peers.clear();
|
|
402
|
+
this.subscriptions.clear();
|
|
403
|
+
// Gossipsub
|
|
404
|
+
if (this.heartbeatTimer != null) {
|
|
405
|
+
this.heartbeatTimer.cancel();
|
|
406
|
+
this.heartbeatTimer = null;
|
|
407
|
+
}
|
|
408
|
+
this.score.stop();
|
|
409
|
+
this.mesh.clear();
|
|
410
|
+
this.fanout.clear();
|
|
411
|
+
this.fanoutLastpub.clear();
|
|
412
|
+
this.gossip.clear();
|
|
413
|
+
this.control.clear();
|
|
414
|
+
this.peerhave.clear();
|
|
415
|
+
this.iasked.clear();
|
|
416
|
+
this.backoff.clear();
|
|
417
|
+
this.outbound.clear();
|
|
418
|
+
this.gossipTracer.clear();
|
|
419
|
+
this.seenCache.clear();
|
|
420
|
+
if (this.fastMsgIdCache != null) {
|
|
421
|
+
this.fastMsgIdCache.clear();
|
|
422
|
+
}
|
|
423
|
+
if (this.directPeerInitial != null) {
|
|
424
|
+
clearTimeout(this.directPeerInitial);
|
|
425
|
+
}
|
|
426
|
+
this.idontwantCounts.clear();
|
|
427
|
+
this.idontwants.clear();
|
|
428
|
+
this.log('stopped');
|
|
429
|
+
}
|
|
430
|
+
/** FOR DEBUG ONLY - Dump peer stats for all peers. Data is cloned, safe to mutate */
|
|
431
|
+
dumpPeerScoreStats() {
|
|
432
|
+
return this.score.dumpPeerScoreStats();
|
|
433
|
+
}
|
|
434
|
+
/**
|
|
435
|
+
* On an inbound stream opened
|
|
436
|
+
*/
|
|
437
|
+
onIncomingStream(stream, connection) {
|
|
438
|
+
if (!this.isStarted()) {
|
|
439
|
+
return;
|
|
440
|
+
}
|
|
441
|
+
const peerId = connection.remotePeer;
|
|
442
|
+
// add peer to router
|
|
443
|
+
this.addPeer(peerId, connection.direction, connection.remoteAddr);
|
|
444
|
+
// create inbound stream
|
|
445
|
+
this.createInboundStream(peerId, stream);
|
|
446
|
+
// attempt to create outbound stream
|
|
447
|
+
this.outboundInflightQueue.push({ peerId, connection });
|
|
448
|
+
}
|
|
449
|
+
/**
|
|
450
|
+
* Registrar notifies an established connection with pubsub protocol
|
|
451
|
+
*/
|
|
452
|
+
onPeerConnected(peerId, connection) {
|
|
453
|
+
this.metrics?.newConnectionCount.inc({ status: connection.status });
|
|
454
|
+
// libp2p may emit a closed connection and never issue peer:disconnect event
|
|
455
|
+
// see https://github.com/ChainSafe/js-libp2p-gossipsub/issues/398
|
|
456
|
+
if (!this.isStarted() || connection.status !== 'open') {
|
|
457
|
+
return;
|
|
458
|
+
}
|
|
459
|
+
this.addPeer(peerId, connection.direction, connection.remoteAddr);
|
|
460
|
+
this.outboundInflightQueue.push({ peerId, connection });
|
|
461
|
+
}
|
|
462
|
+
/**
 * Registrar notifies a closing connection with pubsub protocol.
 * Removes the peer and tears down its streams via removePeer().
 */
onPeerDisconnected(peerId) {
    this.log('connection ended %p', peerId);
    this.removePeer(peerId);
}
|
|
469
|
+
async createOutboundStream(peerId, connection) {
|
|
470
|
+
if (!this.isStarted()) {
|
|
471
|
+
return;
|
|
472
|
+
}
|
|
473
|
+
const id = peerId.toString();
|
|
474
|
+
if (!this.peers.has(id)) {
|
|
475
|
+
return;
|
|
476
|
+
}
|
|
477
|
+
// TODO make this behavior more robust
|
|
478
|
+
// This behavior is different than for inbound streams
|
|
479
|
+
// If an outbound stream already exists, don't create a new stream
|
|
480
|
+
if (this.streamsOutbound.has(id)) {
|
|
481
|
+
return;
|
|
482
|
+
}
|
|
483
|
+
try {
|
|
484
|
+
const stream = new OutboundStream(await connection.newStream(this.protocols, {
|
|
485
|
+
runOnLimitedConnection: this.runOnLimitedConnection
|
|
486
|
+
}), (e) => { this.log.error('outbound pipe error', e); }, { maxBufferSize: this.opts.maxOutboundBufferSize });
|
|
487
|
+
this.log('create outbound stream %p', peerId);
|
|
488
|
+
this.streamsOutbound.set(id, stream);
|
|
489
|
+
const protocol = stream.protocol;
|
|
490
|
+
if (protocol === constants.FloodsubID) {
|
|
491
|
+
this.floodsubPeers.add(id);
|
|
492
|
+
}
|
|
493
|
+
this.metrics?.peersPerProtocol.inc({ protocol }, 1);
|
|
494
|
+
// Immediately send own subscriptions via the newly attached stream
|
|
495
|
+
if (this.subscriptions.size > 0) {
|
|
496
|
+
this.log('send subscriptions to', id);
|
|
497
|
+
this.sendSubscriptions(id, Array.from(this.subscriptions), true);
|
|
498
|
+
}
|
|
499
|
+
}
|
|
500
|
+
catch (e) {
|
|
501
|
+
this.log.error('createOutboundStream error', e);
|
|
502
|
+
}
|
|
503
|
+
}
|
|
504
|
+
createInboundStream(peerId, stream) {
|
|
505
|
+
if (!this.isStarted()) {
|
|
506
|
+
return;
|
|
507
|
+
}
|
|
508
|
+
const id = peerId.toString();
|
|
509
|
+
if (!this.peers.has(id)) {
|
|
510
|
+
return;
|
|
511
|
+
}
|
|
512
|
+
// TODO make this behavior more robust
|
|
513
|
+
// This behavior is different than for outbound streams
|
|
514
|
+
// If a peer initiates a new inbound connection
|
|
515
|
+
// we assume that one is the new canonical inbound stream
|
|
516
|
+
const priorInboundStream = this.streamsInbound.get(id);
|
|
517
|
+
if (priorInboundStream !== undefined) {
|
|
518
|
+
this.log('replacing existing inbound steam %s', id);
|
|
519
|
+
priorInboundStream.close().catch((err) => { this.log.error(err); });
|
|
520
|
+
}
|
|
521
|
+
this.log('create inbound stream %s', id);
|
|
522
|
+
const inboundStream = new InboundStream(stream, { maxDataLength: this.opts.maxInboundDataLength });
|
|
523
|
+
this.streamsInbound.set(id, inboundStream);
|
|
524
|
+
this.pipePeerReadStream(peerId, inboundStream.source).catch((err) => { this.log(err); });
|
|
525
|
+
}
|
|
526
|
+
/**
|
|
527
|
+
* Add a peer to the router
|
|
528
|
+
*/
|
|
529
|
+
addPeer(peerId, direction, addr) {
|
|
530
|
+
const id = peerId.toString();
|
|
531
|
+
if (!this.peers.has(id)) {
|
|
532
|
+
this.peers.set(id, peerId);
|
|
533
|
+
// Add to peer scoring
|
|
534
|
+
this.score.addPeer(id);
|
|
535
|
+
const currentIP = multiaddrToIPStr(addr);
|
|
536
|
+
if (currentIP !== null) {
|
|
537
|
+
this.score.addIP(id, currentIP);
|
|
538
|
+
}
|
|
539
|
+
else {
|
|
540
|
+
this.log('Added peer has no IP in current address %s %s', id, addr.toString());
|
|
541
|
+
}
|
|
542
|
+
// track the connection direction. Don't allow to unset outbound
|
|
543
|
+
if (!this.outbound.has(id)) {
|
|
544
|
+
this.outbound.set(id, direction === 'outbound');
|
|
545
|
+
}
|
|
546
|
+
}
|
|
547
|
+
}
|
|
548
|
+
/**
 * Removes a peer from the router: closes and drops its streams, then purges
 * the peer id from every per-peer data structure (topics, mesh, fanout,
 * floodsub set, gossip/control queues, direction map, IDONTWANT tracking,
 * scoring and the accept-from whitelist). No-op for unknown peers.
 * The teardown order mirrors addPeer()/stream setup; keep it as-is.
 */
removePeer(peerId) {
    const id = peerId.toString();
    if (!this.peers.has(id)) {
        return;
    }
    // delete peer
    this.log('delete peer %p', peerId);
    this.peers.delete(id);
    const outboundStream = this.streamsOutbound.get(id);
    const inboundStream = this.streamsInbound.get(id);
    // decrement the per-protocol gauge that createOutboundStream incremented
    if (outboundStream != null) {
        this.metrics?.peersPerProtocol.inc({ protocol: outboundStream.protocol }, -1);
    }
    // close streams (best-effort: close errors are only logged)
    outboundStream?.close().catch((err) => { this.log.error(err); });
    inboundStream?.close().catch((err) => { this.log.error(err); });
    // remove streams
    this.streamsOutbound.delete(id);
    this.streamsInbound.delete(id);
    // remove peer from topics map
    for (const peers of this.topics.values()) {
        peers.delete(id);
    }
    // Remove this peer from the mesh, recording a disconnect churn metric per topic
    for (const [topicStr, peers] of this.mesh) {
        if (peers.delete(id)) {
            this.metrics?.onRemoveFromMesh(topicStr, ChurnReason.Dc, 1);
        }
    }
    // Remove this peer from the fanout
    for (const peers of this.fanout.values()) {
        peers.delete(id);
    }
    // Remove from floodsubPeers
    this.floodsubPeers.delete(id);
    // Remove from gossip mapping
    this.gossip.delete(id);
    // Remove from control mapping
    this.control.delete(id);
    // Remove connection-direction tracking (set in addPeer)
    this.outbound.delete(id);
    // Remove from idontwant tracking
    this.idontwantCounts.delete(id);
    this.idontwants.delete(id);
    // Remove from peer scoring
    this.score.removePeer(id);
    this.acceptFromWhitelist.delete(id);
}
|
|
599
|
+
// API METHODS
|
|
600
|
+
get started() {
|
|
601
|
+
return this.status.code === GossipStatusCode.started;
|
|
602
|
+
}
|
|
603
|
+
/**
|
|
604
|
+
* Get a the peer-ids in a topic mesh
|
|
605
|
+
*/
|
|
606
|
+
getMeshPeers(topic) {
|
|
607
|
+
const peersInTopic = this.mesh.get(topic);
|
|
608
|
+
return (peersInTopic != null) ? Array.from(peersInTopic) : [];
|
|
609
|
+
}
|
|
610
|
+
/**
|
|
611
|
+
* Get a list of the peer-ids that are subscribed to one topic.
|
|
612
|
+
*/
|
|
613
|
+
getSubscribers(topic) {
|
|
614
|
+
const peersInTopic = this.topics.get(topic);
|
|
615
|
+
return ((peersInTopic != null) ? Array.from(peersInTopic) : []).map((str) => this.peers.get(str) ?? peerIdFromString(str));
|
|
616
|
+
}
|
|
617
|
+
/**
|
|
618
|
+
* Get the list of topics which the peer is subscribed to.
|
|
619
|
+
*/
|
|
620
|
+
getTopics() {
|
|
621
|
+
return Array.from(this.subscriptions);
|
|
622
|
+
}
|
|
623
|
+
// TODO: Reviewing Pubsub API
|
|
624
|
+
// MESSAGE METHODS
|
|
625
|
+
/**
 * Responsible for processing each RPC message received by other peers.
 *
 * Reads length-delimited frames from the peer's inbound stream, decodes each
 * one as an RPC (with the configured decode limits as DoS protection) and
 * dispatches it to handleReceivedRpc(). Per-frame errors are caught so a bad
 * frame never tears the stream down; only a failure of the pipe itself
 * reaches handlePeerReadStreamError().
 */
async pipePeerReadStream(peerId, stream) {
    try {
        await pipe(stream, async (source) => {
            for await (const data of source) {
                try {
                    // TODO: Check max gossip message size, before decodeRpc()
                    const rpcBytes = data.subarray();
                    // Note: This function may throw, it must be wrapped in a try {} catch {} to prevent closing the stream.
                    // TODO: What should we do if the entire RPC is invalid?
                    const rpc = RPC.decode(rpcBytes, {
                        limits: {
                            subscriptions: this.decodeRpcLimits.maxSubscriptions,
                            messages: this.decodeRpcLimits.maxMessages,
                            control$: {
                                ihave: this.decodeRpcLimits.maxIhaveMessageIDs,
                                iwant: this.decodeRpcLimits.maxIwantMessageIDs,
                                graft: this.decodeRpcLimits.maxControlMessages,
                                prune: this.decodeRpcLimits.maxControlMessages,
                                prune$: {
                                    peers: this.decodeRpcLimits.maxPeerInfos
                                },
                                idontwant: this.decodeRpcLimits.maxControlMessages,
                                idontwant$: {
                                    messageIDs: this.decodeRpcLimits.maxIdontwantMessageIDs
                                }
                            }
                        }
                    });
                    this.metrics?.onRpcRecv(rpc, rpcBytes.length);
                    // Since processRpc may be overridden entirely in unsafe ways,
                    // the simplest/safest option here is to wrap in a function and capture all errors
                    // to prevent a top-level unhandled exception
                    // This processing of rpc messages should happen without awaiting full validation/execution of prior messages
                    if (this.opts.awaitRpcHandler) {
                        try {
                            await this.handleReceivedRpc(peerId, rpc);
                        }
                        catch (err) {
                            this.metrics?.onRpcRecvError();
                            this.log(err);
                        }
                    }
                    else {
                        // fire-and-forget: next frame is read while this RPC is processed
                        this.handleReceivedRpc(peerId, rpc).catch((err) => {
                            this.metrics?.onRpcRecvError();
                            this.log(err);
                        });
                    }
                }
                catch (e) {
                    // decode (or sync handler) failure for this frame only
                    this.metrics?.onRpcDataError();
                    this.log(e);
                }
            }
        });
    }
    catch (err) {
        // the pipe itself failed: treat the peer as disconnected
        this.metrics?.onPeerReadStreamError();
        this.handlePeerReadStreamError(err, peerId);
    }
}
|
|
689
|
+
/**
 * Handle an error thrown by a peer's read-stream pipe. Logs the error and
 * treats the peer as disconnected. Kept as a separate method mainly so
 * tests can spy on the error handling.
 */
handlePeerReadStreamError(err, peerId) {
    this.log.error(err);
    this.onPeerDisconnected(peerId);
}
|
|
697
|
+
/**
 * Handles an rpc request from a peer: graylist check, subscription updates,
 * message processing, then control messages — in that order.
 */
async handleReceivedRpc(from, rpc) {
    // Check if peer is graylisted in which case we ignore the event
    if (!this.acceptFrom(from.toString())) {
        this.log('received message from unacceptable peer %p', from);
        this.metrics?.rpcRecvNotAccepted.inc();
        return;
    }
    // Counts below are for the debug log line only
    const subscriptions = (rpc.subscriptions != null) ? rpc.subscriptions.length : 0;
    const messages = (rpc.messages != null) ? rpc.messages.length : 0;
    let ihave = 0;
    let iwant = 0;
    let graft = 0;
    let prune = 0;
    if (rpc.control != null) {
        if (rpc.control.ihave != null) {
            ihave = rpc.control.ihave.length;
        }
        if (rpc.control.iwant != null) {
            iwant = rpc.control.iwant.length;
        }
        if (rpc.control.graft != null) {
            graft = rpc.control.graft.length;
        }
        if (rpc.control.prune != null) {
            prune = rpc.control.prune.length;
        }
    }
    this.log(`rpc.from ${from.toString()} subscriptions ${subscriptions} messages ${messages} ihave ${ihave} iwant ${iwant} graft ${graft} prune ${prune}`);
    // Handle received subscriptions
    if ((rpc.subscriptions != null) && rpc.subscriptions.length > 0) {
        // update peer subscriptions
        // NOTE: this local shadows the count variable above; it collects the
        // accepted {topic, subscribe} pairs for the subscription-change event
        const subscriptions = [];
        rpc.subscriptions.forEach((subOpt) => {
            const topic = subOpt.topic;
            const subscribe = subOpt.subscribe === true;
            if (topic != null) {
                if ((this.allowedTopics != null) && !this.allowedTopics.has(topic)) {
                    // Not allowed: subscription data-structures are not bounded by topic count
                    // TODO: Should apply behaviour penalties?
                    return;
                }
                this.handleReceivedSubscription(from, topic, subscribe);
                subscriptions.push({ topic, subscribe });
            }
        });
        this.safeDispatchEvent('subscription-change', {
            detail: { peerId: from, subscriptions }
        });
    }
    // Handle messages
    // TODO: (up to limit)
    for (const message of rpc.messages) {
        if ((this.allowedTopics != null) && !this.allowedTopics.has(message.topic)) {
            // Not allowed: message cache data-structures are not bounded by topic count
            // TODO: Should apply behaviour penalties?
            continue;
        }
        const handleReceivedMessagePromise = this.handleReceivedMessage(from, message)
            // Should never throw, but handle just in case
            .catch((err) => {
            this.metrics?.onMsgRecvError(message.topic);
            this.log(err);
        });
        // Optionally serialize message handling; otherwise all messages in
        // this RPC are processed concurrently (errors caught above)
        if (this.opts.awaitRpcMessageHandler) {
            await handleReceivedMessagePromise;
        }
    }
    // Handle control messages
    if (rpc.control != null) {
        await this.handleControlMessage(from.toString(), rpc.control);
    }
}
|
|
772
|
+
/**
|
|
773
|
+
* Handles a subscription change from a peer
|
|
774
|
+
*/
|
|
775
|
+
handleReceivedSubscription(from, topic, subscribe) {
|
|
776
|
+
this.log('subscription update from %p topic %s', from, topic);
|
|
777
|
+
let topicSet = this.topics.get(topic);
|
|
778
|
+
if (topicSet == null) {
|
|
779
|
+
topicSet = new Set();
|
|
780
|
+
this.topics.set(topic, topicSet);
|
|
781
|
+
}
|
|
782
|
+
if (subscribe) {
|
|
783
|
+
// subscribe peer to new topic
|
|
784
|
+
topicSet.add(from.toString());
|
|
785
|
+
}
|
|
786
|
+
else {
|
|
787
|
+
// unsubscribe from existing topic
|
|
788
|
+
topicSet.delete(from.toString());
|
|
789
|
+
}
|
|
790
|
+
// TODO: rust-libp2p has A LOT more logic here
|
|
791
|
+
}
|
|
792
|
+
/**
 * Handles a newly received message from an RPC.
 * May forward to all peers in the mesh.
 *
 * Runs pre-validation (signature policy, data transform, dedup caches,
 * optional topic validator) and then branches on the result:
 * duplicate -> score bookkeeping only; invalid -> reject/penalize;
 * valid -> cache, emit to subscribers, and (unless asyncValidation) forward.
 */
async handleReceivedMessage(from, rpcMsg) {
    this.metrics?.onMsgRecvPreValidation(rpcMsg.topic);
    const validationResult = await this.validateReceivedMessage(from, rpcMsg);
    this.metrics?.onPrevalidationResult(rpcMsg.topic, validationResult.code);
    const validationCode = validationResult.code;
    switch (validationCode) {
        case MessageStatus.duplicate:
            // Report the duplicate
            this.score.duplicateMessage(from.toString(), validationResult.msgIdStr, rpcMsg.topic);
            // due to the collision of fastMsgIdFn, 2 different messages may end up the same fastMsgId
            // so we need to also mark the duplicate message as delivered or the promise is not resolved
            // and peer gets penalized. See https://github.com/ChainSafe/js-libp2p-gossipsub/pull/385
            this.gossipTracer.deliverMessage(validationResult.msgIdStr, true);
            this.mcache.observeDuplicate(validationResult.msgIdStr, from.toString());
            return;
        case MessageStatus.invalid:
            // invalid messages received
            // metrics.register_invalid_message(&raw_message.topic)
            // Tell peer_score about reject
            // Reject the original source, and any duplicates we've seen from other peers.
            if (validationResult.msgIdStr != null) {
                const msgIdStr = validationResult.msgIdStr;
                this.score.rejectMessage(from.toString(), msgIdStr, rpcMsg.topic, validationResult.reason);
                this.gossipTracer.rejectMessage(msgIdStr, validationResult.reason);
            }
            else {
                // no msg id could be computed (e.g. transform/signature failure)
                this.score.rejectInvalidMessage(from.toString(), rpcMsg.topic);
            }
            this.metrics?.onMsgRecvInvalid(rpcMsg.topic, validationResult);
            return;
        case MessageStatus.valid:
            // Tells score that message arrived (but is maybe not fully validated yet).
            // Consider the message as delivered for gossip promises.
            this.score.validateMessage(validationResult.messageId.msgIdStr);
            this.gossipTracer.deliverMessage(validationResult.messageId.msgIdStr);
            // Add the message to our memcache
            // if no validation is required, mark the message as validated
            this.mcache.put(validationResult.messageId, rpcMsg, !this.opts.asyncValidation);
            // Dispatch the message to the user if we are subscribed to the topic
            if (this.subscriptions.has(rpcMsg.topic)) {
                const isFromSelf = this.components.peerId.equals(from);
                if (!isFromSelf || this.opts.emitSelf) {
                    super.dispatchEvent(new CustomEvent('gossipsub:message', {
                        detail: {
                            propagationSource: from,
                            msgId: validationResult.messageId.msgIdStr,
                            msg: validationResult.msg
                        }
                    }));
                    // TODO: Add option to switch between emit per topic or all messages in one
                    super.dispatchEvent(new CustomEvent('message', { detail: validationResult.msg }));
                }
            }
            // Forward the message to mesh peers, if no validation is required
            // If asyncValidation is ON, expect the app layer to call reportMessageValidationResult(), then forward
            if (!this.opts.asyncValidation) {
                // TODO: in rust-libp2p
                // .forward_msg(&msg_id, raw_message, Some(propagation_source))
                this.forwardMessage(validationResult.messageId.msgIdStr, rpcMsg, from.toString());
            }
            break;
        default:
            throw new Error(`Invalid validation result: ${validationCode}`);
    }
}
|
|
861
|
+
/**
|
|
862
|
+
* Handles a newly received message from an RPC.
|
|
863
|
+
* May forward to all peers in the mesh.
|
|
864
|
+
*/
|
|
865
|
+
async validateReceivedMessage(propagationSource, rpcMsg) {
|
|
866
|
+
// Fast message ID stuff
|
|
867
|
+
const fastMsgIdStr = this.fastMsgIdFn?.(rpcMsg);
|
|
868
|
+
const msgIdCached = fastMsgIdStr !== undefined ? this.fastMsgIdCache?.get(fastMsgIdStr) : undefined;
|
|
869
|
+
if (msgIdCached != null) {
|
|
870
|
+
// This message has been seen previously. Ignore it
|
|
871
|
+
return { code: MessageStatus.duplicate, msgIdStr: msgIdCached };
|
|
872
|
+
}
|
|
873
|
+
// Perform basic validation on message and convert to RawGossipsubMessage for fastMsgIdFn()
|
|
874
|
+
const validationResult = await validateToRawMessage(this.globalSignaturePolicy, rpcMsg);
|
|
875
|
+
if (!validationResult.valid) {
|
|
876
|
+
return { code: MessageStatus.invalid, reason: RejectReason.Error, error: validationResult.error };
|
|
877
|
+
}
|
|
878
|
+
const msg = validationResult.message;
|
|
879
|
+
// Try and perform the data transform to the message. If it fails, consider it invalid.
|
|
880
|
+
try {
|
|
881
|
+
if (this.dataTransform != null) {
|
|
882
|
+
msg.data = this.dataTransform.inboundTransform(rpcMsg.topic, msg.data);
|
|
883
|
+
}
|
|
884
|
+
}
|
|
885
|
+
catch (e) {
|
|
886
|
+
this.log('Invalid message, transform failed', e);
|
|
887
|
+
return { code: MessageStatus.invalid, reason: RejectReason.Error, error: ValidateError.TransformFailed };
|
|
888
|
+
}
|
|
889
|
+
// TODO: Check if message is from a blacklisted source or propagation origin
|
|
890
|
+
// - Reject any message from a blacklisted peer
|
|
891
|
+
// - Also reject any message that originated from a blacklisted peer
|
|
892
|
+
// - reject messages claiming to be from ourselves but not locally published
|
|
893
|
+
// Calculate the message id on the transformed data.
|
|
894
|
+
const msgId = await this.msgIdFn(msg);
|
|
895
|
+
const msgIdStr = this.msgIdToStrFn(msgId);
|
|
896
|
+
const messageId = { msgId, msgIdStr };
|
|
897
|
+
// Add the message to the duplicate caches
|
|
898
|
+
if (fastMsgIdStr !== undefined && (this.fastMsgIdCache != null)) {
|
|
899
|
+
const collision = this.fastMsgIdCache.put(fastMsgIdStr, msgIdStr);
|
|
900
|
+
if (collision) {
|
|
901
|
+
this.metrics?.fastMsgIdCacheCollision.inc();
|
|
902
|
+
}
|
|
903
|
+
}
|
|
904
|
+
if (this.seenCache.has(msgIdStr)) {
|
|
905
|
+
return { code: MessageStatus.duplicate, msgIdStr };
|
|
906
|
+
}
|
|
907
|
+
else {
|
|
908
|
+
this.seenCache.put(msgIdStr);
|
|
909
|
+
}
|
|
910
|
+
// possibly send IDONTWANTs to mesh peers
|
|
911
|
+
if ((rpcMsg.data?.length ?? 0) >= this.opts.idontwantMinDataSize) {
|
|
912
|
+
this.sendIDontWants(msgId, rpcMsg.topic, propagationSource.toString());
|
|
913
|
+
}
|
|
914
|
+
// (Optional) Provide custom validation here with dynamic validators per topic
|
|
915
|
+
// NOTE: This custom topicValidator() must resolve fast (< 100ms) to allow scores
|
|
916
|
+
// to not penalize peers for long validation times.
|
|
917
|
+
const topicValidator = this.topicValidators.get(rpcMsg.topic);
|
|
918
|
+
if (topicValidator != null) {
|
|
919
|
+
let acceptance;
|
|
920
|
+
// Use try {} catch {} in case topicValidator() is synchronous
|
|
921
|
+
try {
|
|
922
|
+
acceptance = await topicValidator(propagationSource, msg);
|
|
923
|
+
}
|
|
924
|
+
catch (e) {
|
|
925
|
+
const errCode = e.code;
|
|
926
|
+
if (errCode === constants.ERR_TOPIC_VALIDATOR_IGNORE) {
|
|
927
|
+
acceptance = TopicValidatorResult.Ignore;
|
|
928
|
+
}
|
|
929
|
+
if (errCode === constants.ERR_TOPIC_VALIDATOR_REJECT) {
|
|
930
|
+
acceptance = TopicValidatorResult.Reject;
|
|
931
|
+
}
|
|
932
|
+
else {
|
|
933
|
+
acceptance = TopicValidatorResult.Ignore;
|
|
934
|
+
}
|
|
935
|
+
}
|
|
936
|
+
if (acceptance !== TopicValidatorResult.Accept) {
|
|
937
|
+
return { code: MessageStatus.invalid, reason: rejectReasonFromAcceptance(acceptance), msgIdStr };
|
|
938
|
+
}
|
|
939
|
+
}
|
|
940
|
+
return { code: MessageStatus.valid, messageId, msg };
|
|
941
|
+
}
|
|
942
|
+
/**
 * Return score of a peer.
 *
 * @param {string} peerId - peer id string used as the score map key
 *   (note: a string, not a PeerId object — matches other `this.score.score(id)` call sites)
 * @returns {number} the peer's current gossipsub score
 */
getScore(peerId) {
    return this.score.score(peerId);
}
|
|
948
|
+
/**
|
|
949
|
+
* Send an rpc object to a peer with subscriptions
|
|
950
|
+
*/
|
|
951
|
+
sendSubscriptions(toPeer, topics, subscribe) {
|
|
952
|
+
this.sendRpc(toPeer, {
|
|
953
|
+
subscriptions: topics.map((topic) => ({ topic, subscribe })),
|
|
954
|
+
messages: []
|
|
955
|
+
});
|
|
956
|
+
}
|
|
957
|
+
/**
|
|
958
|
+
* Handles an rpc control message from a peer
|
|
959
|
+
*/
|
|
960
|
+
async handleControlMessage(id, controlMsg) {
|
|
961
|
+
if (controlMsg === undefined) {
|
|
962
|
+
return;
|
|
963
|
+
}
|
|
964
|
+
const iwant = (controlMsg.ihave?.length > 0) ? this.handleIHave(id, controlMsg.ihave) : [];
|
|
965
|
+
const ihave = (controlMsg.iwant?.length > 0) ? this.handleIWant(id, controlMsg.iwant) : [];
|
|
966
|
+
const prune = (controlMsg.graft?.length > 0) ? await this.handleGraft(id, controlMsg.graft) : [];
|
|
967
|
+
(controlMsg.prune?.length > 0) && (await this.handlePrune(id, controlMsg.prune));
|
|
968
|
+
(controlMsg.idontwant?.length > 0) && this.handleIdontwant(id, controlMsg.idontwant);
|
|
969
|
+
if ((iwant.length === 0) && (ihave.length === 0) && (prune.length === 0)) {
|
|
970
|
+
return;
|
|
971
|
+
}
|
|
972
|
+
const sent = this.sendRpc(id, createGossipRpc(ihave, { iwant, prune }));
|
|
973
|
+
const iwantMessageIds = iwant[0]?.messageIDs;
|
|
974
|
+
if (iwantMessageIds != null) {
|
|
975
|
+
if (sent) {
|
|
976
|
+
this.gossipTracer.addPromise(id, iwantMessageIds);
|
|
977
|
+
}
|
|
978
|
+
else {
|
|
979
|
+
this.metrics?.iwantPromiseUntracked.inc(1);
|
|
980
|
+
}
|
|
981
|
+
}
|
|
982
|
+
}
|
|
983
|
+
/**
|
|
984
|
+
* Whether to accept a message from a peer
|
|
985
|
+
*/
|
|
986
|
+
acceptFrom(id) {
|
|
987
|
+
if (this.direct.has(id)) {
|
|
988
|
+
return true;
|
|
989
|
+
}
|
|
990
|
+
const now = Date.now();
|
|
991
|
+
const entry = this.acceptFromWhitelist.get(id);
|
|
992
|
+
if ((entry != null) && entry.messagesAccepted < ACCEPT_FROM_WHITELIST_MAX_MESSAGES && entry.acceptUntil >= now) {
|
|
993
|
+
entry.messagesAccepted += 1;
|
|
994
|
+
return true;
|
|
995
|
+
}
|
|
996
|
+
const score = this.score.score(id);
|
|
997
|
+
if (score >= ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE) {
|
|
998
|
+
// peer is unlikely to be able to drop its score to `graylistThreshold`
|
|
999
|
+
// after 128 messages or 1s
|
|
1000
|
+
this.acceptFromWhitelist.set(id, {
|
|
1001
|
+
messagesAccepted: 0,
|
|
1002
|
+
acceptUntil: now + ACCEPT_FROM_WHITELIST_DURATION_MS
|
|
1003
|
+
});
|
|
1004
|
+
}
|
|
1005
|
+
else {
|
|
1006
|
+
this.acceptFromWhitelist.delete(id);
|
|
1007
|
+
}
|
|
1008
|
+
return score >= this.opts.scoreThresholds.graylistThreshold;
|
|
1009
|
+
}
|
|
1010
|
+
/**
 * Handles IHAVE messages.
 *
 * Returns the IWANT control entries to send back (at most one entry, holding
 * the message ids we want), or [] when the gossip is ignored. Ignores gossip
 * from low-score peers and applies two flood-protection counters per
 * heartbeat: number of IHAVE messages (peerhave) and number of message ids
 * already requested (iasked).
 */
handleIHave(id, ihave) {
    if (ihave.length === 0) {
        return [];
    }
    // we ignore IHAVE gossip from any peer whose score is below the gossips threshold
    const score = this.score.score(id);
    if (score < this.opts.scoreThresholds.gossipThreshold) {
        this.log('IHAVE: ignoring peer %s with score below threshold [ score = %d ]', id, score);
        this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.LowScore });
        return [];
    }
    // IHAVE flood protection
    const peerhave = (this.peerhave.get(id) ?? 0) + 1;
    this.peerhave.set(id, peerhave);
    if (peerhave > constants.GossipsubMaxIHaveMessages) {
        this.log('IHAVE: peer %s has advertised too many times (%d) within this heartbeat interval; ignoring', id, peerhave);
        this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIhave });
        return [];
    }
    const iasked = this.iasked.get(id) ?? 0;
    if (iasked >= constants.GossipsubMaxIHaveLength) {
        this.log('IHAVE: peer %s has already advertised too many messages (%d); ignoring', id, iasked);
        this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIasked });
        return [];
    }
    // string msgId => msgId
    const iwant = new Map();
    ihave.forEach(({ topicID, messageIDs }) => {
        // only consider advertisements for topics we have a mesh for
        if (topicID == null || (messageIDs == null) || !this.mesh.has(topicID)) {
            return;
        }
        let idonthave = 0;
        messageIDs.forEach((msgId) => {
            const msgIdStr = this.msgIdToStrFn(msgId);
            if (!this.seenCache.has(msgIdStr)) {
                iwant.set(msgIdStr, msgId);
                idonthave++;
            }
        });
        this.metrics?.onIhaveRcv(topicID, messageIDs.length, idonthave);
    });
    if (iwant.size === 0) {
        return [];
    }
    // cap the request so that iasked never exceeds GossipsubMaxIHaveLength
    let iask = iwant.size;
    if (iask + iasked > constants.GossipsubMaxIHaveLength) {
        iask = constants.GossipsubMaxIHaveLength - iasked;
    }
    this.log('IHAVE: Asking for %d out of %d messages from %s', iask, iwant.size, id);
    let iwantList = Array.from(iwant.values());
    // ask in random order
    shuffle(iwantList);
    // truncate to the messages we are actually asking for and update the iasked counter
    iwantList = iwantList.slice(0, iask);
    this.iasked.set(id, iasked + iask);
    // do not add gossipTracer promise here until a successful sendRpc()
    return [
        {
            messageIDs: iwantList
        }
    ];
}
|
|
1075
|
+
/**
|
|
1076
|
+
* Handles IWANT messages
|
|
1077
|
+
* Returns messages to send back to peer
|
|
1078
|
+
*/
|
|
1079
|
+
handleIWant(id, iwant) {
|
|
1080
|
+
if (iwant.length === 0) {
|
|
1081
|
+
return [];
|
|
1082
|
+
}
|
|
1083
|
+
// we don't respond to IWANT requests from any per whose score is below the gossip threshold
|
|
1084
|
+
const score = this.score.score(id);
|
|
1085
|
+
if (score < this.opts.scoreThresholds.gossipThreshold) {
|
|
1086
|
+
this.log('IWANT: ignoring peer %s with score below threshold [score = %d]', id, score);
|
|
1087
|
+
return [];
|
|
1088
|
+
}
|
|
1089
|
+
const ihave = new Map();
|
|
1090
|
+
const iwantByTopic = new Map();
|
|
1091
|
+
let iwantDonthave = 0;
|
|
1092
|
+
iwant.forEach(({ messageIDs }) => {
|
|
1093
|
+
messageIDs?.forEach((msgId) => {
|
|
1094
|
+
const msgIdStr = this.msgIdToStrFn(msgId);
|
|
1095
|
+
const entry = this.mcache.getWithIWantCount(msgIdStr, id);
|
|
1096
|
+
if (entry == null) {
|
|
1097
|
+
iwantDonthave++;
|
|
1098
|
+
return;
|
|
1099
|
+
}
|
|
1100
|
+
iwantByTopic.set(entry.msg.topic, 1 + (iwantByTopic.get(entry.msg.topic) ?? 0));
|
|
1101
|
+
if (entry.count > constants.GossipsubGossipRetransmission) {
|
|
1102
|
+
this.log('IWANT: Peer %s has asked for message %s too many times: ignoring request', id, msgId);
|
|
1103
|
+
return;
|
|
1104
|
+
}
|
|
1105
|
+
ihave.set(msgIdStr, entry.msg);
|
|
1106
|
+
});
|
|
1107
|
+
});
|
|
1108
|
+
this.metrics?.onIwantRcv(iwantByTopic, iwantDonthave);
|
|
1109
|
+
if (ihave.size === 0) {
|
|
1110
|
+
this.log('IWANT: Could not provide any wanted messages to %s', id);
|
|
1111
|
+
return [];
|
|
1112
|
+
}
|
|
1113
|
+
this.log('IWANT: Sending %d messages to %s', ihave.size, id);
|
|
1114
|
+
return Array.from(ihave.values());
|
|
1115
|
+
}
|
|
1116
|
+
/**
 * Handles Graft messages.
 *
 * For each GRAFT, either accepts the peer into the topic mesh or queues a
 * PRUNE response (direct peer, backed-off peer, negative score, or mesh at
 * Dhi with an inbound-only connection). Returns the PRUNE control entries
 * to send back, with peer exchange (PX) disabled whenever any entry was
 * suspicious.
 */
async handleGraft(id, graft) {
    const prune = [];
    const score = this.score.score(id);
    const now = Date.now();
    let doPX = this.opts.doPX;
    graft.forEach(({ topicID }) => {
        if (topicID == null) {
            return;
        }
        const peersInMesh = this.mesh.get(topicID);
        if (peersInMesh == null) {
            // don't do PX when there is an unknown topic to avoid leaking our peers
            doPX = false;
            // spam hardening: ignore GRAFTs for unknown topics
            return;
        }
        // check if peer is already in the mesh; if so do nothing
        if (peersInMesh.has(id)) {
            return;
        }
        const backoffExpiry = this.backoff.get(topicID)?.get(id);
        // This if/else chain contains the various cases of valid (and semi-valid) GRAFTs
        // Most of these cases result in a PRUNE immediately being sent in response
        // we don't GRAFT to/from direct peers; complain loudly if this happens
        if (this.direct.has(id)) {
            this.log('GRAFT: ignoring request from direct peer %s', id);
            // this is possibly a bug from a non-reciprical configuration; send a PRUNE
            prune.push(topicID);
            // but don't px
            doPX = false;
            // make sure we are not backing off that peer
        }
        else if (typeof backoffExpiry === 'number' && now < backoffExpiry) {
            this.log('GRAFT: ignoring backed off peer %s', id);
            // add behavioral penalty
            this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff);
            // no PX
            doPX = false;
            // check the flood cutoff -- is the GRAFT coming too fast?
            const floodCutoff = backoffExpiry + this.opts.graftFloodThreshold - this.opts.pruneBackoff;
            if (now < floodCutoff) {
                // extra penalty
                this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff);
            }
            // refresh the backoff
            this.addBackoff(id, topicID);
            prune.push(topicID);
            // check the score
        }
        else if (score < 0) {
            // we don't GRAFT peers with negative score
            this.log('GRAFT: ignoring peer %s with negative score: score=%d, topic=%s', id, score, topicID);
            // we do send them PRUNE however, because it's a matter of protocol correctness
            prune.push(topicID);
            // but we won't PX to them
            doPX = false;
            // add/refresh backoff so that we don't reGRAFT too early even if the score decays
            this.addBackoff(id, topicID);
            // check the number of mesh peers; if it is at (or over) Dhi, we only accept grafts
            // from peers with outbound connections; this is a defensive check to restrict potential
            // mesh takeover attacks combined with love bombing
        }
        else if (peersInMesh.size >= this.opts.Dhi && !(this.outbound.get(id) ?? false)) {
            prune.push(topicID);
            this.addBackoff(id, topicID);
            // valid graft
        }
        else {
            this.log('GRAFT: Add mesh link from %s in %s', id, topicID);
            this.score.graft(id, topicID);
            peersInMesh.add(id);
            this.metrics?.onAddToMesh(topicID, InclusionReason.Subscribed, 1);
        }
        // NOTE: the graft event is emitted for every considered GRAFT,
        // including ones answered with a PRUNE
        this.safeDispatchEvent('gossipsub:graft', { detail: { peerId: id, topic: topicID, direction: 'inbound' } });
    });
    if (prune.length === 0) {
        return [];
    }
    const onUnsubscribe = false;
    return Promise.all(prune.map(async (topic) => this.makePrune(id, topic, doPX, onUnsubscribe)));
}
|
|
1200
|
+
/**
|
|
1201
|
+
* Handles Prune messages
|
|
1202
|
+
*/
|
|
1203
|
+
async handlePrune(id, prune) {
|
|
1204
|
+
const score = this.score.score(id);
|
|
1205
|
+
for (const { topicID, backoff, peers } of prune) {
|
|
1206
|
+
if (topicID == null) {
|
|
1207
|
+
continue;
|
|
1208
|
+
}
|
|
1209
|
+
const peersInMesh = this.mesh.get(topicID);
|
|
1210
|
+
if (peersInMesh == null) {
|
|
1211
|
+
return;
|
|
1212
|
+
}
|
|
1213
|
+
this.log('PRUNE: Remove mesh link to %s in %s', id, topicID);
|
|
1214
|
+
this.score.prune(id, topicID);
|
|
1215
|
+
if (peersInMesh.has(id)) {
|
|
1216
|
+
peersInMesh.delete(id);
|
|
1217
|
+
this.metrics?.onRemoveFromMesh(topicID, ChurnReason.Prune, 1);
|
|
1218
|
+
}
|
|
1219
|
+
// is there a backoff specified by the peer? if so obey it
|
|
1220
|
+
if (typeof backoff === 'number' && backoff > 0) {
|
|
1221
|
+
this.doAddBackoff(id, topicID, backoff * 1000);
|
|
1222
|
+
}
|
|
1223
|
+
else {
|
|
1224
|
+
this.addBackoff(id, topicID);
|
|
1225
|
+
}
|
|
1226
|
+
// PX
|
|
1227
|
+
if ((peers != null) && (peers.length > 0)) {
|
|
1228
|
+
// we ignore PX from peers with insufficient scores
|
|
1229
|
+
if (score < this.opts.scoreThresholds.acceptPXThreshold) {
|
|
1230
|
+
this.log('PRUNE: ignoring PX from peer %s with insufficient score [score = %d, topic = %s]', id, score, topicID);
|
|
1231
|
+
}
|
|
1232
|
+
else {
|
|
1233
|
+
await this.pxConnect(peers);
|
|
1234
|
+
}
|
|
1235
|
+
}
|
|
1236
|
+
this.safeDispatchEvent('gossipsub:prune', { detail: { peerId: id, topic: topicID, direction: 'inbound' } });
|
|
1237
|
+
}
|
|
1238
|
+
}
|
|
1239
|
+
/**
 * Handles IDONTWANT messages from a peer.
 *
 * Records each announced message id (capped at `opts.idontwantMaxMessages` per
 * peer per heartbeat window) so we can avoid forwarding those messages back to
 * the peer, and reports how many of them we do not hold in the message cache.
 *
 * @param id - peer id string of the sender
 * @param idontwant - IDONTWANT control entries ({ messageIDs })
 */
handleIdontwant(id, idontwant) {
    let idontwantCount = this.idontwantCounts.get(id) ?? 0;
    // return early if we have already received too many IDONTWANT messages from the peer
    if (idontwantCount >= this.opts.idontwantMaxMessages) {
        return;
    }
    // remember the starting count so we can report only the delta to metrics
    const startIdontwantCount = idontwantCount;
    let idontwants = this.idontwants.get(id);
    if (idontwants == null) {
        idontwants = new Map();
        this.idontwants.set(id, idontwants);
    }
    // number of announced ids we do NOT have in mcache
    let idonthave = 0;
    // labeled loop so the per-peer cap can abort both nested loops at once
    // eslint-disable-next-line no-labels
    out: for (const { messageIDs } of idontwant) {
        for (const msgId of messageIDs) {
            if (idontwantCount >= this.opts.idontwantMaxMessages) {
                // eslint-disable-next-line no-labels
                break out;
            }
            idontwantCount++;
            const msgIdStr = this.msgIdToStrFn(msgId);
            // tag with the current heartbeat tick so stale entries can be expired later
            idontwants.set(msgIdStr, this.heartbeatTicks);
            if (!this.mcache.msgs.has(msgIdStr)) {
                idonthave++;
            }
        }
    }
    this.idontwantCounts.set(id, idontwantCount);
    const total = idontwantCount - startIdontwantCount;
    this.metrics?.onIdontwantRcv(total, idonthave);
}
|
|
1271
|
+
/**
|
|
1272
|
+
* Add standard backoff log for a peer in a topic
|
|
1273
|
+
*/
|
|
1274
|
+
addBackoff(id, topic) {
|
|
1275
|
+
this.doAddBackoff(id, topic, this.opts.pruneBackoff);
|
|
1276
|
+
}
|
|
1277
|
+
/**
|
|
1278
|
+
* Add backoff expiry interval for a peer in a topic
|
|
1279
|
+
*
|
|
1280
|
+
* @param id
|
|
1281
|
+
* @param topic
|
|
1282
|
+
* @param intervalMs - backoff duration in milliseconds
|
|
1283
|
+
*/
|
|
1284
|
+
doAddBackoff(id, topic, intervalMs) {
|
|
1285
|
+
let backoff = this.backoff.get(topic);
|
|
1286
|
+
if (backoff == null) {
|
|
1287
|
+
backoff = new Map();
|
|
1288
|
+
this.backoff.set(topic, backoff);
|
|
1289
|
+
}
|
|
1290
|
+
const expire = Date.now() + intervalMs;
|
|
1291
|
+
const existingExpire = backoff.get(id) ?? 0;
|
|
1292
|
+
if (existingExpire < expire) {
|
|
1293
|
+
backoff.set(id, expire);
|
|
1294
|
+
}
|
|
1295
|
+
}
|
|
1296
|
+
/**
|
|
1297
|
+
* Apply penalties from broken IHAVE/IWANT promises
|
|
1298
|
+
*/
|
|
1299
|
+
applyIwantPenalties() {
|
|
1300
|
+
this.gossipTracer.getBrokenPromises().forEach((count, p) => {
|
|
1301
|
+
this.log("peer %s didn't follow up in %d IWANT requests; adding penalty", p, count);
|
|
1302
|
+
this.score.addPenalty(p, count, ScorePenalty.BrokenPromise);
|
|
1303
|
+
});
|
|
1304
|
+
}
|
|
1305
|
+
/**
|
|
1306
|
+
* Clear expired backoff expiries
|
|
1307
|
+
*/
|
|
1308
|
+
clearBackoff() {
|
|
1309
|
+
// we only clear once every GossipsubPruneBackoffTicks ticks to avoid iterating over the maps too much
|
|
1310
|
+
if (this.heartbeatTicks % constants.GossipsubPruneBackoffTicks !== 0) {
|
|
1311
|
+
return;
|
|
1312
|
+
}
|
|
1313
|
+
const now = Date.now();
|
|
1314
|
+
this.backoff.forEach((backoff, topic) => {
|
|
1315
|
+
backoff.forEach((expire, id) => {
|
|
1316
|
+
// add some slack time to the expiration, see https://github.com/libp2p/specs/pull/289
|
|
1317
|
+
if (expire + BACKOFF_SLACK * this.opts.heartbeatInterval < now) {
|
|
1318
|
+
backoff.delete(id);
|
|
1319
|
+
}
|
|
1320
|
+
});
|
|
1321
|
+
if (backoff.size === 0) {
|
|
1322
|
+
this.backoff.delete(topic);
|
|
1323
|
+
}
|
|
1324
|
+
});
|
|
1325
|
+
}
|
|
1326
|
+
/**
|
|
1327
|
+
* Maybe reconnect to direct peers
|
|
1328
|
+
*/
|
|
1329
|
+
async directConnect() {
|
|
1330
|
+
const toconnect = [];
|
|
1331
|
+
this.direct.forEach((id) => {
|
|
1332
|
+
if (!this.streamsOutbound.has(id)) {
|
|
1333
|
+
toconnect.push(id);
|
|
1334
|
+
}
|
|
1335
|
+
});
|
|
1336
|
+
await Promise.all(toconnect.map(async (id) => this.connect(id)));
|
|
1337
|
+
}
|
|
1338
|
+
/**
 * Maybe attempt connection given signed peer records
 *
 * Validates (at most `opts.prunePeers`) PX candidates received via PRUNE:
 * candidates with a signed peer record must pass `peerStore.consumePeerRecord`
 * verification before being dialed; candidates without a record are dialed
 * directly. Already-connected peers are skipped.
 *
 * @param peers - PX peer infos ({ peerID, signedPeerRecord })
 */
async pxConnect(peers) {
    // cap the number of candidates, picking a random subset when over the limit
    if (peers.length > this.opts.prunePeers) {
        shuffle(peers);
        peers = peers.slice(0, this.opts.prunePeers);
    }
    const toconnect = [];
    await Promise.all(peers.map(async (pi) => {
        if (pi.peerID == null) {
            return;
        }
        const peer = peerIdFromMultihash(Digest.decode(pi.peerID));
        const p = peer.toString();
        // skip peers we are already connected to
        if (this.peers.has(p)) {
            return;
        }
        if (pi.signedPeerRecord == null) {
            toconnect.push(p);
            return;
        }
        // The peer sent us a signed record
        // This is not a record from the peer who sent the record, but another peer who is connected with it
        // Ensure that it is valid
        try {
            if (!(await this.components.peerStore.consumePeerRecord(pi.signedPeerRecord, {
                expectedPeer: peer
            }))) {
                this.log('bogus peer record obtained through px: could not add peer record to address book');
                return;
            }
            toconnect.push(p);
        }
        catch (e) {
            // invalid/forged record: drop the candidate without dialing it
            this.log('bogus peer record obtained through px: invalid signature or not a peer record');
        }
    }));
    if (toconnect.length === 0) {
        return;
    }
    // dial all validated candidates in parallel
    await Promise.all(toconnect.map(async (id) => this.connect(id)));
}
|
|
1381
|
+
/**
|
|
1382
|
+
* Connect to a peer using the gossipsub protocol
|
|
1383
|
+
*/
|
|
1384
|
+
async connect(id) {
|
|
1385
|
+
this.log('Initiating connection with %s', id);
|
|
1386
|
+
const peerId = peerIdFromString(id);
|
|
1387
|
+
const connection = await this.components.connectionManager.openConnection(peerId);
|
|
1388
|
+
for (const protocol of this.protocols) {
|
|
1389
|
+
for (const topology of this.components.registrar.getTopologies(protocol)) {
|
|
1390
|
+
topology.onConnect?.(peerId, connection);
|
|
1391
|
+
}
|
|
1392
|
+
}
|
|
1393
|
+
}
|
|
1394
|
+
/**
|
|
1395
|
+
* Subscribes to a topic
|
|
1396
|
+
*/
|
|
1397
|
+
subscribe(topic) {
|
|
1398
|
+
if (this.status.code !== GossipStatusCode.started) {
|
|
1399
|
+
throw new Error('Pubsub has not started');
|
|
1400
|
+
}
|
|
1401
|
+
if (!this.subscriptions.has(topic)) {
|
|
1402
|
+
this.subscriptions.add(topic);
|
|
1403
|
+
for (const peerId of this.peers.keys()) {
|
|
1404
|
+
this.sendSubscriptions(peerId, [topic], true);
|
|
1405
|
+
}
|
|
1406
|
+
}
|
|
1407
|
+
this.join(topic);
|
|
1408
|
+
}
|
|
1409
|
+
/**
|
|
1410
|
+
* Unsubscribe to a topic
|
|
1411
|
+
*/
|
|
1412
|
+
unsubscribe(topic) {
|
|
1413
|
+
if (this.status.code !== GossipStatusCode.started) {
|
|
1414
|
+
throw new Error('Pubsub is not started');
|
|
1415
|
+
}
|
|
1416
|
+
const wasSubscribed = this.subscriptions.delete(topic);
|
|
1417
|
+
this.log('unsubscribe from %s - am subscribed %s', topic, wasSubscribed);
|
|
1418
|
+
if (wasSubscribed) {
|
|
1419
|
+
for (const peerId of this.peers.keys()) {
|
|
1420
|
+
this.sendSubscriptions(peerId, [topic], false);
|
|
1421
|
+
}
|
|
1422
|
+
}
|
|
1423
|
+
this.leave(topic);
|
|
1424
|
+
}
|
|
1425
|
+
/**
 * Join topic
 *
 * Builds the initial mesh for `topic`: first promotes eligible fanout peers
 * (removing the fanout entry), then tops up with random gossip peers until
 * `opts.D` members are selected, and sends GRAFT to each one.
 *
 * No-op if we are already meshed in the topic.
 *
 * @throws {Error} if gossipsub has not been started
 */
join(topic) {
    if (this.status.code !== GossipStatusCode.started) {
        throw new Error('Gossipsub has not started');
    }
    // if we are already in the mesh, return
    if (this.mesh.has(topic)) {
        return;
    }
    this.log('JOIN %s', topic);
    this.metrics?.onJoin(topic);
    const toAdd = new Set();
    const backoff = this.backoff.get(topic);
    // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do,
    // removing the fanout entry.
    const fanoutPeers = this.fanout.get(topic);
    if (fanoutPeers != null) {
        // Remove fanout entry and the last published time
        this.fanout.delete(topic);
        this.fanoutLastpub.delete(topic);
        // remove explicit peers, peers with negative scores, and backoffed peers
        fanoutPeers.forEach((id) => {
            if (!this.direct.has(id) && this.score.score(id) >= 0 && backoff?.has(id) !== true) {
                toAdd.add(id);
            }
        });
        this.metrics?.onAddToMesh(topic, InclusionReason.Fanout, toAdd.size);
    }
    // check if we need to get more peers, which we randomly select
    if (toAdd.size < this.opts.D) {
        const fanoutCount = toAdd.size;
        const newPeers = this.getRandomGossipPeers(topic, this.opts.D, (id) =>
        // filter direct peers and peers with negative score
        !toAdd.has(id) && !this.direct.has(id) && this.score.score(id) >= 0 && backoff?.has(id) !== true);
        newPeers.forEach((peer) => {
            toAdd.add(peer);
        });
        this.metrics?.onAddToMesh(topic, InclusionReason.Random, toAdd.size - fanoutCount);
    }
    this.mesh.set(topic, toAdd);
    // GRAFT every selected mesh member
    toAdd.forEach((id) => {
        this.log('JOIN: Add mesh link to %s in %s', id, topic);
        this.sendGraft(id, topic);
        // rust-libp2p
        // - peer_score.graft()
        // - Self::control_pool_add()
        // - peer_added_to_mesh()
    });
}
|
|
1476
|
+
/**
|
|
1477
|
+
* Leave topic
|
|
1478
|
+
*/
|
|
1479
|
+
leave(topic) {
|
|
1480
|
+
if (this.status.code !== GossipStatusCode.started) {
|
|
1481
|
+
throw new Error('Gossipsub has not started');
|
|
1482
|
+
}
|
|
1483
|
+
this.log('LEAVE %s', topic);
|
|
1484
|
+
this.metrics?.onLeave(topic);
|
|
1485
|
+
// Send PRUNE to mesh peers
|
|
1486
|
+
const meshPeers = this.mesh.get(topic);
|
|
1487
|
+
if (meshPeers != null) {
|
|
1488
|
+
Promise.all(Array.from(meshPeers).map(async (id) => {
|
|
1489
|
+
this.log('LEAVE: Remove mesh link to %s in %s', id, topic);
|
|
1490
|
+
await this.sendPrune(id, topic);
|
|
1491
|
+
})).catch((err) => {
|
|
1492
|
+
this.log('Error sending prunes to mesh peers', err);
|
|
1493
|
+
});
|
|
1494
|
+
this.mesh.delete(topic);
|
|
1495
|
+
}
|
|
1496
|
+
}
|
|
1497
|
+
selectPeersToForward(topic, propagationSource, excludePeers) {
|
|
1498
|
+
const tosend = new Set();
|
|
1499
|
+
// Add explicit peers
|
|
1500
|
+
const peersInTopic = this.topics.get(topic);
|
|
1501
|
+
if (peersInTopic != null) {
|
|
1502
|
+
this.direct.forEach((peer) => {
|
|
1503
|
+
if (peersInTopic.has(peer) && propagationSource !== peer && !(excludePeers?.has(peer) ?? false)) {
|
|
1504
|
+
tosend.add(peer);
|
|
1505
|
+
}
|
|
1506
|
+
});
|
|
1507
|
+
// As of Mar 2022, spec + golang-libp2p include this while rust-libp2p does not
|
|
1508
|
+
// rust-libp2p: https://github.com/libp2p/rust-libp2p/blob/6cc3b4ec52c922bfcf562a29b5805c3150e37c75/protocols/gossipsub/src/behaviour.rs#L2693
|
|
1509
|
+
// spec: https://github.com/libp2p/specs/blob/10712c55ab309086a52eec7d25f294df4fa96528/pubsub/gossipsub/gossipsub-v1.0.md?plain=1#L361
|
|
1510
|
+
this.floodsubPeers.forEach((peer) => {
|
|
1511
|
+
if (peersInTopic.has(peer) &&
|
|
1512
|
+
propagationSource !== peer &&
|
|
1513
|
+
!(excludePeers?.has(peer) ?? false) &&
|
|
1514
|
+
this.score.score(peer) >= this.opts.scoreThresholds.publishThreshold) {
|
|
1515
|
+
tosend.add(peer);
|
|
1516
|
+
}
|
|
1517
|
+
});
|
|
1518
|
+
}
|
|
1519
|
+
// add mesh peers
|
|
1520
|
+
const meshPeers = this.mesh.get(topic);
|
|
1521
|
+
if ((meshPeers != null) && meshPeers.size > 0) {
|
|
1522
|
+
meshPeers.forEach((peer) => {
|
|
1523
|
+
if (propagationSource !== peer && !(excludePeers?.has(peer) ?? false)) {
|
|
1524
|
+
tosend.add(peer);
|
|
1525
|
+
}
|
|
1526
|
+
});
|
|
1527
|
+
}
|
|
1528
|
+
return tosend;
|
|
1529
|
+
}
|
|
1530
|
+
/**
 * Selects the set of peers a locally-published message should be sent to,
 * along with per-category counts (direct / floodsub / mesh / fanout) for
 * metrics. With `floodPublish` enabled, sends to direct peers plus every
 * topic peer above the publish threshold; otherwise uses direct + floodsub
 * peers plus mesh peers (topping up to `opts.D`), falling back to fanout
 * peers (creating a fanout set if needed) when we are not meshed.
 *
 * @returns {{ tosend: Set<string>, tosendCount: { direct: number, floodsub: number, mesh: number, fanout: number } }}
 */
selectPeersToPublish(topic) {
    const tosend = new Set();
    const tosendCount = {
        direct: 0,
        floodsub: 0,
        mesh: 0,
        fanout: 0
    };
    const peersInTopic = this.topics.get(topic);
    if (peersInTopic != null) {
        // flood-publish behavior
        // send to direct peers and _all_ peers meeting the publishThreshold
        if (this.opts.floodPublish) {
            peersInTopic.forEach((id) => {
                if (this.direct.has(id)) {
                    tosend.add(id);
                    tosendCount.direct++;
                }
                else if (this.score.score(id) >= this.opts.scoreThresholds.publishThreshold) {
                    tosend.add(id);
                    tosendCount.floodsub++;
                }
            });
        }
        else {
            // non-flood-publish behavior
            // send to direct peers, subscribed floodsub peers
            // and some mesh peers above publishThreshold
            // direct peers (if subscribed)
            this.direct.forEach((id) => {
                if (peersInTopic.has(id)) {
                    tosend.add(id);
                    tosendCount.direct++;
                }
            });
            // floodsub peers
            // Note: if there are no floodsub peers, we save a loop through peersInTopic Map
            this.floodsubPeers.forEach((id) => {
                if (peersInTopic.has(id) && this.score.score(id) >= this.opts.scoreThresholds.publishThreshold) {
                    tosend.add(id);
                    tosendCount.floodsub++;
                }
            });
            // Gossipsub peers handling
            const meshPeers = this.mesh.get(topic);
            if ((meshPeers != null) && meshPeers.size > 0) {
                meshPeers.forEach((peer) => {
                    tosend.add(peer);
                    tosendCount.mesh++;
                });
                // We want to publish to at least `D` peers.
                // If there are insufficient peers in the mesh, publish to other topic peers
                if (meshPeers.size < this.opts.D) {
                    // pick additional topic peers above the publishThreshold
                    const topicPeers = this.getRandomGossipPeers(topic, this.opts.D - meshPeers.size, (id) => {
                        return !meshPeers.has(id) && !this.direct.has(id) && !this.floodsubPeers.has(id) && this.score.score(id) >= this.opts.scoreThresholds.publishThreshold;
                    });
                    topicPeers.forEach((peer) => {
                        tosend.add(peer);
                        tosendCount.mesh++;
                    });
                }
            }
            else {
                // We are not in the mesh for topic, use fanout peers
                const fanoutPeers = this.fanout.get(topic);
                if ((fanoutPeers != null) && fanoutPeers.size > 0) {
                    fanoutPeers.forEach((peer) => {
                        tosend.add(peer);
                        tosendCount.fanout++;
                    });
                }
                else {
                    // We have no fanout peers, select mesh_n of them and add them to the fanout
                    // If we are not in the fanout, then pick peers in topic above the publishThreshold
                    const newFanoutPeers = this.getRandomGossipPeers(topic, this.opts.D, (id) => {
                        return this.score.score(id) >= this.opts.scoreThresholds.publishThreshold;
                    });
                    // eslint-disable-next-line max-depth
                    if (newFanoutPeers.size > 0) {
                        this.fanout.set(topic, newFanoutPeers);
                        newFanoutPeers.forEach((peer) => {
                            tosend.add(peer);
                            tosendCount.fanout++;
                        });
                    }
                }
                // We are publishing to fanout peers - update the time we published
                this.fanoutLastpub.set(topic, Date.now());
            }
        }
    }
    return { tosend, tosendCount };
}
|
|
1624
|
+
/**
|
|
1625
|
+
* Forwards a message from our peers.
|
|
1626
|
+
*
|
|
1627
|
+
* For messages published by us (the app layer), this class uses `publish`
|
|
1628
|
+
*/
|
|
1629
|
+
forwardMessage(msgIdStr, rawMsg, propagationSource, excludePeers) {
|
|
1630
|
+
// message is fully validated inform peer_score
|
|
1631
|
+
if (propagationSource != null) {
|
|
1632
|
+
this.score.deliverMessage(propagationSource, msgIdStr, rawMsg.topic);
|
|
1633
|
+
}
|
|
1634
|
+
const tosend = this.selectPeersToForward(rawMsg.topic, propagationSource, excludePeers);
|
|
1635
|
+
// Note: Don't throw if tosend is empty, we can have a mesh with a single peer
|
|
1636
|
+
// forward the message to peers
|
|
1637
|
+
tosend.forEach((id) => {
|
|
1638
|
+
// sendRpc may mutate RPC message on piggyback, create a new message for each peer
|
|
1639
|
+
this.sendRpc(id, createGossipRpc([rawMsg]));
|
|
1640
|
+
});
|
|
1641
|
+
this.metrics?.onForwardMsg(rawMsg.topic, tosend.size);
|
|
1642
|
+
}
|
|
1643
|
+
/**
 * App layer publishes a message to peers, return number of peers this message is published to
 * Note: `async` due to crypto only if `StrictSign`, otherwise it's a sync fn.
 *
 * For messages not from us, this class uses `forwardMessage`.
 *
 * @param topic - topic to publish on
 * @param data - raw application payload (run through dataTransform if configured)
 * @param opts - per-call overrides for ignoreDuplicatePublishError / allowPublishToZeroTopicPeers / batchPublish
 * @returns {{ recipients: PeerId[] }} peers the message was actually sent to
 * @throws on uninitialized publish config, duplicate message, or zero recipients (unless allowed)
 */
async publish(topic, data, opts) {
    const startMs = Date.now();
    const transformedData = (this.dataTransform != null) ? this.dataTransform.outboundTransform(topic, data) : data;
    if (this.publishConfig == null) {
        throw Error('PublishError.Uninitialized');
    }
    // Prepare raw message with user's publishConfig
    const { raw: rawMsg, msg } = await buildRawMessage(this.publishConfig, topic, data, transformedData);
    // calculate the message id from the un-transformed data
    const msgId = await this.msgIdFn(msg);
    const msgIdStr = this.msgIdToStrFn(msgId);
    // Current publish opt takes precedence global opts, while preserving false value
    const ignoreDuplicatePublishError = opts?.ignoreDuplicatePublishError ?? this.opts.ignoreDuplicatePublishError;
    if (this.seenCache.has(msgIdStr)) {
        // This message has already been seen. We don't re-publish messages that have already
        // been published on the network.
        if (ignoreDuplicatePublishError) {
            this.metrics?.onPublishDuplicateMsg(topic);
            return { recipients: [] };
        }
        throw Error('PublishError.Duplicate');
    }
    const { tosend, tosendCount } = this.selectPeersToPublish(topic);
    const willSendToSelf = this.opts.emitSelf && this.subscriptions.has(topic);
    // Current publish opt takes precedence global opts, while preserving false value
    const allowPublishToZeroTopicPeers = opts?.allowPublishToZeroTopicPeers ?? this.opts.allowPublishToZeroTopicPeers;
    if (tosend.size === 0 && !allowPublishToZeroTopicPeers && !willSendToSelf) {
        throw Error('PublishError.NoPeersSubscribedToTopic');
    }
    // If the message isn't a duplicate and we have sent it to some peers add it to the
    // duplicate cache and memcache.
    this.seenCache.put(msgIdStr);
    // all published messages are valid
    this.mcache.put({ msgId, msgIdStr }, rawMsg, true);
    // Consider the message as delivered for gossip promises.
    this.gossipTracer.deliverMessage(msgIdStr);
    // If the message is anonymous or has a random author add it to the published message ids cache.
    this.publishedMessageIds.put(msgIdStr);
    const batchPublish = opts?.batchPublish ?? this.opts.batchPublish;
    const rpc = createGossipRpc([rawMsg]);
    if (batchPublish) {
        this.sendRpcInBatch(tosend, rpc);
    }
    else {
        // Send to set of peers aggregated from direct, mesh, fanout
        for (const id of tosend) {
            // sendRpc may mutate RPC message on piggyback, create a new message for each peer
            const sent = this.sendRpc(id, rpc);
            // did not actually send the message
            if (!sent) {
                tosend.delete(id);
            }
        }
    }
    const durationMs = Date.now() - startMs;
    this.metrics?.onPublishMsg(topic, tosendCount, tosend.size, rawMsg.data != null ? rawMsg.data.length : 0, durationMs);
    // Dispatch the message to the user if we are subscribed to the topic
    if (willSendToSelf) {
        // count ourselves among the recipients reported to the caller
        tosend.add(this.components.peerId.toString());
        super.dispatchEvent(new CustomEvent('gossipsub:message', {
            detail: {
                propagationSource: this.components.peerId,
                msgId: msgIdStr,
                msg
            }
        }));
        // TODO: Add option to switch between emit per topic or all messages in one
        super.dispatchEvent(new CustomEvent('message', { detail: msg }));
    }
    return {
        recipients: Array.from(tosend.values()).map((str) => this.peers.get(str) ?? peerIdFromString(str))
    };
}
|
|
1722
|
+
/**
|
|
1723
|
+
* Send the same data in batch to tosend list without considering cached control messages
|
|
1724
|
+
* This is not only faster but also avoid allocating memory for each peer
|
|
1725
|
+
* see https://github.com/ChainSafe/js-libp2p-gossipsub/issues/344
|
|
1726
|
+
*/
|
|
1727
|
+
sendRpcInBatch(tosend, rpc) {
|
|
1728
|
+
const rpcBytes = RPC.encode(rpc);
|
|
1729
|
+
const prefixedData = encode.single(rpcBytes);
|
|
1730
|
+
for (const id of tosend) {
|
|
1731
|
+
const outboundStream = this.streamsOutbound.get(id);
|
|
1732
|
+
if (outboundStream == null) {
|
|
1733
|
+
this.log(`Cannot send RPC to ${id} as there is no open stream to it available`);
|
|
1734
|
+
tosend.delete(id);
|
|
1735
|
+
continue;
|
|
1736
|
+
}
|
|
1737
|
+
try {
|
|
1738
|
+
outboundStream.pushPrefixed(prefixedData);
|
|
1739
|
+
}
|
|
1740
|
+
catch (e) {
|
|
1741
|
+
tosend.delete(id);
|
|
1742
|
+
this.log.error(`Cannot send rpc to ${id}`, e);
|
|
1743
|
+
}
|
|
1744
|
+
this.metrics?.onRpcSent(rpc, rpcBytes.length);
|
|
1745
|
+
}
|
|
1746
|
+
}
|
|
1747
|
+
/**
 * This function should be called when `asyncValidation` is `true` after
 * the message got validated by the caller. Messages are stored in the `mcache` and
 * validation is expected to be fast enough that the messages should still exist in the cache.
 * There are three possible validation outcomes and the outcome is given in acceptance.
 *
 * If acceptance = `MessageAcceptance.Accept` the message will get propagated to the
 * network. The `propagation_source` parameter indicates who the message was received by and
 * will not be forwarded back to that peer.
 *
 * If acceptance = `MessageAcceptance.Reject` the message will be deleted from the memcache
 * and the P₄ penalty will be applied to the `propagationSource`.
 *
 * If acceptance = `MessageAcceptance.Ignore` the message will be deleted from the memcache
 * but no P₄ penalty will be applied.
 *
 * This function will return true if the message was found in the cache and false if was not
 * in the cache anymore.
 *
 * This should only be called once per message.
 */
reportMessageValidationResult(msgId, propagationSource, acceptance) {
    let cacheEntry;
    if (acceptance === TopicValidatorResult.Accept) {
        cacheEntry = this.mcache.validate(msgId);
        if (cacheEntry != null) {
            const { message: rawMsg, originatingPeers } = cacheEntry;
            // message is fully validated inform peer_score
            // NOTE(review): forwardMessage below also calls score.deliverMessage
            // when propagationSource != null, so delivery may be reported twice
            // for the same message — confirm whether the scorer tolerates this.
            this.score.deliverMessage(propagationSource, msgId, rawMsg.topic);
            this.forwardMessage(msgId, cacheEntry.message, propagationSource, originatingPeers);
        }
        // else, Message not in cache. Ignoring forwarding
    }
    else {
        // Not valid
        cacheEntry = this.mcache.remove(msgId);
        if (cacheEntry != null) {
            const rejectReason = rejectReasonFromAcceptance(acceptance);
            const { message: rawMsg, originatingPeers } = cacheEntry;
            // Tell peer_score about reject
            // Reject the original source, and any duplicates we've seen from other peers.
            this.score.rejectMessage(propagationSource, msgId, rawMsg.topic, rejectReason);
            for (const peer of originatingPeers) {
                this.score.rejectMessage(peer, msgId, rawMsg.topic, rejectReason);
            }
        }
        // else, Message not in cache. Ignoring forwarding
    }
    // report outcome and message age to metrics regardless of cache hit
    const firstSeenTimestampMs = this.score.messageFirstSeenTimestampMs(msgId);
    this.metrics?.onReportValidation(cacheEntry, acceptance, firstSeenTimestampMs);
}
|
|
1798
|
+
/**
|
|
1799
|
+
* Sends a GRAFT message to a peer
|
|
1800
|
+
*/
|
|
1801
|
+
sendGraft(id, topic) {
|
|
1802
|
+
const graft = [
|
|
1803
|
+
{
|
|
1804
|
+
topicID: topic
|
|
1805
|
+
}
|
|
1806
|
+
];
|
|
1807
|
+
const out = createGossipRpc([], { graft });
|
|
1808
|
+
this.sendRpc(id, out);
|
|
1809
|
+
}
|
|
1810
|
+
/**
|
|
1811
|
+
* Sends a PRUNE message to a peer
|
|
1812
|
+
*/
|
|
1813
|
+
async sendPrune(id, topic) {
|
|
1814
|
+
// this is only called from leave() function
|
|
1815
|
+
const onUnsubscribe = true;
|
|
1816
|
+
const prune = [await this.makePrune(id, topic, this.opts.doPX, onUnsubscribe)];
|
|
1817
|
+
const out = createGossipRpc([], { prune });
|
|
1818
|
+
this.sendRpc(id, out);
|
|
1819
|
+
}
|
|
1820
|
+
sendIDontWants(msgId, topic, source) {
|
|
1821
|
+
const ids = this.mesh.get(topic);
|
|
1822
|
+
if (ids == null) {
|
|
1823
|
+
return;
|
|
1824
|
+
}
|
|
1825
|
+
// don't send IDONTWANT to:
|
|
1826
|
+
// - the source
|
|
1827
|
+
// - peers that don't support v1.2
|
|
1828
|
+
const tosend = new Set(ids);
|
|
1829
|
+
tosend.delete(source);
|
|
1830
|
+
for (const id of tosend) {
|
|
1831
|
+
if (this.streamsOutbound.get(id)?.protocol !== constants.GossipsubIDv12) {
|
|
1832
|
+
tosend.delete(id);
|
|
1833
|
+
}
|
|
1834
|
+
}
|
|
1835
|
+
const idontwantRpc = createGossipRpc([], { idontwant: [{ messageIDs: [msgId] }] });
|
|
1836
|
+
this.sendRpcInBatch(tosend, idontwantRpc);
|
|
1837
|
+
}
|
|
1838
|
+
/**
 * Send an rpc object to a peer
 *
 * Piggybacks any pending control (graft/prune) retries and gossip (ihave)
 * for the peer onto the RPC before sending. On send failure, the consumed
 * control/gossip entries are re-attached so they can be retried later.
 *
 * @param id - peer id string
 * @param rpc - the RPC to send (may be mutated by piggybacking)
 * @returns {boolean} true if the RPC was pushed to the outbound stream
 */
sendRpc(id, rpc) {
    const outboundStream = this.streamsOutbound.get(id);
    if (outboundStream == null) {
        this.log(`Cannot send RPC to ${id} as there is no open stream to it available`);
        return false;
    }
    // piggyback control message retries
    const ctrl = this.control.get(id);
    if (ctrl != null) {
        this.piggybackControl(id, rpc, ctrl);
        this.control.delete(id);
    }
    // piggyback gossip
    const ihave = this.gossip.get(id);
    if (ihave != null) {
        this.piggybackGossip(id, rpc, ihave);
        this.gossip.delete(id);
    }
    const rpcBytes = RPC.encode(rpc);
    try {
        outboundStream.push(rpcBytes);
    }
    catch (e) {
        this.log.error(`Cannot send rpc to ${id}`, e);
        // if the peer had control messages or gossip, re-attach
        if (ctrl != null) {
            this.control.set(id, ctrl);
        }
        if (ihave != null) {
            this.gossip.set(id, ihave);
        }
        return false;
    }
    this.metrics?.onRpcSent(rpc, rpcBytes.length);
    // emit outbound graft/prune events (including any piggybacked ones)
    if (rpc.control?.graft != null) {
        for (const topic of rpc.control?.graft) {
            if (topic.topicID != null) {
                this.safeDispatchEvent('gossipsub:graft', { detail: { peerId: id, topic: topic.topicID, direction: 'outbound' } });
            }
        }
    }
    if (rpc.control?.prune != null) {
        for (const topic of rpc.control?.prune) {
            if (topic.topicID != null) {
                this.safeDispatchEvent('gossipsub:prune', { detail: { peerId: id, topic: topic.topicID, direction: 'outbound' } });
            }
        }
    }
    return true;
}
|
|
1891
|
+
/** Mutates `outRpc` adding graft and prune control messages */
|
|
1892
|
+
piggybackControl(id, outRpc, ctrl) {
|
|
1893
|
+
const rpc = ensureControl(outRpc);
|
|
1894
|
+
for (const graft of ctrl.graft) {
|
|
1895
|
+
if (graft.topicID != null && (this.mesh.get(graft.topicID)?.has(id) ?? false)) {
|
|
1896
|
+
rpc.control.graft.push(graft);
|
|
1897
|
+
}
|
|
1898
|
+
}
|
|
1899
|
+
for (const prune of ctrl.prune) {
|
|
1900
|
+
if (prune.topicID != null && !(this.mesh.get(prune.topicID)?.has(id) ?? false)) {
|
|
1901
|
+
rpc.control.prune.push(prune);
|
|
1902
|
+
}
|
|
1903
|
+
}
|
|
1904
|
+
}
|
|
1905
|
+
/** Mutates `outRpc` adding ihave control messages */
|
|
1906
|
+
piggybackGossip(id, outRpc, ihave) {
|
|
1907
|
+
const rpc = ensureControl(outRpc);
|
|
1908
|
+
rpc.control.ihave = ihave;
|
|
1909
|
+
}
|
|
1910
|
+
/**
 * Send graft and prune messages
 *
 * First pass: peers being grafted, bundling any prunes destined for the same
 * peer into a single RPC (and removing them from `toprune`). Second pass:
 * peers that only receive prunes. PX is suppressed per-peer via `noPX`.
 *
 * @param tograft - peer id => topic[]
 * @param toprune - peer id => topic[]
 * @param noPX - peer id => true to suppress PX in the prune for that peer
 */
async sendGraftPrune(tograft, toprune, noPX) {
    const doPX = this.opts.doPX;
    // heartbeat prunes are never unsubscribe prunes
    const onUnsubscribe = false;
    for (const [id, topics] of tograft) {
        const graft = topics.map((topicID) => ({ topicID }));
        let prune = [];
        // If a peer also has prunes, process them now
        const pruning = toprune.get(id);
        if (pruning != null) {
            prune = await Promise.all(pruning.map(async (topicID) => this.makePrune(id, topicID, doPX && !(noPX.get(id) ?? false), onUnsubscribe)));
            toprune.delete(id);
        }
        this.sendRpc(id, createGossipRpc([], { graft, prune }));
    }
    // remaining peers get prune-only RPCs
    for (const [id, topics] of toprune) {
        const prune = await Promise.all(topics.map(async (topicID) => this.makePrune(id, topicID, doPX && !(noPX.get(id) ?? false), onUnsubscribe)));
        this.sendRpc(id, createGossipRpc([], { prune }));
    }
}
|
|
1935
|
+
/**
|
|
1936
|
+
* Emits gossip - Send IHAVE messages to a random set of gossip peers
|
|
1937
|
+
*/
|
|
1938
|
+
emitGossip(peersToGossipByTopic) {
|
|
1939
|
+
const gossipIDsByTopic = this.mcache.getGossipIDs(new Set(peersToGossipByTopic.keys()));
|
|
1940
|
+
for (const [topic, peersToGossip] of peersToGossipByTopic) {
|
|
1941
|
+
this.doEmitGossip(topic, peersToGossip, gossipIDsByTopic.get(topic) ?? []);
|
|
1942
|
+
}
|
|
1943
|
+
}
|
|
1944
|
+
/**
|
|
1945
|
+
* Send gossip messages to GossipFactor peers above threshold with a minimum of D_lazy
|
|
1946
|
+
* Peers are randomly selected from the heartbeat which exclude mesh + fanout peers
|
|
1947
|
+
* We also exclude direct peers, as there is no reason to emit gossip to them
|
|
1948
|
+
*
|
|
1949
|
+
* @param topic
|
|
1950
|
+
* @param candidateToGossip - peers to gossip
|
|
1951
|
+
* @param messageIDs - message ids to gossip
|
|
1952
|
+
*/
|
|
1953
|
+
doEmitGossip(topic, candidateToGossip, messageIDs) {
|
|
1954
|
+
if (messageIDs.length === 0) {
|
|
1955
|
+
return;
|
|
1956
|
+
}
|
|
1957
|
+
// shuffle to emit in random order
|
|
1958
|
+
shuffle(messageIDs);
|
|
1959
|
+
// if we are emitting more than GossipsubMaxIHaveLength ids, truncate the list
|
|
1960
|
+
if (messageIDs.length > constants.GossipsubMaxIHaveLength) {
|
|
1961
|
+
// we do the truncation (with shuffling) per peer below
|
|
1962
|
+
this.log('too many messages for gossip; will truncate IHAVE list (%d messages)', messageIDs.length);
|
|
1963
|
+
}
|
|
1964
|
+
if (candidateToGossip.size === 0) {
|
|
1965
|
+
return;
|
|
1966
|
+
}
|
|
1967
|
+
let target = this.opts.Dlazy;
|
|
1968
|
+
const gossipFactor = this.opts.gossipFactor;
|
|
1969
|
+
const factor = gossipFactor * candidateToGossip.size;
|
|
1970
|
+
let peersToGossip = candidateToGossip;
|
|
1971
|
+
if (factor > target) {
|
|
1972
|
+
target = factor;
|
|
1973
|
+
}
|
|
1974
|
+
if (target > peersToGossip.size) {
|
|
1975
|
+
target = peersToGossip.size;
|
|
1976
|
+
}
|
|
1977
|
+
else {
|
|
1978
|
+
// only shuffle if needed
|
|
1979
|
+
peersToGossip = shuffle(Array.from(peersToGossip)).slice(0, target);
|
|
1980
|
+
}
|
|
1981
|
+
// Emit the IHAVE gossip to the selected peers up to the target
|
|
1982
|
+
peersToGossip.forEach((id) => {
|
|
1983
|
+
let peerMessageIDs = messageIDs;
|
|
1984
|
+
if (messageIDs.length > constants.GossipsubMaxIHaveLength) {
|
|
1985
|
+
// shuffle and slice message IDs per peer so that we emit a different set for each peer
|
|
1986
|
+
// we have enough reduncancy in the system that this will significantly increase the message
|
|
1987
|
+
// coverage when we do truncate
|
|
1988
|
+
peerMessageIDs = shuffle(peerMessageIDs.slice()).slice(0, constants.GossipsubMaxIHaveLength);
|
|
1989
|
+
}
|
|
1990
|
+
this.pushGossip(id, {
|
|
1991
|
+
topicID: topic,
|
|
1992
|
+
messageIDs: peerMessageIDs
|
|
1993
|
+
});
|
|
1994
|
+
});
|
|
1995
|
+
}
|
|
1996
|
+
/**
|
|
1997
|
+
* Flush gossip and control messages
|
|
1998
|
+
*/
|
|
1999
|
+
flush() {
|
|
2000
|
+
// send gossip first, which will also piggyback control
|
|
2001
|
+
for (const [peer, ihave] of this.gossip.entries()) {
|
|
2002
|
+
this.gossip.delete(peer);
|
|
2003
|
+
this.sendRpc(peer, createGossipRpc([], { ihave }));
|
|
2004
|
+
}
|
|
2005
|
+
// send the remaining control messages
|
|
2006
|
+
for (const [peer, control] of this.control.entries()) {
|
|
2007
|
+
this.control.delete(peer);
|
|
2008
|
+
const out = createGossipRpc([], { graft: control.graft, prune: control.prune });
|
|
2009
|
+
this.sendRpc(peer, out);
|
|
2010
|
+
}
|
|
2011
|
+
}
|
|
2012
|
+
/**
|
|
2013
|
+
* Adds new IHAVE messages to pending gossip
|
|
2014
|
+
*/
|
|
2015
|
+
pushGossip(id, controlIHaveMsgs) {
|
|
2016
|
+
this.log('Add gossip to %s', id);
|
|
2017
|
+
const gossip = this.gossip.get(id) ?? [];
|
|
2018
|
+
this.gossip.set(id, gossip.concat(controlIHaveMsgs));
|
|
2019
|
+
}
|
|
2020
|
+
/**
 * Make a PRUNE control message for a peer in a topic.
 *
 * Records the prune in the peer score, registers a local backoff for the
 * (peer, topic) pair, and — when `doPX` is true — attaches a peer-exchange
 * list of up to `prunePeers` other non-negatively-scored peers in the topic.
 *
 * @param id - peer being pruned
 * @param topic - topic the peer is pruned from
 * @param doPX - whether to include peer exchange records
 * @param onUnsubscribe - selects the (longer) unsubscribe backoff instead of the prune backoff
 */
async makePrune(id, topic, doPX, onUnsubscribe) {
    // score bookkeeping happens here so callers don't have to
    this.score.prune(id, topic);
    if (this.streamsOutbound.get(id)?.protocol === constants.GossipsubIDv10) {
        // Gossipsub v1.0 -- no backoff, the peer won't be able to parse it anyway
        return {
            topicID: topic,
            peers: []
        };
    }
    // backoff is measured in seconds
    // GossipsubPruneBackoff and GossipsubUnsubscribeBackoff are measured in milliseconds
    // The protobuf has it as a uint64
    // NOTE: `unsubcribeBackoff` (sic) matches the option's actual spelling upstream
    const backoffMs = onUnsubscribe ? this.opts.unsubcribeBackoff : this.opts.pruneBackoff;
    const backoff = backoffMs / 1000;
    // register our own backoff BEFORE the doPX early-return so we always honor it
    this.doAddBackoff(id, topic, backoffMs);
    if (!doPX) {
        return {
            topicID: topic,
            peers: [],
            backoff
        };
    }
    // select peers for Peer eXchange: anyone in the topic except the pruned
    // peer itself, with a non-negative score
    const peers = this.getRandomGossipPeers(topic, this.opts.prunePeers, (xid) => {
        return xid !== id && this.score.score(xid) >= 0;
    });
    const px = await Promise.all(Array.from(peers).map(async (peerId) => {
        // see if we have a signed record to send back; if we don't, just send
        // the peer ID and let the pruned peer find them in the DHT -- we can't trust
        // unsigned address records through PX anyways
        // Finding signed records in the DHT is not supported at the time of writing in js-libp2p
        // NOTE: this `id` (a PeerId object) shadows the `id` parameter (a string)
        const id = this.peers.get(peerId) ?? peerIdFromString(peerId);
        let peerInfo;
        try {
            peerInfo = await this.components.peerStore.get(id);
        }
        catch (err) {
            // a missing peer-store entry is expected; anything else is a real error
            if (err.name !== 'NotFoundError') {
                throw err;
            }
        }
        return {
            peerID: id.toMultihash().bytes,
            signedPeerRecord: peerInfo?.peerRecordEnvelope
        };
    }));
    return {
        topicID: topic,
        peers: px,
        backoff
    };
}
|
|
2075
|
+
/**
 * Fire-and-forget heartbeat runner: times the heartbeat, logs (never throws on)
 * failures, and self-reschedules via setTimeout while the node is started.
 */
runHeartbeat = () => {
    const stopTimer = this.metrics?.heartbeatDuration.startTimer();
    const onSettled = () => {
        stopTimer?.();
        // Schedule the next run only while still in started status
        if (this.status.code !== GossipStatusCode.started) {
            return;
        }
        // Clear previous timeout before overwriting `status.heartbeatTimeout`,
        // it should be completed tho.
        clearTimeout(this.status.heartbeatTimeout);
        // NodeJS setInterval is inexact and drifts by a few milliseconds per call.
        // To keep the heartbeat precise we use setTimeout and recompute the delay
        // each loop, anchored to `hearbeatStartMs`.
        const interval = this.opts.heartbeatInterval;
        let delay = interval - ((Date.now() - this.status.hearbeatStartMs) % interval);
        // If too close to the next heartbeat, skip one
        if (delay < interval * 0.25) {
            delay += interval;
            this.metrics?.heartbeatSkipped.inc();
        }
        this.status.heartbeatTimeout = setTimeout(this.runHeartbeat, delay);
    };
    this.heartbeat()
        .catch((err) => { this.log('Error running heartbeat', err); })
        .finally(onSettled);
};
|
|
2101
|
+
/**
 * Maintains the mesh and fanout maps in gossipsub.
 *
 * One heartbeat tick: expires bookkeeping caches, enforces mesh size bounds
 * (Dlo/D/Dhi), keeps Dout outbound peers in each mesh, opportunistically
 * grafts better-scoring peers, maintains fanout for publish-only topics,
 * emits IHAVE gossip, and flushes coalesced GRAFT/PRUNE messages.
 */
async heartbeat() {
    const { D, Dlo, Dhi, Dscore, Dout, fanoutTTL } = this.opts;
    this.heartbeatTicks++;
    // cache scores throughout the heartbeat so each peer is scored at most once
    const scores = new Map();
    const getScore = (id) => {
        let s = scores.get(id);
        if (s === undefined) {
            s = this.score.score(id);
            scores.set(id, s);
        }
        return s;
    };
    // peer id => topic[] to GRAFT
    const tograft = new Map();
    // peer id => topic[] to PRUNE
    const toprune = new Map();
    // peer id => don't px (suppress peer exchange for badly-scored peers)
    const noPX = new Map();
    // clean up expired backoffs
    this.clearBackoff();
    // clean up peerhave/iasked counters
    this.peerhave.clear();
    this.metrics?.cacheSize.set({ cache: 'iasked' }, this.iasked.size);
    this.iasked.clear();
    // apply IWANT request penalties
    this.applyIwantPenalties();
    // clean up IDONTWANT counters
    this.idontwantCounts.clear();
    // clean up old tracked IDONTWANTs (older than the message cache window)
    for (const idontwants of this.idontwants.values()) {
        for (const [msgId, heartbeatTick] of idontwants) {
            if (this.heartbeatTicks - heartbeatTick >= this.opts.mcacheLength) {
                idontwants.delete(msgId);
            }
        }
    }
    // ensure direct peers are connected
    if (this.heartbeatTicks % this.opts.directConnectTicks === 0) {
        // we only do this every few ticks to allow pending connections to complete
        // and account for restarts/downtime
        await this.directConnect();
    }
    // EXTRA: Prune caches
    this.fastMsgIdCache?.prune();
    this.seenCache.prune();
    this.gossipTracer.prune();
    this.publishedMessageIds.prune();
    /**
     * Instead of calling getRandomGossipPeers multiple times to:
     * + get more mesh peers
     * + more outbound peers
     * + opportunistic grafting
     * + emitGossip
     *
     * We want to loop through the topic peers only a single time and prepare
     * gossip peers for all topics to improve the performance
     */
    const peersToGossipByTopic = new Map();
    // maintain the mesh for topics we have joined
    // eslint-disable-next-line complexity
    this.mesh.forEach((peers, topic) => {
        const peersInTopic = this.topics.get(topic);
        // non-mesh, non-direct peers eligible to be grafted into the mesh
        const candidateMeshPeers = new Set();
        // peers that will receive IHAVE gossip for this topic
        const peersToGossip = new Set();
        peersToGossipByTopic.set(topic, peersToGossip);
        if (peersInTopic != null) {
            // shuffle once so both graft candidates and gossip targets are random
            const shuffledPeers = shuffle(Array.from(peersInTopic));
            const backoff = this.backoff.get(topic);
            for (const id of shuffledPeers) {
                const peerStreams = this.streamsOutbound.get(id);
                if ((peerStreams != null) &&
                    this.protocols.includes(peerStreams.protocol) &&
                    !peers.has(id) &&
                    !this.direct.has(id)) {
                    const score = getScore(id);
                    // graft candidates must not be backing off and must score >= 0
                    if (backoff?.has(id) !== true && score >= 0) {
                        candidateMeshPeers.add(id);
                    }
                    // instead of having to find gossip peers after heartbeat which require another loop
                    // we prepare peers to gossip in a topic within heartbeat to improve performance
                    if (score >= this.opts.scoreThresholds.gossipThreshold) {
                        peersToGossip.add(id);
                    }
                }
            }
        }
        // prune/graft helper functions (defined per topic; close over `peers`,
        // `topic`, `peersToGossip`, `tograft`, `toprune`)
        const prunePeer = (id, reason) => {
            this.log('HEARTBEAT: Remove mesh link to %s in %s', id, topic);
            // no need to update peer score here as we do it in makePrune
            // add prune backoff record
            this.addBackoff(id, topic);
            // remove peer from mesh
            peers.delete(id);
            // after pruning a peer from mesh, we want to gossip topic to it if its
            // score meets the gossip threshold
            if (getScore(id) >= this.opts.scoreThresholds.gossipThreshold) {
                peersToGossip.add(id);
            }
            this.metrics?.onRemoveFromMesh(topic, reason, 1);
            // add to toprune
            const topics = toprune.get(id);
            if (topics == null) {
                toprune.set(id, [topic]);
            }
            else {
                topics.push(topic);
            }
        };
        const graftPeer = (id, reason) => {
            this.log('HEARTBEAT: Add mesh link to %s in %s', id, topic);
            // update peer score
            this.score.graft(id, topic);
            // add peer to mesh
            peers.add(id);
            // when we add a new mesh peer, we don't want to gossip messages to it
            peersToGossip.delete(id);
            this.metrics?.onAddToMesh(topic, reason, 1);
            // add to tograft
            const topics = tograft.get(id);
            if (topics == null) {
                tograft.set(id, [topic]);
            }
            else {
                topics.push(topic);
            }
        };
        // drop all peers with negative score, without PX
        peers.forEach((id) => {
            const score = getScore(id);
            if (score < 0) {
                this.log('HEARTBEAT: Prune peer %s with negative score: score=%d, topic=%s', id, score, topic);
                prunePeer(id, ChurnReason.BadScore);
                noPX.set(id, true);
            }
        });
        // do we have enough peers?
        if (peers.size < Dlo) {
            const ineed = D - peers.size;
            // slice up to first `ineed` items and remove them from candidateMeshPeers
            // same as `const newMeshPeers = candidateMeshPeers.slice(0, ineed)`
            const newMeshPeers = removeFirstNItemsFromSet(candidateMeshPeers, ineed);
            newMeshPeers.forEach((p) => {
                graftPeer(p, InclusionReason.NotEnough);
            });
        }
        // do we have too many peers?
        if (peers.size > Dhi) {
            let peersArray = Array.from(peers);
            // sort by score (descending)
            peersArray.sort((a, b) => getScore(b) - getScore(a));
            // We keep the first D_score peers by score and the remaining up to D randomly
            // under the constraint that we keep D_out peers in the mesh (if we have that many)
            peersArray = peersArray.slice(0, Dscore).concat(shuffle(peersArray.slice(Dscore)));
            // count the outbound peers we are keeping
            let outbound = 0;
            peersArray.slice(0, D).forEach((p) => {
                if (this.outbound.get(p) ?? false) {
                    outbound++;
                }
            });
            // if it's less than D_out, bubble up some outbound peers from the random selection
            if (outbound < Dout) {
                const rotate = (i) => {
                    // rotate the peersArray to the right and put the ith peer in the front
                    const p = peersArray[i];
                    for (let j = i; j > 0; j--) {
                        peersArray[j] = peersArray[j - 1];
                    }
                    peersArray[0] = p;
                };
                // first bubble up all outbound peers already in the selection to the front
                if (outbound > 0) {
                    let ihave = outbound;
                    for (let i = 1; i < D && ihave > 0; i++) {
                        // eslint-disable-next-line max-depth
                        if (this.outbound.get(peersArray[i]) ?? false) {
                            rotate(i);
                            ihave--;
                        }
                    }
                }
                // now bubble up enough outbound peers outside the selection to the front
                let ineed = D - outbound;
                for (let i = D; i < peersArray.length && ineed > 0; i++) {
                    if (this.outbound.get(peersArray[i]) ?? false) {
                        rotate(i);
                        ineed--;
                    }
                }
            }
            // prune the excess peers (everything past the first D slots)
            peersArray.slice(D).forEach((p) => {
                prunePeer(p, ChurnReason.Excess);
            });
        }
        // do we have enough outbound peers?
        if (peers.size >= Dlo) {
            // count the outbound peers we have
            let outbound = 0;
            peers.forEach((p) => {
                if (this.outbound.get(p) ?? false) {
                    outbound++;
                }
            });
            // if it's less than D_out, select some peers with outbound connections and graft them
            if (outbound < Dout) {
                const ineed = Dout - outbound;
                const newMeshPeers = removeItemsFromSet(candidateMeshPeers, ineed, (id) => this.outbound.get(id) === true);
                newMeshPeers.forEach((p) => {
                    graftPeer(p, InclusionReason.Outbound);
                });
            }
        }
        // should we try to improve the mesh with opportunistic grafting?
        if (this.heartbeatTicks % this.opts.opportunisticGraftTicks === 0 && peers.size > 1) {
            // Opportunistic grafting works as follows: we check the median score of peers in the
            // mesh; if this score is below the opportunisticGraftThreshold, we select a few peers at
            // random with score over the median.
            // The intention is to (slowly) improve an underperforming mesh by introducing good
            // scoring peers that may have been gossiping at us. This allows us to get out of sticky
            // situations where we are stuck with poor peers and also recover from churn of good peers.
            // now compute the median peer score in the mesh
            const peersList = Array.from(peers).sort((a, b) => getScore(a) - getScore(b));
            const medianIndex = Math.floor(peers.size / 2);
            const medianScore = getScore(peersList[medianIndex]);
            // if the median score is below the threshold, select a better peer (if any) and GRAFT
            if (medianScore < this.opts.scoreThresholds.opportunisticGraftThreshold) {
                const ineed = this.opts.opportunisticGraftPeers;
                const newMeshPeers = removeItemsFromSet(candidateMeshPeers, ineed, (id) => getScore(id) > medianScore);
                for (const id of newMeshPeers) {
                    this.log('HEARTBEAT: Opportunistically graft peer %s on topic %s', id, topic);
                    graftPeer(id, InclusionReason.Opportunistic);
                }
            }
        }
    });
    // expire fanout for topics we haven't published to in a while
    const now = Date.now();
    this.fanoutLastpub.forEach((lastpb, topic) => {
        if (lastpb + fanoutTTL < now) {
            this.fanout.delete(topic);
            this.fanoutLastpub.delete(topic);
        }
    });
    // maintain our fanout for topics we are publishing but we have not joined
    this.fanout.forEach((fanoutPeers, topic) => {
        // checks whether our peers are still in the topic and have a score above the publish threshold
        const topicPeers = this.topics.get(topic);
        fanoutPeers.forEach((id) => {
            if (!(topicPeers?.has(id) ?? false) || getScore(id) < this.opts.scoreThresholds.publishThreshold) {
                fanoutPeers.delete(id);
            }
        });
        const peersInTopic = this.topics.get(topic);
        const candidateFanoutPeers = [];
        // the fanout map contains topics to which we are not subscribed.
        const peersToGossip = new Set();
        peersToGossipByTopic.set(topic, peersToGossip);
        if (peersInTopic != null) {
            const shuffledPeers = shuffle(Array.from(peersInTopic));
            for (const id of shuffledPeers) {
                const peerStreams = this.streamsOutbound.get(id);
                if ((peerStreams != null) &&
                    this.protocols.includes(peerStreams.protocol) &&
                    !fanoutPeers.has(id) &&
                    !this.direct.has(id)) {
                    const score = getScore(id);
                    if (score >= this.opts.scoreThresholds.publishThreshold) {
                        candidateFanoutPeers.push(id);
                    }
                    // instead of having to find gossip peers after heartbeat which require another loop
                    // we prepare peers to gossip in a topic within heartbeat to improve performance
                    if (score >= this.opts.scoreThresholds.gossipThreshold) {
                        peersToGossip.add(id);
                    }
                }
            }
        }
        // do we need more peers?
        if (fanoutPeers.size < D) {
            const ineed = D - fanoutPeers.size;
            candidateFanoutPeers.slice(0, ineed).forEach((id) => {
                fanoutPeers.add(id);
                // don't gossip to peers we now treat as fanout recipients
                peersToGossip?.delete(id);
            });
        }
    });
    this.emitGossip(peersToGossipByTopic);
    // send coalesced GRAFT/PRUNE messages (will piggyback gossip)
    await this.sendGraftPrune(tograft, toprune, noPX);
    // flush pending gossip that wasn't piggybacked above
    this.flush();
    // advance the message history window
    this.mcache.shift();
    this.dispatchEvent(new CustomEvent('gossipsub:heartbeat'));
}
|
|
2400
|
+
/**
|
|
2401
|
+
* Given a topic, returns up to count peers subscribed to that topic
|
|
2402
|
+
* that pass an optional filter function
|
|
2403
|
+
*
|
|
2404
|
+
* @param topic
|
|
2405
|
+
* @param count
|
|
2406
|
+
* @param filter - a function to filter acceptable peers
|
|
2407
|
+
*/
|
|
2408
|
+
getRandomGossipPeers(topic, count, filter = () => true) {
|
|
2409
|
+
const peersInTopic = this.topics.get(topic);
|
|
2410
|
+
if (peersInTopic == null) {
|
|
2411
|
+
return new Set();
|
|
2412
|
+
}
|
|
2413
|
+
// Adds all peers using our protocol
|
|
2414
|
+
// that also pass the filter function
|
|
2415
|
+
let peers = [];
|
|
2416
|
+
peersInTopic.forEach((id) => {
|
|
2417
|
+
const peerStreams = this.streamsOutbound.get(id);
|
|
2418
|
+
if (peerStreams == null) {
|
|
2419
|
+
return;
|
|
2420
|
+
}
|
|
2421
|
+
if (this.protocols.includes(peerStreams.protocol) && filter(id)) {
|
|
2422
|
+
peers.push(id);
|
|
2423
|
+
}
|
|
2424
|
+
});
|
|
2425
|
+
// Pseudo-randomly shuffles peers
|
|
2426
|
+
peers = shuffle(peers);
|
|
2427
|
+
if (count > 0 && peers.length > count) {
|
|
2428
|
+
peers = peers.slice(0, count);
|
|
2429
|
+
}
|
|
2430
|
+
return new Set(peers);
|
|
2431
|
+
}
|
|
2432
|
+
onScrapeMetrics(metrics) {
|
|
2433
|
+
/* Data structure sizes */
|
|
2434
|
+
metrics.mcacheSize.set(this.mcache.size);
|
|
2435
|
+
metrics.mcacheNotValidatedCount.set(this.mcache.notValidatedCount);
|
|
2436
|
+
// Arbitrary size
|
|
2437
|
+
metrics.cacheSize.set({ cache: 'direct' }, this.direct.size);
|
|
2438
|
+
metrics.cacheSize.set({ cache: 'seenCache' }, this.seenCache.size);
|
|
2439
|
+
metrics.cacheSize.set({ cache: 'fastMsgIdCache' }, this.fastMsgIdCache?.size ?? 0);
|
|
2440
|
+
metrics.cacheSize.set({ cache: 'publishedMessageIds' }, this.publishedMessageIds.size);
|
|
2441
|
+
metrics.cacheSize.set({ cache: 'mcache' }, this.mcache.size);
|
|
2442
|
+
metrics.cacheSize.set({ cache: 'score' }, this.score.size);
|
|
2443
|
+
metrics.cacheSize.set({ cache: 'gossipTracer.promises' }, this.gossipTracer.size);
|
|
2444
|
+
metrics.cacheSize.set({ cache: 'gossipTracer.requests' }, this.gossipTracer.requestMsByMsgSize);
|
|
2445
|
+
// Bounded by topic
|
|
2446
|
+
metrics.cacheSize.set({ cache: 'topics' }, this.topics.size);
|
|
2447
|
+
metrics.cacheSize.set({ cache: 'subscriptions' }, this.subscriptions.size);
|
|
2448
|
+
metrics.cacheSize.set({ cache: 'mesh' }, this.mesh.size);
|
|
2449
|
+
metrics.cacheSize.set({ cache: 'fanout' }, this.fanout.size);
|
|
2450
|
+
// Bounded by peer
|
|
2451
|
+
metrics.cacheSize.set({ cache: 'peers' }, this.peers.size);
|
|
2452
|
+
metrics.cacheSize.set({ cache: 'streamsOutbound' }, this.streamsOutbound.size);
|
|
2453
|
+
metrics.cacheSize.set({ cache: 'streamsInbound' }, this.streamsInbound.size);
|
|
2454
|
+
metrics.cacheSize.set({ cache: 'acceptFromWhitelist' }, this.acceptFromWhitelist.size);
|
|
2455
|
+
metrics.cacheSize.set({ cache: 'gossip' }, this.gossip.size);
|
|
2456
|
+
metrics.cacheSize.set({ cache: 'control' }, this.control.size);
|
|
2457
|
+
metrics.cacheSize.set({ cache: 'peerhave' }, this.peerhave.size);
|
|
2458
|
+
metrics.cacheSize.set({ cache: 'outbound' }, this.outbound.size);
|
|
2459
|
+
// 2D nested data structure
|
|
2460
|
+
let backoffSize = 0;
|
|
2461
|
+
const now = Date.now();
|
|
2462
|
+
metrics.connectedPeersBackoffSec.reset();
|
|
2463
|
+
for (const backoff of this.backoff.values()) {
|
|
2464
|
+
backoffSize += backoff.size;
|
|
2465
|
+
for (const [peer, expiredMs] of backoff.entries()) {
|
|
2466
|
+
if (this.peers.has(peer)) {
|
|
2467
|
+
metrics.connectedPeersBackoffSec.observe(Math.max(0, expiredMs - now) / 1000);
|
|
2468
|
+
}
|
|
2469
|
+
}
|
|
2470
|
+
}
|
|
2471
|
+
metrics.cacheSize.set({ cache: 'backoff' }, backoffSize);
|
|
2472
|
+
let idontwantsCount = 0;
|
|
2473
|
+
for (const idontwant of this.idontwants.values()) {
|
|
2474
|
+
idontwantsCount += idontwant.size;
|
|
2475
|
+
}
|
|
2476
|
+
metrics.cacheSize.set({ cache: 'idontwants' }, idontwantsCount);
|
|
2477
|
+
// Peer counts
|
|
2478
|
+
for (const [topicStr, peers] of this.topics) {
|
|
2479
|
+
metrics.topicPeersCount.set({ topicStr }, peers.size);
|
|
2480
|
+
}
|
|
2481
|
+
for (const [topicStr, peers] of this.mesh) {
|
|
2482
|
+
metrics.meshPeerCounts.set({ topicStr }, peers.size);
|
|
2483
|
+
}
|
|
2484
|
+
// Peer scores
|
|
2485
|
+
const scores = [];
|
|
2486
|
+
const scoreByPeer = new Map();
|
|
2487
|
+
metrics.behaviourPenalty.reset();
|
|
2488
|
+
for (const peerIdStr of this.peers.keys()) {
|
|
2489
|
+
const score = this.score.score(peerIdStr);
|
|
2490
|
+
scores.push(score);
|
|
2491
|
+
scoreByPeer.set(peerIdStr, score);
|
|
2492
|
+
metrics.behaviourPenalty.observe(this.score.peerStats.get(peerIdStr)?.behaviourPenalty ?? 0);
|
|
2493
|
+
}
|
|
2494
|
+
metrics.registerScores(scores, this.opts.scoreThresholds);
|
|
2495
|
+
// Breakdown score per mesh topicLabel
|
|
2496
|
+
metrics.registerScorePerMesh(this.mesh, scoreByPeer);
|
|
2497
|
+
// Breakdown on each score weight
|
|
2498
|
+
const sw = computeAllPeersScoreWeights(this.peers.keys(), this.score.peerStats, this.score.params, this.score.peerIPs, metrics.topicStrToLabel);
|
|
2499
|
+
metrics.registerScoreWeights(sw);
|
|
2500
|
+
}
|
|
2501
|
+
/**
 * Event handler: tag a peer in the peer store (value 100) for the topic it
 * was just added to our mesh for. Best-effort: failures are only logged.
 */
tagMeshPeer = (evt) => {
    const { peerId, topic } = evt.detail;
    const id = this.peers.get(peerId) ?? peerIdFromString(peerId);
    const patch = {
        tags: {
            [topic]: {
                value: 100
            }
        }
    };
    this.components.peerStore.merge(id, patch)
        .catch((err) => { this.log.error('Error tagging peer %s with topic %s', peerId, topic, err); });
};
|
|
2511
|
+
/**
 * Event handler: remove the topic tag from a peer that left our mesh.
 * Best-effort: failures are only logged.
 */
untagMeshPeer = (evt) => {
    const { peerId, topic } = evt.detail;
    const id = this.peers.get(peerId) ?? peerIdFromString(peerId);
    const patch = {
        tags: {
            [topic]: undefined
        }
    };
    this.components.peerStore.merge(id, patch)
        .catch((err) => { this.log.error('Error untagging peer %s with topic %s', peerId, topic, err); });
};
|
|
2519
|
+
}
|
|
2520
|
+
//# sourceMappingURL=gossipsub.js.map
|