@libp2p/gossipsub 14.1.1-6059227cb

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (160)
  1. package/README.md +85 -0
  2. package/dist/index.min.js +19 -0
  3. package/dist/index.min.js.map +7 -0
  4. package/dist/src/config.d.ts +32 -0
  5. package/dist/src/config.d.ts.map +1 -0
  6. package/dist/src/config.js +2 -0
  7. package/dist/src/config.js.map +1 -0
  8. package/dist/src/constants.d.ts +213 -0
  9. package/dist/src/constants.d.ts.map +1 -0
  10. package/dist/src/constants.js +217 -0
  11. package/dist/src/constants.js.map +1 -0
  12. package/dist/src/errors.d.ts +9 -0
  13. package/dist/src/errors.d.ts.map +1 -0
  14. package/dist/src/errors.js +15 -0
  15. package/dist/src/errors.js.map +1 -0
  16. package/dist/src/gossipsub.d.ts +419 -0
  17. package/dist/src/gossipsub.d.ts.map +1 -0
  18. package/dist/src/gossipsub.js +2520 -0
  19. package/dist/src/gossipsub.js.map +1 -0
  20. package/dist/src/index.d.ts +344 -0
  21. package/dist/src/index.d.ts.map +1 -0
  22. package/dist/src/index.js +43 -0
  23. package/dist/src/index.js.map +1 -0
  24. package/dist/src/message/decodeRpc.d.ts +11 -0
  25. package/dist/src/message/decodeRpc.d.ts.map +1 -0
  26. package/dist/src/message/decodeRpc.js +10 -0
  27. package/dist/src/message/decodeRpc.js.map +1 -0
  28. package/dist/src/message/index.d.ts +2 -0
  29. package/dist/src/message/index.d.ts.map +1 -0
  30. package/dist/src/message/index.js +2 -0
  31. package/dist/src/message/index.js.map +1 -0
  32. package/dist/src/message/rpc.d.ts +99 -0
  33. package/dist/src/message/rpc.d.ts.map +1 -0
  34. package/dist/src/message/rpc.js +663 -0
  35. package/dist/src/message/rpc.js.map +1 -0
  36. package/dist/src/message-cache.d.ts +80 -0
  37. package/dist/src/message-cache.d.ts.map +1 -0
  38. package/dist/src/message-cache.js +144 -0
  39. package/dist/src/message-cache.js.map +1 -0
  40. package/dist/src/metrics.d.ts +467 -0
  41. package/dist/src/metrics.d.ts.map +1 -0
  42. package/dist/src/metrics.js +896 -0
  43. package/dist/src/metrics.js.map +1 -0
  44. package/dist/src/score/compute-score.d.ts +4 -0
  45. package/dist/src/score/compute-score.d.ts.map +1 -0
  46. package/dist/src/score/compute-score.js +75 -0
  47. package/dist/src/score/compute-score.js.map +1 -0
  48. package/dist/src/score/index.d.ts +4 -0
  49. package/dist/src/score/index.d.ts.map +1 -0
  50. package/dist/src/score/index.js +4 -0
  51. package/dist/src/score/index.js.map +1 -0
  52. package/dist/src/score/message-deliveries.d.ts +45 -0
  53. package/dist/src/score/message-deliveries.d.ts.map +1 -0
  54. package/dist/src/score/message-deliveries.js +75 -0
  55. package/dist/src/score/message-deliveries.js.map +1 -0
  56. package/dist/src/score/peer-score-params.d.ts +125 -0
  57. package/dist/src/score/peer-score-params.d.ts.map +1 -0
  58. package/dist/src/score/peer-score-params.js +159 -0
  59. package/dist/src/score/peer-score-params.js.map +1 -0
  60. package/dist/src/score/peer-score-thresholds.d.ts +31 -0
  61. package/dist/src/score/peer-score-thresholds.d.ts.map +1 -0
  62. package/dist/src/score/peer-score-thresholds.js +32 -0
  63. package/dist/src/score/peer-score-thresholds.js.map +1 -0
  64. package/dist/src/score/peer-score.d.ts +119 -0
  65. package/dist/src/score/peer-score.d.ts.map +1 -0
  66. package/dist/src/score/peer-score.js +459 -0
  67. package/dist/src/score/peer-score.js.map +1 -0
  68. package/dist/src/score/peer-stats.d.ts +32 -0
  69. package/dist/src/score/peer-stats.d.ts.map +1 -0
  70. package/dist/src/score/peer-stats.js +2 -0
  71. package/dist/src/score/peer-stats.js.map +1 -0
  72. package/dist/src/score/scoreMetrics.d.ts +23 -0
  73. package/dist/src/score/scoreMetrics.d.ts.map +1 -0
  74. package/dist/src/score/scoreMetrics.js +155 -0
  75. package/dist/src/score/scoreMetrics.js.map +1 -0
  76. package/dist/src/stream.d.ts +30 -0
  77. package/dist/src/stream.d.ts.map +1 -0
  78. package/dist/src/stream.js +55 -0
  79. package/dist/src/stream.js.map +1 -0
  80. package/dist/src/tracer.d.ts +53 -0
  81. package/dist/src/tracer.d.ts.map +1 -0
  82. package/dist/src/tracer.js +155 -0
  83. package/dist/src/tracer.js.map +1 -0
  84. package/dist/src/types.d.ts +148 -0
  85. package/dist/src/types.d.ts.map +1 -0
  86. package/dist/src/types.js +90 -0
  87. package/dist/src/types.js.map +1 -0
  88. package/dist/src/utils/buildRawMessage.d.ts +20 -0
  89. package/dist/src/utils/buildRawMessage.d.ts.map +1 -0
  90. package/dist/src/utils/buildRawMessage.js +151 -0
  91. package/dist/src/utils/buildRawMessage.js.map +1 -0
  92. package/dist/src/utils/create-gossip-rpc.d.ts +7 -0
  93. package/dist/src/utils/create-gossip-rpc.d.ts.map +1 -0
  94. package/dist/src/utils/create-gossip-rpc.js +31 -0
  95. package/dist/src/utils/create-gossip-rpc.js.map +1 -0
  96. package/dist/src/utils/index.d.ts +4 -0
  97. package/dist/src/utils/index.d.ts.map +1 -0
  98. package/dist/src/utils/index.js +4 -0
  99. package/dist/src/utils/index.js.map +1 -0
  100. package/dist/src/utils/messageIdToString.d.ts +5 -0
  101. package/dist/src/utils/messageIdToString.d.ts.map +1 -0
  102. package/dist/src/utils/messageIdToString.js +8 -0
  103. package/dist/src/utils/messageIdToString.js.map +1 -0
  104. package/dist/src/utils/msgIdFn.d.ts +10 -0
  105. package/dist/src/utils/msgIdFn.d.ts.map +1 -0
  106. package/dist/src/utils/msgIdFn.js +23 -0
  107. package/dist/src/utils/msgIdFn.js.map +1 -0
  108. package/dist/src/utils/multiaddr.d.ts +3 -0
  109. package/dist/src/utils/multiaddr.d.ts.map +1 -0
  110. package/dist/src/utils/multiaddr.js +15 -0
  111. package/dist/src/utils/multiaddr.js.map +1 -0
  112. package/dist/src/utils/publishConfig.d.ts +8 -0
  113. package/dist/src/utils/publishConfig.d.ts.map +1 -0
  114. package/dist/src/utils/publishConfig.js +25 -0
  115. package/dist/src/utils/publishConfig.js.map +1 -0
  116. package/dist/src/utils/set.d.ts +14 -0
  117. package/dist/src/utils/set.d.ts.map +1 -0
  118. package/dist/src/utils/set.js +41 -0
  119. package/dist/src/utils/set.js.map +1 -0
  120. package/dist/src/utils/shuffle.d.ts +7 -0
  121. package/dist/src/utils/shuffle.d.ts.map +1 -0
  122. package/dist/src/utils/shuffle.js +21 -0
  123. package/dist/src/utils/shuffle.js.map +1 -0
  124. package/dist/src/utils/time-cache.d.ts +22 -0
  125. package/dist/src/utils/time-cache.d.ts.map +1 -0
  126. package/dist/src/utils/time-cache.js +54 -0
  127. package/dist/src/utils/time-cache.js.map +1 -0
  128. package/package.json +142 -0
  129. package/src/config.ts +31 -0
  130. package/src/constants.ts +261 -0
  131. package/src/errors.ts +17 -0
  132. package/src/gossipsub.ts +3061 -0
  133. package/src/index.ts +404 -0
  134. package/src/message/decodeRpc.ts +19 -0
  135. package/src/message/index.ts +1 -0
  136. package/src/message/rpc.proto +58 -0
  137. package/src/message/rpc.ts +848 -0
  138. package/src/message-cache.ts +196 -0
  139. package/src/metrics.ts +1014 -0
  140. package/src/score/compute-score.ts +98 -0
  141. package/src/score/index.ts +3 -0
  142. package/src/score/message-deliveries.ts +95 -0
  143. package/src/score/peer-score-params.ts +316 -0
  144. package/src/score/peer-score-thresholds.ts +70 -0
  145. package/src/score/peer-score.ts +565 -0
  146. package/src/score/peer-stats.ts +33 -0
  147. package/src/score/scoreMetrics.ts +215 -0
  148. package/src/stream.ts +79 -0
  149. package/src/tracer.ts +177 -0
  150. package/src/types.ts +178 -0
  151. package/src/utils/buildRawMessage.ts +174 -0
  152. package/src/utils/create-gossip-rpc.ts +34 -0
  153. package/src/utils/index.ts +3 -0
  154. package/src/utils/messageIdToString.ts +8 -0
  155. package/src/utils/msgIdFn.ts +24 -0
  156. package/src/utils/multiaddr.ts +19 -0
  157. package/src/utils/publishConfig.ts +33 -0
  158. package/src/utils/set.ts +43 -0
  159. package/src/utils/shuffle.ts +21 -0
  160. package/src/utils/time-cache.ts +71 -0
@@ -0,0 +1,3061 @@
1
+ import { TypedEventEmitter, serviceCapabilities, serviceDependencies } from '@libp2p/interface'
2
+ import { peerIdFromMultihash, peerIdFromString } from '@libp2p/peer-id'
3
+ import { encode } from 'it-length-prefixed'
4
+ import { pipe } from 'it-pipe'
5
+ import { pushable } from 'it-pushable'
6
+ import * as Digest from 'multiformats/hashes/digest'
7
+ import * as constants from './constants.js'
8
+ import {
9
+ ACCEPT_FROM_WHITELIST_DURATION_MS,
10
+ ACCEPT_FROM_WHITELIST_MAX_MESSAGES,
11
+ ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE,
12
+ BACKOFF_SLACK
13
+ } from './constants.js'
14
+ import { StrictNoSign, StrictSign, TopicValidatorResult } from './index.ts'
15
+ import { defaultDecodeRpcLimits } from './message/decodeRpc.js'
16
+ import { RPC } from './message/rpc.js'
17
+ import { MessageCache } from './message-cache.js'
18
+ import {
19
+ ChurnReason,
20
+ getMetrics,
21
+ IHaveIgnoreReason,
22
+ InclusionReason,
23
+
24
+ ScorePenalty
25
+
26
+ } from './metrics.js'
27
+ import {
28
+ PeerScore,
29
+
30
+ createPeerScoreParams,
31
+ createPeerScoreThresholds
32
+
33
+ } from './score/index.js'
34
+ import { computeAllPeersScoreWeights } from './score/scoreMetrics.js'
35
+ import { InboundStream, OutboundStream } from './stream.js'
36
+ import { IWantTracer } from './tracer.js'
37
+ import {
38
+
39
+ ValidateError,
40
+
41
+ MessageStatus,
42
+ RejectReason,
43
+
44
+ rejectReasonFromAcceptance
45
+
46
+ } from './types.js'
47
+ import { buildRawMessage, validateToRawMessage } from './utils/buildRawMessage.js'
48
+ import { createGossipRpc, ensureControl } from './utils/create-gossip-rpc.js'
49
+ import { shuffle, messageIdToString } from './utils/index.js'
50
+ import { msgIdFnStrictNoSign, msgIdFnStrictSign } from './utils/msgIdFn.js'
51
+ import { multiaddrToIPStr } from './utils/multiaddr.js'
52
+ import { getPublishConfigFromPeerId } from './utils/publishConfig.js'
53
+ import { removeFirstNItemsFromSet, removeItemsFromSet } from './utils/set.js'
54
+ import { SimpleTimeCache } from './utils/time-cache.js'
55
+ import type { GossipSubComponents, GossipSubEvents, GossipsubMessage, GossipsubOpts, MeshPeer, Message, PublishResult, SubscriptionChangeData, TopicValidatorFn } from './index.ts'
56
+ import type { DecodeRPCLimits } from './message/decodeRpc.js'
57
+ import type { MessageCacheRecord } from './message-cache.js'
58
+ import type { Metrics, ToSendGroupCount } from './metrics.js'
59
+ import type { PeerScoreParams, PeerScoreThresholds, PeerScoreStatsDump } from './score/index.js'
60
+ import type { MsgIdFn, PublishConfig, TopicStr, MsgIdStr, PeerIdStr, RejectReasonObj, FastMsgIdFn, DataTransform, MsgIdToStrFn, MessageId, PublishOpts } from './types.js'
61
+ import type {
62
+ Connection, Stream, PeerId, Peer,
63
+ Logger,
64
+ Topology,
65
+ TypedEventTarget,
66
+ MessageStreamDirection
67
+ } from '@libp2p/interface'
68
+ import type { Multiaddr } from '@multiformats/multiaddr'
69
+ import type { Uint8ArrayList } from 'uint8arraylist'
70
+
71
/**
 * Lifecycle state tag for the router; `GossipStatus` carries the data
 * associated with each state.
 */
enum GossipStatusCode {
  started,
  stopped
}
75
+
76
/**
 * Discriminated union over the router lifecycle: the `started` variant holds
 * the handles that `stop()` needs to tear the router back down.
 */
type GossipStatus =
  | {
    code: GossipStatusCode.started
    /** ids returned by registrar.register(), required to unregister on stop */
    registrarTopologyIds: string[]
    /** pending timer for the initial (delayed) heartbeat run */
    heartbeatTimeout: ReturnType<typeof setTimeout>
    // NOTE(review): 'hearbeatStartMs' is misspelled ("heartbeat") but `start()`
    // assigns it under this exact name — renaming must be done in lockstep
    hearbeatStartMs: number
  }
  | {
    code: GossipStatusCode.stopped
  }
86
+
87
/**
 * GossipsubOpts with the peer-score params/thresholds guaranteed present —
 * the constructor normalizes them via createPeerScoreParams/Thresholds.
 */
interface GossipOptions extends GossipsubOpts {
  scoreParams: PeerScoreParams
  scoreThresholds: PeerScoreThresholds
}
91
+
92
/**
 * Bookkeeping for the accept-from-whitelist fast path: while a peer's score
 * was recently above threshold, a bounded number of its messages are accepted
 * without re-computing the score.
 */
interface AcceptFromWhitelistEntry {
  /** number of messages accepted since recomputing the peer's score */
  messagesAccepted: number
  /** have to recompute score after this time */
  acceptUntil: number
}
98
+
99
/**
 * Outcome of pre-validating a received message: an already-seen duplicate,
 * an invalid message (with its rejection reason), or a valid message ready
 * for processing.
 */
type ReceivedMessageResult =
  | { code: MessageStatus.duplicate, msgIdStr: MsgIdStr }
  | ({ code: MessageStatus.invalid, msgIdStr?: MsgIdStr } & RejectReasonObj)
  | { code: MessageStatus.valid, messageId: MessageId, msg: Message }
103
+
104
+ export class GossipSub extends TypedEventEmitter<GossipSubEvents> implements TypedEventTarget<GossipSubEvents> {
105
+ /**
106
+ * The signature policy to follow by default
107
+ */
108
+ public readonly globalSignaturePolicy: typeof StrictSign | typeof StrictNoSign
109
+ public protocols: string[] = [constants.GossipsubIDv12, constants.GossipsubIDv11, constants.GossipsubIDv10]
110
+
111
+ private publishConfig: PublishConfig | undefined
112
+
113
+ private readonly dataTransform: DataTransform | undefined
114
+
115
+ // State
116
+
117
+ public readonly peers = new Map<PeerIdStr, PeerId>()
118
+ public readonly streamsInbound = new Map<PeerIdStr, InboundStream>()
119
+ public readonly streamsOutbound = new Map<PeerIdStr, OutboundStream>()
120
+
121
+ /** Ensures outbound streams are created sequentially */
122
+ private outboundInflightQueue = pushable<{ peerId: PeerId, connection: Connection }>({ objectMode: true })
123
+
124
+ /** Direct peers */
125
+ public readonly direct = new Set<PeerIdStr>()
126
+
127
+ /** Floodsub peers */
128
+ private readonly floodsubPeers = new Set<PeerIdStr>()
129
+
130
+ /** Cache of seen messages */
131
+ private readonly seenCache: SimpleTimeCache<void>
132
+
133
+ /**
134
+ * Map of peer id and AcceptRequestWhileListEntry
135
+ */
136
+ private readonly acceptFromWhitelist = new Map<PeerIdStr, AcceptFromWhitelistEntry>()
137
+
138
+ /**
139
+ * Map of topics to which peers are subscribed to
140
+ */
141
+ private readonly topics = new Map<TopicStr, Set<PeerIdStr>>()
142
+
143
+ /**
144
+ * List of our subscriptions
145
+ */
146
+ private readonly subscriptions = new Set<TopicStr>()
147
+
148
+ /**
149
+ * Map of topic meshes
150
+ * topic => peer id set
151
+ */
152
+ public readonly mesh = new Map<TopicStr, Set<PeerIdStr>>()
153
+
154
+ /**
155
+ * Map of topics to set of peers. These mesh peers are the ones to which we are publishing without a topic membership
156
+ * topic => peer id set
157
+ */
158
+ public readonly fanout = new Map<TopicStr, Set<PeerIdStr>>()
159
+
160
+ /**
161
+ * Map of last publish time for fanout topics
162
+ * topic => last publish time
163
+ */
164
+ private readonly fanoutLastpub = new Map<TopicStr, number>()
165
+
166
+ /**
167
+ * Map of pending messages to gossip
168
+ * peer id => control messages
169
+ */
170
+ public readonly gossip = new Map<PeerIdStr, RPC.ControlIHave[]>()
171
+
172
+ /**
173
+ * Map of control messages
174
+ * peer id => control message
175
+ */
176
+ public readonly control = new Map<PeerIdStr, RPC.ControlMessage>()
177
+
178
+ /**
179
+ * Number of IHAVEs received from peer in the last heartbeat
180
+ */
181
+ private readonly peerhave = new Map<PeerIdStr, number>()
182
+
183
+ /** Number of messages we have asked from peer in the last heartbeat */
184
+ private readonly iasked = new Map<PeerIdStr, number>()
185
+
186
+ /** Prune backoff map */
187
+ private readonly backoff = new Map<TopicStr, Map<PeerIdStr, number>>()
188
+
189
+ /**
190
+ * Connection direction cache, marks peers with outbound connections
191
+ * peer id => direction
192
+ */
193
+ private readonly outbound = new Map<PeerIdStr, boolean>()
194
+ private readonly msgIdFn: MsgIdFn
195
+
196
+ /**
197
+ * A fast message id function used for internal message de-duplication
198
+ */
199
+ private readonly fastMsgIdFn: FastMsgIdFn | undefined
200
+
201
+ private readonly msgIdToStrFn: MsgIdToStrFn
202
+
203
+ /** Maps fast message-id to canonical message-id */
204
+ private readonly fastMsgIdCache: SimpleTimeCache<MsgIdStr> | undefined
205
+
206
+ /**
207
+ * Short term cache for published message ids. This is used for penalizing peers sending
208
+ * our own messages back if the messages are anonymous or use a random author.
209
+ */
210
+ private readonly publishedMessageIds: SimpleTimeCache<void>
211
+
212
+ /**
213
+ * A message cache that contains the messages for last few heartbeat ticks
214
+ */
215
+ private readonly mcache: MessageCache
216
+
217
+ /** Peer score tracking */
218
+ public readonly score: PeerScore
219
+
220
+ /**
221
+ * Custom validator function per topic.
222
+ * Must return or resolve quickly (< 100ms) to prevent causing penalties for late messages.
223
+ * If you need to apply validation that may require longer times use `asyncValidation` option and callback the
224
+ * validation result through `Gossipsub.reportValidationResult`
225
+ */
226
+ public readonly topicValidators = new Map<TopicStr, TopicValidatorFn>()
227
+
228
+ /**
229
+ * Make this protected so child class may want to redirect to its own log.
230
+ */
231
+ protected readonly log: Logger
232
+
233
+ /**
234
+ * Number of heartbeats since the beginning of time
235
+ * This allows us to amortize some resource cleanup -- eg: backoff cleanup
236
+ */
237
+ private heartbeatTicks = 0
238
+
239
+ /**
240
+ * Tracks IHAVE/IWANT promises broken by peers
241
+ */
242
+ readonly gossipTracer: IWantTracer
243
+
244
+ /**
245
+ * Tracks IDONTWANT messages received by peers in the current heartbeat
246
+ */
247
+ private readonly idontwantCounts = new Map<PeerIdStr, number>()
248
+
249
+ /**
250
+ * Tracks IDONTWANT messages received by peers and the heartbeat they were received in
251
+ *
252
+ * idontwants are stored for `mcacheLength` heartbeats before being pruned,
253
+ * so this map is bounded by peerCount * idontwantMaxMessages * mcacheLength
254
+ */
255
+ private readonly idontwants = new Map<PeerIdStr, Map<MsgIdStr, number>>()
256
+
257
+ private readonly components: GossipSubComponents
258
+
259
+ private directPeerInitial: ReturnType<typeof setTimeout> | null = null
260
+
261
+ public static multicodec: string = constants.GossipsubIDv12
262
+
263
+ // Options
264
+ readonly opts: Required<GossipOptions>
265
+ private readonly decodeRpcLimits: DecodeRPCLimits
266
+
267
+ private readonly metrics: Metrics | null
268
+ private status: GossipStatus = { code: GossipStatusCode.stopped }
269
+ private readonly maxInboundStreams?: number
270
+ private readonly maxOutboundStreams?: number
271
+ private readonly runOnLimitedConnection?: boolean
272
+ private readonly allowedTopics: Set<TopicStr> | null
273
+
274
+ private heartbeatTimer: {
275
+ _intervalId: ReturnType<typeof setInterval> | undefined
276
+ runPeriodically(fn: () => void, period: number): void
277
+ cancel(): void
278
+ } | null = null
279
+
280
  /**
   * @param components - libp2p components used by the router (registrar,
   *   peerStore, logger, peerId, privateKey)
   * @param options - partial gossipsub options; anything omitted falls back
   *   to the protocol defaults from ./constants.js. Score params/thresholds
   *   are always normalized through their create* helpers.
   */
  constructor (components: GossipSubComponents, options: Partial<GossipsubOpts> = {}) {
    super()

    // Defaults first, caller options spread over them; scoreParams/Thresholds
    // intentionally AFTER the spread so they are always normalized
    const opts = {
      fallbackToFloodsub: true,
      floodPublish: true,
      batchPublish: false,
      tagMeshPeers: true,
      doPX: false,
      directPeers: [],
      D: constants.GossipsubD,
      Dlo: constants.GossipsubDlo,
      Dhi: constants.GossipsubDhi,
      Dscore: constants.GossipsubDscore,
      Dout: constants.GossipsubDout,
      Dlazy: constants.GossipsubDlazy,
      heartbeatInterval: constants.GossipsubHeartbeatInterval,
      fanoutTTL: constants.GossipsubFanoutTTL,
      mcacheLength: constants.GossipsubHistoryLength,
      mcacheGossip: constants.GossipsubHistoryGossip,
      seenTTL: constants.GossipsubSeenTTL,
      gossipsubIWantFollowupMs: constants.GossipsubIWantFollowupTime,
      prunePeers: constants.GossipsubPrunePeers,
      pruneBackoff: constants.GossipsubPruneBackoff,
      // NOTE(review): 'unsubcribeBackoff' is misspelled but is the public
      // option name in GossipsubOpts — do not rename here alone
      unsubcribeBackoff: constants.GossipsubUnsubscribeBackoff,
      graftFloodThreshold: constants.GossipsubGraftFloodThreshold,
      opportunisticGraftPeers: constants.GossipsubOpportunisticGraftPeers,
      opportunisticGraftTicks: constants.GossipsubOpportunisticGraftTicks,
      directConnectTicks: constants.GossipsubDirectConnectTicks,
      gossipFactor: constants.GossipsubGossipFactor,
      idontwantMinDataSize: constants.GossipsubIdontwantMinDataSize,
      idontwantMaxMessages: constants.GossipsubIdontwantMaxMessages,
      ...options,
      scoreParams: createPeerScoreParams(options.scoreParams),
      scoreThresholds: createPeerScoreThresholds(options.scoreThresholds)
    }

    this.components = components
    this.decodeRpcLimits = opts.decodeRpcLimits ?? defaultDecodeRpcLimits

    this.globalSignaturePolicy = opts.globalSignaturePolicy ?? StrictSign

    // Also wants to get notified of peers connected using floodsub
    if (opts.fallbackToFloodsub) {
      this.protocols.push(constants.FloodsubID)
    }

    // From pubsub
    this.log = components.logger.forComponent(opts.debugName ?? 'libp2p:gossipsub')

    // Gossipsub

    this.opts = opts as Required<GossipOptions>
    this.direct = new Set(opts.directPeers.map((p) => p.id.toString()))
    this.seenCache = new SimpleTimeCache<void>({ validityMs: opts.seenTTL })
    this.publishedMessageIds = new SimpleTimeCache<void>({ validityMs: opts.seenTTL })

    if (options.msgIdFn != null) {
      // Use custom function
      this.msgIdFn = options.msgIdFn
    } else {
      // Pick the message-id function matching the signature policy
      switch (this.globalSignaturePolicy) {
        case StrictSign:
          this.msgIdFn = msgIdFnStrictSign
          break
        case StrictNoSign:
          this.msgIdFn = msgIdFnStrictNoSign
          break
        default:
          throw new Error(`Invalid globalSignaturePolicy: ${this.globalSignaturePolicy}`)
      }
    }

    // The fast-msg-id cache only exists when a fast id function is supplied
    if (options.fastMsgIdFn != null) {
      this.fastMsgIdFn = options.fastMsgIdFn
      this.fastMsgIdCache = new SimpleTimeCache<MsgIdStr>({ validityMs: opts.seenTTL })
    }

    // By default, gossipsub only provide a browser friendly function to convert Uint8Array message id to string.
    this.msgIdToStrFn = options.msgIdToStrFn ?? messageIdToString

    this.mcache = options.messageCache ?? new MessageCache(opts.mcacheGossip, opts.mcacheLength, this.msgIdToStrFn)

    if (options.dataTransform != null) {
      this.dataTransform = options.dataTransform
    }

    if (options.metricsRegister != null) {
      if (options.metricsTopicStrToLabel == null) {
        throw Error('Must set metricsTopicStrToLabel with metrics')
      }

      // in theory, each topic has its own meshMessageDeliveriesWindow param
      // however in lodestar, we configure it mostly the same so just pick the max of positive ones
      // (some topics have meshMessageDeliveriesWindow as 0)
      const maxMeshMessageDeliveriesWindowMs = Math.max(
        ...Object.values(opts.scoreParams.topics).map((topicParam) => topicParam.meshMessageDeliveriesWindow),
        constants.DEFAULT_METRIC_MESH_MESSAGE_DELIVERIES_WINDOWS
      )

      const metrics = getMetrics(options.metricsRegister, options.metricsTopicStrToLabel, {
        gossipPromiseExpireSec: this.opts.gossipsubIWantFollowupMs / 1000,
        behaviourPenaltyThreshold: opts.scoreParams.behaviourPenaltyThreshold,
        maxMeshMessageDeliveriesWindowSec: maxMeshMessageDeliveriesWindowMs / 1000
      })

      // Refresh scrape-time gauges whenever the register collects
      metrics.mcacheSize.addCollect(() => { this.onScrapeMetrics(metrics) })
      for (const protocol of this.protocols) {
        metrics.protocolsEnabled.set({ protocol }, 1)
      }

      this.metrics = metrics
    } else {
      this.metrics = null
    }

    this.gossipTracer = new IWantTracer(this.opts.gossipsubIWantFollowupMs, this.msgIdToStrFn, this.metrics)

    /**
     * libp2p
     */
    this.score = new PeerScore(this.opts.scoreParams, this.metrics, this.components.logger, {
      scoreCacheValidityMs: opts.heartbeatInterval
    })

    this.maxInboundStreams = options.maxInboundStreams
    this.maxOutboundStreams = options.maxOutboundStreams
    this.runOnLimitedConnection = options.runOnLimitedConnection

    this.allowedTopics = (opts.allowedTopics != null) ? new Set(opts.allowedTopics) : null
  }
411
+
412
+ readonly [Symbol.toStringTag] = '@chainsafe/libp2p-gossipsub'
413
+
414
+ readonly [serviceCapabilities]: string[] = [
415
+ '@libp2p/pubsub'
416
+ ]
417
+
418
+ readonly [serviceDependencies]: string[] = [
419
+ '@libp2p/identify'
420
+ ]
421
+
422
+ getPeers (): PeerId[] {
423
+ return [...this.peers.values()]
424
+ }
425
+
426
+ isStarted (): boolean {
427
+ return this.status.code === GossipStatusCode.started
428
+ }
429
+
430
+ // LIFECYCLE METHODS
431
+
432
  /**
   * Mounts the gossipsub protocol onto the libp2p node and sends our
   * subscriptions to every peer connected
   */
  async start (): Promise<void> {
    // From pubsub
    if (this.isStarted()) {
      return
    }

    this.log('starting')

    this.publishConfig = getPublishConfigFromPeerId(this.globalSignaturePolicy, this.components.peerId, this.components.privateKey)

    // Create the outbound inflight queue
    // This ensures that outbound stream creation happens sequentially
    this.outboundInflightQueue = pushable({ objectMode: true })
    pipe(this.outboundInflightQueue, async (source) => {
      for await (const { peerId, connection } of source) {
        await this.createOutboundStream(peerId, connection)
      }
    }).catch((e) => { this.log.error('outbound inflight queue error', e) })

    // set direct peer addresses in the address book
    await Promise.all(
      this.opts.directPeers.map(async (p) => {
        await this.components.peerStore.merge(p.id, {
          multiaddrs: p.addrs
        })
      })
    )

    const registrar = this.components.registrar
    // Incoming streams
    // Called after a peer dials us
    await Promise.all(
      this.protocols.map(async (protocol) =>
        registrar.handle(protocol, this.onIncomingStream.bind(this), {
          maxInboundStreams: this.maxInboundStreams,
          maxOutboundStreams: this.maxOutboundStreams,
          runOnLimitedConnection: this.runOnLimitedConnection
        })
      )
    )

    // # How does Gossipsub interact with libp2p? Rough guide from Mar 2022
    //
    // ## Setup:
    // Gossipsub requests libp2p to callback, TBD
    //
    // `this.libp2p.handle()` registers a handler for `/meshsub/1.1.0` and other Gossipsub protocols
    // The handler callback is registered in libp2p Upgrader.protocols map.
    //
    // Upgrader receives an inbound connection from some transport and (`Upgrader.upgradeInbound`):
    // - Adds encryption (NOISE in our case)
    // - Multiplex stream
    // - Create a muxer and register that for each new stream call Upgrader.protocols handler
    //
    // ## Topology
    // - new instance of Topology (unlinked to libp2p) with handlers
    // - registar.register(topology)

    // register protocol with topology
    // Topology callbacks called on connection manager changes
    const topology: Topology = {
      onConnect: this.onPeerConnected.bind(this),
      onDisconnect: this.onPeerDisconnected.bind(this),
      notifyOnLimitedConnection: this.runOnLimitedConnection
    }
    const registrarTopologyIds = await Promise.all(
      this.protocols.map(async (protocol) => registrar.register(protocol, topology))
    )

    // Schedule to start heartbeat after `GossipsubHeartbeatInitialDelay`
    const heartbeatTimeout = setTimeout(this.runHeartbeat, constants.GossipsubHeartbeatInitialDelay)
    // Then, run heartbeat every `heartbeatInterval` offset by `GossipsubHeartbeatInitialDelay`

    // The timeout handle is kept in status so stop() can cancel it
    this.status = {
      code: GossipStatusCode.started,
      registrarTopologyIds,
      heartbeatTimeout,
      hearbeatStartMs: Date.now() + constants.GossipsubHeartbeatInitialDelay
    }

    this.score.start()
    // connect to direct peers
    this.directPeerInitial = setTimeout(() => {
      Promise.resolve()
        .then(async () => {
          await Promise.all(Array.from(this.direct).map(async (id) => this.connect(id)))
        })
        .catch((err) => {
          this.log(err)
        })
    }, constants.GossipsubDirectConnectInitialDelay)

    // Tag/untag mesh peers on graft/prune so the connection manager can
    // prioritize them
    if (this.opts.tagMeshPeers) {
      this.addEventListener('gossipsub:graft', this.tagMeshPeer)
      this.addEventListener('gossipsub:prune', this.untagMeshPeer)
    }

    this.log('started')
  }
535
+
536
+ /**
537
+ * Unmounts the gossipsub protocol and shuts down every connection
538
+ */
539
+ async stop (): Promise<void> {
540
+ this.log('stopping')
541
+ // From pubsub
542
+
543
+ if (this.status.code !== GossipStatusCode.started) {
544
+ return
545
+ }
546
+
547
+ const { registrarTopologyIds } = this.status
548
+ this.status = { code: GossipStatusCode.stopped }
549
+
550
+ if (this.opts.tagMeshPeers) {
551
+ this.removeEventListener('gossipsub:graft', this.tagMeshPeer)
552
+ this.removeEventListener('gossipsub:prune', this.untagMeshPeer)
553
+ }
554
+
555
+ // unregister protocol and handlers
556
+ const registrar = this.components.registrar
557
+ await Promise.all(this.protocols.map(async (protocol) => registrar.unhandle(protocol)))
558
+ registrarTopologyIds.forEach((id) => { registrar.unregister(id) })
559
+
560
+ this.outboundInflightQueue.end()
561
+
562
+ const closePromises = []
563
+ for (const outboundStream of this.streamsOutbound.values()) {
564
+ closePromises.push(outboundStream.close())
565
+ }
566
+ this.streamsOutbound.clear()
567
+
568
+ for (const inboundStream of this.streamsInbound.values()) {
569
+ closePromises.push(inboundStream.close())
570
+ }
571
+ this.streamsInbound.clear()
572
+
573
+ await Promise.all(closePromises)
574
+
575
+ this.peers.clear()
576
+ this.subscriptions.clear()
577
+
578
+ // Gossipsub
579
+
580
+ if (this.heartbeatTimer != null) {
581
+ this.heartbeatTimer.cancel()
582
+ this.heartbeatTimer = null
583
+ }
584
+
585
+ this.score.stop()
586
+
587
+ this.mesh.clear()
588
+ this.fanout.clear()
589
+ this.fanoutLastpub.clear()
590
+ this.gossip.clear()
591
+ this.control.clear()
592
+ this.peerhave.clear()
593
+ this.iasked.clear()
594
+ this.backoff.clear()
595
+ this.outbound.clear()
596
+ this.gossipTracer.clear()
597
+ this.seenCache.clear()
598
+ if (this.fastMsgIdCache != null) { this.fastMsgIdCache.clear() }
599
+ if (this.directPeerInitial != null) { clearTimeout(this.directPeerInitial) }
600
+ this.idontwantCounts.clear()
601
+ this.idontwants.clear()
602
+
603
+ this.log('stopped')
604
+ }
605
+
606
+ /** FOR DEBUG ONLY - Dump peer stats for all peers. Data is cloned, safe to mutate */
607
+ dumpPeerScoreStats (): PeerScoreStatsDump {
608
+ return this.score.dumpPeerScoreStats()
609
+ }
610
+
611
+ /**
612
+ * On an inbound stream opened
613
+ */
614
+ private onIncomingStream (stream: Stream, connection: Connection): void {
615
+ if (!this.isStarted()) {
616
+ return
617
+ }
618
+
619
+ const peerId = connection.remotePeer
620
+ // add peer to router
621
+ this.addPeer(peerId, connection.direction, connection.remoteAddr)
622
+ // create inbound stream
623
+ this.createInboundStream(peerId, stream)
624
+ // attempt to create outbound stream
625
+ this.outboundInflightQueue.push({ peerId, connection })
626
+ }
627
+
628
+ /**
629
+ * Registrar notifies an established connection with pubsub protocol
630
+ */
631
+ private onPeerConnected (peerId: PeerId, connection: Connection): void {
632
+ this.metrics?.newConnectionCount.inc({ status: connection.status })
633
+ // libp2p may emit a closed connection and never issue peer:disconnect event
634
+ // see https://github.com/ChainSafe/js-libp2p-gossipsub/issues/398
635
+ if (!this.isStarted() || connection.status !== 'open') {
636
+ return
637
+ }
638
+
639
+ this.addPeer(peerId, connection.direction, connection.remoteAddr)
640
+ this.outboundInflightQueue.push({ peerId, connection })
641
+ }
642
+
643
  /**
   * Registrar notifies a closing connection with pubsub protocol
   */
  private onPeerDisconnected (peerId: PeerId): void {
    this.log('connection ended %p', peerId)
    // removePeer is defined elsewhere in this class — presumably tears down
    // router state for the peer (streams, topics, mesh membership); verify there
    this.removePeer(peerId)
  }
650
+
651
  /**
   * Open an outbound gossipsub stream to a peer and register it.
   *
   * No-op when the router is stopped, the peer is unknown, or an outbound
   * stream already exists (unlike inbound streams, an existing outbound
   * stream is kept, not replaced). On success, records per-protocol metrics,
   * tracks floodsub peers, and immediately sends our current subscriptions
   * over the new stream. Failures are logged, never thrown to the caller.
   */
  private async createOutboundStream (peerId: PeerId, connection: Connection): Promise<void> {
    if (!this.isStarted()) {
      return
    }

    const id = peerId.toString()

    if (!this.peers.has(id)) {
      return
    }

    // TODO make this behavior more robust
    // This behavior is different than for inbound streams
    // If an outbound stream already exists, don't create a new stream
    if (this.streamsOutbound.has(id)) {
      return
    }

    try {
      const stream = new OutboundStream(
        await connection.newStream(this.protocols, {
          runOnLimitedConnection: this.runOnLimitedConnection
        }),
        (e) => { this.log.error('outbound pipe error', e) },
        { maxBufferSize: this.opts.maxOutboundBufferSize }
      )

      this.log('create outbound stream %p', peerId)

      this.streamsOutbound.set(id, stream)

      // track which peers negotiated plain floodsub instead of gossipsub
      const protocol = stream.protocol
      if (protocol === constants.FloodsubID) {
        this.floodsubPeers.add(id)
      }
      this.metrics?.peersPerProtocol.inc({ protocol }, 1)

      // Immediately send own subscriptions via the newly attached stream
      if (this.subscriptions.size > 0) {
        this.log('send subscriptions to', id)
        this.sendSubscriptions(id, Array.from(this.subscriptions), true)
      }
    } catch (e) {
      // best-effort: a failed outbound stream leaves the peer usable via inbound
      this.log.error('createOutboundStream error', e)
    }
  }
697
+
698
+ private createInboundStream (peerId: PeerId, stream: Stream): void {
699
+ if (!this.isStarted()) {
700
+ return
701
+ }
702
+
703
+ const id = peerId.toString()
704
+
705
+ if (!this.peers.has(id)) {
706
+ return
707
+ }
708
+
709
+ // TODO make this behavior more robust
710
+ // This behavior is different than for outbound streams
711
+ // If a peer initiates a new inbound connection
712
+ // we assume that one is the new canonical inbound stream
713
+ const priorInboundStream = this.streamsInbound.get(id)
714
+ if (priorInboundStream !== undefined) {
715
+ this.log('replacing existing inbound steam %s', id)
716
+ priorInboundStream.close().catch((err) => { this.log.error(err) })
717
+ }
718
+
719
+ this.log('create inbound stream %s', id)
720
+
721
+ const inboundStream = new InboundStream(stream, { maxDataLength: this.opts.maxInboundDataLength })
722
+ this.streamsInbound.set(id, inboundStream)
723
+
724
+ this.pipePeerReadStream(peerId, inboundStream.source).catch((err) => { this.log(err) })
725
+ }
726
+
727
+ /**
728
+ * Add a peer to the router
729
+ */
730
+ private addPeer (peerId: PeerId, direction: MessageStreamDirection, addr: Multiaddr): void {
731
+ const id = peerId.toString()
732
+
733
+ if (!this.peers.has(id)) {
734
+ this.peers.set(id, peerId)
735
+
736
+ // Add to peer scoring
737
+ this.score.addPeer(id)
738
+ const currentIP = multiaddrToIPStr(addr)
739
+ if (currentIP !== null) {
740
+ this.score.addIP(id, currentIP)
741
+ } else {
742
+ this.log('Added peer has no IP in current address %s %s', id, addr.toString())
743
+ }
744
+
745
+ // track the connection direction. Don't allow to unset outbound
746
+ if (!this.outbound.has(id)) {
747
+ this.outbound.set(id, direction === 'outbound')
748
+ }
749
+ }
750
+ }
751
+
752
  /**
   * Removes a peer from the router: closes and forgets its streams, drops it
   * from every topic/mesh/fanout set, and clears gossip, control, direction
   * and scoring state. No-op for unknown peers.
   */
  private removePeer (peerId: PeerId): void {
    const id = peerId.toString()

    if (!this.peers.has(id)) {
      return
    }

    // delete peer
    this.log('delete peer %p', peerId)
    this.peers.delete(id)

    const outboundStream = this.streamsOutbound.get(id)
    const inboundStream = this.streamsInbound.get(id)

    // undo the per-protocol gauge increment made when the outbound stream was created
    if (outboundStream != null) {
      this.metrics?.peersPerProtocol.inc({ protocol: outboundStream.protocol }, -1)
    }

    // close streams (best-effort; errors are only logged)
    outboundStream?.close().catch((err) => { this.log.error(err) })
    inboundStream?.close().catch((err) => { this.log.error(err) })

    // remove streams
    this.streamsOutbound.delete(id)
    this.streamsInbound.delete(id)

    // remove peer from topics map
    for (const peers of this.topics.values()) {
      peers.delete(id)
    }

    // Remove this peer from the mesh, recording the churn for metrics
    for (const [topicStr, peers] of this.mesh) {
      if (peers.delete(id)) {
        this.metrics?.onRemoveFromMesh(topicStr, ChurnReason.Dc, 1)
      }
    }

    // Remove this peer from the fanout
    for (const peers of this.fanout.values()) {
      peers.delete(id)
    }

    // Remove from floodsubPeers
    this.floodsubPeers.delete(id)
    // Remove from gossip mapping
    this.gossip.delete(id)
    // Remove from control mapping
    this.control.delete(id)
    // Remove from outbound direction tracking
    // NOTE(review): the original comment said "backoff mapping", but this line
    // deletes the outbound entry; backoff entries are not cleared here
    this.outbound.delete(id)
    // Remove from idontwant tracking
    this.idontwantCounts.delete(id)
    this.idontwants.delete(id)

    // Remove from peer scoring
    this.score.removePeer(id)

    this.acceptFromWhitelist.delete(id)
  }
815
+
816
+ // API METHODS
817
+
818
+ get started (): boolean {
819
+ return this.status.code === GossipStatusCode.started
820
+ }
821
+
822
+ /**
823
+ * Get a the peer-ids in a topic mesh
824
+ */
825
+ getMeshPeers (topic: TopicStr): PeerIdStr[] {
826
+ const peersInTopic = this.mesh.get(topic)
827
+ return (peersInTopic != null) ? Array.from(peersInTopic) : []
828
+ }
829
+
830
+ /**
831
+ * Get a list of the peer-ids that are subscribed to one topic.
832
+ */
833
+ getSubscribers (topic: TopicStr): PeerId[] {
834
+ const peersInTopic = this.topics.get(topic)
835
+ return ((peersInTopic != null) ? Array.from(peersInTopic) : []).map((str) => this.peers.get(str) ?? peerIdFromString(str))
836
+ }
837
+
838
+ /**
839
+ * Get the list of topics which the peer is subscribed to.
840
+ */
841
+ getTopics (): TopicStr[] {
842
+ return Array.from(this.subscriptions)
843
+ }
844
+
845
+ // TODO: Reviewing Pubsub API
846
+
847
+ // MESSAGE METHODS
848
+
849
  /**
   * Responsible for processing each RPC message received by other peers.
   *
   * Pipes the peer's inbound byte stream, decoding each frame as an RPC with
   * the configured decode limits and dispatching it to handleReceivedRpc.
   * Per-frame decode/dispatch errors are counted and logged without closing
   * the stream; a failure of the pipe itself disconnects the peer.
   */
  private async pipePeerReadStream (peerId: PeerId, stream: AsyncIterable<Uint8ArrayList>): Promise<void> {
    try {
      await pipe(stream, async (source) => {
        for await (const data of source) {
          try {
            // TODO: Check max gossip message size, before decodeRpc()
            const rpcBytes = data.subarray()
            // Note: This function may throw, it must be wrapped in a try {} catch {} to prevent closing the stream.
            // TODO: What should we do if the entire RPC is invalid?
            const rpc = RPC.decode(rpcBytes, {
              limits: {
                subscriptions: this.decodeRpcLimits.maxSubscriptions,
                messages: this.decodeRpcLimits.maxMessages,
                control$: {
                  ihave: this.decodeRpcLimits.maxIhaveMessageIDs,
                  iwant: this.decodeRpcLimits.maxIwantMessageIDs,
                  graft: this.decodeRpcLimits.maxControlMessages,
                  prune: this.decodeRpcLimits.maxControlMessages,
                  prune$: {
                    peers: this.decodeRpcLimits.maxPeerInfos
                  },
                  idontwant: this.decodeRpcLimits.maxControlMessages,
                  idontwant$: {
                    messageIDs: this.decodeRpcLimits.maxIdontwantMessageIDs
                  }
                }
              }
            })

            this.metrics?.onRpcRecv(rpc, rpcBytes.length)

            // Since processRpc may be overridden entirely in unsafe ways,
            // the simplest/safest option here is to wrap in a function and capture all errors
            // to prevent a top-level unhandled exception
            // This processing of rpc messages should happen without awaiting full validation/execution of prior messages
            if (this.opts.awaitRpcHandler) {
              try {
                await this.handleReceivedRpc(peerId, rpc)
              } catch (err) {
                this.metrics?.onRpcRecvError()
                this.log(err)
              }
            } else {
              this.handleReceivedRpc(peerId, rpc).catch((err) => {
                this.metrics?.onRpcRecvError()
                this.log(err)
              })
            }
          } catch (e) {
            // per-frame error: count it and keep reading the stream
            this.metrics?.onRpcDataError()
            this.log(e as Error)
          }
        }
      })
    } catch (err) {
      // the pipe itself failed: treat the peer as disconnected
      this.metrics?.onPeerReadStreamError()
      this.handlePeerReadStreamError(err as Error, peerId)
    }
  }
911
+
912
+ /**
913
+ * Handle error when read stream pipe throws, less of the functional use but more
914
+ * to for testing purposes to spy on the error handling
915
+ */
916
+ private handlePeerReadStreamError (err: Error, peerId: PeerId): void {
917
+ this.log.error(err)
918
+ this.onPeerDisconnected(peerId)
919
+ }
920
+
921
  /**
   * Handles an rpc request from a peer.
   *
   * Processing order: graylist/acceptance check, subscription updates (emitting
   * a 'subscription-change' event), message handling, then control messages.
   * Topics outside `allowedTopics` (when configured) are silently skipped.
   */
  public async handleReceivedRpc (from: PeerId, rpc: RPC): Promise<void> {
    // Check if peer is graylisted in which case we ignore the event
    if (!this.acceptFrom(from.toString())) {
      this.log('received message from unacceptable peer %p', from)
      this.metrics?.rpcRecvNotAccepted.inc()
      return
    }

    // counts below only feed the debug log line
    const subscriptions = (rpc.subscriptions != null) ? rpc.subscriptions.length : 0
    const messages = (rpc.messages != null) ? rpc.messages.length : 0
    let ihave = 0
    let iwant = 0
    let graft = 0
    let prune = 0
    if (rpc.control != null) {
      if (rpc.control.ihave != null) { ihave = rpc.control.ihave.length }
      if (rpc.control.iwant != null) { iwant = rpc.control.iwant.length }
      if (rpc.control.graft != null) { graft = rpc.control.graft.length }
      if (rpc.control.prune != null) { prune = rpc.control.prune.length }
    }
    this.log(
      `rpc.from ${from.toString()} subscriptions ${subscriptions} messages ${messages} ihave ${ihave} iwant ${iwant} graft ${graft} prune ${prune}`
    )

    // Handle received subscriptions
    if ((rpc.subscriptions != null) && rpc.subscriptions.length > 0) {
      // update peer subscriptions

      // NOTE(review): this local shadows the numeric `subscriptions` count above
      const subscriptions: Array<{ topic: TopicStr, subscribe: boolean }> = []

      rpc.subscriptions.forEach((subOpt) => {
        const topic = subOpt.topic
        const subscribe = subOpt.subscribe === true

        if (topic != null) {
          if ((this.allowedTopics != null) && !this.allowedTopics.has(topic)) {
            // Not allowed: subscription data-structures are not bounded by topic count
            // TODO: Should apply behaviour penalties?
            return
          }

          this.handleReceivedSubscription(from, topic, subscribe)

          subscriptions.push({ topic, subscribe })
        }
      })

      this.safeDispatchEvent<SubscriptionChangeData>('subscription-change', {
        detail: { peerId: from, subscriptions }
      })
    }

    // Handle messages
    // TODO: (up to limit)
    for (const message of rpc.messages) {
      if ((this.allowedTopics != null) && !this.allowedTopics.has(message.topic)) {
        // Not allowed: message cache data-structures are not bounded by topic count
        // TODO: Should apply behaviour penalties?
        continue
      }

      const handleReceivedMessagePromise = this.handleReceivedMessage(from, message)
        // Should never throw, but handle just in case
        .catch((err) => {
          this.metrics?.onMsgRecvError(message.topic)
          this.log(err)
        })

      // when configured, messages are processed sequentially; otherwise they
      // are validated concurrently (the catch above prevents unhandled rejections)
      if (this.opts.awaitRpcMessageHandler) {
        await handleReceivedMessagePromise
      }
    }

    // Handle control messages
    if (rpc.control != null) {
      await this.handleControlMessage(from.toString(), rpc.control)
    }
  }
1002
+
1003
+ /**
1004
+ * Handles a subscription change from a peer
1005
+ */
1006
+ private handleReceivedSubscription (from: PeerId, topic: TopicStr, subscribe: boolean): void {
1007
+ this.log('subscription update from %p topic %s', from, topic)
1008
+
1009
+ let topicSet = this.topics.get(topic)
1010
+ if (topicSet == null) {
1011
+ topicSet = new Set()
1012
+ this.topics.set(topic, topicSet)
1013
+ }
1014
+
1015
+ if (subscribe) {
1016
+ // subscribe peer to new topic
1017
+ topicSet.add(from.toString())
1018
+ } else {
1019
+ // unsubscribe from existing topic
1020
+ topicSet.delete(from.toString())
1021
+ }
1022
+
1023
+ // TODO: rust-libp2p has A LOT more logic here
1024
+ }
1025
+
1026
  /**
   * Handles a newly received message from an RPC.
   * May forward to all peers in the mesh.
   *
   * Routes the prevalidation result: duplicates feed scoring/tracing, invalid
   * messages are rejected, valid messages are cached, emitted to local
   * subscribers and (unless asyncValidation is on) forwarded to the mesh.
   */
  private async handleReceivedMessage (from: PeerId, rpcMsg: RPC.Message): Promise<void> {
    this.metrics?.onMsgRecvPreValidation(rpcMsg.topic)

    const validationResult = await this.validateReceivedMessage(from, rpcMsg)

    this.metrics?.onPrevalidationResult(rpcMsg.topic, validationResult.code)

    const validationCode = validationResult.code
    switch (validationCode) {
      case MessageStatus.duplicate:
        // Report the duplicate
        this.score.duplicateMessage(from.toString(), validationResult.msgIdStr, rpcMsg.topic)
        // due to the collision of fastMsgIdFn, 2 different messages may end up the same fastMsgId
        // so we need to also mark the duplicate message as delivered or the promise is not resolved
        // and peer gets penalized. See https://github.com/ChainSafe/js-libp2p-gossipsub/pull/385
        this.gossipTracer.deliverMessage(validationResult.msgIdStr, true)
        this.mcache.observeDuplicate(validationResult.msgIdStr, from.toString())
        return

      case MessageStatus.invalid:
        // invalid messages received
        // metrics.register_invalid_message(&raw_message.topic)
        // Tell peer_score about reject
        // Reject the original source, and any duplicates we've seen from other peers.
        if (validationResult.msgIdStr != null) {
          const msgIdStr = validationResult.msgIdStr
          this.score.rejectMessage(from.toString(), msgIdStr, rpcMsg.topic, validationResult.reason)
          this.gossipTracer.rejectMessage(msgIdStr, validationResult.reason)
        } else {
          // no message id could be computed (e.g. signature or transform failure)
          this.score.rejectInvalidMessage(from.toString(), rpcMsg.topic)
        }

        this.metrics?.onMsgRecvInvalid(rpcMsg.topic, validationResult)
        return

      case MessageStatus.valid:
        // Tells score that message arrived (but is maybe not fully validated yet).
        // Consider the message as delivered for gossip promises.
        this.score.validateMessage(validationResult.messageId.msgIdStr)
        this.gossipTracer.deliverMessage(validationResult.messageId.msgIdStr)

        // Add the message to our memcache
        // if no validation is required, mark the message as validated
        this.mcache.put(validationResult.messageId, rpcMsg, !this.opts.asyncValidation)

        // Dispatch the message to the user if we are subscribed to the topic
        if (this.subscriptions.has(rpcMsg.topic)) {
          const isFromSelf = this.components.peerId.equals(from)

          if (!isFromSelf || this.opts.emitSelf) {
            super.dispatchEvent(
              new CustomEvent<GossipsubMessage>('gossipsub:message', {
                detail: {
                  propagationSource: from,
                  msgId: validationResult.messageId.msgIdStr,
                  msg: validationResult.msg
                }
              })
            )
            // TODO: Add option to switch between emit per topic or all messages in one
            super.dispatchEvent(new CustomEvent<Message>('message', { detail: validationResult.msg }))
          }
        }

        // Forward the message to mesh peers, if no validation is required
        // If asyncValidation is ON, expect the app layer to call reportMessageValidationResult(), then forward
        if (!this.opts.asyncValidation) {
          // TODO: in rust-libp2p
          // .forward_msg(&msg_id, raw_message, Some(propagation_source))
          this.forwardMessage(validationResult.messageId.msgIdStr, rpcMsg, from.toString())
        }
        break
      default:
        throw new Error(`Invalid validation result: ${validationCode}`)
    }
  }
1106
+
1107
+ /**
1108
+ * Handles a newly received message from an RPC.
1109
+ * May forward to all peers in the mesh.
1110
+ */
1111
+ private async validateReceivedMessage (
1112
+ propagationSource: PeerId,
1113
+ rpcMsg: RPC.Message
1114
+ ): Promise<ReceivedMessageResult> {
1115
+ // Fast message ID stuff
1116
+ const fastMsgIdStr = this.fastMsgIdFn?.(rpcMsg)
1117
+ const msgIdCached = fastMsgIdStr !== undefined ? this.fastMsgIdCache?.get(fastMsgIdStr) : undefined
1118
+
1119
+ if (msgIdCached != null) {
1120
+ // This message has been seen previously. Ignore it
1121
+ return { code: MessageStatus.duplicate, msgIdStr: msgIdCached }
1122
+ }
1123
+
1124
+ // Perform basic validation on message and convert to RawGossipsubMessage for fastMsgIdFn()
1125
+ const validationResult = await validateToRawMessage(this.globalSignaturePolicy, rpcMsg)
1126
+
1127
+ if (!validationResult.valid) {
1128
+ return { code: MessageStatus.invalid, reason: RejectReason.Error, error: validationResult.error }
1129
+ }
1130
+
1131
+ const msg = validationResult.message
1132
+
1133
+ // Try and perform the data transform to the message. If it fails, consider it invalid.
1134
+ try {
1135
+ if (this.dataTransform != null) {
1136
+ msg.data = this.dataTransform.inboundTransform(rpcMsg.topic, msg.data)
1137
+ }
1138
+ } catch (e) {
1139
+ this.log('Invalid message, transform failed', e)
1140
+ return { code: MessageStatus.invalid, reason: RejectReason.Error, error: ValidateError.TransformFailed }
1141
+ }
1142
+
1143
+ // TODO: Check if message is from a blacklisted source or propagation origin
1144
+ // - Reject any message from a blacklisted peer
1145
+ // - Also reject any message that originated from a blacklisted peer
1146
+ // - reject messages claiming to be from ourselves but not locally published
1147
+
1148
+ // Calculate the message id on the transformed data.
1149
+ const msgId = await this.msgIdFn(msg)
1150
+ const msgIdStr = this.msgIdToStrFn(msgId)
1151
+ const messageId = { msgId, msgIdStr }
1152
+
1153
+ // Add the message to the duplicate caches
1154
+ if (fastMsgIdStr !== undefined && (this.fastMsgIdCache != null)) {
1155
+ const collision = this.fastMsgIdCache.put(fastMsgIdStr, msgIdStr)
1156
+ if (collision) {
1157
+ this.metrics?.fastMsgIdCacheCollision.inc()
1158
+ }
1159
+ }
1160
+
1161
+ if (this.seenCache.has(msgIdStr)) {
1162
+ return { code: MessageStatus.duplicate, msgIdStr }
1163
+ } else {
1164
+ this.seenCache.put(msgIdStr)
1165
+ }
1166
+
1167
+ // possibly send IDONTWANTs to mesh peers
1168
+ if ((rpcMsg.data?.length ?? 0) >= this.opts.idontwantMinDataSize) {
1169
+ this.sendIDontWants(msgId, rpcMsg.topic, propagationSource.toString())
1170
+ }
1171
+
1172
+ // (Optional) Provide custom validation here with dynamic validators per topic
1173
+ // NOTE: This custom topicValidator() must resolve fast (< 100ms) to allow scores
1174
+ // to not penalize peers for long validation times.
1175
+ const topicValidator = this.topicValidators.get(rpcMsg.topic)
1176
+ if (topicValidator != null) {
1177
+ let acceptance: TopicValidatorResult
1178
+ // Use try {} catch {} in case topicValidator() is synchronous
1179
+ try {
1180
+ acceptance = await topicValidator(propagationSource, msg)
1181
+ } catch (e) {
1182
+ const errCode = (e as { code: string }).code
1183
+ if (errCode === constants.ERR_TOPIC_VALIDATOR_IGNORE) { acceptance = TopicValidatorResult.Ignore }
1184
+ if (errCode === constants.ERR_TOPIC_VALIDATOR_REJECT) { acceptance = TopicValidatorResult.Reject } else { acceptance = TopicValidatorResult.Ignore }
1185
+ }
1186
+
1187
+ if (acceptance !== TopicValidatorResult.Accept) {
1188
+ return { code: MessageStatus.invalid, reason: rejectReasonFromAcceptance(acceptance), msgIdStr }
1189
+ }
1190
+ }
1191
+
1192
+ return { code: MessageStatus.valid, messageId, msg }
1193
+ }
1194
+
1195
+ /**
1196
+ * Return score of a peer.
1197
+ */
1198
+ getScore (peerId: PeerIdStr): number {
1199
+ return this.score.score(peerId)
1200
+ }
1201
+
1202
+ /**
1203
+ * Send an rpc object to a peer with subscriptions
1204
+ */
1205
+ private sendSubscriptions (toPeer: PeerIdStr, topics: string[], subscribe: boolean): void {
1206
+ this.sendRpc(toPeer, {
1207
+ subscriptions: topics.map((topic) => ({ topic, subscribe })),
1208
+ messages: []
1209
+ })
1210
+ }
1211
+
1212
  /**
   * Handles an rpc control message from a peer.
   *
   * Processes IHAVE/IWANT/GRAFT/PRUNE/IDONTWANT and, when the handlers
   * produced a response (IWANT requests, IHAVE-replied messages, PRUNEs),
   * sends a single gossip RPC back. IWANT promises are only tracked after
   * the response RPC was actually sent.
   */
  private async handleControlMessage (id: PeerIdStr, controlMsg: RPC.ControlMessage): Promise<void> {
    if (controlMsg === undefined) {
      return
    }

    const iwant = (controlMsg.ihave?.length > 0) ? this.handleIHave(id, controlMsg.ihave) : []
    const ihave = (controlMsg.iwant?.length > 0) ? this.handleIWant(id, controlMsg.iwant) : []
    const prune = (controlMsg.graft?.length > 0) ? await this.handleGraft(id, controlMsg.graft) : []
    ;(controlMsg.prune?.length > 0) && (await this.handlePrune(id, controlMsg.prune))
    ;(controlMsg.idontwant?.length > 0) && this.handleIdontwant(id, controlMsg.idontwant)

    // nothing to send back
    if ((iwant.length === 0) && (ihave.length === 0) && (prune.length === 0)) {
      return
    }

    const sent = this.sendRpc(id, createGossipRpc(ihave, { iwant, prune }))
    const iwantMessageIds = iwant[0]?.messageIDs
    if (iwantMessageIds != null) {
      if (sent) {
        // track the promise so unfulfilled IWANTs penalize the peer
        this.gossipTracer.addPromise(id, iwantMessageIds)
      } else {
        this.metrics?.iwantPromiseUntracked.inc(1)
      }
    }
  }
1240
+
1241
+ /**
1242
+ * Whether to accept a message from a peer
1243
+ */
1244
+ public acceptFrom (id: PeerIdStr): boolean {
1245
+ if (this.direct.has(id)) {
1246
+ return true
1247
+ }
1248
+
1249
+ const now = Date.now()
1250
+ const entry = this.acceptFromWhitelist.get(id)
1251
+
1252
+ if ((entry != null) && entry.messagesAccepted < ACCEPT_FROM_WHITELIST_MAX_MESSAGES && entry.acceptUntil >= now) {
1253
+ entry.messagesAccepted += 1
1254
+ return true
1255
+ }
1256
+
1257
+ const score = this.score.score(id)
1258
+ if (score >= ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE) {
1259
+ // peer is unlikely to be able to drop its score to `graylistThreshold`
1260
+ // after 128 messages or 1s
1261
+ this.acceptFromWhitelist.set(id, {
1262
+ messagesAccepted: 0,
1263
+ acceptUntil: now + ACCEPT_FROM_WHITELIST_DURATION_MS
1264
+ })
1265
+ } else {
1266
+ this.acceptFromWhitelist.delete(id)
1267
+ }
1268
+
1269
+ return score >= this.opts.scoreThresholds.graylistThreshold
1270
+ }
1271
+
1272
  /**
   * Handles IHAVE messages.
   *
   * Applies flood protection (per-heartbeat advertisement count and a cap on
   * total message-ids asked per peer), collects advertised ids we have not
   * seen for topics we have a mesh in, and returns at most one IWANT entry
   * with a shuffled, truncated list of ids to request.
   */
  private handleIHave (id: PeerIdStr, ihave: RPC.ControlIHave[]): RPC.ControlIWant[] {
    if (ihave.length === 0) {
      return []
    }

    // we ignore IHAVE gossip from any peer whose score is below the gossips threshold
    const score = this.score.score(id)
    if (score < this.opts.scoreThresholds.gossipThreshold) {
      this.log('IHAVE: ignoring peer %s with score below threshold [ score = %d ]', id, score)
      this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.LowScore })
      return []
    }

    // IHAVE flood protection: bound advertisements per heartbeat interval
    const peerhave = (this.peerhave.get(id) ?? 0) + 1
    this.peerhave.set(id, peerhave)
    if (peerhave > constants.GossipsubMaxIHaveMessages) {
      this.log(
        'IHAVE: peer %s has advertised too many times (%d) within this heartbeat interval; ignoring',
        id,
        peerhave
      )
      this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIhave })
      return []
    }

    // bound the total number of message-ids we will request from this peer
    const iasked = this.iasked.get(id) ?? 0
    if (iasked >= constants.GossipsubMaxIHaveLength) {
      this.log('IHAVE: peer %s has already advertised too many messages (%d); ignoring', id, iasked)
      this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIasked })
      return []
    }

    // string msgId => msgId
    const iwant = new Map<MsgIdStr, Uint8Array>()

    ihave.forEach(({ topicID, messageIDs }) => {
      // only consider topics we actually have a mesh for
      if (topicID == null || (messageIDs == null) || !this.mesh.has(topicID)) {
        return
      }

      let idonthave = 0

      messageIDs.forEach((msgId) => {
        const msgIdStr = this.msgIdToStrFn(msgId)
        if (!this.seenCache.has(msgIdStr)) {
          iwant.set(msgIdStr, msgId)
          idonthave++
        }
      })

      this.metrics?.onIhaveRcv(topicID, messageIDs.length, idonthave)
    })

    if (iwant.size === 0) {
      return []
    }

    // clamp the ask so iasked never exceeds GossipsubMaxIHaveLength
    let iask = iwant.size
    if (iask + iasked > constants.GossipsubMaxIHaveLength) {
      iask = constants.GossipsubMaxIHaveLength - iasked
    }

    this.log('IHAVE: Asking for %d out of %d messages from %s', iask, iwant.size, id)

    let iwantList = Array.from(iwant.values())
    // ask in random order
    shuffle(iwantList)

    // truncate to the messages we are actually asking for and update the iasked counter
    iwantList = iwantList.slice(0, iask)
    this.iasked.set(id, iasked + iask)

    // do not add gossipTracer promise here until a successful sendRpc()

    return [
      {
        messageIDs: iwantList
      }
    ]
  }
1356
+
1357
  /**
   * Handles IWANT messages
   * Returns messages to send back to peer
   *
   * Ignores requests from peers below the gossip threshold, skips ids not in
   * the message cache, and refuses to retransmit a message to the same peer
   * more than GossipsubGossipRetransmission times.
   */
  private handleIWant (id: PeerIdStr, iwant: RPC.ControlIWant[]): RPC.Message[] {
    if (iwant.length === 0) {
      return []
    }

    // we don't respond to IWANT requests from any per whose score is below the gossip threshold
    const score = this.score.score(id)
    if (score < this.opts.scoreThresholds.gossipThreshold) {
      this.log('IWANT: ignoring peer %s with score below threshold [score = %d]', id, score)
      return []
    }

    const ihave = new Map<MsgIdStr, RPC.Message>()
    const iwantByTopic = new Map<TopicStr, number>()
    let iwantDonthave = 0

    iwant.forEach(({ messageIDs }) => {
      messageIDs?.forEach((msgId) => {
        const msgIdStr = this.msgIdToStrFn(msgId)
        // also increments the per-peer IWANT counter for this message
        const entry = this.mcache.getWithIWantCount(msgIdStr, id)
        if (entry == null) {
          iwantDonthave++
          return
        }

        iwantByTopic.set(entry.msg.topic, 1 + (iwantByTopic.get(entry.msg.topic) ?? 0))

        if (entry.count > constants.GossipsubGossipRetransmission) {
          this.log('IWANT: Peer %s has asked for message %s too many times: ignoring request', id, msgId)
          return
        }

        ihave.set(msgIdStr, entry.msg)
      })
    })

    this.metrics?.onIwantRcv(iwantByTopic, iwantDonthave)

    if (ihave.size === 0) {
      this.log('IWANT: Could not provide any wanted messages to %s', id)
      return []
    }

    this.log('IWANT: Sending %d messages to %s', ihave.size, id)

    return Array.from(ihave.values())
  }
1408
+
1409
  /**
   * Handles GRAFT messages.
   *
   * For each grafted topic, either accepts the peer into our mesh or queues a
   * PRUNE in response (direct peer, backed-off peer, negative score, or mesh
   * at/over Dhi with an inbound-only connection). Peer exchange (PX) in the
   * resulting PRUNEs is suppressed for unknown topics, direct peers and
   * negatively scored peers. Returns the PRUNE control records to send back.
   */
  private async handleGraft (id: PeerIdStr, graft: RPC.ControlGraft[]): Promise<RPC.ControlPrune[]> {
    const prune: TopicStr[] = []
    const score = this.score.score(id)
    const now = Date.now()
    let doPX = this.opts.doPX

    graft.forEach(({ topicID }) => {
      if (topicID == null) {
        return
      }

      const peersInMesh = this.mesh.get(topicID)
      if (peersInMesh == null) {
        // don't do PX when there is an unknown topic to avoid leaking our peers
        doPX = false
        // spam hardening: ignore GRAFTs for unknown topics
        return
      }

      // check if peer is already in the mesh; if so do nothing
      if (peersInMesh.has(id)) {
        return
      }

      const backoffExpiry = this.backoff.get(topicID)?.get(id)

      // This if/else chain contains the various cases of valid (and semi-valid) GRAFTs
      // Most of these cases result in a PRUNE immediately being sent in response

      // we don't GRAFT to/from direct peers; complain loudly if this happens
      if (this.direct.has(id)) {
        this.log('GRAFT: ignoring request from direct peer %s', id)
        // this is possibly a bug from a non-reciprical configuration; send a PRUNE
        prune.push(topicID)
        // but don't px
        doPX = false

        // make sure we are not backing off that peer
      } else if (typeof backoffExpiry === 'number' && now < backoffExpiry) {
        this.log('GRAFT: ignoring backed off peer %s', id)
        // add behavioral penalty
        this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff)
        // no PX
        doPX = false
        // check the flood cutoff -- is the GRAFT coming too fast?
        const floodCutoff = backoffExpiry + this.opts.graftFloodThreshold - this.opts.pruneBackoff
        if (now < floodCutoff) {
          // extra penalty
          this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff)
        }
        // refresh the backoff
        this.addBackoff(id, topicID)
        prune.push(topicID)

        // check the score
      } else if (score < 0) {
        // we don't GRAFT peers with negative score
        this.log('GRAFT: ignoring peer %s with negative score: score=%d, topic=%s', id, score, topicID)
        // we do send them PRUNE however, because it's a matter of protocol correctness
        prune.push(topicID)
        // but we won't PX to them
        doPX = false
        // add/refresh backoff so that we don't reGRAFT too early even if the score decays
        this.addBackoff(id, topicID)

        // check the number of mesh peers; if it is at (or over) Dhi, we only accept grafts
        // from peers with outbound connections; this is a defensive check to restrict potential
        // mesh takeover attacks combined with love bombing
      } else if (peersInMesh.size >= this.opts.Dhi && !(this.outbound.get(id) ?? false)) {
        prune.push(topicID)
        this.addBackoff(id, topicID)

        // valid graft
      } else {
        this.log('GRAFT: Add mesh link from %s in %s', id, topicID)
        this.score.graft(id, topicID)
        peersInMesh.add(id)

        this.metrics?.onAddToMesh(topicID, InclusionReason.Subscribed, 1)
      }

      // NOTE(review): the event fires for every GRAFT entry, including rejected
      // ones — presumably intentional, but worth confirming
      this.safeDispatchEvent<MeshPeer>('gossipsub:graft', { detail: { peerId: id, topic: topicID, direction: 'inbound' } })
    })

    if (prune.length === 0) {
      return []
    }

    const onUnsubscribe = false
    return Promise.all(prune.map(async (topic) => this.makePrune(id, topic, doPX, onUnsubscribe)))
  }
1503
+
1504
+ /**
1505
+ * Handles Prune messages
1506
+ */
1507
+ private async handlePrune (id: PeerIdStr, prune: RPC.ControlPrune[]): Promise<void> {
1508
+ const score = this.score.score(id)
1509
+
1510
+ for (const { topicID, backoff, peers } of prune) {
1511
+ if (topicID == null) {
1512
+ continue
1513
+ }
1514
+
1515
+ const peersInMesh = this.mesh.get(topicID)
1516
+ if (peersInMesh == null) {
1517
+ return
1518
+ }
1519
+
1520
+ this.log('PRUNE: Remove mesh link to %s in %s', id, topicID)
1521
+ this.score.prune(id, topicID)
1522
+ if (peersInMesh.has(id)) {
1523
+ peersInMesh.delete(id)
1524
+ this.metrics?.onRemoveFromMesh(topicID, ChurnReason.Prune, 1)
1525
+ }
1526
+
1527
+ // is there a backoff specified by the peer? if so obey it
1528
+ if (typeof backoff === 'number' && backoff > 0) {
1529
+ this.doAddBackoff(id, topicID, backoff * 1000)
1530
+ } else {
1531
+ this.addBackoff(id, topicID)
1532
+ }
1533
+
1534
+ // PX
1535
+ if ((peers != null) && (peers.length > 0)) {
1536
+ // we ignore PX from peers with insufficient scores
1537
+ if (score < this.opts.scoreThresholds.acceptPXThreshold) {
1538
+ this.log(
1539
+ 'PRUNE: ignoring PX from peer %s with insufficient score [score = %d, topic = %s]',
1540
+ id,
1541
+ score,
1542
+ topicID
1543
+ )
1544
+ } else {
1545
+ await this.pxConnect(peers)
1546
+ }
1547
+ }
1548
+
1549
+ this.safeDispatchEvent<MeshPeer>('gossipsub:prune', { detail: { peerId: id, topic: topicID, direction: 'inbound' } })
1550
+ }
1551
+ }
1552
+
1553
  /**
   * Handles IDONTWANT control messages from a peer.
   *
   * Records each announced message-id (stamped with the current heartbeat
   * tick) so we avoid sending those messages back to the peer. The total
   * number of ids honored per peer is capped by `idontwantMaxMessages`;
   * once reached, further IDONTWANTs from that peer are ignored.
   */
  private handleIdontwant (id: PeerIdStr, idontwant: RPC.ControlIDontWant[]): void {
    let idontwantCount = this.idontwantCounts.get(id) ?? 0
    // return early if we have already received too many IDONTWANT messages from the peer
    if (idontwantCount >= this.opts.idontwantMaxMessages) {
      return
    }
    const startIdontwantCount = idontwantCount

    let idontwants = this.idontwants.get(id)
    if (idontwants == null) {
      idontwants = new Map()
      this.idontwants.set(id, idontwants)
    }
    let idonthave = 0
    // labeled loop: stop consuming ids across ALL entries once the cap is hit
    // eslint-disable-next-line no-labels
    out: for (const { messageIDs } of idontwant) {
      for (const msgId of messageIDs) {
        if (idontwantCount >= this.opts.idontwantMaxMessages) {
          // eslint-disable-next-line no-labels
          break out
        }
        idontwantCount++

        const msgIdStr = this.msgIdToStrFn(msgId)
        // remember the heartbeat tick so stale entries can be expired later
        idontwants.set(msgIdStr, this.heartbeatTicks)
        if (!this.mcache.msgs.has(msgIdStr)) { idonthave++ }
      }
    }
    this.idontwantCounts.set(id, idontwantCount)
    const total = idontwantCount - startIdontwantCount
    this.metrics?.onIdontwantRcv(total, idonthave)
  }
1585
+
1586
+ /**
1587
+ * Add standard backoff log for a peer in a topic
1588
+ */
1589
+ private addBackoff (id: PeerIdStr, topic: TopicStr): void {
1590
+ this.doAddBackoff(id, topic, this.opts.pruneBackoff)
1591
+ }
1592
+
1593
+ /**
1594
+ * Add backoff expiry interval for a peer in a topic
1595
+ *
1596
+ * @param id
1597
+ * @param topic
1598
+ * @param intervalMs - backoff duration in milliseconds
1599
+ */
1600
+ private doAddBackoff (id: PeerIdStr, topic: TopicStr, intervalMs: number): void {
1601
+ let backoff = this.backoff.get(topic)
1602
+ if (backoff == null) {
1603
+ backoff = new Map()
1604
+ this.backoff.set(topic, backoff)
1605
+ }
1606
+ const expire = Date.now() + intervalMs
1607
+ const existingExpire = backoff.get(id) ?? 0
1608
+ if (existingExpire < expire) {
1609
+ backoff.set(id, expire)
1610
+ }
1611
+ }
1612
+
1613
+ /**
1614
+ * Apply penalties from broken IHAVE/IWANT promises
1615
+ */
1616
+ private applyIwantPenalties (): void {
1617
+ this.gossipTracer.getBrokenPromises().forEach((count, p) => {
1618
+ this.log("peer %s didn't follow up in %d IWANT requests; adding penalty", p, count)
1619
+ this.score.addPenalty(p, count, ScorePenalty.BrokenPromise)
1620
+ })
1621
+ }
1622
+
1623
+ /**
1624
+ * Clear expired backoff expiries
1625
+ */
1626
+ private clearBackoff (): void {
1627
+ // we only clear once every GossipsubPruneBackoffTicks ticks to avoid iterating over the maps too much
1628
+ if (this.heartbeatTicks % constants.GossipsubPruneBackoffTicks !== 0) {
1629
+ return
1630
+ }
1631
+
1632
+ const now = Date.now()
1633
+ this.backoff.forEach((backoff, topic) => {
1634
+ backoff.forEach((expire, id) => {
1635
+ // add some slack time to the expiration, see https://github.com/libp2p/specs/pull/289
1636
+ if (expire + BACKOFF_SLACK * this.opts.heartbeatInterval < now) {
1637
+ backoff.delete(id)
1638
+ }
1639
+ })
1640
+ if (backoff.size === 0) {
1641
+ this.backoff.delete(topic)
1642
+ }
1643
+ })
1644
+ }
1645
+
1646
+ /**
1647
+ * Maybe reconnect to direct peers
1648
+ */
1649
+ private async directConnect (): Promise<void> {
1650
+ const toconnect: string[] = []
1651
+ this.direct.forEach((id) => {
1652
+ if (!this.streamsOutbound.has(id)) {
1653
+ toconnect.push(id)
1654
+ }
1655
+ })
1656
+
1657
+ await Promise.all(toconnect.map(async (id) => this.connect(id)))
1658
+ }
1659
+
1660
  /**
   * Maybe attempt connection given signed peer records
   *
   * Randomly samples at most `prunePeers` of the supplied PX peer infos,
   * validates any attached signed peer record through the peer store, then
   * dials the resulting set of previously-unknown peers.
   */
  private async pxConnect (peers: RPC.PeerInfo[]): Promise<void> {
    if (peers.length > this.opts.prunePeers) {
      shuffle(peers)
      peers = peers.slice(0, this.opts.prunePeers)
    }
    const toconnect: string[] = []

    await Promise.all(
      peers.map(async (pi) => {
        if (pi.peerID == null) {
          return
        }

        const peer = peerIdFromMultihash(Digest.decode(pi.peerID))
        const p = peer.toString()

        // already a known/connected peer, nothing to do
        if (this.peers.has(p)) {
          return
        }

        // no signed record: dial anyway, addresses must come from elsewhere
        if (pi.signedPeerRecord == null) {
          toconnect.push(p)
          return
        }

        // The peer sent us a signed record
        // This is not a record from the peer who sent the record, but another peer who is connected with it
        // Ensure that it is valid
        try {
          if (!(await this.components.peerStore.consumePeerRecord(pi.signedPeerRecord, {
            expectedPeer: peer
          }))) {
            this.log('bogus peer record obtained through px: could not add peer record to address book')
            return
          }
          toconnect.push(p)
        } catch (e) {
          this.log('bogus peer record obtained through px: invalid signature or not a peer record')
        }
      })
    )

    if (toconnect.length === 0) {
      return
    }

    await Promise.all(toconnect.map(async (id) => this.connect(id)))
  }
1711
+
1712
+ /**
1713
+ * Connect to a peer using the gossipsub protocol
1714
+ */
1715
+ private async connect (id: PeerIdStr): Promise<void> {
1716
+ this.log('Initiating connection with %s', id)
1717
+ const peerId = peerIdFromString(id)
1718
+ const connection = await this.components.connectionManager.openConnection(peerId)
1719
+ for (const protocol of this.protocols) {
1720
+ for (const topology of this.components.registrar.getTopologies(protocol)) {
1721
+ topology.onConnect?.(peerId, connection)
1722
+ }
1723
+ }
1724
+ }
1725
+
1726
+ /**
1727
+ * Subscribes to a topic
1728
+ */
1729
+ subscribe (topic: TopicStr): void {
1730
+ if (this.status.code !== GossipStatusCode.started) {
1731
+ throw new Error('Pubsub has not started')
1732
+ }
1733
+
1734
+ if (!this.subscriptions.has(topic)) {
1735
+ this.subscriptions.add(topic)
1736
+
1737
+ for (const peerId of this.peers.keys()) {
1738
+ this.sendSubscriptions(peerId, [topic], true)
1739
+ }
1740
+ }
1741
+
1742
+ this.join(topic)
1743
+ }
1744
+
1745
+ /**
1746
+ * Unsubscribe to a topic
1747
+ */
1748
+ unsubscribe (topic: TopicStr): void {
1749
+ if (this.status.code !== GossipStatusCode.started) {
1750
+ throw new Error('Pubsub is not started')
1751
+ }
1752
+
1753
+ const wasSubscribed = this.subscriptions.delete(topic)
1754
+
1755
+ this.log('unsubscribe from %s - am subscribed %s', topic, wasSubscribed)
1756
+
1757
+ if (wasSubscribed) {
1758
+ for (const peerId of this.peers.keys()) {
1759
+ this.sendSubscriptions(peerId, [topic], false)
1760
+ }
1761
+ }
1762
+
1763
+ this.leave(topic)
1764
+ }
1765
+
1766
  /**
   * Join topic
   *
   * Builds the initial mesh for the topic: first promotes eligible fanout
   * peers, then tops up with randomly selected gossip peers until (at most)
   * `D` candidates are found, and GRAFTs each of them.
   */
  private join (topic: TopicStr): void {
    if (this.status.code !== GossipStatusCode.started) {
      throw new Error('Gossipsub has not started')
    }

    // if we are already in the mesh, return
    if (this.mesh.has(topic)) {
      return
    }

    this.log('JOIN %s', topic)
    this.metrics?.onJoin(topic)

    const toAdd = new Set<PeerIdStr>()
    const backoff = this.backoff.get(topic)

    // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do,
    // removing the fanout entry.
    const fanoutPeers = this.fanout.get(topic)
    if (fanoutPeers != null) {
      // Remove fanout entry and the last published time
      this.fanout.delete(topic)
      this.fanoutLastpub.delete(topic)

      // remove explicit peers, peers with negative scores, and backoffed peers
      fanoutPeers.forEach((id) => {
        if (!this.direct.has(id) && this.score.score(id) >= 0 && backoff?.has(id) !== true) {
          toAdd.add(id)
        }
      })

      this.metrics?.onAddToMesh(topic, InclusionReason.Fanout, toAdd.size)
    }

    // check if we need to get more peers, which we randomly select
    if (toAdd.size < this.opts.D) {
      // remember how many came from fanout so the random delta can be reported separately
      const fanoutCount = toAdd.size
      const newPeers = this.getRandomGossipPeers(
        topic,
        this.opts.D,
        (id: PeerIdStr): boolean =>
          // filter direct peers and peers with negative score
          !toAdd.has(id) && !this.direct.has(id) && this.score.score(id) >= 0 && backoff?.has(id) !== true
      )

      newPeers.forEach((peer) => {
        toAdd.add(peer)
      })

      this.metrics?.onAddToMesh(topic, InclusionReason.Random, toAdd.size - fanoutCount)
    }

    this.mesh.set(topic, toAdd)

    toAdd.forEach((id) => {
      this.log('JOIN: Add mesh link to %s in %s', id, topic)
      this.sendGraft(id, topic)

      // rust-libp2p
      // - peer_score.graft()
      // - Self::control_pool_add()
      // - peer_added_to_mesh()
    })
  }
1833
+
1834
+ /**
1835
+ * Leave topic
1836
+ */
1837
+ private leave (topic: TopicStr): void {
1838
+ if (this.status.code !== GossipStatusCode.started) {
1839
+ throw new Error('Gossipsub has not started')
1840
+ }
1841
+
1842
+ this.log('LEAVE %s', topic)
1843
+ this.metrics?.onLeave(topic)
1844
+
1845
+ // Send PRUNE to mesh peers
1846
+ const meshPeers = this.mesh.get(topic)
1847
+ if (meshPeers != null) {
1848
+ Promise.all(
1849
+ Array.from(meshPeers).map(async (id) => {
1850
+ this.log('LEAVE: Remove mesh link to %s in %s', id, topic)
1851
+ await this.sendPrune(id, topic)
1852
+ })
1853
+ ).catch((err) => {
1854
+ this.log('Error sending prunes to mesh peers', err)
1855
+ })
1856
+ this.mesh.delete(topic)
1857
+ }
1858
+ }
1859
+
1860
+ private selectPeersToForward (topic: TopicStr, propagationSource?: PeerIdStr, excludePeers?: Set<PeerIdStr>): Set<string> {
1861
+ const tosend = new Set<PeerIdStr>()
1862
+
1863
+ // Add explicit peers
1864
+ const peersInTopic = this.topics.get(topic)
1865
+ if (peersInTopic != null) {
1866
+ this.direct.forEach((peer) => {
1867
+ if (peersInTopic.has(peer) && propagationSource !== peer && !(excludePeers?.has(peer) ?? false)) {
1868
+ tosend.add(peer)
1869
+ }
1870
+ })
1871
+
1872
+ // As of Mar 2022, spec + golang-libp2p include this while rust-libp2p does not
1873
+ // rust-libp2p: https://github.com/libp2p/rust-libp2p/blob/6cc3b4ec52c922bfcf562a29b5805c3150e37c75/protocols/gossipsub/src/behaviour.rs#L2693
1874
+ // spec: https://github.com/libp2p/specs/blob/10712c55ab309086a52eec7d25f294df4fa96528/pubsub/gossipsub/gossipsub-v1.0.md?plain=1#L361
1875
+ this.floodsubPeers.forEach((peer) => {
1876
+ if (
1877
+ peersInTopic.has(peer) &&
1878
+ propagationSource !== peer &&
1879
+ !(excludePeers?.has(peer) ?? false) &&
1880
+ this.score.score(peer) >= this.opts.scoreThresholds.publishThreshold
1881
+ ) {
1882
+ tosend.add(peer)
1883
+ }
1884
+ })
1885
+ }
1886
+
1887
+ // add mesh peers
1888
+ const meshPeers = this.mesh.get(topic)
1889
+ if ((meshPeers != null) && meshPeers.size > 0) {
1890
+ meshPeers.forEach((peer) => {
1891
+ if (propagationSource !== peer && !(excludePeers?.has(peer) ?? false)) {
1892
+ tosend.add(peer)
1893
+ }
1894
+ })
1895
+ }
1896
+
1897
+ return tosend
1898
+ }
1899
+
1900
  /**
   * Select the peers a locally published message should be sent to, and a
   * per-group count for metrics.
   *
   * With `floodPublish` the message goes to direct peers and all topic peers
   * above `publishThreshold`. Otherwise it goes to direct peers, qualifying
   * floodsub peers, and mesh peers (topped up to `D`); when we have no mesh
   * for the topic, fanout peers are used (creating the fanout if needed).
   */
  private selectPeersToPublish (topic: TopicStr): {
    tosend: Set<PeerIdStr>
    tosendCount: ToSendGroupCount
  } {
    const tosend = new Set<PeerIdStr>()
    const tosendCount: ToSendGroupCount = {
      direct: 0,
      floodsub: 0,
      mesh: 0,
      fanout: 0
    }

    const peersInTopic = this.topics.get(topic)
    if (peersInTopic != null) {
      // flood-publish behavior
      // send to direct peers and _all_ peers meeting the publishThreshold
      if (this.opts.floodPublish) {
        peersInTopic.forEach((id) => {
          if (this.direct.has(id)) {
            tosend.add(id)
            tosendCount.direct++
          } else if (this.score.score(id) >= this.opts.scoreThresholds.publishThreshold) {
            tosend.add(id)
            tosendCount.floodsub++
          }
        })
      } else {
        // non-flood-publish behavior
        // send to direct peers, subscribed floodsub peers
        // and some mesh peers above publishThreshold

        // direct peers (if subscribed)
        this.direct.forEach((id) => {
          if (peersInTopic.has(id)) {
            tosend.add(id)
            tosendCount.direct++
          }
        })

        // floodsub peers
        // Note: if there are no floodsub peers, we save a loop through peersInTopic Map
        this.floodsubPeers.forEach((id) => {
          if (peersInTopic.has(id) && this.score.score(id) >= this.opts.scoreThresholds.publishThreshold) {
            tosend.add(id)
            tosendCount.floodsub++
          }
        })

        // Gossipsub peers handling
        const meshPeers = this.mesh.get(topic)
        if ((meshPeers != null) && meshPeers.size > 0) {
          meshPeers.forEach((peer) => {
            tosend.add(peer)
            tosendCount.mesh++
          })

          // We want to publish to at least `D` peers.
          // If there are insufficient peers in the mesh, publish to other topic peers
          if (meshPeers.size < this.opts.D) {
            // pick additional topic peers above the publishThreshold
            const topicPeers = this.getRandomGossipPeers(topic, this.opts.D - meshPeers.size, (id) => {
              return !meshPeers.has(id) && !this.direct.has(id) && !this.floodsubPeers.has(id) && this.score.score(id) >= this.opts.scoreThresholds.publishThreshold
            })

            // extras are counted as mesh sends for metrics purposes
            topicPeers.forEach((peer) => {
              tosend.add(peer)
              tosendCount.mesh++
            })
          }
        } else {
          // We are not in the mesh for topic, use fanout peers

          const fanoutPeers = this.fanout.get(topic)
          if ((fanoutPeers != null) && fanoutPeers.size > 0) {
            fanoutPeers.forEach((peer) => {
              tosend.add(peer)
              tosendCount.fanout++
            })
          } else {
            // We have no fanout peers, select mesh_n of them and add them to the fanout

            // If we are not in the fanout, then pick peers in topic above the publishThreshold
            const newFanoutPeers = this.getRandomGossipPeers(topic, this.opts.D, (id) => {
              return this.score.score(id) >= this.opts.scoreThresholds.publishThreshold
            })

            // eslint-disable-next-line max-depth
            if (newFanoutPeers.size > 0) {
              this.fanout.set(topic, newFanoutPeers)

              newFanoutPeers.forEach((peer) => {
                tosend.add(peer)
                tosendCount.fanout++
              })
            }
          }

          // We are publishing to fanout peers - update the time we published
          this.fanoutLastpub.set(topic, Date.now())
        }
      }
    }

    return { tosend, tosendCount }
  }
2005
+
2006
+ /**
2007
+ * Forwards a message from our peers.
2008
+ *
2009
+ * For messages published by us (the app layer), this class uses `publish`
2010
+ */
2011
+ private forwardMessage (
2012
+ msgIdStr: string,
2013
+ rawMsg: RPC.Message,
2014
+ propagationSource?: PeerIdStr,
2015
+ excludePeers?: Set<PeerIdStr>
2016
+ ): void {
2017
+ // message is fully validated inform peer_score
2018
+ if (propagationSource != null) {
2019
+ this.score.deliverMessage(propagationSource, msgIdStr, rawMsg.topic)
2020
+ }
2021
+
2022
+ const tosend = this.selectPeersToForward(rawMsg.topic, propagationSource, excludePeers)
2023
+
2024
+ // Note: Don't throw if tosend is empty, we can have a mesh with a single peer
2025
+
2026
+ // forward the message to peers
2027
+ tosend.forEach((id) => {
2028
+ // sendRpc may mutate RPC message on piggyback, create a new message for each peer
2029
+ this.sendRpc(id, createGossipRpc([rawMsg]))
2030
+ })
2031
+
2032
+ this.metrics?.onForwardMsg(rawMsg.topic, tosend.size)
2033
+ }
2034
+
2035
  /**
   * App layer publishes a message to peers, return number of peers this message is published to
   * Note: `async` due to crypto only if `StrictSign`, otherwise it's a sync fn.
   *
   * For messages not from us, this class uses `forwardMessage`.
   *
   * @param topic - topic to publish on
   * @param data - raw application payload (transformed for the wire if a dataTransform is set)
   * @param opts - per-call overrides for duplicate handling, zero-peer handling and batching
   * @returns the set of recipient peer ids
   * @throws on unset publish config, duplicate message (unless ignored), or no subscribed peers (unless allowed)
   */
  async publish (topic: TopicStr, data: Uint8Array, opts?: PublishOpts): Promise<PublishResult> {
    const startMs = Date.now()
    const transformedData = (this.dataTransform != null) ? this.dataTransform.outboundTransform(topic, data) : data

    if (this.publishConfig == null) {
      throw Error('PublishError.Uninitialized')
    }

    // Prepare raw message with user's publishConfig
    const { raw: rawMsg, msg } = await buildRawMessage(this.publishConfig, topic, data, transformedData)

    // calculate the message id from the un-transformed data
    const msgId = await this.msgIdFn(msg)
    const msgIdStr = this.msgIdToStrFn(msgId)

    // Current publish opt takes precedence global opts, while preserving false value
    const ignoreDuplicatePublishError = opts?.ignoreDuplicatePublishError ?? this.opts.ignoreDuplicatePublishError

    if (this.seenCache.has(msgIdStr)) {
      // This message has already been seen. We don't re-publish messages that have already
      // been published on the network.
      if (ignoreDuplicatePublishError) {
        this.metrics?.onPublishDuplicateMsg(topic)
        return { recipients: [] }
      }
      throw Error('PublishError.Duplicate')
    }

    const { tosend, tosendCount } = this.selectPeersToPublish(topic)
    const willSendToSelf = this.opts.emitSelf && this.subscriptions.has(topic)

    // Current publish opt takes precedence global opts, while preserving false value
    const allowPublishToZeroTopicPeers = opts?.allowPublishToZeroTopicPeers ?? this.opts.allowPublishToZeroTopicPeers

    if (tosend.size === 0 && !allowPublishToZeroTopicPeers && !willSendToSelf) {
      throw Error('PublishError.NoPeersSubscribedToTopic')
    }

    // If the message isn't a duplicate and we have sent it to some peers add it to the
    // duplicate cache and memcache.
    this.seenCache.put(msgIdStr)
    // all published messages are valid
    this.mcache.put({ msgId, msgIdStr }, rawMsg, true)
    // Consider the message as delivered for gossip promises.
    this.gossipTracer.deliverMessage(msgIdStr)

    // If the message is anonymous or has a random author add it to the published message ids cache.
    this.publishedMessageIds.put(msgIdStr)

    const batchPublish = opts?.batchPublish ?? this.opts.batchPublish
    const rpc = createGossipRpc([rawMsg])
    if (batchPublish) {
      // batch mode encodes once and pushes the same bytes to every peer
      this.sendRpcInBatch(tosend, rpc)
    } else {
      // Send to set of peers aggregated from direct, mesh, fanout
      for (const id of tosend) {
        // sendRpc may mutate RPC message on piggyback, create a new message for each peer
        const sent = this.sendRpc(id, rpc)

        // did not actually send the message
        if (!sent) {
          tosend.delete(id)
        }
      }
    }

    const durationMs = Date.now() - startMs
    this.metrics?.onPublishMsg(
      topic,
      tosendCount,
      tosend.size,
      rawMsg.data != null ? rawMsg.data.length : 0,
      durationMs
    )

    // Dispatch the message to the user if we are subscribed to the topic
    if (willSendToSelf) {
      tosend.add(this.components.peerId.toString())

      super.dispatchEvent(
        new CustomEvent<GossipsubMessage>('gossipsub:message', {
          detail: {
            propagationSource: this.components.peerId,
            msgId: msgIdStr,
            msg
          }
        })
      )
      // TODO: Add option to switch between emit per topic or all messages in one
      super.dispatchEvent(new CustomEvent<Message>('message', { detail: msg }))
    }

    return {
      recipients: Array.from(tosend.values()).map((str) => this.peers.get(str) ?? peerIdFromString(str))
    }
  }
2137
+
2138
+ /**
2139
+ * Send the same data in batch to tosend list without considering cached control messages
2140
+ * This is not only faster but also avoid allocating memory for each peer
2141
+ * see https://github.com/ChainSafe/js-libp2p-gossipsub/issues/344
2142
+ */
2143
+ private sendRpcInBatch (tosend: Set<PeerIdStr>, rpc: RPC): void {
2144
+ const rpcBytes = RPC.encode(rpc)
2145
+ const prefixedData = encode.single(rpcBytes)
2146
+ for (const id of tosend) {
2147
+ const outboundStream = this.streamsOutbound.get(id)
2148
+ if (outboundStream == null) {
2149
+ this.log(`Cannot send RPC to ${id} as there is no open stream to it available`)
2150
+ tosend.delete(id)
2151
+ continue
2152
+ }
2153
+ try {
2154
+ outboundStream.pushPrefixed(prefixedData)
2155
+ } catch (e) {
2156
+ tosend.delete(id)
2157
+ this.log.error(`Cannot send rpc to ${id}`, e)
2158
+ }
2159
+
2160
+ this.metrics?.onRpcSent(rpc, rpcBytes.length)
2161
+ }
2162
+ }
2163
+
2164
  /**
   * This function should be called when `asyncValidation` is `true` after
   * the message got validated by the caller. Messages are stored in the `mcache` and
   * validation is expected to be fast enough that the messages should still exist in the cache.
   * There are three possible validation outcomes and the outcome is given in acceptance.
   *
   * If acceptance = `MessageAcceptance.Accept` the message will get propagated to the
   * network. The `propagation_source` parameter indicates who the message was received by and
   * will not be forwarded back to that peer.
   *
   * If acceptance = `MessageAcceptance.Reject` the message will be deleted from the memcache
   * and the P₄ penalty will be applied to the `propagationSource`.
   *
   * If acceptance = `MessageAcceptance.Ignore` the message will be deleted from the memcache
   * but no P₄ penalty will be applied.
   *
   * This function will return true if the message was found in the cache and false if was not
   * in the cache anymore.
   *
   * This should only be called once per message.
   */
  reportMessageValidationResult (msgId: MsgIdStr, propagationSource: PeerIdStr, acceptance: TopicValidatorResult): void {
    let cacheEntry: MessageCacheRecord | null

    if (acceptance === TopicValidatorResult.Accept) {
      cacheEntry = this.mcache.validate(msgId)

      if (cacheEntry != null) {
        const { message: rawMsg, originatingPeers } = cacheEntry
        // message is fully validated inform peer_score
        // NOTE(review): forwardMessage also calls score.deliverMessage for
        // propagationSource, so delivery appears to be reported twice here —
        // confirm whether this double-count is intentional
        this.score.deliverMessage(propagationSource, msgId, rawMsg.topic)

        this.forwardMessage(msgId, cacheEntry.message, propagationSource, originatingPeers)
      }
      // else, Message not in cache. Ignoring forwarding
    } else {
      // Not valid
      cacheEntry = this.mcache.remove(msgId)

      if (cacheEntry != null) {
        const rejectReason = rejectReasonFromAcceptance(acceptance)
        const { message: rawMsg, originatingPeers } = cacheEntry

        // Tell peer_score about reject
        // Reject the original source, and any duplicates we've seen from other peers.
        this.score.rejectMessage(propagationSource, msgId, rawMsg.topic, rejectReason)
        for (const peer of originatingPeers) {
          this.score.rejectMessage(peer, msgId, rawMsg.topic, rejectReason)
        }
      }
      // else, Message not in cache. Ignoring forwarding
    }

    const firstSeenTimestampMs = this.score.messageFirstSeenTimestampMs(msgId)
    this.metrics?.onReportValidation(cacheEntry, acceptance, firstSeenTimestampMs)
  }
2220
+
2221
+ /**
2222
+ * Sends a GRAFT message to a peer
2223
+ */
2224
+ private sendGraft (id: PeerIdStr, topic: string): void {
2225
+ const graft = [
2226
+ {
2227
+ topicID: topic
2228
+ }
2229
+ ]
2230
+ const out = createGossipRpc([], { graft })
2231
+ this.sendRpc(id, out)
2232
+ }
2233
+
2234
+ /**
2235
+ * Sends a PRUNE message to a peer
2236
+ */
2237
+ private async sendPrune (id: PeerIdStr, topic: string): Promise<void> {
2238
+ // this is only called from leave() function
2239
+ const onUnsubscribe = true
2240
+ const prune = [await this.makePrune(id, topic, this.opts.doPX, onUnsubscribe)]
2241
+ const out = createGossipRpc([], { prune })
2242
+ this.sendRpc(id, out)
2243
+ }
2244
+
2245
+ private sendIDontWants (msgId: Uint8Array, topic: string, source: PeerIdStr): void {
2246
+ const ids = this.mesh.get(topic)
2247
+ if (ids == null) {
2248
+ return
2249
+ }
2250
+
2251
+ // don't send IDONTWANT to:
2252
+ // - the source
2253
+ // - peers that don't support v1.2
2254
+ const tosend = new Set(ids)
2255
+ tosend.delete(source)
2256
+ for (const id of tosend) {
2257
+ if (this.streamsOutbound.get(id)?.protocol !== constants.GossipsubIDv12) {
2258
+ tosend.delete(id)
2259
+ }
2260
+ }
2261
+
2262
+ const idontwantRpc = createGossipRpc([], { idontwant: [{ messageIDs: [msgId] }] })
2263
+ this.sendRpcInBatch(tosend, idontwantRpc)
2264
+ }
2265
+
2266
  /**
   * Send an rpc object to a peer
   *
   * Piggybacks any pending control messages and gossip for the peer onto the
   * rpc before encoding. On push failure, pending control/gossip is restored
   * so it can be retried later. Returns whether the rpc was pushed.
   */
  private sendRpc (id: PeerIdStr, rpc: RPC): boolean {
    const outboundStream = this.streamsOutbound.get(id)
    if (outboundStream == null) {
      this.log(`Cannot send RPC to ${id} as there is no open stream to it available`)
      return false
    }

    // piggyback control message retries
    const ctrl = this.control.get(id)
    if (ctrl != null) {
      this.piggybackControl(id, rpc, ctrl)
      this.control.delete(id)
    }

    // piggyback gossip
    const ihave = this.gossip.get(id)
    if (ihave != null) {
      this.piggybackGossip(id, rpc, ihave)
      this.gossip.delete(id)
    }

    const rpcBytes = RPC.encode(rpc)
    try {
      outboundStream.push(rpcBytes)
    } catch (e) {
      this.log.error(`Cannot send rpc to ${id}`, e)

      // if the peer had control messages or gossip, re-attach
      if (ctrl != null) {
        this.control.set(id, ctrl)
      }
      if (ihave != null) {
        this.gossip.set(id, ihave)
      }

      return false
    }

    this.metrics?.onRpcSent(rpc, rpcBytes.length)

    // emit outbound graft/prune events for observers, per topic
    if (rpc.control?.graft != null) {
      for (const topic of rpc.control?.graft) {
        if (topic.topicID != null) {
          this.safeDispatchEvent<MeshPeer>('gossipsub:graft', { detail: { peerId: id, topic: topic.topicID, direction: 'outbound' } })
        }
      }
    }
    if (rpc.control?.prune != null) {
      for (const topic of rpc.control?.prune) {
        if (topic.topicID != null) {
          this.safeDispatchEvent<MeshPeer>('gossipsub:prune', { detail: { peerId: id, topic: topic.topicID, direction: 'outbound' } })
        }
      }
    }

    return true
  }
2326
+
2327
+ /** Mutates `outRpc` adding graft and prune control messages */
2328
+ public piggybackControl (id: PeerIdStr, outRpc: RPC, ctrl: RPC.ControlMessage): void {
2329
+ const rpc = ensureControl(outRpc)
2330
+ for (const graft of ctrl.graft) {
2331
+ if (graft.topicID != null && (this.mesh.get(graft.topicID)?.has(id) ?? false)) {
2332
+ rpc.control.graft.push(graft)
2333
+ }
2334
+ }
2335
+
2336
+ for (const prune of ctrl.prune) {
2337
+ if (prune.topicID != null && !(this.mesh.get(prune.topicID)?.has(id) ?? false)) {
2338
+ rpc.control.prune.push(prune)
2339
+ }
2340
+ }
2341
+ }
2342
+
2343
+ /** Mutates `outRpc` adding ihave control messages */
2344
+ private piggybackGossip (id: PeerIdStr, outRpc: RPC, ihave: RPC.ControlIHave[]): void {
2345
+ const rpc = ensureControl(outRpc)
2346
+ rpc.control.ihave = ihave
2347
+ }
2348
+
2349
  /**
   * Send graft and prune messages
   *
   * Peers that both graft some topics and prune others receive a single
   * combined rpc; remaining prune-only peers receive a prune rpc afterwards.
   *
   * @param tograft - peer id => topic[]
   * @param toprune - peer id => topic[]
   * @param noPX - peer id => true when PX must be suppressed for that peer
   */
  private async sendGraftPrune (
    tograft: Map<string, string[]>,
    toprune: Map<string, string[]>,
    noPX: Map<string, boolean>
  ): Promise<void> {
    const doPX = this.opts.doPX
    const onUnsubscribe = false
    for (const [id, topics] of tograft) {
      const graft = topics.map((topicID) => ({ topicID }))
      let prune: RPC.ControlPrune[] = []
      // If a peer also has prunes, process them now
      const pruning = toprune.get(id)
      if (pruning != null) {
        prune = await Promise.all(
          pruning.map(
            async (topicID) => this.makePrune(id, topicID, doPX && !(noPX.get(id) ?? false), onUnsubscribe)
          )
        )
        // handled here, so remove from the prune-only pass below
        toprune.delete(id)
      }

      this.sendRpc(id, createGossipRpc([], { graft, prune }))
    }
    for (const [id, topics] of toprune) {
      const prune = await Promise.all(
        topics.map(
          async (topicID) => this.makePrune(id, topicID, doPX && !(noPX.get(id) ?? false), onUnsubscribe)
        )
      )
      this.sendRpc(id, createGossipRpc([], { prune }))
    }
  }
2387
+
2388
+ /**
2389
+ * Emits gossip - Send IHAVE messages to a random set of gossip peers
2390
+ */
2391
+ private emitGossip (peersToGossipByTopic: Map<string, Set<PeerIdStr>>): void {
2392
+ const gossipIDsByTopic = this.mcache.getGossipIDs(new Set(peersToGossipByTopic.keys()))
2393
+ for (const [topic, peersToGossip] of peersToGossipByTopic) {
2394
+ this.doEmitGossip(topic, peersToGossip, gossipIDsByTopic.get(topic) ?? [])
2395
+ }
2396
+ }
2397
+
2398
  /**
   * Send gossip messages to GossipFactor peers above threshold with a minimum of D_lazy
   * Peers are randomly selected from the heartbeat which exclude mesh + fanout peers
   * We also exclude direct peers, as there is no reason to emit gossip to them
   *
   * @param topic
   * @param candidateToGossip - peers to gossip
   * @param messageIDs - message ids to gossip
   */
  private doEmitGossip (topic: string, candidateToGossip: Set<PeerIdStr>, messageIDs: Uint8Array[]): void {
    if (messageIDs.length === 0) {
      return
    }

    // shuffle to emit in random order
    shuffle(messageIDs)

    // if we are emitting more than GossipsubMaxIHaveLength ids, truncate the list
    if (messageIDs.length > constants.GossipsubMaxIHaveLength) {
      // we do the truncation (with shuffling) per peer below
      this.log('too many messages for gossip; will truncate IHAVE list (%d messages)', messageIDs.length)
    }

    if (candidateToGossip.size === 0) { return }
    // target number of peers to gossip to: max(Dlazy, gossipFactor * candidates),
    // capped at the number of candidates
    let target = this.opts.Dlazy
    const gossipFactor = this.opts.gossipFactor
    const factor = gossipFactor * candidateToGossip.size
    let peersToGossip: Set<PeerIdStr> | PeerIdStr[] = candidateToGossip
    if (factor > target) {
      target = factor
    }
    if (target > peersToGossip.size) {
      target = peersToGossip.size
    } else {
      // only shuffle if needed
      peersToGossip = shuffle(Array.from(peersToGossip)).slice(0, target)
    }

    // Emit the IHAVE gossip to the selected peers up to the target
    peersToGossip.forEach((id) => {
      let peerMessageIDs = messageIDs
      if (messageIDs.length > constants.GossipsubMaxIHaveLength) {
        // shuffle and slice message IDs per peer so that we emit a different set for each peer
        // we have enough reduncancy in the system that this will significantly increase the message
        // coverage when we do truncate
        peerMessageIDs = shuffle(peerMessageIDs.slice()).slice(0, constants.GossipsubMaxIHaveLength)
      }
      this.pushGossip(id, {
        topicID: topic,
        messageIDs: peerMessageIDs
      })
    })
  }
2451
+
2452
+ /**
2453
+ * Flush gossip and control messages
2454
+ */
2455
+ private flush (): void {
2456
+ // send gossip first, which will also piggyback control
2457
+ for (const [peer, ihave] of this.gossip.entries()) {
2458
+ this.gossip.delete(peer)
2459
+ this.sendRpc(peer, createGossipRpc([], { ihave }))
2460
+ }
2461
+ // send the remaining control messages
2462
+ for (const [peer, control] of this.control.entries()) {
2463
+ this.control.delete(peer)
2464
+ const out = createGossipRpc([], { graft: control.graft, prune: control.prune })
2465
+ this.sendRpc(peer, out)
2466
+ }
2467
+ }
2468
+
2469
+ /**
2470
+ * Adds new IHAVE messages to pending gossip
2471
+ */
2472
+ private pushGossip (id: PeerIdStr, controlIHaveMsgs: RPC.ControlIHave): void {
2473
+ this.log('Add gossip to %s', id)
2474
+ const gossip = this.gossip.get(id) ?? []
2475
+ this.gossip.set(id, gossip.concat(controlIHaveMsgs))
2476
+ }
2477
+
2478
+ /**
2479
+ * Make a PRUNE control message for a peer in a topic
2480
+ */
2481
+ private async makePrune (
2482
+ id: PeerIdStr,
2483
+ topic: string,
2484
+ doPX: boolean,
2485
+ onUnsubscribe: boolean
2486
+ ): Promise<RPC.ControlPrune> {
2487
+ this.score.prune(id, topic)
2488
+ if (this.streamsOutbound.get(id)?.protocol === constants.GossipsubIDv10) {
2489
+ // Gossipsub v1.0 -- no backoff, the peer won't be able to parse it anyway
2490
+ return {
2491
+ topicID: topic,
2492
+ peers: []
2493
+ }
2494
+ }
2495
+ // backoff is measured in seconds
2496
+ // GossipsubPruneBackoff and GossipsubUnsubscribeBackoff are measured in milliseconds
2497
+ // The protobuf has it as a uint64
2498
+ const backoffMs = onUnsubscribe ? this.opts.unsubcribeBackoff : this.opts.pruneBackoff
2499
+ const backoff = backoffMs / 1000
2500
+ this.doAddBackoff(id, topic, backoffMs)
2501
+
2502
+ if (!doPX) {
2503
+ return {
2504
+ topicID: topic,
2505
+ peers: [],
2506
+ backoff
2507
+ }
2508
+ }
2509
+
2510
+ // select peers for Peer eXchange
2511
+ const peers = this.getRandomGossipPeers(topic, this.opts.prunePeers, (xid) => {
2512
+ return xid !== id && this.score.score(xid) >= 0
2513
+ })
2514
+ const px = await Promise.all(
2515
+ Array.from(peers).map(async (peerId) => {
2516
+ // see if we have a signed record to send back; if we don't, just send
2517
+ // the peer ID and let the pruned peer find them in the DHT -- we can't trust
2518
+ // unsigned address records through PX anyways
2519
+ // Finding signed records in the DHT is not supported at the time of writing in js-libp2p
2520
+ const id = this.peers.get(peerId) ?? peerIdFromString(peerId)
2521
+ let peerInfo: Peer | undefined
2522
+
2523
+ try {
2524
+ peerInfo = await this.components.peerStore.get(id)
2525
+ } catch (err: any) {
2526
+ if (err.name !== 'NotFoundError') {
2527
+ throw err
2528
+ }
2529
+ }
2530
+
2531
+ return {
2532
+ peerID: id.toMultihash().bytes,
2533
+ signedPeerRecord: peerInfo?.peerRecordEnvelope
2534
+ }
2535
+ })
2536
+ )
2537
+ return {
2538
+ topicID: topic,
2539
+ peers: px,
2540
+ backoff
2541
+ }
2542
+ }
2543
+
2544
+ private readonly runHeartbeat = (): void => {
2545
+ const timer = this.metrics?.heartbeatDuration.startTimer()
2546
+
2547
+ this.heartbeat()
2548
+ .catch((err) => {
2549
+ this.log('Error running heartbeat', err)
2550
+ })
2551
+ .finally(() => {
2552
+ if (timer != null) {
2553
+ timer()
2554
+ }
2555
+
2556
+ // Schedule the next run if still in started status
2557
+ if (this.status.code === GossipStatusCode.started) {
2558
+ // Clear previous timeout before overwriting `status.heartbeatTimeout`, it should be completed tho.
2559
+ clearTimeout(this.status.heartbeatTimeout)
2560
+
2561
+ // NodeJS setInterval function is innexact, calls drift by a few miliseconds on each call.
2562
+ // To run the heartbeat precisely setTimeout() must be used recomputing the delay on every loop.
2563
+ let msToNextHeartbeat =
2564
+ this.opts.heartbeatInterval - ((Date.now() - this.status.hearbeatStartMs) % this.opts.heartbeatInterval)
2565
+
2566
+ // If too close to next heartbeat, skip one
2567
+ if (msToNextHeartbeat < this.opts.heartbeatInterval * 0.25) {
2568
+ msToNextHeartbeat += this.opts.heartbeatInterval
2569
+ this.metrics?.heartbeatSkipped.inc()
2570
+ }
2571
+
2572
+ this.status.heartbeatTimeout = setTimeout(this.runHeartbeat, msToNextHeartbeat)
2573
+ }
2574
+ })
2575
+ }
2576
+
2577
  /**
   * Maintains the mesh and fanout maps in gossipsub.
   *
   * One heartbeat tick:
   * - clears expired backoffs and per-heartbeat counters (peerhave/iasked/IDONTWANT)
   * - applies IWANT-broken-promise penalties and prunes internal caches
   * - for each subscribed topic, repairs the mesh: drops negative-score peers,
   *   grafts up to D when below Dlo, prunes down to D (keeping Dscore by score
   *   and at least Dout outbound) when above Dhi, and opportunistically grafts
   *   better-scoring peers when the mesh median score is poor
   * - expires and replenishes fanout for topics we publish to but haven't joined
   * - emits IHAVE gossip, sends coalesced GRAFT/PRUNE, flushes remaining
   *   control, and shifts the message cache window
   */
  public async heartbeat (): Promise<void> {
    const { D, Dlo, Dhi, Dscore, Dout, fanoutTTL } = this.opts

    this.heartbeatTicks++

    // cache scores throughout the heartbeat so each peer is scored at most once
    const scores = new Map<string, number>()
    const getScore = (id: string): number => {
      let s = scores.get(id)
      if (s === undefined) {
        s = this.score.score(id)
        scores.set(id, s)
      }
      return s
    }

    // peer id => topic[] to GRAFT
    const tograft = new Map<string, string[]>()
    // peer id => topic[] to PRUNE
    const toprune = new Map<string, string[]>()
    // peer id => don't px (peers pruned for bad score get no peer exchange)
    const noPX = new Map<string, boolean>()

    // clean up expired backoffs
    this.clearBackoff()

    // clean up peerhave/iasked counters
    this.peerhave.clear()
    this.metrics?.cacheSize.set({ cache: 'iasked' }, this.iasked.size)
    this.iasked.clear()

    // apply IWANT request penalties
    this.applyIwantPenalties()

    // clean up IDONTWANT counters
    this.idontwantCounts.clear()

    // clean up old tracked IDONTWANTs once they fall out of the mcache window
    for (const idontwants of this.idontwants.values()) {
      for (const [msgId, heartbeatTick] of idontwants) {
        if (this.heartbeatTicks - heartbeatTick >= this.opts.mcacheLength) {
          idontwants.delete(msgId)
        }
      }
    }

    // ensure direct peers are connected
    if (this.heartbeatTicks % this.opts.directConnectTicks === 0) {
      // we only do this every few ticks to allow pending connections to complete and account for restarts/downtime
      await this.directConnect()
    }

    // EXTRA: Prune caches
    this.fastMsgIdCache?.prune()
    this.seenCache.prune()
    this.gossipTracer.prune()
    this.publishedMessageIds.prune()

    /**
     * Instead of calling getRandomGossipPeers multiple times to:
     * + get more mesh peers
     * + more outbound peers
     * + opportunistic grafting
     * + emitGossip
     *
     * We want to loop through the topic peers only a single time and prepare gossip peers for all topics to improve the performance
     */

    const peersToGossipByTopic = new Map<string, Set<PeerIdStr>>()
    // maintain the mesh for topics we have joined
    // eslint-disable-next-line complexity
    this.mesh.forEach((peers, topic) => {
      const peersInTopic = this.topics.get(topic)
      // non-mesh, non-direct peers eligible to be grafted (not backed off, score >= 0)
      const candidateMeshPeers = new Set<PeerIdStr>()
      const peersToGossip = new Set<PeerIdStr>()
      peersToGossipByTopic.set(topic, peersToGossip)

      if (peersInTopic != null) {
        // shuffle so graft candidates are selected randomly, not by map order
        const shuffledPeers = shuffle(Array.from(peersInTopic))
        const backoff = this.backoff.get(topic)
        for (const id of shuffledPeers) {
          const peerStreams = this.streamsOutbound.get(id)
          if (
            (peerStreams != null) &&
            this.protocols.includes(peerStreams.protocol) &&
            !peers.has(id) &&
            !this.direct.has(id)
          ) {
            const score = getScore(id)
            if (backoff?.has(id) !== true && score >= 0) { candidateMeshPeers.add(id) }
            // instead of having to find gossip peers after heartbeat which require another loop
            // we prepare peers to gossip in a topic within heartbeat to improve performance
            if (score >= this.opts.scoreThresholds.gossipThreshold) { peersToGossip.add(id) }
          }
        }
      }

      // prune/graft helper functions (defined per topic)
      const prunePeer = (id: PeerIdStr, reason: ChurnReason): void => {
        this.log('HEARTBEAT: Remove mesh link to %s in %s', id, topic)
        // no need to update peer score here as we do it in makePrune
        // add prune backoff record
        this.addBackoff(id, topic)
        // remove peer from mesh
        peers.delete(id)
        // after pruning a peer from mesh, we want to gossip topic to it if its score meets the gossip threshold
        if (getScore(id) >= this.opts.scoreThresholds.gossipThreshold) { peersToGossip.add(id) }
        this.metrics?.onRemoveFromMesh(topic, reason, 1)
        // add to toprune
        const topics = toprune.get(id)
        if (topics == null) {
          toprune.set(id, [topic])
        } else {
          topics.push(topic)
        }
      }

      const graftPeer = (id: PeerIdStr, reason: InclusionReason): void => {
        this.log('HEARTBEAT: Add mesh link to %s in %s', id, topic)
        // update peer score
        this.score.graft(id, topic)
        // add peer to mesh
        peers.add(id)
        // when we add a new mesh peer, we don't want to gossip messages to it
        peersToGossip.delete(id)
        this.metrics?.onAddToMesh(topic, reason, 1)
        // add to tograft
        const topics = tograft.get(id)
        if (topics == null) {
          tograft.set(id, [topic])
        } else {
          topics.push(topic)
        }
      }

      // drop all peers with negative score, without PX
      peers.forEach((id) => {
        const score = getScore(id)

        // Record the score

        if (score < 0) {
          this.log('HEARTBEAT: Prune peer %s with negative score: score=%d, topic=%s', id, score, topic)
          prunePeer(id, ChurnReason.BadScore)
          noPX.set(id, true)
        }
      })

      // do we have enough peers?
      if (peers.size < Dlo) {
        const ineed = D - peers.size
        // slice up to first `ineed` items and remove them from candidateMeshPeers
        // same to `const newMeshPeers = candidateMeshPeers.slice(0, ineed)`
        const newMeshPeers = removeFirstNItemsFromSet(candidateMeshPeers, ineed)

        newMeshPeers.forEach((p) => {
          graftPeer(p, InclusionReason.NotEnough)
        })
      }

      // do we have too many peers?
      if (peers.size > Dhi) {
        let peersArray = Array.from(peers)
        // sort by score (descending)
        peersArray.sort((a, b) => getScore(b) - getScore(a))
        // We keep the first D_score peers by score and the remaining up to D randomly
        // under the constraint that we keep D_out peers in the mesh (if we have that many)
        peersArray = peersArray.slice(0, Dscore).concat(shuffle(peersArray.slice(Dscore)))

        // count the outbound peers we are keeping
        let outbound = 0
        peersArray.slice(0, D).forEach((p) => {
          if (this.outbound.get(p) ?? false) {
            outbound++
          }
        })

        // if it's less than D_out, bubble up some outbound peers from the random selection
        if (outbound < Dout) {
          const rotate = (i: number): void => {
            // rotate the peersArray to the right and put the ith peer in the front
            const p = peersArray[i]
            for (let j = i; j > 0; j--) {
              peersArray[j] = peersArray[j - 1]
            }
            peersArray[0] = p
          }

          // first bubble up all outbound peers already in the selection to the front
          if (outbound > 0) {
            let ihave = outbound
            for (let i = 1; i < D && ihave > 0; i++) {
              // eslint-disable-next-line max-depth
              if (this.outbound.get(peersArray[i]) ?? false) {
                rotate(i)
                ihave--
              }
            }
          }

          // now bubble up enough outbound peers outside the selection to the front
          let ineed = D - outbound
          for (let i = D; i < peersArray.length && ineed > 0; i++) {
            if (this.outbound.get(peersArray[i]) ?? false) {
              rotate(i)
              ineed--
            }
          }
        }

        // prune the excess peers (everything past the first D entries)
        peersArray.slice(D).forEach((p) => {
          prunePeer(p, ChurnReason.Excess)
        })
      }

      // do we have enough outbound peers?
      if (peers.size >= Dlo) {
        // count the outbound peers we have
        let outbound = 0
        peers.forEach((p) => {
          if (this.outbound.get(p) ?? false) {
            outbound++
          }
        })

        // if it's less than D_out, select some peers with outbound connections and graft them
        if (outbound < Dout) {
          const ineed = Dout - outbound
          const newMeshPeers = removeItemsFromSet(candidateMeshPeers, ineed, (id) => this.outbound.get(id) === true)

          newMeshPeers.forEach((p) => {
            graftPeer(p, InclusionReason.Outbound)
          })
        }
      }

      // should we try to improve the mesh with opportunistic grafting?
      if (this.heartbeatTicks % this.opts.opportunisticGraftTicks === 0 && peers.size > 1) {
        // Opportunistic grafting works as follows: we check the median score of peers in the
        // mesh; if this score is below the opportunisticGraftThreshold, we select a few peers at
        // random with score over the median.
        // The intention is to (slowly) improve an underperforming mesh by introducing good
        // scoring peers that may have been gossiping at us. This allows us to get out of sticky
        // situations where we are stuck with poor peers and also recover from churn of good peers.

        // now compute the median peer score in the mesh
        const peersList = Array.from(peers).sort((a, b) => getScore(a) - getScore(b))
        const medianIndex = Math.floor(peers.size / 2)
        const medianScore = getScore(peersList[medianIndex])

        // if the median score is below the threshold, select a better peer (if any) and GRAFT
        if (medianScore < this.opts.scoreThresholds.opportunisticGraftThreshold) {
          const ineed = this.opts.opportunisticGraftPeers
          const newMeshPeers = removeItemsFromSet(candidateMeshPeers, ineed, (id) => getScore(id) > medianScore)
          for (const id of newMeshPeers) {
            this.log('HEARTBEAT: Opportunistically graft peer %s on topic %s', id, topic)
            graftPeer(id, InclusionReason.Opportunistic)
          }
        }
      }
    })

    // expire fanout for topics we haven't published to in a while
    const now = Date.now()
    this.fanoutLastpub.forEach((lastpb, topic) => {
      if (lastpb + fanoutTTL < now) {
        this.fanout.delete(topic)
        this.fanoutLastpub.delete(topic)
      }
    })

    // maintain our fanout for topics we are publishing but we have not joined
    this.fanout.forEach((fanoutPeers, topic) => {
      // checks whether our peers are still in the topic and have a score above the publish threshold
      const topicPeers = this.topics.get(topic)
      fanoutPeers.forEach((id) => {
        if (!(topicPeers?.has(id) ?? false) || getScore(id) < this.opts.scoreThresholds.publishThreshold) {
          fanoutPeers.delete(id)
        }
      })

      const peersInTopic = this.topics.get(topic)
      const candidateFanoutPeers = []
      // the fanout map contains topics to which we are not subscribed.
      const peersToGossip = new Set<PeerIdStr>()
      peersToGossipByTopic.set(topic, peersToGossip)

      if (peersInTopic != null) {
        const shuffledPeers = shuffle(Array.from(peersInTopic))
        for (const id of shuffledPeers) {
          const peerStreams = this.streamsOutbound.get(id)
          if (
            (peerStreams != null) &&
            this.protocols.includes(peerStreams.protocol) &&
            !fanoutPeers.has(id) &&
            !this.direct.has(id)
          ) {
            const score = getScore(id)
            if (score >= this.opts.scoreThresholds.publishThreshold) { candidateFanoutPeers.push(id) }
            // instead of having to find gossip peers after heartbeat which require another loop
            // we prepare peers to gossip in a topic within heartbeat to improve performance
            if (score >= this.opts.scoreThresholds.gossipThreshold) { peersToGossip.add(id) }
          }
        }
      }

      // do we need more peers?
      if (fanoutPeers.size < D) {
        const ineed = D - fanoutPeers.size
        candidateFanoutPeers.slice(0, ineed).forEach((id) => {
          fanoutPeers.add(id)
          // newly-added fanout peers receive messages directly, so don't gossip to them
          peersToGossip?.delete(id)
        })
      }
    })

    this.emitGossip(peersToGossipByTopic)

    // send coalesced GRAFT/PRUNE messages (will piggyback gossip)
    await this.sendGraftPrune(tograft, toprune, noPX)

    // flush pending gossip that wasn't piggybacked above
    this.flush()

    // advance the message history window
    this.mcache.shift()

    this.dispatchEvent(new CustomEvent('gossipsub:heartbeat'))
  }
2910
+
2911
+ /**
2912
+ * Given a topic, returns up to count peers subscribed to that topic
2913
+ * that pass an optional filter function
2914
+ *
2915
+ * @param topic
2916
+ * @param count
2917
+ * @param filter - a function to filter acceptable peers
2918
+ */
2919
+ private getRandomGossipPeers (
2920
+ topic: string,
2921
+ count: number,
2922
+ filter: (id: string) => boolean = () => true
2923
+ ): Set<string> {
2924
+ const peersInTopic = this.topics.get(topic)
2925
+
2926
+ if (peersInTopic == null) {
2927
+ return new Set()
2928
+ }
2929
+
2930
+ // Adds all peers using our protocol
2931
+ // that also pass the filter function
2932
+ let peers: string[] = []
2933
+ peersInTopic.forEach((id) => {
2934
+ const peerStreams = this.streamsOutbound.get(id)
2935
+ if (peerStreams == null) {
2936
+ return
2937
+ }
2938
+ if (this.protocols.includes(peerStreams.protocol) && filter(id)) {
2939
+ peers.push(id)
2940
+ }
2941
+ })
2942
+
2943
+ // Pseudo-randomly shuffles peers
2944
+ peers = shuffle(peers)
2945
+ if (count > 0 && peers.length > count) {
2946
+ peers = peers.slice(0, count)
2947
+ }
2948
+
2949
+ return new Set(peers)
2950
+ }
2951
+
2952
+ private onScrapeMetrics (metrics: Metrics): void {
2953
+ /* Data structure sizes */
2954
+ metrics.mcacheSize.set(this.mcache.size)
2955
+ metrics.mcacheNotValidatedCount.set(this.mcache.notValidatedCount)
2956
+ // Arbitrary size
2957
+ metrics.cacheSize.set({ cache: 'direct' }, this.direct.size)
2958
+ metrics.cacheSize.set({ cache: 'seenCache' }, this.seenCache.size)
2959
+ metrics.cacheSize.set({ cache: 'fastMsgIdCache' }, this.fastMsgIdCache?.size ?? 0)
2960
+ metrics.cacheSize.set({ cache: 'publishedMessageIds' }, this.publishedMessageIds.size)
2961
+ metrics.cacheSize.set({ cache: 'mcache' }, this.mcache.size)
2962
+ metrics.cacheSize.set({ cache: 'score' }, this.score.size)
2963
+ metrics.cacheSize.set({ cache: 'gossipTracer.promises' }, this.gossipTracer.size)
2964
+ metrics.cacheSize.set({ cache: 'gossipTracer.requests' }, this.gossipTracer.requestMsByMsgSize)
2965
+ // Bounded by topic
2966
+ metrics.cacheSize.set({ cache: 'topics' }, this.topics.size)
2967
+ metrics.cacheSize.set({ cache: 'subscriptions' }, this.subscriptions.size)
2968
+ metrics.cacheSize.set({ cache: 'mesh' }, this.mesh.size)
2969
+ metrics.cacheSize.set({ cache: 'fanout' }, this.fanout.size)
2970
+ // Bounded by peer
2971
+ metrics.cacheSize.set({ cache: 'peers' }, this.peers.size)
2972
+ metrics.cacheSize.set({ cache: 'streamsOutbound' }, this.streamsOutbound.size)
2973
+ metrics.cacheSize.set({ cache: 'streamsInbound' }, this.streamsInbound.size)
2974
+ metrics.cacheSize.set({ cache: 'acceptFromWhitelist' }, this.acceptFromWhitelist.size)
2975
+ metrics.cacheSize.set({ cache: 'gossip' }, this.gossip.size)
2976
+ metrics.cacheSize.set({ cache: 'control' }, this.control.size)
2977
+ metrics.cacheSize.set({ cache: 'peerhave' }, this.peerhave.size)
2978
+ metrics.cacheSize.set({ cache: 'outbound' }, this.outbound.size)
2979
+
2980
+ // 2D nested data structure
2981
+ let backoffSize = 0
2982
+ const now = Date.now()
2983
+ metrics.connectedPeersBackoffSec.reset()
2984
+ for (const backoff of this.backoff.values()) {
2985
+ backoffSize += backoff.size
2986
+ for (const [peer, expiredMs] of backoff.entries()) {
2987
+ if (this.peers.has(peer)) {
2988
+ metrics.connectedPeersBackoffSec.observe(Math.max(0, expiredMs - now) / 1000)
2989
+ }
2990
+ }
2991
+ }
2992
+ metrics.cacheSize.set({ cache: 'backoff' }, backoffSize)
2993
+
2994
+ let idontwantsCount = 0
2995
+ for (const idontwant of this.idontwants.values()) {
2996
+ idontwantsCount += idontwant.size
2997
+ }
2998
+ metrics.cacheSize.set({ cache: 'idontwants' }, idontwantsCount)
2999
+
3000
+ // Peer counts
3001
+
3002
+ for (const [topicStr, peers] of this.topics) {
3003
+ metrics.topicPeersCount.set({ topicStr }, peers.size)
3004
+ }
3005
+
3006
+ for (const [topicStr, peers] of this.mesh) {
3007
+ metrics.meshPeerCounts.set({ topicStr }, peers.size)
3008
+ }
3009
+
3010
+ // Peer scores
3011
+
3012
+ const scores: number[] = []
3013
+ const scoreByPeer = new Map<PeerIdStr, number>()
3014
+ metrics.behaviourPenalty.reset()
3015
+
3016
+ for (const peerIdStr of this.peers.keys()) {
3017
+ const score = this.score.score(peerIdStr)
3018
+ scores.push(score)
3019
+ scoreByPeer.set(peerIdStr, score)
3020
+ metrics.behaviourPenalty.observe(this.score.peerStats.get(peerIdStr)?.behaviourPenalty ?? 0)
3021
+ }
3022
+
3023
+ metrics.registerScores(scores, this.opts.scoreThresholds)
3024
+
3025
+ // Breakdown score per mesh topicLabel
3026
+
3027
+ metrics.registerScorePerMesh(this.mesh, scoreByPeer)
3028
+
3029
+ // Breakdown on each score weight
3030
+
3031
+ const sw = computeAllPeersScoreWeights(
3032
+ this.peers.keys(),
3033
+ this.score.peerStats,
3034
+ this.score.params,
3035
+ this.score.peerIPs,
3036
+ metrics.topicStrToLabel
3037
+ )
3038
+
3039
+ metrics.registerScoreWeights(sw)
3040
+ }
3041
+
3042
+ private readonly tagMeshPeer = (evt: CustomEvent<MeshPeer>): void => {
3043
+ const { peerId, topic } = evt.detail
3044
+ this.components.peerStore.merge(this.peers.get(peerId) ?? peerIdFromString(peerId), {
3045
+ tags: {
3046
+ [topic]: {
3047
+ value: 100
3048
+ }
3049
+ }
3050
+ }).catch((err) => { this.log.error('Error tagging peer %s with topic %s', peerId, topic, err) })
3051
+ }
3052
+
3053
+ private readonly untagMeshPeer = (evt: CustomEvent<MeshPeer>): void => {
3054
+ const { peerId, topic } = evt.detail
3055
+ this.components.peerStore.merge(this.peers.get(peerId) ?? peerIdFromString(peerId), {
3056
+ tags: {
3057
+ [topic]: undefined
3058
+ }
3059
+ }).catch((err) => { this.log.error('Error untagging peer %s with topic %s', peerId, topic, err) })
3060
+ }
3061
+ }