@lodestar/reqresp 1.35.0-dev.f80d2d52da → 1.35.0-dev.fcf8d024ea

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. package/lib/ReqResp.d.ts +1 -1
  2. package/lib/ReqResp.d.ts.map +1 -0
  3. package/lib/ReqResp.js +16 -6
  4. package/lib/ReqResp.js.map +1 -1
  5. package/lib/encoders/requestDecode.d.ts.map +1 -0
  6. package/lib/encoders/requestEncode.d.ts.map +1 -0
  7. package/lib/encoders/responseDecode.d.ts +1 -1
  8. package/lib/encoders/responseDecode.d.ts.map +1 -0
  9. package/lib/encoders/responseDecode.js.map +1 -1
  10. package/lib/encoders/responseEncode.d.ts.map +1 -0
  11. package/lib/encodingStrategies/index.d.ts.map +1 -0
  12. package/lib/encodingStrategies/sszSnappy/decode.d.ts.map +1 -0
  13. package/lib/encodingStrategies/sszSnappy/encode.d.ts.map +1 -0
  14. package/lib/encodingStrategies/sszSnappy/errors.d.ts.map +1 -0
  15. package/lib/encodingStrategies/sszSnappy/index.d.ts +1 -1
  16. package/lib/encodingStrategies/sszSnappy/index.d.ts.map +1 -0
  17. package/lib/encodingStrategies/sszSnappy/index.js +1 -1
  18. package/lib/encodingStrategies/sszSnappy/snappyFrames/common.d.ts.map +1 -0
  19. package/lib/encodingStrategies/sszSnappy/snappyFrames/compress.d.ts.map +1 -0
  20. package/lib/encodingStrategies/sszSnappy/snappyFrames/uncompress.d.ts.map +1 -0
  21. package/lib/encodingStrategies/sszSnappy/snappyFrames/uncompress.js +4 -6
  22. package/lib/encodingStrategies/sszSnappy/snappyFrames/uncompress.js.map +1 -1
  23. package/lib/encodingStrategies/sszSnappy/utils.d.ts.map +1 -0
  24. package/lib/index.d.ts +7 -7
  25. package/lib/index.d.ts.map +1 -0
  26. package/lib/index.js +5 -5
  27. package/lib/index.js.map +1 -1
  28. package/lib/interface.d.ts.map +1 -0
  29. package/lib/metrics.d.ts.map +1 -0
  30. package/lib/rate_limiter/ReqRespRateLimiter.d.ts.map +1 -0
  31. package/lib/rate_limiter/ReqRespRateLimiter.js +8 -4
  32. package/lib/rate_limiter/ReqRespRateLimiter.js.map +1 -1
  33. package/lib/rate_limiter/rateLimiterGRCA.d.ts.map +1 -0
  34. package/lib/rate_limiter/rateLimiterGRCA.js +5 -3
  35. package/lib/rate_limiter/rateLimiterGRCA.js.map +1 -1
  36. package/lib/rate_limiter/selfRateLimiter.d.ts.map +1 -0
  37. package/lib/rate_limiter/selfRateLimiter.js +9 -2
  38. package/lib/rate_limiter/selfRateLimiter.js.map +1 -1
  39. package/lib/request/errors.d.ts.map +1 -0
  40. package/lib/request/index.d.ts +1 -1
  41. package/lib/request/index.d.ts.map +1 -0
  42. package/lib/request/index.js +1 -1
  43. package/lib/request/index.js.map +1 -1
  44. package/lib/response/errors.d.ts.map +1 -0
  45. package/lib/response/errors.js +2 -0
  46. package/lib/response/errors.js.map +1 -1
  47. package/lib/response/index.d.ts.map +1 -0
  48. package/lib/response/index.js +1 -1
  49. package/lib/response/index.js.map +1 -1
  50. package/lib/types.d.ts.map +1 -0
  51. package/lib/utils/abortableSource.d.ts.map +1 -0
  52. package/lib/utils/bufferedSource.d.ts.map +1 -0
  53. package/lib/utils/bufferedSource.js +3 -1
  54. package/lib/utils/bufferedSource.js.map +1 -1
  55. package/lib/utils/collectExactOne.d.ts.map +1 -0
  56. package/lib/utils/collectMaxResponse.d.ts.map +1 -0
  57. package/lib/utils/errorMessage.d.ts.map +1 -0
  58. package/lib/utils/index.d.ts.map +1 -0
  59. package/lib/utils/onChunk.d.ts.map +1 -0
  60. package/lib/utils/peerId.d.ts.map +1 -0
  61. package/lib/utils/protocolId.d.ts.map +1 -0
  62. package/package.json +12 -12
  63. package/src/ReqResp.ts +289 -0
  64. package/src/encoders/requestDecode.ts +29 -0
  65. package/src/encoders/requestEncode.ts +18 -0
  66. package/src/encoders/responseDecode.ts +169 -0
  67. package/src/encoders/responseEncode.ts +81 -0
  68. package/src/encodingStrategies/index.ts +46 -0
  69. package/src/encodingStrategies/sszSnappy/decode.ts +111 -0
  70. package/src/encodingStrategies/sszSnappy/encode.ts +24 -0
  71. package/src/encodingStrategies/sszSnappy/errors.ts +31 -0
  72. package/src/encodingStrategies/sszSnappy/index.ts +3 -0
  73. package/src/encodingStrategies/sszSnappy/snappyFrames/common.ts +36 -0
  74. package/src/encodingStrategies/sszSnappy/snappyFrames/compress.ts +25 -0
  75. package/src/encodingStrategies/sszSnappy/snappyFrames/uncompress.ts +114 -0
  76. package/src/encodingStrategies/sszSnappy/utils.ts +7 -0
  77. package/src/index.ts +10 -0
  78. package/src/interface.ts +26 -0
  79. package/src/metrics.ts +95 -0
  80. package/src/rate_limiter/ReqRespRateLimiter.ts +107 -0
  81. package/src/rate_limiter/rateLimiterGRCA.ts +92 -0
  82. package/src/rate_limiter/selfRateLimiter.ts +112 -0
  83. package/src/request/errors.ts +119 -0
  84. package/src/request/index.ts +225 -0
  85. package/src/response/errors.ts +50 -0
  86. package/src/response/index.ts +147 -0
  87. package/src/types.ts +158 -0
  88. package/src/utils/abortableSource.ts +80 -0
  89. package/src/utils/bufferedSource.ts +46 -0
  90. package/src/utils/collectExactOne.ts +15 -0
  91. package/src/utils/collectMaxResponse.ts +19 -0
  92. package/src/utils/errorMessage.ts +51 -0
  93. package/src/utils/index.ts +8 -0
  94. package/src/utils/onChunk.ts +12 -0
  95. package/src/utils/peerId.ts +6 -0
  96. package/src/utils/protocolId.ts +44 -0
@@ -0,0 +1,92 @@
1
+ type MiliSeconds = number;
2
+
3
+ export interface RateLimiterQuota {
4
+ /** How often are `max_tokens` fully replenished. */
5
+ quotaTimeMs: MiliSeconds;
6
+ /** Token limit. This translates on how large can an instantaneous batch of tokens be. */
7
+ quota: number;
8
+ }
9
+
10
+ /**
11
+ * Generic Cell Rate Algorithm is a leaky bucket-type scheduling algorithm.
12
+ *
13
+ * Most rate-limit implementations are either time-bucket or leaky-bucket based. The time-bucket requires the storage
14
+ * of two values and does not enforce a rate, while the leaky-bucket approach requires a separate process to
15
+ * continually refill the bucket. GCRA only storing a value (the TAT) while still being simple. GCRA may be rarely
16
+ * used because of its perceived complexity.
17
+ *
18
+ * GCRA aims to limit requests to `R = L/P`, where this implementation sets `L = 1` for simplicity. The target rate
19
+ * then is `R = 1/P` so request separated by at least `P` are not limited. Define the Theoretical Arrival Time (TAT)
20
+ * of the next request to be equal
21
+ */
22
+ export class RateLimiterGRCA<Key> {
23
+ /** Time when the bucket will be full for each peer. TAT (theoretical arrival time) from GCRA */
24
+ private readonly tatPerKey = new Map<Key, MiliSeconds>();
25
+ private readonly startTimeMs = Date.now();
26
+
27
+ constructor(
28
+ /** After how long is the bucket considered full via replenishing 1T every `t`. */
29
+ private readonly msPerBucket: MiliSeconds,
30
+ /** How often is 1 token replenished */
31
+ private readonly msPerToken: MiliSeconds
32
+ ) {}
33
+
34
+ static fromQuota<Key>(quota: RateLimiterQuota): RateLimiterGRCA<Key> {
35
+ if (quota.quota === 0) {
36
+ throw Error("Max number of tokens should be positive");
37
+ }
38
+ const msPerBucket = quota.quotaTimeMs;
39
+ if (msPerBucket === 0) {
40
+ throw Error("Replenish time must be positive");
41
+ }
42
+ const msPerToken = msPerBucket / quota.quota;
43
+ return new RateLimiterGRCA(msPerBucket, msPerToken);
44
+ }
45
+
46
+ allows(key: Key, tokens: number): boolean {
47
+ if (tokens <= 0) {
48
+ throw new Error(`Token value should always be positive. Given: ${tokens}.`);
49
+ }
50
+
51
+ const msSinceStart = Date.now() - this.startTimeMs;
52
+
53
+ /** how long does it take to replenish these tokens */
54
+ const additionalTime = this.msPerToken * tokens;
55
+
56
+ if (additionalTime > this.msPerBucket) {
57
+ // the time required to process this amount of tokens is longer than the time that makes the bucket full.
58
+ return false;
59
+ }
60
+
61
+ // If the key is new, we consider their bucket full (which means, their request will be allowed)
62
+ let resetTimeForKey = this.tatPerKey.get(key);
63
+ if (resetTimeForKey === undefined) {
64
+ resetTimeForKey = msSinceStart;
65
+ this.tatPerKey.set(key, resetTimeForKey);
66
+ }
67
+
68
+ // check how soon could the request be made
69
+ const earliestTime = resetTimeForKey + additionalTime - this.msPerBucket;
70
+ if (msSinceStart < earliestTime) {
71
+ return false;
72
+ }
73
+
74
+ // calculate the new TAT
75
+ this.tatPerKey.set(key, Math.max(msSinceStart, resetTimeForKey) + additionalTime);
76
+ return true;
77
+ }
78
+
79
+ /** Removes keys for which their bucket is full by `time_limit` */
80
+ pruneByTime(timeLimit: MiliSeconds): void {
81
+ for (const entry of this.tatPerKey.entries()) {
82
+ // remove those for which tat < lim
83
+ if (entry[1] < timeLimit) {
84
+ this.tatPerKey.delete(entry[0]);
85
+ }
86
+ }
87
+ }
88
+
89
+ pruneByKey(key: Key): void {
90
+ this.tatPerKey.delete(key);
91
+ }
92
+ }
@@ -0,0 +1,112 @@
1
+ import {Logger, MapDef} from "@lodestar/utils";
2
+
3
+ type PeerIdStr = string;
4
+ type ProtocolID = string;
5
+ /** https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#constants */
6
+ const MAX_CONCURRENT_REQUESTS = 2;
7
+
8
+ /** Sometimes a peer request comes AFTER libp2p disconnect event, check for such peers every 2 minutes */
9
+ export const CHECK_DISCONNECTED_PEERS_INTERVAL_MS = 2 * 60 * 1000;
10
+
11
+ /** Given PING_INTERVAL constants of 15s/20s, we consider a peer is disconnected if there is no request in 1 minute */
12
+ const DISCONNECTED_TIMEOUT_MS = 60 * 1000;
13
+
14
+ /**
15
+ * Timeout to consider a request is no longer in progress
16
+ * this is to cover the case where `requestCompleted()` is not called due to unexpected errors
17
+ * for example https://github.com/ChainSafe/lodestar/issues/8256
18
+ **/
19
+ export const REQUEST_TIMEOUT_MS = 30 * 1000;
20
+
21
+ type RequestId = number;
22
+ type RequestIdMs = number;
23
+
24
+ /**
25
+ * Simple rate limiter that allows a maximum of 2 concurrent requests per protocol per peer.
26
+ * The consumer should either prevent requests from being sent when the limit is reached or handle the case when the request is not allowed.
27
+ */
28
+ export class SelfRateLimiter {
29
+ private readonly rateLimitersPerPeer: MapDef<PeerIdStr, MapDef<ProtocolID, Map<RequestId, RequestIdMs>>>;
30
+ /**
31
+ * It's not convenient to handle a peer disconnected event so we track the last seen requests by peer.
32
+ * This is the same design to `ReqRespRateLimiter`.
33
+ **/
34
+ private lastSeenRequestsByPeer: Map<string, number>;
35
+ /** Interval to check lastSeenMessagesByPeer */
36
+ private cleanupInterval: NodeJS.Timeout | undefined = undefined;
37
+
38
+ constructor(private readonly logger?: Logger) {
39
+ this.rateLimitersPerPeer = new MapDef<PeerIdStr, MapDef<ProtocolID, Map<RequestId, RequestIdMs>>>(
40
+ () => new MapDef<ProtocolID, Map<RequestId, RequestIdMs>>(() => new Map())
41
+ );
42
+ this.lastSeenRequestsByPeer = new Map();
43
+ }
44
+
45
+ start(): void {
46
+ this.cleanupInterval = setInterval(this.checkDisconnectedPeers.bind(this), CHECK_DISCONNECTED_PEERS_INTERVAL_MS);
47
+ }
48
+
49
+ stop(): void {
50
+ if (this.cleanupInterval !== undefined) {
51
+ clearInterval(this.cleanupInterval);
52
+ this.cleanupInterval = undefined;
53
+ }
54
+ }
55
+
56
+ /**
57
+ * called before we send a request to a peer.
58
+ */
59
+ allows(peerId: PeerIdStr, protocolId: ProtocolID, requestId: RequestId): boolean {
60
+ const now = Date.now();
61
+ const peerRateLimiter = this.rateLimitersPerPeer.getOrDefault(peerId);
62
+ const trackedRequests = peerRateLimiter.getOrDefault(protocolId);
63
+ this.lastSeenRequestsByPeer.set(peerId, now);
64
+
65
+ let inProgressRequests = 0;
66
+ for (const [trackedRequestId, trackedRequestTimeMs] of trackedRequests.entries()) {
67
+ if (trackedRequestTimeMs + REQUEST_TIMEOUT_MS <= now) {
68
+ // request timed out, remove it
69
+ trackedRequests.delete(trackedRequestId);
70
+ this.logger?.debug("SelfRateLimiter: request timed out, removing it", {
71
+ requestId: trackedRequestId,
72
+ requestTime: trackedRequestTimeMs,
73
+ peerId,
74
+ protocolId,
75
+ });
76
+ } else {
77
+ inProgressRequests++;
78
+ }
79
+ }
80
+
81
+ if (inProgressRequests >= MAX_CONCURRENT_REQUESTS) {
82
+ return false;
83
+ }
84
+
85
+ trackedRequests.set(requestId, now);
86
+ return true;
87
+ }
88
+
89
+ /**
90
+ * called when a request to a peer is completed, regardless of success or failure.
91
+ * This should NOT be called when the request was not allowed
92
+ */
93
+ requestCompleted(peerId: PeerIdStr, protocolId: ProtocolID, requestId: RequestId): void {
94
+ const peerRateLimiter = this.rateLimitersPerPeer.getOrDefault(peerId);
95
+ const trackedRequests = peerRateLimiter.getOrDefault(protocolId);
96
+ trackedRequests.delete(requestId);
97
+ }
98
+
99
+ getPeerCount(): number {
100
+ return this.rateLimitersPerPeer.size;
101
+ }
102
+
103
+ private checkDisconnectedPeers(): void {
104
+ const now = Date.now();
105
+ for (const [peerIdStr, lastSeenTime] of this.lastSeenRequestsByPeer.entries()) {
106
+ if (now - lastSeenTime >= DISCONNECTED_TIMEOUT_MS) {
107
+ this.rateLimitersPerPeer.delete(peerIdStr);
108
+ this.lastSeenRequestsByPeer.delete(peerIdStr);
109
+ }
110
+ }
111
+ }
112
+ }
@@ -0,0 +1,119 @@
1
+ import {LodestarError, LodestarErrorObject} from "@lodestar/utils";
2
+ import {RespStatus, RpcResponseStatusError} from "../interface.js";
3
+ import {ResponseError} from "../response/index.js";
4
+
5
+ export enum RequestErrorCode {
6
+ // Declaring specific values of RpcResponseStatusError for error clarity downstream
7
+ /** `<response_chunk>` had `<result>` === INVALID_REQUEST */
8
+ INVALID_REQUEST = "REQUEST_ERROR_INVALID_REQUEST",
9
+ INVALID_RESPONSE_SSZ = "REQUEST_ERROR_INVALID_RESPONSE_SSZ",
10
+ /** `<response_chunk>` had `<result>` === SERVER_ERROR */
11
+ SERVER_ERROR = "REQUEST_ERROR_SERVER_ERROR",
12
+ /** `<response_chunk>` had `<result>` === RESOURCE_UNAVAILABLE */
13
+ RESOURCE_UNAVAILABLE = "RESOURCE_UNAVAILABLE_ERROR",
14
+ /** `<response_chunk>` had a `<result>` not known in the current spec */
15
+ UNKNOWN_ERROR_STATUS = "REQUEST_ERROR_UNKNOWN_ERROR_STATUS",
16
+ /** Could not open a stream with peer before DIAL_TIMEOUT */
17
+ DIAL_TIMEOUT = "REQUEST_ERROR_DIAL_TIMEOUT",
18
+ /** Error opening a stream with peer */
19
+ DIAL_ERROR = "REQUEST_ERROR_DIAL_ERROR",
20
+ /** Reponder did not close write stream before REQUEST_TIMEOUT */
21
+ REQUEST_TIMEOUT = "REQUEST_ERROR_REQUEST_TIMEOUT",
22
+ /** Error when sending request to responder */
23
+ REQUEST_ERROR = "REQUEST_ERROR_REQUEST_ERROR",
24
+ /** Reponder did not deliver a full reponse before max maxTotalResponseTimeout() */
25
+ RESPONSE_TIMEOUT = "REQUEST_ERROR_RESPONSE_TIMEOUT",
26
+ /** A single-response method returned 0 chunks */
27
+ EMPTY_RESPONSE = "REQUEST_ERROR_EMPTY_RESPONSE",
28
+ /** Time to first byte timeout */
29
+ TTFB_TIMEOUT = "REQUEST_ERROR_TTFB_TIMEOUT",
30
+ /** Timeout between `<response_chunk>` exceed */
31
+ RESP_TIMEOUT = "REQUEST_ERROR_RESP_TIMEOUT",
32
+ /** Request rate limited */
33
+ REQUEST_RATE_LIMITED = "REQUEST_ERROR_RATE_LIMITED",
34
+ /** Request self rate limited */
35
+ REQUEST_SELF_RATE_LIMITED = "REQUEST_ERROR_SELF_RATE_LIMITED",
36
+ /** Response rate limited */
37
+ RESP_RATE_LIMITED = "RESPONSE_ERROR_RATE_LIMITED",
38
+ /** For malformed SSZ (metadata) responses */
39
+ SSZ_OVER_MAX_SIZE = "SSZ_SNAPPY_ERROR_OVER_SSZ_MAX_SIZE",
40
+ }
41
+
42
+ type RequestErrorType =
43
+ | {code: RequestErrorCode.INVALID_REQUEST; errorMessage: string}
44
+ | {code: RequestErrorCode.INVALID_RESPONSE_SSZ; errorMessage: string}
45
+ | {code: RequestErrorCode.SERVER_ERROR; errorMessage: string}
46
+ | {code: RequestErrorCode.RESOURCE_UNAVAILABLE; errorMessage: string}
47
+ | {code: RequestErrorCode.UNKNOWN_ERROR_STATUS; status: RpcResponseStatusError; errorMessage: string}
48
+ | {code: RequestErrorCode.DIAL_TIMEOUT}
49
+ | {code: RequestErrorCode.DIAL_ERROR; error: Error}
50
+ | {code: RequestErrorCode.REQUEST_TIMEOUT}
51
+ | {code: RequestErrorCode.REQUEST_ERROR; error: Error}
52
+ | {code: RequestErrorCode.EMPTY_RESPONSE}
53
+ | {code: RequestErrorCode.TTFB_TIMEOUT}
54
+ | {code: RequestErrorCode.RESP_TIMEOUT}
55
+ | {code: RequestErrorCode.REQUEST_RATE_LIMITED}
56
+ | {code: RequestErrorCode.REQUEST_SELF_RATE_LIMITED}
57
+ | {code: RequestErrorCode.RESP_RATE_LIMITED}
58
+ | {code: RequestErrorCode.SSZ_OVER_MAX_SIZE};
59
+
60
+ export const REQUEST_ERROR_CLASS_NAME = "RequestError";
61
+
62
+ export class RequestError extends LodestarError<RequestErrorType> {
63
+ constructor(type: RequestErrorType, message?: string, stack?: string) {
64
+ super(type, message ?? renderErrorMessage(type), stack);
65
+ }
66
+
67
+ static fromObject(obj: LodestarErrorObject): RequestError {
68
+ if (obj.className !== "RequestError") {
69
+ throw new Error(`Expected className to be RequestError, but got ${obj.className}`);
70
+ }
71
+
72
+ return new RequestError(obj.type as RequestErrorType, obj.message, obj.stack);
73
+ }
74
+ }
75
+
76
+ /**
77
+ * Parse response status errors into detailed request errors for each status code for easier debugging
78
+ */
79
+ export function responseStatusErrorToRequestError(e: ResponseError): RequestErrorType {
80
+ const {errorMessage, status} = e;
81
+ // rate limited error from clients have different status, for example: lighthouse responds with 139, teku responds with 1
82
+ // but all of them has "rate limit" in the error message
83
+ // refer to https://github.com/ChainSafe/lodestar/issues/8065#issuecomment-3157266196
84
+ const errorMessageLowercase = errorMessage.toLowerCase();
85
+ if (errorMessageLowercase.includes("rate limit")) {
86
+ return {code: RequestErrorCode.RESP_RATE_LIMITED};
87
+ }
88
+
89
+ // Grandine may return this without standard RespStatus, see https://github.com/ChainSafe/lodestar/issues/8110
90
+ if (errorMessageLowercase.includes("wait ")) {
91
+ return {code: RequestErrorCode.RESP_TIMEOUT};
92
+ }
93
+
94
+ switch (status) {
95
+ case RespStatus.INVALID_REQUEST:
96
+ return {code: RequestErrorCode.INVALID_REQUEST, errorMessage};
97
+ case RespStatus.SERVER_ERROR:
98
+ return {code: RequestErrorCode.SERVER_ERROR, errorMessage};
99
+ case RespStatus.RESOURCE_UNAVAILABLE:
100
+ return {code: RequestErrorCode.RESOURCE_UNAVAILABLE, errorMessage};
101
+ default:
102
+ return {code: RequestErrorCode.UNKNOWN_ERROR_STATUS, errorMessage, status};
103
+ }
104
+ }
105
+
106
+ /**
107
+ * Render responder's errorMessage directly in main's error.message for easier debugging
108
+ */
109
+ function renderErrorMessage(type: RequestErrorType): string | undefined {
110
+ switch (type.code) {
111
+ case RequestErrorCode.INVALID_REQUEST:
112
+ case RequestErrorCode.SERVER_ERROR:
113
+ case RequestErrorCode.RESOURCE_UNAVAILABLE:
114
+ case RequestErrorCode.UNKNOWN_ERROR_STATUS:
115
+ return `${type.code}: ${type.errorMessage}`;
116
+ default:
117
+ return type.code;
118
+ }
119
+ }
@@ -0,0 +1,225 @@
1
import {PeerId} from "@libp2p/interface";
import {pipe} from "it-pipe";
import type {Libp2p} from "libp2p";
import {Uint8ArrayList} from "uint8arraylist";
import {ErrorAborted, Logger, TimeoutError, withTimeout} from "@lodestar/utils";
import {requestEncode} from "../encoders/requestEncode.js";
import {responseDecode} from "../encoders/responseDecode.js";
import {Metrics} from "../metrics.js";
import {ResponseError} from "../response/index.js";
import {MixedProtocol, ResponseIncoming} from "../types.js";
import {abortableSource, prettyPrintPeerId} from "../utils/index.js";
import {RequestError, RequestErrorCode, responseStatusErrorToRequestError} from "./errors.js";

export {RequestError, RequestErrorCode};

// Default spec values from https://github.com/ethereum/consensus-specs/blob/v1.2.0/specs/phase0/p2p-interface.md#configuration
export const DEFAULT_DIAL_TIMEOUT = 5 * 1000; // 5 sec
export const DEFAULT_REQUEST_TIMEOUT = 5 * 1000; // 5 sec
export const DEFAULT_TTFB_TIMEOUT = 5 * 1000; // 5 sec
export const DEFAULT_RESP_TIMEOUT = 10 * 1000; // 10 sec

/** Per-request overrides for the spec and non-spec timeouts, all in milliseconds. */
export interface SendRequestOpts {
  /** The maximum time for complete response transfer. */
  respTimeoutMs?: number;
  /** Non-spec timeout from sending request until write stream closed by responder */
  requestTimeoutMs?: number;
  /** The maximum time to wait for first byte of request response (time-to-first-byte). */
  ttfbTimeoutMs?: number;
  /** Non-spec timeout from dialing protocol until stream opened */
  dialTimeoutMs?: number;
}

/** Collaborators needed by `sendRequest`; `peerClient` is only used to enrich log context */
type SendRequestModules = {
  logger: Logger;
  libp2p: Libp2p;
  metrics: Metrics | null;
  peerClient?: string;
};

/**
 * Sends ReqResp request to a peer. Throws on error. Logs each step of the request lifecycle.
 *
 * 1. Dial peer, establish duplex stream
 * 2. Encode and write request to peer. Expect the responder to close the stream's write side
 * 3. Read and decode response(s) from peer. Will close the read stream if:
 * - An error result is received in one of the chunks. Reads the error_message and throws.
 * - The responder closes the stream. If at the end or start of a <response_chunk>, return. Otherwise throws
 * - Any part of the response_chunk fails validation. Throws a typed error (see `SszSnappyError`)
 * - The maximum number of requested chunks are read. Does not throw, returns read chunks only.
 *
 * @param protocols candidate protocol definitions, in the same order as `protocolIDs`;
 *   stream negotiation selects one (e.g. V1 vs V2 of the same method)
 * @param protocolIDs protocol ID strings matching `protocols` index-for-index
 * @param requestBody already-encoded request payload bytes
 * @param requestId used only for log correlation, defaults to 0
 * @throws RequestError for dial/request/response timeouts and responder error statuses
 */
export async function* sendRequest(
  {logger, libp2p, metrics, peerClient}: SendRequestModules,
  peerId: PeerId,
  protocols: MixedProtocol[],
  protocolIDs: string[],
  requestBody: Uint8Array,
  signal?: AbortSignal,
  opts?: SendRequestOpts,
  requestId = 0
): AsyncIterable<ResponseIncoming> {
  if (protocols.length === 0) {
    throw Error("sendRequest must set > 0 protocols");
  }

  const DIAL_TIMEOUT = opts?.dialTimeoutMs ?? DEFAULT_DIAL_TIMEOUT;
  const REQUEST_TIMEOUT = opts?.requestTimeoutMs ?? DEFAULT_REQUEST_TIMEOUT;
  const TTFB_TIMEOUT = opts?.ttfbTimeoutMs ?? DEFAULT_TTFB_TIMEOUT;
  const RESP_TIMEOUT = opts?.respTimeoutMs ?? DEFAULT_RESP_TIMEOUT;

  const peerIdStrShort = prettyPrintPeerId(peerId);
  // Log context starts with the first candidate protocol; `version` is overwritten below
  // once stream negotiation picks the actual protocol
  const {method, encoding, version} = protocols[0];
  const logCtx = {method, version, encoding, client: peerClient, peer: peerIdStrShort, requestId};

  if (signal?.aborted) {
    throw new ErrorAborted("sendRequest");
  }

  logger.debug("Req dialing peer", logCtx);

  try {
    // From Altair block query methods have V1 and V2. Both protocols should be requested.
    // On stream negotiation `libp2p.dialProtocol` will pick the available protocol and return
    // the picked protocol in `connection.protocol`
    const protocolsMap = new Map<string, MixedProtocol>(protocols.map((protocol, i) => [protocolIDs[i], protocol]));

    // As of October 2020 we can't rely on libp2p.dialProtocol timeout to work so
    // this function wraps the dialProtocol promise with an extra timeout
    //
    // > The issue might be: you add the peer's addresses to the AddressBook,
    // which will result in autoDial to kick in and dial your peer. In parallel,
    // you do a manual dial and it will wait for the previous one without using
    // the abort signal:
    //
    // https://github.com/ChainSafe/lodestar/issues/1597#issuecomment-703394386

    // DIAL_TIMEOUT: Non-spec timeout from dialing protocol until stream opened
    const stream = await withTimeout(
      async (timeoutAndParentSignal) => {
        const protocolIds = Array.from(protocolsMap.keys());
        const conn = await libp2p.dialProtocol(peerId, protocolIds, {signal: timeoutAndParentSignal});
        if (!conn) throw Error("dialProtocol timeout");
        return conn;
      },
      DIAL_TIMEOUT,
      signal
    ).catch((e: Error) => {
      // Distinguish our own timeout from genuine dial failures for error clarity downstream
      if (e instanceof TimeoutError) {
        throw new RequestError({code: RequestErrorCode.DIAL_TIMEOUT});
      }
      throw new RequestError({code: RequestErrorCode.DIAL_ERROR, error: e});
    });

    metrics?.outgoingOpenedStreams?.inc({method});

    // TODO: Does the TTFB timer start on opening stream or after receiving request
    const timerTTFB = metrics?.outgoingResponseTTFB.startTimer({method});

    // Parse protocol selected by the responder
    const protocolId = stream.protocol ?? "unknown";
    const protocol = protocolsMap.get(protocolId);
    if (!protocol) throw Error(`dialProtocol selected unknown protocolId ${protocolId}`);

    // Override with actual version that was negotiated
    logCtx.version = protocol.version;

    logger.debug("Req sending request", logCtx);

    // Spec: The requester MUST close the write side of the stream once it finishes writing the request message
    // Impl: stream.sink is closed automatically by js-libp2p-mplex when piped source is exhausted

    // REQUEST_TIMEOUT: Non-spec timeout from sending request until write stream closed by responder
    // Note: libp2p.stop() will close all connections, so not necessary to abort this pipe on parent stop
    await withTimeout(() => pipe(requestEncode(protocol, requestBody), stream.sink), REQUEST_TIMEOUT, signal).catch(
      (e) => {
        // Must close the stream read side (stream.source) manually AND the write side
        stream.abort(e);

        if (e instanceof TimeoutError) {
          throw new RequestError({code: RequestErrorCode.REQUEST_TIMEOUT});
        }
        throw new RequestError({code: RequestErrorCode.REQUEST_ERROR, error: e as Error});
      }
    );

    logger.debug("Req request sent", logCtx);

    // For goodbye method peers may disconnect before completing the response and trigger multiple errors.
    // Do not expect them to reply and successfully return early
    if (protocol.ignoreResponse) {
      return;
    }

    // - TTFB_TIMEOUT: The requester MUST wait a maximum of TTFB_TIMEOUT for the first response byte to arrive
    // - RESP_TIMEOUT: Requester allows a further RESP_TIMEOUT for each subsequent response_chunk
    // - Max total timeout: This timeout is not required by the spec. It may not be necessary, but it's kept as
    //   safe-guard to close. streams in case of bugs on other timeout mechanisms.
    const ttfbTimeoutController = new AbortController();
    const respTimeoutController = new AbortController();

    let timeoutRESP: NodeJS.Timeout | null = null;

    const timeoutTTFB = setTimeout(() => {
      // If we abort on first byte delay, don't need to abort for response delay
      if (timeoutRESP) clearTimeout(timeoutRESP);
      ttfbTimeoutController.abort();
    }, TTFB_TIMEOUT);

    // Arms (or re-arms) the per-chunk RESP_TIMEOUT; called once per <response_chunk>
    const restartRespTimeout = (): void => {
      if (timeoutRESP) clearTimeout(timeoutRESP);
      timeoutRESP = setTimeout(() => respTimeoutController.abort(), RESP_TIMEOUT);
    };

    try {
      // Note: libp2p.stop() will close all connections, so not necessary to abort this pipe on parent stop
      yield* pipe(
        abortableSource(stream.source as AsyncIterable<Uint8ArrayList>, [
          {
            signal: ttfbTimeoutController.signal,
            getError: () => new RequestError({code: RequestErrorCode.TTFB_TIMEOUT}),
          },
          {
            signal: respTimeoutController.signal,
            getError: () => new RequestError({code: RequestErrorCode.RESP_TIMEOUT}),
          },
        ]),

        // Transforms `Buffer` chunks to yield `ResponseBody` chunks
        responseDecode(protocol, {
          onFirstHeader() {
            // On first byte, cancel the single use TTFB_TIMEOUT, and start RESP_TIMEOUT
            clearTimeout(timeoutTTFB);
            timerTTFB?.();
            restartRespTimeout();
          },
          onFirstResponseChunk() {
            // On <response_chunk>, cancel this chunk's RESP_TIMEOUT and start next's
            restartRespTimeout();
          },
        })
      );

      // NOTE: Only log once per request to verbose, intermediate steps to debug
      // NOTE: Do not log the response, logs get extremely cluttered
      // NOTE: add double space after "Req " to align log with the "Resp " log
      logger.verbose("Req done", logCtx);
    } finally {
      clearTimeout(timeoutTTFB);
      if (timeoutRESP !== null) clearTimeout(timeoutRESP);

      // Necessary to call `stream.close()` since collectResponses() may break out of the source before exhausting it
      // `stream.close()` libp2p-mplex will .end() the source (it-pushable instance)
      // If collectResponses() exhausts the source, it-pushable.end() can be safely called multiple times
      await stream.close();
      metrics?.outgoingClosedStreams?.inc({method});
      logger.verbose("Req stream closed", logCtx);
    }
  } catch (e) {
    logger.verbose("Req error", logCtx, e as Error);

    // Responder-signalled status errors are re-mapped into typed RequestErrors; everything else rethrows as-is
    if (e instanceof ResponseError) {
      throw new RequestError(responseStatusErrorToRequestError(e));
    }
    throw e;
  }
}
@@ -0,0 +1,50 @@
1
+ import {LodestarError, LodestarErrorMetaData, LodestarErrorObject} from "@lodestar/utils";
2
+ import {RespStatus, RpcResponseStatusError} from "../interface.js";
3
+
4
+ type RpcResponseStatusNotSuccess = Exclude<RespStatus, RespStatus.SUCCESS>;
5
+
6
+ export enum ResponseErrorCode {
7
+ RESPONSE_STATUS_ERROR = "RESPONSE_STATUS_ERROR",
8
+ }
9
+
10
+ type RequestErrorType = {
11
+ code: ResponseErrorCode;
12
+ status: RpcResponseStatusError;
13
+ errorMessage: string;
14
+ };
15
+
16
+ export const RESPONSE_ERROR_CLASS_NAME = "ResponseError";
17
+
18
+ /**
19
+ * Used internally only to signal a response status error. Since the error should never bubble up to the user,
20
+ * the error code and error message does not matter much.
21
+ */
22
+ export class ResponseError extends LodestarError<RequestErrorType> {
23
+ status: RpcResponseStatusNotSuccess;
24
+ errorMessage: string;
25
+ constructor(status: RpcResponseStatusNotSuccess, errorMessage: string, stack?: string) {
26
+ const type = {code: ResponseErrorCode.RESPONSE_STATUS_ERROR, status, errorMessage};
27
+ super(type, `RESPONSE_ERROR_${RespStatus[status]}: ${errorMessage}`, stack);
28
+ this.status = status;
29
+ this.errorMessage = errorMessage;
30
+ }
31
+
32
+ getMetadata(): LodestarErrorMetaData {
33
+ return {
34
+ status: this.status,
35
+ errorMessage: this.errorMessage,
36
+ };
37
+ }
38
+
39
+ static fromObject(obj: LodestarErrorObject): ResponseError {
40
+ if (obj.className !== RESPONSE_ERROR_CLASS_NAME) {
41
+ throw new Error(`Expected className to be ResponseError, but got ${obj.className}`);
42
+ }
43
+
44
+ return new ResponseError(
45
+ obj.type.status as RpcResponseStatusNotSuccess,
46
+ obj.type.errorMessage as string,
47
+ obj.stack
48
+ );
49
+ }
50
+ }