@lodestar/reqresp 1.35.0-dev.fcf8d024ea → 1.35.0-dev.feed916580

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/package.json +12 -12
  2. package/lib/ReqResp.d.ts.map +0 -1
  3. package/lib/encoders/requestDecode.d.ts.map +0 -1
  4. package/lib/encoders/requestEncode.d.ts.map +0 -1
  5. package/lib/encoders/responseDecode.d.ts.map +0 -1
  6. package/lib/encoders/responseEncode.d.ts.map +0 -1
  7. package/lib/encodingStrategies/index.d.ts.map +0 -1
  8. package/lib/encodingStrategies/sszSnappy/decode.d.ts.map +0 -1
  9. package/lib/encodingStrategies/sszSnappy/encode.d.ts.map +0 -1
  10. package/lib/encodingStrategies/sszSnappy/errors.d.ts.map +0 -1
  11. package/lib/encodingStrategies/sszSnappy/index.d.ts.map +0 -1
  12. package/lib/encodingStrategies/sszSnappy/snappyFrames/common.d.ts.map +0 -1
  13. package/lib/encodingStrategies/sszSnappy/snappyFrames/compress.d.ts.map +0 -1
  14. package/lib/encodingStrategies/sszSnappy/snappyFrames/uncompress.d.ts.map +0 -1
  15. package/lib/encodingStrategies/sszSnappy/utils.d.ts.map +0 -1
  16. package/lib/index.d.ts.map +0 -1
  17. package/lib/interface.d.ts.map +0 -1
  18. package/lib/metrics.d.ts.map +0 -1
  19. package/lib/rate_limiter/ReqRespRateLimiter.d.ts.map +0 -1
  20. package/lib/rate_limiter/rateLimiterGRCA.d.ts.map +0 -1
  21. package/lib/rate_limiter/selfRateLimiter.d.ts.map +0 -1
  22. package/lib/request/errors.d.ts.map +0 -1
  23. package/lib/request/index.d.ts.map +0 -1
  24. package/lib/response/errors.d.ts.map +0 -1
  25. package/lib/response/index.d.ts.map +0 -1
  26. package/lib/types.d.ts.map +0 -1
  27. package/lib/utils/abortableSource.d.ts.map +0 -1
  28. package/lib/utils/bufferedSource.d.ts.map +0 -1
  29. package/lib/utils/collectExactOne.d.ts.map +0 -1
  30. package/lib/utils/collectMaxResponse.d.ts.map +0 -1
  31. package/lib/utils/errorMessage.d.ts.map +0 -1
  32. package/lib/utils/index.d.ts.map +0 -1
  33. package/lib/utils/onChunk.d.ts.map +0 -1
  34. package/lib/utils/peerId.d.ts.map +0 -1
  35. package/lib/utils/protocolId.d.ts.map +0 -1
  36. package/src/ReqResp.ts +0 -289
  37. package/src/encoders/requestDecode.ts +0 -29
  38. package/src/encoders/requestEncode.ts +0 -18
  39. package/src/encoders/responseDecode.ts +0 -169
  40. package/src/encoders/responseEncode.ts +0 -81
  41. package/src/encodingStrategies/index.ts +0 -46
  42. package/src/encodingStrategies/sszSnappy/decode.ts +0 -111
  43. package/src/encodingStrategies/sszSnappy/encode.ts +0 -24
  44. package/src/encodingStrategies/sszSnappy/errors.ts +0 -31
  45. package/src/encodingStrategies/sszSnappy/index.ts +0 -3
  46. package/src/encodingStrategies/sszSnappy/snappyFrames/common.ts +0 -36
  47. package/src/encodingStrategies/sszSnappy/snappyFrames/compress.ts +0 -25
  48. package/src/encodingStrategies/sszSnappy/snappyFrames/uncompress.ts +0 -114
  49. package/src/encodingStrategies/sszSnappy/utils.ts +0 -7
  50. package/src/index.ts +0 -10
  51. package/src/interface.ts +0 -26
  52. package/src/metrics.ts +0 -95
  53. package/src/rate_limiter/ReqRespRateLimiter.ts +0 -107
  54. package/src/rate_limiter/rateLimiterGRCA.ts +0 -92
  55. package/src/rate_limiter/selfRateLimiter.ts +0 -112
  56. package/src/request/errors.ts +0 -119
  57. package/src/request/index.ts +0 -225
  58. package/src/response/errors.ts +0 -50
  59. package/src/response/index.ts +0 -147
  60. package/src/types.ts +0 -158
  61. package/src/utils/abortableSource.ts +0 -80
  62. package/src/utils/bufferedSource.ts +0 -46
  63. package/src/utils/collectExactOne.ts +0 -15
  64. package/src/utils/collectMaxResponse.ts +0 -19
  65. package/src/utils/errorMessage.ts +0 -51
  66. package/src/utils/index.ts +0 -8
  67. package/src/utils/onChunk.ts +0 -12
  68. package/src/utils/peerId.ts +0 -6
  69. package/src/utils/protocolId.ts +0 -44
package/src/interface.ts DELETED
@@ -1,26 +0,0 @@
1
- // Request/Response constants
2
- export enum RespStatus {
3
- /**
4
- * A normal response follows, with contents matching the expected message schema and encoding specified in the request
5
- */
6
- SUCCESS = 0,
7
- /**
8
- * The contents of the request are semantically invalid, or the payload is malformed,
9
- * or could not be understood. The response payload adheres to the ErrorMessage schema
10
- */
11
- INVALID_REQUEST = 1,
12
- /**
13
- * The responder encountered an error while processing the request. The response payload adheres to the ErrorMessage schema
14
- */
15
- SERVER_ERROR = 2,
16
- /**
17
- * The responder does not have requested resource. The response payload adheres to the ErrorMessage schema (described below). Note: This response code is only valid as a response to BlocksByRange
18
- */
19
- RESOURCE_UNAVAILABLE = 3,
20
- /**
21
- * Our node does not have bandwidth to serve requests due to either per-peer quota or total quota.
22
- */
23
- RATE_LIMITED = 139,
24
- }
25
-
26
- export type RpcResponseStatusError = Exclude<RespStatus, RespStatus.SUCCESS>;
package/src/metrics.ts DELETED
@@ -1,95 +0,0 @@
1
- import {MetricsRegisterExtra} from "@lodestar/utils";
2
- import {RequestErrorCode} from "./request/errors.js";
3
-
4
- export type Metrics = ReturnType<typeof getMetrics>;
5
-
6
- /**
7
- * A collection of metrics used throughout the Gossipsub behaviour.
8
- */
9
- export function getMetrics(register: MetricsRegisterExtra) {
10
- // Using function style instead of class to prevent having to re-declare all MetricsPrometheus types.
11
-
12
- return {
13
- outgoingRequests: register.gauge<{method: string}>({
14
- name: "beacon_reqresp_outgoing_requests_total",
15
- help: "Counts total requests done per method",
16
- labelNames: ["method"],
17
- }),
18
- outgoingOpenedStreams: register.counter<{method: string}>({
19
- name: "beacon_reqresp_outgoing_opened_streams_total",
20
- help: "Counts total opened streams per method",
21
- labelNames: ["method"],
22
- }),
23
- outgoingClosedStreams: register.counter<{method: string}>({
24
- name: "beacon_reqresp_outgoing_closed_streams_total",
25
- help: "Counts total closed streams per method",
26
- labelNames: ["method"],
27
- }),
28
- outgoingRequestRoundtripTime: register.histogram<{method: string}>({
29
- name: "beacon_reqresp_outgoing_request_roundtrip_time_seconds",
30
- help: "Histogram of outgoing requests round-trip time",
31
- labelNames: ["method"],
32
- // Spec sets RESP_TIMEOUT = 10 sec
33
- buckets: [0.1, 0.2, 0.5, 1, 5, 10, 15, 60],
34
- }),
35
- outgoingErrors: register.gauge<{method: string}>({
36
- name: "beacon_reqresp_outgoing_requests_error_total",
37
- help: "Counts total failed requests done per method",
38
- labelNames: ["method"],
39
- }),
40
- outgoingErrorReasons: register.gauge<{reason: RequestErrorCode}>({
41
- name: "beacon_reqresp_outgoing_requests_error_reason_total",
42
- help: "Count total outgoing request errors by reason",
43
- labelNames: ["reason"],
44
- }),
45
- incomingRequests: register.gauge<{method: string}>({
46
- name: "beacon_reqresp_incoming_requests_total",
47
- help: "Counts total responses handled per method",
48
- labelNames: ["method"],
49
- }),
50
- incomingOpenedStreams: register.counter<{method: string}>({
51
- name: "beacon_reqresp_incoming_opened_streams_total",
52
- help: "Counts total incoming opened streams per method",
53
- labelNames: ["method"],
54
- }),
55
- incomingClosedStreams: register.counter<{method: string}>({
56
- name: "beacon_reqresp_incoming_closed_streams_total",
57
- help: "Counts total incoming closed streams per method",
58
- labelNames: ["method"],
59
- }),
60
- incomingRequestHandlerTime: register.histogram<{method: string}>({
61
- name: "beacon_reqresp_incoming_request_handler_time_seconds",
62
- help: "Histogram of incoming requests internal handling time",
63
- labelNames: ["method"],
64
- // Spec sets RESP_TIMEOUT = 10 sec
65
- buckets: [0.1, 0.2, 0.5, 1, 5, 10],
66
- }),
67
- incomingErrors: register.gauge<{method: string}>({
68
- name: "beacon_reqresp_incoming_requests_error_total",
69
- help: "Counts total failed responses handled per method",
70
- labelNames: ["method"],
71
- }),
72
- outgoingResponseTTFB: register.histogram<{method: string}>({
73
- name: "beacon_reqresp_outgoing_response_ttfb_seconds",
74
- help: "Time to first byte (TTFB) for outgoing responses",
75
- labelNames: ["method"],
76
- // Spec sets TTFB_TIMEOUT = 5 sec
77
- buckets: [0.1, 1, 5],
78
- }),
79
- incomingResponseTTFB: register.histogram<{method: string}>({
80
- name: "beacon_reqresp_incoming_response_ttfb_seconds",
81
- help: "Time to first byte (TTFB) for incoming responses",
82
- labelNames: ["method"],
83
- // Spec sets TTFB_TIMEOUT = 5 sec
84
- buckets: [0.1, 1, 5],
85
- }),
86
- dialErrors: register.gauge({
87
- name: "beacon_reqresp_dial_errors_total",
88
- help: "Count total dial errors",
89
- }),
90
- selfRateLimiterPeerCount: register.gauge({
91
- name: "beacon_reqresp_self_rate_limiter_peer_count",
92
- help: "Count of peers tracked by the self rate limiter",
93
- }),
94
- };
95
- }
@@ -1,107 +0,0 @@
1
- import {PeerId} from "@libp2p/interface";
2
- import {InboundRateLimitQuota, ReqRespRateLimiterOpts} from "../types.js";
3
- import {RateLimiterGRCA} from "./rateLimiterGRCA.js";
4
-
5
- /** Sometimes a peer request comes AFTER libp2p disconnect event, check for such peers every 10 minutes */
6
- const CHECK_DISCONNECTED_PEERS_INTERVAL_MS = 10 * 60 * 1000;
7
-
8
- /** Peers don't request us for 5 mins are considered disconnected */
9
- const DISCONNECTED_TIMEOUT_MS = 5 * 60 * 1000;
10
-
11
- type ProtocolID = string;
12
-
13
- export class ReqRespRateLimiter {
14
- private readonly rateLimitersPerPeer = new Map<ProtocolID, RateLimiterGRCA<string>>();
15
- private readonly rateLimitersTotal = new Map<ProtocolID, RateLimiterGRCA<null>>();
16
- /** Interval to check lastSeenMessagesByPeer */
17
- private cleanupInterval: NodeJS.Timeout | undefined = undefined;
18
- private rateLimitMultiplier: number;
19
- /** Periodically check this to remove tracker of disconnected peers */
20
- private lastSeenRequestsByPeer: Map<string, number>;
21
-
22
- constructor(private readonly opts?: ReqRespRateLimiterOpts) {
23
- this.rateLimitMultiplier = opts?.rateLimitMultiplier ?? 1;
24
- this.lastSeenRequestsByPeer = new Map();
25
- }
26
-
27
- get enabled(): boolean {
28
- return this.rateLimitMultiplier > 0;
29
- }
30
-
31
- setRateLimits(protocolID: ProtocolID, rateLimits: InboundRateLimitQuota): void {
32
- if (!this.enabled) {
33
- return;
34
- }
35
-
36
- if (rateLimits.byPeer) {
37
- this.rateLimitersPerPeer.set(
38
- protocolID,
39
- RateLimiterGRCA.fromQuota<string>({
40
- quotaTimeMs: rateLimits.byPeer.quotaTimeMs,
41
- quota: rateLimits.byPeer.quota * this.rateLimitMultiplier,
42
- })
43
- );
44
- }
45
-
46
- if (rateLimits.total) {
47
- this.rateLimitersTotal.set(
48
- protocolID,
49
- RateLimiterGRCA.fromQuota<null>({
50
- quotaTimeMs: rateLimits.total.quotaTimeMs,
51
- quota: rateLimits.total.quota * this.rateLimitMultiplier,
52
- })
53
- );
54
- }
55
- }
56
-
57
- allows(peerId: PeerId, protocolID: string, requestCount: number): boolean {
58
- if (!this.enabled) {
59
- return true;
60
- }
61
-
62
- const peerIdStr = peerId.toString();
63
- this.lastSeenRequestsByPeer.set(peerIdStr, Date.now());
64
-
65
- const byPeer = this.rateLimitersPerPeer.get(protocolID);
66
- const total = this.rateLimitersTotal.get(protocolID);
67
-
68
- if ((byPeer && !byPeer.allows(peerIdStr, requestCount)) || (total && !total.allows(null, requestCount))) {
69
- this.opts?.onRateLimit?.(peerId, protocolID);
70
- return false;
71
- }
72
-
73
- return true;
74
- }
75
-
76
- prune(peerId: PeerId): void {
77
- const peerIdStr = peerId.toString();
78
- this.pruneByPeerIdStr(peerIdStr);
79
- }
80
-
81
- start(): void {
82
- this.cleanupInterval = setInterval(this.checkDisconnectedPeers.bind(this), CHECK_DISCONNECTED_PEERS_INTERVAL_MS);
83
- }
84
-
85
- stop(): void {
86
- if (this.cleanupInterval !== undefined) {
87
- clearInterval(this.cleanupInterval);
88
- }
89
- }
90
-
91
- private pruneByPeerIdStr(peerIdStr: string): void {
92
- // Check for every method and version to cleanup
93
- for (const method of this.rateLimitersPerPeer.values()) {
94
- method.pruneByKey(peerIdStr);
95
- }
96
- this.lastSeenRequestsByPeer.delete(peerIdStr);
97
- }
98
-
99
- private checkDisconnectedPeers(): void {
100
- const now = Date.now();
101
- for (const [peerIdStr, lastSeenTime] of this.lastSeenRequestsByPeer.entries()) {
102
- if (now - lastSeenTime >= DISCONNECTED_TIMEOUT_MS) {
103
- this.pruneByPeerIdStr(peerIdStr);
104
- }
105
- }
106
- }
107
- }
@@ -1,92 +0,0 @@
1
- type MiliSeconds = number;
2
-
3
- export interface RateLimiterQuota {
4
- /** How often are `max_tokens` fully replenished. */
5
- quotaTimeMs: MiliSeconds;
6
- /** Token limit. This translates on how large can an instantaneous batch of tokens be. */
7
- quota: number;
8
- }
9
-
10
- /**
11
- * Generic Cell Rate Algorithm is a leaky bucket-type scheduling algorithm.
12
- *
13
- * Most rate-limit implementations are either time-bucket or leaky-bucket based. The time-bucket requires the storage
14
- * of two values and does not enforce a rate, while the leaky-bucket approach requires a separate process to
15
- * continually refill the bucket. GCRA only storing a value (the TAT) while still being simple. GCRA may be rarely
16
- * used because of its perceived complexity.
17
- *
18
- * GCRA aims to limit requests to `R = L/P`, where this implementation sets `L = 1` for simplicity. The target rate
19
- * then is `R = 1/P` so request separated by at least `P` are not limited. Define the Theoretical Arrival Time (TAT)
20
- * of the next request to be equal
21
- */
22
- export class RateLimiterGRCA<Key> {
23
- /** Time when the bucket will be full for each peer. TAT (theoretical arrival time) from GCRA */
24
- private readonly tatPerKey = new Map<Key, MiliSeconds>();
25
- private readonly startTimeMs = Date.now();
26
-
27
- constructor(
28
- /** After how long is the bucket considered full via replenishing 1T every `t`. */
29
- private readonly msPerBucket: MiliSeconds,
30
- /** How often is 1 token replenished */
31
- private readonly msPerToken: MiliSeconds
32
- ) {}
33
-
34
- static fromQuota<Key>(quota: RateLimiterQuota): RateLimiterGRCA<Key> {
35
- if (quota.quota === 0) {
36
- throw Error("Max number of tokens should be positive");
37
- }
38
- const msPerBucket = quota.quotaTimeMs;
39
- if (msPerBucket === 0) {
40
- throw Error("Replenish time must be positive");
41
- }
42
- const msPerToken = msPerBucket / quota.quota;
43
- return new RateLimiterGRCA(msPerBucket, msPerToken);
44
- }
45
-
46
- allows(key: Key, tokens: number): boolean {
47
- if (tokens <= 0) {
48
- throw new Error(`Token value should always be positive. Given: ${tokens}.`);
49
- }
50
-
51
- const msSinceStart = Date.now() - this.startTimeMs;
52
-
53
- /** how long does it take to replenish these tokens */
54
- const additionalTime = this.msPerToken * tokens;
55
-
56
- if (additionalTime > this.msPerBucket) {
57
- // the time required to process this amount of tokens is longer than the time that makes the bucket full.
58
- return false;
59
- }
60
-
61
- // If the key is new, we consider their bucket full (which means, their request will be allowed)
62
- let resetTimeForKey = this.tatPerKey.get(key);
63
- if (resetTimeForKey === undefined) {
64
- resetTimeForKey = msSinceStart;
65
- this.tatPerKey.set(key, resetTimeForKey);
66
- }
67
-
68
- // check how soon could the request be made
69
- const earliestTime = resetTimeForKey + additionalTime - this.msPerBucket;
70
- if (msSinceStart < earliestTime) {
71
- return false;
72
- }
73
-
74
- // calculate the new TAT
75
- this.tatPerKey.set(key, Math.max(msSinceStart, resetTimeForKey) + additionalTime);
76
- return true;
77
- }
78
-
79
- /** Removes keys for which their bucket is full by `time_limit` */
80
- pruneByTime(timeLimit: MiliSeconds): void {
81
- for (const entry of this.tatPerKey.entries()) {
82
- // remove those for which tat < lim
83
- if (entry[1] < timeLimit) {
84
- this.tatPerKey.delete(entry[0]);
85
- }
86
- }
87
- }
88
-
89
- pruneByKey(key: Key): void {
90
- this.tatPerKey.delete(key);
91
- }
92
- }
@@ -1,112 +0,0 @@
1
- import {Logger, MapDef} from "@lodestar/utils";
2
-
3
- type PeerIdStr = string;
4
- type ProtocolID = string;
5
- /** https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/p2p-interface.md#constants */
6
- const MAX_CONCURRENT_REQUESTS = 2;
7
-
8
- /** Sometimes a peer request comes AFTER libp2p disconnect event, check for such peers every 2 minutes */
9
- export const CHECK_DISCONNECTED_PEERS_INTERVAL_MS = 2 * 60 * 1000;
10
-
11
- /** Given PING_INTERVAL constants of 15s/20s, we consider a peer is disconnected if there is no request in 1 minute */
12
- const DISCONNECTED_TIMEOUT_MS = 60 * 1000;
13
-
14
- /**
15
- * Timeout to consider a request is no longer in progress
16
- * this is to cover the case where `requestCompleted()` is not called due to unexpected errors
17
- * for example https://github.com/ChainSafe/lodestar/issues/8256
18
- **/
19
- export const REQUEST_TIMEOUT_MS = 30 * 1000;
20
-
21
- type RequestId = number;
22
- type RequestIdMs = number;
23
-
24
- /**
25
- * Simple rate limiter that allows a maximum of 2 concurrent requests per protocol per peer.
26
- * The consumer should either prevent requests from being sent when the limit is reached or handle the case when the request is not allowed.
27
- */
28
- export class SelfRateLimiter {
29
- private readonly rateLimitersPerPeer: MapDef<PeerIdStr, MapDef<ProtocolID, Map<RequestId, RequestIdMs>>>;
30
- /**
31
- * It's not convenient to handle a peer disconnected event so we track the last seen requests by peer.
32
- * This is the same design to `ReqRespRateLimiter`.
33
- **/
34
- private lastSeenRequestsByPeer: Map<string, number>;
35
- /** Interval to check lastSeenMessagesByPeer */
36
- private cleanupInterval: NodeJS.Timeout | undefined = undefined;
37
-
38
- constructor(private readonly logger?: Logger) {
39
- this.rateLimitersPerPeer = new MapDef<PeerIdStr, MapDef<ProtocolID, Map<RequestId, RequestIdMs>>>(
40
- () => new MapDef<ProtocolID, Map<RequestId, RequestIdMs>>(() => new Map())
41
- );
42
- this.lastSeenRequestsByPeer = new Map();
43
- }
44
-
45
- start(): void {
46
- this.cleanupInterval = setInterval(this.checkDisconnectedPeers.bind(this), CHECK_DISCONNECTED_PEERS_INTERVAL_MS);
47
- }
48
-
49
- stop(): void {
50
- if (this.cleanupInterval !== undefined) {
51
- clearInterval(this.cleanupInterval);
52
- this.cleanupInterval = undefined;
53
- }
54
- }
55
-
56
- /**
57
- * called before we send a request to a peer.
58
- */
59
- allows(peerId: PeerIdStr, protocolId: ProtocolID, requestId: RequestId): boolean {
60
- const now = Date.now();
61
- const peerRateLimiter = this.rateLimitersPerPeer.getOrDefault(peerId);
62
- const trackedRequests = peerRateLimiter.getOrDefault(protocolId);
63
- this.lastSeenRequestsByPeer.set(peerId, now);
64
-
65
- let inProgressRequests = 0;
66
- for (const [trackedRequestId, trackedRequestTimeMs] of trackedRequests.entries()) {
67
- if (trackedRequestTimeMs + REQUEST_TIMEOUT_MS <= now) {
68
- // request timed out, remove it
69
- trackedRequests.delete(trackedRequestId);
70
- this.logger?.debug("SelfRateLimiter: request timed out, removing it", {
71
- requestId: trackedRequestId,
72
- requestTime: trackedRequestTimeMs,
73
- peerId,
74
- protocolId,
75
- });
76
- } else {
77
- inProgressRequests++;
78
- }
79
- }
80
-
81
- if (inProgressRequests >= MAX_CONCURRENT_REQUESTS) {
82
- return false;
83
- }
84
-
85
- trackedRequests.set(requestId, now);
86
- return true;
87
- }
88
-
89
- /**
90
- * called when a request to a peer is completed, regardless of success or failure.
91
- * This should NOT be called when the request was not allowed
92
- */
93
- requestCompleted(peerId: PeerIdStr, protocolId: ProtocolID, requestId: RequestId): void {
94
- const peerRateLimiter = this.rateLimitersPerPeer.getOrDefault(peerId);
95
- const trackedRequests = peerRateLimiter.getOrDefault(protocolId);
96
- trackedRequests.delete(requestId);
97
- }
98
-
99
- getPeerCount(): number {
100
- return this.rateLimitersPerPeer.size;
101
- }
102
-
103
- private checkDisconnectedPeers(): void {
104
- const now = Date.now();
105
- for (const [peerIdStr, lastSeenTime] of this.lastSeenRequestsByPeer.entries()) {
106
- if (now - lastSeenTime >= DISCONNECTED_TIMEOUT_MS) {
107
- this.rateLimitersPerPeer.delete(peerIdStr);
108
- this.lastSeenRequestsByPeer.delete(peerIdStr);
109
- }
110
- }
111
- }
112
- }
@@ -1,119 +0,0 @@
1
- import {LodestarError, LodestarErrorObject} from "@lodestar/utils";
2
- import {RespStatus, RpcResponseStatusError} from "../interface.js";
3
- import {ResponseError} from "../response/index.js";
4
-
5
- export enum RequestErrorCode {
6
- // Declaring specific values of RpcResponseStatusError for error clarity downstream
7
- /** `<response_chunk>` had `<result>` === INVALID_REQUEST */
8
- INVALID_REQUEST = "REQUEST_ERROR_INVALID_REQUEST",
9
- INVALID_RESPONSE_SSZ = "REQUEST_ERROR_INVALID_RESPONSE_SSZ",
10
- /** `<response_chunk>` had `<result>` === SERVER_ERROR */
11
- SERVER_ERROR = "REQUEST_ERROR_SERVER_ERROR",
12
- /** `<response_chunk>` had `<result>` === RESOURCE_UNAVAILABLE */
13
- RESOURCE_UNAVAILABLE = "RESOURCE_UNAVAILABLE_ERROR",
14
- /** `<response_chunk>` had a `<result>` not known in the current spec */
15
- UNKNOWN_ERROR_STATUS = "REQUEST_ERROR_UNKNOWN_ERROR_STATUS",
16
- /** Could not open a stream with peer before DIAL_TIMEOUT */
17
- DIAL_TIMEOUT = "REQUEST_ERROR_DIAL_TIMEOUT",
18
- /** Error opening a stream with peer */
19
- DIAL_ERROR = "REQUEST_ERROR_DIAL_ERROR",
20
- /** Reponder did not close write stream before REQUEST_TIMEOUT */
21
- REQUEST_TIMEOUT = "REQUEST_ERROR_REQUEST_TIMEOUT",
22
- /** Error when sending request to responder */
23
- REQUEST_ERROR = "REQUEST_ERROR_REQUEST_ERROR",
24
- /** Reponder did not deliver a full reponse before max maxTotalResponseTimeout() */
25
- RESPONSE_TIMEOUT = "REQUEST_ERROR_RESPONSE_TIMEOUT",
26
- /** A single-response method returned 0 chunks */
27
- EMPTY_RESPONSE = "REQUEST_ERROR_EMPTY_RESPONSE",
28
- /** Time to first byte timeout */
29
- TTFB_TIMEOUT = "REQUEST_ERROR_TTFB_TIMEOUT",
30
- /** Timeout between `<response_chunk>` exceed */
31
- RESP_TIMEOUT = "REQUEST_ERROR_RESP_TIMEOUT",
32
- /** Request rate limited */
33
- REQUEST_RATE_LIMITED = "REQUEST_ERROR_RATE_LIMITED",
34
- /** Request self rate limited */
35
- REQUEST_SELF_RATE_LIMITED = "REQUEST_ERROR_SELF_RATE_LIMITED",
36
- /** Response rate limited */
37
- RESP_RATE_LIMITED = "RESPONSE_ERROR_RATE_LIMITED",
38
- /** For malformed SSZ (metadata) responses */
39
- SSZ_OVER_MAX_SIZE = "SSZ_SNAPPY_ERROR_OVER_SSZ_MAX_SIZE",
40
- }
41
-
42
- type RequestErrorType =
43
- | {code: RequestErrorCode.INVALID_REQUEST; errorMessage: string}
44
- | {code: RequestErrorCode.INVALID_RESPONSE_SSZ; errorMessage: string}
45
- | {code: RequestErrorCode.SERVER_ERROR; errorMessage: string}
46
- | {code: RequestErrorCode.RESOURCE_UNAVAILABLE; errorMessage: string}
47
- | {code: RequestErrorCode.UNKNOWN_ERROR_STATUS; status: RpcResponseStatusError; errorMessage: string}
48
- | {code: RequestErrorCode.DIAL_TIMEOUT}
49
- | {code: RequestErrorCode.DIAL_ERROR; error: Error}
50
- | {code: RequestErrorCode.REQUEST_TIMEOUT}
51
- | {code: RequestErrorCode.REQUEST_ERROR; error: Error}
52
- | {code: RequestErrorCode.EMPTY_RESPONSE}
53
- | {code: RequestErrorCode.TTFB_TIMEOUT}
54
- | {code: RequestErrorCode.RESP_TIMEOUT}
55
- | {code: RequestErrorCode.REQUEST_RATE_LIMITED}
56
- | {code: RequestErrorCode.REQUEST_SELF_RATE_LIMITED}
57
- | {code: RequestErrorCode.RESP_RATE_LIMITED}
58
- | {code: RequestErrorCode.SSZ_OVER_MAX_SIZE};
59
-
60
- export const REQUEST_ERROR_CLASS_NAME = "RequestError";
61
-
62
- export class RequestError extends LodestarError<RequestErrorType> {
63
- constructor(type: RequestErrorType, message?: string, stack?: string) {
64
- super(type, message ?? renderErrorMessage(type), stack);
65
- }
66
-
67
- static fromObject(obj: LodestarErrorObject): RequestError {
68
- if (obj.className !== "RequestError") {
69
- throw new Error(`Expected className to be RequestError, but got ${obj.className}`);
70
- }
71
-
72
- return new RequestError(obj.type as RequestErrorType, obj.message, obj.stack);
73
- }
74
- }
75
-
76
- /**
77
- * Parse response status errors into detailed request errors for each status code for easier debugging
78
- */
79
- export function responseStatusErrorToRequestError(e: ResponseError): RequestErrorType {
80
- const {errorMessage, status} = e;
81
- // rate limited error from clients have different status, for example: lighthouse responds with 139, teku responds with 1
82
- // but all of them has "rate limit" in the error message
83
- // refer to https://github.com/ChainSafe/lodestar/issues/8065#issuecomment-3157266196
84
- const errorMessageLowercase = errorMessage.toLowerCase();
85
- if (errorMessageLowercase.includes("rate limit")) {
86
- return {code: RequestErrorCode.RESP_RATE_LIMITED};
87
- }
88
-
89
- // Grandine may return this without standard RespStatus, see https://github.com/ChainSafe/lodestar/issues/8110
90
- if (errorMessageLowercase.includes("wait ")) {
91
- return {code: RequestErrorCode.RESP_TIMEOUT};
92
- }
93
-
94
- switch (status) {
95
- case RespStatus.INVALID_REQUEST:
96
- return {code: RequestErrorCode.INVALID_REQUEST, errorMessage};
97
- case RespStatus.SERVER_ERROR:
98
- return {code: RequestErrorCode.SERVER_ERROR, errorMessage};
99
- case RespStatus.RESOURCE_UNAVAILABLE:
100
- return {code: RequestErrorCode.RESOURCE_UNAVAILABLE, errorMessage};
101
- default:
102
- return {code: RequestErrorCode.UNKNOWN_ERROR_STATUS, errorMessage, status};
103
- }
104
- }
105
-
106
- /**
107
- * Render responder's errorMessage directly in main's error.message for easier debugging
108
- */
109
- function renderErrorMessage(type: RequestErrorType): string | undefined {
110
- switch (type.code) {
111
- case RequestErrorCode.INVALID_REQUEST:
112
- case RequestErrorCode.SERVER_ERROR:
113
- case RequestErrorCode.RESOURCE_UNAVAILABLE:
114
- case RequestErrorCode.UNKNOWN_ERROR_STATUS:
115
- return `${type.code}: ${type.errorMessage}`;
116
- default:
117
- return type.code;
118
- }
119
- }