@lodestar/reqresp 1.35.0-dev.e9dd48f165 → 1.35.0-dev.f45a2be721

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96) hide show
  1. package/lib/ReqResp.d.ts +1 -1
  2. package/lib/ReqResp.d.ts.map +1 -0
  3. package/lib/ReqResp.js +16 -6
  4. package/lib/ReqResp.js.map +1 -1
  5. package/lib/encoders/requestDecode.d.ts.map +1 -0
  6. package/lib/encoders/requestEncode.d.ts.map +1 -0
  7. package/lib/encoders/responseDecode.d.ts +1 -1
  8. package/lib/encoders/responseDecode.d.ts.map +1 -0
  9. package/lib/encoders/responseDecode.js.map +1 -1
  10. package/lib/encoders/responseEncode.d.ts.map +1 -0
  11. package/lib/encodingStrategies/index.d.ts.map +1 -0
  12. package/lib/encodingStrategies/sszSnappy/decode.d.ts.map +1 -0
  13. package/lib/encodingStrategies/sszSnappy/encode.d.ts.map +1 -0
  14. package/lib/encodingStrategies/sszSnappy/errors.d.ts.map +1 -0
  15. package/lib/encodingStrategies/sszSnappy/index.d.ts +1 -1
  16. package/lib/encodingStrategies/sszSnappy/index.d.ts.map +1 -0
  17. package/lib/encodingStrategies/sszSnappy/index.js +1 -1
  18. package/lib/encodingStrategies/sszSnappy/snappyFrames/common.d.ts.map +1 -0
  19. package/lib/encodingStrategies/sszSnappy/snappyFrames/compress.d.ts.map +1 -0
  20. package/lib/encodingStrategies/sszSnappy/snappyFrames/uncompress.d.ts.map +1 -0
  21. package/lib/encodingStrategies/sszSnappy/snappyFrames/uncompress.js +4 -6
  22. package/lib/encodingStrategies/sszSnappy/snappyFrames/uncompress.js.map +1 -1
  23. package/lib/encodingStrategies/sszSnappy/utils.d.ts.map +1 -0
  24. package/lib/index.d.ts +7 -7
  25. package/lib/index.d.ts.map +1 -0
  26. package/lib/index.js +5 -5
  27. package/lib/index.js.map +1 -1
  28. package/lib/interface.d.ts.map +1 -0
  29. package/lib/metrics.d.ts.map +1 -0
  30. package/lib/rate_limiter/ReqRespRateLimiter.d.ts.map +1 -0
  31. package/lib/rate_limiter/ReqRespRateLimiter.js +8 -4
  32. package/lib/rate_limiter/ReqRespRateLimiter.js.map +1 -1
  33. package/lib/rate_limiter/rateLimiterGRCA.d.ts.map +1 -0
  34. package/lib/rate_limiter/rateLimiterGRCA.js +5 -3
  35. package/lib/rate_limiter/rateLimiterGRCA.js.map +1 -1
  36. package/lib/rate_limiter/selfRateLimiter.d.ts.map +1 -0
  37. package/lib/rate_limiter/selfRateLimiter.js +9 -2
  38. package/lib/rate_limiter/selfRateLimiter.js.map +1 -1
  39. package/lib/request/errors.d.ts.map +1 -0
  40. package/lib/request/index.d.ts +1 -1
  41. package/lib/request/index.d.ts.map +1 -0
  42. package/lib/request/index.js +1 -1
  43. package/lib/request/index.js.map +1 -1
  44. package/lib/response/errors.d.ts.map +1 -0
  45. package/lib/response/errors.js +2 -0
  46. package/lib/response/errors.js.map +1 -1
  47. package/lib/response/index.d.ts.map +1 -0
  48. package/lib/response/index.js +1 -1
  49. package/lib/response/index.js.map +1 -1
  50. package/lib/types.d.ts.map +1 -0
  51. package/lib/utils/abortableSource.d.ts.map +1 -0
  52. package/lib/utils/bufferedSource.d.ts.map +1 -0
  53. package/lib/utils/bufferedSource.js +3 -1
  54. package/lib/utils/bufferedSource.js.map +1 -1
  55. package/lib/utils/collectExactOne.d.ts.map +1 -0
  56. package/lib/utils/collectMaxResponse.d.ts.map +1 -0
  57. package/lib/utils/errorMessage.d.ts.map +1 -0
  58. package/lib/utils/index.d.ts.map +1 -0
  59. package/lib/utils/onChunk.d.ts.map +1 -0
  60. package/lib/utils/peerId.d.ts.map +1 -0
  61. package/lib/utils/protocolId.d.ts.map +1 -0
  62. package/package.json +12 -12
  63. package/src/ReqResp.ts +289 -0
  64. package/src/encoders/requestDecode.ts +29 -0
  65. package/src/encoders/requestEncode.ts +18 -0
  66. package/src/encoders/responseDecode.ts +169 -0
  67. package/src/encoders/responseEncode.ts +81 -0
  68. package/src/encodingStrategies/index.ts +46 -0
  69. package/src/encodingStrategies/sszSnappy/decode.ts +111 -0
  70. package/src/encodingStrategies/sszSnappy/encode.ts +24 -0
  71. package/src/encodingStrategies/sszSnappy/errors.ts +31 -0
  72. package/src/encodingStrategies/sszSnappy/index.ts +3 -0
  73. package/src/encodingStrategies/sszSnappy/snappyFrames/common.ts +36 -0
  74. package/src/encodingStrategies/sszSnappy/snappyFrames/compress.ts +25 -0
  75. package/src/encodingStrategies/sszSnappy/snappyFrames/uncompress.ts +114 -0
  76. package/src/encodingStrategies/sszSnappy/utils.ts +7 -0
  77. package/src/index.ts +10 -0
  78. package/src/interface.ts +26 -0
  79. package/src/metrics.ts +95 -0
  80. package/src/rate_limiter/ReqRespRateLimiter.ts +107 -0
  81. package/src/rate_limiter/rateLimiterGRCA.ts +92 -0
  82. package/src/rate_limiter/selfRateLimiter.ts +112 -0
  83. package/src/request/errors.ts +119 -0
  84. package/src/request/index.ts +225 -0
  85. package/src/response/errors.ts +50 -0
  86. package/src/response/index.ts +147 -0
  87. package/src/types.ts +158 -0
  88. package/src/utils/abortableSource.ts +80 -0
  89. package/src/utils/bufferedSource.ts +46 -0
  90. package/src/utils/collectExactOne.ts +15 -0
  91. package/src/utils/collectMaxResponse.ts +19 -0
  92. package/src/utils/errorMessage.ts +51 -0
  93. package/src/utils/index.ts +8 -0
  94. package/src/utils/onChunk.ts +12 -0
  95. package/src/utils/peerId.ts +6 -0
  96. package/src/utils/protocolId.ts +44 -0
@@ -0,0 +1,111 @@
1
+ import {decode as varintDecode, encodingLength as varintEncodingLength} from "uint8-varint";
2
+ import {Uint8ArrayList} from "uint8arraylist";
3
+ import {TypeSizes} from "../../types.js";
4
+ import {BufferedSource} from "../../utils/index.js";
5
+ import {SszSnappyError, SszSnappyErrorCode} from "./errors.js";
6
+ import {SnappyFramesUncompress} from "./snappyFrames/uncompress.js";
7
+ import {maxEncodedLen} from "./utils.js";
8
+
9
+ export const MAX_VARINT_BYTES = 10;
10
+
11
+ /**
12
+ * ssz_snappy encoding strategy reader.
13
+ * Consumes a stream source to read encoded header and payload as defined in the spec:
14
+ * ```bnf
15
+ * <encoding-dependent-header> | <encoded-payload>
16
+ * ```
17
+ */
18
+ export async function readSszSnappyPayload(bufferedSource: BufferedSource, type: TypeSizes): Promise<Uint8Array> {
19
+ const sszDataLength = await readSszSnappyHeader(bufferedSource, type);
20
+
21
+ return readSszSnappyBody(bufferedSource, sszDataLength);
22
+ }
23
+
24
+ /**
25
+ * Reads `<encoding-dependent-header>` for ssz-snappy.
26
+ * encoding-header ::= the length of the raw SSZ bytes, encoded as an unsigned protobuf varint
27
+ */
28
+ export async function readSszSnappyHeader(bufferedSource: BufferedSource, type: TypeSizes): Promise<number> {
29
+ for await (const buffer of bufferedSource) {
30
+ // Get next bytes if empty
31
+ if (buffer.length === 0) {
32
+ continue;
33
+ }
34
+
35
+ let sszDataLength: number;
36
+ try {
37
+ sszDataLength = varintDecode(buffer.subarray());
38
+ } catch (_e) {
39
+ throw new SszSnappyError({code: SszSnappyErrorCode.INVALID_VARINT_BYTES_COUNT, bytes: Infinity});
40
+ }
41
+
42
+ // MUST validate: the unsigned protobuf varint used for the length-prefix MUST not be longer than 10 bytes
43
+ // encodingLength function only returns 1-8 inclusive
44
+ const varintBytes = varintEncodingLength(sszDataLength);
45
+ buffer.consume(varintBytes);
46
+
47
+ // MUST validate: the length-prefix is within the expected size bounds derived from the payload SSZ type.
48
+ const minSize = type.minSize;
49
+ const maxSize = type.maxSize;
50
+ if (sszDataLength < minSize) {
51
+ throw new SszSnappyError({code: SszSnappyErrorCode.UNDER_SSZ_MIN_SIZE, minSize, sszDataLength});
52
+ }
53
+ if (sszDataLength > maxSize) {
54
+ throw new SszSnappyError({code: SszSnappyErrorCode.OVER_SSZ_MAX_SIZE, maxSize, sszDataLength});
55
+ }
56
+
57
+ return sszDataLength;
58
+ }
59
+
60
+ throw new SszSnappyError({code: SszSnappyErrorCode.SOURCE_ABORTED});
61
+ }
62
+
63
/**
 * Reads `<encoded-payload>` for ssz-snappy and decompresses it.
 * The returned bytes can be SSZ deserialized.
 *
 * @param bufferedSource stream source buffering incoming chunks; consumed frame by frame
 * @param sszDataLength expected byte length of the uncompressed SSZ payload (from the header)
 * @returns exactly `sszDataLength` uncompressed bytes
 * @throws SszSnappyError on over-read, decompression failure, oversized output, or early EOF
 */
export async function readSszSnappyBody(bufferedSource: BufferedSource, sszDataLength: number): Promise<Uint8Array> {
  const decompressor = new SnappyFramesUncompress();
  const uncompressedData = new Uint8ArrayList();
  // Total compressed bytes seen so far, bounded by the worst-case snappy expansion
  let readBytes = 0;

  for await (const buffer of bufferedSource) {
    // SHOULD NOT read more than max_encoded_len(n) bytes after reading the SSZ length-prefix n from the header
    readBytes += buffer.length;
    if (readBytes > maxEncodedLen(sszDataLength)) {
      throw new SszSnappyError({code: SszSnappyErrorCode.TOO_MUCH_BYTES_READ, readBytes, sszDataLength});
    }

    // No bytes left to consume, get next
    if (buffer.length === 0) {
      continue;
    }

    // stream contents can be passed through a buffered Snappy reader to decompress frame by frame
    // The decompressor is stateful: partial frames are retained internally and `null` is
    // returned until at least one whole frame has been decoded.
    try {
      const uncompressed = decompressor.uncompress(buffer);
      buffer.consume(buffer.length);
      if (uncompressed !== null) {
        uncompressedData.append(uncompressed);
      }
    } catch (e) {
      throw new SszSnappyError({code: SszSnappyErrorCode.DECOMPRESSOR_ERROR, decompressorError: e as Error});
    }

    // SHOULD consider invalid reading more bytes than `n` SSZ bytes
    if (uncompressedData.length > sszDataLength) {
      throw new SszSnappyError({code: SszSnappyErrorCode.TOO_MANY_BYTES, sszDataLength});
    }

    // Keep reading chunks until `n` SSZ bytes
    if (uncompressedData.length < sszDataLength) {
      continue;
    }

    // buffer.length === n
    return uncompressedData.subarray(0, sszDataLength);
  }

  // SHOULD consider invalid: An early EOF before fully reading the declared length-prefix worth of SSZ bytes
  throw new SszSnappyError({code: SszSnappyErrorCode.SOURCE_ABORTED});
}
@@ -0,0 +1,24 @@
1
+ import {encode as varintEncode} from "uint8-varint";
2
+ import {encodeSnappy} from "./snappyFrames/compress.js";
3
+
4
+ /**
5
+ * ssz_snappy encoding strategy writer.
6
+ * Yields byte chunks for encoded header and payload as defined in the spec:
7
+ * ```
8
+ * <encoding-dependent-header> | <encoded-payload>
9
+ * ```
10
+ */
11
+ export const writeSszSnappyPayload = encodeSszSnappy as (bytes: Uint8Array) => AsyncGenerator<Buffer>;
12
+
13
+ /**
14
+ * Buffered Snappy writer
15
+ */
16
+ export async function* encodeSszSnappy(bytes: Buffer): AsyncGenerator<Buffer> {
17
+ // MUST encode the length of the raw SSZ bytes, encoded as an unsigned protobuf varint
18
+ const varint = varintEncode(bytes.length);
19
+ yield Buffer.from(varint.buffer, varint.byteOffset, varint.byteLength);
20
+
21
+ // By first computing and writing the SSZ byte length, the SSZ encoder can then directly
22
+ // write the chunk contents to the stream. Snappy writer compresses frame by frame
23
+ yield* encodeSnappy(bytes);
24
+ }
@@ -0,0 +1,31 @@
1
+ import {LodestarError} from "@lodestar/utils";
2
+
3
/** Failure modes of the ssz_snappy encoding strategy (read and write paths) */
export enum SszSnappyErrorCode {
  /** Invalid number of bytes for protobuf varint */
  INVALID_VARINT_BYTES_COUNT = "SSZ_SNAPPY_ERROR_INVALID_VARINT_BYTES_COUNT",
  /** Parsed sszDataLength is under the SSZ type min size */
  UNDER_SSZ_MIN_SIZE = "SSZ_SNAPPY_ERROR_UNDER_SSZ_MIN_SIZE",
  /** Parsed sszDataLength is over the SSZ type max size */
  OVER_SSZ_MAX_SIZE = "SSZ_SNAPPY_ERROR_OVER_SSZ_MAX_SIZE",
  /** Read more compressed bytes than the worst-case encoded size for sszDataLength */
  TOO_MUCH_BYTES_READ = "SSZ_SNAPPY_ERROR_TOO_MUCH_BYTES_READ",
  /** Snappy frame decompressor threw while processing the stream */
  DECOMPRESSOR_ERROR = "SSZ_SNAPPY_ERROR_DECOMPRESSOR_ERROR",
  // NOTE(review): DESERIALIZE_ERROR / SERIALIZE_ERROR are not raised in this module —
  // presumably used by SSZ (de)serialization call sites elsewhere; confirm against callers.
  DESERIALIZE_ERROR = "SSZ_SNAPPY_ERROR_DESERIALIZE_ERROR",
  SERIALIZE_ERROR = "SSZ_SNAPPY_ERROR_SERIALIZE_ERROR",
  /** Received more bytes than specified sszDataLength */
  TOO_MANY_BYTES = "SSZ_SNAPPY_ERROR_TOO_MANY_BYTES",
  /** Source aborted before reading sszDataLength bytes */
  SOURCE_ABORTED = "SSZ_SNAPPY_ERROR_SOURCE_ABORTED",
}

/** Discriminated union of error metadata, keyed by error code */
type SszSnappyErrorType =
  | {code: SszSnappyErrorCode.INVALID_VARINT_BYTES_COUNT; bytes: number}
  | {code: SszSnappyErrorCode.UNDER_SSZ_MIN_SIZE; minSize: number; sszDataLength: number}
  | {code: SszSnappyErrorCode.OVER_SSZ_MAX_SIZE; maxSize: number; sszDataLength: number}
  | {code: SszSnappyErrorCode.TOO_MUCH_BYTES_READ; readBytes: number; sszDataLength: number}
  | {code: SszSnappyErrorCode.DECOMPRESSOR_ERROR; decompressorError: Error}
  | {code: SszSnappyErrorCode.DESERIALIZE_ERROR; deserializeError: Error}
  | {code: SszSnappyErrorCode.SERIALIZE_ERROR; serializeError: Error}
  | {code: SszSnappyErrorCode.TOO_MANY_BYTES; sszDataLength: number}
  | {code: SszSnappyErrorCode.SOURCE_ABORTED};

/** Typed error carrying a code plus code-specific metadata for ssz_snappy failures */
export class SszSnappyError extends LodestarError<SszSnappyErrorType> {}
@@ -0,0 +1,3 @@
1
// Barrel file re-exporting the ssz_snappy encoding strategy public API
export * from "./decode.js";
export * from "./encode.js";
export * from "./errors.js";
@@ -0,0 +1,36 @@
1
+ import crc32c from "@chainsafe/fast-crc32c";
2
+
3
/** Snappy framing format chunk types: the first byte of every frame header */
export enum ChunkType {
  // Stream identifier frame; the decoder requires it before any data frames
  IDENTIFIER = 0xff,
  COMPRESSED = 0x00,
  UNCOMPRESSED = 0x01,
  PADDING = 0xfe,
  // Representative value for the reserved skippable range 0x80-0xfd
  SKIPPABLE = 0x80,
}

/** ASCII bytes of "sNaPpY" — the identifier frame payload */
export const IDENTIFIER = Buffer.from([0x73, 0x4e, 0x61, 0x50, 0x70, 0x59]);
/** Complete identifier frame: type byte 0xff, 3-byte little-endian length (6), then "sNaPpY" */
export const IDENTIFIER_FRAME = Buffer.from([0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59]);

/**
 * As per the snappy framing format for streams, the size of any uncompressed chunk can be
 * no longer than 65536 bytes.
 *
 * From: https://github.com/google/snappy/blob/main/framing_format.txt#L90:L92
 */
export const UNCOMPRESSED_CHUNK_SIZE = 65536;
21
+
22
+ export function crc(value: Uint8Array): Buffer {
23
+ const x = crc32c.calculate(value);
24
+ const result = Buffer.allocUnsafe?.(4) ?? Buffer.alloc(4);
25
+
26
+ // As defined in section 3 of https://github.com/google/snappy/blob/master/framing_format.txt
27
+ // And other implementations for reference:
28
+ // Go: https://github.com/golang/snappy/blob/2e65f85255dbc3072edf28d6b5b8efc472979f5a/snappy.go#L97
29
+ // Python: https://github.com/andrix/python-snappy/blob/602e9c10d743f71bef0bac5e4c4dffa17340d7b3/snappy/snappy.py#L70
30
+ // Mask the right hand to (32 - 17) = 15 bits -> 0x7fff, to keep correct 32 bit values.
31
+ // Shift the left hand with >>> for correct 32 bit intermediate result.
32
+ // Then final >>> 0 for 32 bits output
33
+ result.writeUInt32LE((((x >>> 15) | ((x & 0x7fff) << 17)) + 0xa282ead8) >>> 0, 0);
34
+
35
+ return result;
36
+ }
@@ -0,0 +1,25 @@
1
+ import snappy from "snappy";
2
+ import {ChunkType, IDENTIFIER_FRAME, UNCOMPRESSED_CHUNK_SIZE, crc} from "./common.js";
3
+
4
+ // The logic in this file is largely copied (in simplified form) from https://github.com/ChainSafe/node-snappy-stream/
5
+
6
+ export async function* encodeSnappy(bytes: Buffer): AsyncGenerator<Buffer> {
7
+ yield IDENTIFIER_FRAME;
8
+
9
+ for (let i = 0; i < bytes.length; i += UNCOMPRESSED_CHUNK_SIZE) {
10
+ const chunk = bytes.subarray(i, i + UNCOMPRESSED_CHUNK_SIZE);
11
+ const compressed = snappy.compressSync(chunk);
12
+ if (compressed.length < chunk.length) {
13
+ const size = compressed.length + 4;
14
+ yield Buffer.concat([Buffer.from([ChunkType.COMPRESSED, size, size >> 8, size >> 16]), crc(chunk), compressed]);
15
+ } else {
16
+ const size = chunk.length + 4;
17
+ yield Buffer.concat([
18
+ //
19
+ Buffer.from([ChunkType.UNCOMPRESSED, size, size >> 8, size >> 16]),
20
+ crc(chunk),
21
+ chunk,
22
+ ]);
23
+ }
24
+ }
25
+ }
@@ -0,0 +1,114 @@
1
+ import {uncompress} from "snappyjs";
2
+ import {Uint8ArrayList} from "uint8arraylist";
3
+ import {ChunkType, IDENTIFIER, UNCOMPRESSED_CHUNK_SIZE, crc} from "./common.js";
4
+
5
+ export class SnappyFramesUncompress {
6
+ private buffer = new Uint8ArrayList();
7
+
8
+ private state: UncompressState = {
9
+ foundIdentifier: false,
10
+ };
11
+
12
+ /**
13
+ * Accepts chunk of data containing some part of snappy frames stream
14
+ * @param chunk
15
+ * @return Buffer if there is one or more whole frames, null if it's partial
16
+ */
17
+ uncompress(chunk: Uint8ArrayList): Uint8ArrayList | null {
18
+ this.buffer.append(chunk);
19
+ const result = new Uint8ArrayList();
20
+ while (this.buffer.length > 0) {
21
+ if (this.buffer.length < 4) break;
22
+
23
+ const type = getChunkType(this.buffer.get(0));
24
+
25
+ if (!this.state.foundIdentifier && type !== ChunkType.IDENTIFIER) {
26
+ throw "malformed input: must begin with an identifier";
27
+ }
28
+
29
+ const frameSize = getFrameSize(this.buffer, 1);
30
+
31
+ if (this.buffer.length - 4 < frameSize) {
32
+ break;
33
+ }
34
+
35
+ const frame = this.buffer.subarray(4, 4 + frameSize);
36
+ this.buffer.consume(4 + frameSize);
37
+
38
+ switch (type) {
39
+ case ChunkType.IDENTIFIER: {
40
+ if (!Buffer.prototype.equals.call(frame, IDENTIFIER)) {
41
+ throw "malformed input: bad identifier";
42
+ }
43
+ this.state.foundIdentifier = true;
44
+ continue;
45
+ }
46
+ case ChunkType.PADDING:
47
+ case ChunkType.SKIPPABLE:
48
+ continue;
49
+ case ChunkType.COMPRESSED: {
50
+ const checksum = frame.subarray(0, 4);
51
+ const data = frame.subarray(4);
52
+
53
+ const uncompressed = uncompress(data, UNCOMPRESSED_CHUNK_SIZE);
54
+ if (crc(uncompressed).compare(checksum) !== 0) {
55
+ throw "malformed input: bad checksum";
56
+ }
57
+ result.append(uncompressed);
58
+ break;
59
+ }
60
+ case ChunkType.UNCOMPRESSED: {
61
+ const checksum = frame.subarray(0, 4);
62
+ const uncompressed = frame.subarray(4);
63
+
64
+ if (uncompressed.length > UNCOMPRESSED_CHUNK_SIZE) {
65
+ throw "malformed input: too large";
66
+ }
67
+ if (crc(uncompressed).compare(checksum) !== 0) {
68
+ throw "malformed input: bad checksum";
69
+ }
70
+ result.append(uncompressed);
71
+ break;
72
+ }
73
+ }
74
+ }
75
+ if (result.length === 0) {
76
+ return null;
77
+ }
78
+ return result;
79
+ }
80
+
81
+ reset(): void {
82
+ this.buffer = new Uint8ArrayList();
83
+ this.state = {
84
+ foundIdentifier: false,
85
+ };
86
+ }
87
+ }
88
+
89
/** Decoder state: whether the mandatory stream identifier frame has been seen yet */
type UncompressState = {
  foundIdentifier: boolean;
};
92
+
93
+ function getFrameSize(buffer: Uint8ArrayList, offset: number): number {
94
+ return buffer.get(offset) + (buffer.get(offset + 1) << 8) + (buffer.get(offset + 2) << 16);
95
+ }
96
+
97
+ function getChunkType(value: number): ChunkType {
98
+ switch (value) {
99
+ case ChunkType.IDENTIFIER:
100
+ return ChunkType.IDENTIFIER;
101
+ case ChunkType.COMPRESSED:
102
+ return ChunkType.COMPRESSED;
103
+ case ChunkType.UNCOMPRESSED:
104
+ return ChunkType.UNCOMPRESSED;
105
+ case ChunkType.PADDING:
106
+ return ChunkType.PADDING;
107
+ default:
108
+ // https://github.com/google/snappy/blob/main/framing_format.txt#L129
109
+ if (value >= 0x80 && value <= 0xfd) {
110
+ return ChunkType.SKIPPABLE;
111
+ }
112
+ throw new Error("Unsupported snappy chunk type");
113
+ }
114
+ }
@@ -0,0 +1,7 @@
1
+ /**
2
+ * Computes the worst-case compression result by SSZ-Snappy
3
+ */
4
+ export function maxEncodedLen(sszLength: number): number {
5
+ // worst-case compression result by Snappy
6
+ return 32 + sszLength + sszLength / 6;
7
+ }
package/src/index.ts ADDED
@@ -0,0 +1,10 @@
1
// Public entrypoint of the reqresp package: re-exports the ReqResp class, protocol
// types, error classes, metrics, and stream utilities
export * from "./interface.js";
export type {Metrics} from "./metrics.js";
export {getMetrics} from "./metrics.js";
export type {ReqRespOpts} from "./ReqResp.js";
export {ReqResp} from "./ReqResp.js";
export * from "./request/errors.js";
export * from "./response/errors.js";
export * from "./types.js";
export {Encoding as ReqRespEncoding} from "./types.js"; // Expose enums renamed
export {collectExactOne, collectMaxResponse, formatProtocolID, parseProtocolID} from "./utils/index.js";
@@ -0,0 +1,26 @@
1
// Request/Response constants
export enum RespStatus {
  /**
   * A normal response follows, with contents matching the expected message schema and encoding specified in the request
   */
  SUCCESS = 0,
  /**
   * The contents of the request are semantically invalid, or the payload is malformed,
   * or could not be understood. The response payload adheres to the ErrorMessage schema
   */
  INVALID_REQUEST = 1,
  /**
   * The responder encountered an error while processing the request. The response payload adheres to the ErrorMessage schema
   */
  SERVER_ERROR = 2,
  /**
   * The responder does not have requested resource. The response payload adheres to the ErrorMessage schema (described below). Note: This response code is only valid as a response to BlocksByRange
   */
  RESOURCE_UNAVAILABLE = 3,
  /**
   * Our node does not have bandwidth to serve requests due to either per-peer quota or total quota.
   * NOTE(review): 139 lies outside the spec-reserved 0-3 range — presumably an
   * implementation-specific code; confirm peers interpret it consistently.
   */
  RATE_LIMITED = 139,
}

/** Any non-success response status — the codes usable when sending an error response */
export type RpcResponseStatusError = Exclude<RespStatus, RespStatus.SUCCESS>;
package/src/metrics.ts ADDED
@@ -0,0 +1,95 @@
1
+ import {MetricsRegisterExtra} from "@lodestar/utils";
2
+ import {RequestErrorCode} from "./request/errors.js";
3
+
4
/** Shape of the metrics object returned by {@link getMetrics} */
export type Metrics = ReturnType<typeof getMetrics>;

/**
 * A collection of metrics used throughout the ReqResp behaviour.
 * (All metric names are prefixed `beacon_reqresp_`.)
 *
 * NOTE(review): several "_total" metrics are registered as gauges rather than counters —
 * presumably intentional for the register implementation in use; confirm before changing.
 */
export function getMetrics(register: MetricsRegisterExtra) {
  // Using function style instead of class to prevent having to re-declare all MetricsPrometheus types.

  return {
    outgoingRequests: register.gauge<{method: string}>({
      name: "beacon_reqresp_outgoing_requests_total",
      help: "Counts total requests done per method",
      labelNames: ["method"],
    }),
    outgoingOpenedStreams: register.counter<{method: string}>({
      name: "beacon_reqresp_outgoing_opened_streams_total",
      help: "Counts total opened streams per method",
      labelNames: ["method"],
    }),
    outgoingClosedStreams: register.counter<{method: string}>({
      name: "beacon_reqresp_outgoing_closed_streams_total",
      help: "Counts total closed streams per method",
      labelNames: ["method"],
    }),
    outgoingRequestRoundtripTime: register.histogram<{method: string}>({
      name: "beacon_reqresp_outgoing_request_roundtrip_time_seconds",
      help: "Histogram of outgoing requests round-trip time",
      labelNames: ["method"],
      // Spec sets RESP_TIMEOUT = 10 sec
      buckets: [0.1, 0.2, 0.5, 1, 5, 10, 15, 60],
    }),
    outgoingErrors: register.gauge<{method: string}>({
      name: "beacon_reqresp_outgoing_requests_error_total",
      help: "Counts total failed requests done per method",
      labelNames: ["method"],
    }),
    outgoingErrorReasons: register.gauge<{reason: RequestErrorCode}>({
      name: "beacon_reqresp_outgoing_requests_error_reason_total",
      help: "Count total outgoing request errors by reason",
      labelNames: ["reason"],
    }),
    incomingRequests: register.gauge<{method: string}>({
      name: "beacon_reqresp_incoming_requests_total",
      help: "Counts total responses handled per method",
      labelNames: ["method"],
    }),
    incomingOpenedStreams: register.counter<{method: string}>({
      name: "beacon_reqresp_incoming_opened_streams_total",
      help: "Counts total incoming opened streams per method",
      labelNames: ["method"],
    }),
    incomingClosedStreams: register.counter<{method: string}>({
      name: "beacon_reqresp_incoming_closed_streams_total",
      help: "Counts total incoming closed streams per method",
      labelNames: ["method"],
    }),
    incomingRequestHandlerTime: register.histogram<{method: string}>({
      name: "beacon_reqresp_incoming_request_handler_time_seconds",
      help: "Histogram of incoming requests internal handling time",
      labelNames: ["method"],
      // Spec sets RESP_TIMEOUT = 10 sec
      buckets: [0.1, 0.2, 0.5, 1, 5, 10],
    }),
    incomingErrors: register.gauge<{method: string}>({
      name: "beacon_reqresp_incoming_requests_error_total",
      help: "Counts total failed responses handled per method",
      labelNames: ["method"],
    }),
    outgoingResponseTTFB: register.histogram<{method: string}>({
      name: "beacon_reqresp_outgoing_response_ttfb_seconds",
      help: "Time to first byte (TTFB) for outgoing responses",
      labelNames: ["method"],
      // Spec sets TTFB_TIMEOUT = 5 sec
      buckets: [0.1, 1, 5],
    }),
    incomingResponseTTFB: register.histogram<{method: string}>({
      name: "beacon_reqresp_incoming_response_ttfb_seconds",
      help: "Time to first byte (TTFB) for incoming responses",
      labelNames: ["method"],
      // Spec sets TTFB_TIMEOUT = 5 sec
      buckets: [0.1, 1, 5],
    }),
    dialErrors: register.gauge({
      name: "beacon_reqresp_dial_errors_total",
      help: "Count total dial errors",
    }),
    selfRateLimiterPeerCount: register.gauge({
      name: "beacon_reqresp_self_rate_limiter_peer_count",
      help: "Count of peers tracked by the self rate limiter",
    }),
  };
}
@@ -0,0 +1,107 @@
1
+ import {PeerId} from "@libp2p/interface";
2
+ import {InboundRateLimitQuota, ReqRespRateLimiterOpts} from "../types.js";
3
+ import {RateLimiterGRCA} from "./rateLimiterGRCA.js";
4
+
5
/** Sometimes a peer request comes AFTER a libp2p disconnect event; check for such peers every 10 minutes */
const CHECK_DISCONNECTED_PEERS_INTERVAL_MS = 10 * 60 * 1000;

/** Peers that have not sent us a request for 5 minutes are considered disconnected */
const DISCONNECTED_TIMEOUT_MS = 5 * 60 * 1000;

/** Formatted protocol ID string used as the rate-limiter map key */
type ProtocolID = string;
12
+
13
+ export class ReqRespRateLimiter {
14
+ private readonly rateLimitersPerPeer = new Map<ProtocolID, RateLimiterGRCA<string>>();
15
+ private readonly rateLimitersTotal = new Map<ProtocolID, RateLimiterGRCA<null>>();
16
+ /** Interval to check lastSeenMessagesByPeer */
17
+ private cleanupInterval: NodeJS.Timeout | undefined = undefined;
18
+ private rateLimitMultiplier: number;
19
+ /** Periodically check this to remove tracker of disconnected peers */
20
+ private lastSeenRequestsByPeer: Map<string, number>;
21
+
22
+ constructor(private readonly opts?: ReqRespRateLimiterOpts) {
23
+ this.rateLimitMultiplier = opts?.rateLimitMultiplier ?? 1;
24
+ this.lastSeenRequestsByPeer = new Map();
25
+ }
26
+
27
+ get enabled(): boolean {
28
+ return this.rateLimitMultiplier > 0;
29
+ }
30
+
31
+ setRateLimits(protocolID: ProtocolID, rateLimits: InboundRateLimitQuota): void {
32
+ if (!this.enabled) {
33
+ return;
34
+ }
35
+
36
+ if (rateLimits.byPeer) {
37
+ this.rateLimitersPerPeer.set(
38
+ protocolID,
39
+ RateLimiterGRCA.fromQuota<string>({
40
+ quotaTimeMs: rateLimits.byPeer.quotaTimeMs,
41
+ quota: rateLimits.byPeer.quota * this.rateLimitMultiplier,
42
+ })
43
+ );
44
+ }
45
+
46
+ if (rateLimits.total) {
47
+ this.rateLimitersTotal.set(
48
+ protocolID,
49
+ RateLimiterGRCA.fromQuota<null>({
50
+ quotaTimeMs: rateLimits.total.quotaTimeMs,
51
+ quota: rateLimits.total.quota * this.rateLimitMultiplier,
52
+ })
53
+ );
54
+ }
55
+ }
56
+
57
+ allows(peerId: PeerId, protocolID: string, requestCount: number): boolean {
58
+ if (!this.enabled) {
59
+ return true;
60
+ }
61
+
62
+ const peerIdStr = peerId.toString();
63
+ this.lastSeenRequestsByPeer.set(peerIdStr, Date.now());
64
+
65
+ const byPeer = this.rateLimitersPerPeer.get(protocolID);
66
+ const total = this.rateLimitersTotal.get(protocolID);
67
+
68
+ if ((byPeer && !byPeer.allows(peerIdStr, requestCount)) || (total && !total.allows(null, requestCount))) {
69
+ this.opts?.onRateLimit?.(peerId, protocolID);
70
+ return false;
71
+ }
72
+
73
+ return true;
74
+ }
75
+
76
+ prune(peerId: PeerId): void {
77
+ const peerIdStr = peerId.toString();
78
+ this.pruneByPeerIdStr(peerIdStr);
79
+ }
80
+
81
+ start(): void {
82
+ this.cleanupInterval = setInterval(this.checkDisconnectedPeers.bind(this), CHECK_DISCONNECTED_PEERS_INTERVAL_MS);
83
+ }
84
+
85
+ stop(): void {
86
+ if (this.cleanupInterval !== undefined) {
87
+ clearInterval(this.cleanupInterval);
88
+ }
89
+ }
90
+
91
+ private pruneByPeerIdStr(peerIdStr: string): void {
92
+ // Check for every method and version to cleanup
93
+ for (const method of this.rateLimitersPerPeer.values()) {
94
+ method.pruneByKey(peerIdStr);
95
+ }
96
+ this.lastSeenRequestsByPeer.delete(peerIdStr);
97
+ }
98
+
99
+ private checkDisconnectedPeers(): void {
100
+ const now = Date.now();
101
+ for (const [peerIdStr, lastSeenTime] of this.lastSeenRequestsByPeer.entries()) {
102
+ if (now - lastSeenTime >= DISCONNECTED_TIMEOUT_MS) {
103
+ this.pruneByPeerIdStr(peerIdStr);
104
+ }
105
+ }
106
+ }
107
+ }