@lodestar/reqresp 1.35.0-dev.83de5b8dea → 1.35.0-dev.8689cc3545
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/ReqResp.d.ts.map +1 -0
- package/lib/encoders/requestDecode.d.ts.map +1 -0
- package/lib/encoders/requestEncode.d.ts.map +1 -0
- package/lib/encoders/responseDecode.d.ts.map +1 -0
- package/lib/encoders/responseEncode.d.ts.map +1 -0
- package/lib/encodingStrategies/index.d.ts.map +1 -0
- package/lib/encodingStrategies/sszSnappy/decode.d.ts.map +1 -0
- package/lib/encodingStrategies/sszSnappy/encode.d.ts.map +1 -0
- package/lib/encodingStrategies/sszSnappy/errors.d.ts.map +1 -0
- package/lib/encodingStrategies/sszSnappy/index.d.ts.map +1 -0
- package/lib/encodingStrategies/sszSnappy/snappyFrames/common.d.ts.map +1 -0
- package/lib/encodingStrategies/sszSnappy/snappyFrames/compress.d.ts.map +1 -0
- package/lib/encodingStrategies/sszSnappy/snappyFrames/uncompress.d.ts.map +1 -0
- package/lib/encodingStrategies/sszSnappy/utils.d.ts.map +1 -0
- package/lib/index.d.ts.map +1 -0
- package/lib/interface.d.ts.map +1 -0
- package/lib/metrics.d.ts.map +1 -0
- package/lib/rate_limiter/ReqRespRateLimiter.d.ts.map +1 -0
- package/lib/rate_limiter/rateLimiterGRCA.d.ts.map +1 -0
- package/lib/rate_limiter/selfRateLimiter.d.ts.map +1 -0
- package/lib/request/errors.d.ts.map +1 -0
- package/lib/request/index.d.ts.map +1 -0
- package/lib/response/errors.d.ts.map +1 -0
- package/lib/response/index.d.ts.map +1 -0
- package/lib/types.d.ts.map +1 -0
- package/lib/utils/abortableSource.d.ts.map +1 -0
- package/lib/utils/bufferedSource.d.ts.map +1 -0
- package/lib/utils/collectExactOne.d.ts.map +1 -0
- package/lib/utils/collectMaxResponse.d.ts.map +1 -0
- package/lib/utils/errorMessage.d.ts.map +1 -0
- package/lib/utils/index.d.ts.map +1 -0
- package/lib/utils/onChunk.d.ts.map +1 -0
- package/lib/utils/peerId.d.ts.map +1 -0
- package/lib/utils/protocolId.d.ts.map +1 -0
- package/package.json +10 -12
- package/src/ReqResp.ts +289 -0
- package/src/encoders/requestDecode.ts +29 -0
- package/src/encoders/requestEncode.ts +18 -0
- package/src/encoders/responseDecode.ts +169 -0
- package/src/encoders/responseEncode.ts +81 -0
- package/src/encodingStrategies/index.ts +46 -0
- package/src/encodingStrategies/sszSnappy/decode.ts +111 -0
- package/src/encodingStrategies/sszSnappy/encode.ts +24 -0
- package/src/encodingStrategies/sszSnappy/errors.ts +31 -0
- package/src/encodingStrategies/sszSnappy/index.ts +3 -0
- package/src/encodingStrategies/sszSnappy/snappyFrames/common.ts +36 -0
- package/src/encodingStrategies/sszSnappy/snappyFrames/compress.ts +25 -0
- package/src/encodingStrategies/sszSnappy/snappyFrames/uncompress.ts +114 -0
- package/src/encodingStrategies/sszSnappy/utils.ts +7 -0
- package/src/index.ts +10 -0
- package/src/interface.ts +26 -0
- package/src/metrics.ts +95 -0
- package/src/rate_limiter/ReqRespRateLimiter.ts +107 -0
- package/src/rate_limiter/rateLimiterGRCA.ts +92 -0
- package/src/rate_limiter/selfRateLimiter.ts +112 -0
- package/src/request/errors.ts +119 -0
- package/src/request/index.ts +225 -0
- package/src/response/errors.ts +50 -0
- package/src/response/index.ts +147 -0
- package/src/types.ts +158 -0
- package/src/utils/abortableSource.ts +80 -0
- package/src/utils/bufferedSource.ts +46 -0
- package/src/utils/collectExactOne.ts +15 -0
- package/src/utils/collectMaxResponse.ts +19 -0
- package/src/utils/errorMessage.ts +51 -0
- package/src/utils/index.ts +8 -0
- package/src/utils/onChunk.ts +12 -0
- package/src/utils/peerId.ts +6 -0
- package/src/utils/protocolId.ts +44 -0
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
import {Uint8ArrayList} from "uint8arraylist";
|
|
2
|
+
import {ForkName} from "@lodestar/params";
|
|
3
|
+
import {readEncodedPayload} from "../encodingStrategies/index.js";
|
|
4
|
+
import {RespStatus} from "../interface.js";
|
|
5
|
+
import {ResponseError} from "../response/index.js";
|
|
6
|
+
import {
|
|
7
|
+
CONTEXT_BYTES_FORK_DIGEST_LENGTH,
|
|
8
|
+
ContextBytesFactory,
|
|
9
|
+
ContextBytesType,
|
|
10
|
+
MixedProtocol,
|
|
11
|
+
ResponseIncoming,
|
|
12
|
+
} from "../types.js";
|
|
13
|
+
import {BufferedSource, decodeErrorMessage} from "../utils/index.js";
|
|
14
|
+
|
|
15
|
+
/**
 * Internal helper type to signal stream ended early.
 * Returned by readResultHeader() when the source finishes before yielding a status byte.
 */
enum StreamStatus {
  Ended = "STREAM_ENDED",
}
|
|
21
|
+
|
|
22
|
+
/**
|
|
23
|
+
* Consumes a stream source to read a `<response>`
|
|
24
|
+
* ```bnf
|
|
25
|
+
* response ::= <response_chunk>*
|
|
26
|
+
* response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
|
|
27
|
+
* result ::= "0" | "1" | "2" | ["128" ... "255"]
|
|
28
|
+
* ```
|
|
29
|
+
*/
|
|
30
|
+
export function responseDecode(
|
|
31
|
+
protocol: MixedProtocol,
|
|
32
|
+
cbs: {
|
|
33
|
+
onFirstHeader: () => void;
|
|
34
|
+
onFirstResponseChunk: () => void;
|
|
35
|
+
}
|
|
36
|
+
): (source: AsyncIterable<Uint8Array | Uint8ArrayList>) => AsyncIterable<ResponseIncoming> {
|
|
37
|
+
return async function* responseDecodeSink(source) {
|
|
38
|
+
const bufferedSource = new BufferedSource(source as AsyncGenerator<Uint8ArrayList>);
|
|
39
|
+
|
|
40
|
+
let readFirstHeader = false;
|
|
41
|
+
let readFirstResponseChunk = false;
|
|
42
|
+
|
|
43
|
+
// Consumers of `responseDecode()` may limit the number of <response_chunk> and break out of the while loop
|
|
44
|
+
while (!bufferedSource.isDone) {
|
|
45
|
+
const status = await readResultHeader(bufferedSource);
|
|
46
|
+
|
|
47
|
+
// Stream is only allowed to end at the start of a <response_chunk> block
|
|
48
|
+
// The happens when source ends before readResultHeader() can fetch 1 byte
|
|
49
|
+
if (status === StreamStatus.Ended) {
|
|
50
|
+
break;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
if (!readFirstHeader) {
|
|
54
|
+
cbs.onFirstHeader();
|
|
55
|
+
readFirstHeader = true;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
// For multiple chunks, only the last chunk is allowed to have a non-zero error
|
|
59
|
+
// code (i.e. The chunk stream is terminated once an error occurs
|
|
60
|
+
if (status !== RespStatus.SUCCESS) {
|
|
61
|
+
const errorMessage = await readErrorMessage(bufferedSource);
|
|
62
|
+
throw new ResponseError(status, errorMessage);
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
const forkName = await readContextBytes(protocol.contextBytes, bufferedSource);
|
|
66
|
+
const typeSizes = protocol.responseSizes(forkName);
|
|
67
|
+
const chunkData = await readEncodedPayload(bufferedSource, protocol.encoding, typeSizes);
|
|
68
|
+
|
|
69
|
+
yield {
|
|
70
|
+
data: chunkData,
|
|
71
|
+
fork: forkName,
|
|
72
|
+
protocolVersion: protocol.version,
|
|
73
|
+
};
|
|
74
|
+
|
|
75
|
+
if (!readFirstResponseChunk) {
|
|
76
|
+
cbs.onFirstResponseChunk();
|
|
77
|
+
readFirstResponseChunk = true;
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
};
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
/**
|
|
84
|
+
* Consumes a stream source to read a `<result>`
|
|
85
|
+
* ```bnf
|
|
86
|
+
* result ::= "0" | "1" | "2" | ["128" ... "255"]
|
|
87
|
+
* ```
|
|
88
|
+
* `<response_chunk>` starts with a single-byte response code which determines the contents of the response_chunk
|
|
89
|
+
*/
|
|
90
|
+
export async function readResultHeader(bufferedSource: BufferedSource): Promise<RespStatus | StreamStatus> {
|
|
91
|
+
for await (const buffer of bufferedSource) {
|
|
92
|
+
const status = buffer.get(0);
|
|
93
|
+
buffer.consume(1);
|
|
94
|
+
|
|
95
|
+
// If first chunk had zero bytes status === null, get next
|
|
96
|
+
if (status !== null) {
|
|
97
|
+
return status;
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
return StreamStatus.Ended;
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
/**
 * Consumes a stream source to read an optional `<error_response>?`
 * ```bnf
 * error_response ::= <result> | <error_message>?
 * result ::= "1" | "2" | ["128" ... "255"]
 * ```
 * Returns the decoded error message, or a hex dump of the received bytes when
 * decodeErrorMessage() rejects them.
 */
export async function readErrorMessage(bufferedSource: BufferedSource): Promise<string> {
  // Read at least 256 or wait for the stream to end
  let length: number | undefined;
  for await (const buffer of bufferedSource) {
    // Wait for next chunk with bytes or for the stream to end
    // Note: The entire <error_message> is expected to be in the same chunk
    if (buffer.length >= 256) {
      length = 256;
      break;
    }
    length = buffer.length;
  }

  // Peek the source's internal buffer without consuming it — the stream is about to be
  // terminated by the ResponseError thrown by the caller anyway
  // biome-ignore lint/complexity/useLiteralKeys: It is a private attribute
  const bytes = bufferedSource["buffer"].slice(0, length);

  try {
    return decodeErrorMessage(bytes);
  } catch (_e) {
    // Error message is optional and may not be included in the response stream;
    // fall back to a hex representation of whatever bytes were received
    return Buffer.prototype.toString.call(bytes, "hex");
  }
}
|
|
134
|
+
|
|
135
|
+
/**
|
|
136
|
+
* Consumes a stream source to read a variable length `<context-bytes>` depending on the method.
|
|
137
|
+
* While `<context-bytes>` has a single type of `ForkDigest`, this function only parses the `ForkName`
|
|
138
|
+
* of the `ForkDigest` or defaults to `phase0`
|
|
139
|
+
*/
|
|
140
|
+
export async function readContextBytes(
|
|
141
|
+
contextBytes: ContextBytesFactory,
|
|
142
|
+
bufferedSource: BufferedSource
|
|
143
|
+
): Promise<ForkName> {
|
|
144
|
+
switch (contextBytes.type) {
|
|
145
|
+
case ContextBytesType.Empty:
|
|
146
|
+
return ForkName.phase0;
|
|
147
|
+
|
|
148
|
+
case ContextBytesType.ForkDigest: {
|
|
149
|
+
const forkDigest = await readContextBytesForkDigest(bufferedSource);
|
|
150
|
+
return contextBytes.config.forkDigest2ForkBoundary(forkDigest).fork;
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
/**
|
|
156
|
+
* Consumes a stream source to read `<context-bytes>`, where it's a fixed-width 4 byte
|
|
157
|
+
*/
|
|
158
|
+
export async function readContextBytesForkDigest(bufferedSource: BufferedSource): Promise<Uint8Array> {
|
|
159
|
+
for await (const buffer of bufferedSource) {
|
|
160
|
+
if (buffer.length >= CONTEXT_BYTES_FORK_DIGEST_LENGTH) {
|
|
161
|
+
const bytes = buffer.slice(0, CONTEXT_BYTES_FORK_DIGEST_LENGTH);
|
|
162
|
+
buffer.consume(CONTEXT_BYTES_FORK_DIGEST_LENGTH);
|
|
163
|
+
return bytes;
|
|
164
|
+
}
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
// TODO: Use typed error
|
|
168
|
+
throw Error("Source ended while reading context bytes");
|
|
169
|
+
}
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
import {writeEncodedPayload} from "../encodingStrategies/index.js";
|
|
2
|
+
import {RespStatus, RpcResponseStatusError} from "../interface.js";
|
|
3
|
+
import {ContextBytesFactory, ContextBytesType, MixedProtocol, Protocol, ResponseOutgoing} from "../types.js";
|
|
4
|
+
import {encodeErrorMessage} from "../utils/index.js";
|
|
5
|
+
|
|
6
|
+
// Single-byte <result> prefix reused for every successful response chunk
const SUCCESS_BUFFER = Buffer.from([RespStatus.SUCCESS]);
|
|
7
|
+
|
|
8
|
+
/**
|
|
9
|
+
* Yields byte chunks for a `<response>` with a zero response code `<result>`
|
|
10
|
+
* ```bnf
|
|
11
|
+
* response ::= <response_chunk>*
|
|
12
|
+
* response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
|
|
13
|
+
* result ::= "0"
|
|
14
|
+
* ```
|
|
15
|
+
* Note: `response` has zero or more chunks (denoted by `<>*`)
|
|
16
|
+
*/
|
|
17
|
+
export function responseEncodeSuccess(
|
|
18
|
+
protocol: Protocol,
|
|
19
|
+
cbs: {onChunk: (chunkIndex: number) => void}
|
|
20
|
+
): (source: AsyncIterable<ResponseOutgoing>) => AsyncIterable<Buffer> {
|
|
21
|
+
return async function* responseEncodeSuccessTransform(source) {
|
|
22
|
+
let chunkIndex = 0;
|
|
23
|
+
|
|
24
|
+
for await (const chunk of source) {
|
|
25
|
+
// Postfix increment, return 0 as first chunk
|
|
26
|
+
cbs.onChunk(chunkIndex++);
|
|
27
|
+
|
|
28
|
+
// <result>
|
|
29
|
+
yield SUCCESS_BUFFER;
|
|
30
|
+
|
|
31
|
+
// <context-bytes> - from altair
|
|
32
|
+
const contextBytes = getContextBytes(protocol.contextBytes, chunk);
|
|
33
|
+
if (contextBytes) {
|
|
34
|
+
yield contextBytes as Buffer;
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
// <encoding-dependent-header> | <encoded-payload>
|
|
38
|
+
yield* writeEncodedPayload(chunk.data, protocol.encoding);
|
|
39
|
+
}
|
|
40
|
+
};
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
/**
|
|
44
|
+
* Yields byte chunks for a `<response_chunk>` with a non-zero response code `<result>`
|
|
45
|
+
* denoted as `<error_response>`
|
|
46
|
+
* ```bnf
|
|
47
|
+
* error_response ::= <result> | <error_message>?
|
|
48
|
+
* result ::= "1" | "2" | ["128" ... "255"]
|
|
49
|
+
* ```
|
|
50
|
+
* Only the last `<response_chunk>` is allowed to have a non-zero error code, so this
|
|
51
|
+
* fn yields exactly one `<error_response>` and afterwards the stream must be terminated
|
|
52
|
+
*/
|
|
53
|
+
export async function* responseEncodeError(
|
|
54
|
+
protocol: Pick<MixedProtocol, "encoding">,
|
|
55
|
+
status: RpcResponseStatusError,
|
|
56
|
+
errorMessage: string
|
|
57
|
+
): AsyncGenerator<Buffer> {
|
|
58
|
+
// <result>
|
|
59
|
+
yield Buffer.from([status]);
|
|
60
|
+
|
|
61
|
+
// <error_message>? is optional
|
|
62
|
+
if (errorMessage) {
|
|
63
|
+
yield* encodeErrorMessage(errorMessage, protocol.encoding);
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
/**
|
|
68
|
+
* Yields byte chunks for a `<context-bytes>`. See `ContextBytesType` for possible types.
|
|
69
|
+
* This item is mandatory but may be empty.
|
|
70
|
+
*/
|
|
71
|
+
function getContextBytes(contextBytes: ContextBytesFactory, chunk: ResponseOutgoing): Uint8Array | null {
|
|
72
|
+
switch (contextBytes.type) {
|
|
73
|
+
// Yield nothing
|
|
74
|
+
case ContextBytesType.Empty:
|
|
75
|
+
return null;
|
|
76
|
+
|
|
77
|
+
// Yield a fixed-width 4 byte chunk, set to the `ForkDigest`
|
|
78
|
+
case ContextBytesType.ForkDigest:
|
|
79
|
+
return contextBytes.config.forkBoundary2ForkDigest(chunk.boundary);
|
|
80
|
+
}
|
|
81
|
+
}
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import {Encoding, TypeSizes} from "../types.js";
|
|
2
|
+
import {BufferedSource} from "../utils/index.js";
|
|
3
|
+
import {readSszSnappyPayload} from "./sszSnappy/decode.js";
|
|
4
|
+
import {writeSszSnappyPayload} from "./sszSnappy/encode.js";
|
|
5
|
+
|
|
6
|
+
// For more info about Ethereum Consensus request/response encoding strategies, see:
|
|
7
|
+
// https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/p2p-interface.md#encoding-strategies
|
|
8
|
+
// Supported encoding strategies:
|
|
9
|
+
// - ssz_snappy
|
|
10
|
+
|
|
11
|
+
/**
|
|
12
|
+
* Consumes a stream source to read encoded header and payload as defined in the spec:
|
|
13
|
+
* ```
|
|
14
|
+
* <encoding-dependent-header> | <encoded-payload>
|
|
15
|
+
* ```
|
|
16
|
+
*/
|
|
17
|
+
export async function readEncodedPayload(
|
|
18
|
+
bufferedSource: BufferedSource,
|
|
19
|
+
encoding: Encoding,
|
|
20
|
+
type: TypeSizes
|
|
21
|
+
): Promise<Uint8Array> {
|
|
22
|
+
switch (encoding) {
|
|
23
|
+
case Encoding.SSZ_SNAPPY:
|
|
24
|
+
return readSszSnappyPayload(bufferedSource, type);
|
|
25
|
+
|
|
26
|
+
default:
|
|
27
|
+
throw Error("Unsupported encoding");
|
|
28
|
+
}
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
/**
|
|
32
|
+
* Yields byte chunks for encoded header and payload as defined in the spec:
|
|
33
|
+
* ```
|
|
34
|
+
* <encoding-dependent-header> | <encoded-payload>
|
|
35
|
+
* ```
|
|
36
|
+
*/
|
|
37
|
+
export async function* writeEncodedPayload(chunkData: Uint8Array, encoding: Encoding): AsyncGenerator<Buffer> {
|
|
38
|
+
switch (encoding) {
|
|
39
|
+
case Encoding.SSZ_SNAPPY:
|
|
40
|
+
yield* writeSszSnappyPayload(chunkData);
|
|
41
|
+
break;
|
|
42
|
+
|
|
43
|
+
default:
|
|
44
|
+
throw Error("Unsupported encoding");
|
|
45
|
+
}
|
|
46
|
+
}
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
import {decode as varintDecode, encodingLength as varintEncodingLength} from "uint8-varint";
|
|
2
|
+
import {Uint8ArrayList} from "uint8arraylist";
|
|
3
|
+
import {TypeSizes} from "../../types.js";
|
|
4
|
+
import {BufferedSource} from "../../utils/index.js";
|
|
5
|
+
import {SszSnappyError, SszSnappyErrorCode} from "./errors.js";
|
|
6
|
+
import {SnappyFramesUncompress} from "./snappyFrames/uncompress.js";
|
|
7
|
+
import {maxEncodedLen} from "./utils.js";
|
|
8
|
+
|
|
9
|
+
// Spec bound: the unsigned protobuf varint used as length-prefix MUST NOT exceed 10 bytes
export const MAX_VARINT_BYTES = 10;
|
|
10
|
+
|
|
11
|
+
/**
|
|
12
|
+
* ssz_snappy encoding strategy reader.
|
|
13
|
+
* Consumes a stream source to read encoded header and payload as defined in the spec:
|
|
14
|
+
* ```bnf
|
|
15
|
+
* <encoding-dependent-header> | <encoded-payload>
|
|
16
|
+
* ```
|
|
17
|
+
*/
|
|
18
|
+
export async function readSszSnappyPayload(bufferedSource: BufferedSource, type: TypeSizes): Promise<Uint8Array> {
|
|
19
|
+
const sszDataLength = await readSszSnappyHeader(bufferedSource, type);
|
|
20
|
+
|
|
21
|
+
return readSszSnappyBody(bufferedSource, sszDataLength);
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
/**
 * Reads `<encoding-dependent-header>` for ssz-snappy.
 * encoding-header ::= the length of the raw SSZ bytes, encoded as an unsigned protobuf varint
 *
 * Validates the decoded length against the SSZ type's [minSize, maxSize] bounds.
 */
export async function readSszSnappyHeader(bufferedSource: BufferedSource, type: TypeSizes): Promise<number> {
  for await (const buffer of bufferedSource) {
    // Get next bytes if empty
    if (buffer.length === 0) {
      continue;
    }

    let sszDataLength: number;
    try {
      sszDataLength = varintDecode(buffer.subarray());
    } catch (_e) {
      throw new SszSnappyError({code: SszSnappyErrorCode.INVALID_VARINT_BYTES_COUNT, bytes: Infinity});
    }

    // MUST validate: the unsigned protobuf varint used for the length-prefix MUST not be longer than 10 bytes
    // encodingLength function only returns 1-8 inclusive
    // NOTE(review): this consumes the canonical encoding length of the decoded value, which may
    // differ from the bytes actually present for a non-canonical (zero-padded) varint — confirm
    const varintBytes = varintEncodingLength(sszDataLength);
    buffer.consume(varintBytes);

    // MUST validate: the length-prefix is within the expected size bounds derived from the payload SSZ type.
    const minSize = type.minSize;
    const maxSize = type.maxSize;
    if (sszDataLength < minSize) {
      throw new SszSnappyError({code: SszSnappyErrorCode.UNDER_SSZ_MIN_SIZE, minSize, sszDataLength});
    }
    if (sszDataLength > maxSize) {
      throw new SszSnappyError({code: SszSnappyErrorCode.OVER_SSZ_MAX_SIZE, maxSize, sszDataLength});
    }

    return sszDataLength;
  }

  // Stream ended before a varint header could be read
  throw new SszSnappyError({code: SszSnappyErrorCode.SOURCE_ABORTED});
}
|
|
62
|
+
|
|
63
|
+
/**
 * Reads `<encoded-payload>` for ssz-snappy and decompress.
 * The returned bytes can be SSZ deserialized
 */
export async function readSszSnappyBody(bufferedSource: BufferedSource, sszDataLength: number): Promise<Uint8Array> {
  const decompressor = new SnappyFramesUncompress();
  const uncompressedData = new Uint8ArrayList();
  // Running total of compressed bytes consumed from the source
  let readBytes = 0;

  for await (const buffer of bufferedSource) {
    // SHOULD NOT read more than max_encoded_len(n) bytes after reading the SSZ length-prefix n from the header
    readBytes += buffer.length;
    if (readBytes > maxEncodedLen(sszDataLength)) {
      throw new SszSnappyError({code: SszSnappyErrorCode.TOO_MUCH_BYTES_READ, readBytes, sszDataLength});
    }

    // No bytes left to consume, get next
    if (buffer.length === 0) {
      continue;
    }

    // stream contents can be passed through a buffered Snappy reader to decompress frame by frame
    try {
      const uncompressed = decompressor.uncompress(buffer);
      buffer.consume(buffer.length);
      // uncompress() returns null while a frame is still incomplete
      if (uncompressed !== null) {
        uncompressedData.append(uncompressed);
      }
    } catch (e) {
      throw new SszSnappyError({code: SszSnappyErrorCode.DECOMPRESSOR_ERROR, decompressorError: e as Error});
    }

    // SHOULD consider invalid reading more bytes than `n` SSZ bytes
    if (uncompressedData.length > sszDataLength) {
      throw new SszSnappyError({code: SszSnappyErrorCode.TOO_MANY_BYTES, sszDataLength});
    }

    // Keep reading chunks until `n` SSZ bytes
    if (uncompressedData.length < sszDataLength) {
      continue;
    }

    // uncompressedData.length === sszDataLength exactly at this point
    return uncompressedData.subarray(0, sszDataLength);
  }

  // SHOULD consider invalid: An early EOF before fully reading the declared length-prefix worth of SSZ bytes
  throw new SszSnappyError({code: SszSnappyErrorCode.SOURCE_ABORTED});
}
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import {encode as varintEncode} from "uint8-varint";
|
|
2
|
+
import {encodeSnappy} from "./snappyFrames/compress.js";
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* ssz_snappy encoding strategy writer.
|
|
6
|
+
* Yields byte chunks for encoded header and payload as defined in the spec:
|
|
7
|
+
* ```
|
|
8
|
+
* <encoding-dependent-header> | <encoded-payload>
|
|
9
|
+
* ```
|
|
10
|
+
*/
|
|
11
|
+
export const writeSszSnappyPayload = encodeSszSnappy as (bytes: Uint8Array) => AsyncGenerator<Buffer>;
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Buffered Snappy writer
|
|
15
|
+
*/
|
|
16
|
+
export async function* encodeSszSnappy(bytes: Buffer): AsyncGenerator<Buffer> {
|
|
17
|
+
// MUST encode the length of the raw SSZ bytes, encoded as an unsigned protobuf varint
|
|
18
|
+
const varint = varintEncode(bytes.length);
|
|
19
|
+
yield Buffer.from(varint.buffer, varint.byteOffset, varint.byteLength);
|
|
20
|
+
|
|
21
|
+
// By first computing and writing the SSZ byte length, the SSZ encoder can then directly
|
|
22
|
+
// write the chunk contents to the stream. Snappy writer compresses frame by frame
|
|
23
|
+
yield* encodeSnappy(bytes);
|
|
24
|
+
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import {LodestarError} from "@lodestar/utils";
|
|
2
|
+
|
|
3
|
+
export enum SszSnappyErrorCode {
  /** Invalid number of bytes for protobuf varint */
  INVALID_VARINT_BYTES_COUNT = "SSZ_SNAPPY_ERROR_INVALID_VARINT_BYTES_COUNT",
  /** Parsed sszDataLength is under the SSZ type min size */
  UNDER_SSZ_MIN_SIZE = "SSZ_SNAPPY_ERROR_UNDER_SSZ_MIN_SIZE",
  /** Parsed sszDataLength is over the SSZ type max size */
  OVER_SSZ_MAX_SIZE = "SSZ_SNAPPY_ERROR_OVER_SSZ_MAX_SIZE",
  /** Read more compressed bytes than allowed for the declared sszDataLength */
  TOO_MUCH_BYTES_READ = "SSZ_SNAPPY_ERROR_TOO_MUCH_BYTES_READ",
  /** Snappy frames decompressor threw while processing a chunk (see decompressorError) */
  DECOMPRESSOR_ERROR = "SSZ_SNAPPY_ERROR_DECOMPRESSOR_ERROR",
  /** Error during SSZ deserialization (see deserializeError) */
  DESERIALIZE_ERROR = "SSZ_SNAPPY_ERROR_DESERIALIZE_ERROR",
  /** Error during SSZ serialization (see serializeError) */
  SERIALIZE_ERROR = "SSZ_SNAPPY_ERROR_SERIALIZE_ERROR",
  /** Received more bytes than specified sszDataLength */
  TOO_MANY_BYTES = "SSZ_SNAPPY_ERROR_TOO_MANY_BYTES",
  /** Source aborted before reading sszDataLength bytes */
  SOURCE_ABORTED = "SSZ_SNAPPY_ERROR_SOURCE_ABORTED",
}
|
|
19
|
+
|
|
20
|
+
// Discriminated union of all SszSnappyError payloads, keyed on `code`
type SszSnappyErrorType =
  | {code: SszSnappyErrorCode.INVALID_VARINT_BYTES_COUNT; bytes: number}
  | {code: SszSnappyErrorCode.UNDER_SSZ_MIN_SIZE; minSize: number; sszDataLength: number}
  | {code: SszSnappyErrorCode.OVER_SSZ_MAX_SIZE; maxSize: number; sszDataLength: number}
  | {code: SszSnappyErrorCode.TOO_MUCH_BYTES_READ; readBytes: number; sszDataLength: number}
  | {code: SszSnappyErrorCode.DECOMPRESSOR_ERROR; decompressorError: Error}
  | {code: SszSnappyErrorCode.DESERIALIZE_ERROR; deserializeError: Error}
  | {code: SszSnappyErrorCode.SERIALIZE_ERROR; serializeError: Error}
  | {code: SszSnappyErrorCode.TOO_MANY_BYTES; sszDataLength: number}
  | {code: SszSnappyErrorCode.SOURCE_ABORTED};

/** Typed error for the ssz_snappy encoding strategy; see SszSnappyErrorCode for variants */
export class SszSnappyError extends LodestarError<SszSnappyErrorType> {}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import crc32c from "@chainsafe/fast-crc32c";
|
|
2
|
+
|
|
3
|
+
// Snappy framing format chunk type bytes
// https://github.com/google/snappy/blob/main/framing_format.txt
export enum ChunkType {
  IDENTIFIER = 0xff,
  COMPRESSED = 0x00,
  UNCOMPRESSED = 0x01,
  PADDING = 0xfe,
  // Representative value for the reserved skippable range 0x80..0xfd (see getChunkType)
  SKIPPABLE = 0x80,
}
|
|
10
|
+
|
|
11
|
+
// "sNaPpY" magic bytes carried by the stream identifier chunk
export const IDENTIFIER = Buffer.from([0x73, 0x4e, 0x61, 0x50, 0x70, 0x59]);
// Full identifier frame: type byte 0xff, 3-byte little-endian length (6), then the magic bytes
export const IDENTIFIER_FRAME = Buffer.from([0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59]);

/**
 * As per the snappy framing format for streams, the size of any uncompressed chunk can be
 * no longer than 65536 bytes.
 *
 * From: https://github.com/google/snappy/blob/main/framing_format.txt#L90:L92
 */
export const UNCOMPRESSED_CHUNK_SIZE = 65536;
|
|
21
|
+
|
|
22
|
+
/**
 * Computes the masked CRC-32C checksum used by the snappy framing format.
 * Returns a 4-byte little-endian Buffer.
 */
export function crc(value: Uint8Array): Buffer {
  const x = crc32c.calculate(value);
  // allocUnsafe is safe here: all 4 bytes are overwritten below
  const result = Buffer.allocUnsafe?.(4) ?? Buffer.alloc(4);

  // As defined in section 3 of https://github.com/google/snappy/blob/master/framing_format.txt
  // And other implementations for reference:
  // Go: https://github.com/golang/snappy/blob/2e65f85255dbc3072edf28d6b5b8efc472979f5a/snappy.go#L97
  // Python: https://github.com/andrix/python-snappy/blob/602e9c10d743f71bef0bac5e4c4dffa17340d7b3/snappy/snappy.py#L70
  // Mask the right hand to (32 - 17) = 15 bits -> 0x7fff, to keep correct 32 bit values.
  // Shift the left hand with >>> for correct 32 bit intermediate result.
  // Then final >>> 0 for 32 bits output
  result.writeUInt32LE((((x >>> 15) | ((x & 0x7fff) << 17)) + 0xa282ead8) >>> 0, 0);

  return result;
}
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
import snappy from "snappy";
|
|
2
|
+
import {ChunkType, IDENTIFIER_FRAME, UNCOMPRESSED_CHUNK_SIZE, crc} from "./common.js";
|
|
3
|
+
|
|
4
|
+
// The logic in this file is largely copied (in simplified form) from https://github.com/ChainSafe/node-snappy-stream/

/**
 * Yields a snappy-framed stream for `bytes`: the identifier frame first, then one
 * data frame per (up to) 65536-byte slice of the input.
 */
export async function* encodeSnappy(bytes: Buffer): AsyncGenerator<Buffer> {
  yield IDENTIFIER_FRAME;

  for (let i = 0; i < bytes.length; i += UNCOMPRESSED_CHUNK_SIZE) {
    const chunk = bytes.subarray(i, i + UNCOMPRESSED_CHUNK_SIZE);
    const compressed = snappy.compressSync(chunk);
    // Only emit a COMPRESSED frame when compression actually saves bytes
    if (compressed.length < chunk.length) {
      // Frame size = payload + 4-byte masked crc, stored as 3-byte little-endian
      const size = compressed.length + 4;
      yield Buffer.concat([Buffer.from([ChunkType.COMPRESSED, size, size >> 8, size >> 16]), crc(chunk), compressed]);
    } else {
      const size = chunk.length + 4;
      yield Buffer.concat([
        //
        Buffer.from([ChunkType.UNCOMPRESSED, size, size >> 8, size >> 16]),
        crc(chunk),
        chunk,
      ]);
    }
  }
}
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
import {uncompress} from "snappyjs";
|
|
2
|
+
import {Uint8ArrayList} from "uint8arraylist";
|
|
3
|
+
import {ChunkType, IDENTIFIER, UNCOMPRESSED_CHUNK_SIZE, crc} from "./common.js";
|
|
4
|
+
|
|
5
|
+
export class SnappyFramesUncompress {
|
|
6
|
+
private buffer = new Uint8ArrayList();
|
|
7
|
+
|
|
8
|
+
private state: UncompressState = {
|
|
9
|
+
foundIdentifier: false,
|
|
10
|
+
};
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Accepts chunk of data containing some part of snappy frames stream
|
|
14
|
+
* @param chunk
|
|
15
|
+
* @return Buffer if there is one or more whole frames, null if it's partial
|
|
16
|
+
*/
|
|
17
|
+
uncompress(chunk: Uint8ArrayList): Uint8ArrayList | null {
|
|
18
|
+
this.buffer.append(chunk);
|
|
19
|
+
const result = new Uint8ArrayList();
|
|
20
|
+
while (this.buffer.length > 0) {
|
|
21
|
+
if (this.buffer.length < 4) break;
|
|
22
|
+
|
|
23
|
+
const type = getChunkType(this.buffer.get(0));
|
|
24
|
+
|
|
25
|
+
if (!this.state.foundIdentifier && type !== ChunkType.IDENTIFIER) {
|
|
26
|
+
throw "malformed input: must begin with an identifier";
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
const frameSize = getFrameSize(this.buffer, 1);
|
|
30
|
+
|
|
31
|
+
if (this.buffer.length - 4 < frameSize) {
|
|
32
|
+
break;
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
const frame = this.buffer.subarray(4, 4 + frameSize);
|
|
36
|
+
this.buffer.consume(4 + frameSize);
|
|
37
|
+
|
|
38
|
+
switch (type) {
|
|
39
|
+
case ChunkType.IDENTIFIER: {
|
|
40
|
+
if (!Buffer.prototype.equals.call(frame, IDENTIFIER)) {
|
|
41
|
+
throw "malformed input: bad identifier";
|
|
42
|
+
}
|
|
43
|
+
this.state.foundIdentifier = true;
|
|
44
|
+
continue;
|
|
45
|
+
}
|
|
46
|
+
case ChunkType.PADDING:
|
|
47
|
+
case ChunkType.SKIPPABLE:
|
|
48
|
+
continue;
|
|
49
|
+
case ChunkType.COMPRESSED: {
|
|
50
|
+
const checksum = frame.subarray(0, 4);
|
|
51
|
+
const data = frame.subarray(4);
|
|
52
|
+
|
|
53
|
+
const uncompressed = uncompress(data, UNCOMPRESSED_CHUNK_SIZE);
|
|
54
|
+
if (crc(uncompressed).compare(checksum) !== 0) {
|
|
55
|
+
throw "malformed input: bad checksum";
|
|
56
|
+
}
|
|
57
|
+
result.append(uncompressed);
|
|
58
|
+
break;
|
|
59
|
+
}
|
|
60
|
+
case ChunkType.UNCOMPRESSED: {
|
|
61
|
+
const checksum = frame.subarray(0, 4);
|
|
62
|
+
const uncompressed = frame.subarray(4);
|
|
63
|
+
|
|
64
|
+
if (uncompressed.length > UNCOMPRESSED_CHUNK_SIZE) {
|
|
65
|
+
throw "malformed input: too large";
|
|
66
|
+
}
|
|
67
|
+
if (crc(uncompressed).compare(checksum) !== 0) {
|
|
68
|
+
throw "malformed input: bad checksum";
|
|
69
|
+
}
|
|
70
|
+
result.append(uncompressed);
|
|
71
|
+
break;
|
|
72
|
+
}
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
if (result.length === 0) {
|
|
76
|
+
return null;
|
|
77
|
+
}
|
|
78
|
+
return result;
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
reset(): void {
|
|
82
|
+
this.buffer = new Uint8ArrayList();
|
|
83
|
+
this.state = {
|
|
84
|
+
foundIdentifier: false,
|
|
85
|
+
};
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
// Parser state persisted across uncompress() calls
type UncompressState = {
  // True once the stream's identifier frame has been seen and validated
  foundIdentifier: boolean;
};
|
|
92
|
+
|
|
93
|
+
function getFrameSize(buffer: Uint8ArrayList, offset: number): number {
|
|
94
|
+
return buffer.get(offset) + (buffer.get(offset + 1) << 8) + (buffer.get(offset + 2) << 16);
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
function getChunkType(value: number): ChunkType {
|
|
98
|
+
switch (value) {
|
|
99
|
+
case ChunkType.IDENTIFIER:
|
|
100
|
+
return ChunkType.IDENTIFIER;
|
|
101
|
+
case ChunkType.COMPRESSED:
|
|
102
|
+
return ChunkType.COMPRESSED;
|
|
103
|
+
case ChunkType.UNCOMPRESSED:
|
|
104
|
+
return ChunkType.UNCOMPRESSED;
|
|
105
|
+
case ChunkType.PADDING:
|
|
106
|
+
return ChunkType.PADDING;
|
|
107
|
+
default:
|
|
108
|
+
// https://github.com/google/snappy/blob/main/framing_format.txt#L129
|
|
109
|
+
if (value >= 0x80 && value <= 0xfd) {
|
|
110
|
+
return ChunkType.SKIPPABLE;
|
|
111
|
+
}
|
|
112
|
+
throw new Error("Unsupported snappy chunk type");
|
|
113
|
+
}
|
|
114
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
// Public API barrel for @lodestar/reqresp
export * from "./interface.js";
export type {Metrics} from "./metrics.js";
export {getMetrics} from "./metrics.js";
export type {ReqRespOpts} from "./ReqResp.js";
export {ReqResp} from "./ReqResp.js";
export * from "./request/errors.js";
export * from "./response/errors.js";
export * from "./types.js";
export {Encoding as ReqRespEncoding} from "./types.js"; // Expose enums renamed
export {collectExactOne, collectMaxResponse, formatProtocolID, parseProtocolID} from "./utils/index.js";
|