faster-eth-abi 5.2.10-cp313-cp313-musllinux_1_2_x86_64.whl → 5.2.20-cp313-cp313-musllinux_1_2_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of faster-eth-abi might be problematic.
- faster_eth_abi/_codec.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/_codec.py +7 -5
- faster_eth_abi/_decoding.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/_decoding.py +162 -18
- faster_eth_abi/_encoding.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/_encoding.py +147 -6
- faster_eth_abi/_grammar.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/_grammar.py +375 -0
- faster_eth_abi/abi.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/base.py +5 -1
- faster_eth_abi/codec.py +2675 -9
- faster_eth_abi/constants.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/decoding.py +190 -136
- faster_eth_abi/encoding.py +112 -36
- faster_eth_abi/exceptions.py +26 -14
- faster_eth_abi/from_type_str.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/from_type_str.py +7 -1
- faster_eth_abi/grammar.py +30 -326
- faster_eth_abi/io.py +5 -1
- faster_eth_abi/packed.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/packed.py +4 -0
- faster_eth_abi/registry.py +186 -91
- faster_eth_abi/tools/__init__.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/tools/_strategies.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/tools/_strategies.py +12 -6
- faster_eth_abi/typing.py +4627 -0
- faster_eth_abi/utils/__init__.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/utils/numeric.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/utils/padding.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/utils/string.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi/utils/validation.cpython-313-x86_64-linux-musl.so +0 -0
- {faster_eth_abi-5.2.10.dist-info → faster_eth_abi-5.2.20.dist-info}/METADATA +38 -14
- faster_eth_abi-5.2.20.dist-info/RECORD +46 -0
- faster_eth_abi-5.2.20.dist-info/top_level.txt +2 -0
- faster_eth_abi__mypyc.cpython-313-x86_64-linux-musl.so +0 -0
- 76f9a3652d4d2667c55c__mypyc.cpython-313-x86_64-linux-musl.so +0 -0
- faster_eth_abi-5.2.10.dist-info/RECORD +0 -44
- faster_eth_abi-5.2.10.dist-info/licenses/LICENSE +0 -21
- faster_eth_abi-5.2.10.dist-info/top_level.txt +0 -2
- {faster_eth_abi-5.2.10.dist-info → faster_eth_abi-5.2.20.dist-info}/WHEEL +0 -0
faster_eth_abi/_codec.cpython-313-x86_64-linux-musl.so
Binary file
faster_eth_abi/_codec.py
CHANGED

@@ -1,3 +1,9 @@
+"""Internal codec helpers for encoding and decoding sequences of values using the head-tail mechanism.
+
+Provides encode_c and decode_c functions for binary serialization and deserialization of values
+according to ABI type specifications.
+"""
+
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -39,13 +45,11 @@ def encode_c(
     :returns: The head-tail encoded binary representation of the python
         values in ``args`` as values of the ABI types in ``types``.
     """
-    # validate encode types and args
-    validate_list_like_param(types, "types")
     validate_list_like_param(args, "args")
 
     encoder = self._registry.get_tuple_encoder(*types)
 
-    return encoder(args)
+    return encoder.encode(args)
 
 
 def decode_c(
@@ -71,8 +75,6 @@ def decode_c(
     :returns: A tuple of equivalent python values for the ABI values
         represented in ``data``.
     """
-    # validate decode types and data
-    validate_list_like_param(types, "types")
     validate_bytes_param(data, "data")
 
     decoder = self._registry.get_tuple_decoder(*types, strict=strict)
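The change from `encoder(args)` to `encoder.encode(args)` means the codec now calls the tuple encoder's `encode` method instead of its `__call__`. For orientation, here is a minimal usage sketch of the public API these helpers sit behind; it assumes faster-eth-abi keeps upstream eth-abi's top-level `encode`/`decode` entry points, which this diff does not itself show:

# Hypothetical usage sketch -- the top-level import path is an assumption,
# mirroring upstream eth-abi's public API.
from faster_eth_abi import decode, encode

types = ["uint256", "address", "bytes"]
args = [1, "0x" + "aa" * 20, b"\x01\x02"]

encoded = encode(types, args)     # head-tail encoded payload, 32-byte aligned
decoded = decode(types, encoded)  # tuple of equivalent python values
assert list(decoded) == args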
faster_eth_abi/_decoding.cpython-313-x86_64-linux-musl.so
Binary file
faster_eth_abi/_decoding.py
CHANGED

@@ -1,6 +1,13 @@
+"""Private helpers for decoding logic, intended for C compilation.
+
+This file exists because the original decoding.py is not ready to be fully compiled to C.
+This module contains functions and logic that we wish to compile.
+"""
 from typing import (
     TYPE_CHECKING,
     Any,
+    Dict,
+    Final,
     Tuple,
 )
@@ -10,18 +17,24 @@ from faster_eth_utils import (
 
 from faster_eth_abi.exceptions import (
     InsufficientDataBytes,
+    InvalidPointer,
     NonEmptyPaddingBytes,
 )
 from faster_eth_abi.io import (
     BytesIO,
     ContextFramesBytesIO,
 )
+from faster_eth_abi.typing import (
+    T,
+)
 
 if TYPE_CHECKING:
     from .decoding import (
+        BaseArrayDecoder,
         DynamicArrayDecoder,
         FixedByteSizeDecoder,
         HeadTailDecoder,
+        SignedIntegerDecoder,
         SizedArrayDecoder,
         TupleDecoder,
     )
@@ -30,14 +43,14 @@ if TYPE_CHECKING:
 # Helpers
 def decode_uint_256(stream: ContextFramesBytesIO) -> int:
     """
-
+    A faster version of :func:`~decoding.decode_uint_256` in decoding.py.
 
     It recreates the logic from the UnsignedIntegerDecoder, but we can
     skip a lot because we know the value of many vars.
     """
     # read data from stream
     if len(data := stream.read(32)) == 32:
-        return big_endian_to_int(data)
+        return big_endian_to_int(data)
     raise InsufficientDataBytes(f"Tried to read 32 bytes, only got {len(data)} bytes.")
 
 
@@ -46,7 +59,7 @@ def get_value_byte_size(decoder: "FixedByteSizeDecoder") -> int:
 
 
 # HeadTailDecoder
-def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> Any:
+def decode_head_tail(self: "HeadTailDecoder[T]", stream: ContextFramesBytesIO) -> T:
     # Decode the offset and move the stream cursor forward 32 bytes
     start_pos = decode_uint_256(stream)
     # Jump ahead to the start of the value
@@ -57,7 +70,7 @@ def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> Any:
     if tail_decoder is None:
         raise AssertionError("`tail_decoder` is None")
     # Decode the value
-    value = tail_decoder(stream)
+    value: T = tail_decoder(stream)
     # Return the cursor
     stream.pop_frame()
 
@@ -65,15 +78,117 @@ def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> Any:
 
 
 # TupleDecoder
-def decode_tuple(
-    self: "TupleDecoder", stream: ContextFramesBytesIO
-) -> Tuple[Any, ...]:
+def decode_tuple(
+    self: "TupleDecoder[T]", stream: ContextFramesBytesIO
+) -> Tuple[T, ...]:
+    # NOTE: the original implementation would do this but it's
+    # kinda wasteful, so we rebuilt the logic within this function
+    # validate_pointers_tuple(self, stream)
+
+    current_location = stream.tell()
+    if self._no_head_tail:
+        # TODO: if all(isinstance(d, TupleDecoder) for d in self._decoders)
+        # return tuple(decode_tuple(stream) for _ in range(len(self.decoders))
+        # and other types with compiled decode funcs
+        return tuple(decoder(stream) for decoder in self.decoders)
+
+    end_of_offsets = current_location + 32 * self.len_of_head
+    total_stream_length = len(stream.getbuffer())
+    items = []
+    for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
+        if is_head_tail:
+            # the next 32 bytes are a pointer that we should validate
+            # checkpoint the stream location so we can reset it after validation
+            step_location = stream.tell()
+
+            offset = decode_uint_256(stream)
+            indicated_idx = current_location + offset
+            if indicated_idx < end_of_offsets or indicated_idx >= total_stream_length:
+                # the pointer is indicating its data is located either within the
+                # offsets section of the stream or beyond the end of the stream,
+                # both of which are invalid
+                raise InvalidPointer(
+                    "Invalid pointer in tuple at location "
+                    f"{stream.tell() - 32} in payload"
+                )
+
+            # reset the stream so we can decode
+            stream.seek(step_location)
+
+        items.append(decoder(stream))
+
+    # return the stream to its original location for actual decoding
+    stream.seek(current_location)
+
+    return tuple(items)
+
+
+def validate_pointers_tuple(
+    self: "TupleDecoder",
+    stream: ContextFramesBytesIO,
+) -> None:
+    """
+    Verify that all pointers point to a valid location in the stream.
+    """
+    current_location = stream.tell()
+    if self._no_head_tail:
+        for decoder in self.decoders:
+            decoder(stream)
+    else:
+        end_of_offsets = current_location + 32 * self.len_of_head
+        total_stream_length = len(stream.getbuffer())
+        for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
+            if not is_head_tail:
+                # the next 32 bytes are not a pointer,
+                # so progress the stream per the decoder
+                decoder(stream)
+            else:
+                # the next 32 bytes are a pointer
+                offset = decode_uint_256(stream)
+                indicated_idx = current_location + offset
+                if (
+                    indicated_idx < end_of_offsets
+                    or indicated_idx >= total_stream_length
+                ):
+                    # the pointer is indicating its data is located either within the
+                    # offsets section of the stream or beyond the end of the stream,
+                    # both of which are invalid
+                    raise InvalidPointer(
+                        "Invalid pointer in tuple at location "
+                        f"{stream.tell() - 32} in payload"
+                    )
+    # return the stream to its original location for actual decoding
+    stream.seek(current_location)
+
+
+# BaseArrayDecoder
+def validate_pointers_array(
+    self: "BaseArrayDecoder", stream: ContextFramesBytesIO, array_size: int
+) -> None:
+    """
+    Verify that all pointers point to a valid location in the stream.
+    """
+    current_location = stream.tell()
+    end_of_offsets = current_location + 32 * array_size
+    total_stream_length = len(stream.getbuffer())
+    for _ in range(array_size):
+        offset = decode_uint_256(stream)
+        indicated_idx = current_location + offset
+        if indicated_idx < end_of_offsets or indicated_idx >= total_stream_length:
+            # the pointer is indicating its data is located either within the
+            # offsets section of the stream or beyond the end of the stream,
+            # both of which are invalid
+            raise InvalidPointer(
+                "Invalid pointer in array at location "
+                f"{stream.tell() - 32} in payload"
+            )
+    stream.seek(current_location)
 
 
 # SizedArrayDecoder
 def decode_sized_array(
-    self: "SizedArrayDecoder", stream: ContextFramesBytesIO
-) -> Tuple[Any, ...]:
+    self: "SizedArrayDecoder[T]", stream: ContextFramesBytesIO
+) -> Tuple[T, ...]:
     item_decoder = self.item_decoder
     if item_decoder is None:
         raise AssertionError("`item_decoder` is None")
@@ -85,8 +200,8 @@ def decode_sized_array(
 
 # DynamicArrayDecoder
 def decode_dynamic_array(
-    self: "DynamicArrayDecoder", stream: ContextFramesBytesIO
-) -> Tuple[Any, ...]:
+    self: "DynamicArrayDecoder[T]", stream: ContextFramesBytesIO
+) -> Tuple[T, ...]:
     array_size = decode_uint_256(stream)
     stream.push_frame(32)
     if self.item_decoder is None:
@@ -102,7 +217,7 @@ def decode_dynamic_array(
 
 # FixedByteSizeDecoder
 def read_fixed_byte_size_data_from_stream(
-    self: "FixedByteSizeDecoder",
+    self: "FixedByteSizeDecoder[Any]",
     # NOTE: use BytesIO here so mypyc doesn't type-check
     # `stream` once we compile ContextFramesBytesIO.
     stream: BytesIO,
@@ -116,7 +231,7 @@ def read_fixed_byte_size_data_from_stream(
 
 
 def split_data_and_padding_fixed_byte_size(
-    self: "FixedByteSizeDecoder",
+    self: "FixedByteSizeDecoder[Any]",
    raw_data: bytes,
 ) -> Tuple[bytes, bytes]:
     value_byte_size = get_value_byte_size(self)
@@ -135,14 +250,43 @@ def split_data_and_padding_fixed_byte_size(
 
 
 def validate_padding_bytes_fixed_byte_size(
-    self: "FixedByteSizeDecoder",
-    value: Any,
+    self: "FixedByteSizeDecoder[T]",
+    value: T,
     padding_bytes: bytes,
 ) -> None:
-    value_byte_size = get_value_byte_size(self)
-    padding_size = self.data_byte_size - value_byte_size
+    if padding_bytes != get_expected_padding_bytes(self, b"\x00"):
+        raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
+
+
+_expected_padding_bytes_cache: Final[
+    Dict["FixedByteSizeDecoder[Any]", Dict[bytes, bytes]]
+] = {}
+
+
+def get_expected_padding_bytes(
+    self: "FixedByteSizeDecoder[Any]", chunk: bytes
+) -> bytes:
+    instance_cache = _expected_padding_bytes_cache.setdefault(self, {})
+    expected_padding_bytes = instance_cache.get(chunk)
+    if expected_padding_bytes is None:
+        value_byte_size = get_value_byte_size(self)
+        padding_size = self.data_byte_size - value_byte_size
+        expected_padding_bytes = chunk * padding_size
+        instance_cache[chunk] = expected_padding_bytes
+    return expected_padding_bytes
+
+
+def validate_padding_bytes_signed_integer(
+    self: "SignedIntegerDecoder",
+    value: int,
+    padding_bytes: bytes,
+) -> None:
+    if value >= 0:
+        expected_padding_bytes = get_expected_padding_bytes(self, b"\x00")
+    else:
+        expected_padding_bytes = get_expected_padding_bytes(self, b"\xff")
 
-    if padding_bytes != b"\x00" * padding_size:
+    if padding_bytes != expected_padding_bytes:
         raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
 
 
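The new pointer checks reject any head-section offset that points back into the offsets block or past the end of the payload. A standalone sketch of the same bounds arithmetic, with hypothetical names rather than the package's code:

# Illustrative sketch of the head-tail pointer bounds check.
def pointer_is_valid(payload: bytes, tuple_start: int, head_slots: int, offset: int) -> bool:
    end_of_offsets = tuple_start + 32 * head_slots  # one 32-byte head slot per element
    indicated_idx = tuple_start + offset            # where the pointer claims the tail data lives
    # tail data must begin at or after the head section and before the payload ends
    return end_of_offsets <= indicated_idx < len(payload)

# For ("uint256", "bytes"): slot 0 holds the uint256 value, slot 1 holds a pointer,
# so the earliest legal tail offset is 64.
payload = bytes(96)
assert pointer_is_valid(payload, 0, 2, 64)      # first byte after the head: valid
assert not pointer_is_valid(payload, 0, 2, 32)  # points back into the head section
assert not pointer_is_valid(payload, 0, 2, 96)  # points past the end of the stream

The `_expected_padding_bytes_cache` added in the same file serves a related hot path: instead of recomputing `chunk * padding_size` on every call, the expected padding for each (decoder, chunk) pair is built once and reused.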
faster_eth_abi/_encoding.cpython-313-x86_64-linux-musl.so
Binary file
faster_eth_abi/_encoding.py
CHANGED

@@ -1,30 +1,65 @@
+"""Private helpers for encoding logic, intended for C compilation.
+
+This file exists because the original encoding.py is not ready to be fully compiled to C.
+This module contains functions and logic that we do wish to compile.
+"""
 from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
+    Dict,
     List,
     Optional,
     Sequence,
+    Tuple,
     TypeVar,
 )
 
+from faster_eth_utils import (
+    is_list_like,
+)
+
+from faster_eth_abi.exceptions import (
+    ValueOutOfBounds,
+)
+
 if TYPE_CHECKING:
     from faster_eth_abi.encoding import (
         BaseEncoder,
+        TupleEncoder,
     )
 
 
 T = TypeVar("T")
 
 
-def encode_tuple(
-    values: Sequence[Any],
-    encoders: Sequence["BaseEncoder"],
-) -> bytes:
+# TupleEncoder
+def validate_tuple(self: "TupleEncoder", value: Sequence[Any]) -> None:
+    # if we check list and tuple first it compiles to much quicker C code
+    if not isinstance(value, (list, tuple)) and not is_list_like(value):
+        self.invalidate_value(
+            value,
+            msg="must be list-like object such as array or tuple",
+        )
+
+    validators = self.validators
+    if len(value) != len(validators):
+        self.invalidate_value(
+            value,
+            exc=ValueOutOfBounds,
+            msg=f"value has {len(value)} items when {len(validators)} " "were expected",
+        )
+
+    for item, validator in zip(value, validators):
+        validator(item)
+
+
+def encode_tuple(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
     raw_head_chunks: List[Optional[bytes]] = []
     tail_chunks: List[bytes] = []
-    for value, encoder in zip(values, encoders):
-        if encoder.is_dynamic:
+    for value, encoder, is_dynamic in zip(values, self.encoders, self._is_dynamic):
+        if is_dynamic:
             raw_head_chunks.append(None)
             tail_chunks.append(encoder(value))
         else:
@@ -46,6 +81,112 @@ def encode_tuple(
     return b"".join(head_chunks) + b"".join(tail_chunks)
 
 
+def encode_tuple_all_dynamic(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    tail_chunks = [encoder(value) for encoder, value in zip(encoders, values)]
+
+    total_offset = 0
+    head_length = 32 * len(encoders)
+    head_chunks = [encode_uint_256(head_length)]
+    for item in tail_chunks[:-1]:
+        total_offset += len(item)
+        head_chunks.append(encode_uint_256(head_length + total_offset))
+
+    return b"".join(head_chunks) + b"".join(tail_chunks)
+
+
+def encode_tuple_no_dynamic(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(len(encoders)))
+
+
+def encode_tuple_no_dynamic1(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders: Tuple["BaseEncoder"] = self.encoders
+    return encoders[0](values[0])
+
+
+def encode_tuple_no_dynamic2(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder"] = self.encoders
+    return encoders[0](values[0]) + encoders[1](values[1])
+
+
+def encode_tuple_no_dynamic3(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(3))
+
+
+def encode_tuple_no_dynamic4(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(4))
+
+
+def encode_tuple_no_dynamic5(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(5))
+
+
+def encode_tuple_no_dynamic6(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(6))
+
+
+def encode_tuple_no_dynamic7(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(7))
+
+
+def encode_tuple_no_dynamic8(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(8))
+
+
+def encode_tuple_no_dynamic9(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(9))
+
+
+def encode_tuple_no_dynamic10(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(10))
+
+
+encode_tuple_no_dynamic_funcs: Dict[
+    int, Callable[["TupleEncoder", Sequence[Any]], bytes]
+] = {
+    1: encode_tuple_no_dynamic1,
+    2: encode_tuple_no_dynamic2,
+    3: encode_tuple_no_dynamic3,
+    4: encode_tuple_no_dynamic4,
+    5: encode_tuple_no_dynamic5,
+    6: encode_tuple_no_dynamic6,
+    7: encode_tuple_no_dynamic7,
+    8: encode_tuple_no_dynamic8,
+    9: encode_tuple_no_dynamic9,
+    10: encode_tuple_no_dynamic10,
+}
+
+
 def encode_fixed(
     value: Any,
     encode_fn: Callable[[Any], bytes],
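`encode_tuple_all_dynamic` specializes the head-tail layout for tuples whose elements are all dynamic: every head slot is a 32-byte offset, so the heads fall out of a running total of tail lengths. A standalone sketch of that arithmetic, using `int.to_bytes` in place of the module's `encode_uint_256`:

# Standalone sketch of the all-dynamic head-tail layout; not the package's code.
def head_tail_all_dynamic(tails: list) -> bytes:
    head_length = 32 * len(tails)  # one 32-byte offset slot per element
    heads, total = [], 0
    for tail in tails:
        heads.append((head_length + total).to_bytes(32, "big"))
        total += len(tail)
    return b"".join(heads) + b"".join(tails)

# Two dynamic elements whose encoded tails are 64 and 32 bytes long:
out = head_tail_all_dynamic([bytes(64), bytes(32)])
assert int.from_bytes(out[0:32], "big") == 64    # first tail starts right after the head
assert int.from_bytes(out[32:64], "big") == 128  # second tail starts 64 bytes later

The numbered `encode_tuple_no_dynamic1` through `encode_tuple_no_dynamic10` variants, together with the `encode_tuple_no_dynamic_funcs` table, let the encoder dispatch on element count to a fixed-arity specialization, presumably so that mypyc can emit tighter code than the generic loop.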
faster_eth_abi/_grammar.cpython-313-x86_64-linux-musl.so
Binary file
|