faster-eth-abi 5.2.5-cp313-cp313-macosx_11_0_arm64.whl → 5.2.20-cp313-cp313-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of faster-eth-abi might be problematic.
- faster_eth_abi/_codec.cpython-313-darwin.so +0 -0
- faster_eth_abi/_codec.py +8 -7
- faster_eth_abi/_decoding.cpython-313-darwin.so +0 -0
- faster_eth_abi/_decoding.py +299 -0
- faster_eth_abi/_encoding.cpython-313-darwin.so +0 -0
- faster_eth_abi/_encoding.py +163 -14
- faster_eth_abi/_grammar.cpython-313-darwin.so +0 -0
- faster_eth_abi/_grammar.py +375 -0
- faster_eth_abi/abi.cpython-313-darwin.so +0 -0
- faster_eth_abi/base.py +5 -1
- faster_eth_abi/codec.py +2675 -9
- faster_eth_abi/constants.cpython-313-darwin.so +0 -0
- faster_eth_abi/decoding.py +263 -242
- faster_eth_abi/encoding.py +175 -71
- faster_eth_abi/exceptions.py +26 -14
- faster_eth_abi/from_type_str.cpython-313-darwin.so +0 -0
- faster_eth_abi/from_type_str.py +7 -1
- faster_eth_abi/grammar.py +30 -325
- faster_eth_abi/io.py +5 -1
- faster_eth_abi/packed.cpython-313-darwin.so +0 -0
- faster_eth_abi/packed.py +4 -0
- faster_eth_abi/registry.py +186 -91
- faster_eth_abi/tools/__init__.cpython-313-darwin.so +0 -0
- faster_eth_abi/tools/_strategies.cpython-313-darwin.so +0 -0
- faster_eth_abi/tools/_strategies.py +12 -6
- faster_eth_abi/typing.py +4627 -0
- faster_eth_abi/utils/__init__.cpython-313-darwin.so +0 -0
- faster_eth_abi/utils/numeric.cpython-313-darwin.so +0 -0
- faster_eth_abi/utils/numeric.py +51 -20
- faster_eth_abi/utils/padding.cpython-313-darwin.so +0 -0
- faster_eth_abi/utils/string.cpython-313-darwin.so +0 -0
- faster_eth_abi/utils/validation.cpython-313-darwin.so +0 -0
- {faster_eth_abi-5.2.5.dist-info → faster_eth_abi-5.2.20.dist-info}/METADATA +52 -11
- faster_eth_abi-5.2.20.dist-info/RECORD +46 -0
- faster_eth_abi-5.2.20.dist-info/top_level.txt +2 -0
- faster_eth_abi__mypyc.cpython-313-darwin.so +0 -0
- c42f5c78bc058f310136__mypyc.cpython-313-darwin.so +0 -0
- faster_eth_abi-5.2.5.dist-info/RECORD +0 -42
- faster_eth_abi-5.2.5.dist-info/licenses/LICENSE +0 -21
- faster_eth_abi-5.2.5.dist-info/top_level.txt +0 -3
- {faster_eth_abi-5.2.5.dist-info → faster_eth_abi-5.2.20.dist-info}/WHEEL +0 -0
faster_eth_abi/_codec.cpython-313-darwin.so
Binary file
faster_eth_abi/_codec.py
CHANGED
@@ -1,9 +1,14 @@
+"""Internal codec helpers for encoding and decoding sequences of values using the head-tail mechanism.
+
+Provides encode_c and decode_c functions for binary serialization and deserialization of values
+according to ABI type specifications.
+"""
+
 from typing import (
     TYPE_CHECKING,
     Any,
     Iterable,
     Tuple,
-    cast,
 )
 
 from eth_typing import (
@@ -40,13 +45,11 @@ def encode_c(
     :returns: The head-tail encoded binary representation of the python
         values in ``args`` as values of the ABI types in ``types``.
     """
-    # validate encode types and args
-    validate_list_like_param(types, "types")
     validate_list_like_param(args, "args")
 
     encoder = self._registry.get_tuple_encoder(*types)
 
-    return encoder(args)
+    return encoder.encode(args)
@@ -72,11 +75,9 @@ def decode_c(
     :returns: A tuple of equivalent python values for the ABI values
         represented in ``data``.
     """
-    # validate decode types and data
-    validate_list_like_param(types, "types")
     validate_bytes_param(data, "data")
 
    decoder = self._registry.get_tuple_decoder(*types, strict=strict)
    stream = self.stream_class(data)

-    return
+    return decoder(stream)
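Note: the head-tail mechanism named in the new module docstring is the standard ABI layout. Each static value fills its 32-byte head slot directly; each dynamic value's head slot instead holds a 32-byte offset to its data in the tail. A minimal self-contained sketch for the type pair (uint256, bytes), with hypothetical helper names, illustration only and not faster-eth-abi code:

def encode_dynamic_bytes(blob: bytes) -> bytes:
    # ABI `bytes`: a 32-byte length word, then the data right-padded
    # to a 32-byte boundary
    padded_len = -(-len(blob) // 32) * 32
    return len(blob).to_bytes(32, "big") + blob.ljust(padded_len, b"\x00")


def encode_uint256_and_bytes(number: int, blob: bytes) -> bytes:
    # head-tail layout for (uint256, bytes): the static uint fills its
    # head slot; the dynamic bytes slot holds the offset of the tail,
    # which starts right after the 64-byte head
    head = number.to_bytes(32, "big") + (64).to_bytes(32, "big")
    return head + encode_dynamic_bytes(blob)


# e.g. encode_uint256_and_bytes(7, b"abc") yields four 32-byte words:
# 7, the offset 64, the length 3, and b"abc" right-padded to 32 bytes
assert len(encode_uint256_and_bytes(7, b"abc")) == 128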
faster_eth_abi/_decoding.cpython-313-darwin.so
Binary file

faster_eth_abi/_decoding.py
ADDED
@@ -0,0 +1,299 @@
+"""Private helpers for decoding logic, intended for C compilation.
+
+This file exists because the original decoding.py is not ready to be fully compiled to C.
+This module contains functions and logic that we wish to compile.
+"""
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Final,
+    Tuple,
+)
+
+from faster_eth_utils import (
+    big_endian_to_int,
+)
+
+from faster_eth_abi.exceptions import (
+    InsufficientDataBytes,
+    InvalidPointer,
+    NonEmptyPaddingBytes,
+)
+from faster_eth_abi.io import (
+    BytesIO,
+    ContextFramesBytesIO,
+)
+from faster_eth_abi.typing import (
+    T,
+)
+
+if TYPE_CHECKING:
+    from .decoding import (
+        BaseArrayDecoder,
+        DynamicArrayDecoder,
+        FixedByteSizeDecoder,
+        HeadTailDecoder,
+        SignedIntegerDecoder,
+        SizedArrayDecoder,
+        TupleDecoder,
+    )
+
+
+# Helpers
+def decode_uint_256(stream: ContextFramesBytesIO) -> int:
+    """
+    A faster version of :func:`~decoding.decode_uint_256` in decoding.py.
+
+    It recreates the logic from the UnsignedIntegerDecoder, but we can
+    skip a lot because we know the value of many vars.
+    """
+    # read data from stream
+    if len(data := stream.read(32)) == 32:
+        return big_endian_to_int(data)
+    raise InsufficientDataBytes(f"Tried to read 32 bytes, only got {len(data)} bytes.")
+
+
+def get_value_byte_size(decoder: "FixedByteSizeDecoder") -> int:
+    return decoder.value_bit_size // 8
+
+
+# HeadTailDecoder
+def decode_head_tail(self: "HeadTailDecoder[T]", stream: ContextFramesBytesIO) -> T:
+    # Decode the offset and move the stream cursor forward 32 bytes
+    start_pos = decode_uint_256(stream)
+    # Jump ahead to the start of the value
+    stream.push_frame(start_pos)
+
+    # assertion check for mypy
+    tail_decoder = self.tail_decoder
+    if tail_decoder is None:
+        raise AssertionError("`tail_decoder` is None")
+    # Decode the value
+    value: T = tail_decoder(stream)
+    # Return the cursor
+    stream.pop_frame()
+
+    return value
+
+
+# TupleDecoder
+def decode_tuple(
+    self: "TupleDecoder[T]", stream: ContextFramesBytesIO
+) -> Tuple[T, ...]:
+    # NOTE: the original implementation would do this but it's
+    # kinda wasteful, so we rebuilt the logic within this function
+    # validate_pointers_tuple(self, stream)
+
+    current_location = stream.tell()
+    if self._no_head_tail:
+        # TODO: if all(isinstance(d, TupleDecoder) for d in self._decoders)
+        # return tuple(decode_tuple(stream) for _ in range(len(self.decoders))
+        # and other types with compiled decode funcs
+        return tuple(decoder(stream) for decoder in self.decoders)
+
+    end_of_offsets = current_location + 32 * self.len_of_head
+    total_stream_length = len(stream.getbuffer())
+    items = []
+    for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
+        if is_head_tail:
+            # the next 32 bytes are a pointer that we should validate
+            # checkpoint the stream location so we can reset it after validation
+            step_location = stream.tell()
+
+            offset = decode_uint_256(stream)
+            indicated_idx = current_location + offset
+            if indicated_idx < end_of_offsets or indicated_idx >= total_stream_length:
+                # the pointer is indicating its data is located either within the
+                # offsets section of the stream or beyond the end of the stream,
+                # both of which are invalid
+                raise InvalidPointer(
+                    "Invalid pointer in tuple at location "
+                    f"{stream.tell() - 32} in payload"
+                )
+
+            # reset the stream so we can decode
+            stream.seek(step_location)
+
+        items.append(decoder(stream))
+
+    # return the stream to its original location for actual decoding
+    stream.seek(current_location)
+
+    return tuple(items)
+
+
+def validate_pointers_tuple(
+    self: "TupleDecoder",
+    stream: ContextFramesBytesIO,
+) -> None:
+    """
+    Verify that all pointers point to a valid location in the stream.
+    """
+    current_location = stream.tell()
+    if self._no_head_tail:
+        for decoder in self.decoders:
+            decoder(stream)
+    else:
+        end_of_offsets = current_location + 32 * self.len_of_head
+        total_stream_length = len(stream.getbuffer())
+        for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
+            if not is_head_tail:
+                # the next 32 bytes are not a pointer,
+                # so progress the stream per the decoder
+                decoder(stream)
+            else:
+                # the next 32 bytes are a pointer
+                offset = decode_uint_256(stream)
+                indicated_idx = current_location + offset
+                if (
+                    indicated_idx < end_of_offsets
+                    or indicated_idx >= total_stream_length
+                ):
+                    # the pointer is indicating its data is located either within the
+                    # offsets section of the stream or beyond the end of the stream,
+                    # both of which are invalid
+                    raise InvalidPointer(
+                        "Invalid pointer in tuple at location "
+                        f"{stream.tell() - 32} in payload"
+                    )
+    # return the stream to its original location for actual decoding
+    stream.seek(current_location)
+
+
+# BaseArrayDecoder
+def validate_pointers_array(
+    self: "BaseArrayDecoder", stream: ContextFramesBytesIO, array_size: int
+) -> None:
+    """
+    Verify that all pointers point to a valid location in the stream.
+    """
+    current_location = stream.tell()
+    end_of_offsets = current_location + 32 * array_size
+    total_stream_length = len(stream.getbuffer())
+    for _ in range(array_size):
+        offset = decode_uint_256(stream)
+        indicated_idx = current_location + offset
+        if indicated_idx < end_of_offsets or indicated_idx >= total_stream_length:
+            # the pointer is indicating its data is located either within the
+            # offsets section of the stream or beyond the end of the stream,
+            # both of which are invalid
+            raise InvalidPointer(
+                "Invalid pointer in array at location "
+                f"{stream.tell() - 32} in payload"
+            )
+    stream.seek(current_location)
+
+
+# SizedArrayDecoder
+def decode_sized_array(
+    self: "SizedArrayDecoder[T]", stream: ContextFramesBytesIO
+) -> Tuple[T, ...]:
+    item_decoder = self.item_decoder
+    if item_decoder is None:
+        raise AssertionError("`item_decoder` is None")
+
+    array_size = self.array_size
+    self.validate_pointers(stream, array_size)
+    return tuple(item_decoder(stream) for _ in range(array_size))
+
+
+# DynamicArrayDecoder
+def decode_dynamic_array(
+    self: "DynamicArrayDecoder[T]", stream: ContextFramesBytesIO
+) -> Tuple[T, ...]:
+    array_size = decode_uint_256(stream)
+    stream.push_frame(32)
+    if self.item_decoder is None:
+        raise AssertionError("`item_decoder` is None")
+
+    self.validate_pointers(stream, array_size)
+    item_decoder = self.item_decoder
+    try:
+        return tuple(item_decoder(stream) for _ in range(array_size))
+    finally:
+        stream.pop_frame()
+
+
+# FixedByteSizeDecoder
+def read_fixed_byte_size_data_from_stream(
+    self: "FixedByteSizeDecoder[Any]",
+    # NOTE: use BytesIO here so mypyc doesn't type-check
+    # `stream` once we compile ContextFramesBytesIO.
+    stream: BytesIO,
+) -> bytes:
+    data_byte_size = self.data_byte_size
+    if len(data := stream.read(data_byte_size)) == data_byte_size:
+        return data
+    raise InsufficientDataBytes(
+        f"Tried to read {data_byte_size} bytes, only got {len(data)} bytes."
+    )
+
+
+def split_data_and_padding_fixed_byte_size(
+    self: "FixedByteSizeDecoder[Any]",
+    raw_data: bytes,
+) -> Tuple[bytes, bytes]:
+    value_byte_size = get_value_byte_size(self)
+    padding_size = self.data_byte_size - value_byte_size
+
+    if self.is_big_endian:
+        if padding_size == 0:
+            return raw_data, b""
+        padding_bytes = raw_data[:padding_size]
+        data = raw_data[padding_size:]
+    else:
+        data = raw_data[:value_byte_size]
+        padding_bytes = raw_data[value_byte_size:]
+
+    return data, padding_bytes
+
+
+def validate_padding_bytes_fixed_byte_size(
+    self: "FixedByteSizeDecoder[T]",
+    value: T,
+    padding_bytes: bytes,
+) -> None:
+    if padding_bytes != get_expected_padding_bytes(self, b"\x00"):
+        raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
+
+
+_expected_padding_bytes_cache: Final[
+    Dict["FixedByteSizeDecoder[Any]", Dict[bytes, bytes]]
+] = {}
+
+
+def get_expected_padding_bytes(
+    self: "FixedByteSizeDecoder[Any]", chunk: bytes
+) -> bytes:
+    instance_cache = _expected_padding_bytes_cache.setdefault(self, {})
+    expected_padding_bytes = instance_cache.get(chunk)
+    if expected_padding_bytes is None:
+        value_byte_size = get_value_byte_size(self)
+        padding_size = self.data_byte_size - value_byte_size
+        expected_padding_bytes = chunk * padding_size
+        instance_cache[chunk] = expected_padding_bytes
+    return expected_padding_bytes
+
+
+def validate_padding_bytes_signed_integer(
+    self: "SignedIntegerDecoder",
+    value: int,
+    padding_bytes: bytes,
+) -> None:
+    if value >= 0:
+        expected_padding_bytes = get_expected_padding_bytes(self, b"\x00")
+    else:
+        expected_padding_bytes = get_expected_padding_bytes(self, b"\xff")
+
+    if padding_bytes != expected_padding_bytes:
+        raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
+
+
+# BooleanDecoder
+def decoder_fn_boolean(data: bytes) -> bool:
+    if data == b"\x00":
+        return False
+    elif data == b"\x01":
+        return True
+    raise NonEmptyPaddingBytes(f"Boolean must be either 0x0 or 0x1. Got: {data!r}")
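Note: validate_pointers_tuple, validate_pointers_array, and the inlined check in decode_tuple all enforce the same invariant: an offset word read from the head, added to the tuple's base location, must land past the offsets section and inside the payload. A standalone sketch of that bounds check against a raw bytes payload (hypothetical helper, illustration only):

def offsets_are_valid(payload: bytes, base: int, count: int) -> bool:
    # `count` 32-byte offset words start at `base`; each one, added to
    # `base`, must point past the offsets section and inside the payload
    end_of_offsets = base + 32 * count
    for i in range(count):
        word = payload[base + 32 * i : base + 32 * (i + 1)]
        if len(word) != 32:
            return False  # insufficient data for the offset word itself
        indicated_idx = base + int.from_bytes(word, "big")
        if indicated_idx < end_of_offsets or indicated_idx >= len(payload):
            return False  # points into the head section or past the end
    return True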
faster_eth_abi/_encoding.cpython-313-darwin.so
Binary file
faster_eth_abi/_encoding.py
CHANGED
@@ -1,33 +1,65 @@
-
-
-
+"""Private helpers for encoding logic, intended for C compilation.
+
+This file exists because the original encoding.py is not ready to be fully compiled to C.
+This module contains functions and logic that we do wish to compile.
+"""
 from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
+    Dict,
     List,
     Optional,
     Sequence,
+    Tuple,
     TypeVar,
 )
 
+from faster_eth_utils import (
+    is_list_like,
+)
+
+from faster_eth_abi.exceptions import (
+    ValueOutOfBounds,
+)
+
 if TYPE_CHECKING:
     from faster_eth_abi.encoding import (
         BaseEncoder,
+        TupleEncoder,
     )
 
 
 T = TypeVar("T")
 
 
-
-
-
-)
+# TupleEncoder
+def validate_tuple(self: "TupleEncoder", value: Sequence[Any]) -> None:
+    # if we check list and tuple first it compiles to much quicker C code
+    if not isinstance(value, (list, tuple)) and not is_list_like(value):
+        self.invalidate_value(
+            value,
+            msg="must be list-like object such as array or tuple",
+        )
+
+    validators = self.validators
+    if len(value) != len(validators):
+        self.invalidate_value(
+            value,
+            exc=ValueOutOfBounds,
+            msg=f"value has {len(value)} items when {len(validators)} " "were expected",
+        )
+
+    for item, validator in zip(value, validators):
+        validator(item)
+
+
+def encode_tuple(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
     raw_head_chunks: List[Optional[bytes]] = []
     tail_chunks: List[bytes] = []
-    for value, encoder in zip(values, encoders):
-        if
+    for value, encoder, is_dynamic in zip(values, self.encoders, self._is_dynamic):
+        if is_dynamic:
             raw_head_chunks.append(None)
             tail_chunks.append(encoder(value))
         else:
@@ -35,7 +67,12 @@ def encode_tuple(
         tail_chunks.append(b"")
 
     head_length = sum(32 if item is None else len(item) for item in raw_head_chunks)
-    tail_offsets =
+    tail_offsets = [0]
+    total_offset = 0
+    for item in tail_chunks[:-1]:
+        total_offset += len(item)
+        tail_offsets.append(total_offset)
+
     head_chunks = tuple(
         encode_uint_256(head_length + offset) if chunk is None else chunk
         for chunk, offset in zip(raw_head_chunks, tail_offsets)
@@ -44,6 +81,112 @@ def encode_tuple(
     return b"".join(head_chunks) + b"".join(tail_chunks)
 
 
+def encode_tuple_all_dynamic(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    tail_chunks = [encoder(value) for encoder, value in zip(encoders, values)]
+
+    total_offset = 0
+    head_length = 32 * len(encoders)
+    head_chunks = [encode_uint_256(head_length)]
+    for item in tail_chunks[:-1]:
+        total_offset += len(item)
+        head_chunks.append(encode_uint_256(head_length + total_offset))
+
+    return b"".join(head_chunks) + b"".join(tail_chunks)
+
+
+def encode_tuple_no_dynamic(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(len(encoders)))
+
+
+def encode_tuple_no_dynamic1(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders: Tuple["BaseEncoder"] = self.encoders
+    return encoders[0](values[0])
+
+
+def encode_tuple_no_dynamic2(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder"] = self.encoders
+    return encoders[0](values[0]) + encoders[1](values[1])
+
+
+def encode_tuple_no_dynamic3(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(3))
+
+
+def encode_tuple_no_dynamic4(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(4))
+
+
+def encode_tuple_no_dynamic5(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(5))
+
+
+def encode_tuple_no_dynamic6(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(6))
+
+
+def encode_tuple_no_dynamic7(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(7))
+
+
+def encode_tuple_no_dynamic8(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(8))
+
+
+def encode_tuple_no_dynamic9(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(9))
+
+
+def encode_tuple_no_dynamic10(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(10))
+
+
+encode_tuple_no_dynamic_funcs: Dict[
+    int, Callable[["TupleEncoder", Sequence[Any]], bytes]
+] = {
+    1: encode_tuple_no_dynamic1,
+    2: encode_tuple_no_dynamic2,
+    3: encode_tuple_no_dynamic3,
+    4: encode_tuple_no_dynamic4,
+    5: encode_tuple_no_dynamic5,
+    6: encode_tuple_no_dynamic6,
+    7: encode_tuple_no_dynamic7,
+    8: encode_tuple_no_dynamic8,
+    9: encode_tuple_no_dynamic9,
+    10: encode_tuple_no_dynamic10,
+}
+
+
 def encode_fixed(
     value: Any,
     encode_fn: Callable[[Any], bytes],
@@ -72,12 +215,17 @@ def encode_signed(
 def encode_elements(item_encoder: "BaseEncoder", value: Sequence[Any]) -> bytes:
     tail_chunks = tuple(item_encoder(i) for i in value)
 
-    items_are_dynamic = getattr(item_encoder, "is_dynamic", False)
+    items_are_dynamic: bool = getattr(item_encoder, "is_dynamic", False)
     if not items_are_dynamic or len(value) == 0:
         return b"".join(tail_chunks)
 
     head_length = 32 * len(value)
-    tail_offsets =
+    tail_offsets = [0]
+    total_offset = 0
+    for item in tail_chunks[:-1]:
+        total_offset += len(item)
+        tail_offsets.append(total_offset)
+
     head_chunks = tuple(
         encode_uint_256(head_length + offset) for offset in tail_offsets
     )
@@ -91,8 +239,9 @@ def encode_elements_dynamic(item_encoder: "BaseEncoder", value: Sequence[Any]) -
 
 
 def encode_uint_256(i: int) -> bytes:
-    # An optimized version of the `encode_uint_256` in `encoding.py` which
-    # We should not have any issues here
+    # An optimized version of the `encode_uint_256` in `encoding.py` which
+    # does not perform any validation. We should not have any issues here
+    # unless you're encoding really really massive iterables.
     big_endian = int_to_big_endian(i)
     return big_endian.rjust(32, b"\x00")
 
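Note: the tail_offsets loops added in encode_tuple and encode_elements compute an exclusive prefix sum of the tail chunk lengths; chunk i starts at the combined length of chunks 0 through i-1, and the first chunk always starts at offset 0. An equivalent standalone sketch (hypothetical helper, illustration only):

from itertools import accumulate

def tail_offsets(tail_chunks: list) -> list:
    # exclusive prefix sum: the offset of chunk i is the total length
    # of chunks 0..i-1; the first chunk starts at offset 0
    return [0] + list(accumulate(len(c) for c in tail_chunks[:-1]))

assert tail_offsets([b"a" * 32, b"b" * 64, b"c" * 32]) == [0, 32, 96]

The encode_tuple_no_dynamic1 through encode_tuple_no_dynamic10 variants, together with the encode_tuple_no_dynamic_funcs table, apply a common specialization pattern for compiled code: one function per small arity, selected once by tuple length, presumably so that mypyc can emit straight-line C with no generic loop for the most frequent cases.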
faster_eth_abi/_grammar.cpython-313-darwin.so
Binary file