faster-eth-abi 5.2.24__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- faster_eth_abi/__init__.py +17 -0
- faster_eth_abi/_codec.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/_codec.py +83 -0
- faster_eth_abi/_decoding.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/_decoding.py +372 -0
- faster_eth_abi/_encoding.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/_encoding.py +514 -0
- faster_eth_abi/_grammar.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/_grammar.py +375 -0
- faster_eth_abi/abi.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/abi.py +17 -0
- faster_eth_abi/base.py +45 -0
- faster_eth_abi/codec.py +2809 -0
- faster_eth_abi/constants.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/constants.py +7 -0
- faster_eth_abi/decoding.py +555 -0
- faster_eth_abi/encoding.py +738 -0
- faster_eth_abi/exceptions.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/exceptions.py +127 -0
- faster_eth_abi/from_type_str.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/from_type_str.py +141 -0
- faster_eth_abi/grammar.py +172 -0
- faster_eth_abi/io.py +107 -0
- faster_eth_abi/packed.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/packed.py +19 -0
- faster_eth_abi/py.typed +0 -0
- faster_eth_abi/registry.py +758 -0
- faster_eth_abi/tools/__init__.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/tools/__init__.py +3 -0
- faster_eth_abi/tools/_strategies.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/tools/_strategies.py +246 -0
- faster_eth_abi/typing.py +4627 -0
- faster_eth_abi/utils/__init__.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/utils/__init__.py +0 -0
- faster_eth_abi/utils/localcontext.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/utils/localcontext.py +49 -0
- faster_eth_abi/utils/numeric.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/utils/numeric.py +117 -0
- faster_eth_abi/utils/padding.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/utils/padding.py +22 -0
- faster_eth_abi/utils/string.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/utils/string.py +19 -0
- faster_eth_abi/utils/validation.cp311-win_amd64.pyd +0 -0
- faster_eth_abi/utils/validation.py +18 -0
- faster_eth_abi-5.2.24.dist-info/METADATA +134 -0
- faster_eth_abi-5.2.24.dist-info/RECORD +49 -0
- faster_eth_abi-5.2.24.dist-info/WHEEL +5 -0
- faster_eth_abi-5.2.24.dist-info/top_level.txt +2 -0
- faster_eth_abi__mypyc.cp311-win_amd64.pyd +0 -0
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""Top-level package API for faster-eth-abi.

Re-exports the primary codec entry points and exposes the installed
distribution's version string as ``__version__``.
"""
from importlib.metadata import (
    version as __version,
)
from typing import (
    Final,
)

from faster_eth_abi.abi import (
    decode,
    encode,
    is_encodable,
    is_encodable_type,
)

# Public API surface of the package.
__all__ = ["decode", "encode", "is_encodable", "is_encodable_type"]

# Resolved from installed package metadata, so it always matches the wheel.
__version__: Final = __version("faster-eth-abi")
|
|
Binary file
|
faster_eth_abi/_codec.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
"""Internal codec helpers for encoding and decoding sequences of values using the head-tail mechanism.
|
|
2
|
+
|
|
3
|
+
Provides encode_c and decode_c functions for binary serialization and deserialization of values
|
|
4
|
+
according to ABI type specifications.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import (
|
|
8
|
+
TYPE_CHECKING,
|
|
9
|
+
Any,
|
|
10
|
+
Iterable,
|
|
11
|
+
Tuple,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
from eth_typing import (
|
|
15
|
+
Decodable,
|
|
16
|
+
TypeStr,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
from faster_eth_abi.utils.validation import (
|
|
20
|
+
validate_bytes_param,
|
|
21
|
+
validate_list_like_param,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING:
|
|
25
|
+
from faster_eth_abi.codec import (
|
|
26
|
+
ABIDecoder,
|
|
27
|
+
ABIEncoder,
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def encode_c(
    self: "ABIEncoder",
    types: Iterable[TypeStr],
    args: Iterable[Any],
) -> bytes:
    """
    Encode the python values in ``args`` as a sequence of binary values of
    the ABI types in ``types`` via the head-tail mechanism.

    :param types: A list or tuple of string representations of the ABI types
        that will be used for encoding e.g. ``('uint256', 'bytes[]',
        '(int,int)')``
    :param args: A list or tuple of python values to be encoded.

    :returns: The head-tail encoded binary representation of the python
        values in ``args`` as values of the ABI types in ``types``.
    """
    # Reject non-list-like inputs before building any encoder machinery.
    validate_list_like_param(args, "args")

    # Delegate to a tuple encoder assembled for this exact type sequence.
    return self._registry.get_tuple_encoder(*types).encode(args)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def decode_c(
    self: "ABIDecoder",
    types: Iterable[TypeStr],
    data: Decodable,
    strict: bool = True,
) -> Tuple[Any, ...]:
    """
    Decodes the binary value ``data`` as a sequence of values of the ABI types
    in ``types`` via the head-tail mechanism into a tuple of equivalent python
    values.

    :param types: A list or tuple of string representations of the ABI types that
        will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
    :param data: The binary value to be decoded.
    :param strict: If ``False``, dynamic-type decoders will ignore validations such
        as making sure the data is padded to a multiple of 32 bytes or checking that
        padding bytes are zero / empty. ``False`` is how the Solidity ABI decoder
        currently works. However, ``True`` is the default for the faster-eth-abi
        library.

    :returns: A tuple of equivalent python values for the ABI values
        represented in ``data``.
    """
    # Fail fast on non-bytes-like payloads.
    validate_bytes_param(data, "data")

    tuple_decoder = self._registry.get_tuple_decoder(*types, strict=strict)
    # Wrap the payload in the decoder's stream type and run the decode pass.
    return tuple_decoder(self.stream_class(data))
|
|
Binary file
|
|
@@ -0,0 +1,372 @@
|
|
|
1
|
+
"""Private helpers for decoding logic, intended for C compilation.
|
|
2
|
+
|
|
3
|
+
This file exists because the original decoding.py is not ready to be fully compiled to C.
|
|
4
|
+
This module contains functions and logic that we wish to compile.
|
|
5
|
+
"""
|
|
6
|
+
import decimal
|
|
7
|
+
from typing import (
|
|
8
|
+
TYPE_CHECKING,
|
|
9
|
+
Any,
|
|
10
|
+
Dict,
|
|
11
|
+
Final,
|
|
12
|
+
Tuple,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
from faster_eth_utils import (
|
|
16
|
+
big_endian_to_int,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
from faster_eth_abi.exceptions import (
|
|
20
|
+
InsufficientDataBytes,
|
|
21
|
+
InvalidPointer,
|
|
22
|
+
NonEmptyPaddingBytes,
|
|
23
|
+
)
|
|
24
|
+
from faster_eth_abi.io import (
|
|
25
|
+
BytesIO,
|
|
26
|
+
ContextFramesBytesIO,
|
|
27
|
+
)
|
|
28
|
+
from faster_eth_abi.typing import (
|
|
29
|
+
T,
|
|
30
|
+
)
|
|
31
|
+
from faster_eth_abi.utils.localcontext import (
|
|
32
|
+
DECIMAL_CONTEXT,
|
|
33
|
+
)
|
|
34
|
+
from faster_eth_abi.utils.numeric import (
|
|
35
|
+
ceil32,
|
|
36
|
+
)
|
|
37
|
+
|
|
38
|
+
if TYPE_CHECKING:
|
|
39
|
+
from .decoding import (
|
|
40
|
+
BaseArrayDecoder,
|
|
41
|
+
ByteStringDecoder,
|
|
42
|
+
DynamicArrayDecoder,
|
|
43
|
+
FixedByteSizeDecoder,
|
|
44
|
+
HeadTailDecoder,
|
|
45
|
+
SignedFixedDecoder,
|
|
46
|
+
SignedIntegerDecoder,
|
|
47
|
+
SizedArrayDecoder,
|
|
48
|
+
StringDecoder,
|
|
49
|
+
TupleDecoder,
|
|
50
|
+
UnsignedFixedDecoder,
|
|
51
|
+
)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
Decimal: Final = decimal.Decimal
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
# Helpers
|
|
58
|
+
def decode_uint_256(stream: ContextFramesBytesIO) -> int:
    """
    A faster version of :func:`~decoding.decode_uint_256` in decoding.py.

    It recreates the logic from the UnsignedIntegerDecoder, but we can
    skip a lot because we know the value of many vars.
    """
    # One 32-byte big-endian word; anything shorter means truncated input.
    word = stream.read(32)
    if len(word) != 32:
        raise InsufficientDataBytes(
            f"Tried to read 32 bytes, only got {len(word)} bytes."
        )
    return big_endian_to_int(word)
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def get_value_byte_size(decoder: "FixedByteSizeDecoder") -> int:
    """Return the decoder's value width in whole bytes (bits // 8)."""
    bits = decoder.value_bit_size
    return bits // 8
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
# HeadTailDecoder
|
|
76
|
+
def decode_head_tail(self: "HeadTailDecoder[T]", stream: ContextFramesBytesIO) -> T:
    """Follow a 32-byte head pointer and decode the tail value it targets."""
    # The head word is an offset to where the value actually lives;
    # reading it advances the cursor past the head slot.
    offset = decode_uint_256(stream)
    # Re-anchor the stream at the start of the tail data.
    stream.push_frame(offset)

    # assertion check for mypy
    tail_decoder = self.tail_decoder
    if tail_decoder is None:
        raise AssertionError("`tail_decoder` is None")

    result: T = tail_decoder(stream)
    # Restore the cursor to just after the head slot.
    stream.pop_frame()
    return result
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
# TupleDecoder
|
|
95
|
+
def decode_tuple(
    self: "TupleDecoder[T]", stream: ContextFramesBytesIO
) -> Tuple[T, ...]:
    """Decode one value per registered sub-decoder from *stream*.

    Pointer validation is fused into the decoding loop rather than done
    in a separate pre-pass, to avoid walking the head section twice.
    """
    # NOTE: the original implementation would do this but it's
    # kinda wasteful, so we rebuilt the logic within this function
    # validate_pointers_tuple(self, stream)

    current_location = stream.tell()
    if self._no_head_tail:
        # No dynamic members: every element is encoded inline, so there
        # are no pointers to validate -- decode sequentially and return.
        # TODO: if all(isinstance(d, TupleDecoder) for d in self._decoders)
        # return tuple(decode_tuple(stream) for _ in range(len(self.decoders))
        # and other types with compiled decode funcs
        return tuple(decoder(stream) for decoder in self.decoders)

    # The head section is one 32-byte slot per head entry; pointers must
    # land strictly after it and strictly inside the buffer.
    end_of_offsets = current_location + 32 * self.len_of_head
    total_stream_length = len(stream.getbuffer())
    items = []
    for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
        if is_head_tail:
            # the next 32 bytes are a pointer that we should validate
            # checkpoint the stream location so we can reset it after validation
            step_location = stream.tell()

            offset = decode_uint_256(stream)
            indicated_idx = current_location + offset
            if indicated_idx < end_of_offsets or indicated_idx >= total_stream_length:
                # the pointer is indicating its data is located either within the
                # offsets section of the stream or beyond the end of the stream,
                # both of which are invalid
                raise InvalidPointer(
                    "Invalid pointer in tuple at location "
                    f"{stream.tell() - 32} in payload"
                )

            # reset the stream so we can decode
            stream.seek(step_location)

        items.append(decoder(stream))

    # return the stream to its original location for actual decoding
    stream.seek(current_location)

    return tuple(items)
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def validate_pointers_tuple(
    self: "TupleDecoder",
    stream: ContextFramesBytesIO,
) -> None:
    """
    Verify that all pointers point to a valid location in the stream.

    The stream cursor is restored to its starting position on success.
    """
    anchor = stream.tell()

    if self._no_head_tail:
        # No pointers at all: just advance through each static element.
        for decoder in self.decoders:
            decoder(stream)
        stream.seek(anchor)
        return

    # Pointers must target data after the head section and inside the buffer.
    end_of_offsets = anchor + 32 * self.len_of_head
    stream_len = len(stream.getbuffer())
    for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
        if not is_head_tail:
            # Static element: let its decoder advance the cursor.
            decoder(stream)
            continue

        # Pointer element: the next word is an offset from the tuple start.
        target = anchor + decode_uint_256(stream)
        if target < end_of_offsets or target >= stream_len:
            # the pointer is indicating its data is located either within the
            # offsets section of the stream or beyond the end of the stream,
            # both of which are invalid
            raise InvalidPointer(
                "Invalid pointer in tuple at location "
                f"{stream.tell() - 32} in payload"
            )

    # return the stream to its original location for actual decoding
    stream.seek(anchor)
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
# BaseArrayDecoder
|
|
179
|
+
def validate_pointers_array(
    self: "BaseArrayDecoder", stream: ContextFramesBytesIO, array_size: int
) -> None:
    """
    Verify that all pointers point to a valid location in the stream.

    Leaves the stream cursor where it started so the real decode pass
    can re-read the head section.
    """
    anchor = stream.tell()
    head_end = anchor + 32 * array_size
    stream_len = len(stream.getbuffer())
    for _ in range(array_size):
        target = anchor + decode_uint_256(stream)
        # A pointer may not target the offsets section itself, nor any
        # position at or beyond the end of the payload.
        if not (head_end <= target < stream_len):
            raise InvalidPointer(
                "Invalid pointer in array at location "
                f"{stream.tell() - 32} in payload"
            )
    stream.seek(anchor)
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
# SizedArrayDecoder
|
|
203
|
+
def decode_sized_array(
    self: "SizedArrayDecoder[T]", stream: ContextFramesBytesIO
) -> Tuple[T, ...]:
    """Decode a fixed-length array of ``self.array_size`` elements."""
    # assertion check for mypy
    decoder = self.item_decoder
    if decoder is None:
        raise AssertionError("`item_decoder` is None")

    count = self.array_size
    # Dynamic items are preceded by head pointers that must be validated
    # before the element-by-element decode.
    if decoder.is_dynamic:
        validate_pointers_array(self, stream, count)

    return tuple(decoder(stream) for _ in range(count))
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
# DynamicArrayDecoder
|
|
219
|
+
def decode_dynamic_array(
    self: "DynamicArrayDecoder[T]", stream: ContextFramesBytesIO
) -> Tuple[T, ...]:
    """Decode a dynamically-sized array: a 32-byte length word followed by
    the encoded items.

    :param stream: the positioned payload stream.
    :returns: a tuple of decoded element values.
    :raises AssertionError: if no item decoder has been configured.
    """
    # Assertion check for mypy. The original code re-checked
    # `item_decoder is None` a second time after reading the length word;
    # that branch was unreachable dead code, so the check is done once here.
    item_decoder = self.item_decoder
    if item_decoder is None:
        raise AssertionError("`item_decoder` is None")

    # First word is the element count; element offsets are relative to the
    # position just after it, hence the 32-byte frame.
    array_size = decode_uint_256(stream)
    stream.push_frame(32)

    # Dynamic elements are preceded by head pointers that must be validated.
    if item_decoder.is_dynamic:
        validate_pointers_array(self, stream, array_size)

    try:
        return tuple(item_decoder(stream) for _ in range(array_size))
    finally:
        # Always restore the frame, even if an element decoder raises.
        stream.pop_frame()
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
# FixedByteSizeDecoder
|
|
242
|
+
def read_fixed_byte_size_data_from_stream(
    self: "FixedByteSizeDecoder[Any]",
    # NOTE: use BytesIO here so mypyc doesn't type-check
    # `stream` once we compile ContextFramesBytesIO.
    stream: BytesIO,
) -> bytes:
    """Read exactly ``self.data_byte_size`` bytes from *stream* or raise."""
    expected = self.data_byte_size
    chunk = stream.read(expected)
    if len(chunk) != expected:
        # A short read means the payload was truncated.
        raise InsufficientDataBytes(
            f"Tried to read {expected} bytes, only got {len(chunk)} bytes."
        )
    return chunk
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def split_data_and_padding_fixed_byte_size(
    self: "FixedByteSizeDecoder[Any]",
    raw_data: bytes,
) -> Tuple[bytes, bytes]:
    """Split *raw_data* into ``(value_bytes, padding_bytes)``.

    Big-endian values are left-padded (padding precedes the value);
    little-endian values are right-padded (value precedes the padding).
    """
    # Width of the actual value; the remainder of the word is padding.
    value_byte_size = self.value_bit_size // 8
    padding_size = self.data_byte_size - value_byte_size

    if not self.is_big_endian:
        # Little-endian: value first, padding after.
        return raw_data[:value_byte_size], raw_data[value_byte_size:]

    if padding_size == 0:
        return raw_data, b""

    # Big-endian: padding precedes the value.
    return raw_data[padding_size:], raw_data[:padding_size]
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
def validate_padding_bytes_fixed_byte_size(
    self: "FixedByteSizeDecoder[T]",
    value: T,
    padding_bytes: bytes,
) -> None:
    """Raise ``NonEmptyPaddingBytes`` unless the padding is all zero bytes."""
    expected = get_expected_padding_bytes(self, b"\x00")
    if padding_bytes != expected:
        raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
# Memoized padding strings, keyed first by decoder instance and then by the
# single-byte fill chunk (e.g. b"\x00" or b"\xff").
# NOTE(review): keying on decoder instances keeps them alive for the life of
# the process -- confirm decoders are long-lived singletons before relying
# on this cache growing unbounded being acceptable.
_expected_padding_bytes_cache: Final[
    Dict["FixedByteSizeDecoder[Any]", Dict[bytes, bytes]]
] = {}
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def get_expected_padding_bytes(
    self: "FixedByteSizeDecoder[Any]", chunk: bytes
) -> bytes:
    """Return (and memoize) the padding string ``chunk * padding_size``."""
    per_instance = _expected_padding_bytes_cache.setdefault(self, {})
    cached = per_instance.get(chunk)
    if cached is not None:
        return cached

    # Cache miss: compute the padding width and materialize the string once.
    padding_size = self.data_byte_size - get_value_byte_size(self)
    padding = chunk * padding_size
    per_instance[chunk] = padding
    return padding
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
def validate_padding_bytes_signed_integer(
    self: "SignedIntegerDecoder",
    value: int,
    padding_bytes: bytes,
) -> None:
    """Check sign-extension padding: 0x00 fill for non-negative values,
    0xff fill for negative ones."""
    fill = b"\x00" if value >= 0 else b"\xff"
    if padding_bytes != get_expected_padding_bytes(self, fill):
        raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
|
|
314
|
+
|
|
315
|
+
|
|
316
|
+
# BooleanDecoder
|
|
317
|
+
def decoder_fn_boolean(data: bytes) -> bool:
    """Map a canonical boolean byte to ``bool``; anything other than
    0x00 / 0x01 is rejected."""
    if data == b"\x01":
        return True
    if data == b"\x00":
        return False
    raise NonEmptyPaddingBytes(f"Boolean must be either 0x0 or 0x1. Got: {data!r}")
|
|
323
|
+
|
|
324
|
+
|
|
325
|
+
# UnsignedFixedDecoder
def decode_unsigned_fixed(self: "UnsignedFixedDecoder", data: bytes) -> decimal.Decimal:
    """Decode *data* as an unsigned fixed-point number.

    Interprets the big-endian integer and scales it down by the decoder's
    denominator under the shared decimal context.
    """
    raw = big_endian_to_int(data)
    with DECIMAL_CONTEXT:
        return Decimal(raw) / self.denominator
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
# SignedFixedDecoder
|
|
336
|
+
def decode_signed_fixed(self: "SignedFixedDecoder", data: bytes) -> decimal.Decimal:
    """Decode *data* as a signed fixed-point number.

    Values at or above ``neg_threshold`` encode negatives (two's-complement
    style), so they are shifted down by ``neg_offset`` before scaling.
    """
    raw = big_endian_to_int(data)
    if raw >= self.neg_threshold:
        raw -= self.neg_offset

    with DECIMAL_CONTEXT:
        return Decimal(raw) / self.denominator
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
# ByteStringDecoder
|
|
348
|
+
def read_bytestring_from_stream(self: "ByteStringDecoder", stream: ContextFramesBytesIO) -> bytes:
    """Read a length-prefixed byte string from *stream*.

    In strict mode the payload must contain the full 32-byte-aligned
    encoding and the trailing padding must be all zeros; in lax mode
    short reads and dirty padding are tolerated (Solidity-decoder style).
    """
    data_length = decode_uint_256(stream)
    padded_length = ceil32(data_length)

    raw = stream.read(padded_length)

    if self.strict:
        if len(raw) < padded_length:
            raise InsufficientDataBytes(
                f"Tried to read {padded_length} bytes, only got {len(raw)} bytes"
            )

        tail = raw[data_length:]
        if tail != b"\x00" * (padded_length - data_length):
            raise NonEmptyPaddingBytes(
                f"Padding bytes were not empty: {tail!r}"
            )

    return raw[:data_length]
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
# StringDecoder
|
|
370
|
+
def decode_string(self: "StringDecoder", stream: ContextFramesBytesIO) -> str:
    """Decode a length-prefixed byte string and convert it to ``str``
    via the decoder's configured ``decoder_fn`` and ``bytes_errors``."""
    raw = read_bytestring_from_stream(self, stream)
    return self.decoder_fn(raw, self.bytes_errors)
|
|
Binary file
|