faster-eth-abi 5.2.3 (cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- a1f8aa123fabc88e2b56__mypyc.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/__init__.py +12 -0
- faster_eth_abi/abi.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/abi.py +17 -0
- faster_eth_abi/base.py +41 -0
- faster_eth_abi/codec.py +167 -0
- faster_eth_abi/constants.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/constants.py +7 -0
- faster_eth_abi/decoding.py +563 -0
- faster_eth_abi/encoding.py +699 -0
- faster_eth_abi/exceptions.py +115 -0
- faster_eth_abi/from_type_str.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/from_type_str.py +135 -0
- faster_eth_abi/grammar.py +467 -0
- faster_eth_abi/io.py +103 -0
- faster_eth_abi/packed.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/packed.py +15 -0
- faster_eth_abi/py.typed +0 -0
- faster_eth_abi/registry.py +640 -0
- faster_eth_abi/tools/__init__.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/tools/__init__.py +3 -0
- faster_eth_abi/tools/_strategies.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/tools/_strategies.py +237 -0
- faster_eth_abi/utils/__init__.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/utils/__init__.py +0 -0
- faster_eth_abi/utils/numeric.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/utils/numeric.py +86 -0
- faster_eth_abi/utils/padding.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/utils/padding.py +22 -0
- faster_eth_abi/utils/string.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/utils/string.py +19 -0
- faster_eth_abi/utils/validation.cpython-314-i386-linux-gnu.so +0 -0
- faster_eth_abi/utils/validation.py +22 -0
- faster_eth_abi-5.2.3.dist-info/METADATA +95 -0
- faster_eth_abi-5.2.3.dist-info/RECORD +38 -0
- faster_eth_abi-5.2.3.dist-info/WHEEL +7 -0
- faster_eth_abi-5.2.3.dist-info/licenses/LICENSE +21 -0
- faster_eth_abi-5.2.3.dist-info/top_level.txt +3 -0
faster_eth_abi/decoding.py
@@ -0,0 +1,563 @@
import abc
import decimal
from typing import (
    Any,
    Generator,
    Tuple,
)

from faster_eth_utils import (
    big_endian_to_int,
    to_normalized_address,
    to_tuple,
)

from faster_eth_abi.base import (
    BaseCoder,
)
from faster_eth_abi.exceptions import (
    InsufficientDataBytes,
    InvalidPointer,
    NonEmptyPaddingBytes,
)
from faster_eth_abi.from_type_str import (
    parse_tuple_type_str,
    parse_type_str,
)
from faster_eth_abi.io import (
    ContextFramesBytesIO,
)
from faster_eth_abi.utils.numeric import (
    TEN,
    abi_decimal_context,
    ceil32,
)


class BaseDecoder(BaseCoder, metaclass=abc.ABCMeta):
    """
    Base class for all decoder classes. Subclass this if you want to define a
    custom decoder class. Subclasses must also implement
    :any:`BaseCoder.from_type_str`.
    """

    strict = True

    @abc.abstractmethod
    def decode(self, stream: ContextFramesBytesIO) -> Any:  # pragma: no cover
        """
        Decodes the given stream of bytes into a python value. Should raise
        :any:`exceptions.DecodingError` if a python value cannot be decoded
        from the given byte stream.
        """

    def __call__(self, stream: ContextFramesBytesIO) -> Any:
        return self.decode(stream)


class HeadTailDecoder(BaseDecoder):
    """
    Decoder for a dynamic element of a dynamic container (a dynamic array, or a sized
    array or tuple that contains dynamic elements). A dynamic element consists of a
    pointer, aka offset, which is located in the head section of the encoded container,
    and the actual value, which is located in the tail section of the encoding.
    """

    is_dynamic = True

    tail_decoder = None

    def validate(self):
        super().validate()

        if self.tail_decoder is None:
            raise ValueError("No `tail_decoder` set")

    def decode(self, stream: ContextFramesBytesIO) -> Any:
        # Decode the offset and move the stream cursor forward 32 bytes
        start_pos = decode_uint_256(stream)
        # Jump ahead to the start of the value
        stream.push_frame(start_pos)

        # assertion check for mypy
        if self.tail_decoder is None:
            raise AssertionError("`tail_decoder` is None")
        # Decode the value
        value = self.tail_decoder(stream)
        # Return the cursor
        stream.pop_frame()

        return value


class TupleDecoder(BaseDecoder):
    decoders: Tuple[BaseDecoder, ...] = ()

    def __init__(self, decoders: Tuple[BaseDecoder, ...], **kwargs: Any) -> None:
        super().__init__(**kwargs)

        self.decoders = tuple(
            HeadTailDecoder(tail_decoder=d) if getattr(d, "is_dynamic", False) else d
            for d in decoders
        )

        self.is_dynamic = any(getattr(d, "is_dynamic", False) for d in self.decoders)

    def validate(self):
        super().validate()

        if self.decoders is None:
            raise ValueError("No `decoders` set")

    def validate_pointers(self, stream: ContextFramesBytesIO) -> None:
        """
        Verify that all pointers point to a valid location in the stream.
        """
        current_location = stream.tell()
        len_of_head = sum(
            decoder.array_size if hasattr(decoder, "array_size") else 1
            for decoder in self.decoders
        )
        end_of_offsets = current_location + 32 * len_of_head
        total_stream_length = len(stream.getbuffer())
        for decoder in self.decoders:
            if isinstance(decoder, HeadTailDecoder):
                # the next 32 bytes are a pointer
                offset = decode_uint_256(stream)
                indicated_idx = current_location + offset
                if (
                    indicated_idx < end_of_offsets
                    or indicated_idx >= total_stream_length
                ):
                    # the pointer is indicating its data is located either within the
                    # offsets section of the stream or beyond the end of the stream,
                    # both of which are invalid
                    raise InvalidPointer(
                        "Invalid pointer in tuple at location "
                        f"{stream.tell() - 32} in payload"
                    )
            else:
                # the next 32 bytes are not a pointer, so progress the stream per
                # the decoder
                decoder(stream)
        # return the stream to its original location for actual decoding
        stream.seek(current_location)

    @to_tuple  # type: ignore [misc]
    def decode(self, stream: ContextFramesBytesIO) -> Generator[Any, None, None]:
        self.validate_pointers(stream)
        for decoder in self.decoders:
            yield decoder(stream)

    @parse_tuple_type_str
    def from_type_str(cls, abi_type, registry):
        decoders = tuple(
            registry.get_decoder(c.to_type_str()) for c in abi_type.components
        )

        return cls(decoders=decoders)


class SingleDecoder(BaseDecoder):
    decoder_fn = None

    def validate(self):
        super().validate()

        if self.decoder_fn is None:
            raise ValueError("No `decoder_fn` set")

    def validate_padding_bytes(self, value, padding_bytes):
        raise NotImplementedError("Must be implemented by subclasses")

    def decode(self, stream):
        raw_data = self.read_data_from_stream(stream)
        data, padding_bytes = self.split_data_and_padding(raw_data)
        if self.decoder_fn is None:
            raise AssertionError("`decoder_fn` is None")
        value = self.decoder_fn(data)
        self.validate_padding_bytes(value, padding_bytes)

        return value

    def read_data_from_stream(self, stream):
        raise NotImplementedError("Must be implemented by subclasses")

    def split_data_and_padding(self, raw_data):
        return raw_data, b""


class BaseArrayDecoder(BaseDecoder):
    item_decoder = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Use a head-tail decoder to decode dynamic elements
        if self.item_decoder.is_dynamic:
            self.item_decoder = HeadTailDecoder(
                tail_decoder=self.item_decoder,
            )

    def validate(self):
        super().validate()

        if self.item_decoder is None:
            raise ValueError("No `item_decoder` set")

    @parse_type_str(with_arrlist=True)
    def from_type_str(cls, abi_type, registry):
        item_decoder = registry.get_decoder(abi_type.item_type.to_type_str())

        array_spec = abi_type.arrlist[-1]
        if len(array_spec) == 1:
            # If array dimension is fixed
            return SizedArrayDecoder(
                array_size=array_spec[0],
                item_decoder=item_decoder,
            )
        else:
            # If array dimension is dynamic
            return DynamicArrayDecoder(item_decoder=item_decoder)

    def validate_pointers(self, stream: ContextFramesBytesIO, array_size: int) -> None:
        """
        Verify that all pointers point to a valid location in the stream.
        """
        if isinstance(self.item_decoder, HeadTailDecoder):
            current_location = stream.tell()
            end_of_offsets = current_location + 32 * array_size
            total_stream_length = len(stream.getbuffer())
            for _ in range(array_size):
                offset = decode_uint_256(stream)
                indicated_idx = current_location + offset
                if (
                    indicated_idx < end_of_offsets
                    or indicated_idx >= total_stream_length
                ):
                    # the pointer is indicating its data is located either within the
                    # offsets section of the stream or beyond the end of the stream,
                    # both of which are invalid
                    raise InvalidPointer(
                        "Invalid pointer in array at location "
                        f"{stream.tell() - 32} in payload"
                    )
            stream.seek(current_location)


class SizedArrayDecoder(BaseArrayDecoder):
    array_size = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.is_dynamic = self.item_decoder.is_dynamic

    @to_tuple
    def decode(self, stream):
        if self.item_decoder is None:
            raise AssertionError("`item_decoder` is None")

        self.validate_pointers(stream, self.array_size)
        for _ in range(self.array_size):
            yield self.item_decoder(stream)


class DynamicArrayDecoder(BaseArrayDecoder):
    # Dynamic arrays are always dynamic, regardless of their elements
    is_dynamic = True

    @to_tuple
    def decode(self, stream):
        array_size = decode_uint_256(stream)
        stream.push_frame(32)
        if self.item_decoder is None:
            raise AssertionError("`item_decoder` is None")

        self.validate_pointers(stream, array_size)
        for _ in range(array_size):
            yield self.item_decoder(stream)
        stream.pop_frame()


class FixedByteSizeDecoder(SingleDecoder):
    decoder_fn = None
    value_bit_size = None
    data_byte_size = None
    is_big_endian = None

    def validate(self):
        super().validate()

        if self.value_bit_size is None:
            raise ValueError("`value_bit_size` may not be None")
        if self.data_byte_size is None:
            raise ValueError("`data_byte_size` may not be None")
        if self.decoder_fn is None:
            raise ValueError("`decoder_fn` may not be None")
        if self.is_big_endian is None:
            raise ValueError("`is_big_endian` may not be None")

        if self.value_bit_size % 8 != 0:
            raise ValueError(
                f"Invalid value bit size: {self.value_bit_size}. Must be a multiple of 8"
            )

        if self.value_bit_size > self.data_byte_size * 8:
            raise ValueError("Value byte size exceeds data size")

    def read_data_from_stream(self, stream):
        data = stream.read(self.data_byte_size)

        if len(data) != self.data_byte_size:
            raise InsufficientDataBytes(
                f"Tried to read {self.data_byte_size} bytes, "
                f"only got {len(data)} bytes."
            )

        return data

    def split_data_and_padding(self, raw_data):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size

        if self.is_big_endian:
            padding_bytes = raw_data[:padding_size]
            data = raw_data[padding_size:]
        else:
            data = raw_data[:value_byte_size]
            padding_bytes = raw_data[value_byte_size:]

        return data, padding_bytes

    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size

        if padding_bytes != b"\x00" * padding_size:
            raise NonEmptyPaddingBytes(
                f"Padding bytes were not empty: {padding_bytes!r}"
            )

    def _get_value_byte_size(self):
        value_byte_size = self.value_bit_size // 8
        return value_byte_size


class Fixed32ByteSizeDecoder(FixedByteSizeDecoder):
    data_byte_size = 32


class BooleanDecoder(Fixed32ByteSizeDecoder):
    value_bit_size = 8
    is_big_endian = True

    @staticmethod
    def decoder_fn(data):
        if data == b"\x00":
            return False
        elif data == b"\x01":
            return True
        else:
            raise NonEmptyPaddingBytes(
                f"Boolean must be either 0x0 or 0x1. Got: {data!r}"
            )

    @parse_type_str("bool")
    def from_type_str(cls, abi_type, registry):
        return cls()


class AddressDecoder(Fixed32ByteSizeDecoder):
    value_bit_size = 20 * 8
    is_big_endian = True
    decoder_fn = staticmethod(to_normalized_address)

    @parse_type_str("address")
    def from_type_str(cls, abi_type, registry):
        return cls()


#
# Unsigned Integer Decoders
#
class UnsignedIntegerDecoder(Fixed32ByteSizeDecoder):
    decoder_fn = staticmethod(big_endian_to_int)
    is_big_endian = True

    @parse_type_str("uint")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)


decode_uint_256 = UnsignedIntegerDecoder(value_bit_size=256)


#
# Signed Integer Decoders
#
class SignedIntegerDecoder(Fixed32ByteSizeDecoder):
    is_big_endian = True

    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        if value >= 2 ** (self.value_bit_size - 1):
            return value - 2**self.value_bit_size
        else:
            return value

    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size

        if value >= 0:
            expected_padding_bytes = b"\x00" * padding_size
        else:
            expected_padding_bytes = b"\xff" * padding_size

        if padding_bytes != expected_padding_bytes:
            raise NonEmptyPaddingBytes(
                f"Padding bytes were not empty: {padding_bytes!r}"
            )

    @parse_type_str("int")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub)


#
# Bytes1..32
#
class BytesDecoder(Fixed32ByteSizeDecoder):
    is_big_endian = False

    @staticmethod
    def decoder_fn(data):
        return data

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls(value_bit_size=abi_type.sub * 8)


class BaseFixedDecoder(Fixed32ByteSizeDecoder):
    frac_places = None
    is_big_endian = True

    def validate(self):
        super().validate()

        if self.frac_places is None:
            raise ValueError("must specify `frac_places`")

        if self.frac_places <= 0 or self.frac_places > 80:
            raise ValueError("`frac_places` must be in range (0, 80]")


class UnsignedFixedDecoder(BaseFixedDecoder):
    def decoder_fn(self, data):
        value = big_endian_to_int(data)

        with decimal.localcontext(abi_decimal_context):
            decimal_value = decimal.Decimal(value) / TEN**self.frac_places

        return decimal_value

    @parse_type_str("ufixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub

        return cls(value_bit_size=value_bit_size, frac_places=frac_places)


class SignedFixedDecoder(BaseFixedDecoder):
    def decoder_fn(self, data):
        value = big_endian_to_int(data)
        if value >= 2 ** (self.value_bit_size - 1):
            signed_value = value - 2**self.value_bit_size
        else:
            signed_value = value

        with decimal.localcontext(abi_decimal_context):
            decimal_value = decimal.Decimal(signed_value) / TEN**self.frac_places

        return decimal_value

    def validate_padding_bytes(self, value, padding_bytes):
        value_byte_size = self._get_value_byte_size()
        padding_size = self.data_byte_size - value_byte_size

        if value >= 0:
            expected_padding_bytes = b"\x00" * padding_size
        else:
            expected_padding_bytes = b"\xff" * padding_size

        if padding_bytes != expected_padding_bytes:
            raise NonEmptyPaddingBytes(
                f"Padding bytes were not empty: {padding_bytes!r}"
            )

    @parse_type_str("fixed")
    def from_type_str(cls, abi_type, registry):
        value_bit_size, frac_places = abi_type.sub

        return cls(value_bit_size=value_bit_size, frac_places=frac_places)


#
# String and Bytes
#
class ByteStringDecoder(SingleDecoder):
    is_dynamic = True

    @staticmethod
    def decoder_fn(data):
        return data

    def read_data_from_stream(self, stream):
        data_length = decode_uint_256(stream)
        padded_length = ceil32(data_length)

        data = stream.read(padded_length)

        if self.strict:
            if len(data) < padded_length:
                raise InsufficientDataBytes(
                    f"Tried to read {padded_length} bytes, only got {len(data)} bytes"
                )

            padding_bytes = data[data_length:]
            if padding_bytes != b"\x00" * (padded_length - data_length):
                raise NonEmptyPaddingBytes(
                    f"Padding bytes were not empty: {padding_bytes!r}"
                )

        return data[:data_length]

    def validate_padding_bytes(self, value, padding_bytes):
        pass

    @parse_type_str("bytes")
    def from_type_str(cls, abi_type, registry):
        return cls()


class StringDecoder(ByteStringDecoder):
    def __init__(self, handle_string_errors="strict"):
        self.bytes_errors = handle_string_errors
        super().__init__()

    @parse_type_str("string")
    def from_type_str(cls, abi_type, registry):
        return cls()

    def decode(self, stream):
        raw_data = self.read_data_from_stream(stream)
        data, padding_bytes = self.split_data_and_padding(raw_data)
        value = self.decoder_fn(data, self.bytes_errors)
        self.validate_padding_bytes(value, padding_bytes)
        return value

    @staticmethod
    def decoder_fn(data, handle_string_errors="strict"):
        return data.decode("utf-8", errors=handle_string_errors)
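Editor's note: the sketch below is illustrative only and is not part of the packaged file. In normal use these decoder classes are obtained through the registry/codec layer (registry.py, codec.py) rather than constructed by hand, but the module itself builds `decode_uint_256` exactly this way, so the same pattern can be followed directly. It assumes `faster_eth_abi` keeps the same constructor behavior as upstream `eth-abi`, and the payload value is hypothetical.

# Illustrative sketch: decoding a single uint256 word with the classes above.
from faster_eth_abi.decoding import UnsignedIntegerDecoder
from faster_eth_abi.io import ContextFramesBytesIO

# One 32-byte big-endian word encoding the value 42 (hypothetical payload).
payload = (42).to_bytes(32, "big")

decoder = UnsignedIntegerDecoder(value_bit_size=256)
stream = ContextFramesBytesIO(payload)

# __call__ delegates to decode(); the decoder reads 32 bytes, checks padding,
# and applies big_endian_to_int.
assert decoder(stream) == 42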