faster-eth-abi 5.2.5-cp313-cp313-macosx_11_0_arm64.whl → 5.2.20-cp313-cp313-macosx_11_0_arm64.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in that registry.

Potentially problematic release.



Files changed (41)
  1. faster_eth_abi/_codec.cpython-313-darwin.so +0 -0
  2. faster_eth_abi/_codec.py +8 -7
  3. faster_eth_abi/_decoding.cpython-313-darwin.so +0 -0
  4. faster_eth_abi/_decoding.py +299 -0
  5. faster_eth_abi/_encoding.cpython-313-darwin.so +0 -0
  6. faster_eth_abi/_encoding.py +163 -14
  7. faster_eth_abi/_grammar.cpython-313-darwin.so +0 -0
  8. faster_eth_abi/_grammar.py +375 -0
  9. faster_eth_abi/abi.cpython-313-darwin.so +0 -0
  10. faster_eth_abi/base.py +5 -1
  11. faster_eth_abi/codec.py +2675 -9
  12. faster_eth_abi/constants.cpython-313-darwin.so +0 -0
  13. faster_eth_abi/decoding.py +263 -242
  14. faster_eth_abi/encoding.py +175 -71
  15. faster_eth_abi/exceptions.py +26 -14
  16. faster_eth_abi/from_type_str.cpython-313-darwin.so +0 -0
  17. faster_eth_abi/from_type_str.py +7 -1
  18. faster_eth_abi/grammar.py +30 -325
  19. faster_eth_abi/io.py +5 -1
  20. faster_eth_abi/packed.cpython-313-darwin.so +0 -0
  21. faster_eth_abi/packed.py +4 -0
  22. faster_eth_abi/registry.py +186 -91
  23. faster_eth_abi/tools/__init__.cpython-313-darwin.so +0 -0
  24. faster_eth_abi/tools/_strategies.cpython-313-darwin.so +0 -0
  25. faster_eth_abi/tools/_strategies.py +12 -6
  26. faster_eth_abi/typing.py +4627 -0
  27. faster_eth_abi/utils/__init__.cpython-313-darwin.so +0 -0
  28. faster_eth_abi/utils/numeric.cpython-313-darwin.so +0 -0
  29. faster_eth_abi/utils/numeric.py +51 -20
  30. faster_eth_abi/utils/padding.cpython-313-darwin.so +0 -0
  31. faster_eth_abi/utils/string.cpython-313-darwin.so +0 -0
  32. faster_eth_abi/utils/validation.cpython-313-darwin.so +0 -0
  33. {faster_eth_abi-5.2.5.dist-info → faster_eth_abi-5.2.20.dist-info}/METADATA +52 -11
  34. faster_eth_abi-5.2.20.dist-info/RECORD +46 -0
  35. faster_eth_abi-5.2.20.dist-info/top_level.txt +2 -0
  36. faster_eth_abi__mypyc.cpython-313-darwin.so +0 -0
  37. c42f5c78bc058f310136__mypyc.cpython-313-darwin.so +0 -0
  38. faster_eth_abi-5.2.5.dist-info/RECORD +0 -42
  39. faster_eth_abi-5.2.5.dist-info/licenses/LICENSE +0 -21
  40. faster_eth_abi-5.2.5.dist-info/top_level.txt +0 -3
  41. {faster_eth_abi-5.2.5.dist-info → faster_eth_abi-5.2.20.dist-info}/WHEEL +0 -0
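For orientation before the decoding.py diff below: faster-eth-abi is published as a mypyc-compiled variant of eth-abi (hence the .so files above), so the decoder classes changed here are normally reached through the top-level codec rather than instantiated directly. A minimal usage sketch, assuming the package keeps eth-abi's `encode`/`decode` entry points (an assumption, not something shown in this diff):

    # Hedged sketch: assumes faster_eth_abi mirrors eth-abi's top-level API.
    from faster_eth_abi import decode, encode

    # encode() emits the head/tail layout that the decoders in this diff parse back.
    payload = encode(["uint256", "bool", "bytes"], [42, True, b"\x01\x02"])
    assert decode(["uint256", "bool", "bytes"], payload) == (42, True, b"\x01\x02")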
@@ -1,23 +1,55 @@
+"""Classes for ABI decoding logic.
+
+Implements classes and functions for deserializing binary data into Python values
+according to ABI type specifications.
+"""
 import abc
 import decimal
+from functools import (
+    cached_property,
+    lru_cache,
+)
+from types import (
+    MethodType,
+)
 from typing import (
     Any,
-    Generator,
+    Callable,
+    Final,
+    Generic,
+    Optional,
     Tuple,
+    TypeVar,
+    Union,
+    final,
 )
 
+from eth_typing import (
+    HexAddress,
+)
 from faster_eth_utils import (
     big_endian_to_int,
     to_normalized_address,
-    to_tuple,
 )
 
+from faster_eth_abi._decoding import (
+    decode_dynamic_array,
+    decode_head_tail,
+    decode_sized_array,
+    decode_tuple,
+    decoder_fn_boolean,
+    get_value_byte_size,
+    read_fixed_byte_size_data_from_stream,
+    split_data_and_padding_fixed_byte_size,
+    validate_padding_bytes_fixed_byte_size,
+    validate_padding_bytes_signed_integer,
+    validate_pointers_array,
+)
 from faster_eth_abi.base import (
     BaseCoder,
 )
 from faster_eth_abi.exceptions import (
     InsufficientDataBytes,
-    InvalidPointer,
     NonEmptyPaddingBytes,
 )
 from faster_eth_abi.from_type_str import (
@@ -27,14 +59,19 @@ from faster_eth_abi.from_type_str import (
 from faster_eth_abi.io import (
     ContextFramesBytesIO,
 )
+from faster_eth_abi.typing import (
+    T,
+)
 from faster_eth_abi.utils.numeric import (
     TEN,
     abi_decimal_context,
     ceil32,
 )
 
+TByteStr = TypeVar("TByteStr", bytes, str)
 
-class BaseDecoder(BaseCoder, metaclass=abc.ABCMeta):
+
+class BaseDecoder(BaseCoder, Generic[T], metaclass=abc.ABCMeta):
     """
     Base class for all decoder classes. Subclass this if you want to define a
     custom decoder class. Subclasses must also implement
@@ -44,18 +81,18 @@ class BaseDecoder(BaseCoder, metaclass=abc.ABCMeta):
     strict = True
 
     @abc.abstractmethod
-    def decode(self, stream: ContextFramesBytesIO) -> Any:  # pragma: no cover
+    def decode(self, stream: ContextFramesBytesIO) -> T:  # pragma: no cover
         """
         Decodes the given stream of bytes into a python value. Should raise
         :any:`exceptions.DecodingError` if a python value cannot be decoded
         from the given byte stream.
         """
 
-    def __call__(self, stream: ContextFramesBytesIO) -> Any:
+    def __call__(self, stream: ContextFramesBytesIO) -> T:
         return self.decode(stream)
 
 
-class HeadTailDecoder(BaseDecoder):
+class HeadTailDecoder(BaseDecoder[T]):
     """
     Decoder for a dynamic element of a dynamic container (a dynamic array, or a sized
     array or tuple that contains dynamic elements). A dynamic element consists of a
@@ -65,89 +102,62 @@ class HeadTailDecoder(BaseDecoder):
 
     is_dynamic = True
 
-    tail_decoder = None
-
-    def validate(self):
-        super().validate()
+    def __init__(
+        self,
+        tail_decoder: Union[  # type: ignore [type-var]
+            "HeadTailDecoder[T]",
+            "SizedArrayDecoder[T]",
+            "DynamicArrayDecoder[T]",
+            "ByteStringDecoder[T]",
+        ],
+    ) -> None:
+        super().__init__()
 
-        if self.tail_decoder is None:
+        if tail_decoder is None:
             raise ValueError("No `tail_decoder` set")
 
-    def decode(self, stream: ContextFramesBytesIO) -> Any:
-        # Decode the offset and move the stream cursor forward 32 bytes
-        start_pos = decode_uint_256(stream)
-        # Jump ahead to the start of the value
-        stream.push_frame(start_pos)
+        self.tail_decoder: Final = tail_decoder
 
-        # assertion check for mypy
-        if self.tail_decoder is None:
-            raise AssertionError("`tail_decoder` is None")
-        # Decode the value
-        value = self.tail_decoder(stream)
-        # Return the cursor
-        stream.pop_frame()
+    def decode(self, stream: ContextFramesBytesIO) -> T:
+        return decode_head_tail(self, stream)
 
-        return value
+    __call__ = decode
 
 
-class TupleDecoder(BaseDecoder):
-    decoders: Tuple[BaseDecoder, ...] = ()
+class TupleDecoder(BaseDecoder[Tuple[T, ...]]):
+    decoders: Tuple[BaseDecoder[T], ...] = ()
 
-    def __init__(self, decoders: Tuple[BaseDecoder, ...], **kwargs: Any) -> None:
-        super().__init__(**kwargs)
+    def __init__(self, decoders: Tuple[BaseDecoder[T], ...]) -> None:
+        super().__init__()
 
-        self.decoders = tuple(
+        self.decoders = decoders = tuple(
             HeadTailDecoder(tail_decoder=d) if getattr(d, "is_dynamic", False) else d
             for d in decoders
         )
 
-        self.is_dynamic = any(getattr(d, "is_dynamic", False) for d in self.decoders)
+        self.is_dynamic = any(getattr(d, "is_dynamic", False) for d in decoders)
+        self.len_of_head = sum(
+            getattr(decoder, "array_size", 1) for decoder in decoders
+        )
+        self._is_head_tail = tuple(
+            isinstance(decoder, HeadTailDecoder) for decoder in decoders
+        )
+        self._no_head_tail = not any(self._is_head_tail)
 
-    def validate(self):
+    def validate(self) -> None:
         super().validate()
 
         if self.decoders is None:
             raise ValueError("No `decoders` set")
 
+    @final
     def validate_pointers(self, stream: ContextFramesBytesIO) -> None:
-        """
-        Verify that all pointers point to a valid location in the stream.
-        """
-        current_location = stream.tell()
-        len_of_head = sum(
-            decoder.array_size if hasattr(decoder, "array_size") else 1
-            for decoder in self.decoders
-        )
-        end_of_offsets = current_location + 32 * len_of_head
-        total_stream_length = len(stream.getbuffer())
-        for decoder in self.decoders:
-            if isinstance(decoder, HeadTailDecoder):
-                # the next 32 bytes are a pointer
-                offset = decode_uint_256(stream)
-                indicated_idx = current_location + offset
-                if (
-                    indicated_idx < end_of_offsets
-                    or indicated_idx >= total_stream_length
-                ):
-                    # the pointer is indicating its data is located either within the
-                    # offsets section of the stream or beyond the end of the stream,
-                    # both of which are invalid
-                    raise InvalidPointer(
-                        "Invalid pointer in tuple at location "
-                        f"{stream.tell() - 32} in payload"
-                    )
-            else:
-                # the next 32 bytes are not a pointer, so progress the stream per
-                # the decoder
-                decoder(stream)
-        # return the stream to its original location for actual decoding
-        stream.seek(current_location)
-
-    @to_tuple  # type: ignore [misc]
-    def decode(self, stream: ContextFramesBytesIO) -> Generator[Any, None, None]:
-        self.validate_pointers(stream)
-        for decoder in self.decoders:
-            yield decoder(stream)
+        raise NotImplementedError("didnt call __init__")
+
+    def decode(self, stream: ContextFramesBytesIO) -> Tuple[T, ...]:
+        return decode_tuple(self, stream)
+
+    __call__ = decode
 
     @parse_tuple_type_str
     def from_type_str(cls, abi_type, registry):
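A note on the HeadTailDecoder rewrite above: the body moved into the `_decoding` helper `decode_head_tail`, but the wire layout it walks is unchanged — a 32-byte big-endian offset (the "head") pointing at the element's data (the "tail"), measured from the start of the enclosing frame. A standalone sketch of that layout, independent of the library's ContextFramesBytesIO (the `frame_start`/`head_pos` bookkeeping below is a stand-in for its push_frame/pop_frame machinery):

    def read_head_tail(payload: bytes, frame_start: int, head_pos: int) -> bytes:
        """Follow one head/tail pointer: read the 32-byte offset at head_pos,
        then return the length-prefixed tail it points at (a `bytes` value here)."""
        offset = int.from_bytes(payload[head_pos:head_pos + 32], "big")
        tail_pos = frame_start + offset
        length = int.from_bytes(payload[tail_pos:tail_pos + 32], "big")
        return payload[tail_pos + 32:tail_pos + 32 + length]

    # ABI encoding of a single dynamic `bytes` value b"hi":
    # one offset word (32), then the length word (2), then right-padded data.
    payload = (32).to_bytes(32, "big") + (2).to_bytes(32, "big") + b"hi".ljust(32, b"\x00")
    assert read_head_tail(payload, frame_start=0, head_pos=0) == b"hi"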
@@ -158,48 +168,57 @@ class TupleDecoder(BaseDecoder):
         return cls(decoders=decoders)
 
 
-class SingleDecoder(BaseDecoder):
+class SingleDecoder(BaseDecoder[T]):
     decoder_fn = None
 
-    def validate(self):
+    def validate(self) -> None:
         super().validate()
 
         if self.decoder_fn is None:
             raise ValueError("No `decoder_fn` set")
 
-    def validate_padding_bytes(self, value, padding_bytes):
+    def validate_padding_bytes(self, value: Any, padding_bytes: bytes) -> None:
         raise NotImplementedError("Must be implemented by subclasses")
 
-    def decode(self, stream):
+    def decode(self, stream: ContextFramesBytesIO) -> T:
         raw_data = self.read_data_from_stream(stream)
         data, padding_bytes = self.split_data_and_padding(raw_data)
-        if self.decoder_fn is None:
-            raise AssertionError("`decoder_fn` is None")
-        value = self.decoder_fn(data)
+        value = self.decoder_fn(data)  # type: ignore [misc]
         self.validate_padding_bytes(value, padding_bytes)
 
         return value
 
-    def read_data_from_stream(self, stream):
+    __call__ = decode
+
+    def read_data_from_stream(self, stream: ContextFramesBytesIO) -> bytes:
         raise NotImplementedError("Must be implemented by subclasses")
 
-    def split_data_and_padding(self, raw_data):
+    def split_data_and_padding(self, raw_data: bytes) -> Tuple[bytes, bytes]:
         return raw_data, b""
 
 
-class BaseArrayDecoder(BaseDecoder):
-    item_decoder = None
+class BaseArrayDecoder(BaseDecoder[Tuple[T, ...]]):
+    item_decoder: BaseDecoder = None
 
-    def __init__(self, **kwargs):
+    def __init__(self, **kwargs: Any) -> None:
         super().__init__(**kwargs)
 
         # Use a head-tail decoder to decode dynamic elements
-        if self.item_decoder.is_dynamic:
-            self.item_decoder = HeadTailDecoder(
-                tail_decoder=self.item_decoder,
-            )
+        item_decoder = self.item_decoder
+        if item_decoder.is_dynamic:
+            self.item_decoder = HeadTailDecoder(tail_decoder=item_decoder)
+            self.validate_pointers = MethodType(validate_pointers_array, self)
+        else:
+
+            def noop(stream: ContextFramesBytesIO, array_size: int) -> None:
+                ...
+
+            self.validate_pointers = noop
 
-    def validate(self):
+    def decode(self, stream: ContextFramesBytesIO) -> Tuple[T, ...]:
+        raise NotImplementedError  # this is a type stub
+
+    def validate(self) -> None:
         super().validate()
 
         if self.item_decoder is None:
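The new `BaseArrayDecoder.__init__` above (and `FixedByteSizeDecoder.__init__` further down) swaps behaviour in per instance by binding module-level helpers with `types.MethodType`, leaving the class-level methods as stubs that raise if `__init__` was skipped. A small sketch of that binding pattern with hypothetical names (`_read_fixed` and `FixedReader` are illustrations, not the library's helpers):

    import io
    from types import MethodType

    def _read_fixed(self, stream):  # hypothetical stand-in for a compiled helper
        return stream.read(self.data_byte_size)

    class FixedReader:
        data_byte_size = 32

        def __init__(self):
            # The instance attribute shadows the class method, so calls dispatch
            # straight to the bound helper without a class-level override.
            self.read_data_from_stream = MethodType(_read_fixed, self)

        def read_data_from_stream(self, stream):
            raise NotImplementedError("didnt call __init__")  # mirrors the stubs above

    reader = FixedReader()
    assert reader.read_data_from_stream(io.BytesIO(b"\x00" * 64)) == b"\x00" * 32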
@@ -224,151 +243,105 @@ class BaseArrayDecoder(BaseDecoder):
         """
         Verify that all pointers point to a valid location in the stream.
         """
-        if isinstance(self.item_decoder, HeadTailDecoder):
-            current_location = stream.tell()
-            end_of_offsets = current_location + 32 * array_size
-            total_stream_length = len(stream.getbuffer())
-            for _ in range(array_size):
-                offset = decode_uint_256(stream)
-                indicated_idx = current_location + offset
-                if (
-                    indicated_idx < end_of_offsets
-                    or indicated_idx >= total_stream_length
-                ):
-                    # the pointer is indicating its data is located either within the
-                    # offsets section of the stream or beyond the end of the stream,
-                    # both of which are invalid
-                    raise InvalidPointer(
-                        "Invalid pointer in array at location "
-                        f"{stream.tell() - 32} in payload"
-                    )
-            stream.seek(current_location)
-
-
-class SizedArrayDecoder(BaseArrayDecoder):
-    array_size = None
-
-    def __init__(self, **kwargs):
+        validate_pointers_array(self, stream, array_size)
+
+
+class SizedArrayDecoder(BaseArrayDecoder[T]):
+    array_size: int = None
+
+    def __init__(self, **kwargs: Any) -> None:
         super().__init__(**kwargs)
 
         self.is_dynamic = self.item_decoder.is_dynamic
 
-    @to_tuple
-    def decode(self, stream):
-        if self.item_decoder is None:
-            raise AssertionError("`item_decoder` is None")
+    def decode(self, stream: ContextFramesBytesIO) -> Tuple[T, ...]:
+        return decode_sized_array(self, stream)
 
-        self.validate_pointers(stream, self.array_size)
-        for _ in range(self.array_size):
-            yield self.item_decoder(stream)
+    __call__ = decode
 
 
-class DynamicArrayDecoder(BaseArrayDecoder):
+class DynamicArrayDecoder(BaseArrayDecoder[T]):
     # Dynamic arrays are always dynamic, regardless of their elements
     is_dynamic = True
 
-    @to_tuple
-    def decode(self, stream):
-        array_size = decode_uint_256(stream)
-        stream.push_frame(32)
-        if self.item_decoder is None:
-            raise AssertionError("`item_decoder` is None")
+    def decode(self, stream: ContextFramesBytesIO) -> Tuple[T, ...]:
+        return decode_dynamic_array(self, stream)
 
-        self.validate_pointers(stream, array_size)
-        for _ in range(array_size):
-            yield self.item_decoder(stream)
-        stream.pop_frame()
+    __call__ = decode
 
 
-class FixedByteSizeDecoder(SingleDecoder):
-    decoder_fn = None
-    value_bit_size = None
-    data_byte_size = None
-    is_big_endian = None
+class FixedByteSizeDecoder(SingleDecoder[T]):
+    decoder_fn: Callable[[bytes], T] = None
+    value_bit_size: int = None
+    data_byte_size: int = None
+    is_big_endian: bool = None
+
+    def __init__(self, **kwargs: Any) -> None:
+        super().__init__(**kwargs)
 
-    def validate(self):
+        self.read_data_from_stream = MethodType(
+            read_fixed_byte_size_data_from_stream, self
+        )
+        self.split_data_and_padding = MethodType(
+            split_data_and_padding_fixed_byte_size, self
+        )
+        self._get_value_byte_size = MethodType(get_value_byte_size, self)
+
+        # Only assign validate_padding_bytes if not overridden in subclass
+        if type(self).validate_padding_bytes is SingleDecoder.validate_padding_bytes:
+            self.validate_padding_bytes = MethodType(
+                validate_padding_bytes_fixed_byte_size, self
+            )
+
+    def validate(self) -> None:
         super().validate()
 
-        if self.value_bit_size is None:
+        value_bit_size = self.value_bit_size
+        if value_bit_size is None:
             raise ValueError("`value_bit_size` may not be None")
-        if self.data_byte_size is None:
+        data_byte_size = self.data_byte_size
+        if data_byte_size is None:
             raise ValueError("`data_byte_size` may not be None")
         if self.decoder_fn is None:
             raise ValueError("`decoder_fn` may not be None")
         if self.is_big_endian is None:
             raise ValueError("`is_big_endian` may not be None")
 
-        if self.value_bit_size % 8 != 0:
+        if value_bit_size % 8 != 0:
             raise ValueError(
-                "Invalid value bit size: {self.value_bit_size}. Must be a multiple of 8"
+                f"Invalid value bit size: {value_bit_size}. Must be a multiple of 8"
             )
 
-        if self.value_bit_size > self.data_byte_size * 8:
+        if value_bit_size > data_byte_size * 8:
             raise ValueError("Value byte size exceeds data size")
 
-    def read_data_from_stream(self, stream):
-        data = stream.read(self.data_byte_size)
-
-        if len(data) != self.data_byte_size:
-            raise InsufficientDataBytes(
-                f"Tried to read {self.data_byte_size} bytes, "
-                f"only got {len(data)} bytes."
-            )
-
-        return data
+    def read_data_from_stream(self, stream: ContextFramesBytesIO) -> bytes:
+        raise NotImplementedError("didnt call __init__")
 
-    def split_data_and_padding(self, raw_data):
-        value_byte_size = self._get_value_byte_size()
-        padding_size = self.data_byte_size - value_byte_size
+    def split_data_and_padding(self, raw_data: bytes) -> Tuple[bytes, bytes]:
+        raise NotImplementedError("didnt call __init__")
 
-        if self.is_big_endian:
-            padding_bytes = raw_data[:padding_size]
-            data = raw_data[padding_size:]
-        else:
-            data = raw_data[:value_byte_size]
-            padding_bytes = raw_data[value_byte_size:]
+    # This is unused, but it is kept in to preserve the eth-abi api
+    def _get_value_byte_size(self) -> int:
+        raise NotImplementedError("didnt call __init__")
 
-        return data, padding_bytes
 
-    def validate_padding_bytes(self, value, padding_bytes):
-        value_byte_size = self._get_value_byte_size()
-        padding_size = self.data_byte_size - value_byte_size
-
-        if padding_bytes != b"\x00" * padding_size:
-            raise NonEmptyPaddingBytes(
-                f"Padding bytes were not empty: {padding_bytes!r}"
-            )
-
-    def _get_value_byte_size(self):
-        value_byte_size = self.value_bit_size // 8
-        return value_byte_size
-
-
-class Fixed32ByteSizeDecoder(FixedByteSizeDecoder):
+class Fixed32ByteSizeDecoder(FixedByteSizeDecoder[T]):
     data_byte_size = 32
 
 
-class BooleanDecoder(Fixed32ByteSizeDecoder):
+class BooleanDecoder(Fixed32ByteSizeDecoder[bool]):
     value_bit_size = 8
     is_big_endian = True
 
-    @staticmethod
-    def decoder_fn(data):
-        if data == b"\x00":
-            return False
-        elif data == b"\x01":
-            return True
-        else:
-            raise NonEmptyPaddingBytes(
-                f"Boolean must be either 0x0 or 0x1. Got: {data!r}"
-            )
+    decoder_fn = staticmethod(decoder_fn_boolean)
 
     @parse_type_str("bool")
     def from_type_str(cls, abi_type, registry):
         return cls()
 
 
-class AddressDecoder(Fixed32ByteSizeDecoder):
+class AddressDecoder(Fixed32ByteSizeDecoder[HexAddress]):
     value_bit_size = 20 * 8
     is_big_endian = True
     decoder_fn = staticmethod(to_normalized_address)
@@ -381,8 +354,8 @@ class AddressDecoder(Fixed32ByteSizeDecoder):
 #
 # Unsigned Integer Decoders
 #
-class UnsignedIntegerDecoder(Fixed32ByteSizeDecoder):
-    decoder_fn = staticmethod(big_endian_to_int)
+class UnsignedIntegerDecoder(Fixed32ByteSizeDecoder[int]):
+    decoder_fn: "staticmethod[[bytes], int]" = staticmethod(big_endian_to_int)
     is_big_endian = True
 
     @parse_type_str("uint")
@@ -393,46 +366,74 @@ class UnsignedIntegerDecoder(Fixed32ByteSizeDecoder):
 decode_uint_256 = UnsignedIntegerDecoder(value_bit_size=256)
 
 
+class UnsignedIntegerDecoderCached(UnsignedIntegerDecoder):
+    decoder_fn: Callable[[bytes], int]
+    maxsize: Final[Optional[int]]
+
+    def __init__(self, maxsize: Optional[int] = None, **kwargs: Any) -> None:
+        super().__init__(**kwargs)
+        self.maxsize = maxsize
+        self.decoder_fn = lru_cache(maxsize=maxsize)(self.decoder_fn)
+
+
 #
 # Signed Integer Decoders
 #
-class SignedIntegerDecoder(Fixed32ByteSizeDecoder):
+class SignedIntegerDecoder(Fixed32ByteSizeDecoder[int]):
     is_big_endian = True
 
-    def decoder_fn(self, data):
-        value = big_endian_to_int(data)
-        if value >= 2 ** (self.value_bit_size - 1):
-            return value - 2**self.value_bit_size
-        else:
-            return value
+    def __init__(self, **kwargs: Any) -> None:
+        super().__init__(**kwargs)
 
-    def validate_padding_bytes(self, value, padding_bytes):
-        value_byte_size = self._get_value_byte_size()
-        padding_size = self.data_byte_size - value_byte_size
+        # Only assign validate_padding_bytes if not overridden in subclass
+        if (
+            type(self).validate_padding_bytes
+            is SignedIntegerDecoder.validate_padding_bytes
+        ):
+            self.validate_padding_bytes = MethodType(
+                validate_padding_bytes_signed_integer, self
+            )
 
-        if value >= 0:
-            expected_padding_bytes = b"\x00" * padding_size
-        else:
-            expected_padding_bytes = b"\xff" * padding_size
+    @cached_property
+    def neg_threshold(self) -> int:
+        return int(2 ** (self.value_bit_size - 1))
 
-        if padding_bytes != expected_padding_bytes:
-            raise NonEmptyPaddingBytes(
-                f"Padding bytes were not empty: {padding_bytes!r}"
-            )
+    @cached_property
+    def neg_offset(self) -> int:
+        return int(2**self.value_bit_size)
+
+    def decoder_fn(self, data: bytes) -> int:
+        value = big_endian_to_int(data)
+        if value >= self.neg_threshold:
+            value -= self.neg_offset
+        return value
+
+    def validate_padding_bytes(self, value: Any, padding_bytes: bytes) -> None:
+        return validate_padding_bytes_signed_integer(self, value, padding_bytes)
 
     @parse_type_str("int")
     def from_type_str(cls, abi_type, registry):
         return cls(value_bit_size=abi_type.sub)
 
 
+class SignedIntegerDecoderCached(SignedIntegerDecoder):
+    decoder_fn: Callable[[bytes], int]
+    maxsize: Final[Optional[int]]
+
+    def __init__(self, maxsize: Optional[int] = None, **kwargs: Any) -> None:
+        super().__init__(**kwargs)
+        self.maxsize = maxsize
+        self.decoder_fn = lru_cache(maxsize=maxsize)(self.decoder_fn)
+
+
 #
 # Bytes1..32
 #
-class BytesDecoder(Fixed32ByteSizeDecoder):
+class BytesDecoder(Fixed32ByteSizeDecoder[bytes]):
     is_big_endian = False
 
     @staticmethod
-    def decoder_fn(data):
+    def decoder_fn(data: bytes) -> bytes:
         return data
 
     @parse_type_str("bytes")
@@ -440,26 +441,31 @@ class BytesDecoder(Fixed32ByteSizeDecoder):
         return cls(value_bit_size=abi_type.sub * 8)
 
 
-class BaseFixedDecoder(Fixed32ByteSizeDecoder):
-    frac_places = None
+class BaseFixedDecoder(Fixed32ByteSizeDecoder[decimal.Decimal]):
+    frac_places: int = None
     is_big_endian = True
 
-    def validate(self):
+    @cached_property
+    def denominator(self) -> decimal.Decimal:
+        return TEN**self.frac_places
+
+    def validate(self) -> None:
         super().validate()
 
-        if self.frac_places is None:
+        frac_places = self.frac_places
+        if frac_places is None:
             raise ValueError("must specify `frac_places`")
 
-        if self.frac_places <= 0 or self.frac_places > 80:
-            raise ValueError("`frac_places` must be in range (0, 80]")
+        if frac_places <= 0 or frac_places > 80:
+            raise ValueError("`frac_places` must be in range (0, 80)")
 
 
 class UnsignedFixedDecoder(BaseFixedDecoder):
-    def decoder_fn(self, data):
+    def decoder_fn(self, data: bytes) -> decimal.Decimal:
         value = big_endian_to_int(data)
 
         with decimal.localcontext(abi_decimal_context):
-            decimal_value = decimal.Decimal(value) / TEN**self.frac_places
+            decimal_value = decimal.Decimal(value) / self.denominator
 
         return decimal_value
 
@@ -471,26 +477,41 @@ class UnsignedFixedDecoder(BaseFixedDecoder):
 
 
 class SignedFixedDecoder(BaseFixedDecoder):
-    def decoder_fn(self, data):
+    @cached_property
+    def neg_threshold(self) -> int:
+        return int(2 ** (self.value_bit_size - 1))
+
+    @cached_property
+    def neg_offset(self) -> int:
+        return int(2**self.value_bit_size)
+
+    @cached_property
+    def expected_padding_pos(self) -> bytes:
+        value_byte_size = get_value_byte_size(self)
+        padding_size = self.data_byte_size - value_byte_size
+        return b"\x00" * padding_size
+
+    @cached_property
+    def expected_padding_neg(self) -> bytes:
+        value_byte_size = get_value_byte_size(self)
+        padding_size = self.data_byte_size - value_byte_size
+        return b"\xff" * padding_size
+
+    def decoder_fn(self, data: bytes) -> decimal.Decimal:
         value = big_endian_to_int(data)
-        if value >= 2 ** (self.value_bit_size - 1):
-            signed_value = value - 2**self.value_bit_size
-        else:
-            signed_value = value
+        if value >= self.neg_threshold:
+            value -= self.neg_offset
 
         with decimal.localcontext(abi_decimal_context):
-            decimal_value = decimal.Decimal(signed_value) / TEN**self.frac_places
+            decimal_value = decimal.Decimal(value) / self.denominator
 
         return decimal_value
 
-    def validate_padding_bytes(self, value, padding_bytes):
-        value_byte_size = self._get_value_byte_size()
-        padding_size = self.data_byte_size - value_byte_size
-
+    def validate_padding_bytes(self, value: Any, padding_bytes: bytes) -> None:
         if value >= 0:
-            expected_padding_bytes = b"\x00" * padding_size
+            expected_padding_bytes = self.expected_padding_pos
         else:
-            expected_padding_bytes = b"\xff" * padding_size
+            expected_padding_bytes = self.expected_padding_neg
 
         if padding_bytes != expected_padding_bytes:
             raise NonEmptyPaddingBytes(
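On the fixed-point decoders above: `denominator`, `neg_threshold`, and the expected padding byte strings are now computed once per instance as cached properties, but the value math is unchanged — interpret the word as an integer, then scale it down by 10**frac_places under a high-precision Decimal context. A minimal sketch of the ufixed case, with a local context standing in for the library's abi_decimal_context:

    import decimal

    def decode_unsigned_fixed(data: bytes, frac_places: int) -> decimal.Decimal:
        value = int.from_bytes(data, "big")
        with decimal.localcontext() as ctx:
            ctx.prec = 128  # high-precision stand-in for abi_decimal_context
            return decimal.Decimal(value) / decimal.Decimal(10) ** frac_places

    # ufixed128x18 value 1.5 is stored as 1.5 * 10**18
    word = (15 * 10**17).to_bytes(32, "big")
    assert decode_unsigned_fixed(word, frac_places=18) == decimal.Decimal("1.5")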
@@ -507,14 +528,14 @@ class SignedFixedDecoder(BaseFixedDecoder):
 #
 # String and Bytes
 #
-class ByteStringDecoder(SingleDecoder):
+class ByteStringDecoder(SingleDecoder[TByteStr]):
     is_dynamic = True
 
     @staticmethod
-    def decoder_fn(data):
+    def decoder_fn(data: bytes) -> bytes:
         return data
 
-    def read_data_from_stream(self, stream):
+    def read_data_from_stream(self, stream: ContextFramesBytesIO) -> bytes:
         data_length = decode_uint_256(stream)
         padded_length = ceil32(data_length)
 
@@ -534,7 +555,7 @@ class ByteStringDecoder(SingleDecoder):
 
         return data[:data_length]
 
-    def validate_padding_bytes(self, value, padding_bytes):
+    def validate_padding_bytes(self, value: Any, padding_bytes: bytes) -> None:
         pass
 
     @parse_type_str("bytes")
@@ -542,22 +563,22 @@ class ByteStringDecoder(SingleDecoder):
         return cls()
 
 
-class StringDecoder(ByteStringDecoder):
-    def __init__(self, handle_string_errors="strict"):
-        self.bytes_errors = handle_string_errors
+class StringDecoder(ByteStringDecoder[str]):
+    def __init__(self, handle_string_errors: str = "strict") -> None:
+        self.bytes_errors: Final = handle_string_errors
         super().__init__()
 
     @parse_type_str("string")
     def from_type_str(cls, abi_type, registry):
         return cls()
 
-    def decode(self, stream):
+    def decode(self, stream: ContextFramesBytesIO) -> str:
         raw_data = self.read_data_from_stream(stream)
         data, padding_bytes = self.split_data_and_padding(raw_data)
-        value = self.decoder_fn(data, self.bytes_errors)
-        self.validate_padding_bytes(value, padding_bytes)
-        return value
+        return self.decoder_fn(data, self.bytes_errors)
+
+    __call__ = decode
 
     @staticmethod
-    def decoder_fn(data, handle_string_errors="strict"):
+    def decoder_fn(data: bytes, handle_string_errors: str = "strict") -> str:
         return data.decode("utf-8", errors=handle_string_errors)
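Finally, `StringDecoder.decode` above now returns the decoded value directly (the inherited padding check is a no-op for byte strings) and forwards its `bytes_errors` setting to `bytes.decode`. For anyone tuning `handle_string_errors`, the three common modes behave like this:

    raw = b"caf\xe9"  # not valid UTF-8

    assert raw.decode("utf-8", errors="replace") == "caf\ufffd"
    assert raw.decode("utf-8", errors="ignore") == "caf"
    try:
        raw.decode("utf-8", errors="strict")
    except UnicodeDecodeError:
        pass  # the default "strict" mode raises instead of substituting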