faster-eth-abi 5.2.15-cp312-cp312-musllinux_1_2_x86_64.whl → 5.2.16-cp312-cp312-musllinux_1_2_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of faster-eth-abi might be problematic. See the registry's advisory page for more details.

faster_eth_abi/_codec.py CHANGED
@@ -1,3 +1,9 @@
1
+ """Internal codec helpers for encoding and decoding sequences of values using the head-tail mechanism.
2
+
3
+ Provides encode_c and decode_c functions for binary serialization and deserialization of values
4
+ according to ABI type specifications.
5
+ """
6
+
1
7
  from typing import (
2
8
  TYPE_CHECKING,
3
9
  Any,
@@ -1,3 +1,8 @@
1
+ """Private helpers for decoding logic, intended for C compilation.
2
+
3
+ This file exists because the original decoding.py is not ready to be fully compiled to C.
4
+ This module contains functions and logic that we wish to compile.
5
+ """
1
6
  from typing import (
2
7
  TYPE_CHECKING,
3
8
  Any,
@@ -19,6 +24,9 @@ from faster_eth_abi.io import (
19
24
  BytesIO,
20
25
  ContextFramesBytesIO,
21
26
  )
27
+ from faster_eth_abi.typing import (
28
+ T,
29
+ )
22
30
 
23
31
  if TYPE_CHECKING:
24
32
  from .decoding import (
@@ -35,7 +43,7 @@ if TYPE_CHECKING:
35
43
  # Helpers
36
44
  def decode_uint_256(stream: ContextFramesBytesIO) -> int:
37
45
  """
38
- This function is a faster version of decode_uint_256 in decoding.py.
46
+ A faster version of :func:`~decoding.decode_uint_256` in decoding.py.
39
47
 
40
48
  It recreates the logic from the UnsignedIntegerDecoder, but we can
41
49
  skip a lot because we know the value of many vars.
@@ -51,7 +59,7 @@ def get_value_byte_size(decoder: "FixedByteSizeDecoder") -> int:
51
59
 
52
60
 
53
61
  # HeadTailDecoder
54
- def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> Any:
62
+ def decode_head_tail(self: "HeadTailDecoder[T]", stream: ContextFramesBytesIO) -> T:
55
63
  # Decode the offset and move the stream cursor forward 32 bytes
56
64
  start_pos = decode_uint_256(stream)
57
65
  # Jump ahead to the start of the value
@@ -62,7 +70,7 @@ def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> A
62
70
  if tail_decoder is None:
63
71
  raise AssertionError("`tail_decoder` is None")
64
72
  # Decode the value
65
- value = tail_decoder(stream)
73
+ value: T = tail_decoder(stream)
66
74
  # Return the cursor
67
75
  stream.pop_frame()
68
76
 
@@ -70,7 +78,9 @@ def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> A
70
78
 
71
79
 
72
80
  # TupleDecoder
73
- def decode_tuple(self: "TupleDecoder", stream: ContextFramesBytesIO) -> Tuple[Any, ...]:
81
+ def decode_tuple(
82
+ self: "TupleDecoder[T]", stream: ContextFramesBytesIO
83
+ ) -> Tuple[T, ...]:
74
84
  # NOTE: the original implementation would do this but it's
75
85
  # kinda wasteful, so we rebuilt the logic within this function
76
86
  # validate_pointers_tuple(self, stream)
@@ -129,7 +139,8 @@ def validate_pointers_tuple(
129
139
  total_stream_length = len(stream.getbuffer())
130
140
  for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
131
141
  if not is_head_tail:
132
- # the next 32 bytes are not a pointer, so progress the stream per the decoder
142
+ # the next 32 bytes are not a pointer,
143
+ # so progress the stream per the decoder
133
144
  decoder(stream)
134
145
  else:
135
146
  # the next 32 bytes are a pointer
@@ -176,8 +187,8 @@ def validate_pointers_array(
176
187
 
177
188
  # SizedArrayDecoder
178
189
  def decode_sized_array(
179
- self: "SizedArrayDecoder", stream: ContextFramesBytesIO
180
- ) -> Tuple[Any, ...]:
190
+ self: "SizedArrayDecoder[T]", stream: ContextFramesBytesIO
191
+ ) -> Tuple[T, ...]:
181
192
  item_decoder = self.item_decoder
182
193
  if item_decoder is None:
183
194
  raise AssertionError("`item_decoder` is None")
@@ -189,8 +200,8 @@ def decode_sized_array(
189
200
 
190
201
  # DynamicArrayDecoder
191
202
  def decode_dynamic_array(
192
- self: "DynamicArrayDecoder", stream: ContextFramesBytesIO
193
- ) -> Tuple[Any, ...]:
203
+ self: "DynamicArrayDecoder[T]", stream: ContextFramesBytesIO
204
+ ) -> Tuple[T, ...]:
194
205
  array_size = decode_uint_256(stream)
195
206
  stream.push_frame(32)
196
207
  if self.item_decoder is None:
@@ -206,7 +217,7 @@ def decode_dynamic_array(
206
217
 
207
218
  # FixedByteSizeDecoder
208
219
  def read_fixed_byte_size_data_from_stream(
209
- self: "FixedByteSizeDecoder",
220
+ self: "FixedByteSizeDecoder[Any]",
210
221
  # NOTE: use BytesIO here so mypyc doesn't type-check
211
222
  # `stream` once we compile ContextFramesBytesIO.
212
223
  stream: BytesIO,
@@ -220,7 +231,7 @@ def read_fixed_byte_size_data_from_stream(
220
231
 
221
232
 
222
233
  def split_data_and_padding_fixed_byte_size(
223
- self: "FixedByteSizeDecoder",
234
+ self: "FixedByteSizeDecoder[Any]",
224
235
  raw_data: bytes,
225
236
  ) -> Tuple[bytes, bytes]:
226
237
  value_byte_size = get_value_byte_size(self)
@@ -239,8 +250,8 @@ def split_data_and_padding_fixed_byte_size(
239
250
 
240
251
 
241
252
  def validate_padding_bytes_fixed_byte_size(
242
- self: "FixedByteSizeDecoder",
243
- value: Any,
253
+ self: "FixedByteSizeDecoder[T]",
254
+ value: T,
244
255
  padding_bytes: bytes,
245
256
  ) -> None:
246
257
  if padding_bytes != get_expected_padding_bytes(self, b"\x00"):
@@ -248,11 +259,13 @@ def validate_padding_bytes_fixed_byte_size(
248
259
 
249
260
 
250
261
  _expected_padding_bytes_cache: Final[
251
- Dict["FixedByteSizeDecoder", Dict[bytes, bytes]]
262
+ Dict["FixedByteSizeDecoder[Any]", Dict[bytes, bytes]]
252
263
  ] = {}
253
264
 
254
265
 
255
- def get_expected_padding_bytes(self: "FixedByteSizeDecoder", chunk: bytes) -> bytes:
266
+ def get_expected_padding_bytes(
267
+ self: "FixedByteSizeDecoder[Any]", chunk: bytes
268
+ ) -> bytes:
256
269
  instance_cache = _expected_padding_bytes_cache.setdefault(self, {})
257
270
  expected_padding_bytes = instance_cache.get(chunk)
258
271
  if expected_padding_bytes is None:
@@ -1,3 +1,8 @@
1
+ """Private helpers for encoding logic, intended for C compilation.
2
+
3
+ This file exists because the original encoding.py is not ready to be fully compiled to C.
4
+ This module contains functions and logic that we do wish to compile.
5
+ """
1
6
  from typing import (
2
7
  TYPE_CHECKING,
3
8
  Any,
@@ -80,7 +85,7 @@ def encode_tuple_all_dynamic(self: "TupleEncoder", values: Sequence[Any]) -> byt
80
85
  validate_tuple(self, values)
81
86
  encoders = self.encoders
82
87
  tail_chunks = [encoder(value) for encoder, value in zip(encoders, values)]
83
-
88
+
84
89
  total_offset = 0
85
90
  head_length = 32 * len(encoders)
86
91
  head_chunks = [encode_uint_256(head_length)]
@@ -165,6 +170,7 @@ def encode_tuple_no_dynamic10(self: "TupleEncoder", values: Sequence[Any]) -> by
165
170
  # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
166
171
  return b"".join(encoders[i](values[i]) for i in range(10))
167
172
 
173
+
168
174
  encode_tuple_no_dynamic_funcs: Dict[
169
175
  int, Callable[["TupleEncoder", Sequence[Any]], bytes]
170
176
  ] = {
@@ -1,3 +1,8 @@
1
+ """Private helpers for ABI type string grammar and parsing, intended for C compilation.
2
+
3
+ This file exists because the original grammar.py is not ready to be fully compiled to C.
4
+ This module contains functions and logic that we do wish to compile.
5
+ """
1
6
  import re
2
7
  from typing import (
3
8
  Any,
faster_eth_abi/base.py CHANGED
@@ -1,3 +1,7 @@
1
+ """Base classes for encoder and decoder implementations.
2
+
3
+ Defines the foundational interface and validation logic for all coder classes.
4
+ """
1
5
  from typing import (
2
6
  Any,
3
7
  )