faster-eth-abi 5.2.8__cp314-cp314t-win_amd64.whl → 5.2.19__cp314-cp314t-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of faster-eth-abi has been flagged as potentially problematic; consult the registry's advisory page for details.

Files changed (41)
  1. faster_eth_abi/_codec.cp314t-win_amd64.pyd +0 -0
  2. faster_eth_abi/_codec.py +7 -5
  3. faster_eth_abi/_decoding.cp314t-win_amd64.pyd +0 -0
  4. faster_eth_abi/_decoding.py +212 -30
  5. faster_eth_abi/_encoding.cp314t-win_amd64.pyd +0 -0
  6. faster_eth_abi/_encoding.py +159 -11
  7. faster_eth_abi/_grammar.cp314t-win_amd64.pyd +0 -0
  8. faster_eth_abi/_grammar.py +375 -0
  9. faster_eth_abi/abi.cp314t-win_amd64.pyd +0 -0
  10. faster_eth_abi/base.py +5 -1
  11. faster_eth_abi/codec.py +2675 -9
  12. faster_eth_abi/constants.cp314t-win_amd64.pyd +0 -0
  13. faster_eth_abi/decoding.py +214 -176
  14. faster_eth_abi/encoding.py +112 -38
  15. faster_eth_abi/exceptions.py +26 -14
  16. faster_eth_abi/from_type_str.cp314t-win_amd64.pyd +0 -0
  17. faster_eth_abi/from_type_str.py +7 -1
  18. faster_eth_abi/grammar.py +30 -326
  19. faster_eth_abi/io.py +5 -1
  20. faster_eth_abi/packed.cp314t-win_amd64.pyd +0 -0
  21. faster_eth_abi/packed.py +4 -0
  22. faster_eth_abi/registry.py +186 -91
  23. faster_eth_abi/tools/__init__.cp314t-win_amd64.pyd +0 -0
  24. faster_eth_abi/tools/_strategies.cp314t-win_amd64.pyd +0 -0
  25. faster_eth_abi/tools/_strategies.py +12 -6
  26. faster_eth_abi/typing.py +4627 -0
  27. faster_eth_abi/utils/__init__.cp314t-win_amd64.pyd +0 -0
  28. faster_eth_abi/utils/numeric.cp314t-win_amd64.pyd +0 -0
  29. faster_eth_abi/utils/numeric.py +51 -20
  30. faster_eth_abi/utils/padding.cp314t-win_amd64.pyd +0 -0
  31. faster_eth_abi/utils/string.cp314t-win_amd64.pyd +0 -0
  32. faster_eth_abi/utils/validation.cp314t-win_amd64.pyd +0 -0
  33. {faster_eth_abi-5.2.8.dist-info → faster_eth_abi-5.2.19.dist-info}/METADATA +38 -14
  34. faster_eth_abi-5.2.19.dist-info/RECORD +46 -0
  35. faster_eth_abi-5.2.19.dist-info/top_level.txt +2 -0
  36. faster_eth_abi__mypyc.cp314t-win_amd64.pyd +0 -0
  37. 76f9a3652d4d2667c55c__mypyc.cp314t-win_amd64.pyd +0 -0
  38. faster_eth_abi-5.2.8.dist-info/RECORD +0 -44
  39. faster_eth_abi-5.2.8.dist-info/licenses/LICENSE +0 -21
  40. faster_eth_abi-5.2.8.dist-info/top_level.txt +0 -2
  41. {faster_eth_abi-5.2.8.dist-info → faster_eth_abi-5.2.19.dist-info}/WHEEL +0 -0
Binary file
faster_eth_abi/_codec.py CHANGED
@@ -1,3 +1,9 @@
1
+ """Internal codec helpers for encoding and decoding sequences of values using the head-tail mechanism.
2
+
3
+ Provides encode_c and decode_c functions for binary serialization and deserialization of values
4
+ according to ABI type specifications.
5
+ """
6
+
1
7
  from typing import (
2
8
  TYPE_CHECKING,
3
9
  Any,
@@ -39,13 +45,11 @@ def encode_c(
39
45
  :returns: The head-tail encoded binary representation of the python
40
46
  values in ``args`` as values of the ABI types in ``types``.
41
47
  """
42
- # validate encode types and args
43
- validate_list_like_param(types, "types")
44
48
  validate_list_like_param(args, "args")
45
49
 
46
50
  encoder = self._registry.get_tuple_encoder(*types)
47
51
 
48
- return encoder(args)
52
+ return encoder.encode(args)
49
53
 
50
54
 
51
55
  def decode_c(
@@ -71,8 +75,6 @@ def decode_c(
71
75
  :returns: A tuple of equivalent python values for the ABI values
72
76
  represented in ``data``.
73
77
  """
74
- # validate decode types and data
75
- validate_list_like_param(types, "types")
76
78
  validate_bytes_param(data, "data")
77
79
 
78
80
  decoder = self._registry.get_tuple_decoder(*types, strict=strict)
@@ -1,54 +1,65 @@
1
+ """Private helpers for decoding logic, intended for C compilation.
2
+
3
+ This file exists because the original decoding.py is not ready to be fully compiled to C.
4
+ This module contains functions and logic that we wish to compile.
5
+ """
1
6
  from typing import (
2
7
  TYPE_CHECKING,
3
8
  Any,
4
- Optional,
9
+ Dict,
10
+ Final,
5
11
  Tuple,
6
12
  )
7
13
 
14
+ from faster_eth_utils import (
15
+ big_endian_to_int,
16
+ )
17
+
8
18
  from faster_eth_abi.exceptions import (
9
19
  InsufficientDataBytes,
20
+ InvalidPointer,
21
+ NonEmptyPaddingBytes,
10
22
  )
11
23
  from faster_eth_abi.io import (
12
24
  BytesIO,
13
25
  ContextFramesBytesIO,
14
26
  )
27
+ from faster_eth_abi.typing import (
28
+ T,
29
+ )
15
30
 
16
31
  if TYPE_CHECKING:
17
32
  from .decoding import (
33
+ BaseArrayDecoder,
18
34
  DynamicArrayDecoder,
19
35
  FixedByteSizeDecoder,
20
36
  HeadTailDecoder,
37
+ SignedIntegerDecoder,
21
38
  SizedArrayDecoder,
22
39
  TupleDecoder,
23
- UnsignedIntegerDecoder,
24
40
  )
25
41
 
26
42
 
27
- _UINT256_DECODER: Optional["UnsignedIntegerDecoder"] = None
28
-
29
-
30
- def __set_uint256_decoder() -> "UnsignedIntegerDecoder":
31
- # this helper breaks a circular dependency on the non-compiled decoding module
32
- from . import (
33
- decoding,
34
- )
35
-
36
- global _UINT256_DECODER
37
- _UINT256_DECODER = decoding.decode_uint_256
43
+ # Helpers
44
+ def decode_uint_256(stream: ContextFramesBytesIO) -> int:
45
+ """
46
+ A faster version of :func:`~decoding.decode_uint_256` in decoding.py.
38
47
 
39
- return _UINT256_DECODER
48
+ It recreates the logic from the UnsignedIntegerDecoder, but we can
49
+ skip a lot because we know the value of many vars.
50
+ """
51
+ # read data from stream
52
+ if len(data := stream.read(32)) == 32:
53
+ return big_endian_to_int(data)
54
+ raise InsufficientDataBytes(f"Tried to read 32 bytes, only got {len(data)} bytes.")
40
55
 
41
56
 
42
- def decode_uint_256(stream: ContextFramesBytesIO) -> int:
43
- decoder = _UINT256_DECODER
44
- if decoder is None:
45
- decoder = __set_uint256_decoder()
46
- decoded: int = decoder(stream)
47
- return decoded
57
+ def get_value_byte_size(decoder: "FixedByteSizeDecoder") -> int:
58
+ return decoder.value_bit_size // 8
48
59
 
49
60
 
50
61
  # HeadTailDecoder
51
- def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> Any:
62
+ def decode_head_tail(self: "HeadTailDecoder[T]", stream: ContextFramesBytesIO) -> T:
52
63
  # Decode the offset and move the stream cursor forward 32 bytes
53
64
  start_pos = decode_uint_256(stream)
54
65
  # Jump ahead to the start of the value
@@ -59,7 +70,7 @@ def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> A
59
70
  if tail_decoder is None:
60
71
  raise AssertionError("`tail_decoder` is None")
61
72
  # Decode the value
62
- value = tail_decoder(stream)
73
+ value: T = tail_decoder(stream)
63
74
  # Return the cursor
64
75
  stream.pop_frame()
65
76
 
@@ -67,15 +78,117 @@ def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> A
67
78
 
68
79
 
69
80
  # TupleDecoder
70
- def decode_tuple(self: "TupleDecoder", stream: ContextFramesBytesIO) -> Tuple[Any, ...]:
71
- self.validate_pointers(stream)
72
- return tuple(decoder(stream) for decoder in self.decoders)
81
+ def decode_tuple(
82
+ self: "TupleDecoder[T]", stream: ContextFramesBytesIO
83
+ ) -> Tuple[T, ...]:
84
+ # NOTE: the original implementation would do this but it's
85
+ # kinda wasteful, so we rebuilt the logic within this function
86
+ # validate_pointers_tuple(self, stream)
87
+
88
+ current_location = stream.tell()
89
+ if self._no_head_tail:
90
+ # TODO: if all(isinstance(d, TupleDecoder) for d in self._decoders)
91
+ # return tuple(decode_tuple(stream) for _ in range(len(self.decoders))
92
+ # and other types with compiled decode funcs
93
+ return tuple(decoder(stream) for decoder in self.decoders)
94
+
95
+ end_of_offsets = current_location + 32 * self.len_of_head
96
+ total_stream_length = len(stream.getbuffer())
97
+ items = []
98
+ for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
99
+ if is_head_tail:
100
+ # the next 32 bytes are a pointer that we should validate
101
+ # checkpoint the stream location so we can reset it after validation
102
+ step_location = stream.tell()
103
+
104
+ offset = decode_uint_256(stream)
105
+ indicated_idx = current_location + offset
106
+ if indicated_idx < end_of_offsets or indicated_idx >= total_stream_length:
107
+ # the pointer is indicating its data is located either within the
108
+ # offsets section of the stream or beyond the end of the stream,
109
+ # both of which are invalid
110
+ raise InvalidPointer(
111
+ "Invalid pointer in tuple at location "
112
+ f"{stream.tell() - 32} in payload"
113
+ )
114
+
115
+ # reset the stream so we can decode
116
+ stream.seek(step_location)
117
+
118
+ items.append(decoder(stream))
119
+
120
+ # return the stream to its original location for actual decoding
121
+ stream.seek(current_location)
122
+
123
+ return tuple(items)
124
+
125
+
126
+ def validate_pointers_tuple(
127
+ self: "TupleDecoder",
128
+ stream: ContextFramesBytesIO,
129
+ ) -> None:
130
+ """
131
+ Verify that all pointers point to a valid location in the stream.
132
+ """
133
+ current_location = stream.tell()
134
+ if self._no_head_tail:
135
+ for decoder in self.decoders:
136
+ decoder(stream)
137
+ else:
138
+ end_of_offsets = current_location + 32 * self.len_of_head
139
+ total_stream_length = len(stream.getbuffer())
140
+ for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
141
+ if not is_head_tail:
142
+ # the next 32 bytes are not a pointer,
143
+ # so progress the stream per the decoder
144
+ decoder(stream)
145
+ else:
146
+ # the next 32 bytes are a pointer
147
+ offset = decode_uint_256(stream)
148
+ indicated_idx = current_location + offset
149
+ if (
150
+ indicated_idx < end_of_offsets
151
+ or indicated_idx >= total_stream_length
152
+ ):
153
+ # the pointer is indicating its data is located either within the
154
+ # offsets section of the stream or beyond the end of the stream,
155
+ # both of which are invalid
156
+ raise InvalidPointer(
157
+ "Invalid pointer in tuple at location "
158
+ f"{stream.tell() - 32} in payload"
159
+ )
160
+ # return the stream to its original location for actual decoding
161
+ stream.seek(current_location)
162
+
163
+
164
+ # BaseArrayDecoder
165
+ def validate_pointers_array(
166
+ self: "BaseArrayDecoder", stream: ContextFramesBytesIO, array_size: int
167
+ ) -> None:
168
+ """
169
+ Verify that all pointers point to a valid location in the stream.
170
+ """
171
+ current_location = stream.tell()
172
+ end_of_offsets = current_location + 32 * array_size
173
+ total_stream_length = len(stream.getbuffer())
174
+ for _ in range(array_size):
175
+ offset = decode_uint_256(stream)
176
+ indicated_idx = current_location + offset
177
+ if indicated_idx < end_of_offsets or indicated_idx >= total_stream_length:
178
+ # the pointer is indicating its data is located either within the
179
+ # offsets section of the stream or beyond the end of the stream,
180
+ # both of which are invalid
181
+ raise InvalidPointer(
182
+ "Invalid pointer in array at location "
183
+ f"{stream.tell() - 32} in payload"
184
+ )
185
+ stream.seek(current_location)
73
186
 
74
187
 
75
188
  # SizedArrayDecoder
76
189
  def decode_sized_array(
77
- self: "SizedArrayDecoder", stream: ContextFramesBytesIO
78
- ) -> Tuple[Any, ...]:
190
+ self: "SizedArrayDecoder[T]", stream: ContextFramesBytesIO
191
+ ) -> Tuple[T, ...]:
79
192
  item_decoder = self.item_decoder
80
193
  if item_decoder is None:
81
194
  raise AssertionError("`item_decoder` is None")
@@ -87,8 +200,8 @@ def decode_sized_array(
87
200
 
88
201
  # DynamicArrayDecoder
89
202
  def decode_dynamic_array(
90
- self: "DynamicArrayDecoder", stream: ContextFramesBytesIO
91
- ) -> Tuple[Any, ...]:
203
+ self: "DynamicArrayDecoder[T]", stream: ContextFramesBytesIO
204
+ ) -> Tuple[T, ...]:
92
205
  array_size = decode_uint_256(stream)
93
206
  stream.push_frame(32)
94
207
  if self.item_decoder is None:
@@ -104,7 +217,7 @@ def decode_dynamic_array(
104
217
 
105
218
  # FixedByteSizeDecoder
106
219
  def read_fixed_byte_size_data_from_stream(
107
- self: "FixedByteSizeDecoder",
220
+ self: "FixedByteSizeDecoder[Any]",
108
221
  # NOTE: use BytesIO here so mypyc doesn't type-check
109
222
  # `stream` once we compile ContextFramesBytesIO.
110
223
  stream: BytesIO,
@@ -115,3 +228,72 @@ def read_fixed_byte_size_data_from_stream(
115
228
  raise InsufficientDataBytes(
116
229
  f"Tried to read {data_byte_size} bytes, only got {len(data)} bytes."
117
230
  )
231
+
232
+
233
+ def split_data_and_padding_fixed_byte_size(
234
+ self: "FixedByteSizeDecoder[Any]",
235
+ raw_data: bytes,
236
+ ) -> Tuple[bytes, bytes]:
237
+ value_byte_size = get_value_byte_size(self)
238
+ padding_size = self.data_byte_size - value_byte_size
239
+
240
+ if self.is_big_endian:
241
+ if padding_size == 0:
242
+ return raw_data, b""
243
+ padding_bytes = raw_data[:padding_size]
244
+ data = raw_data[padding_size:]
245
+ else:
246
+ data = raw_data[:value_byte_size]
247
+ padding_bytes = raw_data[value_byte_size:]
248
+
249
+ return data, padding_bytes
250
+
251
+
252
+ def validate_padding_bytes_fixed_byte_size(
253
+ self: "FixedByteSizeDecoder[T]",
254
+ value: T,
255
+ padding_bytes: bytes,
256
+ ) -> None:
257
+ if padding_bytes != get_expected_padding_bytes(self, b"\x00"):
258
+ raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
259
+
260
+
261
+ _expected_padding_bytes_cache: Final[
262
+ Dict["FixedByteSizeDecoder[Any]", Dict[bytes, bytes]]
263
+ ] = {}
264
+
265
+
266
+ def get_expected_padding_bytes(
267
+ self: "FixedByteSizeDecoder[Any]", chunk: bytes
268
+ ) -> bytes:
269
+ instance_cache = _expected_padding_bytes_cache.setdefault(self, {})
270
+ expected_padding_bytes = instance_cache.get(chunk)
271
+ if expected_padding_bytes is None:
272
+ value_byte_size = get_value_byte_size(self)
273
+ padding_size = self.data_byte_size - value_byte_size
274
+ expected_padding_bytes = chunk * padding_size
275
+ instance_cache[chunk] = expected_padding_bytes
276
+ return expected_padding_bytes
277
+
278
+
279
+ def validate_padding_bytes_signed_integer(
280
+ self: "SignedIntegerDecoder",
281
+ value: int,
282
+ padding_bytes: bytes,
283
+ ) -> None:
284
+ if value >= 0:
285
+ expected_padding_bytes = get_expected_padding_bytes(self, b"\x00")
286
+ else:
287
+ expected_padding_bytes = get_expected_padding_bytes(self, b"\xff")
288
+
289
+ if padding_bytes != expected_padding_bytes:
290
+ raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
291
+
292
+
293
+ # BooleanDecoder
294
+ def decoder_fn_boolean(data: bytes) -> bool:
295
+ if data == b"\x00":
296
+ return False
297
+ elif data == b"\x01":
298
+ return True
299
+ raise NonEmptyPaddingBytes(f"Boolean must be either 0x0 or 0x1. Got: {data!r}")
@@ -1,33 +1,65 @@
1
- from itertools import (
2
- accumulate,
3
- )
1
+ """Private helpers for encoding logic, intended for C compilation.
2
+
3
+ This file exists because the original encoding.py is not ready to be fully compiled to C.
4
+ This module contains functions and logic that we do wish to compile.
5
+ """
4
6
  from typing import (
5
7
  TYPE_CHECKING,
6
8
  Any,
7
9
  Callable,
10
+ Dict,
8
11
  List,
9
12
  Optional,
10
13
  Sequence,
14
+ Tuple,
11
15
  TypeVar,
12
16
  )
13
17
 
18
+ from faster_eth_utils import (
19
+ is_list_like,
20
+ )
21
+
22
+ from faster_eth_abi.exceptions import (
23
+ ValueOutOfBounds,
24
+ )
25
+
14
26
  if TYPE_CHECKING:
15
27
  from faster_eth_abi.encoding import (
16
28
  BaseEncoder,
29
+ TupleEncoder,
17
30
  )
18
31
 
19
32
 
20
33
  T = TypeVar("T")
21
34
 
22
35
 
23
- def encode_tuple(
24
- values: Sequence[Any],
25
- encoders: Sequence["BaseEncoder"],
26
- ) -> bytes:
36
+ # TupleEncoder
37
+ def validate_tuple(self: "TupleEncoder", value: Sequence[Any]) -> None:
38
+ # if we check list and tuple first it compiles to much quicker C code
39
+ if not isinstance(value, (list, tuple)) and not is_list_like(value):
40
+ self.invalidate_value(
41
+ value,
42
+ msg="must be list-like object such as array or tuple",
43
+ )
44
+
45
+ validators = self.validators
46
+ if len(value) != len(validators):
47
+ self.invalidate_value(
48
+ value,
49
+ exc=ValueOutOfBounds,
50
+ msg=f"value has {len(value)} items when {len(validators)} " "were expected",
51
+ )
52
+
53
+ for item, validator in zip(value, validators):
54
+ validator(item)
55
+
56
+
57
+ def encode_tuple(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
58
+ validate_tuple(self, values)
27
59
  raw_head_chunks: List[Optional[bytes]] = []
28
60
  tail_chunks: List[bytes] = []
29
- for value, encoder in zip(values, encoders):
30
- if getattr(encoder, "is_dynamic", False):
61
+ for value, encoder, is_dynamic in zip(values, self.encoders, self._is_dynamic):
62
+ if is_dynamic:
31
63
  raw_head_chunks.append(None)
32
64
  tail_chunks.append(encoder(value))
33
65
  else:
@@ -35,7 +67,12 @@ def encode_tuple(
35
67
  tail_chunks.append(b"")
36
68
 
37
69
  head_length = sum(32 if item is None else len(item) for item in raw_head_chunks)
38
- tail_offsets = (0, *accumulate(len(item) for item in tail_chunks[:-1]))
70
+ tail_offsets = [0]
71
+ total_offset = 0
72
+ for item in tail_chunks[:-1]:
73
+ total_offset += len(item)
74
+ tail_offsets.append(total_offset)
75
+
39
76
  head_chunks = tuple(
40
77
  encode_uint_256(head_length + offset) if chunk is None else chunk
41
78
  for chunk, offset in zip(raw_head_chunks, tail_offsets)
@@ -44,6 +81,112 @@ def encode_tuple(
44
81
  return b"".join(head_chunks) + b"".join(tail_chunks)
45
82
 
46
83
 
84
+ def encode_tuple_all_dynamic(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
85
+ validate_tuple(self, values)
86
+ encoders = self.encoders
87
+ tail_chunks = [encoder(value) for encoder, value in zip(encoders, values)]
88
+
89
+ total_offset = 0
90
+ head_length = 32 * len(encoders)
91
+ head_chunks = [encode_uint_256(head_length)]
92
+ for item in tail_chunks[:-1]:
93
+ total_offset += len(item)
94
+ head_chunks.append(encode_uint_256(head_length + total_offset))
95
+
96
+ return b"".join(head_chunks) + b"".join(tail_chunks)
97
+
98
+
99
+ def encode_tuple_no_dynamic(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
100
+ validate_tuple(self, values)
101
+ encoders = self.encoders
102
+ return b"".join(encoders[i](values[i]) for i in range(len(encoders)))
103
+
104
+
105
+ def encode_tuple_no_dynamic1(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
106
+ validate_tuple(self, values)
107
+ encoders: Tuple["BaseEncoder"] = self.encoders
108
+ return encoders[0](values[0])
109
+
110
+
111
+ def encode_tuple_no_dynamic2(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
112
+ validate_tuple(self, values)
113
+ encoders = self.encoders
114
+ # encoders: Tuple["BaseEncoder", "BaseEncoder"] = self.encoders
115
+ return encoders[0](values[0]) + encoders[1](values[1])
116
+
117
+
118
+ def encode_tuple_no_dynamic3(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
119
+ validate_tuple(self, values)
120
+ encoders = self.encoders
121
+ # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
122
+ return b"".join(encoders[i](values[i]) for i in range(3))
123
+
124
+
125
+ def encode_tuple_no_dynamic4(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
126
+ validate_tuple(self, values)
127
+ encoders = self.encoders
128
+ # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
129
+ return b"".join(encoders[i](values[i]) for i in range(4))
130
+
131
+
132
+ def encode_tuple_no_dynamic5(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
133
+ validate_tuple(self, values)
134
+ encoders = self.encoders
135
+ # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
136
+ return b"".join(encoders[i](values[i]) for i in range(5))
137
+
138
+
139
+ def encode_tuple_no_dynamic6(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
140
+ validate_tuple(self, values)
141
+ encoders = self.encoders
142
+ # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
143
+ return b"".join(encoders[i](values[i]) for i in range(6))
144
+
145
+
146
+ def encode_tuple_no_dynamic7(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
147
+ validate_tuple(self, values)
148
+ encoders = self.encoders
149
+ # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
150
+ return b"".join(encoders[i](values[i]) for i in range(7))
151
+
152
+
153
+ def encode_tuple_no_dynamic8(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
154
+ validate_tuple(self, values)
155
+ encoders = self.encoders
156
+ # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
157
+ return b"".join(encoders[i](values[i]) for i in range(8))
158
+
159
+
160
+ def encode_tuple_no_dynamic9(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
161
+ validate_tuple(self, values)
162
+ encoders = self.encoders
163
+ # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
164
+ return b"".join(encoders[i](values[i]) for i in range(9))
165
+
166
+
167
+ def encode_tuple_no_dynamic10(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
168
+ validate_tuple(self, values)
169
+ encoders = self.encoders
170
+ # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
171
+ return b"".join(encoders[i](values[i]) for i in range(10))
172
+
173
+
174
+ encode_tuple_no_dynamic_funcs: Dict[
175
+ int, Callable[["TupleEncoder", Sequence[Any]], bytes]
176
+ ] = {
177
+ 1: encode_tuple_no_dynamic1,
178
+ 2: encode_tuple_no_dynamic2,
179
+ 3: encode_tuple_no_dynamic3,
180
+ 4: encode_tuple_no_dynamic4,
181
+ 5: encode_tuple_no_dynamic5,
182
+ 6: encode_tuple_no_dynamic6,
183
+ 7: encode_tuple_no_dynamic7,
184
+ 8: encode_tuple_no_dynamic8,
185
+ 9: encode_tuple_no_dynamic9,
186
+ 10: encode_tuple_no_dynamic10,
187
+ }
188
+
189
+
47
190
  def encode_fixed(
48
191
  value: Any,
49
192
  encode_fn: Callable[[Any], bytes],
@@ -77,7 +220,12 @@ def encode_elements(item_encoder: "BaseEncoder", value: Sequence[Any]) -> bytes:
77
220
  return b"".join(tail_chunks)
78
221
 
79
222
  head_length = 32 * len(value)
80
- tail_offsets = (0, *accumulate(len(item) for item in tail_chunks[:-1]))
223
+ tail_offsets = [0]
224
+ total_offset = 0
225
+ for item in tail_chunks[:-1]:
226
+ total_offset += len(item)
227
+ tail_offsets.append(total_offset)
228
+
81
229
  head_chunks = tuple(
82
230
  encode_uint_256(head_length + offset) for offset in tail_offsets
83
231
  )