faster-eth-abi 5.2.12-cp314-cp314-macosx_11_0_arm64.whl → 5.2.14-cp314-cp314-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of faster-eth-abi might be problematic.
Files changed (39)
  1. benchmarks/__init__.py +1 -0
  2. benchmarks/batch.py +9 -0
  3. benchmarks/data.py +313 -0
  4. benchmarks/test_abi_benchmarks.py +82 -0
  5. benchmarks/test_decoding_benchmarks.py +109 -0
  6. benchmarks/test_encoding_benchmarks.py +99 -0
  7. benchmarks/test_grammar_benchmarks.py +38 -0
  8. benchmarks/test_io_benchmarks.py +99 -0
  9. benchmarks/test_packed_benchmarks.py +41 -0
  10. benchmarks/test_registry_benchmarks.py +45 -0
  11. benchmarks/type_strings.py +26 -0
  12. faster_eth_abi/_codec.cpython-314-darwin.so +0 -0
  13. faster_eth_abi/_codec.py +1 -1
  14. faster_eth_abi/_decoding.cpython-314-darwin.so +0 -0
  15. faster_eth_abi/_decoding.py +136 -5
  16. faster_eth_abi/_encoding.cpython-314-darwin.so +0 -0
  17. faster_eth_abi/_encoding.py +141 -6
  18. faster_eth_abi/_grammar.cpython-314-darwin.so +0 -0
  19. faster_eth_abi/abi.cpython-314-darwin.so +0 -0
  20. faster_eth_abi/constants.cpython-314-darwin.so +0 -0
  21. faster_eth_abi/decoding.py +107 -96
  22. faster_eth_abi/encoding.py +55 -27
  23. faster_eth_abi/from_type_str.cpython-314-darwin.so +0 -0
  24. faster_eth_abi/packed.cpython-314-darwin.so +0 -0
  25. faster_eth_abi/registry.py +47 -31
  26. faster_eth_abi/tools/__init__.cpython-314-darwin.so +0 -0
  27. faster_eth_abi/tools/_strategies.cpython-314-darwin.so +0 -0
  28. faster_eth_abi/utils/__init__.cpython-314-darwin.so +0 -0
  29. faster_eth_abi/utils/numeric.cpython-314-darwin.so +0 -0
  30. faster_eth_abi/utils/padding.cpython-314-darwin.so +0 -0
  31. faster_eth_abi/utils/string.cpython-314-darwin.so +0 -0
  32. faster_eth_abi/utils/validation.cpython-314-darwin.so +0 -0
  33. {faster_eth_abi-5.2.12.dist-info → faster_eth_abi-5.2.14.dist-info}/METADATA +14 -2
  34. faster_eth_abi-5.2.14.dist-info/RECORD +57 -0
  35. {faster_eth_abi-5.2.12.dist-info → faster_eth_abi-5.2.14.dist-info}/top_level.txt +1 -0
  36. faster_eth_abi__mypyc.cpython-314-darwin.so +0 -0
  37. faster_eth_abi-5.2.12.dist-info/RECORD +0 -46
  38. {faster_eth_abi-5.2.12.dist-info → faster_eth_abi-5.2.14.dist-info}/WHEEL +0 -0
  39. {faster_eth_abi-5.2.12.dist-info → faster_eth_abi-5.2.14.dist-info}/licenses/LICENSE +0 -0
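All of the new benchmark modules funnel their timed work through the batch helper added in benchmarks/batch.py (item 2). That file's body is not shown in this diff; the following is a hypothetical reconstruction, inferred only from the call sites below (benchmark(batch, 1000, fn, *args)):

from typing import Any, Callable


def batch(n: int, fn: Callable[..., Any], *args: Any) -> None:
    # Hypothetical sketch: run the callable n times per timed sample
    # so that per-call harness overhead is amortized away.
    for _ in range(n):
        fn(*args)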
benchmarks/test_io_benchmarks.py ADDED
@@ -0,0 +1,99 @@
+"""
+Benchmarks for faster_eth_abi.decoding.ContextFramesBytesIO
+
+This file benchmarks the performance of ContextFramesBytesIO, a subclass of BytesIO
+that supports contextual frame management for nested ABI decoding.
+"""
+
+import pytest
+
+import eth_abi.decoding
+from pytest_codspeed import (
+    BenchmarkFixture,
+)
+
+from benchmarks.batch import (
+    batch,
+)
+import faster_eth_abi.decoding
+
+# Test parameters
+BUFFER_SIZES = [0, 32, 1024, 4096, 65536]
+FRAME_DEPTHS = [1, 5, 10, 50]
+
+
+@pytest.mark.benchmark(group="ContextFramesBytesIO-init")
+@pytest.mark.parametrize("size", BUFFER_SIZES)
+def test_contextframesbytesio_init(benchmark: BenchmarkFixture, size):
+    data = b"\x01" * size
+    benchmark(batch, 1000, eth_abi.decoding.ContextFramesBytesIO, data)
+
+
+@pytest.mark.benchmark(group="ContextFramesBytesIO-init")
+@pytest.mark.parametrize("size", BUFFER_SIZES)
+def test_faster_contextframesbytesio_init(benchmark: BenchmarkFixture, size):
+    data = b"\x01" * size
+    benchmark(batch, 1000, faster_eth_abi.decoding.ContextFramesBytesIO, data)
+
+
+@pytest.mark.benchmark(group="ContextFramesBytesIO-push-pop")
+@pytest.mark.parametrize("depth", FRAME_DEPTHS)
+def test_contextframesbytesio_push_pop(benchmark: BenchmarkFixture, depth):
+    data = b"\x01" * 1024
+    stream = eth_abi.decoding.ContextFramesBytesIO(data)
+
+    def push_pop():
+        for i in range(depth):
+            stream.push_frame(i * 10)
+        for _ in range(depth):
+            stream.pop_frame()
+
+    benchmark(batch, 100, push_pop)
+
+
+@pytest.mark.benchmark(group="ContextFramesBytesIO-push-pop")
+@pytest.mark.parametrize("depth", FRAME_DEPTHS)
+def test_faster_contextframesbytesio_push_pop(benchmark: BenchmarkFixture, depth):
+    data = b"\x01" * 1024
+    stream = faster_eth_abi.decoding.ContextFramesBytesIO(data)
+    ints = list(range(depth))
+
+    def push_pop():
+        for i in ints:
+            stream.push_frame(i * 10)
+        for _ in ints:
+            stream.pop_frame()
+
+    benchmark(batch, 100, push_pop)
+
+
+@pytest.mark.benchmark(group="ContextFramesBytesIO-seek-in-frame")
+@pytest.mark.parametrize("depth", FRAME_DEPTHS)
+def test_contextframesbytesio_seek_in_frame(benchmark: BenchmarkFixture, depth):
+    data = b"\x01" * 1024
+    stream = eth_abi.decoding.ContextFramesBytesIO(data)
+    # Set up the frame stack before timing
+    for i in range(depth):
+        stream.push_frame(i * 10)
+
+    def seek_in_frame_ops():
+        for i in range(depth):
+            stream.seek_in_frame(i)
+
+    benchmark(batch, 100, seek_in_frame_ops)
+
+
+@pytest.mark.benchmark(group="ContextFramesBytesIO-seek-in-frame")
+@pytest.mark.parametrize("depth", FRAME_DEPTHS)
+def test_faster_contextframesbytesio_seek_in_frame(benchmark: BenchmarkFixture, depth):
+    data = b"\x01" * 1024
+    stream = faster_eth_abi.decoding.ContextFramesBytesIO(data)
+    # Set up the frame stack before timing
+    for i in range(depth):
+        stream.push_frame(i * 10)
+
+    def seek_in_frame_ops():
+        for i in range(depth):
+            stream.seek_in_frame(i)
+
+    benchmark(batch, 100, seek_in_frame_ops)
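A note on the class under test: push_frame(offset) shifts the stream's frame of reference by offset bytes and repositions to the new frame's start, seek_in_frame(pos) seeks relative to the current frame, and pop_frame() restores the enclosing frame. A small illustration, assuming the frame semantics of eth_abi.decoding.ContextFramesBytesIO:

from eth_abi.decoding import ContextFramesBytesIO

stream = ContextFramesBytesIO(bytes(range(16)))
stream.push_frame(4)              # frame now starts at absolute offset 4
assert stream.read(1) == b"\x04"
stream.push_frame(4)              # nested frame: absolute offset 8
assert stream.read(1) == b"\x08"
stream.pop_frame()                # back to the outer frame, at its start
assert stream.read(1) == b"\x04"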
benchmarks/test_packed_benchmarks.py ADDED
@@ -0,0 +1,41 @@
+import pytest
+
+import eth_abi.packed
+from pytest_codspeed import (
+    BenchmarkFixture,
+)
+
+from benchmarks.batch import (
+    batch,
+)
+from benchmarks.data import (
+    packed_cases,
+    packed_ids,
+)
+import faster_eth_abi.packed
+
+
+# Packed encoding
+@pytest.mark.benchmark(group="PackedEncoder")
+@pytest.mark.parametrize("abi_type,value", packed_cases, ids=packed_ids)
+def test_encode_packed(benchmark: BenchmarkFixture, abi_type, value):
+    benchmark(batch, 100, eth_abi.packed.encode_packed, [abi_type], [value])
+
+
+@pytest.mark.benchmark(group="PackedEncoder")
+@pytest.mark.parametrize("abi_type,value", packed_cases, ids=packed_ids)
+def test_faster_encode_packed(benchmark: BenchmarkFixture, abi_type, value):
+    benchmark(batch, 100, faster_eth_abi.packed.encode_packed, [abi_type], [value])
+
+
+# Packed is_encodable
+@pytest.mark.benchmark(group="PackedIsEncodable")
+@pytest.mark.parametrize("abi_type,value", packed_cases, ids=packed_ids)
+def test_is_encodable_packed(benchmark: BenchmarkFixture, abi_type, value):
+    benchmark(batch, 100, eth_abi.packed.is_encodable_packed, abi_type, value)
+
+
+@pytest.mark.benchmark(group="PackedIsEncodable")
+@pytest.mark.parametrize("abi_type,value", packed_cases, ids=packed_ids)
+def test_faster_is_encodable_packed(benchmark: BenchmarkFixture, abi_type, value):
+    benchmark(batch, 100, faster_eth_abi.packed.is_encodable_packed, abi_type, value)
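For context, packed encoding writes each value at its native byte width, without the 32-byte word padding of standard ABI encoding. A quick sanity check using eth_abi's public API:

from eth_abi.packed import encode_packed

# uint8 and bool each occupy a single byte when packed
assert encode_packed(["uint8", "bool"], [1, True]) == b"\x01\x01"
# uint16 occupies exactly two big-endian bytes
assert encode_packed(["uint16"], [255]) == b"\x00\xff"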
benchmarks/test_registry_benchmarks.py ADDED
@@ -0,0 +1,45 @@
+import pytest
+
+from eth_abi.registry import (
+    registry,
+)
+from pytest_codspeed import (
+    BenchmarkFixture,
+)
+
+from benchmarks.batch import (
+    batch,
+)
+from benchmarks.type_strings import (
+    type_strings,
+)
+from faster_eth_abi.registry import (
+    registry as faster_registry,
+)
+
+
+ITERATIONS = 50_000
+
+
+@pytest.mark.benchmark(group="RegistryGetEncoder")
+@pytest.mark.parametrize("type_str", type_strings)
+def test_get_encoder(benchmark: BenchmarkFixture, type_str):
+    benchmark(batch, ITERATIONS, registry.get_encoder, type_str)
+
+
+@pytest.mark.benchmark(group="RegistryGetEncoder")
+@pytest.mark.parametrize("type_str", type_strings)
+def test_faster_get_encoder(benchmark: BenchmarkFixture, type_str):
+    benchmark(batch, ITERATIONS, faster_registry.get_encoder, type_str)
+
+
+@pytest.mark.benchmark(group="RegistryGetDecoder")
+@pytest.mark.parametrize("type_str", type_strings)
+def test_get_decoder(benchmark: BenchmarkFixture, type_str):
+    benchmark(batch, ITERATIONS, registry.get_decoder, type_str)
+
+
+@pytest.mark.benchmark(group="RegistryGetDecoder")
+@pytest.mark.parametrize("type_str", type_strings)
+def test_faster_get_decoder(benchmark: BenchmarkFixture, type_str):
+    benchmark(batch, ITERATIONS, faster_registry.get_decoder, type_str)
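These benchmarks isolate the type-string lookup itself: get_encoder and get_decoder resolve an ABI type string into a callable coder. A round-trip sketch, assuming eth_abi's standard registry behavior (decoders consume a positioned stream rather than raw bytes):

from eth_abi.decoding import ContextFramesBytesIO
from eth_abi.registry import registry

encoder = registry.get_encoder("uint256")
assert encoder(1) == (1).to_bytes(32, "big")

decoder = registry.get_decoder("uint256")
assert decoder(ContextFramesBytesIO(encoder(7))) == 7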
benchmarks/type_strings.py ADDED
@@ -0,0 +1,26 @@
+# Shared list of all ABI type strings used in benchmarks
+
+type_strings = [
+    "uint256",
+    "int8",
+    "address",
+    "bytes32",
+    "string",
+    "bool",
+    "uint256[2]",
+    "string[]",
+    "(uint256,bool)",
+    "(address,uint8)",
+    "(string,bytes)",
+    "(uint256[2],string)",
+    "(uint8,(bool,string))",
+    "((uint8,uint8),uint8)",
+    "(uint8[2],(string,bool[2]))",
+    "(uint256[],(string[],bool))",
+    "((uint8[2],(string,bool)),bytes32)",
+    "(uint8[2][2],(string[2],bool[2]))",
+    "uint8[]",
+    "bytes",
+    "fixed128x18",
+    "ufixed128x18",
+]
faster_eth_abi/_codec.py CHANGED
@@ -43,7 +43,7 @@ def encode_c(
 
     encoder = self._registry.get_tuple_encoder(*types)
 
-    return encoder(args)
+    return encoder.encode(args)
 
 
 def decode_c(
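The one-line change above skips the __call__ indirection: in eth_abi-style coders, calling the encoder object just forwards to its encode method, so invoking encode directly removes one layer of dispatch per top-level encode (and compiles to a direct method call under mypyc). Schematically, assuming the base class follows eth_abi's pattern:

from abc import ABC, abstractmethod
from typing import Any


class BaseEncoder(ABC):
    @abstractmethod
    def encode(self, value: Any) -> bytes: ...

    def __call__(self, value: Any) -> bytes:
        # encoder(value) is sugar for encoder.encode(value);
        # calling encode() directly bypasses this extra hop.
        return self.encode(value)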
faster_eth_abi/_decoding.py CHANGED
@@ -1,6 +1,8 @@
 from typing import (
     TYPE_CHECKING,
     Any,
+    Dict,
+    Final,
     Tuple,
 )
 
@@ -10,6 +12,7 @@ from faster_eth_utils import (
 
 from faster_eth_abi.exceptions import (
     InsufficientDataBytes,
+    InvalidPointer,
     NonEmptyPaddingBytes,
 )
 from faster_eth_abi.io import (
@@ -19,9 +22,11 @@ from faster_eth_abi.io import (
 
 if TYPE_CHECKING:
     from .decoding import (
+        BaseArrayDecoder,
         DynamicArrayDecoder,
         FixedByteSizeDecoder,
         HeadTailDecoder,
+        SignedIntegerDecoder,
         SizedArrayDecoder,
         TupleDecoder,
     )
@@ -66,8 +71,107 @@ def decode_head_tail(self: "HeadTailDecoder", stream: ContextFramesBytesIO) -> Any:
 
 # TupleDecoder
 def decode_tuple(self: "TupleDecoder", stream: ContextFramesBytesIO) -> Tuple[Any, ...]:
-    self.validate_pointers(stream)
-    return tuple(decoder(stream) for decoder in self.decoders)
+    # NOTE: the original implementation would do this but it's
+    # kinda wasteful, so we rebuilt the logic within this function
+    # validate_pointers_tuple(self, stream)
+
+    current_location = stream.tell()
+    if self._no_head_tail:
+        # TODO: if all(isinstance(d, TupleDecoder) for d in self._decoders)
+        # return tuple(decode_tuple(stream) for _ in range(len(self.decoders))
+        # and other types with compiled decode funcs
+        return tuple(decoder(stream) for decoder in self.decoders)
+
+    end_of_offsets = current_location + 32 * self.len_of_head
+    total_stream_length = len(stream.getbuffer())
+    items = []
+    for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
+        if is_head_tail:
+            # the next 32 bytes are a pointer that we should validate
+            # checkpoint the stream location so we can reset it after validation
+            step_location = stream.tell()
+
+            offset = decode_uint_256(stream)
+            indicated_idx = current_location + offset
+            if indicated_idx < end_of_offsets or indicated_idx >= total_stream_length:
+                # the pointer is indicating its data is located either within the
+                # offsets section of the stream or beyond the end of the stream,
+                # both of which are invalid
+                raise InvalidPointer(
+                    "Invalid pointer in tuple at location "
+                    f"{stream.tell() - 32} in payload"
+                )
+
+            # reset the stream so we can decode
+            stream.seek(step_location)
+
+        items.append(decoder(stream))
+
+    # return the stream to its original location for actual decoding
+    stream.seek(current_location)
+
+    return tuple(items)
+
+
+def validate_pointers_tuple(
+    self: "TupleDecoder",
+    stream: ContextFramesBytesIO,
+) -> None:
+    """
+    Verify that all pointers point to a valid location in the stream.
+    """
+    current_location = stream.tell()
+    if self._no_head_tail:
+        for decoder in self.decoders:
+            decoder(stream)
+    else:
+        end_of_offsets = current_location + 32 * self.len_of_head
+        total_stream_length = len(stream.getbuffer())
+        for decoder, is_head_tail in zip(self.decoders, self._is_head_tail):
+            if not is_head_tail:
+                # the next 32 bytes are not a pointer, so progress the stream per the decoder
+                decoder(stream)
+            else:
+                # the next 32 bytes are a pointer
+                offset = decode_uint_256(stream)
+                indicated_idx = current_location + offset
+                if (
+                    indicated_idx < end_of_offsets
+                    or indicated_idx >= total_stream_length
+                ):
+                    # the pointer is indicating its data is located either within the
+                    # offsets section of the stream or beyond the end of the stream,
+                    # both of which are invalid
+                    raise InvalidPointer(
+                        "Invalid pointer in tuple at location "
+                        f"{stream.tell() - 32} in payload"
+                    )
+    # return the stream to its original location for actual decoding
+    stream.seek(current_location)
+
+
+# BaseArrayDecoder
+def validate_pointers_array(
+    self: "BaseArrayDecoder", stream: ContextFramesBytesIO, array_size: int
+) -> None:
+    """
+    Verify that all pointers point to a valid location in the stream.
+    """
+    current_location = stream.tell()
+    end_of_offsets = current_location + 32 * array_size
+    total_stream_length = len(stream.getbuffer())
+    for _ in range(array_size):
+        offset = decode_uint_256(stream)
+        indicated_idx = current_location + offset
+        if indicated_idx < end_of_offsets or indicated_idx >= total_stream_length:
+            # the pointer is indicating its data is located either within the
+            # offsets section of the stream or beyond the end of the stream,
+            # both of which are invalid
+            raise InvalidPointer(
+                "Invalid pointer in array at location "
+                f"{stream.tell() - 32} in payload"
+            )
+    stream.seek(current_location)
 
 
 # SizedArrayDecoder
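The effect of the new bounds check is easiest to see on a concrete payload. A minimal sketch, assuming faster_eth_abi mirrors eth_abi's top-level decode(types, data) API:

from faster_eth_abi import decode
from faster_eth_abi.exceptions import InvalidPointer

head = (1).to_bytes(32, "big")          # static uint256 slot
offset = (64).to_bytes(32, "big")       # pointer to the string's tail data
tail = (0).to_bytes(32, "big")          # empty string: a zero length word
assert decode(["uint256", "string"], head + offset + tail) == (1, "")

# A pointer past the end of the 96-byte payload is rejected up front,
# before any out-of-bounds read is attempted.
bad = (4096).to_bytes(32, "big")
try:
    decode(["uint256", "string"], head + bad + tail)
except InvalidPointer:
    pass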
@@ -139,10 +243,37 @@ def validate_padding_bytes_fixed_byte_size(
     value: Any,
     padding_bytes: bytes,
 ) -> None:
-    value_byte_size = get_value_byte_size(self)
-    padding_size = self.data_byte_size - value_byte_size
+    if padding_bytes != get_expected_padding_bytes(self, b"\x00"):
+        raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
+
+
+_expected_padding_bytes_cache: Final[
+    Dict["FixedByteSizeDecoder", Dict[bytes, bytes]]
+] = {}
+
+
+def get_expected_padding_bytes(self: "FixedByteSizeDecoder", chunk: bytes) -> bytes:
+    instance_cache = _expected_padding_bytes_cache.setdefault(self, {})
+    expected_padding_bytes = instance_cache.get(chunk)
+    if expected_padding_bytes is None:
+        value_byte_size = get_value_byte_size(self)
+        padding_size = self.data_byte_size - value_byte_size
+        expected_padding_bytes = chunk * padding_size
+        instance_cache[chunk] = expected_padding_bytes
+    return expected_padding_bytes
+
+
+def validate_padding_bytes_signed_integer(
+    self: "SignedIntegerDecoder",
+    value: int,
+    padding_bytes: bytes,
+) -> None:
+    if value >= 0:
+        expected_padding_bytes = get_expected_padding_bytes(self, b"\x00")
+    else:
+        expected_padding_bytes = get_expected_padding_bytes(self, b"\xff")
 
-    if padding_bytes != b"\x00" * padding_size:
+    if padding_bytes != expected_padding_bytes:
         raise NonEmptyPaddingBytes(f"Padding bytes were not empty: {padding_bytes!r}")
 
 
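The SignedIntegerDecoder variant exists because ABI integers are sign-extended to 32 bytes: a well-formed negative value is padded with 0xff, not 0x00. Two's complement makes this concrete:

# Positive values sign-extend with zero bytes, negative values with 0xff.
assert (5).to_bytes(32, "big", signed=True) == b"\x00" * 31 + b"\x05"
assert (-5).to_bytes(32, "big", signed=True) == b"\xff" * 31 + b"\xfb"

Caching the expected padding per decoder and pad byte then turns the former per-call b"\x00" * padding_size allocation into a dictionary lookup.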
faster_eth_abi/_encoding.py CHANGED
@@ -2,29 +2,59 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
+    Dict,
     List,
     Optional,
     Sequence,
+    Tuple,
    TypeVar,
 )
 
+from faster_eth_utils import (
+    is_list_like,
+)
+
+from faster_eth_abi.exceptions import (
+    ValueOutOfBounds,
+)
+
 if TYPE_CHECKING:
     from faster_eth_abi.encoding import (
         BaseEncoder,
+        TupleEncoder,
     )
 
 
 T = TypeVar("T")
 
 
-def encode_tuple(
-    values: Sequence[Any],
-    encoders: Sequence["BaseEncoder"],
-) -> bytes:
+# TupleEncoder
+def validate_tuple(self: "TupleEncoder", value: Sequence[Any]) -> None:
+    # if we check list and tuple first it compiles to much quicker C code
+    if not isinstance(value, (list, tuple)) and not is_list_like(value):
+        self.invalidate_value(
+            value,
+            msg="must be list-like object such as array or tuple",
+        )
+
+    validators = self.validators
+    if len(value) != len(validators):
+        self.invalidate_value(
+            value,
+            exc=ValueOutOfBounds,
+            msg=f"value has {len(value)} items when {len(validators)} " "were expected",
+        )
+
+    for item, validator in zip(value, validators):
+        validator(item)
+
+
+def encode_tuple(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
     raw_head_chunks: List[Optional[bytes]] = []
     tail_chunks: List[bytes] = []
-    for value, encoder in zip(values, encoders):
-        if getattr(encoder, "is_dynamic", False):
+    for value, encoder, is_dynamic in zip(values, self.encoders, self._is_dynamic):
+        if is_dynamic:
             raw_head_chunks.append(None)
             tail_chunks.append(encoder(value))
         else:
@@ -46,6 +76,111 @@ def encode_tuple(
     return b"".join(head_chunks) + b"".join(tail_chunks)
 
 
+def encode_tuple_all_dynamic(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    tail_chunks = [encoder(value) for encoder, value in zip(encoders, values)]
+
+    total_offset = 0
+    head_length = 32 * len(encoders)
+    head_chunks = [encode_uint_256(head_length)]
+    for item in tail_chunks[:-1]:
+        total_offset += len(item)
+        head_chunks.append(encode_uint_256(head_length + total_offset))
+
+    return b"".join(head_chunks) + b"".join(tail_chunks)
+
+
+def encode_tuple_no_dynamic(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(len(encoders)))
+
+
+def encode_tuple_no_dynamic1(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders: Tuple["BaseEncoder"] = self.encoders
+    return encoders[0](values[0])
+
+
+def encode_tuple_no_dynamic2(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder"] = self.encoders
+    return encoders[0](values[0]) + encoders[1](values[1])
+
+
+def encode_tuple_no_dynamic3(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(3))
+
+
+def encode_tuple_no_dynamic4(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(4))
+
+
+def encode_tuple_no_dynamic5(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(5))
+
+
+def encode_tuple_no_dynamic6(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(6))
+
+
+def encode_tuple_no_dynamic7(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(7))
+
+
+def encode_tuple_no_dynamic8(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(8))
+
+
+def encode_tuple_no_dynamic9(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(9))
+
+
+def encode_tuple_no_dynamic10(self: "TupleEncoder", values: Sequence[Any]) -> bytes:
+    validate_tuple(self, values)
+    encoders = self.encoders
+    # encoders: Tuple["BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder", "BaseEncoder"] = self.encoders
+    return b"".join(encoders[i](values[i]) for i in range(10))
+
+encode_tuple_no_dynamic_funcs: Dict[
+    int, Callable[["TupleEncoder", Sequence[Any]], bytes]
+] = {
+    1: encode_tuple_no_dynamic1,
+    2: encode_tuple_no_dynamic2,
+    3: encode_tuple_no_dynamic3,
+    4: encode_tuple_no_dynamic4,
+    5: encode_tuple_no_dynamic5,
+    6: encode_tuple_no_dynamic6,
+    7: encode_tuple_no_dynamic7,
+    8: encode_tuple_no_dynamic8,
+    9: encode_tuple_no_dynamic9,
+    10: encode_tuple_no_dynamic10,
+}
+
+
 def encode_fixed(
     value: Any,
     encode_fn: Callable[[Any], bytes],
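How faster_eth_abi/encoding.py selects among these variants is outside this hunk, but the shape of the table suggests dispatch on arity and dynamism once, at encoder construction time. A hypothetical wiring (select_encode_func is illustrative, not the package's actual code):

def select_encode_func(encoder: "TupleEncoder"):
    # Hypothetical: choose the fastest specialization up front.
    if not any(encoder._is_dynamic):
        # Unrolled small-arity variants; generic fallback for wider tuples.
        return encode_tuple_no_dynamic_funcs.get(
            len(encoder.encoders), encode_tuple_no_dynamic
        )
    if all(encoder._is_dynamic):
        return encode_tuple_all_dynamic
    return encode_tuple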