web3 7.11.1__py3-none-any.whl → 7.12.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ens/async_ens.py +2 -2
- ens/ens.py +2 -2
- ens/specs/.DS_Store +0 -0
- ens/utils.py +14 -3
- web3/_utils/abi.py +24 -20
- web3/_utils/batching.py +22 -68
- web3/_utils/caching/request_caching_validation.py +8 -4
- web3/_utils/decorators.py +12 -9
- web3/_utils/http_session_manager.py +18 -15
- web3/_utils/method_formatters.py +17 -24
- web3/_utils/module_testing/eth_module.py +39 -54
- web3/_utils/module_testing/web3_module.py +78 -4
- web3/_utils/validation.py +1 -1
- web3/contract/utils.py +20 -35
- web3/manager.py +108 -26
- web3/method.py +7 -7
- web3/providers/async_base.py +15 -1
- web3/providers/auto.py +28 -6
- web3/providers/base.py +18 -5
- web3/providers/ipc.py +4 -6
- web3/providers/legacy_websocket.py +4 -5
- web3/providers/persistent/persistent.py +110 -40
- web3/providers/persistent/request_processor.py +34 -51
- web3/providers/persistent/subscription_manager.py +12 -7
- web3/providers/rpc/async_rpc.py +7 -7
- web3/providers/rpc/rpc.py +6 -6
- web3/utils/subscriptions.py +7 -4
- {web3-7.11.1.dist-info → web3-7.12.1.dist-info}/METADATA +69 -56
- {web3-7.11.1.dist-info → web3-7.12.1.dist-info}/RECORD +32 -31
- {web3-7.11.1.dist-info → web3-7.12.1.dist-info}/WHEEL +1 -1
- {web3-7.11.1.dist-info → web3-7.12.1.dist-info/licenses}/LICENSE +0 -0
- {web3-7.11.1.dist-info → web3-7.12.1.dist-info}/top_level.txt +0 -0
ens/async_ens.py
CHANGED

```diff
@@ -57,7 +57,7 @@ from ens.utils import (
     address_in,
     address_to_reverse_domain,
     default,
-    ens_encode_name,
+    dns_encode_name,
    init_async_web3,
     is_empty_name,
     is_none_or_zero_address,
@@ -500,7 +500,7 @@ class AsyncENS(BaseENS):

         calldata = resolver.encode_abi(*contract_func_with_args)
         contract_call_result = await resolver.caller.resolve(
-            ens_encode_name(normal_name),
+            dns_encode_name(normal_name),
             calldata,
         )
         result = self._decode_ensip10_resolve_data(
```
ens/ens.py
CHANGED

```diff
@@ -56,7 +56,7 @@ from .utils import (
     address_in,
     address_to_reverse_domain,
     default,
-    ens_encode_name,
+    dns_encode_name,
     init_web3,
     is_empty_name,
     is_none_or_zero_address,
@@ -482,7 +482,7 @@ class ENS(BaseENS):

         calldata = resolver.encode_abi(*contract_func_with_args)
         contract_call_result = resolver.caller.resolve(
-            ens_encode_name(normal_name),
+            dns_encode_name(normal_name),
             calldata,
         )
         result = self._decode_ensip10_resolve_data(
```
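
Both ENS modules only swap `ens_encode_name` for the new `dns_encode_name` in their ENSIP-10 wildcard-resolution path. A hedged usage sketch of that path (the endpoint URL below is a placeholder, not part of the diff):

```python
from ens import ENS
from web3 import HTTPProvider, Web3

# Placeholder endpoint; substitute a real mainnet JSON-RPC URL.
w3 = Web3(HTTPProvider("https://mainnet.example.invalid"))
ns = ENS.from_web3(w3)

# For names served by an ENSIP-10 (wildcard) resolver, ns.address() reaches the
# resolver.caller.resolve(...) call shown above, now fed a DNS-encoded name.
print(ns.address("ens.eth"))
```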
ens/specs/.DS_Store
ADDED

Binary file
ens/utils.py
CHANGED

```diff
@@ -13,6 +13,7 @@ from typing import (
     Union,
     cast,
 )
+import warnings

 from eth_typing import (
     Address,
@@ -132,7 +133,7 @@ def normalize_name(name: str) -> str:
     return normalize_name_ensip15(name).as_text


-def ens_encode_name(name: str) -> bytes:
+def dns_encode_name(name: str) -> HexBytes:
     r"""
     Encode a name according to DNS standards specified in section 3.1
     of RFC1035 with the following validations:
@@ -145,7 +146,7 @@ def ens_encode_name(name: str) -> bytes:
     :param str name: the dot-separated ENS name
     """
     if is_empty_name(name):
-        return b"\x00"
+        return HexBytes(b"\x00")

     normalized_name = normalize_name(name)

@@ -163,7 +164,17 @@ def ens_encode_name(name: str) -> bytes:
     dns_prepped_labels = [to_bytes(len(label)) + label for label in labels_as_bytes]

     # return the joined prepped labels in order and append the zero byte at the end:
-    return b"".join(dns_prepped_labels) + b"\x00"
+    return HexBytes(b"".join(dns_prepped_labels) + b"\x00")
+
+
+def ens_encode_name(name: str) -> bytes:
+    warnings.warn(
+        "``ens_encode_name`` is deprecated and will be removed in the next "
+        "major version. Use ``dns_encode_name`` instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return bytes(dns_encode_name(name))


 def is_valid_name(name: str) -> bool:
```
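
A minimal sketch of the rename's effect, based on the code above: `dns_encode_name` returns `HexBytes`, while the old `ens_encode_name` name remains as a deprecated wrapper returning plain `bytes`.

```python
import warnings

from ens.utils import dns_encode_name, ens_encode_name

encoded = dns_encode_name("ens.eth")
# DNS wire format: each label is length-prefixed and the name is zero-terminated,
# i.e. b"\x03ens\x03eth\x00", now wrapped in HexBytes.
print(type(encoded).__name__, bytes(encoded))

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = ens_encode_name("ens.eth")  # emits DeprecationWarning
print(type(legacy).__name__, caught[0].category.__name__)
```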
web3/_utils/abi.py
CHANGED

```diff
@@ -73,7 +73,6 @@ from eth_utils import (
 )
 from eth_utils.toolz import (
     curry,
-    partial,
     pipe,
 )

@@ -115,21 +114,21 @@ def receive_func_abi_exists(contract_abi: ABI) -> Sequence[ABIReceive]:
     return filter_abi_by_type("receive", contract_abi)


-def get_indexed_event_inputs(event_abi: ABIEvent) ->
+def get_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIComponentIndexed]:
     return [arg for arg in event_abi["inputs"] if arg["indexed"] is True]


-def exclude_indexed_event_inputs(event_abi: ABIEvent) ->
+def exclude_indexed_event_inputs(event_abi: ABIEvent) -> List[ABIComponentIndexed]:
     return [arg for arg in event_abi["inputs"] if arg["indexed"] is False]


-def filter_by_types(types: Collection[str], contract_abi: ABI) ->
+def filter_by_types(types: Collection[str], contract_abi: ABI) -> List[ABIElement]:
     return [abi_element for abi_element in contract_abi if abi_element["type"] in types]


 def filter_by_argument_name(
     argument_names: Collection[str], contract_abi: ABI
-) ->
+) -> List[ABIElement]:
     """
     Return a list of each ``ABIElement`` which contains arguments matching provided
     names.
@@ -186,7 +185,7 @@ def get_name_from_abi_element_identifier(

 def get_abi_element_signature(
     abi_element_identifier: ABIElementIdentifier,
-    abi_element_argument_types: Optional[
+    abi_element_argument_types: Optional[Iterable[str]] = None,
 ) -> str:
     element_name = get_name_from_abi_element_identifier(abi_element_identifier)
     argument_types = ",".join(abi_element_argument_types or [])
@@ -585,9 +584,9 @@ def normalize_event_input_types(

 @curry
 def map_abi_data(
-    normalizers:
-    types:
-    data:
+    normalizers: Iterable[Callable[[TypeStr, Any], Tuple[TypeStr, Any]]],
+    types: Iterable[TypeStr],
+    data: Iterable[Any],
 ) -> Any:
     """
     Applies normalizers to your data, in the context of the relevant types.
@@ -611,17 +610,21 @@ def map_abi_data(
     2. Recursively mapping each of the normalizers to the data
     3. Stripping the types back out of the tree
     """
-
-
-
-
+    return pipe(
+        data,
+        # 1. Decorating the data tree with types
+        abi_data_tree(types),
+        # 2. Recursively mapping each of the normalizers to the data
+        *map(data_tree_map, normalizers),
+        # 3. Stripping the types back out of the tree
+        strip_abi_types,
     )

-    return pipe(data, *pipeline)
-

 @curry
-def abi_data_tree(types: Sequence[TypeStr], data: Sequence[Any]) -> List[Any]:
+def abi_data_tree(
+    types: Iterable[TypeStr], data: Iterable[Any]
+) -> List["ABITypedData"]:
     """
     Decorate the data tree with pairs of (type, data). The pair tuple is actually an
     ABITypedData, but can be accessed as a tuple.
@@ -631,10 +634,7 @@ def abi_data_tree(types: Sequence[TypeStr], data: Sequence[Any]) -> List[Any]:
     >>> abi_data_tree(types=["bool[2]", "uint"], data=[[True, False], 0])
     [("bool[2]", [("bool", True), ("bool", False)]), ("uint256", 0)]
     """
-    return [
-        abi_sub_tree(data_type, data_value)
-        for data_type, data_value in zip(types, data)
-    ]
+    return list(map(abi_sub_tree, types, data))


 @curry
@@ -723,6 +723,10 @@ def strip_abi_type(elements: Any) -> Any:
     return elements


+def strip_abi_types(elements: Any) -> Any:
+    return recursive_map(strip_abi_type, elements)
+
+
 def build_non_strict_registry() -> ABIRegistry:
     # We make a copy here just to make sure that eth-abi's default registry is not
     # affected by our custom encoder subclasses
```
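
The rewritten `map_abi_data` pipeline leans on two helpers visible above: `abi_data_tree`, which pairs each value with its ABI type, and the new `strip_abi_types`, which walks the tree and drops the type wrappers again. A small illustrative sketch using these private helpers (values chosen to mirror the docstring example):

```python
from web3._utils.abi import abi_data_tree, strip_abi_types

# Pair each value with its ABI type; elements nested inside "bool[2]" get typed too.
tree = abi_data_tree(["bool[2]", "uint256"], [[True, False], 0])
print(tree)

# Stripping the types recovers the plain data, which is how map_abi_data finishes
# after running the normalizers over the typed tree.
print(strip_abi_types(tree))
```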
web3/_utils/batching.py
CHANGED

```diff
@@ -1,9 +1,3 @@
-from copy import (
-    copy,
-)
-from functools import (
-    wraps,
-)
 from types import (
     TracebackType,
 )
@@ -15,11 +9,8 @@ from typing import (
     Dict,
     Generic,
     List,
-    Protocol,
-    Sequence,
     Tuple,
     Type,
-    TypeVar,
     Union,
     cast,
 )
@@ -28,17 +19,10 @@ import warnings
 from web3._utils.compat import (
     Self,
 )
-from web3.contract.async_contract import (
-    AsyncContractFunction,
-)
-from web3.contract.contract import (
-    ContractFunction,
-)
 from web3.exceptions import (
     Web3ValueError,
 )
 from web3.types import (
-    RPCEndpoint,
     TFunc,
     TReturn,
 )
@@ -48,6 +32,12 @@ if TYPE_CHECKING:
         AsyncWeb3,
         Web3,
     )
+    from web3.contract.async_contract import (
+        AsyncContractFunction,
+    )
+    from web3.contract.contract import (
+        ContractFunction,
+    )
     from web3.method import (  # noqa: F401
         Method,
     )
@@ -61,13 +51,14 @@ if TYPE_CHECKING:
         JSONBaseProvider,
     )
     from web3.types import (  # noqa: F401
+        RPCEndpoint,
         RPCResponse,
     )


 BATCH_REQUEST_ID = "batch_request"  # for use as the cache key for batch requests

-BatchRequestInformation = Tuple[Tuple["RPCEndpoint", Any],
+BatchRequestInformation = Tuple[Tuple["RPCEndpoint", Any], Tuple[Any, ...]]
 RPC_METHODS_UNSUPPORTED_DURING_BATCH = {
     "eth_subscribe",
     "eth_unsubscribe",
@@ -104,21 +95,18 @@ class RequestBatcher(Generic[TFunc]):
        )

     def _initialize_batching(self) -> None:
-        self._provider.
+        self._provider._batching_context.set(self)
         self.clear()

     def _end_batching(self) -> None:
         self.clear()
-        self._provider.
-        if self._provider.has_persistent_connection:
-            provider = cast("PersistentConnectionProvider", self._provider)
-            provider._batch_request_counter = None
+        self._provider._batching_context.set(None)

     def add(self, batch_payload: TReturn) -> None:
         self._validate_is_batching()

-        if
-            batch_payload = batch_payload.call()
+        if hasattr(batch_payload, "call"):
+            batch_payload = batch_payload.call()

         # When batching, we don't make a request. Instead, we will get the request
         # information and store it in the `_requests_info` list. So we have to cast the
@@ -136,8 +124,8 @@ class RequestBatcher(Generic[TFunc]):
             Union[
                 "Method[Callable[..., Any]]",
                 Callable[..., Any],
-                ContractFunction,
-                AsyncContractFunction,
+                "ContractFunction",
+                "AsyncContractFunction",
             ],
             List[Any],
         ],
@@ -156,9 +144,6 @@ class RequestBatcher(Generic[TFunc]):
     def clear(self) -> None:
         self._requests_info = []
         self._async_requests_info = []
-        if self._provider.has_persistent_connection:
-            provider = cast("PersistentConnectionProvider", self._provider)
-            provider._batch_request_counter = next(copy(provider.request_counter))

     def cancel(self) -> None:
         self._end_batching()
@@ -181,9 +166,14 @@ class RequestBatcher(Generic[TFunc]):

     async def async_execute(self) -> List["RPCResponse"]:
         self._validate_is_batching()
-
-        self.
-
+        if self._provider.has_persistent_connection:
+            responses = await self.web3.manager._async_make_socket_batch_request(
+                self._async_requests_info
+            )
+        else:
+            responses = await self.web3.manager._async_make_batch_request(
+                self._async_requests_info
+            )
         self._end_batching()
         return responses

@@ -220,39 +210,3 @@ def sort_batch_response_by_response_ids(
             stacklevel=2,
         )
     return responses
-
-
-class SupportsBatching(Protocol):
-    _is_batching: bool
-
-
-R = TypeVar("R")
-T = TypeVar("T", bound=SupportsBatching)
-
-
-def async_batching_context(
-    method: Callable[[T, List[Tuple[RPCEndpoint, Any]]], Coroutine[Any, Any, R]]
-) -> Callable[[T, List[Tuple[RPCEndpoint, Any]]], Coroutine[Any, Any, R]]:
-    @wraps(method)
-    async def wrapper(self: T, requests: List[Tuple[RPCEndpoint, Any]]) -> R:
-        self._is_batching = True
-        try:
-            return await method(self, requests)
-        finally:
-            self._is_batching = False
-
-    return wrapper
-
-
-def batching_context(
-    method: Callable[[T, List[Tuple[RPCEndpoint, Any]]], R]
-) -> Callable[[T, List[Tuple[RPCEndpoint, Any]]], R]:
-    @wraps(method)
-    def wrapper(self: T, requests: List[Tuple[RPCEndpoint, Any]]) -> R:
-        self._is_batching = True
-        try:
-            return method(self, requests)
-        finally:
-            self._is_batching = False
-
-    return wrapper
```
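
The batching rework above replaces the `_is_batching` flags and per-provider request counters with a `_batching_context` context variable on the provider, and routes async execution through either the socket batch path or the plain batch path on the manager. The public interface is unchanged; a hedged usage sketch, assuming a local node at http://127.0.0.1:8545:

```python
from web3 import HTTPProvider, Web3

w3 = Web3(HTTPProvider("http://127.0.0.1:8545"))

# Inside the context manager, add() only records request information; execute()
# sends everything as one JSON-RPC batch and returns the responses in order.
with w3.batch_requests() as batch:
    batch.add(w3.eth.get_block(1))
    batch.add(w3.eth.get_block(2))
    block_one, block_two = batch.execute()

print(block_one["number"], block_two["number"])
```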
web3/_utils/caching/request_caching_validation.py
CHANGED

```diff
@@ -73,7 +73,8 @@ def is_beyond_validation_threshold(
         else:
             provider.logger.error(
                 "Invalid request_cache_validation_threshold value. This should not "
-
+                "have happened. Request not cached.\n threshold: %s",
+                threshold,
             )
             return False
     except Exception as e:
@@ -128,7 +129,8 @@ def validate_from_blocknum_in_result(
         else:
             provider.logger.error(
                 "Could not find block number in result. This should not have happened. "
-
+                "Request not cached.\n result: %s",
+                result,
             )
             return False
     except Exception as e:
@@ -198,7 +200,8 @@ async def async_is_beyond_validation_threshold(
         else:
             provider.logger.error(
                 "Invalid request_cache_validation_threshold value. This should not "
-
+                "have happened. Request not cached.\n threshold: %s",
+                threshold,
             )
             return False
     except Exception as e:
@@ -253,7 +256,8 @@ async def async_validate_from_blocknum_in_result(
         else:
             provider.logger.error(
                 "Could not find block number in result. This should not have happened. "
-
+                "Request not cached.\n result: %s",
+                result,
             )
             return False
     except Exception as e:
```
web3/_utils/decorators.py
CHANGED

```diff
@@ -3,6 +3,8 @@ import threading
 from typing import (
     Any,
     Callable,
+    Set,
+    Tuple,
     TypeVar,
     cast,
 )
@@ -20,21 +22,22 @@ def reject_recursive_repeats(to_wrap: Callable[..., Any]) -> Callable[..., Any]:
     Prevent simple cycles by returning None when called recursively with same instance
     """
     # types ignored b/c dynamically set attribute
-
+    already_called: Set[Tuple[int, ...]] = set()
+    to_wrap.__already_called = already_called  # type: ignore
+
+    add_call = already_called.add
+    remove_call = already_called.remove

     @functools.wraps(to_wrap)
     def wrapped(*args: Any) -> Any:
-
-
-        thread_local_args = (thread_id,) + arg_instances
-        if thread_local_args in to_wrap.__already_called:  # type: ignore
+        thread_local_args = (threading.get_ident(), *map(id, args))
+        if thread_local_args in already_called:
             raise Web3ValueError(f"Recursively called {to_wrap} with {args!r}")
-
+        add_call(thread_local_args)
         try:
-
+            return to_wrap(*args)
         finally:
-
-        return wrapped_val
+            remove_call(thread_local_args)

     return wrapped

```
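
The refactored `reject_recursive_repeats` keeps the same behavior with a closed-over set instead of repeated dynamic-attribute lookups: a call is keyed by the thread id plus the `id()` of each argument, and re-entering with the same key raises. A small demonstration (the `resolve_ref` function is illustrative, not part of web3):

```python
from web3._utils.decorators import reject_recursive_repeats
from web3.exceptions import Web3ValueError


@reject_recursive_repeats
def resolve_ref(node):
    # Follow "ref" links; without the decorator a self-referencing node loops forever.
    if isinstance(node, dict) and "ref" in node:
        return resolve_ref(node["ref"])
    return node


cyclic = {}
cyclic["ref"] = cyclic  # the node points back at itself

try:
    resolve_ref(cyclic)
except Web3ValueError as exc:
    print("cycle detected:", exc)
```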
web3/_utils/http_session_manager.py
CHANGED

```diff
@@ -81,14 +81,14 @@ class HTTPSessionManager:

         with self._lock:
             cached_session, evicted_items = self.session_cache.cache(cache_key, session)
-            self.logger.debug(
+            self.logger.debug("Session cached: %s, %s", endpoint_uri, cached_session)

             if evicted_items is not None:
                 evicted_sessions = evicted_items.values()
                 for evicted_session in evicted_sessions:
                     self.logger.debug(
-                        "Session cache full. Session evicted from cache: "
-
+                        "Session cache full. Session evicted from cache: %s",
+                        evicted_session,
                     )
                     threading.Timer(
                         # If `request_timeout` is `None`, don't wait forever for the closing
@@ -167,7 +167,7 @@ class HTTPSessionManager:
     def _close_evicted_sessions(self, evicted_sessions: List[requests.Session]) -> None:
         for evicted_session in evicted_sessions:
             evicted_session.close()
-            self.logger.debug(
+            self.logger.debug("Closed evicted session: %s", evicted_session)

     # -- async -- #

@@ -178,7 +178,7 @@ class HTTPSessionManager:
         request_timeout: Optional[ClientTimeout] = None,
     ) -> ClientSession:
         # cache key should have a unique thread identifier
-        cache_key = generate_cache_key(f"{
+        cache_key = generate_cache_key(f"{id(asyncio.get_event_loop())}:{endpoint_uri}")

         evicted_items = None
         async with async_lock(self.session_pool, self._lock):
@@ -195,7 +195,7 @@ class HTTPSessionManager:
                     cache_key, session
                 )
                 self.logger.debug(
-
+                    "Async session cached: %s, %s", endpoint_uri, cached_session
                 )

             else:
@@ -215,8 +215,10 @@ class HTTPSessionManager:
                 )
                 if warning:
                     self.logger.debug(
-
-
+                        "%s: %s, %s. Creating and caching a new async session for uri.",
+                        warning,
+                        endpoint_uri,
+                        cached_session,
                     )

                     self.session_cache._data.pop(cache_key)
@@ -224,7 +226,8 @@ class HTTPSessionManager:
                     # if loop was closed but not the session, close the session
                     await cached_session.close()
                     self.logger.debug(
-
+                        "Async session closed and evicted from cache: %s",
+                        cached_session,
                     )

                     # replace stale session with a new session at the cache key
@@ -238,7 +241,7 @@ class HTTPSessionManager:
                         cache_key, _session
                     )
                     self.logger.debug(
-
+                        "Async session cached: %s, %s", endpoint_uri, cached_session
                     )

             if evicted_items is not None:
@@ -248,8 +251,8 @@ class HTTPSessionManager:
             evicted_sessions = list(evicted_items.values())
             for evicted_session in evicted_sessions:
                 self.logger.debug(
-                    "Async session cache full. Session evicted from cache: "
-
+                    "Async session cache full. Session evicted from cache: %s",
+                    evicted_session,
                 )
                 # Kick off an asyncio `Task` to close the evicted sessions. In the case
                 # that the cache filled very quickly and some sessions have been evicted
@@ -323,10 +326,10 @@ class HTTPSessionManager:

         for evicted_session in evicted_sessions:
             await evicted_session.close()
-            self.logger.debug(
+            self.logger.debug("Closed evicted async session: %s", evicted_session)

         if any(not evicted_session.closed for evicted_session in evicted_sessions):
             self.logger.warning(
-                "Some evicted async sessions were not properly closed: "
-
+                "Some evicted async sessions were not properly closed: %s",
+                evicted_sessions,
             )
```
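
All of the changes above follow one pattern: arguments are handed to the logger instead of being pre-formatted, so the message string is only built when the log level is actually enabled. A generic before/after sketch, not specific to web3:

```python
import logging

logger = logging.getLogger("example")
endpoint_uri, session = "http://localhost:8545", object()

# Before: the f-string is evaluated on every call, even with DEBUG disabled.
logger.debug(f"Session cached: {endpoint_uri}, {session}")

# After: %-style arguments defer formatting to the logging framework.
logger.debug("Session cached: %s, %s", endpoint_uri, session)
```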
web3/_utils/method_formatters.py
CHANGED

```diff
@@ -105,6 +105,7 @@ from web3.types import (
     BlockIdentifier,
     Formatters,
     RPCEndpoint,
+    RPCResponse,
     SimulateV1Payload,
     StateOverrideParams,
     TReturn,
@@ -650,19 +651,15 @@ simulate_v1_request_formatter: Callable[
 )

 block_result_formatters_copy = BLOCK_RESULT_FORMATTERS.copy()
-block_result_formatters_copy
-
-
-
-
-
-
-
-    }
-    )
-)
-    }
+block_result_formatters_copy["calls"] = apply_list_to_array_formatter(
+    type_aware_apply_formatters_to_dict(
+        {
+            "returnData": HexBytes,
+            "logs": apply_list_to_array_formatter(log_entry_formatter),
+            "gasUsed": to_integer_if_hex,
+            "status": to_integer_if_hex,
+        }
+    )
 )
 simulate_v1_result_formatter = apply_formatter_if(
     is_not_null,
@@ -1107,9 +1104,7 @@ def combine_formatters(
         yield formatter_map[method_name]


-def get_request_formatters(
-    method_name: Union[RPCEndpoint, Callable[..., RPCEndpoint]],
-) -> Dict[str, Callable[..., Any]]:
+def get_request_formatters(method_name: RPCEndpoint) -> Callable[[RPCResponse], Any]:
     request_formatter_maps = (
         ABI_REQUEST_FORMATTERS,
         # METHOD_NORMALIZERS needs to be after ABI_REQUEST_FORMATTERS
@@ -1241,7 +1236,7 @@ FILTER_RESULT_FORMATTERS: Dict[RPCEndpoint, Callable[..., Any]] = {

 @to_tuple
 def apply_module_to_formatters(
-    formatters:
+    formatters: Iterable[Callable[..., TReturn]],
     module: "Module",
     method_name: Union[RPCEndpoint, Callable[..., RPCEndpoint]],
 ) -> Iterable[Callable[..., TReturn]]:
@@ -1250,9 +1245,9 @@ def apply_module_to_formatters(


 def get_result_formatters(
-    method_name:
+    method_name: RPCEndpoint,
     module: "Module",
-) ->
+) -> Callable[[RPCResponse], Any]:
     formatters = combine_formatters((PYTHONIC_RESULT_FORMATTERS,), method_name)
     formatters_requiring_module = combine_formatters(
         (FILTER_RESULT_FORMATTERS,), method_name
@@ -1263,9 +1258,7 @@ def get_result_formatters(
     return compose(*partial_formatters, *formatters)


-def get_error_formatters(
-    method_name: Union[RPCEndpoint, Callable[..., RPCEndpoint]],
-) -> Callable[..., Any]:
+def get_error_formatters(method_name: RPCEndpoint) -> Callable[[RPCResponse], Any]:
     # Note error formatters work on the full response dict
     error_formatter_maps = (ERROR_FORMATTERS,)
     formatters = combine_formatters(error_formatter_maps, method_name)
@@ -1274,8 +1267,8 @@ def get_error_formatters(


 def get_null_result_formatters(
-    method_name:
-) -> Callable[
+    method_name: RPCEndpoint,
+) -> Callable[[RPCResponse], Any]:
     formatters = combine_formatters((NULL_RESULT_FORMATTERS,), method_name)

     return compose(*formatters)
```