web3 7.0.0b5__py3-none-any.whl → 7.0.0b6__py3-none-any.whl

@@ -4,13 +4,23 @@ from abc import (
 import asyncio
 import logging
 from typing import (
+    Any,
+    List,
     Optional,
+    Union,
+)
+
+from websockets import (
+    ConnectionClosed,
+    WebSocketException,
 )
 
 from web3._utils.caching import (
     generate_cache_key,
 )
 from web3.exceptions import (
+    ProviderConnectionError,
+    TaskNotRunning,
     TimeExhausted,
 )
 from web3.providers.async_base import (
@@ -35,19 +45,24 @@ class PersistentConnectionProvider(AsyncJSONBaseProvider, ABC):
     _message_listener_task: Optional["asyncio.Task[None]"] = None
     _listen_event: asyncio.Event = asyncio.Event()
 
+    _batch_request_counter: Optional[int] = None
+
     def __init__(
         self,
         request_timeout: float = DEFAULT_PERSISTENT_CONNECTION_TIMEOUT,
         subscription_response_queue_size: int = 500,
         silence_listener_task_exceptions: bool = False,
+        max_connection_retries: int = 5,
+        **kwargs: Any,
     ) -> None:
-        super().__init__()
+        super().__init__(**kwargs)
         self._request_processor = RequestProcessor(
             self,
             subscription_response_queue_size=subscription_response_queue_size,
         )
         self.request_timeout = request_timeout
         self.silence_listener_task_exceptions = silence_listener_task_exceptions
+        self._max_connection_retries = max_connection_retries
 
     def get_endpoint_uri_or_ipc_path(self) -> str:
         if hasattr(self, "endpoint_uri"):
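
The constructor now accepts max_connection_retries and forwards **kwargs to the base class, so retry behavior is configurable per provider instance. A minimal usage sketch, assuming the WebSocketProvider subclass shown further below and an illustrative local node URI:

import asyncio

from web3 import AsyncWeb3, WebSocketProvider

async def main() -> None:
    provider = WebSocketProvider(
        "ws://127.0.0.1:8546",     # illustrative endpoint
        request_timeout=30.0,
        max_connection_retries=3,  # new keyword in this release
    )
    # entering the async context calls the connect() shown below
    async with AsyncWeb3(provider) as w3:
        print(await w3.eth.block_number)

asyncio.run(main())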
@@ -61,16 +76,124 @@ class PersistentConnectionProvider(AsyncJSONBaseProvider, ABC):
         )
 
     async def connect(self) -> None:
-        raise NotImplementedError("Must be implemented by subclasses")
+        _connection_attempts = 0
+        _backoff_rate_change = 1.75
+        _backoff_time = 1.75
+
+        while _connection_attempts != self._max_connection_retries:
+            try:
+                _connection_attempts += 1
+                self.logger.info(
+                    f"Connecting to: {self.get_endpoint_uri_or_ipc_path()}"
+                )
+                await self._provider_specific_connect()
+                self._message_listener_task = asyncio.create_task(
+                    self._message_listener()
+                )
+                self._message_listener_task.add_done_callback(
+                    self._message_listener_callback
+                )
+                self.logger.info(
+                    f"Successfully connected to: {self.get_endpoint_uri_or_ipc_path()}"
+                )
+                break
+            except (WebSocketException, OSError) as e:
+                if _connection_attempts == self._max_connection_retries:
+                    raise ProviderConnectionError(
+                        f"Could not connect to: {self.get_endpoint_uri_or_ipc_path()}. "
+                        f"Retries exceeded max of {self._max_connection_retries}."
+                    ) from e
+                self.logger.info(
+                    f"Could not connect to: {self.get_endpoint_uri_or_ipc_path()}. "
+                    f"Retrying in {round(_backoff_time, 1)} seconds.",
+                    exc_info=True,
+                )
+                await asyncio.sleep(_backoff_time)
+                _backoff_time *= _backoff_rate_change
 
     async def disconnect(self) -> None:
+        try:
+            if self._message_listener_task:
+                self._message_listener_task.cancel()
+                await self._message_listener_task
+        except (asyncio.CancelledError, StopAsyncIteration, ConnectionClosed):
+            pass
+        finally:
+            self._message_listener_task = None
+            self.logger.info("Message listener background task successfully shut down.")
+
+        await self._provider_specific_disconnect()
+        self._request_processor.clear_caches()
+        self.logger.info(
+            f"Successfully disconnected from: {self.get_endpoint_uri_or_ipc_path()}"
+        )
+
+    # -- private methods -- #
+
+    async def _provider_specific_connect(self) -> None:
         raise NotImplementedError("Must be implemented by subclasses")
 
-    async def _message_listener(self) -> None:
+    async def _provider_specific_disconnect(self) -> None:
         raise NotImplementedError("Must be implemented by subclasses")
 
+    async def _provider_specific_message_listener(self) -> None:
+        raise NotImplementedError("Must be implemented by subclasses")
+
+    def _message_listener_callback(
+        self, message_listener_task: "asyncio.Task[None]"
+    ) -> None:
+        # Puts a `TaskNotRunning` in the queue to signal the end of the listener task
+        # to any running subscription streams that are awaiting a response.
+        self._request_processor._subscription_response_queue.put_nowait(
+            TaskNotRunning(message_listener_task)
+        )
+
+    async def _message_listener(self) -> None:
+        self.logger.info(
+            f"{self.__class__.__qualname__} listener background task started. Storing "
+            "all messages in appropriate request processor queues / caches to be "
+            "processed."
+        )
+        while True:
+            # the use of sleep(0) seems to be the most efficient way to yield control
+            # back to the event loop to share the loop with other tasks.
+            await asyncio.sleep(0)
+            try:
+                await self._provider_specific_message_listener()
+            except Exception as e:
+                if not self.silence_listener_task_exceptions:
+                    raise e
+                else:
+                    self._error_log_listener_task_exception(e)
+
+    def _error_log_listener_task_exception(self, e: Exception) -> None:
+        """
+        When silencing listener task exceptions, this method is used to log the
+        exception and keep the listener task alive. Override this method to fine-tune
+        error logging behavior for the implementation class.
+        """
+        self.logger.error(
+            "Exception caught in listener, error logging and keeping "
+            "listener background task alive."
+            f"\n error={e.__class__.__name__}: {e}"
+        )
+
+    def _handle_listener_task_exceptions(self) -> None:
+        """
+        Should be called every time a `PersistentConnectionProvider` is polling for
+        messages in the main loop. If the message listener task has completed and an
+        exception was recorded, raise the exception in the main loop.
+        """
+        msg_listener_task = getattr(self, "_message_listener_task", None)
+        if (
+            msg_listener_task
+            and msg_listener_task.done()
+            and msg_listener_task.exception()
+        ):
+            raise msg_listener_task.exception()
+
     async def _get_response_for_request_id(
-        self, request_id: RPCId, timeout: Optional[float] = None
+        self, request_id: Union[RPCId, List[RPCId]], timeout: Optional[float] = None
     ) -> RPCResponse:
         if timeout is None:
             timeout = self.request_timeout
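
connect() and disconnect() are now concrete template methods: the base class owns the retry loop, listener-task startup, and teardown, while transports implement only the _provider_specific_* hooks. A hypothetical subclass sketch of that contract (the newline-delimited TCP transport and import path are assumptions; a complete provider would also implement make_request() and set endpoint_uri or ipc_path):

import asyncio
import json

from web3.providers.persistent import PersistentConnectionProvider

class LineDelimitedProvider(PersistentConnectionProvider):
    async def _provider_specific_connect(self) -> None:
        # open the transport; the retry/backoff around this call is inherited
        self._reader, self._writer = await asyncio.open_connection("127.0.0.1", 9000)

    async def _provider_specific_disconnect(self) -> None:
        self._writer.close()
        await self._writer.wait_closed()

    async def _provider_specific_message_listener(self) -> None:
        # one JSON-RPC message per line; caching routes it to the right queue
        async for raw_message in self._reader:
            await self._request_processor.cache_raw_response(json.loads(raw_message))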
@@ -79,10 +202,9 @@ class PersistentConnectionProvider(AsyncJSONBaseProvider, ABC):
             request_cache_key = generate_cache_key(request_id)
 
             while True:
-                # sleep(0) here seems to be the most efficient way to yield control
-                # back to the event loop while waiting for the response to be in the
-                # queue.
-                await asyncio.sleep(0)
+                # check if an exception was recorded in the listener task and raise it
+                # in the main loop if so
+                self._handle_listener_task_exceptions()
 
                 if request_cache_key in self._request_processor._request_response_cache:
                     self.logger.debug(
@@ -92,11 +214,13 @@ class PersistentConnectionProvider(AsyncJSONBaseProvider, ABC):
                         cache_key=request_cache_key,
                     )
                     return popped_response
+                else:
+                    await asyncio.sleep(0)
 
         try:
             # Add the request timeout around the while loop that checks the request
-            # cache and tried to recv(). If the request is neither in the cache, nor
-            # received within the request_timeout, raise ``TimeExhausted``.
+            # cache. If the request is not in the cache within the request_timeout,
+            # raise ``TimeExhausted``.
             return await asyncio.wait_for(_match_response_id_to_request_id(), timeout)
         except asyncio.TimeoutError:
             raise TimeExhausted(
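
Distilled, the response-matching flow is: poll a cache, yield to the event loop between checks so the listener task can run, and bound the whole loop with one asyncio.wait_for(). A standalone sketch of that pattern (names and the plain TimeoutError are illustrative; the real method checks the listener task's health each iteration and raises TimeExhausted):

import asyncio
from typing import Any, Dict

async def wait_for_cached_response(
    cache: Dict[str, Any], key: str, timeout: float
) -> Any:
    async def _poll() -> Any:
        while True:
            if key in cache:
                return cache.pop(key)
            await asyncio.sleep(0)  # yield so the listener can fill the cache

    try:
        return await asyncio.wait_for(_poll(), timeout)
    except asyncio.TimeoutError:
        raise TimeoutError(f"No response for {key!r} within {timeout}s")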
@@ -2,20 +2,32 @@ import asyncio
 from copy import (
     copy,
 )
+import sys
 from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
     Dict,
+    Generic,
     Optional,
     Tuple,
+    TypeVar,
+    Union,
 )
 
+from eth_utils.toolz import (
+    compose,
+)
+
+from web3._utils.batching import (
+    BATCH_REQUEST_ID,
+)
 from web3._utils.caching import (
     RequestInformation,
     generate_cache_key,
 )
 from web3.exceptions import (
+    TaskNotRunning,
     Web3ValueError,
 )
 from web3.types import (
@@ -31,6 +43,34 @@ if TYPE_CHECKING:
         PersistentConnectionProvider,
     )
 
+T = TypeVar("T")
+
+# TODO: This is an ugly hack for python 3.8. Remove this after we drop support for it
+# and use `asyncio.Queue[T]` type directly in the `TaskReliantQueue` class.
+if sys.version_info >= (3, 9):
+
+    class _TaskReliantQueue(asyncio.Queue[T], Generic[T]):
+        pass
+
+else:
+
+    class _TaskReliantQueue(asyncio.Queue, Generic[T]):  # type: ignore
+        pass
+
+
+class TaskReliantQueue(_TaskReliantQueue[T]):
+    """
+    A queue that relies on a task to be running to process items in the queue.
+    """
+
+    async def get(self) -> T:
+        item = await super().get()
+        if isinstance(item, Exception):
+            # if the item is an exception, raise it so the task can handle this case
+            # more gracefully
+            raise item
+        return item
+
 
 class RequestProcessor:
     _subscription_queue_synced_with_ws_stream: bool = False
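
The queue subclass means a consumer blocked on get() can be woken with an error instead of a value; _message_listener_callback above uses exactly this to push TaskNotRunning into subscription streams when the listener dies. A self-contained sketch of the pattern:

import asyncio

class ExceptionRaisingQueue(asyncio.Queue):
    # standalone re-implementation of the TaskReliantQueue.get() behavior
    async def get(self):
        item = await super().get()
        if isinstance(item, Exception):
            raise item
        return item

async def main() -> None:
    q = ExceptionRaisingQueue()
    q.put_nowait({"id": 1, "result": "0x0"})
    print(await q.get())  # a normal item is returned as-is
    q.put_nowait(RuntimeError("listener task is no longer running"))
    try:
        await q.get()
    except RuntimeError as e:
        print(f"consumer unblocked with: {e}")

asyncio.run(main())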
@@ -44,9 +84,9 @@ class RequestProcessor:
 
         self._request_information_cache: SimpleCache = SimpleCache(500)
         self._request_response_cache: SimpleCache = SimpleCache(500)
-        self._subscription_response_queue: asyncio.Queue[RPCResponse] = asyncio.Queue(
-            maxsize=subscription_response_queue_size
-        )
+        self._subscription_response_queue: TaskReliantQueue[
+            Union[RPCResponse, TaskNotRunning]
+        ] = TaskReliantQueue(maxsize=subscription_response_queue_size)
 
     @property
     def active_subscriptions(self) -> Dict[str, Any]:
@@ -62,7 +102,11 @@
         self,
         method: RPCEndpoint,
         params: Any,
-        response_formatters: Tuple[Callable[..., Any], ...],
+        response_formatters: Tuple[
+            Union[Dict[str, Callable[..., Any]], Callable[..., Any]],
+            Callable[..., Any],
+            Callable[..., Any],
+        ],
     ) -> Optional[str]:
         cached_requests_key = generate_cache_key((method, params))
         if cached_requests_key in self._provider._request_cache._data:
@@ -76,12 +120,17 @@
             )
             return None
 
-        # copy the request counter and find the next request id without incrementing
-        # since this is done when / if the request is successfully sent
-        request_id = next(copy(self._provider.request_counter))
-        cache_key = generate_cache_key(request_id)
+        if self._provider._is_batching:
+            # the _batch_request_counter is set when entering the context manager
+            current_request_id = self._provider._batch_request_counter
+            self._provider._batch_request_counter += 1
+        else:
+            # copy the request counter and find the next request id without incrementing
+            # since this is done when / if the request is successfully sent
+            current_request_id = next(copy(self._provider.request_counter))
+        cache_key = generate_cache_key(current_request_id)
 
-        self._bump_cache_if_key_present(cache_key, request_id)
+        self._bump_cache_if_key_present(cache_key, current_request_id)
 
         request_info = RequestInformation(
             method,
@@ -89,7 +138,7 @@
             response_formatters,
         )
         self._provider.logger.debug(
-            f"Caching request info:\n request_id={request_id},\n"
+            f"Caching request info:\n request_id={current_request_id},\n"
             f" cache_key={cache_key},\n request_info={request_info.__dict__}"
         )
         self._request_information_cache.cache(
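
Outside of batching, the request id is still only peeked at rather than consumed: copy() on the provider's request_counter (an itertools.count in the base provider) yields an independent iterator at the same position, and the real counter only advances when the request is actually sent. Illustration:

from copy import copy
from itertools import count

request_counter = count(0)

peeked = next(copy(request_counter))  # peek without advancing the original
assert peeked == 0
assert next(request_counter) == 0     # the real counter was not consumed
assert next(request_counter) == 1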
@@ -153,9 +202,8 @@
                 # i.e. subscription request information remains in the cache
                 self._request_information_cache.get_cache_entry(cache_key)
             )
-
         else:
-            # retrieve the request info from the cache using the request id
+            # retrieve the request info from the cache using the response id
             cache_key = generate_cache_key(response["id"])
             if response in self._provider._request_cache._data.values():
                 request_info = (
@@ -184,6 +232,33 @@
 
         return request_info
 
+    def append_result_formatter_for_request(
+        self, request_id: int, result_formatter: Callable[..., Any]
+    ) -> None:
+        cache_key = generate_cache_key(request_id)
+        cached_request_info_for_id: RequestInformation = (
+            self._request_information_cache.get_cache_entry(cache_key)
+        )
+        if cached_request_info_for_id is not None:
+            (
+                current_result_formatters,
+                error_formatters,
+                null_result_formatters,
+            ) = cached_request_info_for_id.response_formatters
+            cached_request_info_for_id.response_formatters = (
+                compose(
+                    result_formatter,
+                    current_result_formatters,
+                ),
+                error_formatters,
+                null_result_formatters,
+            )
+        else:
+            self._provider.logger.debug(
+                f"No cached request info for response id `{request_id}`. Cannot "
+                f"append response formatter for response."
+            )
+
     def append_middleware_response_processor(
         self,
         response: RPCResponse,
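
compose() from eth_utils.toolz applies right-to-left, so the appended result_formatter runs after the formatters already cached for the request:

from eth_utils.toolz import compose

def strip_0x(value: str) -> str:
    return value[2:]

def hex_to_int(value: str) -> int:
    return int(value, 16)

# compose(f, g)(x) == f(g(x)): here hex_to_int sits in the appended
# result_formatter position, so it runs last
formatter = compose(hex_to_int, strip_0x)
assert formatter("0xff") == 255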
@@ -218,7 +293,7 @@
     ) -> None:
         if subscription:
             if self._subscription_response_queue.full():
-                self._provider.logger.info(
+                self._provider.logger.debug(
                     "Subscription queue is full. Waiting for provider to consume "
                     "messages before caching."
                 )
@@ -229,6 +304,15 @@
                 f"Caching subscription response:\n response={raw_response}"
             )
             await self._subscription_response_queue.put(raw_response)
+        elif isinstance(raw_response, list):
+            # Since only one batch should be in the cache at all times, we use a
+            # constant cache key for the batch response.
+            cache_key = generate_cache_key(BATCH_REQUEST_ID)
+            self._provider.logger.debug(
+                f"Caching batch response:\n cache_key={cache_key},\n"
+                f" response={raw_response}"
+            )
+            self._request_response_cache.cache(cache_key, raw_response)
         else:
             response_id = raw_response.get("id")
             cache_key = generate_cache_key(response_id)
@@ -289,6 +373,6 @@
         """Clear the request processor caches."""
         self._request_information_cache.clear()
         self._request_response_cache.clear()
-        self._subscription_response_queue = asyncio.Queue(
+        self._subscription_response_queue = TaskReliantQueue(
             maxsize=self._subscription_response_queue.maxsize
         )
@@ -5,8 +5,11 @@ import os
 from typing import (
     Any,
     Dict,
+    List,
     Optional,
+    Tuple,
     Union,
+    cast,
 )
 
 from eth_typing import (
@@ -25,6 +28,10 @@ from websockets.exceptions import (
     WebSocketException,
 )
 
+from web3._utils.batching import (
+    BATCH_REQUEST_ID,
+    sort_batch_response_by_response_ids,
+)
 from web3._utils.caching import (
     async_handle_request_caching,
 )
@@ -61,7 +68,6 @@ class WebSocketProvider(PersistentConnectionProvider):
     logger = logging.getLogger("web3.providers.WebSocketProvider")
     is_async: bool = True
 
-    _max_connection_retries: int = 5
     _ws: Optional[WebSocketClientProtocol] = None
 
     def __init__(
@@ -116,47 +122,13 @@ class WebSocketProvider(PersistentConnectionProvider):
             ) from e
         return False
 
-    async def connect(self) -> None:
-        _connection_attempts = 0
-        _backoff_rate_change = 1.75
-        _backoff_time = 1.75
-
-        while _connection_attempts != self._max_connection_retries:
-            try:
-                _connection_attempts += 1
-                self._ws = await connect(self.endpoint_uri, **self.websocket_kwargs)
-                self._message_listener_task = asyncio.create_task(
-                    self._message_listener()
-                )
-                break
-            except WebSocketException as e:
-                if _connection_attempts == self._max_connection_retries:
-                    raise ProviderConnectionError(
-                        f"Could not connect to endpoint: {self.endpoint_uri}. "
-                        f"Retries exceeded max of {self._max_connection_retries}."
-                    ) from e
-                self.logger.info(
-                    f"Could not connect to endpoint: {self.endpoint_uri}. Retrying in "
-                    f"{round(_backoff_time, 1)} seconds.",
-                    exc_info=True,
-                )
-                await asyncio.sleep(_backoff_time)
-                _backoff_time *= _backoff_rate_change
+    async def _provider_specific_connect(self) -> None:
+        self._ws = await connect(self.endpoint_uri, **self.websocket_kwargs)
 
-    async def disconnect(self) -> None:
+    async def _provider_specific_disconnect(self) -> None:
         if self._ws is not None and not self._ws.closed:
             await self._ws.close()
             self._ws = None
-        self.logger.debug(
-            f'Successfully disconnected from endpoint: "{self.endpoint_uri}'
-        )
-
-        try:
-            self._message_listener_task.cancel()
-            await self._message_listener_task
-        except (asyncio.CancelledError, StopAsyncIteration):
-            pass
-        self._request_processor.clear_caches()
 
     @async_handle_request_caching
     async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
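
The retry loop removed here was not dropped; it moved into PersistentConnectionProvider.connect() (first file above), with OSError added to the retried exceptions so non-websocket transports are covered too. A quick check of the schedule implied by the defaults (_backoff_time = 1.75, _backoff_rate_change = 1.75, five attempts):

backoff_time, backoff_rate_change = 1.75, 1.75
delays = []
for _ in range(5 - 1):  # five attempts leave at most four sleeps between them
    delays.append(round(backoff_time, 2))
    backoff_time *= backoff_rate_change

print(delays)  # [1.75, 3.06, 5.36, 9.38] -- roughly 20s before giving up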
@@ -176,34 +148,39 @@ class WebSocketProvider(PersistentConnectionProvider):
 
         return response
 
-    async def _message_listener(self) -> None:
-        self.logger.info(
-            "WebSocket listener background task started. Storing all messages in "
-            "appropriate request processor queues / caches to be processed."
+    async def make_batch_request(
+        self, requests: List[Tuple[RPCEndpoint, Any]]
+    ) -> List[RPCResponse]:
+        request_data = self.encode_batch_rpc_request(requests)
+
+        if self._ws is None:
+            raise ProviderConnectionError(
+                "Connection to websocket has not been initiated for the provider."
+            )
+
+        await asyncio.wait_for(
+            self._ws.send(request_data), timeout=self.request_timeout
+        )
+
+        response = cast(
+            List[RPCResponse],
+            await self._get_response_for_request_id(BATCH_REQUEST_ID),
         )
-        while True:
-            # the use of sleep(0) seems to be the most efficient way to yield control
-            # back to the event loop to share the loop with other tasks.
+        return response
+
+    async def _provider_specific_message_listener(self) -> None:
+        async for raw_message in self._ws:
             await asyncio.sleep(0)
 
-            try:
-                async for raw_message in self._ws:
-                    await asyncio.sleep(0)
-
-                    response = json.loads(raw_message)
-                    subscription = response.get("method") == "eth_subscription"
-                    await self._request_processor.cache_raw_response(
-                        response, subscription=subscription
-                    )
-            except Exception as e:
-                if not self.silence_listener_task_exceptions:
-                    loop = asyncio.get_event_loop()
-                    for task in asyncio.all_tasks(loop=loop):
-                        task.cancel()
-                    raise e
-
-                self.logger.error(
-                    "Exception caught in listener, error logging and keeping "
-                    "listener background task alive."
-                    f"\n error={e.__class__.__name__}: {e}"
-                )
+            response = json.loads(raw_message)
+            if isinstance(response, list):
+                response = sort_batch_response_by_response_ids(response)
+
+            subscription = (
+                response.get("method") == "eth_subscription"
+                if not isinstance(response, list)
+                else False
+            )
+            await self._request_processor.cache_raw_response(
+                response, subscription=subscription
+            )
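
Batch members can come back from the server in any order, so the listener reorders them by JSON-RPC id before caching them under the shared batch key. A sketch of what sort_batch_response_by_response_ids presumably does (the helper's internals are an assumption):

from typing import Any, Dict, List

def sort_by_response_id(responses: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    # assumed behavior: order batch responses by their JSON-RPC "id"
    return sorted(responses, key=lambda response: response["id"])

out_of_order = [{"id": 1, "result": "0x1"}, {"id": 0, "result": "0x0"}]
assert [r["id"] for r in sort_by_response_id(out_of_order)] == [0, 1]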
@@ -4,9 +4,11 @@ from typing import (
     Any,
     Dict,
     Iterable,
+    List,
     Optional,
     Tuple,
     Union,
+    cast,
 )
 
 from aiohttp import (
@@ -37,6 +39,9 @@ from web3.types import (
     RPCResponse,
 )
 
+from ..._utils.batching import (
+    sort_batch_response_by_response_ids,
+)
 from ..._utils.caching import (
     async_handle_request_caching,
 )
@@ -61,6 +66,7 @@ class AsyncHTTPProvider(AsyncJSONBaseProvider):
         exception_retry_configuration: Union[
             ExceptionRetryConfiguration, Empty
         ] = empty,
+        **kwargs: Any,
     ) -> None:
         if endpoint_uri is None:
             self.endpoint_uri = get_default_http_endpoint()
@@ -70,7 +76,7 @@ class AsyncHTTPProvider(AsyncJSONBaseProvider):
         self._request_kwargs = request_kwargs or {}
         self._exception_retry_configuration = exception_retry_configuration
 
-        super().__init__()
+        super().__init__(**kwargs)
 
     async def cache_async_session(self, session: ClientSession) -> ClientSession:
         return await _async_cache_and_return_session(self.endpoint_uri, session)
@@ -147,3 +153,15 @@ class AsyncHTTPProvider(AsyncJSONBaseProvider):
             f"Method: {method}, Response: {response}"
         )
         return response
+
+    async def make_batch_request(
+        self, batch_requests: List[Tuple[RPCEndpoint, Any]]
+    ) -> List[RPCResponse]:
+        self.logger.debug(f"Making batch request HTTP - uri: `{self.endpoint_uri}`")
+        request_data = self.encode_batch_rpc_request(batch_requests)
+        raw_response = await async_make_post_request(
+            self.endpoint_uri, request_data, **self.get_request_kwargs()
+        )
+        self.logger.debug("Received batch response HTTP.")
+        responses_list = cast(List[RPCResponse], self.decode_rpc_response(raw_response))
+        return sort_batch_response_by_response_ids(responses_list)
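
Called directly, the new method takes (method, params) tuples and returns the id-sorted responses. A usage sketch against a hypothetical local node:

import asyncio

from web3 import AsyncHTTPProvider
from web3.types import RPCEndpoint

async def main() -> None:
    provider = AsyncHTTPProvider("http://127.0.0.1:8545")  # illustrative URI
    responses = await provider.make_batch_request(
        [
            (RPCEndpoint("eth_chainId"), []),
            (RPCEndpoint("eth_blockNumber"), []),
        ]
    )
    for response in responses:
        print(response["id"], response["result"])

asyncio.run(main())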
web3/providers/rpc/rpc.py CHANGED
@@ -5,9 +5,11 @@ from typing import (
     Any,
     Dict,
     Iterable,
+    List,
     Optional,
     Tuple,
     Union,
+    cast,
 )
 
 from eth_typing import (
@@ -35,6 +37,9 @@ from web3.types import (
     RPCResponse,
 )
 
+from ..._utils.batching import (
+    sort_batch_response_by_response_ids,
+)
 from ..._utils.caching import (
     handle_request_caching,
 )
@@ -65,6 +70,7 @@ class HTTPProvider(JSONBaseProvider):
         exception_retry_configuration: Union[
             ExceptionRetryConfiguration, Empty
         ] = empty,
+        **kwargs: Any,
     ) -> None:
         if endpoint_uri is None:
             self.endpoint_uri = get_default_http_endpoint()
@@ -77,7 +83,7 @@ class HTTPProvider(JSONBaseProvider):
         if session:
             cache_and_return_session(self.endpoint_uri, session)
 
-        super().__init__()
+        super().__init__(**kwargs)
 
     def __str__(self) -> str:
         return f"RPC connection {self.endpoint_uri}"
@@ -155,3 +161,15 @@ class HTTPProvider(JSONBaseProvider):
             f"Method: {method}, Response: {response}"
         )
         return response
+
+    def make_batch_request(
+        self, batch_requests: List[Tuple[RPCEndpoint, Any]]
+    ) -> List[RPCResponse]:
+        self.logger.debug(f"Making batch request HTTP, uri: `{self.endpoint_uri}`")
+        request_data = self.encode_batch_rpc_request(batch_requests)
+        raw_response = make_post_request(
+            self.endpoint_uri, request_data, **self.get_request_kwargs()
+        )
+        self.logger.debug("Received batch response HTTP.")
+        responses_list = cast(List[RPCResponse], self.decode_rpc_response(raw_response))
+        return sort_batch_response_by_response_ids(responses_list)
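
These provider-level methods back the batching context manager introduced alongside them; the high-level usage below is assumed from the v7 documentation:

from web3 import HTTPProvider, Web3

w3 = Web3(HTTPProvider("http://127.0.0.1:8545"))  # illustrative URI

with w3.batch_requests() as batch:
    batch.add(w3.eth.get_block(1))
    batch.add(w3.eth.get_block(2))
    block_one, block_two = batch.execute()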