hive-nectar 0.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hive_nectar-0.2.9.dist-info/METADATA +194 -0
- hive_nectar-0.2.9.dist-info/RECORD +87 -0
- hive_nectar-0.2.9.dist-info/WHEEL +4 -0
- hive_nectar-0.2.9.dist-info/entry_points.txt +2 -0
- hive_nectar-0.2.9.dist-info/licenses/LICENSE.txt +23 -0
- nectar/__init__.py +37 -0
- nectar/account.py +5076 -0
- nectar/amount.py +553 -0
- nectar/asciichart.py +303 -0
- nectar/asset.py +122 -0
- nectar/block.py +574 -0
- nectar/blockchain.py +1242 -0
- nectar/blockchaininstance.py +2590 -0
- nectar/blockchainobject.py +263 -0
- nectar/cli.py +5937 -0
- nectar/comment.py +1552 -0
- nectar/community.py +854 -0
- nectar/constants.py +95 -0
- nectar/discussions.py +1437 -0
- nectar/exceptions.py +152 -0
- nectar/haf.py +381 -0
- nectar/hive.py +630 -0
- nectar/imageuploader.py +114 -0
- nectar/instance.py +113 -0
- nectar/market.py +876 -0
- nectar/memo.py +542 -0
- nectar/message.py +379 -0
- nectar/nodelist.py +309 -0
- nectar/price.py +603 -0
- nectar/profile.py +74 -0
- nectar/py.typed +0 -0
- nectar/rc.py +333 -0
- nectar/snapshot.py +1024 -0
- nectar/storage.py +62 -0
- nectar/transactionbuilder.py +659 -0
- nectar/utils.py +630 -0
- nectar/version.py +3 -0
- nectar/vote.py +722 -0
- nectar/wallet.py +472 -0
- nectar/witness.py +728 -0
- nectarapi/__init__.py +12 -0
- nectarapi/exceptions.py +126 -0
- nectarapi/graphenerpc.py +596 -0
- nectarapi/node.py +194 -0
- nectarapi/noderpc.py +79 -0
- nectarapi/openapi.py +107 -0
- nectarapi/py.typed +0 -0
- nectarapi/rpcutils.py +98 -0
- nectarapi/version.py +3 -0
- nectarbase/__init__.py +15 -0
- nectarbase/ledgertransactions.py +106 -0
- nectarbase/memo.py +242 -0
- nectarbase/objects.py +521 -0
- nectarbase/objecttypes.py +21 -0
- nectarbase/operationids.py +102 -0
- nectarbase/operations.py +1357 -0
- nectarbase/py.typed +0 -0
- nectarbase/signedtransactions.py +89 -0
- nectarbase/transactions.py +11 -0
- nectarbase/version.py +3 -0
- nectargraphenebase/__init__.py +27 -0
- nectargraphenebase/account.py +1121 -0
- nectargraphenebase/aes.py +49 -0
- nectargraphenebase/base58.py +197 -0
- nectargraphenebase/bip32.py +575 -0
- nectargraphenebase/bip38.py +110 -0
- nectargraphenebase/chains.py +15 -0
- nectargraphenebase/dictionary.py +2 -0
- nectargraphenebase/ecdsasig.py +309 -0
- nectargraphenebase/objects.py +130 -0
- nectargraphenebase/objecttypes.py +8 -0
- nectargraphenebase/operationids.py +5 -0
- nectargraphenebase/operations.py +25 -0
- nectargraphenebase/prefix.py +13 -0
- nectargraphenebase/py.typed +0 -0
- nectargraphenebase/signedtransactions.py +221 -0
- nectargraphenebase/types.py +557 -0
- nectargraphenebase/unsignedtransactions.py +288 -0
- nectargraphenebase/version.py +3 -0
- nectarstorage/__init__.py +57 -0
- nectarstorage/base.py +317 -0
- nectarstorage/exceptions.py +15 -0
- nectarstorage/interfaces.py +244 -0
- nectarstorage/masterpassword.py +237 -0
- nectarstorage/py.typed +0 -0
- nectarstorage/ram.py +27 -0
- nectarstorage/sqlite.py +343 -0
nectarapi/graphenerpc.py
ADDED
|
@@ -0,0 +1,596 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
import re
|
|
4
|
+
import threading
|
|
5
|
+
from typing import Any, Dict, List, Optional, Union
|
|
6
|
+
|
|
7
|
+
import httpx
|
|
8
|
+
from httpx import ConnectError as HttpxConnectError
|
|
9
|
+
from httpx import HTTPStatusError, RequestError, TimeoutException
|
|
10
|
+
|
|
11
|
+
from nectargraphenebase.chains import known_chains
|
|
12
|
+
from nectargraphenebase.version import version as nectar_version
|
|
13
|
+
|
|
14
|
+
from .exceptions import (
|
|
15
|
+
CallRetriesReached,
|
|
16
|
+
InvalidParameters,
|
|
17
|
+
NoApiWithName,
|
|
18
|
+
NoMethodWithName,
|
|
19
|
+
RPCConnection,
|
|
20
|
+
RPCError,
|
|
21
|
+
RPCErrorDoRetry,
|
|
22
|
+
UnauthorizedError,
|
|
23
|
+
WorkingNodeMissing,
|
|
24
|
+
)
|
|
25
|
+
from .node import Nodes
|
|
26
|
+
from .openapi import get_default_api_for_method
|
|
27
|
+
from .rpcutils import get_query
|
|
28
|
+
|
|
29
|
+
log = logging.getLogger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
import atexit
|
|
33
|
+
|
|
34
|
+
# Process-wide httpx client shared by all GrapheneRPC instances when no proxy
# is configured. Created lazily by shared_httpx_client() and closed at
# interpreter exit by _cleanup_shared_client().
_shared_httpx_client: httpx.Client | None = None
# One pooled client per proxy URL (e.g. the Tor SOCKS endpoint), also created
# lazily and closed at interpreter exit.
_proxy_clients: Dict[str, httpx.Client] = {}
# Guards lazy creation of the clients above across threads.
_client_lock = threading.Lock()
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def _cleanup_shared_client() -> None:
    """Close the shared httpx client and every cached proxy client.

    Registered with atexit so pooled connections are released when the
    interpreter shuts down.
    """
    global _shared_httpx_client
    if _shared_httpx_client is not None:
        _shared_httpx_client.close()
        _shared_httpx_client = None
    # Drain the proxy-client cache, closing each client as it is removed.
    while _proxy_clients:
        _, proxy_client = _proxy_clients.popitem()
        proxy_client.close()


atexit.register(_cleanup_shared_client)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def shared_httpx_client(proxy: Optional[str] = None) -> httpx.Client:
    """
    Return a process-wide httpx client with connection pooling.

    Clients are constructed lazily (under a lock) and reused for all RPC
    calls to avoid repeated TCP/TLS handshakes. When ``proxy`` is given, a
    dedicated client per proxy URL is cached and returned instead of the
    plain shared client.
    """
    global _shared_httpx_client
    if proxy:
        with _client_lock:
            proxy_client = _proxy_clients.get(proxy)
            if proxy_client is None:
                proxy_client = httpx.Client(http2=False, proxy=proxy)
                _proxy_clients[proxy] = proxy_client
        return proxy_client

    with _client_lock:
        if _shared_httpx_client is None:
            _shared_httpx_client = httpx.Client(http2=False)
        return _shared_httpx_client
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class GrapheneRPC:
    """
    This class allows calling API methods synchronously, without callbacks.

    It logs warnings and errors.

    :param str urls: Either a single HTTP URL, or a list of HTTP URLs
    :param str user: Username for Authentication
    :param str password: Password for Authentication
    :param int num_retries: Number of retries for node connection (default is 100)
    :param int num_retries_call: Number of retries for RPC calls on node error (default is 5)
    :param int timeout: Timeout setting for HTTP nodes (default is 60)
    :param bool autoconnect: Automatically connect on initialization (default is True)
    :param bool use_tor: Use Tor proxy for connections
    :param dict custom_chains: Custom chains to add to known chains
    """

    def __init__(
        self,
        urls: Union[str, List[str]],
        user: Optional[str] = None,
        password: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """
        Create a synchronous HTTP RPC client for Graphene-based nodes.

        Initializes RPC mode, retry/timeouts, node management, optional credentials,
        and feature flags. Supported keyword arguments (with defaults):
        - timeout (int): request timeout in seconds (default 60).
        - num_retries (int): number of node-retry attempts for node selection (default 100).
        - num_retries_call (int): per-call retry attempts before switching nodes (default 5).
        - use_tor (bool): enable Tor proxies for the shared HTTP session (default False).
        - custom_chains (dict): mapping of additional known chain configurations to
          merge into this instance's known_chains.
        - autoconnect (bool): if True (default), attempts to connect to a working node
          immediately via rpcconnect().

        Credentials:
        - user, password: optional basic-auth credentials applied to HTTP requests.

        Side effects:
        - Builds a Nodes instance for node tracking and may call rpcconnect() when
          autoconnect is True.
        """
        self._request_id = 0
        self.timeout = kwargs.get("timeout", 60)
        num_retries = kwargs.get("num_retries", 100)
        num_retries_call = kwargs.get("num_retries_call", 5)
        self.use_tor = kwargs.get("use_tor", False)
        # Shallow-copy the module-level chain table: assigning the shared dict
        # directly would make custom_chains leak into every other instance
        # (and every other importer of known_chains).
        self.known_chains = dict(known_chains)
        custom_chain = kwargs.get("custom_chains", {})
        for c in custom_chain:
            # Existing (built-in) chain definitions win over custom ones.
            if c not in self.known_chains:
                self.known_chains[c] = custom_chain[c]

        self.nodes = Nodes(urls, num_retries, num_retries_call)
        if self.nodes.working_nodes_count == 0:
            log.warning("No working nodes available at initialization")

        self.user = user
        self.password = password
        self.url = None
        self.session: Optional[httpx.Client] = None
        # Queue of deferred queries for batched JSON-RPC calls (see __getattr__).
        self.rpc_queue = []
        if kwargs.get("autoconnect", True):
            self.rpcconnect()
|
|
136
|
+
|
|
137
|
+
def _handle_transport_error(self, exc: Exception, *, call_retry: bool = False) -> None:
|
|
138
|
+
"""
|
|
139
|
+
Centralized transport error handling: increment counters, defer to node retry policy,
|
|
140
|
+
and reconnect to the next node.
|
|
141
|
+
"""
|
|
142
|
+
self.nodes.increase_error_cnt()
|
|
143
|
+
self.nodes.sleep_and_check_retries(str(exc), sleep=False, call_retry=call_retry)
|
|
144
|
+
self.rpcconnect()
|
|
145
|
+
|
|
146
|
+
    @property
    def num_retries(self) -> int:
        """Configured number of node-connection retries (delegates to Nodes)."""
        return self.nodes.num_retries

    @property
    def num_retries_call(self) -> int:
        """Configured number of per-call retries (delegates to Nodes)."""
        return self.nodes.num_retries_call

    @property
    def error_cnt_call(self) -> int:
        """Current per-call error counter (delegates to Nodes)."""
        return self.nodes.error_cnt_call

    @property
    def error_cnt(self) -> int:
        """Current node-level error counter (delegates to Nodes)."""
        return self.nodes.error_cnt
|
|
161
|
+
|
|
162
|
+
def get_request_id(self) -> int:
|
|
163
|
+
"""Get request id."""
|
|
164
|
+
self._request_id += 1
|
|
165
|
+
return self._request_id
|
|
166
|
+
|
|
167
|
+
    def next(self) -> None:
        """
        Advance to the next available RPC node and attempt to (re)connect.

        Thin convenience wrapper around rpcconnect(), which rotates to the
        next node by default.
        """
        self.rpcconnect()
|
|
172
|
+
|
|
173
|
+
def rpcconnect(self, next_url: bool = True) -> None:
|
|
174
|
+
"""
|
|
175
|
+
Selects and establishes connection to an available RPC node.
|
|
176
|
+
|
|
177
|
+
Attempts to connect to the next available node (or reuse the current one) and initializes per-instance HTTP session state needed for subsequent RPC calls. On a successful connection this method sets: self.url, self.session (shared session reused), self._proxies (Tor proxies when configured), and self.headers.
|
|
178
|
+
|
|
179
|
+
Parameters:
|
|
180
|
+
next_url (bool): If True, advance to the next node before attempting connection; if False, retry the current node.
|
|
181
|
+
|
|
182
|
+
Raises:
|
|
183
|
+
RPCError: When a get_config probe returns no properties (connection reached but no config received).
|
|
184
|
+
KeyboardInterrupt: Propagated if the operation is interrupted by the user.
|
|
185
|
+
"""
|
|
186
|
+
if self.nodes.working_nodes_count == 0:
|
|
187
|
+
return
|
|
188
|
+
while True:
|
|
189
|
+
if next_url:
|
|
190
|
+
self.url = next(self.nodes)
|
|
191
|
+
self.nodes.reset_error_cnt_call()
|
|
192
|
+
log.debug("Trying to connect to node %s" % self.url)
|
|
193
|
+
self.ws = None
|
|
194
|
+
self._proxy: Optional[str] = None
|
|
195
|
+
if self.use_tor:
|
|
196
|
+
self._proxy = "socks5h://localhost:9050"
|
|
197
|
+
# Use a shared client unless proxies are required.
|
|
198
|
+
self.session = shared_httpx_client(self._proxy)
|
|
199
|
+
self.ws = None
|
|
200
|
+
self.headers = {
|
|
201
|
+
"User-Agent": "nectar v%s" % (nectar_version),
|
|
202
|
+
"content-type": "application/json; charset=utf-8",
|
|
203
|
+
}
|
|
204
|
+
break
|
|
205
|
+
|
|
206
|
+
def request_send(self, payload: bytes) -> httpx.Response:
|
|
207
|
+
"""
|
|
208
|
+
Send the prepared RPC payload to the currently connected node via HTTP POST.
|
|
209
|
+
|
|
210
|
+
Sends `payload` to the client's active URL using the shared HTTP session. If username and password were provided to the client, HTTP basic auth is applied. Raises UnauthorizedError when the node responds with HTTP 401.
|
|
211
|
+
|
|
212
|
+
Parameters:
|
|
213
|
+
payload (str | bytes): The JSON-RPC payload (string or bytes) to send in the POST body.
|
|
214
|
+
|
|
215
|
+
Returns:
|
|
216
|
+
httpx.Response: The raw HTTP response object from the node.
|
|
217
|
+
|
|
218
|
+
Raises:
|
|
219
|
+
UnauthorizedError: If the HTTP response status code is 401 (Unauthorized).
|
|
220
|
+
"""
|
|
221
|
+
if self.session is None:
|
|
222
|
+
raise RPCConnection("Session must be initialized")
|
|
223
|
+
if self.url is None:
|
|
224
|
+
raise RPCConnection("URL must be initialized")
|
|
225
|
+
auth: httpx.Auth | None = None
|
|
226
|
+
if self.user is not None and self.password is not None:
|
|
227
|
+
auth = httpx.BasicAuth(self.user, self.password)
|
|
228
|
+
post_kwargs: Dict[str, Any] = {
|
|
229
|
+
"content": payload,
|
|
230
|
+
"headers": self.headers,
|
|
231
|
+
"timeout": self.timeout,
|
|
232
|
+
}
|
|
233
|
+
if auth is not None:
|
|
234
|
+
post_kwargs["auth"] = auth
|
|
235
|
+
response = self.session.post(self.url, **post_kwargs)
|
|
236
|
+
if response.status_code == 401:
|
|
237
|
+
raise UnauthorizedError
|
|
238
|
+
response.raise_for_status()
|
|
239
|
+
return response
|
|
240
|
+
|
|
241
|
+
def version_string_to_int(self, network_version: str) -> int:
|
|
242
|
+
"""
|
|
243
|
+
Convert a dotted version string "MAJOR.MINOR.PATCH" into a single integer for easy comparison.
|
|
244
|
+
|
|
245
|
+
The integer is computed as: major * 10^8 + minor * 10^4 + patch. For example, "2.3.15" -> 200030015.
|
|
246
|
+
|
|
247
|
+
Parameters:
|
|
248
|
+
network_version (str): Version string in the form "major.minor.patch".
|
|
249
|
+
|
|
250
|
+
Returns:
|
|
251
|
+
int: Integer representation suitable for numeric comparisons.
|
|
252
|
+
|
|
253
|
+
Raises:
|
|
254
|
+
ValueError: If any version component is not an integer.
|
|
255
|
+
IndexError: If the version string does not contain three components.
|
|
256
|
+
"""
|
|
257
|
+
version_list = network_version.split(".")
|
|
258
|
+
return int(int(version_list[0]) * 1e8 + int(version_list[1]) * 1e4 + int(version_list[2]))
|
|
259
|
+
|
|
260
|
+
    def get_network(self, props: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """
        Detects and returns the network/chain configuration for the connected node.

        If props is not provided, this call fetches node configuration via
        get_config(api="database_api") and inspects property keys to determine the
        chain identifier, address prefix, network version, and core asset
        definitions. It builds a chain configuration dict with keys:
        - chain_id: canonical chain identifier string
        - prefix: account/address prefix for the network
        - min_version: reported chain version string
        - chain_assets: list of asset dicts (each with keys "asset" (NAI),
          "precision", "symbol", and "id")

        If the detected chain matches an entry in self.known_chains (preferring the
        highest compatible known min_version), that known_chains entry is returned
        instead of the freshly built config.

        Special behaviors:
        - When props is None, get_config(api="database_api") is called.
        - If keys carry conflicting blockchain prefixes, the most frequent prefix wins.
        - Test-network asset NAIs are mapped to "TBD" or "TESTS" symbols when appropriate.
        - Asset entries are assigned stable incremental ids based on sorted NAI order.

        Returns:
            dict: A chain configuration (either a matching entry from
            self.known_chains or a freshly constructed chain_config).

        Raises:
            RPCError: If chain_id cannot be determined or no compatible known
            chain is found.
        """
        if props is None:
            props = self.get_config(api="database_api")
        chain_id = None
        network_version = None
        blockchain_name = None
        chain_config = None
        prefix = None
        symbols = []
        chain_assets = []

        # Tally the first underscore-separated token of every config key
        # (e.g. "HIVE" from "HIVE_CHAIN_ID") to guess the blockchain name.
        prefix_count = {}
        for key in props:
            if key.split("_")[0] in prefix_count:
                prefix_count[key.split("_")[0]] += 1
            else:
                prefix_count[key.split("_")[0]] = 1
        if len(prefix_count) > 0:
            sorted_prefix_count = sorted(prefix_count.items(), key=lambda x: x[1], reverse=True)
            # Require at least two keys sharing the token before trusting it.
            if sorted_prefix_count[0][1] > 1:
                blockchain_name = sorted_prefix_count[0][0]

        # Extract chain id, version, address prefix, and asset symbols from the
        # config keys, restricted to the inferred blockchain name when known.
        for key in props:
            if key[-8:] == "CHAIN_ID" and blockchain_name is None:
                chain_id = props[key]
                blockchain_name = key.split("_")[0]
            elif key[-8:] == "CHAIN_ID" and key.split("_")[0] == blockchain_name:
                chain_id = props[key]
            elif key[-13:] == "CHAIN_VERSION" and blockchain_name is None:
                network_version = props[key]
            elif key[-13:] == "CHAIN_VERSION" and key.split("_")[0] == blockchain_name:
                network_version = props[key]
            elif key[-14:] == "ADDRESS_PREFIX" and blockchain_name is None:
                prefix = props[key]
            elif key[-14:] == "ADDRESS_PREFIX" and key.split("_")[0] == blockchain_name:
                prefix = props[key]
            elif key[-6:] == "SYMBOL":
                # Each *_SYMBOL key is expected to hold a dict with "nai" and
                # "decimals" — TODO confirm against actual node responses.
                value = {}
                value["asset"] = props[key]["nai"]
                value["precision"] = props[key]["decimals"]
                if (
                    "IS_TEST_NET" in props
                    and props["IS_TEST_NET"]
                    and "nai" in props[key]
                    and props[key]["nai"] == "@@000000013"
                ):
                    value["symbol"] = "TBD"
                elif (
                    "IS_TEST_NET" in props
                    and props["IS_TEST_NET"]
                    and "nai" in props[key]
                    and props[key]["nai"] == "@@000000021"
                ):
                    value["symbol"] = "TESTS"
                else:
                    # Strip the "_SYMBOL" suffix to get the symbol name.
                    value["symbol"] = key[:-7]
                # Placeholder; real id assigned below by sorted NAI order.
                value["id"] = -1
                symbols.append(value)
        # With exactly two assets, ids start at 1 instead of 0.
        symbol_id = 0
        if len(symbols) == 2:
            symbol_id = 1
        # NOTE(review): `lambda self:` shadows the method's `self` inside the
        # key function; it works but reads badly.
        for s in sorted(symbols, key=lambda self: self["asset"], reverse=False):
            s["id"] = symbol_id
            symbol_id += 1
            chain_assets.append(s)
        # Only build a fallback config when every component was detected.
        if (
            chain_id is not None
            and network_version is not None
            and len(chain_assets) > 0
            and prefix is not None
        ):
            chain_config = {
                "prefix": prefix,
                "chain_id": chain_id,
                "min_version": network_version,
                "chain_assets": chain_assets,
            }

        if chain_id is None:
            raise RPCError("Connecting to unknown network!")
        # Among known chains with the same chain_id whose min_version is
        # compatible (<= reported version), prefer the highest min_version.
        highest_version_chain = None
        for k, v in list(self.known_chains.items()):
            if (
                blockchain_name is not None
                and blockchain_name not in k
                and blockchain_name != "CHAIN"
            ):
                continue
            if v["chain_id"] == chain_id and self.version_string_to_int(
                v["min_version"]
            ) <= self.version_string_to_int(str(network_version)):
                if highest_version_chain is None:
                    highest_version_chain = v
                elif self.version_string_to_int(v["min_version"]) > self.version_string_to_int(
                    highest_version_chain["min_version"]
                ):
                    highest_version_chain = v
        if highest_version_chain is None and chain_config is not None:
            return chain_config
        elif highest_version_chain is None:
            raise RPCError("Connecting to unknown network!")
        else:
            return highest_version_chain
|
|
389
|
+
|
|
390
|
+
def _check_for_server_error(self, reply: Dict[str, Any]) -> None:
|
|
391
|
+
"""Checks for server error message in reply"""
|
|
392
|
+
reply_str = str(reply)
|
|
393
|
+
if re.search("Internal Server Error", reply_str) or re.search(r"\b500\b", reply_str):
|
|
394
|
+
raise RPCErrorDoRetry("Internal Server Error")
|
|
395
|
+
elif re.search("Not Implemented", reply_str) or re.search("501", reply_str):
|
|
396
|
+
raise RPCError("Not Implemented")
|
|
397
|
+
elif re.search("Bad Gateway", reply_str) or re.search("502", reply_str):
|
|
398
|
+
raise RPCErrorDoRetry("Bad Gateway")
|
|
399
|
+
elif re.search("Too Many Requests", reply_str) or re.search(r"\b429\b", reply_str):
|
|
400
|
+
raise RPCErrorDoRetry("Too Many Requests")
|
|
401
|
+
elif re.search("Service Unavailable", reply_str) or re.search(r"\b503\b", reply_str):
|
|
402
|
+
raise RPCErrorDoRetry("Service Unavailable")
|
|
403
|
+
elif re.search("Gateway Timeout", reply_str) or re.search(r"\b504\b", reply_str):
|
|
404
|
+
raise RPCErrorDoRetry("Gateway Timeout")
|
|
405
|
+
elif re.search("HTTP Version not supported", reply_str) or re.search(
|
|
406
|
+
r"\b505\b", reply_str
|
|
407
|
+
):
|
|
408
|
+
raise RPCError("HTTP Version not supported")
|
|
409
|
+
elif re.search("Proxy Authentication Required", reply_str) or re.search(
|
|
410
|
+
r"\b407\b", reply_str
|
|
411
|
+
):
|
|
412
|
+
raise RPCErrorDoRetry("Proxy Authentication Required")
|
|
413
|
+
elif re.search("Request Timeout", reply_str) or re.search(r"\b408\b", reply_str):
|
|
414
|
+
raise RPCErrorDoRetry("Request Timeout")
|
|
415
|
+
elif re.search("Conflict", reply_str) or re.search(r"\b409\b", reply_str):
|
|
416
|
+
raise RPCErrorDoRetry("Conflict")
|
|
417
|
+
elif re.search("Gone", reply_str) or re.search(r"\b410\b", reply_str):
|
|
418
|
+
raise RPCErrorDoRetry("Gone")
|
|
419
|
+
elif re.search("Length Required", reply_str) or re.search(r"\b411\b", reply_str):
|
|
420
|
+
raise RPCErrorDoRetry("Length Required")
|
|
421
|
+
elif re.search("Precondition Failed", reply_str) or re.search(r"\b412\b", reply_str):
|
|
422
|
+
raise RPCErrorDoRetry("Precondition Failed")
|
|
423
|
+
elif re.search("Request Entity Too Large", reply_str) or re.search(r"\b413\b", reply_str):
|
|
424
|
+
raise RPCErrorDoRetry("Request Entity Too Large")
|
|
425
|
+
elif re.search("Request-URI Too Long", reply_str) or re.search(r"\b414\b", reply_str):
|
|
426
|
+
raise RPCErrorDoRetry("Request-URI Too Long")
|
|
427
|
+
elif re.search("Unsupported Media Type", reply_str) or re.search(r"\b415\b", reply_str):
|
|
428
|
+
raise RPCErrorDoRetry("Unsupported Media Type")
|
|
429
|
+
elif re.search("Requested Range Not Satisfiable", reply_str) or re.search(
|
|
430
|
+
r"\b416\b", reply_str
|
|
431
|
+
):
|
|
432
|
+
raise RPCErrorDoRetry("Requested Range Not Satisfiable")
|
|
433
|
+
elif re.search("Expectation Failed", reply_str) or re.search(r"\b417\b", reply_str):
|
|
434
|
+
raise RPCErrorDoRetry("Expectation Failed")
|
|
435
|
+
elif re.search("Unprocessable Entity", reply_str) or re.search(r"\b422\b", reply_str):
|
|
436
|
+
raise RPCErrorDoRetry("Unprocessable Entity")
|
|
437
|
+
elif re.search("Locked", reply_str) or re.search(r"\b423\b", reply_str):
|
|
438
|
+
raise RPCErrorDoRetry("Locked")
|
|
439
|
+
elif re.search("Failed Dependency", reply_str) or re.search(r"\b424\b", reply_str):
|
|
440
|
+
raise RPCErrorDoRetry("Failed Dependency")
|
|
441
|
+
elif re.search("Upgrade Required", reply_str) or re.search(r"\b426\b", reply_str):
|
|
442
|
+
raise RPCErrorDoRetry("Upgrade Required")
|
|
443
|
+
elif re.search("Precondition Required", reply_str) or re.search(r"\b428\b", reply_str):
|
|
444
|
+
raise RPCErrorDoRetry("Precondition Required")
|
|
445
|
+
elif re.search("Request Header Fields Too Large", reply_str) or re.search(
|
|
446
|
+
r"\b431\b", reply_str
|
|
447
|
+
):
|
|
448
|
+
raise RPCErrorDoRetry("Request Header Fields Too Large")
|
|
449
|
+
elif re.search("Loop Detected", reply_str) or re.search(r"\b508\b", reply_str):
|
|
450
|
+
raise RPCError("Loop Detected")
|
|
451
|
+
elif re.search("Bandwidth Limit Exceeded", reply_str) or re.search(r"\b509\b", reply_str):
|
|
452
|
+
raise RPCError("Bandwidth Limit Exceeded")
|
|
453
|
+
elif re.search("Not Extended", reply_str) or re.search(r"\b510\b", reply_str):
|
|
454
|
+
raise RPCError("Not Extended")
|
|
455
|
+
elif re.search("Network Authentication Required", reply_str) or re.search(
|
|
456
|
+
r"\b511\b", reply_str
|
|
457
|
+
):
|
|
458
|
+
raise RPCError("Network Authentication Required")
|
|
459
|
+
else:
|
|
460
|
+
raise RPCError("Client returned invalid format. Expected JSON!")
|
|
461
|
+
|
|
462
|
+
    def rpcexec(self, payload: Union[Dict[str, Any], List[Dict[str, Any]]]) -> Any:
        """
        Execute the given JSON-RPC payload against the currently selected node and return the RPC result.

        Sends an HTTP POST with `payload` to the connected node, handling empty
        responses, retries, node rotation, and JSON parsing. On success returns
        either the `result` field for single-response RPC calls or a list of
        results when the server returns a JSON-RPC batch/array. Resets per-call
        error counters on successful responses.

        Parameters:
            payload (dict or list): JSON-serializable RPC request object or a
                list of request objects (batch).

        Returns:
            The RPC `result` (any) for a single request, or a list of results
            for a batch response.

        Raises:
            WorkingNodeMissing: if no working nodes are available.
            RPCConnection: if the client is not connected to any node.
            RPCError: for server-reported errors or unexpected / non-JSON
                responses that indicate an RPC failure.
            KeyboardInterrupt: if execution is interrupted by the user.
        """
        log.debug(f"Payload: {json.dumps(payload)}")
        if self.nodes.working_nodes_count == 0:
            raise WorkingNodeMissing("No working nodes available.")
        if self.url is None:
            raise RPCConnection("RPC is not connected!")

        # `reply` starts as a dict but is reassigned to the response *text*
        # inside the loop; downstream helpers stringify it either way.
        reply = {}
        response: httpx.Response | None = None
        # Retry loop: leaves only via `break` on a non-empty reply, or by an
        # exception escalated from the retry bookkeeping below.
        while True:
            self.nodes.increase_error_cnt_call()
            try:
                response = self.request_send(json.dumps(payload, ensure_ascii=False).encode("utf8"))
                reply = response.text
                if not bool(reply):
                    # Empty body: burn a call retry; once call retries are
                    # exhausted, count a node error and rotate nodes.
                    try:
                        self.nodes.sleep_and_check_retries("Empty Reply", call_retry=True)
                    except CallRetriesReached:
                        self.nodes.increase_error_cnt()
                        self.nodes.sleep_and_check_retries(
                            "Empty Reply", sleep=False, call_retry=False
                        )
                        self.rpcconnect()
                else:
                    break
            except KeyboardInterrupt:
                raise
            except (HttpxConnectError, TimeoutException, HTTPStatusError, RequestError) as e:
                # Known httpx transport failures: rotate to the next node.
                self._handle_transport_error(e, call_retry=False)
            except Exception as e:
                # Unknown failure types are logged but treated the same way.
                log.warning(f"Unexpected transport error type: {type(e).__name__}: {e}")
                self._handle_transport_error(e, call_retry=False)

        try:
            if response is None:
                # Defensive path; the loop above only breaks after a
                # successful request, so `response` is normally set here.
                try:
                    ret = json.loads(reply, strict=False)
                except ValueError:
                    log.error(f"Non-JSON response: {reply} Node: {self.url}")
                    # NOTE(review): passes the raw text where the helper's
                    # annotation says Dict — it stringifies internally.
                    self._check_for_server_error(reply)
                    raise RPCError("Invalid response format")
            else:
                ret = response.json()
        except ValueError:
            # _check_for_server_error always raises, so `ret` cannot be
            # used uninitialized past this point.
            self._check_for_server_error({"error": reply})

        log.debug(f"Reply: {json.dumps(reply)}")

        if isinstance(ret, dict) and "error" in ret:
            if isinstance(ret["error"], dict):
                # Prefer the detailed message when the node provides one.
                error_message = ret["error"].get(
                    "detail", ret["error"].get("message", "Unknown error")
                )
                self._raise_for_error(error_message)
        elif isinstance(ret, list):
            # Batch response: unwrap each entry's "result", raising on the
            # first entry that carries an "error".
            ret_list = []
            for r in ret:
                if isinstance(r, dict) and "error" in r:
                    error_message = r["error"].get(
                        "detail", r["error"].get("message", "Unknown error")
                    )
                    self._raise_for_error(error_message)
                elif isinstance(r, dict) and "result" in r:
                    ret_list.append(r["result"])
                else:
                    ret_list.append(r)
            self.nodes.reset_error_cnt_call()
            return ret_list
        elif isinstance(ret, dict) and "result" in ret:
            self.nodes.reset_error_cnt_call()
            return ret["result"]
        else:
            log.error(f"Unexpected response format: {ret} Node: {self.url}")
            raise RPCError(f"Unexpected response format: {ret}")
|
|
553
|
+
|
|
554
|
+
def _raise_for_error(self, error_message: str) -> None:
|
|
555
|
+
"""Normalize common RPC error messages to dedicated exception types."""
|
|
556
|
+
lowered = error_message.lower()
|
|
557
|
+
if "invalid parameter" in lowered:
|
|
558
|
+
raise InvalidParameters(error_message)
|
|
559
|
+
if "could not find method" in lowered or "no method with name" in lowered:
|
|
560
|
+
raise NoMethodWithName(error_message)
|
|
561
|
+
if "could not find api" in lowered or "no api with name" in lowered:
|
|
562
|
+
raise NoApiWithName(error_message)
|
|
563
|
+
raise RPCError(error_message)
|
|
564
|
+
|
|
565
|
+
# End of Deprecated methods
|
|
566
|
+
####################################################################
|
|
567
|
+
def __getattr__(self, name):
|
|
568
|
+
"""Map all methods to RPC calls and pass through the arguments."""
|
|
569
|
+
|
|
570
|
+
def method(*args, **kwargs):
|
|
571
|
+
# Prefer explicit api override, then OpenAPI map, then database_api
|
|
572
|
+
api_name = kwargs.get("api") or get_default_api_for_method(name) or "database_api"
|
|
573
|
+
|
|
574
|
+
# let's be able to define the num_retries per query
|
|
575
|
+
stored_num_retries_call = self.nodes.num_retries_call
|
|
576
|
+
self.nodes.num_retries_call = kwargs.get("num_retries_call", stored_num_retries_call)
|
|
577
|
+
add_to_queue = kwargs.get("add_to_queue", False)
|
|
578
|
+
query = get_query(
|
|
579
|
+
self.get_request_id(),
|
|
580
|
+
api_name,
|
|
581
|
+
name,
|
|
582
|
+
list(args),
|
|
583
|
+
)
|
|
584
|
+
if add_to_queue:
|
|
585
|
+
self.rpc_queue.append(query)
|
|
586
|
+
self.nodes.num_retries_call = stored_num_retries_call
|
|
587
|
+
return None
|
|
588
|
+
elif len(self.rpc_queue) > 0:
|
|
589
|
+
self.rpc_queue.append(query)
|
|
590
|
+
query = self.rpc_queue
|
|
591
|
+
self.rpc_queue = []
|
|
592
|
+
r = self.rpcexec(query)
|
|
593
|
+
self.nodes.num_retries_call = stored_num_retries_call
|
|
594
|
+
return r
|
|
595
|
+
|
|
596
|
+
return method
|