hive_nectar-0.2.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hive_nectar-0.2.9.dist-info/METADATA +194 -0
- hive_nectar-0.2.9.dist-info/RECORD +87 -0
- hive_nectar-0.2.9.dist-info/WHEEL +4 -0
- hive_nectar-0.2.9.dist-info/entry_points.txt +2 -0
- hive_nectar-0.2.9.dist-info/licenses/LICENSE.txt +23 -0
- nectar/__init__.py +37 -0
- nectar/account.py +5076 -0
- nectar/amount.py +553 -0
- nectar/asciichart.py +303 -0
- nectar/asset.py +122 -0
- nectar/block.py +574 -0
- nectar/blockchain.py +1242 -0
- nectar/blockchaininstance.py +2590 -0
- nectar/blockchainobject.py +263 -0
- nectar/cli.py +5937 -0
- nectar/comment.py +1552 -0
- nectar/community.py +854 -0
- nectar/constants.py +95 -0
- nectar/discussions.py +1437 -0
- nectar/exceptions.py +152 -0
- nectar/haf.py +381 -0
- nectar/hive.py +630 -0
- nectar/imageuploader.py +114 -0
- nectar/instance.py +113 -0
- nectar/market.py +876 -0
- nectar/memo.py +542 -0
- nectar/message.py +379 -0
- nectar/nodelist.py +309 -0
- nectar/price.py +603 -0
- nectar/profile.py +74 -0
- nectar/py.typed +0 -0
- nectar/rc.py +333 -0
- nectar/snapshot.py +1024 -0
- nectar/storage.py +62 -0
- nectar/transactionbuilder.py +659 -0
- nectar/utils.py +630 -0
- nectar/version.py +3 -0
- nectar/vote.py +722 -0
- nectar/wallet.py +472 -0
- nectar/witness.py +728 -0
- nectarapi/__init__.py +12 -0
- nectarapi/exceptions.py +126 -0
- nectarapi/graphenerpc.py +596 -0
- nectarapi/node.py +194 -0
- nectarapi/noderpc.py +79 -0
- nectarapi/openapi.py +107 -0
- nectarapi/py.typed +0 -0
- nectarapi/rpcutils.py +98 -0
- nectarapi/version.py +3 -0
- nectarbase/__init__.py +15 -0
- nectarbase/ledgertransactions.py +106 -0
- nectarbase/memo.py +242 -0
- nectarbase/objects.py +521 -0
- nectarbase/objecttypes.py +21 -0
- nectarbase/operationids.py +102 -0
- nectarbase/operations.py +1357 -0
- nectarbase/py.typed +0 -0
- nectarbase/signedtransactions.py +89 -0
- nectarbase/transactions.py +11 -0
- nectarbase/version.py +3 -0
- nectargraphenebase/__init__.py +27 -0
- nectargraphenebase/account.py +1121 -0
- nectargraphenebase/aes.py +49 -0
- nectargraphenebase/base58.py +197 -0
- nectargraphenebase/bip32.py +575 -0
- nectargraphenebase/bip38.py +110 -0
- nectargraphenebase/chains.py +15 -0
- nectargraphenebase/dictionary.py +2 -0
- nectargraphenebase/ecdsasig.py +309 -0
- nectargraphenebase/objects.py +130 -0
- nectargraphenebase/objecttypes.py +8 -0
- nectargraphenebase/operationids.py +5 -0
- nectargraphenebase/operations.py +25 -0
- nectargraphenebase/prefix.py +13 -0
- nectargraphenebase/py.typed +0 -0
- nectargraphenebase/signedtransactions.py +221 -0
- nectargraphenebase/types.py +557 -0
- nectargraphenebase/unsignedtransactions.py +288 -0
- nectargraphenebase/version.py +3 -0
- nectarstorage/__init__.py +57 -0
- nectarstorage/base.py +317 -0
- nectarstorage/exceptions.py +15 -0
- nectarstorage/interfaces.py +244 -0
- nectarstorage/masterpassword.py +237 -0
- nectarstorage/py.typed +0 -0
- nectarstorage/ram.py +27 -0
- nectarstorage/sqlite.py +343 -0
nectarapi/node.py
ADDED
@@ -0,0 +1,194 @@
+import logging
+import re
+import time
+from typing import Any, List, Optional, Union
+
+from .exceptions import CallRetriesReached, NumRetriesReached
+
+log = logging.getLogger(__name__)
+
+
+class Node:
+    def __init__(self, url: str) -> None:
+        self.url = url
+        self.error_cnt = 0
+        self.error_cnt_call = 0
+
+    def __repr__(self) -> str:
+        return self.url
+
+
+class Nodes(list[Node]):
+    """Stores Node URLs and error counts"""
+
+    def __init__(
+        self,
+        urls: Union[str, "Nodes", List[Any], tuple, set, None],
+        num_retries: int,
+        num_retries_call: int,
+    ) -> None:
+        self.set_node_urls(urls)
+        self.num_retries = num_retries
+        self.num_retries_call = num_retries_call
+
+    def set_node_urls(self, urls: Union[str, "Nodes", List[Any], tuple, set, None]) -> None:
+        if isinstance(urls, str):
+            url_list = re.split(r",|;", urls)
+            if url_list is None:
+                url_list = [urls]
+        elif isinstance(urls, Nodes):
+            url_list = [urls[i].url for i in range(len(urls))]
+        elif isinstance(urls, (list, tuple, set)):
+            url_list = urls
+        elif urls is not None:
+            url_list = [urls]
+        else:
+            url_list = []
+        super().__init__([Node(x) for x in url_list])
+        self.current_node_index = -1
+        self.freeze_current_node = False
+
+    def __iter__(self) -> "Nodes":  # type: ignore[override]
+        # Iterator with rotation handled by __next__
+        return self
+
+    def __next__(self) -> str:
+        next_node_count = 0
+        if self.freeze_current_node:
+            return self.url
+        while next_node_count == 0 and (
+            self.num_retries < 0 or self.node.error_cnt < self.num_retries
+        ):
+            self.current_node_index += 1
+            if self.current_node_index >= self.working_nodes_count:
+                self.current_node_index = 0
+            next_node_count += 1
+            if next_node_count > self.working_nodes_count + 1:
+                raise StopIteration
+        return self.url
+
+    next = __next__  # Python 2
+
+    def export_working_nodes(self) -> List[str]:
+        nodes_list = []
+        for i in range(len(self)):
+            if self.num_retries < 0 or self[i].error_cnt <= self.num_retries:
+                nodes_list.append(self[i].url)
+        return nodes_list
+
+    def get_nodes(self) -> List[str]:
+        """Return the list of configured node URLs (including those currently marked errored)."""
+        return [self[i].url for i in range(len(self))]
+
+    def __repr__(self) -> str:
+        nodes_list = self.export_working_nodes()
+        return str(nodes_list)
+
+    @property
+    def working_nodes_count(self) -> int:
+        n = 0
+        if self.freeze_current_node:
+            i = self.current_node_index
+            if self.current_node_index < 0:
+                i = 0
+            if self.num_retries < 0 or self[i].error_cnt <= self.num_retries:
+                n += 1
+            return n
+        for i in range(len(self)):
+            if self.num_retries < 0 or self[i].error_cnt <= self.num_retries:
+                n += 1
+        return n
+
+    @property
+    def url(self) -> str:
+        if self.node is None:
+            return ""
+        return self.node.url
+
+    @property
+    def node(self) -> Node:
+        if self.current_node_index < 0:
+            return self[0]
+        return self[self.current_node_index]
+
+    @property
+    def error_cnt(self) -> int:
+        if self.node is None:
+            return 0
+        return self.node.error_cnt
+
+    @property
+    def error_cnt_call(self) -> int:
+        if self.node is None:
+            return 0
+        return self.node.error_cnt_call
+
+    @property
+    def num_retries_call_reached(self) -> bool:
+        return self.error_cnt_call >= self.num_retries_call
+
+    def disable_node(self) -> None:
+        """Disable current node"""
+        if self.node is not None and self.num_retries_call >= 0:
+            self.node.error_cnt_call = self.num_retries_call
+
+    def increase_error_cnt(self) -> None:
+        """Increase node error count for current node"""
+        if self.node is not None:
+            self.node.error_cnt += 1
+
+    def increase_error_cnt_call(self) -> None:
+        """Increase call error count for current node"""
+        if self.node is not None:
+            self.node.error_cnt_call += 1
+
+    def reset_error_cnt_call(self) -> None:
+        """Set call error count for current node to zero"""
+        if self.node is not None:
+            self.node.error_cnt_call = 0
+
+    def reset_error_cnt(self) -> None:
+        """Set node error count for current node to zero"""
+        if self.node is not None:
+            self.node.error_cnt = 0
+
+    def sleep_and_check_retries(
+        self,
+        errorMsg: Optional[str] = None,
+        sleep: bool = True,
+        call_retry: bool = False,
+        showMsg: bool = True,
+    ) -> None:
+        """Sleep and check if num_retries is reached"""
+        if errorMsg:
+            log.warning("Error: {}".format(errorMsg))
+        if call_retry:
+            cnt = self.error_cnt_call
+            if self.num_retries_call >= 0 and self.error_cnt_call > self.num_retries_call:
+                raise CallRetriesReached()
+        else:
+            cnt = self.error_cnt
+            if self.num_retries >= 0 and self.error_cnt > self.num_retries:
+                raise NumRetriesReached()
+
+        if showMsg:
+            if call_retry:
+                log.warning(
+                    "Retry RPC Call on node: %s (%d/%d)" % (self.url, cnt, self.num_retries_call)
+                )
+            else:
+                log.warning(
+                    "Lost connection or internal error on node: %s (%d/%d)"
+                    % (self.url, cnt, self.num_retries)
+                )
+        if not sleep:
+            return
+        if cnt < 1:
+            sleeptime = 0
+        elif cnt < 10:
+            sleeptime = (cnt - 1) * 1.5 + 0.5
+        else:
+            sleeptime = 10
+        if sleeptime:
+            log.warning("Retrying in %d seconds" % sleeptime)
+            time.sleep(sleeptime)
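
For orientation, a minimal usage sketch of the Nodes helper above (illustrative only, not part of the published files; the URLs are placeholders):

from nectarapi.node import Nodes

# Two placeholder endpoints; allow 3 connection retries per node and 5 retries per call
nodes = Nodes("https://api.hive.blog,https://api.example.org", 3, 5)

print(nodes.url)                     # current node (starts at the first entry)
nodes.increase_error_cnt()           # record a connection-level failure on the current node
nodes.sleep_and_check_retries("timeout", sleep=False)  # logs; raises NumRetriesReached once exhausted
next(nodes)                          # advance the rotation to the next working node
print(nodes.export_working_nodes())  # URLs whose error count is still within num_retries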
nectarapi/noderpc.py
ADDED
@@ -0,0 +1,79 @@
+import logging
+from typing import Any, Dict, List, Union
+
+from . import exceptions
+from .graphenerpc import GrapheneRPC
+
+log = logging.getLogger(__name__)
+
+
+class NodeRPC(GrapheneRPC):
+    """This class allows to call API methods exposed by the witness node via
+    websockets / rpc-json.
+
+    :param str urls: Either a single Websocket/Http URL, or a list of URLs
+    :param str user: Username for Authentication
+    :param str password: Password for Authentication
+    :param int num_retries: Try x times to num_retries to a node on disconnect, -1 for indefinitely
+    :param int num_retries_call: Repeat num_retries_call times a rpc call on node error (default is 5)
+    :param int timeout: Timeout setting for https nodes (default is 60)
+    :param bool use_tor: When set to true, 'socks5h://localhost:9050' is set as proxy
+
+    """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        """Init NodeRPC
+
+        :param str urls: Either a single Websocket/Http URL, or a list of URLs
+        :param str user: Username for Authentication
+        :param str password: Password for Authentication
+        :param int num_retries: Try x times to num_retries to a node on disconnect, -1 for indefinitely
+        :param int num_retries_call: Repeat num_retries_call times a rpc call on node error (default is 5)
+        :param int timeout: Timeout setting for https nodes (default is 60)
+        :param bool use_tor: When set to true, 'socks5h://localhost:9050' is set as proxy
+
+        """
+        super().__init__(*args, **kwargs)
+        self.next_node_on_empty_reply = False
+
+    def set_next_node_on_empty_reply(self, next_node_on_empty_reply: bool = True) -> None:
+        """Switch to next node on empty reply for the next rpc call"""
+        self.next_node_on_empty_reply = next_node_on_empty_reply
+
+    def rpcexec(self, payload: Union[Dict[str, Any], List[Dict[str, Any]]]) -> Any:
+        """
+        Execute an RPC call with node-aware retry and Hive-specific error handling.
+
+        Sends the given JSON-RPC payload via the underlying GrapheneRPC implementation and handles node-level failures, automatic retries, and node switching when appropriate. If the instance flag `next_node_on_empty_reply` is set, an empty reply may trigger switching to the next node (when multiple nodes are available). Retries are governed by the node manager's retry policy.
+
+        Parameters:
+            payload (dict or list): JSON-RPC payload to send (method, params, id, etc.).
+
+        Raises:
+            RPCConnection: if no RPC URL is configured (connection not established).
+            CallRetriesReached: when the node-manager's retry budget is exhausted and no alternative node can be used.
+            RPCError: when the remote node returns an RPC error that is not recoverable by retries/switching.
+            Exception: any other unexpected exception raised by the underlying RPC call is propagated.
+        """
+        if self.url is None:
+            raise exceptions.RPCConnection("RPC is not connected!")
+        reply = super().rpcexec(payload)
+        if self.next_node_on_empty_reply and not bool(reply) and self.nodes.working_nodes_count > 1:
+            self.next_node_on_empty_reply = False
+            self._retry_on_next_node("Empty Reply")
+            return super().rpcexec(payload)
+        self.next_node_on_empty_reply = False
+        return reply
+
+    def _retry_on_next_node(self, error_msg: str) -> None:
+        self.nodes.increase_error_cnt()
+        self.nodes.sleep_and_check_retries(error_msg, sleep=False, call_retry=False)
+        self.next()
+
+    def get_account(self, name, **kwargs):
+        """Get full account details from account name
+
+        :param str name: Account name
+        """
+        if isinstance(name, str):
+            return self.get_accounts([name], **kwargs)
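
A rough usage sketch of NodeRPC (illustrative; the endpoint is a placeholder and the keyword arguments follow the class docstring above, assuming they are forwarded unchanged to GrapheneRPC):

from nectarapi.noderpc import NodeRPC

# Construct against one or more nodes; retry settings as documented in the class docstring
rpc = NodeRPC("https://api.hive.blog", num_retries=3, num_retries_call=5, timeout=60)

rpc.set_next_node_on_empty_reply()    # one-shot: switch nodes if the next reply comes back empty
account = rpc.get_account("hiveio")   # thin wrapper over get_accounts, defined above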
nectarapi/openapi.py
ADDED
@@ -0,0 +1,107 @@
+from typing import Dict, Optional
+
+# Static method→API mapping derived from the hived OpenAPI spec.
+# We intentionally embed a small, opinionated subset to avoid shipping the full
+# OpenAPI document with the package while still providing sensible defaults.
+METHOD_API_MAP: Dict[str, str] = {
+    # Broadcast
+    "broadcast_transaction": "network_broadcast_api",
+    "broadcast_transaction_synchronous": "network_broadcast_api",
+    # Accounts / database
+    "find_accounts": "database_api",
+    "get_accounts": "database_api",
+    "get_dynamic_global_properties": "database_api",
+    "get_reward_fund": "database_api",
+    "get_reward_funds": "database_api",
+    "get_feed_history": "database_api",
+    "get_hardfork_properties": "database_api",
+    "get_config": "database_api",
+    "find_owner_histories": "database_api",
+    "find_escrows": "database_api",
+    "find_recurrent_transfers": "database_api",
+    "get_owner_history": "database_api",
+    "get_withdraw_routes": "database_api",
+    "find_witness_schedule": "database_api",
+    "find_accounts_recovery_requests": "database_api",
+    "find_change_recovery_account_requests": "database_api",
+    "find_savings_withdrawals": "database_api",
+    "find_vesting_delegation_expirations": "database_api",
+    "find_conversion_requests": "database_api",
+    "find_hbd_conversion_requests": "database_api",
+    # Blocks
+    "get_block": "block_api",
+    "get_block_header": "block_api",
+    "get_block_range": "block_api",
+    "get_account_count": "condenser_api",
+    # Account history
+    "get_account_history": "account_history_api",
+    "get_transaction": "account_history_api",
+    "get_ops_in_block": "account_history_api",
+    "enum_virtual_ops": "account_history_api",
+    # Keys
+    "get_key_references": "account_by_key_api",
+    # Witnesses (some nodes do not expose witness_api; database_api supports these)
+    "get_witness_by_account": "condenser_api",
+    "find_witnesses": "database_api",
+    "get_witness_schedule": "database_api",
+    "get_witness_count": "database_api",
+    "get_active_witnesses": "database_api",
+    "get_witness": "database_api",
+    "get_witnesses": "database_api",
+    "list_witnesses": "database_api",
+    "list_witness_votes": "database_api",
+    # Bridge (hivemind)
+    "get_ranked_posts": "bridge",
+    "get_account_posts": "bridge",
+    "get_discussion": "bridge",
+    "get_replies_by_last_update": "bridge",
+    "get_follow_count": "condenser_api",
+    "get_followers": "condenser_api",
+    "get_following": "condenser_api",
+    "get_blog": "condenser_api",
+    "get_blog_entries": "condenser_api",
+    "get_blog_authors": "bridge",
+    "get_content": "bridge",
+    "get_post": "bridge",
+    "get_reblogged_by": "condenser_api",
+    "get_active_votes": "condenser_api",
+    "get_tags_used_by_author": "bridge",
+    "get_follow_list": "bridge",
+    "list_subscribers": "bridge",
+    "list_community_roles": "bridge",
+    "account_notifications": "bridge",
+    "unread_notifications": "bridge",
+    "list_all_subscriptions": "bridge",
+    "list_communities": "bridge",
+    # RC
+    "get_resource_params": "rc_api",
+    "get_resource_pool": "rc_api",
+    "find_rc_accounts": "rc_api",
+    # Market history
+    "get_ticker": "market_history_api",
+    "get_volume": "market_history_api",
+    "get_order_book": "market_history_api",
+    "get_recent_trades": "market_history_api",
+    "get_trade_history": "market_history_api",
+    "get_market_history": "market_history_api",
+    "get_market_history_buckets": "market_history_api",
+    # JSON-RPC meta
+    "get_methods": "jsonrpc",
+    # Proposals
+    "find_proposals": "condenser_api",
+    "get_trending_tags": "condenser_api",
+    "get_discussions_by_promoted": "condenser_api",
+}
+
+
+def get_default_api_for_method(method_name: str) -> Optional[str]:
+    """
+    Return the default API name for a method using the static map.
+
+    Args:
+        method_name: The RPC method (without API prefix), e.g., "get_account_history".
+
+    Returns:
+        The API name string (e.g., "account_history_api") or None if unknown.
+    """
+    return METHOD_API_MAP.get(method_name)
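
A small sketch of how a caller might resolve the default API and build the fully qualified appbase method name (the condenser_api fallback here is an illustrative choice, not something the module mandates):

from nectarapi.openapi import get_default_api_for_method

method = "get_account_history"
api = get_default_api_for_method(method) or "condenser_api"  # illustrative fallback for unmapped methods
print(f"{api}.{method}")  # -> account_history_api.get_account_history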
nectarapi/py.typed
ADDED
File without changes
nectarapi/rpcutils.py
ADDED
@@ -0,0 +1,98 @@
+import json
+import logging
+from typing import Any, Dict, Iterable, List, Union
+
+log = logging.getLogger(__name__)
+
+
+def get_query(
+    request_id: int,
+    api_name: str,
+    name: str,
+    args: Union[Dict[str, Any], Iterable[Any], Any],
+) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
+    """
+    Build an appbase-style JSON-RPC request payload.
+
+    Always emits the `api.method` form (no condenser `call` indirection). Supports:
+    - Single dict params
+    - Positional params passed as an iterable
+    - Batch creation when provided a list of dicts inside an iterable
+    """
+    normalized_args: Any
+    # Convert tuples to lists for easier inspection
+    if isinstance(args, tuple):
+        normalized_args = list(args)
+    else:
+        normalized_args = args
+
+    # Pass through plain dict
+    if isinstance(normalized_args, dict):
+        params: Union[Dict[str, Any], List[Any]] = json.loads(json.dumps(normalized_args))
+        return {
+            "method": f"{api_name}.{name}",
+            "params": params,
+            "jsonrpc": "2.0",
+            "id": request_id,
+        }
+
+    if isinstance(normalized_args, list) and normalized_args:
+        # Batch: list of dicts directly
+        if len(normalized_args) > 1 and all(isinstance(item, dict) for item in normalized_args):
+            queries: List[Dict[str, Any]] = []
+            for entry in normalized_args:
+                queries.append(
+                    {
+                        "method": f"{api_name}.{name}",
+                        "params": json.loads(json.dumps(entry)),
+                        "jsonrpc": "2.0",
+                        "id": request_id,
+                    }
+                )
+                request_id += 1
+            return queries
+
+        # Batch: list of dicts nested inside a single element
+        if (
+            len(normalized_args) == 1
+            and isinstance(normalized_args[0], list)
+            and normalized_args[0]
+            and all(isinstance(item, dict) for item in normalized_args[0])
+        ):
+            queries: List[Dict[str, Any]] = []
+            for entry in normalized_args[0]:
+                queries.append(
+                    {
+                        "method": f"{api_name}.{name}",
+                        "params": json.loads(json.dumps(entry)),
+                        "jsonrpc": "2.0",
+                        "id": request_id,
+                    }
+                )
+                request_id += 1
+            return queries
+
+        # Single dict wrapped in a list
+        if len(normalized_args) == 1 and isinstance(normalized_args[0], dict):
+            return {
+                "method": f"{api_name}.{name}",
+                "params": json.loads(json.dumps(normalized_args[0])),
+                "jsonrpc": "2.0",
+                "id": request_id,
+            }
+
+        # Generic positional args
+        return {
+            "method": f"{api_name}.{name}",
+            "params": json.loads(json.dumps(normalized_args)),
+            "jsonrpc": "2.0",
+            "id": request_id,
+        }
+
+    # Fallback: empty params (use list to satisfy condenser/appbase empty-arg methods)
+    return {
+        "method": f"{api_name}.{name}",
+        "jsonrpc": "2.0",
+        "params": [] if api_name == "condenser_api" else {},
+        "id": request_id,
+    }
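
For illustration, the payload builder handles the main argument shapes like this (the method names come from the mapping above; the argument values are made up):

from nectarapi.rpcutils import get_query

# Named params as a single dict -> one request object
get_query(1, "database_api", "find_accounts", {"accounts": ["hiveio"]})
# {'method': 'database_api.find_accounts', 'params': {'accounts': ['hiveio']}, 'jsonrpc': '2.0', 'id': 1}

# A list of several dicts -> a batch, one request per dict with incrementing ids
get_query(1, "bridge", "get_discussion",
          [{"author": "alice", "permlink": "p1"}, {"author": "alice", "permlink": "p2"}])

# Positional params -> passed through as a JSON array
get_query(7, "condenser_api", "get_block", [1234])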
nectarapi/version.py
ADDED
nectarbase/__init__.py
ADDED
nectarbase/ledgertransactions.py
ADDED
@@ -0,0 +1,106 @@
+import logging
+from typing import Any, Dict, Mapping
+
+from nectargraphenebase.account import PublicKey
+from nectargraphenebase.chains import known_chains
+from nectargraphenebase.types import (
+    Array,
+    Signature,
+)
+from nectargraphenebase.unsignedtransactions import (
+    Unsigned_Transaction as GrapheneUnsigned_Transaction,
+)
+
+from .operations import Operation
+
+log = logging.getLogger(__name__)
+
+
+class Ledger_Transaction(GrapheneUnsigned_Transaction):
+    """Create an unsigned transaction and offer method to send it to a ledger device for signing
+
+    :param num ref_block_num:
+    :param num ref_block_prefix:
+    :param str expiration: expiration date
+    :param array operations: array of operations
+    :param dict custom_chains: custom chain which should be added to the known chains
+    """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        self.known_chains = known_chains
+        custom_chain = kwargs.get("custom_chains", {})
+        if len(custom_chain) > 0:
+            for c in custom_chain:
+                if c not in self.known_chains:
+                    self.known_chains[c] = custom_chain[c]
+        super().__init__(*args, **kwargs)
+
+    def add_custom_chains(self, custom_chain: Mapping[str, Any]) -> None:
+        if len(custom_chain) > 0:
+            for c in custom_chain:
+                if c not in self.known_chains:
+                    self.known_chains[c] = custom_chain[c]
+
+    def getOperationKlass(self) -> type[Operation]:
+        return Operation
+
+    def getKnownChains(self) -> Dict[str, Any]:
+        """
+        Return the mapping of known blockchain chains available to this transaction.
+
+        Returns:
+            dict: A mapping where keys are chain identifiers (e.g., "HIVE", "STEEM" or custom names)
+            and values are the chain metadata/configuration that was registered with this transaction.
+        """
+        return self.known_chains
+
+    def sign(self, path: str = "48'/13'/0'/0'/0'", chain: str = "HIVE") -> "Ledger_Transaction":
+        """
+        Sign the transaction using a Ledger device and attach the resulting signature to this transaction.
+
+        Builds APDUs for the given BIP32 path and blockchain chain identifier, sends them to a connected Ledger dongle, collects the final signature returned by the device, and stores it as the transaction's "signatures" entry.
+
+        Parameters:
+            path (str): BIP32 derivation path to use on the Ledger (default "48'/13'/0'/0'/0'").
+            chain (str): Chain identifier used when building APDUs (e.g., "HIVE" or "STEEM").
+
+        Returns:
+            Ledger_Transaction: self with `self.data["signatures"]` set to an Array containing the Ledger-produced Signature.
+
+        Notes:
+            - This method opens a connection to the Ledger device and closes it before returning.
+            - Any exceptions raised by the Ledger communication layer are not handled here and will propagate to the caller.
+        """
+        from ledgerblue.comm import getDongle  # type: ignore[import-not-found]
+
+        dongle = getDongle(True)
+        try:
+            apdu_list = self.build_apdu(path, chain)
+            for apdu in apdu_list:
+                result = dongle.exchange(bytes(apdu))
+            sigs = []
+            signature = result
+            sigs.append(Signature(signature))
+            self.data["signatures"] = Array(sigs)
+            return self
+        finally:
+            dongle.close()
+
+    def get_pubkey(
+        self,
+        path: str = "48'/13'/0'/0'/0'",
+        request_screen_approval: bool = False,
+        prefix: str = "STM",
+    ) -> PublicKey:
+        from ledgerblue.comm import getDongle  # type: ignore[import-not-found]
+
+        dongle = getDongle(True)
+        try:
+            apdu = self.build_apdu_pubkey(path, request_screen_approval)
+            result = dongle.exchange(bytes(apdu))
+            offset = 1 + result[0]
+            address = result[offset + 1 : offset + 1 + result[offset]]
+            # public_key = result[1: 1 + result[0]]
+            return PublicKey(address.decode(), prefix=prefix)
+        finally:
+            dongle.close()
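
A heavily hedged sketch of the Ledger flow above: it assumes a connected Ledger device with the Hive app open, and the constructor fields (ref_block_num, ref_block_prefix, expiration, operations) are placeholder values taken from the class docstring rather than a real transaction:

from nectarbase.ledgertransactions import Ledger_Transaction

# Placeholder transaction fields; a real transaction needs current ref block data and operations
tx = Ledger_Transaction(
    ref_block_num=12345,
    ref_block_prefix=67890,
    expiration="2024-01-01T00:00:00",
    operations=[],
)
pubkey = tx.get_pubkey(path="48'/13'/0'/0'/0'", prefix="STM")  # read the public key for this path
signed = tx.sign(path="48'/13'/0'/0'/0'", chain="HIVE")        # APDU exchange; attaches the signature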