hive-nectar 0.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hive_nectar-0.2.9.dist-info/METADATA +194 -0
- hive_nectar-0.2.9.dist-info/RECORD +87 -0
- hive_nectar-0.2.9.dist-info/WHEEL +4 -0
- hive_nectar-0.2.9.dist-info/entry_points.txt +2 -0
- hive_nectar-0.2.9.dist-info/licenses/LICENSE.txt +23 -0
- nectar/__init__.py +37 -0
- nectar/account.py +5076 -0
- nectar/amount.py +553 -0
- nectar/asciichart.py +303 -0
- nectar/asset.py +122 -0
- nectar/block.py +574 -0
- nectar/blockchain.py +1242 -0
- nectar/blockchaininstance.py +2590 -0
- nectar/blockchainobject.py +263 -0
- nectar/cli.py +5937 -0
- nectar/comment.py +1552 -0
- nectar/community.py +854 -0
- nectar/constants.py +95 -0
- nectar/discussions.py +1437 -0
- nectar/exceptions.py +152 -0
- nectar/haf.py +381 -0
- nectar/hive.py +630 -0
- nectar/imageuploader.py +114 -0
- nectar/instance.py +113 -0
- nectar/market.py +876 -0
- nectar/memo.py +542 -0
- nectar/message.py +379 -0
- nectar/nodelist.py +309 -0
- nectar/price.py +603 -0
- nectar/profile.py +74 -0
- nectar/py.typed +0 -0
- nectar/rc.py +333 -0
- nectar/snapshot.py +1024 -0
- nectar/storage.py +62 -0
- nectar/transactionbuilder.py +659 -0
- nectar/utils.py +630 -0
- nectar/version.py +3 -0
- nectar/vote.py +722 -0
- nectar/wallet.py +472 -0
- nectar/witness.py +728 -0
- nectarapi/__init__.py +12 -0
- nectarapi/exceptions.py +126 -0
- nectarapi/graphenerpc.py +596 -0
- nectarapi/node.py +194 -0
- nectarapi/noderpc.py +79 -0
- nectarapi/openapi.py +107 -0
- nectarapi/py.typed +0 -0
- nectarapi/rpcutils.py +98 -0
- nectarapi/version.py +3 -0
- nectarbase/__init__.py +15 -0
- nectarbase/ledgertransactions.py +106 -0
- nectarbase/memo.py +242 -0
- nectarbase/objects.py +521 -0
- nectarbase/objecttypes.py +21 -0
- nectarbase/operationids.py +102 -0
- nectarbase/operations.py +1357 -0
- nectarbase/py.typed +0 -0
- nectarbase/signedtransactions.py +89 -0
- nectarbase/transactions.py +11 -0
- nectarbase/version.py +3 -0
- nectargraphenebase/__init__.py +27 -0
- nectargraphenebase/account.py +1121 -0
- nectargraphenebase/aes.py +49 -0
- nectargraphenebase/base58.py +197 -0
- nectargraphenebase/bip32.py +575 -0
- nectargraphenebase/bip38.py +110 -0
- nectargraphenebase/chains.py +15 -0
- nectargraphenebase/dictionary.py +2 -0
- nectargraphenebase/ecdsasig.py +309 -0
- nectargraphenebase/objects.py +130 -0
- nectargraphenebase/objecttypes.py +8 -0
- nectargraphenebase/operationids.py +5 -0
- nectargraphenebase/operations.py +25 -0
- nectargraphenebase/prefix.py +13 -0
- nectargraphenebase/py.typed +0 -0
- nectargraphenebase/signedtransactions.py +221 -0
- nectargraphenebase/types.py +557 -0
- nectargraphenebase/unsignedtransactions.py +288 -0
- nectargraphenebase/version.py +3 -0
- nectarstorage/__init__.py +57 -0
- nectarstorage/base.py +317 -0
- nectarstorage/exceptions.py +15 -0
- nectarstorage/interfaces.py +244 -0
- nectarstorage/masterpassword.py +237 -0
- nectarstorage/py.typed +0 -0
- nectarstorage/ram.py +27 -0
- nectarstorage/sqlite.py +343 -0
nectar/utils.py
ADDED
|
@@ -0,0 +1,630 @@
|
|
|
1
|
+
import ast
|
|
2
|
+
import json
|
|
3
|
+
import math
|
|
4
|
+
import re
|
|
5
|
+
import secrets
|
|
6
|
+
import string
|
|
7
|
+
import time as timenow
|
|
8
|
+
from datetime import date, datetime, time, timedelta, timezone
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
|
11
|
+
|
|
12
|
+
from ruamel.yaml import YAML
|
|
13
|
+
|
|
14
|
+
from nectargraphenebase.account import PasswordKey
|
|
15
|
+
|
|
16
|
+
# Canonical Graphene/Hive timestamp layout used by every formatter/parser below.
timeFormat = "%Y-%m-%dT%H:%M:%S"
# https://github.com/matiasb/python-unidiff/blob/master/unidiff/constants.py#L37
# @@ (source offset, length) (target offset, length) @@ (section header)
# Matches unified-diff hunk headers; capture groups are source offset, source
# length, target offset, target length, and the trailing section-header text.
RE_HUNK_HEADER = re.compile(
    r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))?\ @@[ ]?(.*)$", flags=re.MULTILINE
)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def formatTime(t: Union[float, datetime, date, time]) -> Optional[str]:
    """Render *t* in the compact permlink layout ``YYYYMMDDtHHMMSS[TZ]``.

    Floats are treated as POSIX timestamps and rendered in UTC; datetime,
    date, and time objects are rendered via ``strftime`` directly. Any other
    type yields ``None``.
    """
    layout = "%Y%m%dt%H%M%S%Z"
    if isinstance(t, float):
        return datetime.fromtimestamp(t, tz=timezone.utc).strftime(layout)
    if isinstance(t, (datetime, date, time)):
        return t.strftime(layout)
    return None
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def addTzInfo(
    t: Optional[Union[datetime, date, time]], timezone_str: str = "UTC"
) -> Optional[datetime]:
    """Return *t* as a timezone-aware UTC datetime.

    Dates are combined with midnight, times with today's date. Only UTC is
    supported: any other ``timezone_str`` logs a warning and falls back to
    UTC. Falsy input or unsupported types yield ``None``.
    """
    if not t:
        return None

    if timezone_str.upper() != "UTC":
        # pytz support was removed, so non-UTC zones degrade to UTC loudly.
        import logging

        log = logging.getLogger(__name__)
        log.warning(
            f"Non-UTC timezone '{timezone_str}' not supported without pytz. Using UTC instead."
        )
        timezone_str = "UTC"

    if isinstance(t, datetime):
        return t if t.tzinfo is not None else t.replace(tzinfo=timezone.utc)
    if isinstance(t, time):
        return datetime.combine(date.today(), t).replace(tzinfo=timezone.utc)
    if isinstance(t, date):
        return datetime.combine(t, time.min).replace(tzinfo=timezone.utc)
    return None
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def formatTimeString(t: Union[str, datetime, date, time]) -> str:
    """Normalize *t* to the Graphene ``%Y-%m-%dT%H:%M:%S`` string layout.

    Datetime-like inputs are formatted directly (dates get midnight, times
    get today's date); strings are round-tripped through ``timeFormat`` with
    UTC attached. Raises ValueError on unparsable strings.
    """
    if isinstance(t, (datetime, date, time)):
        if isinstance(t, time):
            t = datetime.combine(date.today(), t)
        elif not isinstance(t, datetime):
            # Plain date: anchor at midnight.
            t = datetime.combine(t, time.min)
        return t.strftime(timeFormat)
    parsed = addTzInfo(datetime.strptime(t, timeFormat))
    if parsed is None:
        raise ValueError("Failed to add timezone info")
    return parsed.strftime(timeFormat)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def formatToTimeStamp(t: Union[datetime, date, time, str]) -> int:
    """Return the POSIX timestamp (whole seconds, UTC) for *t*.

    Accepts an aware or naive datetime (naive is treated as UTC), a date
    (midnight UTC), a time (today's date, UTC), or a string in the module's
    ``timeFormat`` layout.

    :param t: the value to convert
    :return: seconds since the Unix epoch as an int
    :raises ValueError: if a string input does not match ``timeFormat``
    :raises TypeError: if *t* is none of the supported types
    """
    # The original implementation funneled everything through addTzInfo /
    # formatTimeString and then re-checked types that could no longer occur;
    # each input kind is converted exactly once here instead.
    if isinstance(t, str):
        t = datetime.strptime(t, timeFormat).replace(tzinfo=timezone.utc)
    elif isinstance(t, datetime):
        if t.tzinfo is None:
            t = t.replace(tzinfo=timezone.utc)
    elif isinstance(t, date):
        # datetime was handled above, so this is a plain date.
        t = datetime.combine(t, time.min).replace(tzinfo=timezone.utc)
    elif isinstance(t, time):
        t = datetime.combine(date.today(), t).replace(tzinfo=timezone.utc)
    else:
        raise TypeError(f"Expected datetime object, got {type(t)}")

    epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
    return int((t - epoch).total_seconds())
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def formatTimeFromNow(secs: int = 0) -> str:
    """Format "now + secs" as a Graphene timestamp string.

    :param int secs: seconds into the future (``x>0``) or past (``x<0``)
    :return: UTC time formatted as ``%Y-%m-%dT%H:%M:%S``
    :rtype: str
    """
    target = timenow.time() + int(secs)
    return datetime.fromtimestamp(target, tz=timezone.utc).strftime(timeFormat)
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def formatTimedelta(td: timedelta) -> str:
    """Render a timedelta as ``H:MM:SS`` (hours unpadded, may exceed 24).

    Non-timedelta input yields an empty string.
    """
    if not isinstance(td, timedelta):
        return ""
    total_hours = td.days * 24 + td.seconds // 3600
    mins = td.seconds % 3600 // 60
    secs = td.seconds % 60
    return f"{total_hours}:{mins:02d}:{secs:02d}"
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def parse_time(block_time: str) -> datetime:
    """Parse a blockchain timestamp string into an aware UTC datetime."""
    naive = datetime.strptime(block_time, timeFormat)
    return naive.replace(tzinfo=timezone.utc)
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def assets_from_string(text: str) -> List[str]:
    """Split an asset-pair string into its two assets.

    The separator may be any one of ``:``, ``/``, or ``-``.
    """
    pair_separator = re.compile(r"[\-:\/]")
    return pair_separator.split(text)
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def sanitize_permlink(permlink: str) -> str:
    """Normalize a string into a valid permlink.

    Trims surrounding whitespace, turns underscores/whitespace/dots into
    hyphens, strips everything that is not ASCII alphanumeric or hyphen, and
    lowercases the result.
    """
    permlink = permlink.strip()
    # Collapse underscores, whitespace and dots into hyphens.
    permlink = re.sub(r"[_\s.]", "-", permlink)
    # The original ran an extra `[^\w-]` pass first; it is fully subsumed by
    # this ASCII whitelist, so a single pass suffices.
    permlink = re.sub(r"[^a-zA-Z0-9-]", "", permlink)
    return permlink.lower()
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def derive_permlink(
    title: str,
    parent_permlink: Optional[str] = None,
    parent_author: Optional[str] = None,
    max_permlink_length: int = 256,
    with_suffix: bool = True,
) -> str:
    """Derive a permlink from a comment title (root posts) or from the
    parent permlink and optionally the parent author (replies).

    :param title: post title used for root-level permlinks
    :param parent_permlink: permlink of the parent post (replies)
    :param parent_author: author of the parent post (replies)
    :param max_permlink_length: hard cap on the resulting permlink length
    :param with_suffix: append a lowercase UTC timestamp suffix for uniqueness
    :return: sanitized permlink string
    """
    formatted_time = formatTime(datetime.now(timezone.utc))
    suffix = "-" + (formatted_time.lower() if formatted_time else "")

    # The original triplicated the trim/concat logic across three branches;
    # only prefix and source text actually differ between them.
    if parent_permlink and parent_author:
        prefix = "re-" + sanitize_permlink(parent_author) + "-"
        source = parent_permlink
    elif parent_permlink:
        prefix = "re-"
        source = parent_permlink
    else:
        prefix = ""
        source = title

    rem_chars = max_permlink_length - len(prefix)
    if with_suffix:
        rem_chars -= len(suffix)
    body = sanitize_permlink(source)[:rem_chars]

    # Root post whose title sanitized to nothing: fall back to the bare
    # timestamp (leading "-" stripped), matching the original behavior.
    if not prefix and len(body) == 0:
        return suffix[1:]

    return prefix + body + (suffix if with_suffix else "")
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
def resolve_authorperm(identifier: str) -> Tuple[str, str]:
    """
    Parse an author/permlink identifier and return (author, permlink).

    Accepts plain "author/permlink" or "@author/permlink", site URLs containing
    "/@author/permlink", and dtube-style URLs containing "#!/v/<author>/<permlink>".

    :raises ValueError: if the identifier cannot be parsed
    """
    # Match objects are truthy; the original tested hasattr(match, "group"),
    # which obscures the None check.
    # without any http(s)
    match = re.match(r"@?([\w\-\.]*)/([\w\-]*)", identifier)
    if match:
        return match.group(1), match.group(2)
    # dtube url
    match = re.match(r"([\w\-\.]+[^#?\s]+)/#!/v/?([\w\-\.]*)/([\w\-]*)", identifier)
    if match:
        return match.group(2), match.group(3)
    # generic url
    match = re.match(r"([\w\-\.]+[^#?\s]+)/@?([\w\-\.]*)/([\w\-]*)", identifier)
    if match is None:
        raise ValueError("Invalid identifier")
    return match.group(2), match.group(3)
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
def construct_authorperm(*args: Any) -> str:
    """Build a post identifier from a comment/post dict or (author, permlink).

    Examples:

    .. code-block:: python

        >>> from nectar.utils import construct_authorperm
        >>> print(construct_authorperm('username', 'permlink'))
        @username/permlink
        >>> print(construct_authorperm({'author': 'username', 'permlink': 'permlink'}))
        @username/permlink

    """
    if len(args) == 2:
        author, permlink = args
    elif len(args) == 1:
        post = args[0]
        author = post["author"]
        permlink = post["permlink"]
    else:
        raise ValueError("construct_identifier() received unparsable arguments")
    return f"@{author}/{permlink}"
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def resolve_root_identifier(url: str) -> Tuple[str, str]:
    """Split a ``/category/@author/permlink`` URL into (identifier, category).

    Returns ("", "") when the URL does not match the expected shape.
    """
    parts = re.match(r"/([^/]*)/@([^/]*)/([^#]*).*", url)
    if parts is None:
        return "", ""
    category, author, permlink = parts.groups()
    return construct_authorperm(author, permlink), category
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
def resolve_authorpermvoter(identifier: str) -> Tuple[str, str, str]:
    """Split an authorpermvoter string into (author, permlink, voter).

    Expected shape is ``author/permlink|voter``; the authorperm part may use
    any form accepted by :func:`resolve_authorperm`.

    :raises ValueError: if no ``|`` separator is present
    """
    sep = identifier.find("|")
    if sep < 0:
        raise ValueError("Invalid identifier")
    author, permlink = resolve_authorperm(identifier[:sep])
    voter = identifier[sep + 1 :]
    return author, permlink, voter
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
def construct_authorpermvoter(*args: Any) -> str:
    """Build a vote identifier from a vote dict or positional arguments.

    Examples:

    .. code-block:: python

        >>> from nectar.utils import construct_authorpermvoter
        >>> print(construct_authorpermvoter('username', 'permlink', 'voter'))
        @username/permlink|voter
        >>> print(construct_authorpermvoter({'author': 'username', 'permlink': 'permlink', 'voter': 'voter'}))
        @username/permlink|voter

    """
    if len(args) == 3:
        author, permlink, voter = args
    elif len(args) == 2:
        authorperm, voter = args
        author, permlink = resolve_authorperm(authorperm)
    elif len(args) == 1:
        vote = args[0]
        if "authorperm" in vote:
            authorperm, voter = vote["authorperm"], vote["voter"]
            author, permlink = resolve_authorperm(authorperm)
        else:
            author, permlink, voter = vote["author"], vote["permlink"], vote["voter"]
    else:
        raise ValueError("construct_identifier() received unparsable arguments")
    return f"@{author}/{permlink}|{voter}"
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
def reputation_to_score(rep: Union[str, int]) -> float:
    """Convert a raw on-chain reputation value into the display score.

    Zero maps to the neutral score 25.0; magnitude grows logarithmically and
    negative reputations mirror below 25.
    """
    raw = int(rep) if isinstance(rep, str) else rep
    if raw == 0:
        return 25.0
    magnitude = max(math.log10(abs(raw)) - 9, 0)
    sign = -1 if raw < 0 else 1
    return sign * magnitude * 9.0 + 25.0
|
|
323
|
+
|
|
324
|
+
|
|
325
|
+
def remove_from_dict(
    obj: Any, keys: Optional[List[str]] = None, keep_keys: bool = True
) -> Dict[str, Any]:
    """Filter a dict (or dict-convertible object) by a key list.

    With ``keep_keys=True`` only the listed keys survive; with
    ``keep_keys=False`` the listed keys are dropped.
    """
    key_list = [] if keys is None else keys
    mapping = obj if isinstance(obj, dict) else dict(obj)
    if keep_keys:
        return {name: value for name, value in mapping.items() if name in key_list}
    return {name: value for name, value in mapping.items() if name not in key_list}
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
def make_patch(a: str, b: str) -> str:
    """Return a diff-match-patch text patch that transforms *a* into *b*."""
    import diff_match_patch as dmp_module

    engine = dmp_module.diff_match_patch()
    return engine.patch_toText(engine.patch_make(a, b))
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
def findall_patch_hunks(body: Optional[str] = None) -> List[Tuple]:
    """Return all unified-diff hunk-header matches found in *body*."""
    return [] if body is None else RE_HUNK_HEADER.findall(body)
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
def derive_beneficiaries(beneficiaries: Union[str, List[str]]) -> List[Dict[str, Any]]:
    """
    Normalize a beneficiary specification into a sorted list of unique accounts.

    Input is a comma-separated string or a list whose items look like
    "account:10", "@account:10%", or just "account" (no percentage given).
    Explicit percentages become basis points (1% == 100 bp); entries without a
    usable percentage split the remainder up to 10000 bp equally. Duplicate
    accounts are merged by summing their shares.

    Returns:
        list of {"account": str, "weight": int} dicts sorted by account name,
        with weight in basis points (e.g. 1000 == 10%).
    """
    raw_entries = beneficiaries if isinstance(beneficiaries, list) else beneficiaries.split(",")

    # Per-account tallies: explicit basis points plus count of "unknown" slots.
    ledger: Dict[str, Dict[str, int]] = {}
    explicit_total_bp = 0
    unknown_slot_total = 0

    for raw in raw_entries:
        entry = raw.strip()
        if not entry:
            continue
        account = entry.split(":")[0].strip()
        if account.startswith("@"):
            account = account[1:]
        tally = ledger.setdefault(account, {"known_bp": 0, "unknown_slots": 0})

        if ":" not in entry:
            # No percentage at all: mark an unknown slot.
            tally["unknown_slots"] += 1
            unknown_slot_total += 1
            continue

        pct_text = entry.split(":", 1)[1].strip()
        if pct_text.endswith("%"):
            pct_text = pct_text[:-1].strip()
        try:
            pct = float(pct_text)
        except Exception:
            # Unparsable percentage counts as an unknown slot too.
            tally["unknown_slots"] += 1
            unknown_slot_total += 1
            continue
        basis_points = int(pct * 100)
        tally["known_bp"] += basis_points
        explicit_total_bp += basis_points

    # Spread whatever is left of 100% across the unknown slots, pro rata.
    leftover_bp = max(0, 10000 - explicit_total_bp)
    if unknown_slot_total > 0 and leftover_bp > 0:
        for tally in ledger.values():
            slots = tally["unknown_slots"]
            if slots > 0:
                tally["known_bp"] += int((leftover_bp * slots) / unknown_slot_total)

    return sorted(
        ({"account": name, "weight": tally["known_bp"]} for name, tally in ledger.items()),
        key=lambda item: item["account"],
    )
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
def derive_tags(tags: str) -> List[str]:
    """Split a tag string into a list of stripped tags.

    Comma separation wins over space separation; a single token becomes a
    one-element list and an empty string yields an empty list.
    """
    comma_parts = tags.split(",")
    if len(comma_parts) > 1:
        return [part.strip() for part in comma_parts]
    space_parts = tags.split(" ")
    if len(space_parts) > 1:
        return [part.strip() for part in space_parts]
    if tags:
        return [tags.strip()]
    return []
|
|
431
|
+
|
|
432
|
+
|
|
433
|
+
def seperate_yaml_dict_from_body(content: str) -> Tuple[str, Dict[str, Any]]:
    """Split YAML front matter from a post body.

    Returns ``(body, parameter)`` where ``parameter`` is the dict parsed from
    the first ``---`` ... ``---`` block and ``body`` is everything after the
    closing marker. Without any ``---`` marker the whole content is returned
    as the body with an empty parameter dict.
    """
    parameter = {}
    body = ""
    if len(content.split("---\n")) > 1:
        # Body starts after the second "---\n" marker (search begins at 1).
        body = content[content.find("---\n", 1) + 4 :]
        # Front matter sits between the first marker and the second.
        # NOTE(review): this assumes content starts with "---\n"; if the first
        # marker appears later, the two slices overlap oddly — confirm callers.
        yaml_content = content[content.find("---\n") + 4 : content.find("---\n", 1)]
        yaml = YAML(typ="safe")
        parameter = yaml.load(yaml_content)
        if not isinstance(parameter, dict):
            # Retry after forcing a space after ":" so terse "key:value" front
            # matter parses as a mapping.
            # NOTE(review): the second .replace looks like a no-op as written —
            # possibly intended to collapse doubled spaces; confirm intent.
            parameter = yaml.load(yaml_content.replace(":", ": ").replace(" ", " "))
    else:
        body = content
    return body, parameter
|
|
446
|
+
|
|
447
|
+
|
|
448
|
+
def create_yaml_header(
    comment: Dict[str, Any],
    json_metadata: Optional[Dict[str, Any]] = None,
    reply_identifier: Optional[str] = None,
) -> str:
    """
    Compose a YAML front-matter block (delimited by ``---``) for a post.

    Emits, when the corresponding keys exist: title (quoted), permlink, author,
    "authored by" (json_metadata["author"]), description (quoted),
    canonical_url, app, last_update (from "last_update" or "updated"),
    max_accepted_payout, percent_hbd, community (when the category differs from
    the first tag), tags (comma-joined), beneficiaries (as "account:XX.XX%",
    weights converted from parts-per-10000), and reply_identifier.

    Parameters:
        comment: post/comment data; "author" and "max_accepted_payout" are
            required, other keys optional.
        json_metadata: parsed JSON metadata ("author", "description",
            "canonical_url", "app", "tags").
        reply_identifier: added verbatim as "reply_identifier" when not None.

    Returns:
        str: the YAML front-matter block.
    """
    meta = {} if json_metadata is None else json_metadata
    parts = ["---\n"]
    if comment["title"] != "":
        parts.append(f'title: "{comment["title"]}"\n')
    if "permlink" in comment:
        parts.append(f"permlink: {comment['permlink']}\n")
    parts.append(f"author: {comment['author']}\n")
    if "author" in meta:
        parts.append(f"authored by: {meta['author']}\n")
    if "description" in meta:
        parts.append(f'description: "{meta["description"]}"\n')
    if "canonical_url" in meta:
        parts.append(f"canonical_url: {meta['canonical_url']}\n")
    if "app" in meta:
        parts.append(f"app: {meta['app']}\n")
    if "last_update" in comment:
        parts.append(f"last_update: {comment['last_update']}\n")
    elif "updated" in comment:
        parts.append(f"last_update: {comment['updated']}\n")
    parts.append(f"max_accepted_payout: {str(comment['max_accepted_payout'])}\n")
    if "percent_hbd" in comment:
        parts.append(f"percent_hbd: {str(comment['percent_hbd'])}\n")
    if "tags" in meta:
        tags = meta["tags"]
        # Only surface the community when the category is set and is not
        # already the leading tag.
        if len(tags) > 0 and comment["category"] != tags[0] and len(comment["category"]) > 0:
            parts.append(f"community: {comment['category']}\n")
        parts.append(f"tags: {','.join(tags)}\n")
    if "beneficiaries" in comment:
        formatted = [
            f"{b['account']}:{b['weight'] / 10000 * 100:.2f}%" for b in comment["beneficiaries"]
        ]
        if len(formatted) > 0:
            parts.append(f"beneficiaries: {','.join(formatted)}\n")
    if reply_identifier is not None:
        parts.append(f"reply_identifier: {reply_identifier}\n")
    parts.append("---\n")
    return "".join(parts)
|
|
525
|
+
|
|
526
|
+
|
|
527
|
+
def load_dirty_json(dirty_json: str) -> Dict[str, Any]:
    """Coerce Python-repr-style "JSON" into valid JSON and parse it.

    Rewrites single-quoted (optionally u-prefixed) strings to double quotes
    and ``True``/``False`` to their JSON spellings before ``json.loads``.
    """
    replacements = (
        (r"([ \{,:\[])(u)?'([^']+)'", r'\1"\3"'),
        (r" False([, \}\]])", r" false\1"),
        (r" True([, \}\]])", r" true\1"),
    )
    cleaned = dirty_json
    for pattern, repl in replacements:
        cleaned = re.sub(pattern, repl, cleaned)
    return json.loads(cleaned)
|
|
537
|
+
|
|
538
|
+
|
|
539
|
+
def create_new_password(length: int = 32) -> str:
    """Generate a random alphanumeric password.

    Resamples until the candidate contains at least one lowercase letter, one
    uppercase letter, and one digit (requires ``length >= 3`` to terminate).
    Uses :mod:`secrets` for cryptographic randomness.
    """
    alphabet = string.ascii_letters + string.digits
    while True:
        candidate = "".join(secrets.choice(alphabet) for _ in range(length))
        has_lower = any(ch.islower() for ch in candidate)
        has_upper = any(ch.isupper() for ch in candidate)
        has_digit = any(ch.isdigit() for ch in candidate)
        if has_lower and has_upper and has_digit:
            return candidate
|
|
551
|
+
|
|
552
|
+
|
|
553
|
+
def import_coldcard_wif(filename: Union[str, Path]) -> Tuple[str, str]:
    """Parse a Coldcard WIF export text file.

    The file announces each value with a header line ("WIF (privkey):" and a
    line containing "Path Used"); the value follows on the next non-empty
    line. Returns (wif, derivation_path).
    """
    pending = ""
    wif = ""
    path = ""
    with open(filename) as handle:
        for line in handle:
            stripped = line.strip()
            if stripped == "":
                continue
            if stripped == "WIF (privkey):":
                pending = "wif"
                continue
            if "Path Used" in stripped:
                pending = "path"
                continue
            if pending == "wif":
                wif = stripped
            elif pending == "path":
                path = line
            pending = ""
    return wif, path.lstrip().replace("\n", "")
|
|
574
|
+
|
|
575
|
+
|
|
576
|
+
def generate_password(import_password: str, wif: int = 1) -> str:
    """Derive a master password by iterating PasswordKey *wif* times.

    With ``wif > 0`` the result is "P" + the final derived private key; with
    ``wif <= 0`` the input password is returned unchanged.
    """
    if wif <= 0:
        return import_password
    password = import_password
    for _ in range(wif):
        derived = PasswordKey("", password, role="")
        password = str(derived.get_private())
    return "P" + password
|
|
586
|
+
|
|
587
|
+
|
|
588
|
+
def import_pubkeys(import_pub: Union[str, Path]) -> Tuple[str, str, str, str]:
    """Read an exported public-key file and return (owner, active, posting, memo).

    The file holds a Python-literal dict; UTF-16 exports (detected via NUL
    bytes in a default-encoding read) are re-read with the proper codec.

    :raises Exception: if the file does not exist
    """
    if not Path(import_pub).is_file():
        raise Exception(f"File {import_pub} does not exist!")
    with open(import_pub) as handle:
        raw = handle.read()
    if raw.find("\0") > 0:
        # NUL bytes indicate a UTF-16 export; re-read with the right codec.
        with open(import_pub, encoding="utf-16") as handle:
            raw = handle.read()
    keymap = ast.literal_eval(raw)
    return keymap["owner"], keymap["active"], keymap["posting"], keymap["memo"]
|
|
602
|
+
|
|
603
|
+
|
|
604
|
+
def import_custom_json(jsonid: str, json_data: Dict[str, Any]) -> Tuple[List[str], List[str]]:
    """Extract the authority lists for a custom_json operation.

    Pops "required_auths" and "required_posting_auths" out of *json_data*
    (mutating it) and returns them; any malformed input yields ([], []).

    Args:
        jsonid: id of the custom json (unused, kept for compatibility)
        json_data: the custom json payload

    Returns:
        tuple of (required_auths, required_posting_auths)
    """
    try:
        if not isinstance(json_data, dict):
            return [], []
        if "required_auths" not in json_data or "required_posting_auths" not in json_data:
            return [], []
        required_auths = json_data.pop("required_auths")
        required_posting_auths = json_data.pop("required_posting_auths")
        return required_auths, required_posting_auths
    except (KeyError, ValueError, TypeError):
        return [], []
|
nectar/version.py
ADDED