hive_nectar-0.0.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of hive-nectar has been flagged as potentially problematic.
- hive_nectar-0.0.2.dist-info/METADATA +182 -0
- hive_nectar-0.0.2.dist-info/RECORD +86 -0
- hive_nectar-0.0.2.dist-info/WHEEL +4 -0
- hive_nectar-0.0.2.dist-info/entry_points.txt +2 -0
- hive_nectar-0.0.2.dist-info/licenses/LICENSE.txt +23 -0
- nectar/__init__.py +32 -0
- nectar/account.py +4371 -0
- nectar/amount.py +475 -0
- nectar/asciichart.py +270 -0
- nectar/asset.py +82 -0
- nectar/block.py +446 -0
- nectar/blockchain.py +1178 -0
- nectar/blockchaininstance.py +2284 -0
- nectar/blockchainobject.py +221 -0
- nectar/blurt.py +563 -0
- nectar/cli.py +6285 -0
- nectar/comment.py +1217 -0
- nectar/community.py +513 -0
- nectar/constants.py +111 -0
- nectar/conveyor.py +309 -0
- nectar/discussions.py +1709 -0
- nectar/exceptions.py +149 -0
- nectar/hive.py +546 -0
- nectar/hivesigner.py +420 -0
- nectar/imageuploader.py +72 -0
- nectar/instance.py +129 -0
- nectar/market.py +1013 -0
- nectar/memo.py +449 -0
- nectar/message.py +357 -0
- nectar/nodelist.py +444 -0
- nectar/price.py +557 -0
- nectar/profile.py +65 -0
- nectar/rc.py +308 -0
- nectar/snapshot.py +726 -0
- nectar/steem.py +582 -0
- nectar/storage.py +53 -0
- nectar/transactionbuilder.py +622 -0
- nectar/utils.py +545 -0
- nectar/version.py +2 -0
- nectar/vote.py +557 -0
- nectar/wallet.py +472 -0
- nectar/witness.py +617 -0
- nectarapi/__init__.py +11 -0
- nectarapi/exceptions.py +123 -0
- nectarapi/graphenerpc.py +589 -0
- nectarapi/node.py +178 -0
- nectarapi/noderpc.py +229 -0
- nectarapi/rpcutils.py +97 -0
- nectarapi/version.py +2 -0
- nectarbase/__init__.py +14 -0
- nectarbase/ledgertransactions.py +75 -0
- nectarbase/memo.py +243 -0
- nectarbase/objects.py +429 -0
- nectarbase/objecttypes.py +22 -0
- nectarbase/operationids.py +102 -0
- nectarbase/operations.py +1297 -0
- nectarbase/signedtransactions.py +48 -0
- nectarbase/transactions.py +11 -0
- nectarbase/version.py +2 -0
- nectargrapheneapi/__init__.py +6 -0
- nectargraphenebase/__init__.py +27 -0
- nectargraphenebase/account.py +846 -0
- nectargraphenebase/aes.py +52 -0
- nectargraphenebase/base58.py +192 -0
- nectargraphenebase/bip32.py +494 -0
- nectargraphenebase/bip38.py +134 -0
- nectargraphenebase/chains.py +149 -0
- nectargraphenebase/dictionary.py +3 -0
- nectargraphenebase/ecdsasig.py +326 -0
- nectargraphenebase/objects.py +123 -0
- nectargraphenebase/objecttypes.py +6 -0
- nectargraphenebase/operationids.py +3 -0
- nectargraphenebase/operations.py +23 -0
- nectargraphenebase/prefix.py +11 -0
- nectargraphenebase/py23.py +38 -0
- nectargraphenebase/signedtransactions.py +201 -0
- nectargraphenebase/types.py +419 -0
- nectargraphenebase/unsignedtransactions.py +283 -0
- nectargraphenebase/version.py +2 -0
- nectarstorage/__init__.py +38 -0
- nectarstorage/base.py +306 -0
- nectarstorage/exceptions.py +16 -0
- nectarstorage/interfaces.py +237 -0
- nectarstorage/masterpassword.py +239 -0
- nectarstorage/ram.py +30 -0
- nectarstorage/sqlite.py +334 -0
nectar/utils.py
ADDED
@@ -0,0 +1,545 @@
# -*- coding: utf-8 -*-
import ast
import json
import math
import os
import re
import secrets
import string
import time as timenow
from datetime import date, datetime, time, timedelta, timezone

from ruamel.yaml import YAML

from nectargraphenebase.account import PasswordKey

timeFormat = "%Y-%m-%dT%H:%M:%S"
# https://github.com/matiasb/python-unidiff/blob/master/unidiff/constants.py#L37
# @@ (source offset, length) (target offset, length) @@ (section header)
RE_HUNK_HEADER = re.compile(
    r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))?\ @@[ ]?(.*)$", flags=re.MULTILINE
)


def formatTime(t):
    """Properly Format Time for permlinks"""
    if isinstance(t, float):
        return datetime.fromtimestamp(t, tz=timezone.utc).strftime("%Y%m%dt%H%M%S%Z")
    if isinstance(t, (datetime, date, time)):
        return t.strftime("%Y%m%dt%H%M%S%Z")


def addTzInfo(t, timezone_str="UTC"):
    """Returns a datetime object with tzinfo added
    Uses Python's built-in timezone when possible
    """
    if t and isinstance(t, (datetime, date, time)) and t.tzinfo is None:
        # Use built-in timezone
        if timezone_str.upper() == "UTC":
            if isinstance(t, datetime):
                t = t.replace(tzinfo=timezone.utc)
            # For date objects that don't have tzinfo directly
            elif isinstance(t, date) and not isinstance(t, datetime):
                t = datetime.combine(t, time.min).replace(tzinfo=timezone.utc)
            elif isinstance(t, time):
                t = datetime.combine(date.today(), t).replace(tzinfo=timezone.utc)
        else:
            # For non-UTC timezones, we can't use pytz anymore
            # This is a simplified approach - in the future, consider using zoneinfo for Python 3.9+
            # For now, default to UTC with a warning
            if isinstance(t, datetime):
                t = t.replace(tzinfo=timezone.utc)
            elif isinstance(t, date) and not isinstance(t, datetime):
                t = datetime.combine(t, time.min).replace(tzinfo=timezone.utc)
            elif isinstance(t, time):
                t = datetime.combine(date.today(), t).replace(tzinfo=timezone.utc)
            print(
                f"Warning: Non-UTC timezone '{timezone_str}' not supported without pytz. Using UTC instead."
            )
    return t


def formatTimeString(t):
    """Properly Format Time for permlinks"""
    if isinstance(t, (datetime, date, time)):
        return t.strftime(timeFormat)
    return addTzInfo(datetime.strptime(t, timeFormat))


def formatToTimeStamp(t):
    """Returns a timestamp integer

    :param datetime t: datetime object
    :return: Timestamp as integer
    """
    if isinstance(t, (datetime, date, time)):
        t = addTzInfo(t)
    else:
        t = formatTimeString(t)
    epoch = addTzInfo(datetime(1970, 1, 1))
    return int((t - epoch).total_seconds())


def formatTimeFromNow(secs=0):
    """Properly Format Time that is `x` seconds in the future

    :param int secs: Seconds to go in the future (`x>0`) or the
        past (`x<0`)
    :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`)
    :rtype: str

    """
    return datetime.fromtimestamp(timenow.time() + int(secs), tz=timezone.utc).strftime(timeFormat)


def formatTimedelta(td):
    """Format timedelta to String"""
    if not isinstance(td, timedelta):
        return ""
    days, seconds = td.days, td.seconds
    hours = days * 24 + seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = seconds % 60
    return "%d:%s:%s" % (hours, str(minutes).zfill(2), str(seconds).zfill(2))


def parse_time(block_time):
    """Take a string representation of time from the blockchain, and parse it
    into datetime object.
    """
    return datetime.strptime(block_time, timeFormat).replace(tzinfo=timezone.utc)


def assets_from_string(text):
    """Correctly split a string containing an asset pair.

    Splits the string into two assets with the separator being on of the
    following: ``:``, ``/``, or ``-``.
    """
    return re.split(r"[\-:\/]", text)


def sanitize_permlink(permlink):
    permlink = permlink.strip()
    permlink = re.sub(r"_|\s|\.", "-", permlink)
    permlink = re.sub(r"[^\w-]", "", permlink)
    permlink = re.sub(r"[^a-zA-Z0-9-]", "", permlink)
    permlink = permlink.lower()
    return permlink


def derive_permlink(
    title, parent_permlink=None, parent_author=None, max_permlink_length=256, with_suffix=True
):
    """Derive a permlink from a comment title (for root level
    comments) or the parent permlink and optionally the parent
    author (for replies).

    """
    suffix = "-" + formatTime(datetime.now(timezone.utc)) + "z"
    if parent_permlink and parent_author:
        prefix = "re-" + sanitize_permlink(parent_author) + "-"
        if with_suffix:
            rem_chars = max_permlink_length - len(suffix) - len(prefix)
        else:
            rem_chars = max_permlink_length - len(prefix)
        body = sanitize_permlink(parent_permlink)[:rem_chars]
        if with_suffix:
            return prefix + body + suffix
        else:
            return prefix + body
    elif parent_permlink:
        prefix = "re-"
        if with_suffix:
            rem_chars = max_permlink_length - len(suffix) - len(prefix)
        else:
            rem_chars = max_permlink_length - len(prefix)
        body = sanitize_permlink(parent_permlink)[:rem_chars]
        if with_suffix:
            return prefix + body + suffix
        else:
            return prefix + body
    else:
        if with_suffix:
            rem_chars = max_permlink_length - len(suffix)
        else:
            rem_chars = max_permlink_length
        body = sanitize_permlink(title)[:rem_chars]
        if len(body) == 0:  # empty title or title consisted of only special chars
            return suffix[1:]  # use timestamp only, strip leading "-"
        if with_suffix:
            return body + suffix
        else:
            return body


def resolve_authorperm(identifier):
    """Correctly split a string containing an authorperm.

    Splits the string into author and permlink with the
    following separator: ``/``.

    Examples:

        .. code-block:: python

            >>> from nectar.utils import resolve_authorperm
            >>> author, permlink = resolve_authorperm('https://d.tube/#!/v/pottlund/m5cqkd1a')
            >>> author, permlink = resolve_authorperm("https://steemit.com/witness-category/@gtg/24lfrm-gtg-witness-log")
            >>> author, permlink = resolve_authorperm("@gtg/24lfrm-gtg-witness-log")
            >>> author, permlink = resolve_authorperm("https://busy.org/@gtg/24lfrm-gtg-witness-log")

    """
    # without any http(s)
    match = re.match(r"@?([\w\-\.]*)/([\w\-]*)", identifier)
    if hasattr(match, "group"):
        return match.group(1), match.group(2)
    # dtube url
    match = re.match(r"([\w\-\.]+[^#?\s]+)/#!/v/?([\w\-\.]*)/([\w\-]*)", identifier)
    if hasattr(match, "group"):
        return match.group(2), match.group(3)
    # url
    match = re.match(r"([\w\-\.]+[^#?\s]+)/@?([\w\-\.]*)/([\w\-]*)", identifier)
    if not hasattr(match, "group"):
        raise ValueError("Invalid identifier")
    return match.group(2), match.group(3)


def construct_authorperm(*args):
    """Create a post identifier from comment/post object or arguments.
    Examples:

        .. code-block:: python

            >>> from nectar.utils import construct_authorperm
            >>> print(construct_authorperm('username', 'permlink'))
            @username/permlink
            >>> print(construct_authorperm({'author': 'username', 'permlink': 'permlink'}))
            @username/permlink

    """
    username_prefix = "@"
    if len(args) == 1:
        op = args[0]
        author, permlink = op["author"], op["permlink"]
    elif len(args) == 2:
        author, permlink = args
    else:
        raise ValueError("construct_identifier() received unparsable arguments")

    fields = dict(prefix=username_prefix, author=author, permlink=permlink)
    return "{prefix}{author}/{permlink}".format(**fields)


def resolve_root_identifier(url):
    m = re.match(r"/([^/]*)/@([^/]*)/([^#]*).*", url)
    if not m:
        return "", ""
    else:
        category = m.group(1)
        author = m.group(2)
        permlink = m.group(3)
        return construct_authorperm(author, permlink), category


def resolve_authorpermvoter(identifier):
    """Correctly split a string containing an authorpermvoter.

    Splits the string into author and permlink with the
    following separator: ``/`` and ``|``.
    """
    pos = identifier.find("|")
    if pos < 0:
        raise ValueError("Invalid identifier")
    [author, permlink] = resolve_authorperm(identifier[:pos])
    return author, permlink, identifier[pos + 1 :]


def construct_authorpermvoter(*args):
    """Create a vote identifier from vote object or arguments.
    Examples:

        .. code-block:: python

            >>> from nectar.utils import construct_authorpermvoter
            >>> print(construct_authorpermvoter('username', 'permlink', 'voter'))
            @username/permlink|voter
            >>> print(construct_authorpermvoter({'author': 'username', 'permlink': 'permlink', 'voter': 'voter'}))
            @username/permlink|voter

    """
    username_prefix = "@"
    if len(args) == 1:
        op = args[0]
        if "authorperm" in op:
            authorperm, voter = op["authorperm"], op["voter"]
            [author, permlink] = resolve_authorperm(authorperm)
        else:
            author, permlink, voter = op["author"], op["permlink"], op["voter"]
    elif len(args) == 2:
        authorperm, voter = args
        [author, permlink] = resolve_authorperm(authorperm)
    elif len(args) == 3:
        author, permlink, voter = args
    else:
        raise ValueError("construct_identifier() received unparsable arguments")

    fields = dict(prefix=username_prefix, author=author, permlink=permlink, voter=voter)
    return "{prefix}{author}/{permlink}|{voter}".format(**fields)


def reputation_to_score(rep):
    """Converts the account reputation value into the reputation score"""
    if isinstance(rep, str):
        rep = int(rep)
    if rep == 0:
        return 25.0
    score = max([math.log10(abs(rep)) - 9, 0])
    if rep < 0:
        score *= -1
    score = (score * 9.0) + 25.0
    return score


def remove_from_dict(obj, keys=list(), keep_keys=True):
    """Prune a class or dictionary of all but keys (keep_keys=True).
    Prune a class or dictionary of specified keys.(keep_keys=False).
    """
    if not isinstance(obj, dict):
        obj = dict(obj)
    if keep_keys:
        return {k: v for k, v in obj.items() if k in keys}
    else:
        return {k: v for k, v in obj.items() if k not in keys}


def make_patch(a, b):
    import diff_match_patch as dmp_module

    dmp = dmp_module.diff_match_patch()
    patch = dmp.patch_make(a, b)
    patch_text = dmp.patch_toText(patch)
    return patch_text


def findall_patch_hunks(body=None):
    return RE_HUNK_HEADER.findall(body)


def derive_beneficiaries(beneficiaries):
    beneficiaries_list = []
    beneficiaries_accounts = []
    beneficiaries_sum = 0
    if not isinstance(beneficiaries, list):
        beneficiaries = beneficiaries.split(",")

    for w in beneficiaries:
        account_name = w.strip().split(":")[0]
        if account_name[0] == "@":
            account_name = account_name[1:]
        if account_name in beneficiaries_accounts:
            continue
        if w.find(":") == -1:
            percentage = -1
        else:
            percentage = w.strip().split(":")[1]
            if "%" in percentage:
                percentage = percentage.strip().split("%")[0].strip()
        percentage = float(percentage)
        beneficiaries_sum += percentage
        beneficiaries_list.append({"account": account_name, "weight": int(percentage * 100)})
        beneficiaries_accounts.append(account_name)

    missing = 0
    for bene in beneficiaries_list:
        if bene["weight"] < 0:
            missing += 1
    index = 0
    for bene in beneficiaries_list:
        if bene["weight"] < 0:
            beneficiaries_list[index]["weight"] = int(
                (int(100 * 100) - int(beneficiaries_sum * 100)) / missing
            )
        index += 1
    sorted_beneficiaries = sorted(
        beneficiaries_list, key=lambda beneficiaries_list: beneficiaries_list["account"]
    )
    return sorted_beneficiaries


def derive_tags(tags):
    tags_list = []
    if len(tags.split(",")) > 1:
        for tag in tags.split(","):
            tags_list.append(tag.strip())
    elif len(tags.split(" ")) > 1:
        for tag in tags.split(" "):
            tags_list.append(tag.strip())
    elif len(tags) > 0:
        tags_list.append(tags.strip())
    return tags_list


def seperate_yaml_dict_from_body(content):
    parameter = {}
    body = ""
    if len(content.split("---\n")) > 1:
        body = content[content.find("---\n", 1) + 4 :]
        yaml_content = content[content.find("---\n") + 4 : content.find("---\n", 1)]
        yaml = YAML(typ="safe")
        parameter = yaml.load(yaml_content)
        if not isinstance(parameter, dict):
            parameter = yaml.load(yaml_content.replace(":", ": ").replace("  ", " "))
    else:
        body = content
    return body, parameter


def create_yaml_header(comment, json_metadata={}, reply_identifier=None):
    yaml_prefix = "---\n"
    if comment["title"] != "":
        yaml_prefix += 'title: "%s"\n' % comment["title"]
    if "permlink" in comment:
        yaml_prefix += "permlink: %s\n" % comment["permlink"]
    yaml_prefix += "author: %s\n" % comment["author"]
    if "author" in json_metadata:
        yaml_prefix += "authored by: %s\n" % json_metadata["author"]
    if "description" in json_metadata:
        yaml_prefix += 'description: "%s"\n' % json_metadata["description"]
    if "canonical_url" in json_metadata:
        yaml_prefix += "canonical_url: %s\n" % json_metadata["canonical_url"]
    if "app" in json_metadata:
        yaml_prefix += "app: %s\n" % json_metadata["app"]
    if "last_update" in comment:
        yaml_prefix += "last_update: %s\n" % comment["last_update"]
    elif "updated" in comment:
        yaml_prefix += "last_update: %s\n" % comment["updated"]
    yaml_prefix += "max_accepted_payout: %s\n" % str(comment["max_accepted_payout"])
    if "percent_steem_dollars" in comment:
        yaml_prefix += "percent_steem_dollars: %s\n" % str(comment["percent_steem_dollars"])
    elif "percent_hbd" in comment:
        yaml_prefix += "percent_hbd: %s\n" % str(comment["percent_hbd"])
    if "tags" in json_metadata:
        if (
            len(json_metadata["tags"]) > 0
            and comment["category"] != json_metadata["tags"][0]
            and len(comment["category"]) > 0
        ):
            yaml_prefix += "community: %s\n" % comment["category"]
        yaml_prefix += "tags: %s\n" % ",".join(json_metadata["tags"])
    if "beneficiaries" in comment:
        beneficiaries = []
        for b in comment["beneficiaries"]:
            beneficiaries.append("%s:%.2f%%" % (b["account"], b["weight"] / 10000 * 100))
        if len(beneficiaries) > 0:
            yaml_prefix += "beneficiaries: %s\n" % ",".join(beneficiaries)
    if reply_identifier is not None:
        yaml_prefix += "reply_identifier: %s\n" % reply_identifier
    yaml_prefix += "---\n"
    return yaml_prefix


def load_dirty_json(dirty_json):
    regex_replace = [
        (r"([ \{,:\[])(u)?'([^']+)'", r'\1"\3"'),
        (r" False([, \}\]])", r" false\1"),
        (r" True([, \}\]])", r" true\1"),
    ]
    for r, s in regex_replace:
        dirty_json = re.sub(r, s, dirty_json)
    clean_json = json.loads(dirty_json)
    return clean_json


def create_new_password(length=32):
    """Creates a random password containing alphanumeric chars with at least 1 number and 1 upper and lower char"""
    alphabet = string.ascii_letters + string.digits
    while True:
        import_password = "".join(secrets.choice(alphabet) for i in range(length))
        if (
            any(c.islower() for c in import_password)
            and any(c.isupper() for c in import_password)
            and any(c.isdigit() for c in import_password)
        ):
            break
    return import_password


def import_coldcard_wif(filename):
    """Reads a exported coldcard Wif text file and returns the WIF and used path"""
    next_var = ""
    import_password = ""
    path = ""
    with open(filename) as fp:
        for line in fp:
            if line.strip() == "":
                continue
            if line.strip() == "WIF (privkey):":
                next_var = "wif"
                continue
            elif "Path Used" in line.strip():
                next_var = "path"
                continue
            if next_var == "wif":
                import_password = line.strip()
            elif next_var == "path":
                path = line
            next_var = ""
    return import_password, path.lstrip().replace("\n", "")


def generate_password(import_password, wif=1):
    if wif > 0:
        password = import_password
        for _ in range(wif):
            pk = PasswordKey("", password, role="")
            password = str(pk.get_private())
        password = "P" + password
    else:
        password = import_password
    return password


def import_pubkeys(import_pub):
    if not os.path.isfile(import_pub):
        raise Exception("File %s does not exist!" % import_pub)
    with open(import_pub) as fp:
        pubkeys = fp.read()
    if pubkeys.find("\0") > 0:
        with open(import_pub, encoding="utf-16") as fp:
            pubkeys = fp.read()
    pubkeys = ast.literal_eval(pubkeys)
    owner = pubkeys["owner"]
    active = pubkeys["active"]
    posting = pubkeys["posting"]
    memo = pubkeys["memo"]
    return owner, active, posting, memo


def import_custom_json(jsonid, json_data):
    """Returns a list of required authorities for a custom_json operation.

    Returns the author and required posting authorities for a custom_json operation.

    Args:
        jsonid: The id of the custom json
        json_data: The data of the custom json

    Returns:
        tuple with required author and posting authorities
    """
    try:
        if (
            isinstance(json_data, dict)
            and "required_auths" in json_data
            and "required_posting_auths" in json_data
        ):
            required_auths = json_data["required_auths"]
            required_posting_auths = json_data["required_posting_auths"]
            del json_data["required_auths"]
            del json_data["required_posting_auths"]
            return required_auths, required_posting_auths
        else:
            return [], []
    except (KeyError, ValueError, TypeError):
        return [], []
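For orientation, here is a minimal usage sketch (illustrative only, not part of the wheel) that exercises the permlink and authorperm helpers added above; it assumes the package is installed so that nectar.utils is importable, and the calls mirror the doctest examples in the file.

# Illustrative sketch, not shipped in hive-nectar: exercising helpers from nectar/utils.py.
from nectar.utils import (
    construct_authorperm,
    derive_permlink,
    resolve_authorperm,
    sanitize_permlink,
)

# sanitize_permlink lowercases and replaces "_", whitespace and "." with "-".
print(sanitize_permlink("Hello_World 2.0"))        # hello-world-2-0

# derive_permlink with with_suffix=False returns only the sanitized title;
# with the default with_suffix=True a timestamp suffix is appended.
permlink = derive_permlink("My First Post!", with_suffix=False)
print(permlink)                                    # my-first-post

# construct_authorperm / resolve_authorperm convert between the two forms.
print(construct_authorperm("gtg", permlink))       # @gtg/my-first-post
author, perm = resolve_authorperm("@gtg/24lfrm-gtg-witness-log")
print(author, perm)                                # gtg 24lfrm-gtg-witness-log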
nectar/version.py
ADDED