deriva 1.7.10-py3-none-any.whl → 1.7.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deriva/core/__init__.py +1 -1
- deriva/core/catalog_cli.py +16 -18
- deriva/core/ermrest_catalog.py +1 -1
- deriva/core/ermrest_model.py +92 -1
- deriva/core/hatrac_cli.py +30 -2
- deriva/core/hatrac_store.py +88 -33
- deriva/core/utils/core_utils.py +312 -35
- deriva/core/utils/credenza_auth_utils.py +645 -0
- deriva/core/utils/globus_auth_utils.py +7 -13
- deriva/transfer/backup/deriva_backup.py +2 -2
- deriva/transfer/backup/deriva_backup_cli.py +5 -0
- deriva/transfer/upload/deriva_upload.py +18 -2
- {deriva-1.7.10.dist-info → deriva-1.7.12.dist-info}/METADATA +4 -3
- {deriva-1.7.10.dist-info → deriva-1.7.12.dist-info}/RECORD +18 -28
- {deriva-1.7.10.dist-info → deriva-1.7.12.dist-info}/WHEEL +1 -1
- {deriva-1.7.10.dist-info → deriva-1.7.12.dist-info}/entry_points.txt +1 -0
- {deriva-1.7.10.dist-info → deriva-1.7.12.dist-info}/top_level.txt +0 -1
- tests/deriva/__init__.py +0 -0
- tests/deriva/core/__init__.py +0 -0
- tests/deriva/core/mmo/__init__.py +0 -0
- tests/deriva/core/mmo/base.py +0 -300
- tests/deriva/core/mmo/test_mmo_drop.py +0 -252
- tests/deriva/core/mmo/test_mmo_find.py +0 -90
- tests/deriva/core/mmo/test_mmo_prune.py +0 -196
- tests/deriva/core/mmo/test_mmo_rename.py +0 -222
- tests/deriva/core/mmo/test_mmo_replace.py +0 -180
- tests/deriva/core/test_datapath.py +0 -893
- tests/deriva/core/test_ermrest_model.py +0 -782
- {deriva-1.7.10.dist-info → deriva-1.7.12.dist-info}/licenses/LICENSE +0 -0
deriva/core/utils/core_utils.py
CHANGED
@@ -5,6 +5,7 @@ import shutil
 import errno
 import json
 import math
+import datetime
 import platform
 import logging
 import requests
@@ -12,11 +13,11 @@ import portalocker
 from requests.adapters import HTTPAdapter
 from urllib3.util.retry import Retry
 from collections import OrderedDict
-from distutils import util as du_util
 from urllib.parse import quote as _urlquote, unquote as urlunquote
 from urllib.parse import urlparse, urlsplit, urlunsplit, urljoin
 from http.cookiejar import MozillaCookieJar
-
+from typing import Any, Union
+from collections.abc import Iterable
 
 Kilobyte = 1024
 Megabyte = Kilobyte ** 2
@@ -49,7 +50,7 @@ DEFAULT_SESSION_CONFIG = {
     "cookie_jar": DEFAULT_COOKIE_JAR_FILE,
     "max_request_size": DEFAULT_MAX_REQUEST_SIZE,
     "max_chunk_limit": DEFAULT_MAX_CHUNK_LIMIT,
-    "bypass_cert_verify_host_list": []
+    "bypass_cert_verify_host_list": ["localhost"]
 }
 OAUTH2_SCOPES_KEY = "oauth2_scopes"
 DEFAULT_CONFIG = {
@@ -66,8 +67,8 @@ DEFAULT_CONFIG = {
 DEFAULT_CREDENTIAL = {}
 DEFAULT_LOGGER_OVERRIDES = {
     "globus_sdk": logging.WARNING,
-    "boto3": logging.WARNING,
-    "botocore": logging.WARNING,
+    # "boto3": logging.WARNING,
+    # "botocore": logging.WARNING,
 }
 
 
@@ -103,9 +104,20 @@ def urlquote_dcctx(s, safe='~{}",:'):
     return urlquote(s, safe=safe)
 
 
-def stob(
-
+def stob(val):
+    """Convert a string representation of truth to True or False. Lifted and slightly modified from distutils.
 
+    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+    are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+    'val' is anything else.
+    """
+    val = str(val).lower()
+    if val in ('y', 'yes', 't', 'true', 'on', '1'):
+        return True
+    elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+        return False
+    else:
+        raise ValueError("invalid truth value %r" % (val,))
 
 def format_exception(e):
     if not isinstance(e, Exception):
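The reworked stob() above inlines strtobool-style parsing, replacing the reliance on the distutils import that this release removes. A minimal interactive sketch of its behavior (not part of the diff; the import path follows the module shown above):

>>> from deriva.core.utils.core_utils import stob
>>> stob("Yes")          # case-insensitive truthy strings
True
>>> stob("off")          # falsy strings
False
>>> stob(1)              # non-string values are coerced via str()
True
>>> stob("maybe")
Traceback (most recent call last):
  ...
ValueError: invalid truth value 'maybe'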
@@ -230,7 +242,7 @@ def write_config(config_file=DEFAULT_CONFIG_FILE, config=DEFAULT_CONFIG):
 def read_config(config_file=DEFAULT_CONFIG_FILE, create_default=False, default=DEFAULT_CONFIG):
     if not config_file:
         config_file = DEFAULT_CONFIG_FILE
-
+
     if not os.path.isfile(config_file) and create_default:
         logging.info("No default configuration file found, attempting to create one at: %s" % config_file)
         try:
@@ -238,13 +250,14 @@ def read_config(config_file=DEFAULT_CONFIG_FILE, create_default=False, default=D
         except Exception as e:
             logging.warning("Unable to create configuration file %s. Using internal defaults. %s" %
                             (config_file, format_exception(e)))
-            config = json.dumps(default, ensure_ascii=False)
 
-    if
+    if os.path.isfile(config_file):
         with open(config_file, encoding='utf-8') as cf:
             config = cf.read()
-
-
+        return json.loads(config, object_pairs_hook=OrderedDict)
+    else:
+        logging.debug("Unable to locate configuration file %s. Using internal defaults." % config_file)
+        return default
 
 
 def lock_file(file_path, mode, exclusive=True, timeout=60):
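With this change, read_config() here (and read_credential(), reworked the same way in the next hunk) falls back to the built-in defaults and logs a debug message when no file can be found. A rough sketch of the new fallback, assuming a path that does not exist on the local machine (not part of the diff):

>>> from deriva.core.utils.core_utils import read_config, DEFAULT_CONFIG
>>> cfg = read_config(config_file="/tmp/no-such-deriva-config.json")  # hypothetical missing path
>>> cfg == DEFAULT_CONFIG
True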
@@ -267,21 +280,22 @@ def write_credential(credential_file=DEFAULT_CREDENTIAL_FILE, credential=DEFAULT
 def read_credential(credential_file=DEFAULT_CREDENTIAL_FILE, create_default=False, default=DEFAULT_CREDENTIAL):
     if not credential_file:
         credential_file = DEFAULT_CREDENTIAL_FILE
-
+
     if not os.path.isfile(credential_file) and create_default:
         logging.info("No default credential file found, attempting to create one at: %s" % credential_file)
         try:
             write_credential(credential_file, default)
         except Exception as e:
-            logging.warning("Unable to create credential file %s. Using internal defaults. %s" %
+            logging.warning("Unable to create credential file %s. Using internal (empty) defaults. %s" %
                             (credential_file, format_exception(e)))
-            credential = json.dumps(default, ensure_ascii=False)
 
-    if
+    if os.path.isfile(credential_file):
         with lock_file(credential_file, mode='r', exclusive=False) as cf:
             credential = cf.read()
-
-
+        return json.loads(credential, object_pairs_hook=OrderedDict)
+    else:
+        logging.debug("Unable to locate credential file %s. Using internal (empty) defaults." % credential_file)
+        return default
 
 
 def get_oauth_scopes_for_host(host,
@@ -430,34 +444,297 @@ def json_item_handler(input_file, callback):
     finally:
         infile.close()
 
+def topo_ranked(depmap: dict[Any,Union[set,Iterable]]) -> list[set]:
+    """Return list-of-sets representing values in ranked tiers as a topological partial order.
 
-
-    """Return list of items topologically sorted.
+    :param depmap: Dictionary mapping of values to required values.
 
-
+    The entire set of values to rank must be represented as keys in
+    depmap, and therefore must be hashable. For each depmap key, the
+    corresponding value should be a set of required values, or an
+    iterable of required values suitable to pass to set(). An empty
+    set or iterable represents a lack of requirements to satisfy for
+    a given key value.
 
-
+    The result list provides a partial order satisfying the
+    requirements from the dependency map. Each entry in the list is a
+    set representing a tier of values with equal rank. Values in a
+    given tier do not require any value from a tier at a higher index.
 
-
-    multiple iterations.
+    Raises ValueError if a requirement cannot be satisfied in any order.
 
     """
-
-
-
+    def opportunistic_set(s):
+        if isinstance(s, set):
+            return s
+        elif isinstance(s, Iterable):
+            return set(s)
+        else:
+            raise TypeError(f"bad depmap operand to topo_ranked(), got {type(s)} instead of expected set or iterable")
+
+    if not isinstance(depmap, dict):
+        raise TypeError(f"bad depmap operand to topo_ranked(), got {type(depmap)} instead of expected dict")
+
+    # make a mutable copy that supports our incremental algorithm
+    depmap = {
+        k: opportunistic_set(v)
+        for k, v in depmap.items()
+    }
+
+    ranked = []
+    satisfied = set()
+
     while depmap:
-
+        tier = set()
+        ranked.append(tier)
+
         for item, requires in list(depmap.items()):
             if requires.issubset(satisfied):
-
-                satisfied.add(item)
+                tier.add(item)
                 del depmap[item]
-        if not additions:
-            raise ValueError(("unsatisfiable", depmap))
-        ordered.extend(additions)
-        additions = []
-    return ordered
 
+        # sanity-check for cyclic or unreachable requirements
+        if not tier:
+            raise ValueError(f"bad operand depmap to topo_ranked(), unsatisfiable={depmap}")
+
+        satisfied.update(tier)
+
+    return ranked
+
+def topo_sorted(depmap: dict[Any,Union[set,Iterable]]) -> list:
+    """Return list of items topologically sorted.
+
+    :param depmap: Dictionary mapping of values to required values.
+
+    This is a simple wrapper to flatten the partially ordered output
+    of topo_ranked(depmap) into an arbitrary total order.
+
+    The entire set of values to sort must be represented as keys in
+    depmap, and therefore must be hashable. For each depmap key, the
+    corresponding value should be a set of required values, or an
+    iterable of required values suitable to pass to set(). An empty
+    set or iterable represents a lack of requirements to satisfy for
+    a given key value.
+
+    The result list provides a total order satisfying the requirements
+    from the dependency map. Values at lower indices do not require
+    values at higher indices.
+
+    Raises ValueError if a requirement cannot be satisfied in any order.
+
+    """
+    return [ v for tier in topo_ranked(depmap) for v in tier ]
+
+_crockford_base32_codex = '0123456789ABCDEFGHJKMNPQRSTVWXYZ'
+
+def crockford_b32encode(v: int, grplen: int=4) -> str:
+    """Encode a non-negative integer using the Crockford Base-32 representation.
+
+    :param v: Non-negative integer value to encode.
+    :param grplen: Non-negative number of output symbols in each group.
+
+    The input integer value is interpreted as an arbitrary-length bit
+    stream of length v.bit_length(). The input integer is
+    zero-extended to make the length a multiple of 5, i.e. effectively
+    prefix-padded with zero bits.
+
+    The result is a string uses the Crockford Base-32 representation
+    without any checksum suffix.
+
+    Output symbols are separated by a hyphen in groups of grplen
+    symbols. Specify grplen=0 to suppress hyphenation.
+
+    This function is the inverse of crockford_b32decode().
+
+    Those wishing to encode negative integers must use their own
+    convention to somehow multiplex sign information into the bit
+    stream represented by the non-negative integer.
+
+    """
+    sep = '-'
+
+    if not isinstance(v, int):
+        raise TypeError(f"bad operand for crockford_b32encode(): {v=}")
+
+    if not isinstance(grplen, int):
+        raise TypeError(f"bad operand for crockford_b32encode(): {grplen=}")
+
+    if v < 0:
+        raise ValueError(f"bad operand for crockford_b32encode(): {v=}")
+
+    if grplen < 0:
+        raise ValueError(f"bad operand for crockford_b32encode(): {grplen=}")
+
+    encoded_rev = []
+    d = 0
+    while v > 0:
+        # encode 5-bit chunk
+        code = _crockford_base32_codex[v % 32]
+        v = v // 32
+        # add (optional) group separator
+        if grplen > 0 and d > 0 and d % grplen == 0:
+            encoded_rev.append(sep)
+        d += 1
+        encoded_rev.append(code)
+
+    # trim "leading" zeroes and separators
+    while encoded_rev and encoded_rev[-1] in {'0', sep}:
+        del encoded_rev[-1]
+
+    # but restore zero for base case
+    if not encoded_rev:
+        encoded_rev.append('0')
+
+    return ''.join(reversed(encoded_rev))
+
+def crockford_b32decode(s: str) -> int:
+    """Decode Crockford base-32 string representation to non-negative integer.
+
+    :param s: String to decode.
+
+    The input string is decoded as a sequence of Crockford Base-32
+    symbols, each encoding 5 bits, such that the first symbol
+    represents the most-signficant bits.
+
+    The result is the non-negative integer corresponding to the
+    decoded bit stream.
+
+    The Crockford decode process is case-insensitive and recognizes
+    several synonyms for likely typographical errors. Namely,
+    'O'->'0', 'I'->'1', and 'L'->'1'.
+
+    Optional hyphens may be present in the input string to break it
+    into symbol groups. These bear no information and are simply
+    ignored.
+
+    The optional checksum suffix from Crockford's proposal is
+    not supported.
+
+    """
+    sep = '-'
+    inverted_codex = {
+        _crockford_base32_codex[i]: i
+        for i in range(32)
+    }
+    # add normalization alternatives
+    inverted_codex.update({'O':0, 'I':1, 'L':1})
+
+    if not isinstance(s, str):
+        raise TypeError(f"bad operand for crockford_b32decode() {s=}")
+
+    # make decoding case-insensitive
+    s = s.upper()
+    # remove separators
+    s = s.replace(sep, '')
+
+    res = 0
+    for d in range(len(s)):
+        try:
+            symbol = s[d]
+            coded = inverted_codex[symbol]
+        except KeyError as e:
+            raise ValueError(f"bad operand for crockford_b32decode(): unsupported {symbol=} in {s=}")
+
+        res = (res << 5) + coded
+
+    return res
+
+def int_to_uintX(i: int, nbits: int) -> int:
+    """Cast integer to an unsigned integer of desired width.
+
+    :param i: Signed integer to encode.
+    :param nbits: Number output bits.
+
+    For negative inputs, the requested nbits must be equal or greater
+    than i.bit_length(). For non-negative inputs, the requested nbits
+    must be greater than i.bit_length(). The output bits are to be
+    interpreted as 2's complement, so the most-significant bit is set
+    to represent negative inputs and kept clear to represent
+    non-negative inputs.
+
+    This function is the inverse of uintX_to_int() when both are called
+    using the same nbits operand.
+
+    """
+    if not isinstance(i, int):
+        raise TypeError(f"bad operand to int_to_uintX() {i=}")
+
+    if not isinstance(nbits, int):
+        raise TypeError(f"bad operand to int_to_uintX() {nbits=}")
+
+    if nbits < 1:
+        raise ValueError(f"bad operand to int_to_uintX() {nbits=}")
+
+    if i >= 0:
+        if i.bit_length() >= nbits:
+            raise ValueError(f"bad operand to int_to_uintX() {i=} {nbits=}")
+        return i
+    else:
+        if i.bit_length() > nbits:
+            raise ValueError(f"bad operand to int_to_uintX() {i=} {nbits=}")
+        hibit_mask = (1 << (nbits-1))
+        return i + hibit_mask + hibit_mask
+
+def uintX_to_int(b: int, nbits: int) -> int:
+    """Cast unsigned integer of known width into signed integer.
+
+    :param b: The non-negative integer holding bits to convert.
+    :param nbits: The number of input bits.
+
+    The specified input nbits must be equal or greater than
+    i.bit_length(). The input bits are interpreted as 2's complement,
+    so values with the most-significant bit set are recast as negative
+    numbers while inputs with the highest bit unset remain unchanged.
+
+    This function is the inverse of int_to_uintX() when both are called
+    using the same nbits operand.
+
+    """
+    if not isinstance(b, int):
+        raise TypeError(f"bad operand to uintX_to_int() {b=}")
+
+    if not isinstance(nbits, int):
+        raise TypeError(f"bad operand to uintX_to_int() {nbits=}")
+
+    if b < 0:
+        raise ValueError(f"bad operand to uintX_to_int() {b=}")
+
+    if nbits < 1:
+        raise ValueError(f"bad operand to uintX_to_int() {nbits=}")
+
+    if b.bit_length() > nbits:
+        raise ValueError(f"bad operand to uintX_to_int() {b=} {nbits=}")
+
+    hibit_mask = 1 << (nbits-1)
+
+    if b & hibit_mask:
+        return b - hibit_mask - hibit_mask
+    else:
+        return b
+
+def datetime_to_epoch_microseconds(dt: datetime.datetime) -> int:
+    """Convert a datatime to integer microseconds-since-epoch.
+
+    :param dt: A timezone-aware datetime.datetime instance.
+    """
+    # maintain exact microsecond precision in integer result
+    delta = dt - datetime.datetime(
+        1970, 1, 1, tzinfo=datetime.timezone.utc
+    )
+    whole_seconds = delta.days * 86400 + delta.seconds
+    return whole_seconds * 1000000 + delta.microseconds
+
+def epoch_microseconds_to_datetime(us: int) -> datetime.datetime:
+    """Convert integer microseconds-since-epoch to timezone-aware datetime.
+
+    :param us: Integer microseconds-since-epoch.
+    """
+    return datetime.datetime(
+        1970, 1, 1, tzinfo=datetime.timezone.utc
+    ) + datetime.timedelta(
+        seconds=us//1000000,
+        microseconds=us%1000000,
+    )
 
 class AttrDict (dict):
     """Dictionary with optional attribute-based lookup.
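The final hunk above replaces the old topo_sorted() with a tier-producing topo_ranked() plus a thin topo_sorted() wrapper over it. For orientation, a sketch of their behavior using hypothetical keys (not part of the diff):

>>> from deriva.core.utils.core_utils import topo_ranked, topo_sorted
>>> deps = {
...     'c': {'a', 'b'},   # c requires a and b
...     'b': {'a'},        # b requires a
...     'a': set(),        # a has no requirements
... }
>>> topo_ranked(deps)
[{'a'}, {'b'}, {'c'}]
>>> topo_sorted(deps)
['a', 'b', 'c']
>>> topo_sorted({'x': {'y'}, 'y': {'x'}})   # a cycle is unsatisfiable
Traceback (most recent call last):
  ...
ValueError: bad operand depmap to topo_ranked(), unsatisfiable={'x': {'y'}, 'y': {'x'}}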
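The same hunk adds crockford_b32encode()/crockford_b32decode(), which round-trip non-negative integers through the Crockford Base-32 alphabet with optional hyphen grouping. A brief sketch (values chosen arbitrarily; not part of the diff):

>>> from deriva.core.utils.core_utils import crockford_b32encode, crockford_b32decode
>>> crockford_b32encode(1234567890)          # default grplen=4 hyphenates from the low end
'14S-C0PJ'
>>> crockford_b32encode(1234567890, grplen=0)
'14SC0PJ'
>>> crockford_b32decode('14s-c0pj')          # case-insensitive, hyphens ignored
1234567890
>>> crockford_b32decode('14SCOPJ')           # 'O' accepted as a synonym for '0'
1234567890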
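int_to_uintX()/uintX_to_int() reinterpret signed integers as fixed-width two's-complement bit patterns and back. A short sketch at 8-bit width (not part of the diff):

>>> from deriva.core.utils.core_utils import int_to_uintX, uintX_to_int
>>> int_to_uintX(-1, 8)        # -1 becomes the all-ones 8-bit pattern
255
>>> uintX_to_int(255, 8)
-1
>>> int_to_uintX(5, 8)         # non-negative values pass through unchanged
5
>>> uintX_to_int(0x80, 8)      # high bit set decodes as negative
-128
>>> int_to_uintX(128, 8)       # too wide for 8 bits once a sign bit is reserved
Traceback (most recent call last):
  ...
ValueError: bad operand to int_to_uintX() i=128 nbits=8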
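Finally, datetime_to_epoch_microseconds()/epoch_microseconds_to_datetime() convert between timezone-aware datetimes and integer microseconds since the UNIX epoch using integer arithmetic only. A brief sketch (not part of the diff):

>>> import datetime
>>> from deriva.core.utils.core_utils import (
...     datetime_to_epoch_microseconds, epoch_microseconds_to_datetime)
>>> dt = datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc)
>>> datetime_to_epoch_microseconds(dt)
1704067200000000
>>> epoch_microseconds_to_datetime(1704067200000000)
datetime.datetime(2024, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)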
|