lamindb_setup 0.80.0__py3-none-any.whl → 0.81.0__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
lamindb_setup/__init__.py CHANGED
@@ -33,7 +33,7 @@ Modules & settings:
 
 """
 
-__version__ = "0.80.0"  # denote a release candidate for 0.1.0 with 0.1rc1
+__version__ = "0.81.0"  # denote a release candidate for 0.1.0 with 0.1rc1
 
 import os as _os
 import sys as _sys
lamindb_setup/_init_instance.py CHANGED
@@ -415,9 +415,10 @@ def infer_instance_name(
     if storage == "create-s3":
        raise ValueError("pass name to init if storage = 'create-s3'")
    storage_path = UPath(storage)
+    # not sure if name is ever ""
    if storage_path.name != "":
        name = storage_path.name
    else:
        # dedicated treatment of bucket names
-        name = storage_path._url.netloc
+        name = storage_path.drive
    return name.lower()
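
Note: the `_url.netloc` → `.drive` change swaps a private urllib-derived attribute for universal_pathlib's public accessor, which returns the bucket for cloud paths. A minimal sketch of the resulting name inference, assuming upath (and s3fs for `s3://` URLs) is installed; paths and the function name are illustrative:

    from upath import UPath

    def infer_name(storage: str) -> str:
        storage_path = UPath(storage)
        # "s3://my-bucket/my-folder" -> "my-folder"
        if storage_path.name != "":
            name = storage_path.name
        else:
            # bucket roots have an empty .name; .drive yields the bucket,
            # e.g. "s3://my-bucket" -> "my-bucket"
            name = storage_path.drive
        return name.lower()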
lamindb_setup/core/_aws_credentials.py CHANGED
@@ -15,9 +15,7 @@ HOSTED_REGIONS = [
 ]
 lamin_env = os.getenv("LAMIN_ENV")
 if lamin_env is None or lamin_env == "prod":
-    hosted_buckets_list = [f"s3://lamin-{region}" for region in HOSTED_REGIONS]
-    hosted_buckets_list.append("s3://scverse-spatial-eu-central-1")
-    HOSTED_BUCKETS = tuple(hosted_buckets_list)
+    HOSTED_BUCKETS = tuple([f"s3://lamin-{region}" for region in HOSTED_REGIONS])
 else:
     HOSTED_BUCKETS = ("s3://lamin-hosted-test",)  # type: ignore
 
@@ -29,6 +27,11 @@ def _keep_trailing_slash(path_str: str):
 AWS_CREDENTIALS_EXPIRATION = 11 * 60 * 60  # refresh credentials after 11 hours
 
 
+# set anon=True for these buckets if credentials fail for a public bucket
+# to be expanded
+PUBLIC_BUCKETS = ("cellxgene-data-public",)
+
+
 class AWSCredentialsManager:
     def __init__(self):
         self._credentials_cache = {}
@@ -38,7 +41,15 @@ class AWSCredentialsManager:
         # this is cached so will be resued with the connection initialized
         fs = S3FileSystem(cache_regions=True)
         fs.connect()
-        self.anon = fs.session._credentials is None
+        self.anon: bool = fs.session._credentials is None
+        self.anon_public: bool | None = None
+        if not self.anon:
+            try:
+                # use lamindata public bucket for this test
+                fs.call_s3("head_bucket", Bucket="lamindata")
+                self.anon_public = False
+            except Exception as e:
+                self.anon_public = isinstance(e, PermissionError)
 
     def _find_root(self, path_str: str) -> str | None:
         roots = self._credentials_cache.keys()
@@ -73,6 +84,8 @@ class AWSCredentialsManager:
                 anon = False
             else:
                 anon = self.anon
+            if not anon and self.anon_public and path.drive in PUBLIC_BUCKETS:
+                anon = True
             connection_options = {"anon": anon}
         else:
             connection_options = credentials
@@ -132,7 +145,7 @@ class AWSCredentialsManager:
             root = "/".join(path.path.rstrip("/").split("/")[:2])
         else:
             # write the bucket for everything else
-            root = path._url.netloc
+            root = path.drive
         root = "s3://" + root
         self._set_cached_credentials(_keep_trailing_slash(root), credentials)
 
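Note: the new `anon_public` flag distinguishes "no credentials configured" from "credentials present but failing on public buckets". A minimal sketch of the probe pattern under the same assumptions as the diff (s3fs installed, `lamindata` as the probe bucket); the helper function is illustrative:

    from s3fs import S3FileSystem

    PUBLIC_BUCKETS = ("cellxgene-data-public",)

    fs = S3FileSystem(cache_regions=True)
    fs.connect()
    anon = fs.session._credentials is None  # True if no credentials exist at all
    anon_public = None
    if not anon:
        try:
            # HEAD a known public bucket with the discovered credentials
            fs.call_s3("head_bucket", Bucket="lamindata")
            anon_public = False
        except Exception as e:
            # invalid or expired credentials surface as PermissionError here
            anon_public = isinstance(e, PermissionError)

    def options_for(bucket: str) -> dict:
        # fall back to anonymous access only for known public buckets
        if not anon and anon_public and bucket in PUBLIC_BUCKETS:
            return {"anon": True}
        return {"anon": anon}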
lamindb_setup/core/_settings.py CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import os
+import sys
 from typing import TYPE_CHECKING
 
 from appdirs import AppDirs
@@ -172,6 +173,9 @@ class SetupSettings:
 
     def __repr__(self) -> str:
         """Rich string representation."""
+        # do not show current setting representation when building docs
+        if "sphinx" in sys.modules:
+            return object.__repr__(self)
         repr = self.user.__repr__()
         repr += f"\nAuto-connect in Python: {self.auto_connect}\n"
         repr += f"Private Django API: {self.private_django_api}\n"
lamindb_setup/core/_settings_storage.py CHANGED
@@ -114,13 +114,15 @@ def init_storage(
             root_str = f"s3://lamin-{region}/{uid}"
         else:
             root_str = f"s3://lamin-hosted-test/{uid}"
-    elif root_str.startswith(("gs://", "s3://")):
+    elif root_str.startswith(("gs://", "s3://", "hf://")):
         pass
     else:  # local path
         try:
             _ = Path(root_str)
         except Exception as e:
-            logger.error("`storage` is not a valid local, GCP storage or AWS S3 path")
+            logger.error(
+                "`storage` is not a valid local, GCP storage, AWS S3 path or Hugging Face path"
+            )
             raise e
     ssettings = StorageSettings(
         uid=uid,
@@ -161,6 +163,7 @@ def init_storage(
     # only newly created
     if hub_record_status == "hub-record-created" and ssettings._uuid is not None:
         delete_storage_record(ssettings._uuid, access_token=access_token)  # type: ignore
+        hub_record_status = "hub-record-not-created"
     ssettings._instance_id = None
     return ssettings, hub_record_status
 
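Note: with `hf://` accepted alongside `gs://` and `s3://`, root validation reduces to a prefix check plus a local-path fallback. A minimal sketch of that logic; the function name is illustrative:

    from pathlib import Path

    def validate_root(root_str: str) -> None:
        if root_str.startswith(("gs://", "s3://", "hf://")):
            return  # recognized cloud protocol
        try:
            _ = Path(root_str)  # anything else must parse as a local path
        except Exception as e:
            raise ValueError(
                "`storage` is not a valid local, GCP storage, AWS S3 path"
                " or Hugging Face path"
            ) from e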
lamindb_setup/core/hashing.py CHANGED
@@ -47,14 +47,14 @@ def hash_set(s: set[str]) -> str:
     return to_b64_str(hashlib.md5(bstr).digest())[:HASH_LENGTH]
 
 
-def hash_md5s_from_dir(hashes: Iterable[str]) -> tuple[str, str]:
+def hash_from_hashes_list(hashes: Iterable[str]) -> str:
     # need to sort below because we don't want the order of parsing the dir to
     # affect the hash
     digests = b"".join(
         hashlib.md5(hash.encode("utf-8")).digest() for hash in sorted(hashes)
     )
     digest = hashlib.md5(digests).digest()
-    return to_b64_str(digest)[:HASH_LENGTH], "md5-d"
+    return to_b64_str(digest)[:HASH_LENGTH]
 
 
 def hash_code(file_path: UPathStr):
@@ -110,7 +110,7 @@ def hash_dir(path: Path):
     hashes_sizes = map(hash_size, files)
     hashes, sizes = zip(*hashes_sizes)
 
-    hash, hash_type = hash_md5s_from_dir(hashes)
+    hash, hash_type = hash_from_hashes_list(hashes), "md5-d"
     n_objects = len(hashes)
     size = sum(sizes)
     return size, hash, hash_type, n_objects
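
Note: the renamed `hash_from_hashes_list` now returns only the digest and leaves the `"md5-d"` label to callers, so the same helper can serve directory hashing regardless of where the per-file hashes came from. A minimal sketch of the order-independent scheme, with `to_b64_str` and `HASH_LENGTH` stubbed in to match their use above (the value 22 is assumed here):

    import base64
    import hashlib
    from collections.abc import Iterable

    HASH_LENGTH = 22  # assumed truncation length

    def to_b64_str(digest: bytes) -> str:
        return base64.urlsafe_b64encode(digest).decode().strip("=")

    def hash_from_hashes_list(hashes: Iterable[str]) -> str:
        # sorting makes the digest independent of directory traversal order
        digests = b"".join(
            hashlib.md5(h.encode("utf-8")).digest() for h in sorted(hashes)
        )
        return to_b64_str(hashlib.md5(digests).digest())[:HASH_LENGTH]

    # callers attach the hash type themselves now:
    hash_value, hash_type = hash_from_hashes_list(["a1", "b2"]), "md5-d"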
lamindb_setup/core/upath.py CHANGED
@@ -8,22 +8,22 @@ from collections import defaultdict
 from datetime import datetime, timezone
 from functools import partial
 from itertools import islice
-from pathlib import Path, PurePosixPath
+from pathlib import Path, PosixPath, PurePosixPath, WindowsPath
 from typing import TYPE_CHECKING, Any, Literal
 
 import fsspec
 from lamin_utils import logger
 from upath import UPath
 from upath.implementations.cloud import CloudPath, S3Path  # keep CloudPath!
-from upath.implementations.local import LocalPath, PosixUPath, WindowsUPath
+from upath.implementations.local import LocalPath
 
 from ._aws_credentials import HOSTED_BUCKETS, get_aws_credentials_manager
-from .hashing import HASH_LENGTH, b16_to_b64, hash_md5s_from_dir
+from .hashing import HASH_LENGTH, b16_to_b64, hash_from_hashes_list
 
 if TYPE_CHECKING:
     from .types import UPathStr
 
-LocalPathClasses = (PosixUPath, WindowsUPath, LocalPath)
+LocalPathClasses = (PosixPath, WindowsPath, LocalPath)
 
 # also see https://gist.github.com/securifera/e7eed730cbe1ce43d0c29d7cd2d582f4
 # ".gz" is not listed here as it typically occurs with another suffix
@@ -291,21 +291,25 @@ def upload_from(
         callback = ProgressCallback(local_path.name, "uploading")
         kwargs["callback"] = callback
 
+    source: str | list[str]
+    destination: str | list[str]
     if local_path_is_dir and not create_folder:
-        source = [f for f in local_path.rglob("*") if f.is_file()]
-        destination = [str(self / f.relative_to(local_path)) for f in source]
-        source = [str(f) for f in source]  # type: ignore
+        source = [f.as_posix() for f in local_path.rglob("*") if f.is_file()]
+        destination = fsspec.utils.other_paths(
+            source, self.as_posix(), exists=False, flatten=False
+        )
     else:
-        source = str(local_path)  # type: ignore
-        destination = str(self)  # type: ignore
+        source = local_path.as_posix()
+        destination = self.as_posix()
 
     # the below lines are to avoid s3fs triggering create_bucket in upload if
     # dirs are present it allows to avoid permission error
     # would be easier to just
     if self.protocol == "s3" and local_path_is_dir and create_folder:
-        bucket = self._url.netloc
+        bucket = self.drive
         if bucket not in self.fs.dircache:
             self.fs.dircache[bucket] = [{}]
+        assert isinstance(destination, str)
         if not destination.endswith(TRAILING_SEP):  # type: ignore
             destination += "/"
         cleanup_cache = True
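
Note: `fsspec.utils.other_paths` replaces the hand-rolled destination mapping above; it strips the sources' common prefix and re-roots them under the target, matching what fsspec's own `put()` would compute. Roughly, under those assumptions (bucket and paths are illustrative):

    import fsspec.utils

    sources = [
        "/data/project/a.txt",
        "/data/project/sub/b.txt",
    ]
    destinations = fsspec.utils.other_paths(
        sources, "s3://my-bucket/project", exists=False, flatten=False
    )
    # roughly: ["s3://my-bucket/project/a.txt",
    #           "s3://my-bucket/project/sub/b.txt"]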
@@ -336,22 +340,34 @@ def synchronize(
     timestamp: float | None = None,
 ):
     """Sync to a local destination path."""
+    protocol = self.protocol
     # optimize the number of network requests
     if timestamp is not None:
         is_dir = False
         exists = True
         cloud_mts = timestamp
     else:
-        # perform only one network request to check existence, type and timestamp
-        try:
-            cloud_mts = self.modified.timestamp()
-            is_dir = False
-            exists = True
-        except FileNotFoundError:
-            exists = False
-        except IsADirectoryError:
-            is_dir = True
-            exists = True
+        # hf requires special treatment
+        if protocol == "hf":
+            try:
+                stat_hf = self.stat().as_info()
+                is_dir = stat_hf["type"] == "directory"
+                exists = True
+                if not is_dir:
+                    cloud_mts = stat_hf["last_commit"].date.timestamp()
+            except FileNotFoundError:
+                exists = False
+        else:
+            # perform only one network request to check existence, type and timestamp
+            try:
+                cloud_mts = self.modified.timestamp()
+                is_dir = False
+                exists = True
+            except FileNotFoundError:
+                exists = False
+            except IsADirectoryError:
+                is_dir = True
+                exists = True
 
     if not exists:
         warn_or_error = f"The original path {self} does not exist anymore."
@@ -369,14 +385,18 @@ def synchronize(
     # synchronization logic for directories
     if is_dir:
         files = self.fs.find(str(self), detail=True)
-        protocol_modified = {"s3": "LastModified", "gs": "mtime"}
-        modified_key = protocol_modified.get(self.protocol, None)
-        if modified_key is None:
-            raise ValueError(f"Can't synchronize a directory for {self.protocol}.")
+        if protocol == "s3":
+            get_modified = lambda file_stat: file_stat["LastModified"]
+        elif protocol == "gs":
+            get_modified = lambda file_stat: file_stat["mtime"]
+        elif protocol == "hf":
+            get_modified = lambda file_stat: file_stat["last_commit"].date
+        else:
+            raise ValueError(f"Can't synchronize a directory for {protocol}.")
         if objectpath.exists():
             destination_exists = True
             cloud_mts_max = max(
-                file[modified_key] for file in files.values()
+                get_modified(file) for file in files.values()
             ).timestamp()
             local_mts = [
                 file.stat().st_mtime for file in objectpath.rglob("*") if file.is_file()
@@ -399,11 +419,10 @@ def synchronize(
         callback.set_size(len(files))
         origin_file_keys = []
         for file, stat in callback.wrap(files.items()):
-            file_key = PurePosixPath(file).relative_to(self.path)
-            origin_file_keys.append(file_key.as_posix())
-            timestamp = stat[modified_key].timestamp()
-
-            origin = f"{self.protocol}://{file}"
+            file_key = PurePosixPath(file).relative_to(self.path).as_posix()
+            origin_file_keys.append(file_key)
+            timestamp = get_modified(stat).timestamp()
+            origin = f"{protocol}://{file}"
             destination = objectpath / file_key
             child = callback.branched(origin, destination.as_posix())
             UPath(origin, **self.storage_options).synchronize(
@@ -435,6 +454,10 @@ def synchronize(
         objectpath.parent.mkdir(parents=True, exist_ok=True)
         need_synchronize = True
     if need_synchronize:
+        # hf has sync filesystem
+        # on sync filesystems ChildProgressCallback.branched()
+        # returns the default callback
+        # this is why a difference between s3 and hf in progress bars
         self.download_to(
             objectpath, recursive=False, print_progress=False, callback=callback
         )
@@ -615,11 +638,11 @@ def to_url(upath):
     if upath.protocol != "s3":
         raise ValueError("The provided UPath must be an S3 path.")
     key = "/".join(upath.parts[1:])
-    bucket = upath._url.netloc
+    bucket = upath.drive
     if bucket == "scverse-spatial-eu-central-1":
         region = "eu-central-1"
     elif f"s3://{bucket}" not in HOSTED_BUCKETS:
-        response = upath.fs.call_s3("head_bucket", Bucket=upath._url.netloc)
+        response = upath.fs.call_s3("head_bucket", Bucket=bucket)
         headers = response["ResponseMetadata"]["HTTPHeaders"]
         region = headers.get("x-amz-bucket-region")
     else:
@@ -694,38 +717,60 @@ def create_path(path: UPath, access_token: str | None = None) -> UPath:
     return get_aws_credentials_manager().enrich_path(path, access_token)
 
 
-def get_stat_file_cloud(stat: dict) -> tuple[int, str, str]:
+def get_stat_file_cloud(stat: dict) -> tuple[int, str | None, str | None]:
     size = stat["size"]
-    etag = stat["ETag"]
-    # small files
-    if "-" not in etag:
-        # only store hash for non-multipart uploads
-        # we can't rapidly validate multi-part uploaded files client-side
-        # we can add more logic later down-the-road
-        hash = b16_to_b64(etag)
+    hash, hash_type = None, None
+    # gs, use md5Hash instead of etag for now
+    if "md5Hash" in stat:
+        # gs hash is already in base64
+        hash = stat["md5Hash"].strip('"=')
         hash_type = "md5"
-    else:
-        stripped_etag, suffix = etag.split("-")
-        suffix = suffix.strip('"')
-        hash = b16_to_b64(stripped_etag)
-        hash_type = f"md5-{suffix}"  # this is the S3 chunk-hashing strategy
-    return size, hash[:HASH_LENGTH], hash_type
+    # hf
+    elif "blob_id" in stat:
+        hash = b16_to_b64(stat["blob_id"])
+        hash_type = "sha1"
+    # s3
+    elif "ETag" in stat:
+        etag = stat["ETag"]
+        # small files
+        if "-" not in etag:
+            # only store hash for non-multipart uploads
+            # we can't rapidly validate multi-part uploaded files client-side
+            # we can add more logic later down-the-road
+            hash = b16_to_b64(etag)
+            hash_type = "md5"
+        else:
+            stripped_etag, suffix = etag.split("-")
+            suffix = suffix.strip('"')
+            hash = b16_to_b64(stripped_etag)
+            hash_type = f"md5-{suffix}"  # this is the S3 chunk-hashing strategy
+    if hash is not None:
+        hash = hash[:HASH_LENGTH]
+    return size, hash, hash_type
 
 
-def get_stat_dir_cloud(path: UPath) -> tuple[int, str, str, int]:
-    sizes = []
-    md5s = []
+def get_stat_dir_cloud(path: UPath) -> tuple[int, str | None, str | None, int]:
     objects = path.fs.find(path.as_posix(), detail=True)
+    hash, hash_type = None, None
+    compute_list_hash = True
     if path.protocol == "s3":
         accessor = "ETag"
     elif path.protocol == "gs":
         accessor = "md5Hash"
+    elif path.protocol == "hf":
+        accessor = "blob_id"
+    else:
+        compute_list_hash = False
+    sizes = []
+    hashes = []
     for object in objects.values():
         sizes.append(object["size"])
-        md5s.append(object[accessor].strip('"='))
+        if compute_list_hash:
+            hashes.append(object[accessor].strip('"='))
     size = sum(sizes)
-    hash, hash_type = hash_md5s_from_dir(md5s)
-    n_objects = len(md5s)
+    n_objects = len(sizes)
+    if compute_list_hash:
+        hash, hash_type = hash_from_hashes_list(hashes), "md5-d"
    return size, hash, hash_type, n_objects
 
 
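Note: the reworked `get_stat_file_cloud` keys off which fields the fsspec info dict carries rather than off the protocol name, and leaves the hash as `None` for anything unrecognized. A minimal sketch of that dispatch; the function name and dicts are illustrative:

    from __future__ import annotations

    def hash_type_for(stat: dict) -> str | None:
        if "md5Hash" in stat:  # gs reports a base64 md5 directly
            return "md5"
        if "blob_id" in stat:  # hf reports the git blob sha1
            return "sha1"
        if "ETag" in stat:  # s3: plain md5, or chunked "md5-<n>"
            etag = stat["ETag"].strip('"')
            if "-" not in etag:
                return "md5"
            return f"md5-{etag.split('-')[1]}"
        return None  # unknown protocol: hash stays None

    print(hash_type_for({"ETag": '"abc123-4"'}))  # md5-4
    print(hash_type_for({"size": 10}))            # None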
lamindb_setup-0.80.0.dist-info/METADATA → lamindb_setup-0.81.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lamindb_setup
-Version: 0.80.0
+Version: 0.81.0
 Summary: Setup & configure LaminDB.
 Author-email: Lamin Labs <open-source@lamin.ai>
 Requires-Python: >=3.9
@@ -19,7 +19,7 @@ Requires-Dist: supabase==2.2.1
 Requires-Dist: psutil
 Requires-Dist: urllib3<2 ; extra == "aws"
 Requires-Dist: aiobotocore[boto3]>=2.5.4,<3.0.0 ; extra == "aws"
-Requires-Dist: s3fs>=2023.12.2,<=2024.6.1 ; extra == "aws"
+Requires-Dist: s3fs>=2023.12.2,<=2024.10.0 ; extra == "aws"
 Requires-Dist: line_profiler ; extra == "dev"
 Requires-Dist: pyjwt<3.0.0 ; extra == "dev"
 Requires-Dist: psycopg2-binary ; extra == "dev"
@@ -31,7 +31,7 @@ Requires-Dist: pytest-xdist ; extra == "dev"
 Requires-Dist: nbproject-test>=0.4.3 ; extra == "dev"
 Requires-Dist: pandas ; extra == "dev"
 Requires-Dist: django-schema-graph ; extra == "erdiagram"
-Requires-Dist: gcsfs>=2023.12.2,<=2024.6.1 ; extra == "gcp"
+Requires-Dist: gcsfs>=2023.12.2,<=2024.10.0 ; extra == "gcp"
 Project-URL: Home, https://github.com/laminlabs/lamindb-setup
 Provides-Extra: aws
 Provides-Extra: dev
lamindb_setup-0.80.0.dist-info/RECORD → lamindb_setup-0.81.0.dist-info/RECORD
@@ -1,4 +1,4 @@
-lamindb_setup/__init__.py,sha256=hCbASHI6nzxl7Xre0B3sTSQaCJ5mWkjI2xSF2odlJ8Q,1714
+lamindb_setup/__init__.py,sha256=Xm__DIYPUbJSjykjcT4bC-mR75b1GFYVGZ0owhVfCyI,1714
 lamindb_setup/_cache.py,sha256=1XnM-V_KprbjpgPY7Bg3FYn53Iz_2_fEgcMOaSdKKbg,1332
 lamindb_setup/_check.py,sha256=28PcG8Kp6OpjSLSi1r2boL2Ryeh6xkaCL87HFbjs6GA,129
 lamindb_setup/_check_setup.py,sha256=6cSfpmVOSgU7YiVHfJpBTGTQ7rrnwunt1pJT_jkgNM8,3196
@@ -9,7 +9,7 @@ lamindb_setup/_django.py,sha256=DWUTjjVhEViX0S-zIkeqQgKovWqVgWMl4Y0ANwlA3Pw,1505
 lamindb_setup/_entry_points.py,sha256=Hs2oJQOCTaGUdWn-1mufM6qUZr9W_EJ_Oc3f0_Vc0Yw,616
 lamindb_setup/_exportdb.py,sha256=43g77-tH-vAlTn8ig1mMD9-KXLKvxUeDLaq0gVu3l-c,2114
 lamindb_setup/_importdb.py,sha256=yYYShzUajTsR-cTW4CZ-UNDWZY2uE5PAgNbp-wn8Ogc,1874
-lamindb_setup/_init_instance.py,sha256=6Db289T2A2464KqW0NsfSKg59zrhfny9RZxBgb9kPVs,14340
+lamindb_setup/_init_instance.py,sha256=zJbkP22h4Yif_VNooL06FFoHqH1EU45CqiLwF8FUaKs,14368
 lamindb_setup/_migrate.py,sha256=x_b4k4XRfLSD-EEFMc324yK6DIK7goW33wUytbIWlNs,8917
 lamindb_setup/_register_instance.py,sha256=alQuYp2f8Ct8xvRC1gt8p_HZ0tqCd3gZD3kiPBLPpsI,1269
 lamindb_setup/_schema.py,sha256=b3uzhhWpV5mQtDwhMINc2MabGCnGLESy51ito3yl6Wc,679
@@ -18,7 +18,7 @@ lamindb_setup/_set_managed_storage.py,sha256=4tDxXQMt8Gw028uY3vIQxZQ7qBNXhQMc8sa
 lamindb_setup/_setup_user.py,sha256=-g7Xj6510BDyM8kuqAsVBZFwehlhBa_uWBSV1rPeuM8,4586
 lamindb_setup/_silence_loggers.py,sha256=AKF_YcHvX32eGXdsYK8MJlxEaZ-Uo2f6QDRzjKFCtws,1568
 lamindb_setup/core/__init__.py,sha256=BxIVMX5HQq8oZ1OuY_saUEJz5Tdd7gaCPngxVu5iou4,417
-lamindb_setup/core/_aws_credentials.py,sha256=uKMQO9q42Hnepz8aj3RxwLKDWUJx8pNOYrFnnNh5X40,5325
+lamindb_setup/core/_aws_credentials.py,sha256=E8yanKOq-idaIz5y9D9SazHaYJKX69VLK3pn7jl82do,5815
 lamindb_setup/core/_aws_storage.py,sha256=nEjeUv4xUVpoV0Lx-zjjmyb9w804bDyaeiM-OqbfwM0,1799
 lamindb_setup/core/_deprecated.py,sha256=3qxUI1dnDlSeR0BYrv7ucjqRBEojbqotPgpShXs4KF8,2520
 lamindb_setup/core/_docs.py,sha256=3k-YY-oVaJd_9UIY-LfBg_u8raKOCNfkZQPA73KsUhs,276
@@ -27,21 +27,21 @@ lamindb_setup/core/_hub_core.py,sha256=eUxRz9iJj6RA5-MWgQqqZYAU-di5LQDamRZn6t-VO
 lamindb_setup/core/_hub_crud.py,sha256=eZErpq9t1Cp2ULBSi457ekrcqfesw4Y6IJgaqyrINMY,5276
 lamindb_setup/core/_hub_utils.py,sha256=08NwQsb53-tXa_pr-f0tPTN0FeeVf_i1p3dEbEWD0F4,3016
 lamindb_setup/core/_private_django_api.py,sha256=KIn43HOhiRjkbTbddyJqv-WNTTa1bAizbM1tWXoXPBg,2869
-lamindb_setup/core/_settings.py,sha256=Iv2FUz1l92V4AcuyvRsYlJxoY1oB-UB4gAaA6QgWioE,7792
+lamindb_setup/core/_settings.py,sha256=mpGsSb98UsBedLsW2RuowZ17EP2tI2XRGPztqrJtrV4,7952
 lamindb_setup/core/_settings_instance.py,sha256=ajcq9zRNE598tTqyMkMqaEOubVfFeE998DPtbgyzK3A,18801
 lamindb_setup/core/_settings_load.py,sha256=5OpghcbkrK9KBM_0Iu-61FTI76UbOpPkkJpUittXS-w,4098
 lamindb_setup/core/_settings_save.py,sha256=rxGxgaK5i9exKqSJERQQyY1WZio20meoQJoYXlVW-1w,3138
-lamindb_setup/core/_settings_storage.py,sha256=15B7taJF1zxJ1_qAb67NuXkTFvO2TRTWMt6KTzDf1mw,11875
+lamindb_setup/core/_settings_storage.py,sha256=CYwGZm0fKYN7eLLsU-sOtOKG7HzswQVjTWb0ooHKcNg,11990
 lamindb_setup/core/_settings_store.py,sha256=WcsgOmgnu9gztcrhp-N4OONNZyxICHV8M0HdJllTaEo,2219
 lamindb_setup/core/_settings_user.py,sha256=iz0MqFLKXqm8LYx_CHmr02_oNvYWFLIxKkJLdpS5W08,1476
 lamindb_setup/core/_setup_bionty_sources.py,sha256=o2L5Ww8TKgSqJtL4cGUcpJwLNYxA9BZgddhCMCu_E2g,3428
 lamindb_setup/core/cloud_sqlite_locker.py,sha256=i6TrT7HG0lqliPvZTlsZ_uplPaqhPBbabyfeR32SkA8,7107
 lamindb_setup/core/django.py,sha256=E4U9nUlV2kHd-G5v6iSdFGAAWixlQDxOFwMwOMG9xfw,3864
 lamindb_setup/core/exceptions.py,sha256=4NpLUNUIfXYVTFX2FvLZF8RW34exk2Vn2X3G4YhnTRg,276
-lamindb_setup/core/hashing.py,sha256=bkuvZyAuC7-Y_qZumJd_rybF-upJ5J3KxnKiymRUifw,3148
+lamindb_setup/core/hashing.py,sha256=26dtak7XgmrWa_D1zuDyxObRQcriMtnc1yEigkKASmM,3142
 lamindb_setup/core/types.py,sha256=zJii2le38BJUmsNVvzDrbzGYr0yaeb-9Rw9IKmsBr3k,523
-lamindb_setup/core/upath.py,sha256=DJ0S3JufnCmG6EAGy2eqTp5rVX3Oqw-bUlh9-9ipKYI,27091
-lamindb_setup-0.80.0.dist-info/LICENSE,sha256=UOZ1F5fFDe3XXvG4oNnkL1-Ecun7zpHzRxjp-XsMeAo,11324
-lamindb_setup-0.80.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
-lamindb_setup-0.80.0.dist-info/METADATA,sha256=IBWXhrRyGmMeYdgByW0TJV2h70cSVssuxA1Do6cmfLM,1743
-lamindb_setup-0.80.0.dist-info/RECORD,,
+lamindb_setup/core/upath.py,sha256=GD-EW9QSqJH023ox53hPDvjE86hFjXVhb0MSEU02HeY,28702
+lamindb_setup-0.81.0.dist-info/LICENSE,sha256=UOZ1F5fFDe3XXvG4oNnkL1-Ecun7zpHzRxjp-XsMeAo,11324
+lamindb_setup-0.81.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
+lamindb_setup-0.81.0.dist-info/METADATA,sha256=mEUNPAf6Q4NfqeO2YuUIlADbKkLQLOFFzL-BNkvk3dY,1745
+lamindb_setup-0.81.0.dist-info/RECORD,,