lamindb_setup 1.15.0__py3-none-any.whl → 1.15.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lamindb_setup/__init__.py +1 -1
- lamindb_setup/_connect_instance.py +0 -1
- lamindb_setup/_set_managed_storage.py +11 -3
- lamindb_setup/core/__init__.py +6 -1
- lamindb_setup/core/_aws_options.py +1 -0
- lamindb_setup/core/_clone.py +95 -14
- lamindb_setup/core/_hub_client.py +11 -4
- lamindb_setup/core/_settings.py +1 -2
- lamindb_setup/core/_settings_instance.py +15 -6
- lamindb_setup/core/_settings_load.py +2 -2
- lamindb_setup/core/_settings_save.py +1 -0
- lamindb_setup/core/_settings_storage.py +32 -21
- lamindb_setup/core/_settings_store.py +3 -2
- lamindb_setup/core/upath.py +1 -4
- lamindb_setup/io.py +237 -51
- {lamindb_setup-1.15.0.dist-info → lamindb_setup-1.15.2.dist-info}/METADATA +1 -1
- {lamindb_setup-1.15.0.dist-info → lamindb_setup-1.15.2.dist-info}/RECORD +19 -19
- {lamindb_setup-1.15.0.dist-info → lamindb_setup-1.15.2.dist-info}/LICENSE +0 -0
- {lamindb_setup-1.15.0.dist-info → lamindb_setup-1.15.2.dist-info}/WHEEL +0 -0
lamindb_setup/__init__.py
CHANGED

@@ -41,7 +41,7 @@ def set_managed_storage(root: UPathStr, host: str | None = None, **fs_kwargs):
             "use a tuple of (local_root, host) instead"
         )
 
-    # here the storage is registered in the hub
+    # here the storage location is registered in the hub
     # hub_record_status="hub-record-created" if a new record is created
     # "hub-record-retrieved" if the storage is in the hub already
     ssettings, hub_record_status = init_storage(
@@ -65,5 +65,13 @@ def set_managed_storage(root: UPathStr, host: str | None = None, **fs_kwargs):
             delete_storage_record(ssettings)
             raise e
 
-    settings.instance._storage = ssettings
-
+    if ssettings._instance_id != settings.instance._id:
+        logger.warning(
+            f"registered storage location {root} as read-only for this instance (it's written by instance with uid: {ssettings.instance_uid})"
+        )
+        logger.warning(
+            f"did *not* switch default storage location, it's still: {settings.storage.root_as_str}"
+        )
+    else:
+        settings.instance._storage = ssettings
+    settings.storage._set_fs_kwargs(**fs_kwargs)
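The new branch changes observable behavior: registering a root that another instance already manages now logs warnings instead of silently becoming the default storage. A minimal sketch of the 1.15.2 behavior (the instance slug and bucket name are made up for illustration):

```python
import lamindb_setup as ln_setup

ln_setup.connect("my-org/my-instance")  # hypothetical slug

# if s3://shared-bucket is managed by a different instance, this now registers
# it read-only, warns twice, and leaves settings.storage untouched; otherwise
# it becomes the default storage location as before
ln_setup.set_managed_storage("s3://shared-bucket")
```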
lamindb_setup/core/__init__.py
CHANGED

@@ -23,7 +23,12 @@ Storage
 """
 
 from . import django, upath
-from ._clone import connect_local_sqlite, init_local_sqlite
+from ._clone import (
+    connect_local_sqlite,
+    connect_remote_sqlite,
+    init_local_sqlite,
+    upload_sqlite_clone,
+)
 from ._deprecated import deprecated  # documented in lamindb.base
 from ._docs import doc_args  # documented in lamindb.base
 from ._settings import SetupSettings

lamindb_setup/core/_aws_options.py
CHANGED

@@ -20,6 +20,7 @@ lamin_env = os.getenv("LAMIN_ENV")
 if lamin_env is None or lamin_env == "prod":
     HOSTED_BUCKETS = tuple([f"s3://lamin-{region}" for region in HOSTED_REGIONS])
 else:
+    logger.warning("loaded LAMIN_ENV: staging")
     HOSTED_BUCKETS = ("s3://lamin-hosted-test",)  # type: ignore
 
 
lamindb_setup/core/_clone.py
CHANGED

@@ -5,14 +5,19 @@
 
 init_local_sqlite
 connect_local_sqlite
+upload_sqlite_clone
 """
 
+import gzip
 import os
+import shutil
+from pathlib import Path
 
 from lamindb_setup.core._settings_instance import InstanceSettings
 from lamindb_setup.core._settings_load import load_instance_settings
 from lamindb_setup.core._settings_store import instance_settings_file
 from lamindb_setup.core.django import reset_django
+from lamindb_setup.core.upath import create_path
 
 
 def init_local_sqlite(
@@ -53,28 +58,31 @@ def init_local_sqlite(
         if copy_suffix is not None
         else ln_setup.settings.instance.name
     )
-    isettings = InstanceSettings(
-
-        owner=ln_setup.settings.instance.owner,  # type: ignore
-        name=name,
-        storage=ln_setup.settings.storage,
-        db=None,
-        modules=",".join(ln_setup.settings.instance.modules),
-        is_on_hub=False,
+    isettings = ln_setup._connect_instance._connect_instance(
+        owner=ln_setup.settings.instance.owner, name=name
     )
-
+    isettings._db = None
+    isettings._is_on_hub = False
+    isettings._fine_grained_access = False
+    name = (
+        f"{isettings.name}{copy_suffix}" if copy_suffix is not None else isettings.name
+    )
+    isettings._name = name
+    isettings._is_clone = True
     isettings._persist(write_to_disk=True)
 
     if not isettings._sqlite_file_local.exists():
         # Reset Django configuration before _init_db() because Django was already configured for the original Postgres instance.
-        # Without this reset, the if not settings.configured check in setup_django() would skip reconfiguration,
+        # Without this reset, the `if not settings.configured`` check in `setup_django()` would skip reconfiguration,
         # causing migrations to run against the old Postgres database instead of the new SQLite clone database.
         reset_django()
         isettings._init_db()
 
 
-def connect_local_sqlite(instance: str) -> None:
-    """
+def connect_local_sqlite(
+    instance: str,
+) -> None:
+    """Load a locally stored SQLite instance of which a remote hub Postgres instance exists.
 
     This function bypasses the hub lookup that `lamin connect` performs, loading the SQLite clone directly from local settings files.
     The clone must first be created via `init_local_sqlite()`.
@@ -86,8 +94,81 @@ def connect_local_sqlite(instance: str) -> None:
     settings_file = instance_settings_file(name=name, owner=owner)
 
     if not settings_file.exists():
-        raise ValueError(
+        raise ValueError(
+            "SQLite clone not found."
+            " Run `init_local_sqlite()` to create a local copy or connect to a remote copy using `connect_remote_sqlite`."
+        )
 
     isettings = load_instance_settings(settings_file)
     isettings._persist(write_to_disk=False)
-
+
+    # Using `setup_django` instead of `_load_db` to not ping AWS RDS
+    from lamindb_setup._check_setup import disable_auto_connect
+
+    from .django import setup_django
+
+    disable_auto_connect(setup_django)(isettings)
+
+
+def connect_remote_sqlite(instance: str, *, copy_suffix: str | None = None) -> None:
+    """Load an existing SQLite copy of a hub instance.
+
+    Args:
+        instance: Instance slug in the form `account/name` (e.g., `laminlabs/privatedata-local`).
+        copy_suffix: Optional suffix of the local clone.
+    """
+    import lamindb_setup as ln_setup
+
+    owner, name = instance.split("/")
+
+    # Step 1: Create the settings file
+    isettings = ln_setup._connect_instance._connect_instance(owner=owner, name=name)
+    isettings._db = None
+    isettings._is_on_hub = False
+    isettings._fine_grained_access = False
+    isettings._db_permissions = "read"
+    name = (
+        f"{isettings.name}{copy_suffix}" if copy_suffix is not None else isettings.name
+    )
+    isettings._name = name
+    isettings._is_clone = True
+    isettings._persist(write_to_disk=True)
+
+    connect_local_sqlite(instance=instance + (copy_suffix or ""))
+
+
+def upload_sqlite_clone(
+    local_sqlite_path: Path | str | None = None, compress: bool = True
+) -> None:
+    """Uploads the SQLite clone to the default storage.
+
+    Args:
+        local_sqlite_path: Path to the SQLite file.
+            Defaults to the local storage path if not specified.
+        compress: Whether to compress the database with gzip before uploading.
+    """
+    import lamindb_setup as ln_setup
+
+    if local_sqlite_path is None:
+        local_sqlite_path = ln_setup.settings.instance._sqlite_file_local
+    else:
+        local_sqlite_path = Path(local_sqlite_path)
+
+    if not local_sqlite_path.exists():
+        raise FileNotFoundError(f"Database not found at {local_sqlite_path}")
+
+    cloud_db_path = ln_setup.settings.instance._sqlite_file
+
+    if compress:
+        temp_gz_path = local_sqlite_path.with_suffix(".db.gz")
+        with (
+            open(local_sqlite_path, "rb") as f_in,
+            gzip.open(temp_gz_path, "wb") as f_out,
+        ):
+            shutil.copyfileobj(f_in, f_out)
+        cloud_destination = create_path(f"{cloud_db_path}.gz")
+        cloud_destination.upload_from(temp_gz_path, print_progress=True)
+        temp_gz_path.unlink()
+    else:
+        cloud_destination = create_path(cloud_db_path)
+        cloud_destination.upload_from(local_sqlite_path, print_progress=True)
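Together with the existing helpers, the new functions complete a Postgres-to-SQLite clone round trip. A minimal sketch; the `copy_suffix` argument to `init_local_sqlite` is an assumption based on its body, and the slug comes from the docstring example:

```python
from lamindb_setup.core import (
    connect_remote_sqlite,
    init_local_sqlite,
    upload_sqlite_clone,
)

# 1) while connected to the hub Postgres instance, materialize a SQLite clone
init_local_sqlite(copy_suffix="_clone")  # hypothetical arguments

# 2) gzip the clone and place it next to the instance's cloud SQLite path
upload_sqlite_clone(compress=True)

# 3) elsewhere, fetch settings from the hub and read the clone without
#    ever touching the Postgres database (permissions forced to "read")
connect_remote_sqlite("laminlabs/privatedata-local", copy_suffix="_clone")
```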
lamindb_setup/core/_hub_client.py
CHANGED

@@ -96,9 +96,12 @@ def connect_hub(
     transports.append(
         RetryTransport(
             retry=LogRetry(total=2, backoff_factor=0.2),
-            transport=httpx.HTTPTransport(verify=True, http2=True),
+            transport=httpx.HTTPTransport(verify=True, http2=True, trust_env=True),
         )
     )
+    # this overwrites transports of existing httpx clients
+    # if proxies are set, the default transports that were created on clients init
+    # will be used, irrespective of these re-settings
     client.auth._http_client._transport = transports[0]
     client.postgrest.session._transport = transports[1]
     # POST is not retryable by default, but for our functions it should be safe to retry
@@ -116,7 +119,7 @@ def connect_hub(
                 "POST",
             ],
         ),
-        transport=httpx.HTTPTransport(verify=True, http2=True),
+        transport=httpx.HTTPTransport(verify=True, http2=True, trust_env=True),
     )
     return client
 
@@ -246,9 +249,13 @@ def httpx_client():
         else:
             transport = RetryTransport(
                 retry=LogRetry(total=2, backoff_factor=0.2),
-                transport=httpx.HTTPTransport(verify=True, http2=True),
+                transport=httpx.HTTPTransport(verify=True, http2=True, trust_env=True),
            )
-        client =
+        # first we create a client to build the proxy map from the env variables
+        # if proxies are set, the default transports will be used
+        # otherwise the RetryTransport object that we assign below
+        client = httpx.Client(trust_env=True)
+        client._transport = transport
         yield client
     finally:
         if client is not None:
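The recurring `trust_env=True` additions make the hub transports honor proxy-related environment variables. A short sketch of the underlying httpx behavior the new comments refer to (standard httpx semantics, not lamindb-specific code):

```python
import httpx

# trust_env=True lets httpx read HTTP_PROXY/HTTPS_PROXY/ALL_PROXY and
# SSL_CERT_FILE from the environment when building the client's proxy map
client = httpx.Client(trust_env=True)

# a transport assigned afterwards bypasses that proxy map, which is why the
# diff creates the client first and only then swaps in the retry transport
client._transport = httpx.HTTPTransport(verify=True, trust_env=True)
```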
lamindb_setup/core/_settings.py
CHANGED

@@ -320,8 +320,7 @@ class SetupSettings:
     def paths(self) -> type[SetupPaths]:
         """Convert cloud paths to lamindb local paths.
 
-        Use `settings.paths.cloud_to_local_no_update`
-        or `settings.paths.cloud_to_local`.
+        Use `settings.paths.cloud_to_local_no_update` or `settings.paths.cloud_to_local`.
         """
         return SetupPaths
 
lamindb_setup/core/_settings_instance.py
CHANGED

@@ -54,8 +54,7 @@ def is_local_db_url(db_url: str) -> bool:
 
 
 def check_is_instance_remote(root: UPathStr, db: str | None) -> bool:
-    # returns True for cloud SQLite
-    # and remote postgres
+    # returns True for cloud SQLite and remote postgres
     root_str = str(root)
     if not root_str.startswith("create-s3") and get_storage_type(root_str) == "local":
         return False
@@ -83,7 +82,8 @@ class InstanceSettings:
         schema_id: UUID | None = None,
         fine_grained_access: bool = False,
         db_permissions: str | None = None,
-        _locker_user: UserSettings | None = None,  # user to lock for if cloud sqlite
+        _locker_user: UserSettings | None = None,  # user to lock for if cloud sqlite,
+        _is_clone: bool = False,
     ):
         from ._hub_utils import validate_db_arg
 
@@ -109,6 +109,7 @@ class InstanceSettings:
         self._db_permissions = db_permissions
         # if None then settings.user is used
         self._locker_user = _locker_user
+        self._is_clone = _is_clone
 
     def __repr__(self):
         """Rich string representation."""
@@ -434,7 +435,7 @@ class InstanceSettings:
 
     def _update_cloud_sqlite_file(self, unlock_cloud_sqlite: bool = True) -> None:
         """Upload the local sqlite file to the cloud file."""
-        if self._is_cloud_sqlite:
+        if self._is_cloud_sqlite and not self._is_clone:
             sqlite_file = self._sqlite_file
             logger.warning(
                 f"updating{' & unlocking' if unlock_cloud_sqlite else ''} cloud SQLite "
@@ -602,6 +603,14 @@ class InstanceSettings:
         disable_auto_connect(setup_django)(self, init=True)
 
     def _load_db(self) -> tuple[bool, str]:
+        """Load the database connection.
+
+        For cloud SQLite instances, downloads the database file from cloud storage.
+        For all instances, initializes Django ORM with the database connection.
+
+        Returns:
+            Tuple of (success: bool, error_message: str). Returns (True, "") on success.
+        """
         # Is the database available and initialized as LaminDB?
         # returns a tuple of status code and message
         if self.dialect == "sqlite" and not self._sqlite_file.exists():
@@ -615,8 +624,8 @@ class InstanceSettings:
             return False, f"SQLite file {self._sqlite_file} does not exist"
         # we need the local sqlite to setup django
         self._update_local_sqlite_file()
-
-        # as warnings
+
+        # setting up django also performs a check for migrations & prints them as warnings
         # this should fail, e.g., if the db is not reachable
         from lamindb_setup._check_setup import disable_auto_connect
 
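The net effect of `_is_clone`: a clone loaded from a settings file can never push its SQLite file back to the cloud. A hedged sketch (instance and owner names are illustrative):

```python
from lamindb_setup.core._settings_load import load_instance_settings
from lamindb_setup.core._settings_store import instance_settings_file

settings_file = instance_settings_file(name="privatedata-local_clone", owner="laminlabs")
isettings = load_instance_settings(settings_file)

# persisted through the new is_clone field on InstanceSettingsStore
assert isettings._is_clone
# the upload branch is now guarded by `and not self._is_clone`, so this is a no-op
isettings._update_cloud_sqlite_file()
```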
lamindb_setup/core/_settings_load.py
CHANGED

@@ -69,8 +69,7 @@ def load_or_create_user_settings(api_key: str | None = None) -> UserSettings:
     """Return current user settings.
 
     Args:
-        api_key: if provided and there is no current user,
-            perform login and return the user settings.
+        api_key: if provided and there is no current user, perform login and return the user settings.
     """
     current_user_settings = current_user_settings_file()
     if not current_user_settings.exists():
@@ -125,6 +124,7 @@ def setup_instance_from_store(store: InstanceSettingsStore) -> InstanceSettings:
         schema_id=None if store.schema_id in {None, "null"} else UUID(store.schema_id),
         fine_grained_access=store.fine_grained_access,
         db_permissions=_null_to_value(store.db_permissions),
+        _is_clone=store.is_clone,
     )
 
 
lamindb_setup/core/_settings_storage.py
CHANGED

@@ -12,17 +12,14 @@ from lamin_utils import logger
 from lamindb_setup.errors import StorageAlreadyManaged
 
 from ._aws_options import (
-    HOSTED_REGIONS,
     LAMIN_ENDPOINTS,
     get_aws_options_manager,
 )
-from ._aws_storage import find_closest_aws_region
 from ._deprecated import deprecated
 from .hashing import hash_and_encode_as_b62
 from .upath import (
     LocalPathClasses,
     UPath,
-    _split_path_query,
     create_path,
     get_storage_region,
 )
@@ -58,12 +55,40 @@ def get_storage_type(root_as_str: str) -> StorageType:
     return convert.get(protocol, protocol)  # type: ignore
 
 
+def sanitize_root_user_input(root: UPathStr) -> UPath:
+    """Format a root path string."""
+    root_upath = root if isinstance(root, UPath) else UPath(root)
+    root_upath = root_upath.expanduser()
+    if isinstance(root_upath, LocalPathClasses):  # local paths
+        try:
+            (root_upath / ".lamindb").mkdir(parents=True, exist_ok=True)
+            root_upath = root_upath.resolve()
+        except Exception:
+            logger.warning(f"unable to create .lamindb/ folder in {root_upath}")
+    return root_upath
+
+
+def convert_sanitized_root_path_to_str(root_upath: UPath) -> str:
+    # embed endpoint_url into path string for storing and displaying
+    if root_upath.protocol == "s3":
+        endpoint_url = root_upath.storage_options.get("endpoint_url", None)
+        # LAMIN_ENDPOINTS include None
+        if endpoint_url not in LAMIN_ENDPOINTS:
+            return f"s3://{root_upath.path.rstrip('/')}?endpoint_url={endpoint_url}"
+    return root_upath.as_posix().rstrip("/")
+
+
+def convert_root_path_to_str(root: UPathStr) -> str:
+    """Format a root path string."""
+    sanitized_root_upath = sanitize_root_user_input(root)
+    return convert_sanitized_root_path_to_str(sanitized_root_upath)
+
+
 def mark_storage_root(
     root: UPathStr, uid: str, instance_id: UUID, instance_slug: str
 ) -> Literal["__marked__"] | str:
     # we need a file in folder-like storage locations on S3 to avoid
-    # permission errors from leveraging s3fs on an empty hosted storage location
-    # (path.fs.find raises a PermissionError)
+    # permission errors from leveraging s3fs on an empty hosted storage location (path.fs.find raises a PermissionError)
     # we also need it in case a storage location is ambiguous because a server / local environment
     # doesn't have a globally unique identifier, then we screen for this file to map the
     # path on a storage location in the registry
@@ -214,15 +239,7 @@ class StorageSettings:
     ):
         self._uid = uid
         self._uuid_ = uuid
-        self._root_init =
-        if isinstance(self._root_init, LocalPathClasses):  # local paths
-            try:
-                (self._root_init / ".lamindb").mkdir(parents=True, exist_ok=True)
-                self._root_init = self._root_init.resolve()
-            except Exception:
-                logger.warning(
-                    f"unable to create .lamindb/ folder in {self._root_init}"
-                )
+        self._root_init: UPath = sanitize_root_user_input(root)
         self._root = None
         self._instance_id = instance_id
         # we don't yet infer region here to make init fast
@@ -337,13 +354,7 @@ class StorageSettings:
     @property
     def root_as_str(self) -> str:
         """Formatted root string."""
-
-        if self._root_init.protocol == "s3":
-            endpoint_url = self._root_init.storage_options.get("endpoint_url", None)
-            # LAMIN_ENDPOINTS include None
-            if endpoint_url not in LAMIN_ENDPOINTS:
-                return f"s3://{self._root_init.path.rstrip('/')}?endpoint_url={endpoint_url}"
-        return self._root_init.as_posix().rstrip("/")
+        return convert_sanitized_root_path_to_str(self._root_init)
 
     @property
     def cache_dir(
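Extracting the logic into free functions means the same normalization can run without constructing a StorageSettings object. A sketch of the expected round trip, assuming universal-pathlib's convention that extra UPath kwargs land in storage_options (the endpoint URL is made up):

```python
from lamindb_setup.core._settings_storage import convert_root_path_to_str
from lamindb_setup.core.upath import UPath

# local roots are expanded, get a .lamindb/ marker folder, and resolve to absolute paths
convert_root_path_to_str("~/mydata")  # e.g. "/home/user/mydata"

# s3 roots with a non-Lamin endpoint embed the endpoint in the string form
root = UPath("s3://my-bucket", endpoint_url="http://localhost:9000")
convert_root_path_to_str(root)  # "s3://my-bucket?endpoint_url=http://localhost:9000"
```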
lamindb_setup/core/_settings_store.py
CHANGED

@@ -67,8 +67,8 @@ class InstanceSettingsStore(BaseSettings):
     owner: str
     name: str
     storage_root: str
-    storage_region: str | None
-    db: str | None
+    storage_region: str | None
+    db: str | None
     schema_str: str | None
     schema_id: str | None = None
     fine_grained_access: bool = False
@@ -76,6 +76,7 @@ class InstanceSettingsStore(BaseSettings):
     id: str
     git_repo: str | None
     keep_artifacts_local: bool | None
+    is_clone: bool = False
     model_config = SettingsConfigDict(env_prefix="lamindb_instance_", env_file=".env")
 
 
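Because `InstanceSettingsStore` is a pydantic `BaseSettings` with `env_prefix="lamindb_instance_"`, the new field also reads from the environment. A minimal analog just to show the mapping (not the real class, which has many required fields):

```python
import os

from pydantic_settings import BaseSettings, SettingsConfigDict


class DemoStore(BaseSettings):
    # stand-in for InstanceSettingsStore's new field
    is_clone: bool = False
    model_config = SettingsConfigDict(env_prefix="lamindb_instance_")


os.environ["lamindb_instance_is_clone"] = "true"
assert DemoStore().is_clone is True
```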
lamindb_setup/core/upath.py
CHANGED

@@ -908,12 +908,9 @@ def get_stat_file_cloud(stat: dict) -> tuple[int, str | None, str | None]:
     elif "blob_id" in stat:
         hash = b16_to_b64(stat["blob_id"])
         hash_type = "sha1"
-    # s3
-    # StorageClass is checked to be sure that it is indeed s3
-    # because http also has ETag
     elif "ETag" in stat:
         etag = stat["ETag"]
-        if "mimetype" in stat:
+        if "mimetype" in stat or ("url" in stat and stat["url"].startswith("http")):
             # http
             hash = hash_string(etag.strip('"'))
             hash_type = "md5-etag"
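The rewritten condition replaces the StorageClass comment with a direct check: fsspec's HTTP filesystem stat dicts carry `mimetype` or a `url`, while s3fs stat dicts carry `ETag` without them. A small standalone sketch of the heuristic:

```python
def looks_like_http(stat: dict) -> bool:
    # mirrors the new check in get_stat_file_cloud()
    return "mimetype" in stat or ("url" in stat and stat["url"].startswith("http"))


assert looks_like_http({"ETag": '"abc"', "url": "https://example.com/file"})
assert not looks_like_http({"ETag": '"abc-2"', "size": 10})  # s3-style stat
```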
lamindb_setup/io.py
CHANGED

@@ -1,11 +1,14 @@
 from __future__ import annotations
 
+import io
 import json
 import warnings
+from concurrent.futures import ProcessPoolExecutor, as_completed
 from importlib import import_module
 from pathlib import Path
 from typing import TYPE_CHECKING
 
+import numpy as np
 import pandas as pd
 from django.db import models, transaction
 from rich.progress import Progress
@@ -43,21 +46,101 @@ def _get_registries(module_name: str) -> list[str]:
     ]
 
 
-def
-
+def _export_full_table(
+    registry_info: tuple[str, str, str | None],
+    directory: Path,
+    chunk_size: int,
+) -> list[tuple[str, Path]] | str:
+    """Export a registry table to parquet.
+
+    For PostgreSQL, uses COPY TO which streams the table directly to CSV format,
+    bypassing query planner overhead and row-by-row conversion (10-50x faster than SELECT).
+
+    For SQLite with large tables, reads in chunks to avoid memory issues when tables exceed available RAM.
+
+    Args:
+        registry_info: Tuple of (module_name, model_name, field_name) where field_name
+            is None for regular tables or the field name for M2M link tables.
+        directory: Output directory for parquet files.
+        chunk_size: Maximum rows per chunk for SQLite large tables.
+
+    Returns:
+        String identifier for single-file exports, or list of (table_name, chunk_path) tuples for chunked exports that need merging.
+    """
+    from django.db import connection
+
     import lamindb_setup as ln_setup
 
+    module_name, model_name, field_name = registry_info
+    schema_module = import_module(module_name)
+    registry = getattr(schema_module, model_name)
+
+    if field_name:
+        registry = getattr(registry, field_name).through
+
     table_name = registry._meta.db_table
-
-
-
-
+
+    try:
+        if ln_setup.settings.instance.dialect == "postgresql":
+            buffer = io.StringIO()
+            with connection.cursor() as cursor:
+                cursor.copy_expert(
+                    f'COPY "{table_name}" TO STDOUT WITH (FORMAT CSV, HEADER TRUE)',
+                    buffer,
+                )
+            buffer.seek(0)
+            df = pd.read_csv(buffer)
+            df.to_parquet(directory / f"{table_name}.parquet", compression=None)
+            return (
+                f"{module_name}.{model_name}.{field_name}"
+                if field_name
+                else f"{module_name}.{model_name}"
+            )
+        else:
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore", message="Skipped unsupported reflection"
+                )
+                row_count = pd.read_sql(
+                    f"SELECT COUNT(*) as count FROM {table_name}",
+                    ln_setup.settings.instance.db,
+                ).iloc[0]["count"]
+
+                if row_count > chunk_size:
+                    chunk_files = []
+                    num_chunks = (row_count + chunk_size - 1) // chunk_size
+                    for chunk_id in range(num_chunks):
+                        offset = chunk_id * chunk_size
+                        df = pd.read_sql(
+                            f"SELECT * FROM {table_name} LIMIT {chunk_size} OFFSET {offset}",
+                            ln_setup.settings.instance.db,
+                        )
+                        chunk_file = (
+                            directory / f"{table_name}_chunk_{chunk_id}.parquet"
+                        )
+                        df.to_parquet(chunk_file, compression=None)
+                        chunk_files.append((table_name, chunk_file))
+                    return chunk_files
+                else:
+                    df = pd.read_sql_table(table_name, ln_setup.settings.instance.db)
+                    df.to_parquet(directory / f"{table_name}.parquet", compression=None)
+                    return (
+                        f"{module_name}.{model_name}.{field_name}"
+                        if field_name
+                        else f"{module_name}.{model_name}"
+                    )
+    except (ValueError, pd.errors.DatabaseError):
+        raise ValueError(
+            f"Table '{table_name}' was not found. The instance might need to be migrated."
+        ) from None
 
 
 def export_db(
     module_names: Sequence[str] | None = None,
     *,
     output_dir: str | Path = "./lamindb_export/",
+    max_workers: int = 8,
+    chunk_size: int = 500_000,
 ) -> None:
     """Export registry tables and many-to-many link tables to parquet files.
 
@@ -67,26 +150,64 @@ def export_db(
         module_names: Module names to export (e.g., ["lamindb", "bionty", "wetlab"]).
             Defaults to "lamindb" if not provided.
         output_dir: Directory path for exported parquet files.
+        max_workers: Number of parallel processes.
+        chunk_size: Number of rows per chunk for large tables.
     """
     directory = Path(output_dir)
     directory.mkdir(parents=True, exist_ok=True)
 
     module_names = module_names or ["lamindb"]
     modules = {name: _get_registries(name) for name in module_names}
-
+
+    tasks = []
+    for module_name, model_names in modules.items():
+        schema_module = import_module(module_name)
+        for model_name in model_names:
+            registry = getattr(schema_module, model_name)
+            tasks.append((module_name, model_name, None))
+            for field in registry._meta.many_to_many:
+                tasks.append((module_name, model_name, field.name))
+
+    chunk_files_by_table: dict[str, list[Path]] = {}
 
     with Progress() as progress:
-
-
-
-
-
-
-
-
-
-
-
+        task_id = progress.add_task("Exporting", total=len(tasks))
+
+        import multiprocessing
+
+        mp_context = multiprocessing.get_context("spawn")
+
+        with ProcessPoolExecutor(
+            max_workers=max_workers, mp_context=mp_context
+        ) as executor:
+            futures = {
+                executor.submit(_export_full_table, task, directory, chunk_size): task
+                for task in tasks
+            }
+
+            for future in as_completed(futures):
+                result = future.result()
+                if isinstance(result, list):
+                    for table_name, chunk_file in result:
+                        chunk_files_by_table.setdefault(table_name, []).append(
+                            chunk_file
+                        )
+                progress.advance(task_id)
+
+    for table_name, chunk_files in chunk_files_by_table.items():
+        merged_df = pd.concat([pd.read_parquet(f) for f in sorted(chunk_files)])
+        merged_df.to_parquet(directory / f"{table_name}.parquet", compression=None)
+        for chunk_file in chunk_files:
+            chunk_file.unlink()
+
+
+def _serialize_value(val):
+    """Convert value to JSON string if it's a dict, list, or numpy array, otherwise return as-is."""
+    if isinstance(val, (dict, list, np.ndarray)):
+        return json.dumps(
+            val, default=lambda o: o.tolist() if isinstance(o, np.ndarray) else None
+        )
+    return val
 
 
 def _import_registry(
@@ -96,8 +217,14 @@ def _import_registry(
 ) -> None:
     """Import a single registry table from parquet.
 
-
+    For PostgreSQL, uses COPY FROM which bypasses SQL parsing and writes directly to
+    table pages (20-50x faster than multi-row INSERTs).
+
+    For SQLite, uses multi-row INSERTs with dynamic chunking to stay under the 999
+    variable limit (2-5x faster than single-row INSERTs).
     """
+    from django.db import connection
+
     table_name = registry._meta.db_table
     parquet_file = directory / f"{table_name}.parquet"
 
@@ -113,13 +240,60 @@ def _import_registry(
 
     for col in df.columns:
         if df[col].dtype == "object":
-
-
-
+            mask = df[col].apply(lambda x: isinstance(x, (dict, list, np.ndarray)))
+            if mask.any():
+                df.loc[mask, col] = df.loc[mask, col].map(_serialize_value)
+
+    if if_exists == "append":
+        # Fill NULL values in NOT NULL columns to handle schema mismatches between postgres source and SQLite target
+        # This allows importing data where fields were nullable
+        for field in registry._meta.fields:
+            if field.column in df.columns and not field.null:
+                df[field.column] = df[field.column].fillna("")
+
+    if df.empty:
+        return
 
-
+    if if_exists == "append":
+        # Clear existing data before import
+        # When appending we would run into duplicate errors because of existing values like branches etc
+        with connection.cursor() as cursor:
+            cursor.execute(f'DELETE FROM "{table_name}"')
+
+    if connection.vendor == "postgresql":
+        columns = df.columns.tolist()
+        column_names = ", ".join(f'"{col}"' for col in columns)
+
+        buffer = io.StringIO()
+        df.to_csv(buffer, index=False, header=False, sep="\t", na_rep="\\N")
+        buffer.seek(0)
 
-
+        with connection.cursor() as cursor:
+            if if_exists == "replace":
+                cursor.execute(f'DELETE FROM "{table_name}"')
+            elif if_exists == "fail":
+                cursor.execute(f'SELECT COUNT(*) FROM "{table_name}"')
+                if cursor.fetchone()[0] > 0:
+                    raise ValueError(f"Table {table_name} already contains data")
+
+            cursor.copy_expert(
+                f"COPY \"{table_name}\" ({column_names}) FROM STDIN WITH (FORMAT CSV, DELIMITER E'\\t', NULL '\\N')",
+                buffer,
+            )
+    else:
+        num_cols = len(df.columns)
+        max_vars = 900  # SQLite has a limit of 999 variables per statement
+        chunksize = max(1, max_vars // num_cols)
+
+        # Always use append mode since we set up the tables from a fresh instance
+        df.to_sql(
+            table_name,
+            connection.connection,
+            if_exists=if_exists,
+            index=False,
+            method="multi",
+            chunksize=chunksize,
+        )
 
 
 def import_db(
@@ -137,6 +311,9 @@ def import_db(
         input_dir: Directory containing parquet files to import.
        module_names: Module names to import (e.g., ["lamindb", "bionty", "wetlab"]).
         if_exists: How to behave if table exists: 'fail', 'replace', or 'append'.
+            If set to 'replace', existing data is deleted and new data is imported. PKs and indices are not guaranteed to be preserved which can lead to write errors.
+            If set to 'append', new data is added to existing data without clearing the table. PKs and indices are preserved but database size will greatly increase.
+            If set to 'fail', raises an error if the table contains any data.
     """
     from django.db import connection
 
@@ -157,38 +334,47 @@ def import_db(
     modules = {name: _get_registries(name) for name in module_names}
     total_models = sum(len(models) for models in modules.values())
 
-
-
+    is_sqlite = ln_setup.settings.instance.dialect == "sqlite"
+
+    try:
         with connection.cursor() as cursor:
             if ln_setup.settings.instance.dialect == "postgresql":
                 cursor.execute("SET session_replication_role = 'replica'")
-            elif
+            elif is_sqlite:
                 cursor.execute("PRAGMA foreign_keys = OFF")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            for
-
-
-
-
-
-
+                # Disables fsync - OS buffers writes to disk, 10-50x faster but can corrupt DB on crash
+                cursor.execute("PRAGMA synchronous = OFF")
+                # Keeps rollback journal in RAM - 2-5x faster but cannot rollback on crash
+                cursor.execute("PRAGMA journal_mode = MEMORY")
+                # 64MB page cache for better performance on large imports
+                cursor.execute("PRAGMA cache_size = -64000")
+
+        with transaction.atomic():
+            if ln_setup.settings.instance.dialect == "postgresql":
+                with connection.cursor() as cursor:
+                    cursor.execute("SET CONSTRAINTS ALL DEFERRED")
+
+            with Progress() as progress:
+                task = progress.add_task("Importing", total=total_models)
+                for module_name, model_names in modules.items():
+                    schema_module = import_module(module_name)
+                    for model_name in model_names:
+                        progress.update(
+                            task, description=f"[cyan]{module_name}.{model_name}"
+                        )
+                        registry = getattr(schema_module, model_name)
+                        _import_registry(registry, directory, if_exists=if_exists)
+                        for field in registry._meta.many_to_many:
+                            link_orm = getattr(registry, field.name).through
+                            _import_registry(link_orm, directory, if_exists=if_exists)
+                        progress.advance(task)
     finally:
         with connection.cursor() as cursor:
             if ln_setup.settings.instance.dialect == "postgresql":
                 cursor.execute("SET session_replication_role = 'origin'")
-            elif
+            elif is_sqlite:
+                cursor.execute("PRAGMA synchronous = FULL")
+                cursor.execute("PRAGMA journal_mode = DELETE")
                 cursor.execute("PRAGMA foreign_keys = ON")
+                # Reclaim space from DELETEs
+                cursor.execute("VACUUM")
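End to end, the reworked io module exports tables in parallel worker processes and imports them with COPY (Postgres) or chunked multi-row INSERTs (SQLite). A minimal usage sketch against connected instances (slugs and paths are illustrative):

```python
import lamindb_setup as ln_setup
from lamindb_setup.io import export_db, import_db

# source: one parquet file per table; SQLite tables above chunk_size rows
# are exported in chunks and merged afterwards
ln_setup.connect("my-org/source-instance")
export_db(["lamindb", "bionty"], output_dir="./lamindb_export/", max_workers=8)

# target: 'replace' clears each table before loading the parquet contents
ln_setup.connect("my-org/target-instance")
import_db(
    input_dir="./lamindb_export/",
    module_names=["lamindb", "bionty"],
    if_exists="replace",
)
```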
{lamindb_setup-1.15.0.dist-info → lamindb_setup-1.15.2.dist-info}/RECORD
CHANGED

@@ -1,8 +1,8 @@
-lamindb_setup/__init__.py,sha256=
+lamindb_setup/__init__.py,sha256=T0CRWMhViQuKtrYM_foGZnApH3-uh0ta8YyS7rd9Fj0,3215
 lamindb_setup/_cache.py,sha256=pGvDNVHGx4HWr_6w5ajqEJOdysmaGc6F221qFnXkT-k,2747
 lamindb_setup/_check.py,sha256=28PcG8Kp6OpjSLSi1r2boL2Ryeh6xkaCL87HFbjs6GA,129
 lamindb_setup/_check_setup.py,sha256=ToKMxsUq8dQBQh8baOrNVlSb1iC8h4zTg5dV8wMu0W4,6760
-lamindb_setup/_connect_instance.py,sha256=
+lamindb_setup/_connect_instance.py,sha256=3dsaZ7LGzJYtOWDbi2RkiVJIJqdxy43suNjQ-6C96_U,17788
 lamindb_setup/_delete.py,sha256=KS3r-xGFuDmAbzPUy-9JR-YnPShYdaHjDRQrAmXQ0qM,5863
 lamindb_setup/_disconnect.py,sha256=FT8EpCm5XXDdhDH7QtAnkO3KPatq2HqT9VXGNjgJDbk,1232
 lamindb_setup/_django.py,sha256=uIQflpkp8l3axyPaKURlk3kacgpElVP5KOKmFxYSMGk,1454
@@ -12,30 +12,30 @@ lamindb_setup/_migrate.py,sha256=SN8uphuQX-8XShH5odLyzV8-eyXATDxB5hWoxwxmgBU,112
 lamindb_setup/_register_instance.py,sha256=RdUZxZWHLdbvdNZWpF8e0UWROb_T0cStWbzc5yUw34I,1047
 lamindb_setup/_schema.py,sha256=b3uzhhWpV5mQtDwhMINc2MabGCnGLESy51ito3yl6Wc,679
 lamindb_setup/_schema_metadata.py,sha256=af1Es7qFKGPRdNmk48384HiB2r-cDTdBPu0wB9qrga4,15526
-lamindb_setup/_set_managed_storage.py,sha256=
+lamindb_setup/_set_managed_storage.py,sha256=xQe5DXCRiQ5VseAjVC2Bki0wB0n0tSTchvVKSx9I6eo,3094
 lamindb_setup/_setup_user.py,sha256=ojq7UP2Aia8GTCr6m8fylFx9VSuvGu0HmvIJ8RzymE0,6108
 lamindb_setup/_silence_loggers.py,sha256=AKF_YcHvX32eGXdsYK8MJlxEaZ-Uo2f6QDRzjKFCtws,1568
 lamindb_setup/errors.py,sha256=lccF3X3M2mcbHVG_0HxfuJRFFpUE-42paccIxFOfefQ,1958
-lamindb_setup/io.py,sha256=
+lamindb_setup/io.py,sha256=jLJ3WbWWDctXmzl6XTES6_CutX3TsvG1BSW-Jd5UITo,15081
 lamindb_setup/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lamindb_setup/types.py,sha256=fuQxZJnrGYe7a_Ju9n1RqO-HhkOAr1l1xjpAg9dmBu8,605
-lamindb_setup/core/__init__.py,sha256=
-lamindb_setup/core/_aws_options.py,sha256=
+lamindb_setup/core/__init__.py,sha256=adZtacDwG2C0tgx-ypp9yOAqw9qaR-IRWkgLurKpXVE,668
+lamindb_setup/core/_aws_options.py,sha256=9kQ5BB-cuJQrlJRGNqMRe1m48dP67xMbefOJP2c9OQw,9674
 lamindb_setup/core/_aws_storage.py,sha256=QEtV-riQrwfivcwqHnXBbkJ-9YyNEXL4fLoCmOHZ1BI,2003
-lamindb_setup/core/_clone.py,sha256=
+lamindb_setup/core/_clone.py,sha256=Vikc0LckB7fWTS06ei5enErWqe1mx6XvP7Ikkj-fzdg,6493
 lamindb_setup/core/_deprecated.py,sha256=M3vpM4fZPOncxY2qsXQAPeaEph28xWdv7tYaueaUyAA,2554
 lamindb_setup/core/_docs.py,sha256=3k-YY-oVaJd_9UIY-LfBg_u8raKOCNfkZQPA73KsUhs,276
-lamindb_setup/core/_hub_client.py,sha256=
+lamindb_setup/core/_hub_client.py,sha256=vem145S5ppRPcWob7iclGhos8k-BfwJi9AI-l5PteDs,10481
 lamindb_setup/core/_hub_core.py,sha256=GAQK5XkHROIuqA-H8sOQZVlxvN4QIH_cmHY0TENnq2U,29090
 lamindb_setup/core/_hub_crud.py,sha256=j6516H82kLjFUNPqFGUINbDw9YbofMgjxadGzYb0OS4,6362
 lamindb_setup/core/_hub_utils.py,sha256=6dyDGyzYFgVfR_lE3VN3CP1jGp98gxPtr-T91PAP05U,2687
 lamindb_setup/core/_private_django_api.py,sha256=By63l3vIEtK1pq246FhHq3tslxsaTJGKm5VakYluWp4,2656
-lamindb_setup/core/_settings.py,sha256=
-lamindb_setup/core/_settings_instance.py,sha256=
-lamindb_setup/core/_settings_load.py,sha256=
-lamindb_setup/core/_settings_save.py,sha256=
-lamindb_setup/core/_settings_storage.py,sha256=
-lamindb_setup/core/_settings_store.py,sha256=
+lamindb_setup/core/_settings.py,sha256=QbTrSkkdx0u685NJ4neNtWzhdHoaGMKcIvrfFnctTQ4,15450
+lamindb_setup/core/_settings_instance.py,sha256=eDkueLK5JZOGFhZRbGa-OffS9iBFlxMp47vF_MfmCYI,24301
+lamindb_setup/core/_settings_load.py,sha256=NQDOln8e3qyGphk8ucU7mm3HVkCv4QV4rDZro3TIwfo,5183
+lamindb_setup/core/_settings_save.py,sha256=96mWdYLyfvbnG_ok_vK4x7jm-rtqcWCD1OHEt2QSAms,3328
+lamindb_setup/core/_settings_storage.py,sha256=mO5WVlybloyizKILGpbJf_Fe5yp32D-fYERURcncQ3o,15505
+lamindb_setup/core/_settings_store.py,sha256=auZssUBb6qE5oSqdGiHhqI2B46qSpegX89VwObPQksk,2601
 lamindb_setup/core/_settings_user.py,sha256=gFfyMf-738onbh1Mf4wsmLlenQJPtjQfpUgKnOlqc2o,1453
 lamindb_setup/core/_setup_bionty_sources.py,sha256=ox3X-SHiHa2lNPSWjwZhINypbLacX6kGwH6hVVrSFZc,1505
 lamindb_setup/core/cloud_sqlite_locker.py,sha256=H_CTUCjURFXwD1cCtV_Jn0_60iztZTkaesLLXIBgIxc,7204
@@ -43,8 +43,8 @@ lamindb_setup/core/django.py,sha256=2HwhtfUEX4peSkczc0VSfA-CpfCGL4vNgkPe9Pwu5kw,
 lamindb_setup/core/exceptions.py,sha256=qjMzqy_uzPA7mCOdnoWnS_fdA6OWbdZGftz-YYplrY0,84
 lamindb_setup/core/hashing.py,sha256=Y8Uc5uSGTfU6L2R_gb5w8DdHhGRog7RnkK-e9FEMjPY,3680
 lamindb_setup/core/types.py,sha256=T7NwspfRHgIIpYsXDcApks8jkOlGeGRW-YbVLB7jNIo,67
-lamindb_setup/core/upath.py,sha256=
-lamindb_setup-1.15.0.dist-info/LICENSE,sha256=UOZ1F5fFDe3XXvG4oNnkL1-Ecun7zpHzRxjp-XsMeAo,11324
-lamindb_setup-1.15.0.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
-lamindb_setup-1.15.0.dist-info/METADATA,sha256=
-lamindb_setup-1.15.0.dist-info/RECORD,,
+lamindb_setup/core/upath.py,sha256=DgdCBCTv1HRx94kOUw3DGVcsSJrY68S9aVwOT3ep1D8,35730
+lamindb_setup-1.15.2.dist-info/LICENSE,sha256=UOZ1F5fFDe3XXvG4oNnkL1-Ecun7zpHzRxjp-XsMeAo,11324
+lamindb_setup-1.15.2.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
+lamindb_setup-1.15.2.dist-info/METADATA,sha256=vWKrDEJ5PzX3wM-qkSJwTEPHWNXsw0zROmRNjNMpibw,1798
+lamindb_setup-1.15.2.dist-info/RECORD,,

{lamindb_setup-1.15.0.dist-info → lamindb_setup-1.15.2.dist-info}/LICENSE
File without changes

{lamindb_setup-1.15.0.dist-info → lamindb_setup-1.15.2.dist-info}/WHEEL
File without changes