lamindb 0.57.1__py3-none-any.whl → 0.58.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lamindb/__init__.py CHANGED
@@ -53,7 +53,7 @@ Static classes & modules:
 
 """
 
-__version__ = "0.57.1"  # denote a release candidate for 0.1.0 with 0.1rc1
+__version__ = "0.58.0"  # denote a release candidate for 0.1.0 with 0.1rc1
 
 import os as _os
 
lamindb/_dataset.py CHANGED
@@ -179,10 +179,7 @@ def __init__(
     if file is not None and file.run != run:
         _track_run_input(file, run=run)
     elif files is not None:
-        for file in files:
-            if file.run != run:
-                _track_run_input(file, run=run)
-        # there is not other possibility
+        _track_run_input(files, run=run)
 
 
 @classmethod  # type: ignore
@@ -197,6 +194,8 @@ def from_df(
     modality: Optional[Modality] = None,
     reference: Optional[str] = None,
     reference_type: Optional[str] = None,
+    version: Optional[str] = None,
+    is_new_version_of: Optional["File"] = None,
 ) -> "Dataset":
     """{}"""
     feature_set = FeatureSet.from_df(df, field=field, modality=modality)
@@ -205,7 +204,15 @@ def from_df(
     else:
         feature_sets = {}
     dataset = Dataset(
-        data=df, name=name, run=run, description=description, feature_sets=feature_sets
+        data=df,
+        name=name,
+        run=run,
+        description=description,
+        feature_sets=feature_sets,
+        reference=reference,
+        reference_type=reference_type,
+        version=version,
+        is_new_version_of=is_new_version_of,
     )
     return dataset
 
@@ -222,6 +229,8 @@ def from_anndata(
     modality: Optional[Modality] = None,
     reference: Optional[str] = None,
     reference_type: Optional[str] = None,
+    version: Optional[str] = None,
+    is_new_version_of: Optional["File"] = None,
 ) -> "Dataset":
     """{}"""
     if isinstance(adata, File):
@@ -237,6 +246,10 @@ def from_anndata(
         name=name,
         description=description,
         feature_sets=feature_sets,
+        reference=reference,
+        reference_type=reference_type,
+        version=version,
+        is_new_version_of=is_new_version_of,
     )
     return dataset
 
@@ -244,30 +257,48 @@ def from_anndata(
 # internal function, not exposed to user
 def from_files(files: Iterable[File]) -> Tuple[str, Dict[str, str]]:
     # assert all files are already saved
+    logger.debug("check not saved")
     saved = not any([file._state.adding for file in files])
     if not saved:
         raise ValueError("Not all files are yet saved, please save them")
     # query all feature sets of files
+    logger.debug("file ids")
     file_ids = [file.id for file in files]
     # query all feature sets at the same time rather than making a single query per file
+    logger.debug("feature_set_file_links")
     feature_set_file_links = File.feature_sets.through.objects.filter(
         file_id__in=file_ids
     )
-    feature_set_ids = [link.feature_set_id for link in feature_set_file_links]
-    feature_sets = FeatureSet.filter(id__in=feature_set_ids).all()
     feature_sets_by_slots = defaultdict(list)
+    logger.debug("slots")
     for link in feature_set_file_links:
-        feature_sets_by_slots[link.slot].append(
-            feature_sets.filter(id=link.feature_set_id).one()
-        )
+        feature_sets_by_slots[link.slot].append(link.feature_set_id)
     feature_sets_union = {}
-    for slot, feature_sets_slot in feature_sets_by_slots.items():
-        members = feature_sets_slot[0].members
-        for feature_set in feature_sets_slot[1:]:
-            members = members | feature_set.members
-        feature_sets_union[slot] = FeatureSet(members)
+    logger.debug("union")
+    for slot, feature_set_ids_slot in feature_sets_by_slots.items():
+        feature_set_1 = FeatureSet.filter(id=feature_set_ids_slot[0]).one()
+        related_name = feature_set_1._get_related_name()
+        features_registry = getattr(FeatureSet, related_name).field.model
+        start_time = logger.debug("run filter")
+        # this way of writing the __in statement turned out to be the fastest
+        # evaluated on a link table with 16M entries connecting 500 feature sets with
+        # 60k genes
+        feature_ids = (
+            features_registry.feature_sets.through.objects.filter(
+                featureset_id__in=feature_set_ids_slot
+            )
+            .values(f"{features_registry.__name__.lower()}_id")
+            .distinct()
+        )
+        start_time = logger.debug("done, start evaluate", time=start_time)
+        features = features_registry.filter(id__in=feature_ids)
+        feature_sets_union[slot] = FeatureSet(
+            features, type=feature_set_1.type, modality=feature_set_1.modality
+        )
+        start_time = logger.debug("done", time=start_time)
     # validate consistency of hashes
     # we do not allow duplicate hashes
+    logger.debug("hashes")
     hashes = [file.hash for file in files]
     if len(hashes) != len(set(hashes)):
         seen = set()
@@ -276,7 +307,9 @@ def from_files(files: Iterable[File]) -> Tuple[str, Dict[str, str]]:
             "Please pass files with distinct hashes: these ones are non-unique"
             f" {non_unique}"
         )
+    time = logger.debug("hash")
     hash = hash_set(set(hashes))
+    logger.debug("done", time=time)
     return hash, feature_sets_union
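
Note on the rewritten union above: per the inline comment ("this way of writing the __in statement turned out to be the fastest ..."), from_files now collects feature-set ids per slot and lets the database resolve membership in a single subquery instead of instantiating a FeatureSet per link row. A minimal, hypothetical sketch of that query pattern, assuming an initialized instance with the bionty schema and its Gene registry; only the through-table field names are taken from this diff:

import lnschema_bionty as lb  # assumption: the bionty schema is mounted on the instance

# feature-set ids collected per slot, as from_files() does above
feature_set_ids = ["<feature-set-id-1>", "<feature-set-id-2>"]  # hypothetical ids

# un-evaluated queryset projecting the distinct gene ids of those feature sets;
# the projected column follows the registry name, here "gene_id"
gene_ids = (
    lb.Gene.feature_sets.through.objects.filter(featureset_id__in=feature_set_ids)
    .values("gene_id")
    .distinct()
)

# passing the queryset (not a materialized list) into __in makes the database
# run one subquery instead of round-tripping ids through Python
genes = lb.Gene.filter(id__in=gene_ids)
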
lamindb/_feature_set.py CHANGED
@@ -239,11 +239,16 @@ def members(self) -> "QuerySet":
         # this should return a queryset and not a list...
         # need to fix this
         return self._features[1]
+    related_name = self._get_related_name()
+    return self.__getattribute__(related_name).all()
+
+
+def _get_related_name(self: FeatureSet) -> str:
     key_split = self.registry.split(".")
     orm_name_with_schema = f"{key_split[0]}.{key_split[1]}"
     feature_sets_related_models = dict_related_model_to_related_name(self)
     related_name = feature_sets_related_models.get(orm_name_with_schema)
-    return self.__getattribute__(related_name).all()
+    return related_name
 
 
 METHOD_NAMES = [
@@ -266,3 +271,4 @@ for name in METHOD_NAMES:
     attach_func_to_class_method(name, FeatureSet, globals())
 
 setattr(FeatureSet, "members", members)
+setattr(FeatureSet, "_get_related_name", _get_related_name)
lamindb/_file.py CHANGED
@@ -553,9 +553,19 @@ def from_df(
     description: Optional[str] = None,
     run: Optional[Run] = None,
     modality: Optional[Modality] = None,
+    version: Optional[str] = None,
+    is_new_version_of: Optional["File"] = None,
 ) -> "File":
     """{}"""
-    file = File(data=df, key=key, run=run, description=description, log_hint=False)
+    file = File(
+        data=df,
+        key=key,
+        run=run,
+        description=description,
+        version=version,
+        is_new_version_of=is_new_version_of,
+        log_hint=False,
+    )
     feature_set = FeatureSet.from_df(df, field=field, modality=modality)
     if feature_set is not None:
         file._feature_sets = {"columns": feature_set}
@@ -615,9 +625,19 @@ def from_anndata(
     description: Optional[str] = None,
     run: Optional[Run] = None,
     modality: Optional[Modality] = None,
+    version: Optional[str] = None,
+    is_new_version_of: Optional["File"] = None,
 ) -> "File":
     """{}"""
-    file = File(data=adata, key=key, run=run, description=description, log_hint=False)
+    file = File(
+        data=adata,
+        key=key,
+        run=run,
+        description=description,
+        version=version,
+        is_new_version_of=is_new_version_of,
+        log_hint=False,
+    )
     file._feature_sets = parse_feature_sets_from_anndata(adata, field, modality)
     return file
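
Both constructors now simply forward the same versioning arguments that File() itself accepts. A hedged usage sketch (assumes a loaded lamindb instance; the DataFrame and descriptions are made up):

import lamindb as ln
import pandas as pd

df = pd.DataFrame({"CD8A": [1, 2], "CD4": [3, 4]})

# first registered version of this table
file_v1 = ln.File.from_df(df, description="T cell marker counts", version="1")
file_v1.save()

# register an updated table as a new version of the first file
df2 = df.assign(CD3E=[5, 6])
file_v2 = ln.File.from_df(df2, is_new_version_of=file_v1, version="2")
file_v2.save()
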
lamindb/_parents.py CHANGED
@@ -201,13 +201,18 @@ def _get_parents(record: Registry, field: str, distance: int, children: bool = F
     d = 2
     while d < distance:
         condition = f"{key}__{condition}"
-        records = model.filter(**{condition: record.__getattribute__(field)}).all()
+        records = model.filter(**{condition: record.__getattribute__(field)})
 
-        if len(records) == 0:
-            return results
+        try:
+            if not records.exists():
+                return results
 
-        results = results | records
-        d += 1
+            results = results | records.all()
+            d += 1
+        except Exception:
+            # For OperationalError:
+            # SQLite does not support joins containing more than 64 tables
+            return results
     return results
217
 
213
218
 
lamindb/dev/_data.py CHANGED
@@ -308,24 +308,48 @@ def add_labels(
 
 
 def _track_run_input(
-    data: Data, is_run_input: Optional[bool] = None, run: Optional[Run] = None
+    data: Union[Data, Iterable[Data]],
+    is_run_input: Optional[bool] = None,
+    run: Optional[Run] = None,
 ):
     if run is None:
         run = run_context.run
+    # consider that data is an iterable of Data
+    data_iter: Iterable[Data] = [data] if isinstance(data, Data) else data
     track_run_input = False
+    input_data = []
+    if run is not None:
+        # avoid cycles: data can't be both input and output
+        input_data = [data for data in data_iter if data.run_id != run.id]
+        input_data_ids = [data.id for data in data_iter if data.run_id != run.id]
+    if input_data:
+        data_class_name = input_data[0].__class__.__name__.lower()
+    # let us first look at the case in which the user does not
+    # provide a boolean value for `is_run_input`
+    # hence, we need to determine whether we actually want to
+    # track a run or not
     if is_run_input is None:
-        # we need a global run context for this to work
-        if run is not None:
-            # avoid cycles (a file is both input and output)
-            if data.run != run:
+        # we don't have a run record
+        if run is None:
+            if settings.track_run_inputs:
+                logger.hint(
+                    "you can auto-track this file as a run input by calling"
+                    " `ln.track()`"
+                )
+        # assume we have a run record
+        else:
+            # assume there is non-cyclic candidate input data
+            if input_data:
                 if settings.track_run_inputs:
                     transform_note = ""
-                    if data.transform is not None:
-                        transform_note = (
-                            f", adding parent transform {data.transform.id}"
-                        )
+                    if len(input_data) == 1:
+                        if input_data[0].transform is not None:
+                            transform_note = (
+                                ", adding parent transform"
+                                f" {input_data[0].transform.id}"
+                            )
                     logger.info(
-                        f"adding file {data.id} as input for run"
+                        f"adding {data_class_name} {input_data_ids} as input for run"
                         f" {run.id}{transform_note}"
                     )
                     track_run_input = True
@@ -333,12 +357,6 @@ def _track_run_input(
                     logger.hint(
                         "track this file as a run input by passing `is_run_input=True`"
                     )
-        else:
-            if settings.track_run_inputs:
-                logger.hint(
-                    "you can auto-track this file as a run input by calling"
-                    " `ln.track()`"
-                )
     else:
         track_run_input = is_run_input
     if track_run_input:
@@ -348,12 +366,22 @@ def _track_run_input(
                 " run object via `run.input_files.add(file)`"
             )
         # avoid adding the same run twice
-        # avoid cycles (a file is both input and output)
-        if not data.input_of.contains(run) and data.run != run:
-            run.save()
-            data.input_of.add(run)
-            if data.transform is not None:
-                run.transform.parents.add(data.transform)
+        run.save()
+        if data_class_name == "file":
+            LinkORM = run.input_files.through
+            links = [
+                LinkORM(run_id=run.id, file_id=data_id) for data_id in input_data_ids
+            ]
+        else:
+            LinkORM = run.input_datasets.through
+            links = [
+                LinkORM(run_id=run.id, dataset_id=data_id) for data_id in input_data_ids
+            ]
+        LinkORM.objects.bulk_create(links, ignore_conflicts=True)
+        # generalize below for more than one data batch
+        if len(input_data) == 1:
+            if input_data[0].transform is not None:
+                run.transform.parents.add(input_data[0].transform)
 
 
 @property  # type: ignore
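
In practice the rework means a batch of files becomes run inputs through one bulk insert instead of one .add() call per file. A hedged sketch of the underlying pattern, using only objects named in the diff (assumes an initialized instance with at least one Run and a few saved Files):

import lamindb as ln

run = ln.Run.filter().first()        # assumption: a run already exists
files = ln.File.filter().all()[:3]   # assumption: a few files are saved

LinkORM = run.input_files.through    # the File-Run link (through) model
links = [LinkORM(run_id=run.id, file_id=file.id) for file in files]
# ignore_conflicts keeps the call idempotent: already-existing links are skipped
LinkORM.objects.bulk_create(links, ignore_conflicts=True)
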
lamindb/dev/datasets/_core.py CHANGED
@@ -156,7 +156,7 @@ anndata_mouse_sc_lymph_node(
         populate_registries: pre-populate metadata records to simulate existing registries  # noqa
     """
     filepath, _ = urlretrieve("https://lamindb-test.s3.amazonaws.com/E-MTAB-8414.h5ad")
-    adata = ad.read(filepath)
+    adata = ad.read_h5ad(filepath)
 
     # The column names are a bit lengthy, let's abbreviate them:
     adata.obs.columns = (
@@ -253,7 +253,7 @@ def anndata_pbmc68k_reduced() -> ad.AnnData:
     filepath, _ = urlretrieve(
         "https://lamindb-dev-datasets.s3.amazonaws.com/scrnaseq_pbmc68k_tiny.h5ad"
     )
-    return ad.read(filepath)
+    return ad.read_h5ad(filepath)
 
 
 def anndata_file_pbmc68k_test() -> Path:
@@ -283,7 +283,7 @@ def anndata_pbmc3k_processed() -> ad.AnnData:  # pragma: no cover
     filepath, _ = urlretrieve(
         "https://lamindb-test.s3.amazonaws.com/scrnaseq_scanpy_pbmc3k_processed.h5ad"
     )
-    pbmc3k = ad.read(filepath)
+    pbmc3k = ad.read_h5ad(filepath)
     pbmc3k.obs.rename(columns={"louvain": "cell_type"}, inplace=True)
     return pbmc3k
 
@@ -306,8 +306,11 @@ def anndata_human_immune_cells(
         adata.write('human_immune.h5ad')
     """
     filepath, _ = urlretrieve("https://lamindb-test.s3.amazonaws.com/human_immune.h5ad")
-    adata = ad.read(filepath)
+    adata = ad.read_h5ad(filepath)
     adata.var.drop(columns=["gene_symbols", "feature_name"], inplace=True)
+    adata.uns.pop("cell_type_ontology_term_id_colors")
+    adata.uns.pop("title")
+    adata.uns.pop("schema_version")
     adata.obs.columns = adata.obs.columns.str.replace("donor_id", "donor")
     columns = [col for col in adata.obs.columns if "ontology_term" not in col]
     adata.obs = adata.obs[columns]
@@ -378,7 +381,7 @@ def anndata_suo22_Visium10X():  # pragma: no cover
     )
     Path("suo22/").mkdir(exist_ok=True)
     filepath = Path(filepath).rename("suo22/Visium10X_data_LI_subset.h5ad")
-    return ad.read(filepath)
+    return ad.read_h5ad(filepath)
 
 
 def mudata_papalexi21_subset():  # pragma: no cover
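
Apart from dropping a few uns fields, the only change in these dataset helpers is the switch from the generic ad.read alias to the explicit .h5ad reader; for reference:

import anndata as ad

# explicit reader for .h5ad files; equivalent to the previous ad.read(filepath) calls
adata = ad.read_h5ad("human_immune.h5ad")  # assumes the file was downloaded as above
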
lamindb/dev/storage/_backed_access.py CHANGED
@@ -16,13 +16,22 @@ from anndata._io.h5ad import read_dataframe_legacy as read_dataframe_legacy_h5
 from anndata._io.specs.registry import get_spec, read_elem, read_elem_partial
 from anndata.compat import _read_attr
 from fsspec.core import OpenFile
+from lamin_utils import logger
 from lamindb_setup.dev.upath import UPath, infer_filesystem
 from lnschema_core import File
 from packaging import version
 
 from lamindb.dev.storage.file import filepath_from_file
 
-if version.parse(anndata_version) < version.parse("0.10.0"):
+anndata_version_parse = version.parse(anndata_version)
+
+if anndata_version_parse < version.parse("0.10.0"):
+    if anndata_version_parse < version.parse("0.9.1"):
+        logger.warning(
+            "Full backed capabilities are not available for this version of anndata,"
+            " please install anndata>=0.9.1."
+        )
+
     from anndata._core.sparse_dataset import SparseDataset
 
     # try csr for groups with no encoding_type
{lamindb-0.57.1.dist-info → lamindb-0.58.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lamindb
-Version: 0.57.1
+Version: 0.58.0
 Summary: A data framework for biology.
 Author-email: Lamin Labs <open-source@lamin.ai>
 Requires-Python: >=3.8
@@ -8,14 +8,14 @@ Description-Content-Type: text/markdown
 Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
-Requires-Dist: lnschema_core==0.52.0
+Requires-Dist: lnschema_core==0.52.1
 Requires-Dist: lamindb_setup==0.55.6
-Requires-Dist: lamin_utils==0.11.4
+Requires-Dist: lamin_utils==0.11.5
 Requires-Dist: rapidfuzz
 Requires-Dist: pyarrow
 Requires-Dist: typing_extensions!=4.6.0
 Requires-Dist: python-dateutil
-Requires-Dist: anndata>=0.9.1,<=0.10.1
+Requires-Dist: anndata>=0.8.0,<=0.10.2
 Requires-Dist: fsspec
 Requires-Dist: pandas
 Requires-Dist: graphviz
@@ -24,7 +24,7 @@ Requires-Dist: urllib3<2 ; extra == "aws"
 Requires-Dist: boto3==1.28.17 ; extra == "aws"
 Requires-Dist: aiobotocore==2.5.4 ; extra == "aws"
 Requires-Dist: fsspec[s3]==2023.9.0 ; extra == "aws"
-Requires-Dist: lnschema_bionty==0.33.0 ; extra == "bionty"
+Requires-Dist: lnschema_bionty==0.34.0 ; extra == "bionty"
 Requires-Dist: pandas<2 ; extra == "dev"
 Requires-Dist: pre-commit ; extra == "dev"
 Requires-Dist: nox ; extra == "dev"
{lamindb-0.57.1.dist-info → lamindb-0.58.0.dist-info}/RECORD RENAMED
@@ -1,12 +1,12 @@
-lamindb/__init__.py,sha256=wouDbXipZRBI2h6a5u0nS7oDs5Z0C-c6YxIzNeWiGUU,2870
-lamindb/_dataset.py,sha256=mBoeQj7KhMxAnb9wmgu7MXxlbPRGQPsGIcBVaiAZjQ8,13453
+lamindb/__init__.py,sha256=BWW5E_HjLH9kJ3Fo6sUfWV2efj0pytTAYmPxnJ-iheA,2870
+lamindb/_dataset.py,sha256=6CJHTiwe1lWLUfPHQzJpq-hj8zRjNTCWLkPmrtxpP7Q,14712
 lamindb/_delete.py,sha256=wiYmYnvIEHrDdmw1NiXyfCY9mBt-FI5XNFi5jyR_mkA,1968
 lamindb/_feature.py,sha256=5gsa7zsMVVtm1DID4dF3Vwo5llWyY1dH3Hg5hjaIrQk,5554
-lamindb/_feature_set.py,sha256=DWDrLlNfsR726IdGw93CcTxSxrfmZtGSulZKCmUv4MQ,9055
-lamindb/_file.py,sha256=0TIsPvOcWXjtgCwTOoeot1o0Gs8ebkcDFQenMSgxXuM,35818
+lamindb/_feature_set.py,sha256=G63pwauDQ7jg4ydFCQLhu-lgO6tm56iQwUdRuNHeKHY,9233
+lamindb/_file.py,sha256=V85tryLD0HsZiPQV4KNmcs3Gdi0qf8dZu27rl2G9D7E,36218
 lamindb/_filter.py,sha256=fNvPbLeOxYzvNKPcFYiFz3P7bkD5_84Xh8HHAoLNdas,1716
 lamindb/_from_values.py,sha256=GitpmKOqV6YHJggaCnJgGsRIHI_bnuLRVE2oo9W-SgE,11613
-lamindb/_parents.py,sha256=-SRNd4O7TUmCIHYysjS00uK1QKODF4UJSXK_T_1KOEI,13212
+lamindb/_parents.py,sha256=VT_gtomf1Erd_AKLVd1uLwigeDqMHtcaAbma3_AbQAw,13408
 lamindb/_query_manager.py,sha256=MXueabWHqft7GWNkzmWbhfTqdk-0mKU7nWrhXG6wpYQ,3693
 lamindb/_query_set.py,sha256=Lf7vLvOsEfUWRQ3iImSj4eQPmUK1KCgeoKS_m66Lp7o,10279
 lamindb/_registry.py,sha256=_pdlEvAtemiQCzpK2s14MsTKkLqE6ORDjhDs7ABs4i4,14893
@@ -19,7 +19,7 @@ lamindb/_utils.py,sha256=LGdiW4k3GClLz65vKAVRkL6Tw-Gkx9DWAdez1jyA5bE,428
 lamindb/_validate.py,sha256=3powFmYcNop2R6ijt2v3I_vPn4TD9ET4DJkW8uzQt_U,13719
 lamindb/_view.py,sha256=bzx6e-Cif2CmDQkOu6jMrq_d5rsu6g7hhdaK_sYBv_Y,2150
 lamindb/dev/__init__.py,sha256=Ja96dxb0t7raGsCr8QxqCabyEzIxeVGlL_IgmhxdsB8,1010
-lamindb/dev/_data.py,sha256=-0Bz2wg98-BTzpV_5lUZCrRk9yeU1xqCUrjELomJb60,13818
+lamindb/dev/_data.py,sha256=6TLM2tVWV7xMYzWNA14EsdyhSoRjK7IK6EU4VuQoC-g,15071
 lamindb/dev/_feature_manager.py,sha256=IojA1TPH3ZPlPghV_d1MIPIxdIcYO15RenI_o7YjmAM,8049
 lamindb/dev/_label_manager.py,sha256=5R2rZzdLgiZHEzXyilSjK3J7kHDHUOhneZJuSh--qQY,7339
 lamindb/dev/_priors.py,sha256=eSZEEijmeFs3zcrU27r3T2sSGdsK-cvy7vl6ukDYaU8,785
@@ -31,18 +31,18 @@ lamindb/dev/hashing.py,sha256=IlNrHy-a9NqB0vfqiwIh4sjt40CvaiZIvfK6gMnkxDo,1381
 lamindb/dev/types.py,sha256=svg5S_aynuGfbEOsbmqkR_gF9d9YMzfOkcvGN37Rzvg,232
 lamindb/dev/versioning.py,sha256=XF7X-Ngat_Ggca7FdtZa5ElOKlOgoxDtxwZlhsCTJZU,2788
 lamindb/dev/datasets/__init__.py,sha256=clbWOmg4K8Rh94OPFtJasNKdtUHHvR_Lx11jZWMqfok,1350
-lamindb/dev/datasets/_core.py,sha256=T1XE9tr3uVLnyA2W9_xuF60EQH3WVaS9GBl69cB-KDQ,18844
+lamindb/dev/datasets/_core.py,sha256=-g7wWWYHrejlkSQS04Xafi_w5OjDv9ItHMUFNdHsXlM,18987
 lamindb/dev/datasets/_fake.py,sha256=S8mNho-oSh1M9x9oOSsUBLLHmBAegsOLlFk6LnF81EA,942
 lamindb/dev/storage/__init__.py,sha256=mFvsMkAHHmO_xTM1UI-WGynDObnH0RCI2TXtFGhYfv8,392
 lamindb/dev/storage/_anndata_sizes.py,sha256=0XVzA6AQeVGPaGPrhGusKyxFgFjeo3qSN29hxb8D5E8,993
-lamindb/dev/storage/_backed_access.py,sha256=wSWIUzb3zsb2seZXMbGPTwG-mzFKepUlbhx4b58ZIHo,21850
+lamindb/dev/storage/_backed_access.py,sha256=IWKLxvSWvzXGgysx1FLhfQ3mEoTxhtQXCvqBxpIpucw,22150
 lamindb/dev/storage/_zarr.py,sha256=7W1Jos1QOOF3f41uML_arQoDTNPZVpRyP2m3SLWaCAo,2766
 lamindb/dev/storage/file.py,sha256=xfeU8X1ty80-PhnHOpupBJfibZKhp6MPLA2IjYdTBoY,7982
 lamindb/dev/storage/object.py,sha256=KGuOwwYuN2yCJxTXn9v0LanC0fjKwy_62P-WksHcf40,1140
 lamindb/setup/__init__.py,sha256=8-0F2C4Glx23-b8-D_1CBGgRBM5PppVhazhoXZYOLsg,275
 lamindb/setup/dev/__init__.py,sha256=tBty426VGF2PGqqt2XuNU-WgvOrbOp1aZBDowjLuzgA,242
-lamindb-0.57.1.dist-info/entry_points.txt,sha256=MioM8vSpKwXxY3geNBwjo1wnwy1l15WjJYlI3lpKuZI,53
-lamindb-0.57.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-lamindb-0.57.1.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
-lamindb-0.57.1.dist-info/METADATA,sha256=vcUgEazdnK9v9SSxZnFU5CNBqKmWUSQCecn5fRhH0yk,3030
-lamindb-0.57.1.dist-info/RECORD,,
+lamindb-0.58.0.dist-info/entry_points.txt,sha256=MioM8vSpKwXxY3geNBwjo1wnwy1l15WjJYlI3lpKuZI,53
+lamindb-0.58.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+lamindb-0.58.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
+lamindb-0.58.0.dist-info/METADATA,sha256=thZUko5v-kPa9zVxlYdnMZZrh2CXnZrsrC8o0z_Dsts,3030
+lamindb-0.58.0.dist-info/RECORD,,