lamindb 0.76.4__py3-none-any.whl → 0.76.6__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
lamindb/_transform.py CHANGED
@@ -37,8 +37,25 @@ def __init__(transform: Transform, *args, **kwargs):
         "Only name, key, version, type, revises, reference, "
         f"reference_type can be passed, but you passed: {kwargs}"
     )
-    if revises is None and key is not None:
-        revises = Transform.filter(key=key).order_by("-created_at").first()
+    if revises is None:
+        if key is not None:
+            revises = (
+                Transform.filter(key=key, is_latest=True)
+                .order_by("-created_at")
+                .first()
+            )
+        elif uid is not None:
+            revises = (
+                Transform.filter(uid__startswith=uid[:-4], is_latest=True)
+                .order_by("-created_at")
+                .first()
+            )
+        if revises is not None and uid is not None and uid == revises.uid:
+            from ._record import init_self_from_db, update_attributes
+
+            init_self_from_db(transform, revises)
+            update_attributes(transform, {"name": name})
+            return None
     if revises is not None and key is not None and revises.key != key:
         note = message_update_key_in_version_family(
             suid=revises.stem_uid,
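
With this change, instantiating a `Transform` whose `key` (or `uid` stem) matches an existing record resolves `revises` to the latest revision instead of silently starting a new lineage, and an exact `uid` match returns the existing record updated in place. A rough usage sketch (the names and the final check are illustrative, not taken from this diff):

>>> import lamindb as ln
>>> t1 = ln.Transform(name="Analysis", key="analysis.py").save()
>>> t2 = ln.Transform(name="Analysis, tweaked", key="analysis.py")
>>> assert t2.revises == t1  # auto-set from the latest revision of "analysis.py"
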
lamindb/core/__init__.py CHANGED
@@ -66,8 +66,6 @@ from lamin_utils._inspect import InspectResult
 from lnschema_core.models import (
     CanValidate,
     FeatureValue,
-    HasFeatures,
-    HasParams,
     HasParents,
     IsVersioned,
     ParamValue,
lamindb/core/_context.py CHANGED
@@ -2,15 +2,16 @@ from __future__ import annotations
 
 import builtins
 import hashlib
-import os
 from datetime import datetime, timezone
 from pathlib import Path, PurePath
 from typing import TYPE_CHECKING
 
+import lamindb_setup as ln_setup
 from lamin_utils import logger
 from lamindb_setup.core.hashing import hash_file
 from lnschema_core import Run, Transform, ids
 from lnschema_core.ids import base62_12
+from lnschema_core.models import format_field_value
 from lnschema_core.users import current_user_id
 
 from ._settings import settings
@@ -111,7 +112,18 @@ def pretty_pypackages(dependencies: dict) -> str:
 class Context:
     """Run context.
 
-    Bundles all metadata to track run contexts.
+    Enables convenient data lineage tracking by managing a transform & run
+    upon :meth:`~lamindb.core.Context.track` & :meth:`~lamindb.core.Context.finish`.
+
+    Examples:
+
+        Is typically used via :class:`~lamindb.context`:
+
+        >>> import lamindb as ln
+        >>> ln.context.track()
+        >>> # do things while tracking data lineage
+        >>> ln.context.finish()
+
     """
 
     def __init__(self):
@@ -165,42 +177,35 @@ class Context:
         self,
         *,
         params: dict | None = None,
-        transform: Transform | None = None,
         new_run: bool | None = None,
         path: str | None = None,
+        transform: Transform | None = None,
     ) -> None:
-        """Track notebook or script run.
-
-        Creates or loads a global :class:`~lamindb.Run` that enables data
-        lineage tracking.
+        """Starts data lineage tracking for a run.
 
-        Saves source code and compute environment.
+        - sets :attr:`~lamindb.core.Context.transform` &
+          :attr:`~lamindb.core.Context.run` by creating or loading `Transform` &
+          `Run` records
+        - saves compute environment as a `requirements.txt` file: `run.environment`
 
-        If :attr:`~lamindb.core.Settings.sync_git_repo` is set, will first check
-        whether the script exists in the git repository and add a link.
+        If :attr:`~lamindb.core.Settings.sync_git_repo` is set, checks whether a
+        script-like transform exists in a git repository and links it.
 
         Args:
             params: A dictionary of parameters to track for the run.
-            transform: Can be of type `"pipeline"` or `"notebook"`
-                (:class:`~lamindb.core.types.TransformType`).
             new_run: If `False`, loads latest run of transform
                 (default notebook), if `True`, creates new run (default pipeline).
             path: Filepath of notebook or script. Only needed if it can't be
                 automatically detected.
+            transform: Useful to track an abstract pipeline.
 
         Examples:
 
-            To track a notebook or script, call:
+            To track the run of a notebook or script, call:
 
             >>> import lamindb as ln
             >>> ln.context.track()
 
-            If you'd like to track an abstract pipeline run, pass a
-            :class:`~lamindb.Transform` object of ``type`` ``"pipeline"``:
-
-            >>> ln.Transform(name="Cell Ranger", version="2", type="pipeline").save()
-            >>> transform = ln.Transform.get(name="Cell Ranger", version="2")
-            >>> ln.context.track(transform=transform)
         """
         self._path = None
         if transform is None:
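
The pipeline example dropped from the docstring still illustrates the `transform` argument; adapted from the removed text, it presumably now reads along these lines (the `params` value is illustrative):

>>> import lamindb as ln
>>> transform = ln.Transform(name="Cell Ranger", version="2", type="pipeline").save()
>>> ln.context.track(transform=transform, params={"sample": "S1"})
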
@@ -237,7 +242,7 @@ class Context:
         ):
             better_version = bump_version_function(self.version)
             raise SystemExit(
-                f"Version '{self.version}' is already taken by Transform('{transform.uid}'); please set another version, e.g., ln.context.version = '{better_version}'"
+                f"Version '{self.version}' is already taken by Transform(uid='{transform.uid}'); please set another version, e.g., ln.context.version = '{better_version}'"
             )
         elif transform_settings_are_set:
             stem_uid, self.version = (
@@ -288,10 +293,10 @@ class Context:
             transform_exists = Transform.filter(id=transform.id).first()
             if transform_exists is None:
                 transform.save()
-                self._logging_message += f"created Transform('{transform.uid}')"
+                self._logging_message += f"created Transform(uid='{transform.uid}')"
                 transform_exists = transform
             else:
-                self._logging_message += f"loaded Transform('{transform.uid}')"
+                self._logging_message += f"loaded Transform(uid='{transform.uid}')"
             self._transform = transform_exists
 
         if new_run is None:  # for notebooks, default to loading latest runs
@@ -306,7 +311,9 @@ class Context:
             )
             if run is not None:  # loaded latest run
                 run.started_at = datetime.now(timezone.utc)  # update run time
-                self._logging_message += f" & loaded Run('{run.started_at}')"
+                self._logging_message += (
+                    f" & loaded Run(started_at={format_field_value(run.started_at)})"
+                )
 
         if run is None:  # create new run
             run = Run(
@@ -314,7 +321,9 @@ class Context:
                 params=params,
             )
             run.started_at = datetime.now(timezone.utc)
-            self._logging_message += f" & created Run('{run.started_at}')"
+            self._logging_message += (
+                f" & created Run(started_at={format_field_value(run.started_at)})"
+            )
         # can only determine at ln.finish() if run was consecutive in
         # interactive session, otherwise, is consecutive
         run.is_consecutive = True if is_run_from_ipython else None
@@ -427,7 +436,7 @@ class Context:
                 reference_type=transform_ref_type,
                 type=transform_type,
             ).save()
-            self._logging_message += f"created Transform('{transform.uid}')"
+            self._logging_message += f"created Transform(uid='{transform.uid}')"
         else:
             uid = transform.uid
             # check whether the transform file has been renamed
@@ -468,7 +477,9 @@ class Context:
                     if condition:
                         bump_revision = True
                     else:
-                        self._logging_message += f"loaded Transform('{transform.uid}')"
+                        self._logging_message += (
+                            f"loaded Transform(uid='{transform.uid}')"
+                        )
             if bump_revision:
                 change_type = (
                     "Re-running saved notebook"
@@ -485,13 +496,34 @@ class Context:
                         f'ln.context.uid = "{suid}{new_vuid}"'
                     )
             else:
-                self._logging_message += f"loaded Transform('{transform.uid}')"
+                self._logging_message += f"loaded Transform(uid='{transform.uid}')"
         self._transform = transform
 
-    def finish(self) -> None:
-        """Mark a tracked run as finished.
+    def finish(self, ignore_non_consecutive: None | bool = None) -> None:
+        """Mark the run context as finished.
+
+        - writes a timestamp: `run.finished_at`
+        - saves the source code: `transform.source_code`
+
+        When called in the last cell of a notebook:
+
+        - prompts for user input if not consecutively executed
+        - requires to save the notebook in your editor
+        - saves a run report: `run.report`
+
+        Args:
+            ignore_non_consecutive: Whether to ignore if a notebook was non-consecutively executed.
+
+        Examples:
+
+            >>> import lamindb as ln
+            >>> ln.context.track()
+            >>> # do things while tracking data lineage
+            >>> ln.context.finish()
+
+        See Also:
+            `lamin save script.py` or `lamin save notebook.ipynb` → `docs </cli#lamin-save>`__
 
-        Saves source code and, for notebooks, a run report to your default storage location.
         """
         from lamindb._finish import save_context_core
 
@@ -510,18 +542,16 @@ class Context:
             # nothing else to do
             return None
         if is_run_from_ipython:  # notebooks
-            if (
-                get_seconds_since_modified(context._path) > 3
-                and os.getenv("LAMIN_TESTING") is None
-            ):
+            if get_seconds_since_modified(context._path) > 2 and not ln_setup._TESTING:
                 raise NotebookFileNotSavedToDisk(
-                    "Please save the notebook manually in your editor right before running `ln.finish()`"
+                    "Please save the notebook manually in your editor right before running `ln.context.finish()`"
                 )
         save_context_core(
             run=context.run,
             transform=context.run.transform,
             filepath=context._path,
             finished_at=True,
+            ignore_non_consecutive=ignore_non_consecutive,
        )
 
 
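If a notebook was deliberately executed out of order, the new flag can be passed explicitly (a sketch based on the signature above):

>>> import lamindb as ln
>>> ln.context.finish(ignore_non_consecutive=True)  # skip the consecutiveness prompt
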
lamindb/core/_data.py CHANGED
@@ -10,7 +10,6 @@ from lnschema_core.models import (
     Collection,
     Feature,
     FeatureSet,
-    HasFeatures,
     Record,
     Run,
     ULabel,
@@ -96,9 +95,14 @@ def save_feature_set_links(self: Artifact | Collection) -> None:
     bulk_create(links, ignore_conflicts=True)
 
 
-@doc_args(HasFeatures.describe.__doc__)
-def describe(self: HasFeatures, print_types: bool = False):
+@doc_args(Artifact.describe.__doc__)
+def describe(self: Artifact, print_types: bool = False):
     """{}"""  # noqa: D415
+    model_name = self.__class__.__name__
+    msg = f"{colors.green(model_name)}{record_repr(self, include_foreign_keys=False).lstrip(model_name)}\n"
+    if self._state.db is not None and self._state.db != "default":
+        msg += f" {colors.italic('Database instance')}\n"
+        msg += f" slug: {self._state.db}\n"
     # prefetch all many-to-many relationships
     # doesn't work for describing using artifact
     # self = (
@@ -109,10 +113,7 @@ def describe(self: HasFeatures, print_types: bool = False):
     #     .get(id=self.id)
     # )
 
-    model_name = self.__class__.__name__
-    msg = f"{colors.green(model_name)}{record_repr(self, include_foreign_keys=False).lstrip(model_name)}\n"
     prov_msg = ""
-
     fields = self._meta.fields
     direct_fields = []
     foreign_key_fields = []
@@ -129,9 +130,14 @@ def describe(self: HasFeatures, print_types: bool = False):
             .get(id=self.id)
         )
     # prefetch m-2-m relationships
+    many_to_many_fields = []
+    if isinstance(self, (Collection, Artifact)):
+        many_to_many_fields.append("input_of_runs")
+    if isinstance(self, Artifact):
+        many_to_many_fields.append("feature_sets")
     self = (
         self.__class__.objects.using(self._state.db)
-        .prefetch_related("feature_sets", "input_of_runs")
+        .prefetch_related(*many_to_many_fields)
         .get(id=self.id)
     )
@@ -149,20 +155,32 @@ def describe(self: HasFeatures, print_types: bool = False):
         ]
     )
     prov_msg += related_msg
-    # input of
-    if self.id is not None and self.input_of_runs.exists():
-        values = [format_field_value(i.started_at) for i in self.input_of_runs.all()]
-        type_str = ": Run" if print_types else ""  # type: ignore
-        prov_msg += f" .input_of_runs{type_str} = {values}\n"
     if prov_msg:
         msg += f" {colors.italic('Provenance')}\n"
         msg += prov_msg
+
+    # input of runs
+    input_of_message = ""
+    if self.id is not None and self.input_of_runs.exists():
+        values = [format_field_value(i.started_at) for i in self.input_of_runs.all()]
+        type_str = ": Run" if print_types else ""  # type: ignore
+        input_of_message += f" .input_of_runs{type_str} = {', '.join(values)}\n"
+    if input_of_message:
+        msg += f" {colors.italic('Usage')}\n"
+        msg += input_of_message
+
+    # labels
     msg += print_labels(self, print_types=print_types)
-    msg += print_features(  # type: ignore
-        self,
-        print_types=print_types,
-        print_params=hasattr(self, "type") and self.type == "model",
-    )
+
+    # features
+    if isinstance(self, Artifact):
+        msg += print_features(  # type: ignore
+            self,
+            print_types=print_types,
+            print_params=hasattr(self, "type") and self.type == "model",
+        )
+
+    # print entire message
     logger.print(msg)
 
 
@@ -328,7 +346,7 @@ def add_labels(
 
 
 def _track_run_input(
-    data: HasFeatures | Iterable[HasFeatures],
+    data: Artifact | Collection | Iterable[Artifact] | Iterable[Collection],
     is_run_input: bool | None = None,
     run: Run | None = None,
 ):
@@ -340,12 +358,14 @@ def _track_run_input(
     elif run is None:
         run = context.run
     # consider that data is an iterable of Data
-    data_iter: Iterable[HasFeatures] = [data] if isinstance(data, HasFeatures) else data
+    data_iter: Iterable[Artifact] | Iterable[Collection] = (
+        [data] if isinstance(data, (Artifact, Collection)) else data
+    )
     track_run_input = False
     input_data = []
     if run is not None:
         # avoid cycles: data can't be both input and output
-        def is_valid_input(data: HasFeatures):
+        def is_valid_input(data: Artifact | Collection):
             return (
                 data.run_id != run.id
                 and not data._state.adding
@@ -416,7 +436,3 @@ def _track_run_input(
     if len(input_data) == 1:
         if input_data[0].transform is not None:
             run.transform.predecessors.add(input_data[0].transform)
-
-
-HasFeatures.describe = describe
-HasFeatures.view_lineage = view_lineage
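
With the `HasFeatures` mixin gone, `describe()` is typed against `Artifact` and `Collection` directly, and its report now separates Provenance, Usage, Labels, and (for artifacts) Features. A sketch of invoking it (the lookup is hypothetical):

>>> import lamindb as ln
>>> artifact = ln.Artifact.get(key="example.parquet")  # hypothetical record
>>> artifact.describe()
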
lamindb/core/_feature_manager.py CHANGED
@@ -19,11 +19,7 @@ from lnschema_core.models import (
     Collection,
     Feature,
     FeatureManager,
-    FeatureManagerArtifact,
-    FeatureManagerCollection,
     FeatureValue,
-    HasFeatures,
-    HasParams,
     LinkORM,
     Param,
     ParamManager,
@@ -116,7 +112,7 @@ def get_feature_set_links(host: Artifact | Collection) -> QuerySet:
     return links_feature_set
 
 
-def get_link_attr(link: LinkORM | type[LinkORM], data: HasFeatures) -> str:
+def get_link_attr(link: LinkORM | type[LinkORM], data: Artifact | Collection) -> str:
     link_model_name = link.__class__.__name__
     if link_model_name in {"Registry", "ModelBase"}:  # we passed the type of the link
         link_model_name = link.__name__
@@ -137,7 +133,7 @@ def custom_aggregate(field, using: str):
 
 
 def print_features(
-    self: HasFeatures | HasParams,
+    self: Artifact | Collection,
     print_types: bool = False,
     to_dict: bool = False,
     print_params: bool = False,
@@ -362,7 +358,7 @@ def __getitem__(self, slot) -> QuerySet:
 
 
 def filter_base(cls, **expression):
-    if cls in {FeatureManagerArtifact, FeatureManagerCollection}:
+    if cls is FeatureManager:
         model = Feature
         value_model = FeatureValue
     else:
@@ -394,10 +390,11 @@ def filter_base(cls, **expression):
             new_expression["ulabels"] = label
         else:
             raise NotImplementedError
-    if cls == FeatureManagerArtifact or cls == ParamManagerArtifact:
+    if cls == FeatureManager or cls == ParamManagerArtifact:
         return Artifact.filter(**new_expression)
-    elif cls == FeatureManagerCollection:
-        return Collection.filter(**new_expression)
+    # might renable something similar in the future
+    # elif cls == FeatureManagerCollection:
+    #     return Collection.filter(**new_expression)
     elif cls == ParamManagerRun:
         return Run.filter(**new_expression)
 
@@ -791,9 +788,11 @@ def _add_set_from_mudata(
     self._host.save()
 
 
-def _add_from(self, data: HasFeatures):
+def _add_from(self, data: Artifact | Collection, transfer_logs: dict = None):
     """Transfer features from a artifact or collection."""
-    # This only covers feature sets, though.
+    # This only covers feature sets
+    if transfer_logs is None:
+        transfer_logs = {"mapped": [], "transferred": []}
     using_key = settings._using_key
     for slot, feature_set in data.features._feature_set_by_slot.items():
         members = feature_set.members
@@ -815,15 +814,18 @@ def _add_from(self, data: HasFeatures):
         new_members = members.filter(**{f"{field}__in": new_members_uids}).all()
         n_new_members = len(new_members)
         if n_new_members > 0:
-            mute = True if n_new_members > 10 else False
             # transfer foreign keys needs to be run before transfer to default db
-            transfer_fk_to_default_db_bulk(new_members, using_key)
+            transfer_fk_to_default_db_bulk(
+                new_members, using_key, transfer_logs=transfer_logs
+            )
             for feature in new_members:
                 # not calling save=True here as in labels, because want to
                 # bulk save below
                 # transfer_fk is set to False because they are already transferred
                 # in the previous step transfer_fk_to_default_db_bulk
-                transfer_to_default_db(feature, using_key, mute=mute, transfer_fk=False)
+                transfer_to_default_db(
+                    feature, using_key, transfer_fk=False, transfer_logs=transfer_logs
+                )
             logger.info(f"saving {n_new_members} new {registry.__name__} records")
             save(new_members)
 
lamindb/core/_label_manager.py CHANGED
@@ -20,12 +20,12 @@ from ._settings import settings
 from ._settings import settings
 from .schema import dict_related_model_to_related_name
 
 if TYPE_CHECKING:
-    from lnschema_core.models import Artifact, Collection, HasFeatures, Record
+    from lnschema_core.models import Artifact, Collection, Record
 
     from lamindb._query_set import QuerySet
 
 
-def get_labels_as_dict(self: HasFeatures, links: bool = False):
+def get_labels_as_dict(self: Artifact | Collection, links: bool = False):
     exclude_set = {
         "feature_sets",
         "artifacts",
@@ -57,7 +57,9 @@ def get_labels_as_dict(self: HasFeatures, links: bool = False):
     return labels
 
 
-def print_labels(self: HasFeatures, field: str = "name", print_types: bool = False):
+def print_labels(
+    self: Artifact | Collection, field: str = "name", print_types: bool = False
+):
     labels_msg = ""
     for related_name, (related_model, labels) in get_labels_as_dict(self).items():
         # there is a try except block here to deal with schema inconsistencies
@@ -167,22 +169,22 @@ class LabelManager:
 
         return get_labels(self._host, feature=feature, mute=mute, flat_names=flat_names)
 
-    def add_from(self, data: HasFeatures) -> None:
+    def add_from(self, data: Artifact | Collection, transfer_logs: dict = None) -> None:
         """Add labels from an artifact or collection to another artifact or collection.
 
         Examples:
-            >>> file1 = ln.Artifact(pd.DataFrame(index=[0, 1]))
-            >>> file1.save()
-            >>> file2 = ln.Artifact(pd.DataFrame(index=[2, 3]))
-            >>> file2.save()
+            >>> artifact1 = ln.Artifact(pd.DataFrame(index=[0, 1])).save()
+            >>> artifact2 = ln.Artifact(pd.DataFrame(index=[2, 3])).save()
             >>> ulabels = ln.ULabel.from_values(["Label1", "Label2"], field="name")
             >>> ln.save(ulabels)
             >>> labels = ln.ULabel.filter(name__icontains = "label").all()
-            >>> file1.ulabels.set(labels)
-            >>> file2.labels.add_from(file1)
+            >>> artifact1.ulabels.set(labels)
+            >>> artifact2.labels.add_from(artifact1)
         """
         from django.db.utils import ProgrammingError
 
+        if transfer_logs is None:
+            transfer_logs = {"mapped": [], "transferred": []}
         using_key = settings._using_key
         for related_name, (_, labels) in get_labels_as_dict(data).items():
             labels = labels.all()
@@ -195,7 +197,9 @@ class LabelManager:
             features = set()
             _, new_labels = validate_labels(labels)
             if len(new_labels) > 0:
-                transfer_fk_to_default_db_bulk(new_labels, using_key)
+                transfer_fk_to_default_db_bulk(
+                    new_labels, using_key, transfer_logs=transfer_logs
+                )
             for label in labels:
                 # if the link table doesn't follow this convention, we'll ignore it
                 if not hasattr(label, f"links_{data_name_lower}"):
@@ -212,7 +216,7 @@ class LabelManager:
                 label_returned = transfer_to_default_db(
                     label,
                     using_key,
-                    mute=True,
+                    transfer_logs=transfer_logs,
                     transfer_fk=False,
                     save=True,
                 )
@@ -223,10 +227,15 @@ class LabelManager:
             # treat features
             _, new_features = validate_labels(list(features))
             if len(new_features) > 0:
-                transfer_fk_to_default_db_bulk(new_features, using_key)
+                transfer_fk_to_default_db_bulk(
+                    new_features, using_key, transfer_logs=transfer_logs
+                )
                 for feature in new_features:
                     transfer_to_default_db(
-                        feature, using_key, mute=True, transfer_fk=False
+                        feature,
+                        using_key,
+                        transfer_logs=transfer_logs,
+                        transfer_fk=False,
                     )
                 save(new_features)
             if hasattr(self._host, related_name):
lamindb/core/_settings.py CHANGED
@@ -52,7 +52,7 @@ class Settings:
         return creation_settings
 
     track_run_inputs: bool = True
-    """Track files as input upon `.load()`, `.cache()` and `.backed()`.
+    """Track files as input upon `.load()`, `.cache()` and `.open()`.
 
     Requires a global run context with :func:`~lamindb.core.Context.track` was created!
 
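The docstring update tracks the rename of `.backed()` to `.open()`; disabling input tracking globally is otherwise unchanged (a sketch):

>>> import lamindb as ln
>>> ln.settings.track_run_inputs = False  # .load()/.cache()/.open() no longer register run inputs
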
lamindb/integrations/_vitessce.py CHANGED
@@ -73,12 +73,15 @@ def save_vitessce_config(
         )
     else:
         dataset_artifacts.append(artifact)
-    # link inputs
+    # the below will be replaced with a `ln.tracked()` decorator soon
     with logger.mute():
-        transform = Transform(name="save_vitessce_config", type="function", version="2")
-        transform.save()
-        run = Run(transform=transform)
-        run.save()
+        transform = Transform(
+            uid="kup03MJBsIVa0001",
+            name="save_vitessce_config",
+            type="function",
+            version="2",
+        ).save()
+        run = Run(transform=transform).save()
     if len(dataset_artifacts) > 1:
         # if we have more datasets, we should create a collection
         # and attach an action to the collection
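
Pinning the uid means repeated calls reuse one `Transform` record instead of minting a new one per call. Typical usage, sketched (the `VitessceConfig` values and the `description` argument are assumptions, not from this diff):

>>> from vitessce import VitessceConfig
>>> from lamindb.integrations import save_vitessce_config
>>> vc = VitessceConfig(schema_version="1.0.15", name="my view")
>>> save_vitessce_config(vc, description="spatial viewer")
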
{lamindb-0.76.4.dist-info → lamindb-0.76.6.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lamindb
-Version: 0.76.4
+Version: 0.76.6
 Summary: A data framework for biology.
 Author-email: Lamin Labs <open-source@lamin.ai>
 Requires-Python: >=3.8
@@ -9,10 +9,10 @@ Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
-Requires-Dist: lnschema_core==0.73.5
-Requires-Dist: lamindb_setup==0.76.8
+Requires-Dist: lnschema_core==0.74.1
+Requires-Dist: lamindb_setup==0.77.1
 Requires-Dist: lamin_utils==0.13.4
-Requires-Dist: lamin_cli==0.17.0
+Requires-Dist: lamin_cli==0.17.2
 Requires-Dist: rapidfuzz
 Requires-Dist: pyarrow
 Requires-Dist: typing_extensions!=4.6.0
@@ -24,7 +24,7 @@ Requires-Dist: pandas
 Requires-Dist: graphviz
 Requires-Dist: psycopg2-binary
 Requires-Dist: lamindb_setup[aws] ; extra == "aws"
-Requires-Dist: bionty==0.49.1 ; extra == "bionty"
+Requires-Dist: bionty==0.50.1 ; extra == "bionty"
 Requires-Dist: pre-commit ; extra == "dev"
 Requires-Dist: nox ; extra == "dev"
 Requires-Dist: laminci>=0.3 ; extra == "dev"