mlrun 1.6.0rc13__py3-none-any.whl → 1.6.0rc15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (37)
  1. mlrun/__main__.py +7 -2
  2. mlrun/artifacts/__init__.py +7 -1
  3. mlrun/artifacts/base.py +38 -3
  4. mlrun/artifacts/dataset.py +1 -1
  5. mlrun/artifacts/manager.py +5 -5
  6. mlrun/artifacts/model.py +1 -1
  7. mlrun/common/schemas/__init__.py +8 -1
  8. mlrun/common/schemas/artifact.py +36 -1
  9. mlrun/config.py +11 -0
  10. mlrun/datastore/azure_blob.py +37 -79
  11. mlrun/datastore/datastore_profile.py +2 -1
  12. mlrun/datastore/store_resources.py +2 -3
  13. mlrun/datastore/targets.py +3 -3
  14. mlrun/db/base.py +8 -5
  15. mlrun/db/httpdb.py +151 -71
  16. mlrun/db/nopdb.py +6 -3
  17. mlrun/feature_store/feature_vector.py +1 -1
  18. mlrun/feature_store/steps.py +2 -2
  19. mlrun/frameworks/_common/model_handler.py +1 -1
  20. mlrun/frameworks/lgbm/mlrun_interfaces/booster_mlrun_interface.py +0 -1
  21. mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +1 -1
  22. mlrun/frameworks/sklearn/metric.py +0 -1
  23. mlrun/frameworks/tf_keras/mlrun_interface.py +1 -2
  24. mlrun/model_monitoring/application.py +20 -27
  25. mlrun/projects/pipelines.py +5 -5
  26. mlrun/projects/project.py +3 -3
  27. mlrun/runtimes/constants.py +10 -0
  28. mlrun/runtimes/local.py +2 -3
  29. mlrun/utils/db.py +6 -5
  30. mlrun/utils/helpers.py +53 -9
  31. mlrun/utils/version/version.json +2 -2
  32. {mlrun-1.6.0rc13.dist-info → mlrun-1.6.0rc15.dist-info}/METADATA +26 -30
  33. {mlrun-1.6.0rc13.dist-info → mlrun-1.6.0rc15.dist-info}/RECORD +37 -37
  34. {mlrun-1.6.0rc13.dist-info → mlrun-1.6.0rc15.dist-info}/LICENSE +0 -0
  35. {mlrun-1.6.0rc13.dist-info → mlrun-1.6.0rc15.dist-info}/WHEEL +0 -0
  36. {mlrun-1.6.0rc13.dist-info → mlrun-1.6.0rc15.dist-info}/entry_points.txt +0 -0
  37. {mlrun-1.6.0rc13.dist-info → mlrun-1.6.0rc15.dist-info}/top_level.txt +0 -0
mlrun/__main__.py CHANGED
@@ -940,12 +940,17 @@ def version():
 )
 @click.option("--offset", type=int, default=0, help="byte offset")
 @click.option("--db", help="api and db service path/url")
-@click.option("--watch", "-w", is_flag=True, help="watch/follow log")
+@click.option("--watch", "-w", is_flag=True, help="Deprecated. not in use")
 def logs(uid, project, offset, db, watch):
     """Get or watch task logs"""
+    if watch:
+        warnings.warn(
+            "'--watch' is deprecated in 1.6.0, and will be removed in 1.8.0, "
+            # TODO: Remove in 1.8.0
+        )
     mldb = get_run_db(db or mlconf.dbpath)
     if mldb.kind == "http":
-        state, _ = mldb.watch_log(uid, project, watch=watch, offset=offset)
+        state, _ = mldb.watch_log(uid, project, watch=False, offset=offset)
     else:
         state, text = mldb.get_log(uid, project, offset=offset)
     if text:
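
Illustrative sketch (not part of the diff): after this change the CLI fetches logs once rather than following them, whether or not --watch is passed. The run identifiers below are hypothetical placeholders.

import mlrun
from mlrun import mlconf

uid, project = "some-run-uid", "my-project"  # hypothetical identifiers
mldb = mlrun.get_run_db(mlconf.dbpath)
if mldb.kind == "http":
    # the CLI now always passes watch=False
    state, _ = mldb.watch_log(uid, project, watch=False, offset=0)
else:
    state, text = mldb.get_log(uid, project, offset=0)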
mlrun/artifacts/__init__.py CHANGED
@@ -19,6 +19,12 @@ __all__ = ["get_model", "update_model"]
 
 from .base import Artifact, ArtifactMetadata, ArtifactSpec, get_artifact_meta
 from .dataset import DatasetArtifact, TableArtifact, update_dataset_meta
-from .manager import ArtifactManager, ArtifactProducer, dict_to_artifact
+from .manager import (
+    ArtifactManager,
+    ArtifactProducer,
+    artifact_types,
+    dict_to_artifact,
+    legacy_artifact_types,
+)
 from .model import ModelArtifact, get_model, update_model
 from .plots import BokehArtifact, ChartArtifact, PlotArtifact, PlotlyArtifact
mlrun/artifacts/base.py CHANGED
@@ -22,6 +22,7 @@ import yaml
 from deprecated import deprecated
 
 import mlrun
+import mlrun.artifacts
 import mlrun.errors
 
 from ..datastore import get_store_uri, is_store_uri, store_manager
@@ -312,11 +313,17 @@ class Artifact(ModelObj):
         """get the absolute target path for the artifact"""
         return self.spec.target_path
 
-    def get_store_url(self, with_tag=True, project=None):
+    def get_store_url(self, with_tag=True, project=None, with_tree=True):
         """get the artifact uri (store://..) with optional parameters"""
-        tag = self.metadata.tree if with_tag else None
+        tag = self.metadata.tag if with_tag else None
+        tree = self.metadata.tree if with_tree else None
+
         uri = generate_artifact_uri(
-            project or self.metadata.project, self.spec.db_key, tag, self.metadata.iter
+            project or self.metadata.project,
+            self.spec.db_key,
+            iter=self.metadata.iter,
+            tree=tree,
+            tag=tag,
         )
         return get_store_uri(self._store_prefix, uri)
 
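A hedged usage sketch of the updated get_store_url signature (the artifact object here stands for any existing mlrun Artifact; the exact URI format is not spelled out in the diff):

# Illustrative only: include the tag but omit the producer tree in the store:// URI.
url = artifact.get_store_url(with_tag=True, with_tree=False)
# Default behavior keeps both, matching the new keyword arguments above.
full_url = artifact.get_store_url()
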
@@ -1034,3 +1041,31 @@ def generate_target_path(item: Artifact, artifact_path, producer):
     suffix = f".{item.format}"
 
     return f"{artifact_path}{item.key}{suffix}"
+
+
+def convert_legacy_artifact_to_new_format(
+    legacy_artifact: typing.Union[LegacyArtifact, dict]
+) -> Artifact:
+    """Converts a legacy artifact to a new format.
+
+    :param legacy_artifact: The legacy artifact to convert.
+    :return: The converted artifact.
+    """
+    if isinstance(legacy_artifact, LegacyArtifact):
+        legacy_artifact_dict = legacy_artifact.to_dict()
+    elif isinstance(legacy_artifact, dict):
+        legacy_artifact_dict = legacy_artifact
+    else:
+        raise TypeError(
+            f"Unsupported type '{type(legacy_artifact)}' for legacy artifact"
+        )
+
+    artifact = mlrun.artifacts.artifact_types.get(
+        legacy_artifact_dict.get("kind", "artifact"), mlrun.artifacts.Artifact
+    )()
+
+    artifact.metadata = artifact.metadata.from_dict(legacy_artifact_dict)
+    artifact.spec = artifact.spec.from_dict(legacy_artifact_dict)
+    artifact.status = artifact.status.from_dict(legacy_artifact_dict)
+
+    return artifact
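
Illustrative usage of the helper added above, converting a flat legacy-format artifact dict into the metadata/spec/status layout (the dict contents are hypothetical):

from mlrun.artifacts.base import convert_legacy_artifact_to_new_format

legacy = {"kind": "model", "key": "my-model", "project": "demo", "tree": "abc123"}  # hypothetical legacy dict
artifact = convert_legacy_artifact_to_new_format(legacy)
# The result is a new-style Artifact whose kind-specific class is looked up in artifact_types.
print(type(artifact), artifact.metadata.key)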
mlrun/artifacts/dataset.py CHANGED
@@ -611,7 +611,7 @@ def update_dataset_meta(
     mlrun.get_run_db().store_artifact(
         artifact_spec.spec.db_key,
         artifact_spec.to_dict(),
-        artifact_spec.metadata.tree,
+        tree=artifact_spec.metadata.tree,
         iter=artifact_spec.metadata.iter,
         project=artifact_spec.metadata.project,
     )
mlrun/artifacts/manager.py CHANGED
@@ -102,9 +102,9 @@ def dict_to_artifact(struct: dict) -> Artifact:
     kind = struct.get("kind", "")
 
     if is_legacy_artifact(struct):
-        artifact_class = legacy_artifact_types[kind]
-    else:
-        artifact_class = artifact_types[kind]
+        return mlrun.artifacts.base.convert_legacy_artifact_to_new_format(struct)
+
+    artifact_class = artifact_types[kind]
 
     return artifact_class.from_dict(struct)
 
@@ -295,10 +295,10 @@ class ArtifactManager:
         self.artifact_db.store_artifact(
             key,
             item.to_dict(),
-            item.tree,
             iter=item.iter,
             tag=tag or item.tag,
             project=project,
+            tree=item.tree,
         )
 
     def link_artifact(
@@ -329,7 +329,7 @@ class ArtifactManager:
         self.artifact_db.store_artifact(
             item.db_key,
             item.to_dict(),
-            item.tree,
+            tree=item.tree,
             iter=iter,
             tag=tag,
             project=project,
mlrun/artifacts/model.py CHANGED
@@ -722,7 +722,7 @@ def update_model(
     mlrun.get_run_db().store_artifact(
         model_spec.db_key,
         model_spec.to_dict(),
-        model_spec.tree,
+        tree=model_spec.tree,
         iter=model_spec.iter,
         project=model_spec.project,
     )
mlrun/common/schemas/__init__.py CHANGED
@@ -14,7 +14,14 @@
 #
 # flake8: noqa - this is until we take care of the F401 violations with respect to __all__ & sphinx
 
-from .artifact import ArtifactCategories, ArtifactIdentifier, ArtifactsFormat
+from .artifact import (
+    Artifact,
+    ArtifactCategories,
+    ArtifactIdentifier,
+    ArtifactMetadata,
+    ArtifactsFormat,
+    ArtifactSpec,
+)
 from .auth import (
     AuthInfo,
     AuthorizationAction,
mlrun/common/schemas/artifact.py CHANGED
@@ -18,6 +18,8 @@ import pydantic
 
 import mlrun.common.types
 
+from .object import ObjectStatus
+
 
 class ArtifactCategories(mlrun.common.types.StrEnum):
     model = "model"
@@ -51,10 +53,43 @@ class ArtifactIdentifier(pydantic.BaseModel):
     key: typing.Optional[str]
     iter: typing.Optional[int]
     uid: typing.Optional[str]
+    producer_id: typing.Optional[str]
     # TODO support hash once saved as a column in the artifacts table
     # hash: typing.Optional[str]
 
 
 class ArtifactsFormat(mlrun.common.types.StrEnum):
+    # TODO: add a format that returns a minimal response
     full = "full"
-    legacy = "legacy"
+
+
+class ArtifactMetadata(pydantic.BaseModel):
+    key: str
+    project: str
+    iter: typing.Optional[int]
+    tree: typing.Optional[str]
+    tag: typing.Optional[str]
+
+    class Config:
+        extra = pydantic.Extra.allow
+
+
+class ArtifactSpec(pydantic.BaseModel):
+    src_path: typing.Optional[str]
+    target_path: typing.Optional[str]
+    viewer: typing.Optional[str]
+    inline: typing.Optional[str]
+    size: typing.Optional[int]
+    db_key: typing.Optional[str]
+    extra_data: typing.Optional[typing.Dict[str, typing.Any]]
+    unpackaging_instructions: typing.Optional[typing.Dict[str, typing.Any]]
+
+    class Config:
+        extra = pydantic.Extra.allow
+
+
+class Artifact(pydantic.BaseModel):
+    kind: str
+    metadata: ArtifactMetadata
+    spec: ArtifactSpec
+    status: ObjectStatus
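
Illustrative construction of the new API-side schema objects (field values are hypothetical; both models declare pydantic.Extra.allow, so extra fields pass through):

import mlrun.common.schemas as schemas

meta = schemas.ArtifactMetadata(key="my-model", project="demo", tree="abc123", tag="latest")
spec = schemas.ArtifactSpec(db_key="my-model", target_path="s3://bucket/models/model.pkl")  # hypothetical path
print(meta.dict(exclude_none=True))
print(spec.dict(exclude_none=True))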
mlrun/config.py CHANGED
@@ -105,6 +105,12 @@ default_config = {
             "list_runs_time_period_in_days": 7,  # days
         }
     },
+    "crud": {
+        "runs": {
+            # deleting runs is a heavy operation that includes deleting runtime resources, therefore we do it in chunks
+            "batch_delete_runs_chunk_size": 10,
+        }
+    },
     # the grace period (in seconds) that will be given to runtime resources (after they're in terminal state)
     # before deleting them (4 hours)
     "runtime_resources_deletion_grace_period": "14400",
@@ -121,6 +127,10 @@ default_config = {
         # But if both the server and the client set some value, we want the client to take precedence over the server.
         # By setting the default to None we are able to differentiate between the two cases.
         "generate_target_path_from_artifact_hash": None,
+        # migration from artifacts to artifacts_v2 is done in batches, and requires a state file to keep track of the
+        # migration progress.
+        "artifact_migration_batch_size": 200,
+        "artifact_migration_state_file_path": "./db/_artifact_migration_state.json",
     },
     # FIXME: Adding these defaults here so we won't need to patch the "installing component" (provazio-controller) to
     # configure this values on field systems, for newer system this will be configured correctly
@@ -358,6 +368,7 @@ default_config = {
         # this is the default interval period for pulling logs, if not specified different timeout interval
         "pull_logs_default_interval": 3,  # seconds
         "pull_logs_backoff_no_logs_default_interval": 10,  # seconds
+        "pull_logs_default_size_limit": 1024 * 1024,  # 1 MB
     },
     "authorization": {
         "mode": "none",  # one of none, opa
mlrun/datastore/azure_blob.py CHANGED
@@ -15,7 +15,6 @@
 import time
 from pathlib import Path
 
-from azure.storage.blob import BlobServiceClient
 from fsspec.registry import get_filesystem_class
 
 import mlrun.errors
@@ -32,13 +31,7 @@ class AzureBlobStore(DataStore):
 
     def __init__(self, parent, schema, name, endpoint="", secrets: dict = None):
         super().__init__(parent, name, schema, endpoint, secrets=secrets)
-        self.bsc = None
-
-        con_string = self._get_secret_or_env("AZURE_STORAGE_CONNECTION_STRING")
-        if con_string:
-            self.bsc = BlobServiceClient.from_connection_string(con_string)
-        else:
-            self.get_filesystem()
+        self.get_filesystem()
 
     def get_filesystem(self, silent=True):
         """return fsspec file system object, if supported"""
@@ -86,89 +79,54 @@ class AzureBlobStore(DataStore):
         return path
 
     def upload(self, key, src_path):
-        if self.bsc:
-            # Need to strip leading / from key
-            with self.bsc.get_blob_client(
-                container=self.endpoint, blob=key[1:]
-            ) as blob_client:
-                with open(src_path, "rb") as data:
-                    blob_client.upload_blob(data, overwrite=True)
-        else:
-            remote_path = self._convert_key_to_remote_path(key)
-            self._filesystem.put_file(src_path, remote_path, overwrite=True)
+        remote_path = self._convert_key_to_remote_path(key)
+        self._filesystem.put_file(src_path, remote_path, overwrite=True)
 
     def get(self, key, size=None, offset=0):
-        if self.bsc:
-            with self.bsc.get_blob_client(
-                container=self.endpoint, blob=key[1:]
-            ) as blob_client:
-                size = size if size else None
-                blob = blob_client.download_blob(offset, size).readall()
-                return blob
-        else:
-            remote_path = self._convert_key_to_remote_path(key)
-            end = offset + size if size else None
-            blob = self._filesystem.cat_file(remote_path, start=offset, end=end)
-            return blob
+        remote_path = self._convert_key_to_remote_path(key)
+        end = offset + size if size else None
+        blob = self._filesystem.cat_file(remote_path, start=offset, end=end)
+        return blob
 
     def put(self, key, data, append=False):
         if append:
             raise mlrun.errors.MLRunInvalidArgumentError(
                 "Append mode not supported for Azure blob datastore"
             )
-        if self.bsc:
-            with self.bsc.get_blob_client(
-                container=self.endpoint, blob=key[1:]
-            ) as blob_client:
-                # Note that append=True is not supported. If the blob already exists, this call will fail
-                blob_client.upload_blob(data, overwrite=True)
+        remote_path = self._convert_key_to_remote_path(key)
+        if isinstance(data, bytes):
+            mode = "wb"
+        elif isinstance(data, str):
+            mode = "w"
         else:
-            remote_path = self._convert_key_to_remote_path(key)
-            if isinstance(data, bytes):
-                mode = "wb"
-            elif isinstance(data, str):
-                mode = "w"
-            else:
-                raise TypeError("Data type unknown. Unable to put in Azure!")
-            with self._filesystem.open(remote_path, mode) as f:
-                f.write(data)
+            raise TypeError("Data type unknown. Unable to put in Azure!")
+        with self._filesystem.open(remote_path, mode) as f:
+            f.write(data)
 
     def stat(self, key):
-        if self.bsc:
-            with self.bsc.get_blob_client(
-                container=self.endpoint, blob=key[1:]
-            ) as blob_client:
-                props = blob_client.get_blob_properties()
-                size = props.size
-                modified = props.last_modified
+        remote_path = self._convert_key_to_remote_path(key)
+        files = self._filesystem.ls(remote_path, detail=True)
+        if len(files) == 1 and files[0]["type"] == "file":
+            size = files[0]["size"]
+            modified = files[0]["last_modified"]
+        elif len(files) == 1 and files[0]["type"] == "directory":
+            raise FileNotFoundError("Operation expects a file not a directory!")
         else:
-            remote_path = self._convert_key_to_remote_path(key)
-            files = self._filesystem.ls(remote_path, detail=True)
-            if len(files) == 1 and files[0]["type"] == "file":
-                size = files[0]["size"]
-                modified = files[0]["last_modified"]
-            elif len(files) == 1 and files[0]["type"] == "directory":
-                raise FileNotFoundError("Operation expects a file not a directory!")
-            else:
-                raise ValueError("Operation expects to receive a single file!")
+            raise ValueError("Operation expects to receive a single file!")
         return FileStats(size, time.mktime(modified.timetuple()))
 
     def listdir(self, key):
-        if self.bsc:
-            if key and not key.endswith("/"):
-                key = key[1:] + "/"
-            key_length = len(key)
-            with self.bsc.get_container_client(self.endpoint) as container_client:
-                blob_list = container_client.list_blobs(name_starts_with=key)
-                return [blob.name[key_length:] for blob in blob_list]
-        else:
-            remote_path = self._convert_key_to_remote_path(key)
-            if self._filesystem.isfile(remote_path):
-                return key
-            remote_path = f"{remote_path}/**"
-            files = self._filesystem.glob(remote_path)
-            key_length = len(key)
-            files = [
-                f.split("/", 1)[1][key_length:] for f in files if len(f.split("/")) > 1
-            ]
-            return files
+        remote_path = self._convert_key_to_remote_path(key)
+        if self._filesystem.isfile(remote_path):
+            return key
+        remote_path = f"{remote_path}/**"
+        files = self._filesystem.glob(remote_path)
+        key_length = len(key)
+        files = [
+            f.split("/", 1)[1][key_length:] for f in files if len(f.split("/")) > 1
+        ]
+        return files
+
+    def rm(self, path, recursive=False, maxdepth=None):
+        path = self._convert_key_to_remote_path(key=path)
+        super().rm(path=path, recursive=recursive, maxdepth=maxdepth)
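
With the BlobServiceClient branch removed, every operation goes through the fsspec filesystem. A rough standalone sketch of the equivalent fsspec calls (the account credentials and container path are hypothetical; it assumes the adlfs package backs the "az" protocol, while the real store resolves its filesystem via get_filesystem()):

import fsspec

fs = fsspec.filesystem("az", account_name="myaccount", account_key="...")  # hypothetical credentials
fs.put_file("local.csv", "container/path/data.csv")               # upload
head = fs.cat_file("container/path/data.csv", start=0, end=100)   # ranged read, as in get()
info = fs.ls("container/path/data.csv", detail=True)              # stat-like listing, as in stat()
matches = fs.glob("container/path/**")                            # recursive listing, as in listdir()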
mlrun/datastore/datastore_profile.py CHANGED
@@ -302,7 +302,7 @@ class DatastoreProfile2Json(pydantic.BaseModel):
             {
                 k: v
                 for k, v in profile.dict().items()
-                if not str(k) in profile._private_attributes
+                if str(k) not in profile._private_attributes
             }
         )
 
@@ -344,6 +344,7 @@ class DatastoreProfile2Json(pydantic.BaseModel):
         "kafka_source": DatastoreProfileKafkaSource,
         "dbfs": DatastoreProfileDBFS,
         "gcs": DatastoreProfileGCS,
+        "az": DatastoreProfileAzureBlob,
     }
     if datastore_type in ds_profile_factory:
         return ds_profile_factory[datastore_type].parse_obj(decoded_dict)
mlrun/datastore/store_resources.py CHANGED
@@ -158,12 +158,11 @@
         return db.get_feature_vector(name, project, tag, uid)
 
     elif StorePrefix.is_artifact(kind):
-        project, key, iteration, tag, uid = parse_artifact_uri(
+        project, key, iteration, tag, tree = parse_artifact_uri(
             uri, project or config.default_project
         )
-
         resource = db.read_artifact(
-            key, project=project, tag=tag or uid, iter=iteration
+            key, project=project, tag=tag, iter=iteration, tree=tree
        )
         if resource.get("kind", "") == "link":
             # todo: support other link types (not just iter, move this to the db/api layer
mlrun/datastore/targets.py CHANGED
@@ -96,7 +96,7 @@ def get_default_targets(offline_only=False):
 def update_targets_run_id_for_ingest(overwrite, targets, targets_in_status):
     run_id = generate_target_run_id()
     for target in targets:
-        if overwrite or not (target.name in targets_in_status.keys()):
+        if overwrite or target.name not in targets_in_status.keys():
             target.run_id = run_id
         else:
             target.run_id = targets_in_status[target.name].run_id
@@ -192,7 +192,7 @@ def validate_target_list(targets):
 
     if not targets:
         return
-    targets_by_kind_name = [kind for kind in targets if type(kind) is str]
+    targets_by_kind_name = [kind for kind in targets if isinstance(kind, str)]
     no_name_target_types_count = Counter(
         [
             target.kind
@@ -898,7 +898,7 @@ class ParquetTarget(BaseStoreTarget):
 
         def delete_update_last_written(*arg, **kargs):
             result = original_to_dict(*arg, **kargs)
-            del result["class_args"]["update_last_written"]
+            result["class_args"].pop("update_last_written", None)
             return result
 
         # update_last_written is not serializable (ML-5108)
mlrun/db/base.py CHANGED
@@ -91,11 +91,13 @@ class RunDBInterface(ABC):
         pass
 
     @abstractmethod
-    def store_artifact(self, key, artifact, uid, iter=None, tag="", project=""):
+    def store_artifact(
+        self, key, artifact, uid=None, iter=None, tag="", project="", tree=None
+    ):
         pass
 
     @abstractmethod
-    def read_artifact(self, key, tag="", iter=None, project=""):
+    def read_artifact(self, key, tag="", iter=None, project="", tree=None, uid=None):
         pass
 
     @abstractmethod
@@ -111,11 +113,12 @@ class RunDBInterface(ABC):
         best_iteration: bool = False,
         kind: str = None,
         category: Union[str, mlrun.common.schemas.ArtifactCategories] = None,
+        tree: str = None,
     ):
         pass
 
     @abstractmethod
-    def del_artifact(self, key, tag="", project=""):
+    def del_artifact(self, key, tag="", project="", tree=None, uid=None):
         pass
 
     @abstractmethod
@@ -203,8 +206,8 @@ class RunDBInterface(ABC):
             key=mlrun.utils.get_in_artifact(artifact_obj, "key"),
             # we are passing tree as uid when storing an artifact, so if uid is not defined,
             # pass the tree as uid
-            uid=mlrun.utils.get_in_artifact(artifact_obj, "uid")
-            or mlrun.utils.get_in_artifact(artifact_obj, "tree"),
+            uid=mlrun.utils.get_in_artifact(artifact_obj, "uid"),
+            producer_id=mlrun.utils.get_in_artifact(artifact_obj, "tree"),
             kind=mlrun.utils.get_in_artifact(artifact_obj, "kind"),
             iter=mlrun.utils.get_in_artifact(artifact_obj, "iter"),
         )
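
A hedged client-side sketch of the updated calling convention, mirroring the dataset.py and model.py changes above (the key, project, and producer id are hypothetical; artifact stands for an existing mlrun Artifact object):

import mlrun

db = mlrun.get_run_db()
db.store_artifact(
    "my-artifact",           # key (hypothetical)
    artifact.to_dict(),
    tree="abc123",           # producer id, formerly passed positionally as uid
    iter=0,
    tag="latest",
    project="demo",
)
fetched = db.read_artifact("my-artifact", project="demo", tag="latest", iter=0, tree="abc123")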