mlrun 1.7.0rc23__py3-none-any.whl → 1.7.0rc25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic. Click here for more details.

Files changed (36)
  1. mlrun/__main__.py +3 -1
  2. mlrun/common/formatters/__init__.py +1 -0
  3. mlrun/common/formatters/artifact.py +26 -3
  4. mlrun/common/formatters/run.py +26 -0
  5. mlrun/common/helpers.py +11 -0
  6. mlrun/common/schemas/__init__.py +2 -0
  7. mlrun/common/schemas/alert.py +1 -1
  8. mlrun/common/schemas/api_gateway.py +57 -16
  9. mlrun/common/schemas/artifact.py +11 -0
  10. mlrun/common/schemas/feature_store.py +2 -22
  11. mlrun/common/schemas/pipeline.py +16 -0
  12. mlrun/common/schemas/project.py +17 -0
  13. mlrun/common/schemas/runs.py +17 -0
  14. mlrun/common/types.py +5 -0
  15. mlrun/config.py +1 -19
  16. mlrun/datastore/targets.py +12 -1
  17. mlrun/db/base.py +53 -2
  18. mlrun/db/httpdb.py +82 -9
  19. mlrun/db/nopdb.py +33 -2
  20. mlrun/model.py +2 -0
  21. mlrun/model_monitoring/applications/histogram_data_drift.py +1 -1
  22. mlrun/projects/pipelines.py +10 -9
  23. mlrun/projects/project.py +16 -6
  24. mlrun/run.py +11 -6
  25. mlrun/runtimes/base.py +11 -4
  26. mlrun/serving/server.py +10 -0
  27. mlrun/serving/states.py +29 -0
  28. mlrun/utils/helpers.py +3 -0
  29. mlrun/utils/notifications/notification_pusher.py +2 -8
  30. mlrun/utils/version/version.json +2 -2
  31. {mlrun-1.7.0rc23.dist-info → mlrun-1.7.0rc25.dist-info}/METADATA +2 -2
  32. {mlrun-1.7.0rc23.dist-info → mlrun-1.7.0rc25.dist-info}/RECORD +36 -35
  33. {mlrun-1.7.0rc23.dist-info → mlrun-1.7.0rc25.dist-info}/WHEEL +1 -1
  34. {mlrun-1.7.0rc23.dist-info → mlrun-1.7.0rc25.dist-info}/LICENSE +0 -0
  35. {mlrun-1.7.0rc23.dist-info → mlrun-1.7.0rc25.dist-info}/entry_points.txt +0 -0
  36. {mlrun-1.7.0rc23.dist-info → mlrun-1.7.0rc25.dist-info}/top_level.txt +0 -0
mlrun/db/httpdb.py CHANGED
@@ -725,16 +725,26 @@ class HTTPRunDB(RunDBInterface):
725
725
  )
726
726
  return None
727
727
 
728
- def read_run(self, uid, project="", iter=0):
728
+ def read_run(
729
+ self,
730
+ uid,
731
+ project="",
732
+ iter=0,
733
+ format_: mlrun.common.formatters.RunFormat = mlrun.common.formatters.RunFormat.full,
734
+ ):
729
735
  """Read the details of a stored run from the DB.
730
736
 
731
- :param uid: The run's unique ID.
732
- :param project: Project name.
733
- :param iter: Iteration within a specific execution.
737
+ :param uid: The run's unique ID.
738
+ :param project: Project name.
739
+ :param iter: Iteration within a specific execution.
740
+ :param format_: The format in which to return the run details.
734
741
  """
735
742
 
736
743
  path = self._path_of("runs", project, uid)
737
- params = {"iter": iter}
744
+ params = {
745
+ "iter": iter,
746
+ "format": format_.value,
747
+ }
738
748
  error = f"get run {project}/{uid}"
739
749
  resp = self.api_call("GET", path, error, params=params)
740
750
  return resp.json()["data"]
@@ -979,6 +989,7 @@ class HTTPRunDB(RunDBInterface):
979
989
  project="",
980
990
  tree=None,
981
991
  uid=None,
992
+ format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
982
993
  ):
983
994
  """Read an artifact, identified by its key, tag, tree and iteration.
984
995
 
@@ -988,20 +999,20 @@ class HTTPRunDB(RunDBInterface):
988
999
  :param project: Project that the artifact belongs to.
989
1000
  :param tree: The tree which generated this artifact.
990
1001
  :param uid: A unique ID for this specific version of the artifact (the uid that was generated in the backend)
1002
+ :param format_: The format in which to return the artifact. Default is 'full'.
991
1003
  """
992
1004
 
993
1005
  project = project or config.default_project
994
1006
  tag = tag or "latest"
995
1007
  endpoint_path = f"projects/{project}/artifacts/{key}"
996
1008
  error = f"read artifact {project}/{key}"
997
- # explicitly set artifacts format to 'full' since old servers may default to 'legacy'
998
1009
  params = {
999
- "format": mlrun.common.formatters.ArtifactFormat.full.value,
1010
+ "format": format_,
1000
1011
  "tag": tag,
1001
1012
  "tree": tree,
1002
1013
  "uid": uid,
1003
1014
  }
1004
- if iter:
1015
+ if iter is not None:
1005
1016
  params["iter"] = str(iter)
1006
1017
  resp = self.api_call("GET", endpoint_path, error, params=params, version="v2")
1007
1018
  return resp.json()
@@ -1061,6 +1072,7 @@ class HTTPRunDB(RunDBInterface):
1061
1072
  category: Union[str, mlrun.common.schemas.ArtifactCategories] = None,
1062
1073
  tree: str = None,
1063
1074
  producer_uri: str = None,
1075
+ format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
1064
1076
  ) -> ArtifactList:
1065
1077
  """List artifacts filtered by various parameters.
1066
1078
 
@@ -1095,6 +1107,7 @@ class HTTPRunDB(RunDBInterface):
1095
1107
  :param producer_uri: Return artifacts produced by the requested producer URI. Producer URI usually
1096
1108
  points to a run and is used to filter artifacts by the run that produced them when the artifact producer id
1097
1109
  is a workflow id (artifact was created as part of a workflow).
1110
+ :param format_: The format in which to return the artifacts. Default is 'full'.
1098
1111
  """
1099
1112
 
1100
1113
  project = project or config.default_project
@@ -1112,7 +1125,7 @@ class HTTPRunDB(RunDBInterface):
1112
1125
  "kind": kind,
1113
1126
  "category": category,
1114
1127
  "tree": tree,
1115
- "format": mlrun.common.formatters.ArtifactFormat.full.value,
1128
+ "format": format_,
1116
1129
  "producer_uri": producer_uri,
1117
1130
  }
1118
1131
  error = "list artifacts"
@@ -2110,6 +2123,41 @@ class HTTPRunDB(RunDBInterface):
2110
2123
  resp = self.api_call("GET", path, error_message, params=params)
2111
2124
  return resp.json()["features"]
2112
2125
 
2126
+ def list_features_v2(
2127
+ self,
2128
+ project: str,
2129
+ name: str = None,
2130
+ tag: str = None,
2131
+ entities: list[str] = None,
2132
+ labels: list[str] = None,
2133
+ ) -> dict[str, list[dict]]:
2134
+ """List feature-sets which contain specific features. This function may return multiple versions of the same
2135
+ feature-set if a specific tag is not requested. Note that the various filters of this function actually
2136
+ refer to the feature-set object containing the features, not to the features themselves.
2137
+
2138
+ :param project: Project which contains these features.
2139
+ :param name: Name of the feature to look for. The name is used in a like query, and is not case-sensitive. For
2140
+ example, looking for ``feat`` will return features which are named ``MyFeature`` as well as ``defeat``.
2141
+ :param tag: Return feature-sets which contain the features looked for, and are tagged with the specific tag.
2142
+ :param entities: Return only feature-sets which contain an entity whose name is contained in this list.
2143
+ :param labels: Return only feature-sets which are labeled as requested.
2144
+ :returns: A list of features, and a list of their corresponding feature sets.
2145
+ """
2146
+
2147
+ project = project or config.default_project
2148
+ params = {
2149
+ "name": name,
2150
+ "tag": tag,
2151
+ "entity": entities or [],
2152
+ "label": labels or [],
2153
+ }
2154
+
2155
+ path = f"projects/{project}/features"
2156
+
2157
+ error_message = f"Failed listing features, project: {project}, query: {params}"
2158
+ resp = self.api_call("GET", path, error_message, params=params, version="v2")
2159
+ return resp.json()
2160
+
2113
2161
  def list_entities(
2114
2162
  self,
2115
2163
  project: str,
@@ -2135,6 +2183,31 @@ class HTTPRunDB(RunDBInterface):
2135
2183
  resp = self.api_call("GET", path, error_message, params=params)
2136
2184
  return resp.json()["entities"]
2137
2185
 
2186
+ def list_entities_v2(
2187
+ self,
2188
+ project: str,
2189
+ name: str = None,
2190
+ tag: str = None,
2191
+ labels: list[str] = None,
2192
+ ) -> dict[str, list[dict]]:
2193
+ """Retrieve a list of entities and their mapping to the containing feature-sets. This function is similar
2194
+ to the :py:func:`~list_features_v2` function, and uses the same logic. However, the entities are matched
2195
+ against the name rather than the features.
2196
+ """
2197
+
2198
+ project = project or config.default_project
2199
+ params = {
2200
+ "name": name,
2201
+ "tag": tag,
2202
+ "label": labels or [],
2203
+ }
2204
+
2205
+ path = f"projects/{project}/entities"
2206
+
2207
+ error_message = f"Failed listing entities, project: {project}, query: {params}"
2208
+ resp = self.api_call("GET", path, error_message, params=params, version="v2")
2209
+ return resp.json()
2210
+
2138
2211
  @staticmethod
2139
2212
  def _generate_partition_by_params(
2140
2213
  partition_by_cls,
mlrun/db/nopdb.py CHANGED
@@ -73,7 +73,13 @@ class NopDB(RunDBInterface):
73
73
  def abort_run(self, uid, project="", iter=0, timeout=45, status_text=""):
74
74
  pass
75
75
 
76
- def read_run(self, uid, project="", iter=0):
76
+ def read_run(
77
+ self,
78
+ uid,
79
+ project="",
80
+ iter=0,
81
+ format_: mlrun.common.formatters.RunFormat = mlrun.common.formatters.RunFormat.full,
82
+ ):
77
83
  pass
78
84
 
79
85
  def list_runs(
@@ -115,7 +121,16 @@ class NopDB(RunDBInterface):
115
121
  ):
116
122
  pass
117
123
 
118
- def read_artifact(self, key, tag="", iter=None, project="", tree=None, uid=None):
124
+ def read_artifact(
125
+ self,
126
+ key,
127
+ tag="",
128
+ iter=None,
129
+ project="",
130
+ tree=None,
131
+ uid=None,
132
+ format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
133
+ ):
119
134
  pass
120
135
 
121
136
  def list_artifacts(
@@ -131,6 +146,7 @@ class NopDB(RunDBInterface):
131
146
  kind: str = None,
132
147
  category: Union[str, mlrun.common.schemas.ArtifactCategories] = None,
133
148
  tree: str = None,
149
+ format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
134
150
  ):
135
151
  pass
136
152
 
@@ -252,11 +268,26 @@ class NopDB(RunDBInterface):
252
268
  ) -> mlrun.common.schemas.FeaturesOutput:
253
269
  pass
254
270
 
271
+ def list_features_v2(
272
+ self,
273
+ project: str,
274
+ name: str = None,
275
+ tag: str = None,
276
+ entities: list[str] = None,
277
+ labels: list[str] = None,
278
+ ) -> mlrun.common.schemas.FeaturesOutputV2:
279
+ pass
280
+
255
281
  def list_entities(
256
282
  self, project: str, name: str = None, tag: str = None, labels: list[str] = None
257
283
  ) -> mlrun.common.schemas.EntitiesOutput:
258
284
  pass
259
285
 
286
+ def list_entities_v2(
287
+ self, project: str, name: str = None, tag: str = None, labels: list[str] = None
288
+ ) -> mlrun.common.schemas.EntitiesOutputV2:
289
+ pass
290
+
260
291
  def list_feature_sets(
261
292
  self,
262
293
  project: str = "",
mlrun/model.py CHANGED
@@ -1989,6 +1989,7 @@ class DataTarget(DataTargetBase):
1989
1989
  "name",
1990
1990
  "kind",
1991
1991
  "path",
1992
+ "attributes",
1992
1993
  "start_time",
1993
1994
  "online",
1994
1995
  "status",
@@ -2020,6 +2021,7 @@ class DataTarget(DataTargetBase):
2020
2021
  self.last_written = None
2021
2022
  self._producer = None
2022
2023
  self.producer = {}
2024
+ self.attributes = {}
2023
2025
 
2024
2026
  @property
2025
2027
  def producer(self) -> FeatureSetProducer:
@@ -193,7 +193,7 @@ class HistogramDataDriftApplication(ModelMonitoringApplicationBaseV2):
193
193
  status=status,
194
194
  extra_data={
195
195
  EventFieldType.CURRENT_STATS: json.dumps(
196
- monitoring_context.feature_stats
196
+ monitoring_context.sample_df_stats
197
197
  ),
198
198
  EventFieldType.DRIFT_MEASURES: metrics_per_feature.T.to_json(),
199
199
  EventFieldType.DRIFT_STATUS: status.value,
@@ -22,8 +22,7 @@ import uuid
22
22
 
23
23
  import mlrun_pipelines.common.models
24
24
  import mlrun_pipelines.patcher
25
- from kfp.compiler import compiler
26
- from mlrun_pipelines.helpers import new_pipe_metadata
25
+ import mlrun_pipelines.utils
27
26
 
28
27
  import mlrun
29
28
  import mlrun.common.runtimes.constants
@@ -220,9 +219,10 @@ class _PipelineContext:
220
219
  force_run_local = mlrun.mlconf.force_run_local
221
220
  if force_run_local is None or force_run_local == "auto":
222
221
  force_run_local = not mlrun.mlconf.is_api_running_on_k8s()
223
- kfp_url = mlrun.mlconf.resolve_kfp_url()
224
- if not kfp_url:
222
+ if not mlrun.mlconf.kfp_url:
223
+ logger.debug("Kubeflow pipeline URL is not set, running locally")
225
224
  force_run_local = True
225
+
226
226
  if self.workflow:
227
227
  force_run_local = force_run_local or self.workflow.run_local
228
228
 
@@ -502,13 +502,14 @@ class _KFPRunner(_PipelineRunner):
502
502
  functions,
503
503
  secrets=project._secrets,
504
504
  )
505
- artifact_path = artifact_path or project.spec.artifact_path
506
-
507
- conf = new_pipe_metadata(
508
- artifact_path=artifact_path,
505
+ mlrun_pipelines.utils.compile_pipeline(
506
+ artifact_path=artifact_path or project.spec.artifact_path,
509
507
  cleanup_ttl=workflow_spec.cleanup_ttl,
508
+ ops=None,
509
+ pipeline=pipeline,
510
+ pipe_file=target,
511
+ type_check=True,
510
512
  )
511
- compiler.Compiler().compile(pipeline, target, pipeline_conf=conf)
512
513
  workflow_spec.clear_tmp()
513
514
  pipeline_context.clear()
514
515
 
mlrun/projects/project.py CHANGED
@@ -51,6 +51,7 @@ import mlrun.runtimes.nuclio.api_gateway
51
51
  import mlrun.runtimes.pod
52
52
  import mlrun.runtimes.utils
53
53
  import mlrun.serving
54
+ import mlrun.utils
54
55
  import mlrun.utils.regex
55
56
  from mlrun.alerts.alert import AlertConfig
56
57
  from mlrun.common.schemas.alert import AlertTemplate
@@ -993,15 +994,24 @@ class ProjectSpec(ModelObj):
993
994
 
994
995
  artifacts_dict = {}
995
996
  for artifact in artifacts:
996
- if not isinstance(artifact, dict) and not hasattr(artifact, "to_dict"):
997
+ invalid_object_type = not isinstance(artifact, dict) and not hasattr(
998
+ artifact, "to_dict"
999
+ )
1000
+ is_artifact_model = not isinstance(artifact, dict) and hasattr(
1001
+ artifact, "to_dict"
1002
+ )
1003
+
1004
+ if invalid_object_type:
997
1005
  raise ValueError("artifacts must be a dict or class")
998
- if isinstance(artifact, dict):
999
- key = artifact.get("metadata", {}).get("key", "")
1000
- if not key:
1001
- raise ValueError('artifacts "metadata.key" must be specified')
1002
- else:
1006
+ elif is_artifact_model:
1003
1007
  key = artifact.key
1004
1008
  artifact = artifact.to_dict()
1009
+ else: # artifact is a dict
1010
+ # imported artifacts don't have metadata,spec,status fields
1011
+ key_field = "key" if _is_imported_artifact(artifact) else "metadata.key"
1012
+ key = mlrun.utils.get_in(artifact, key_field, "")
1013
+ if not key:
1014
+ raise ValueError(f'artifacts "{key_field}" must be specified')
1005
1015
 
1006
1016
  artifacts_dict[key] = artifact
1007
1017
 
mlrun/run.py CHANGED
@@ -11,6 +11,7 @@
11
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
+
14
15
  import importlib.util as imputil
15
16
  import json
16
17
  import os
@@ -28,9 +29,9 @@ from typing import Optional, Union
28
29
 
29
30
  import nuclio
30
31
  import yaml
31
- from kfp import Client
32
32
  from mlrun_pipelines.common.models import RunStatuses
33
33
  from mlrun_pipelines.common.ops import format_summary_from_kfp_run, show_kfp_run
34
+ from mlrun_pipelines.utils import get_client
34
35
 
35
36
  import mlrun.common.constants as mlrun_constants
36
37
  import mlrun.common.formatters
@@ -293,10 +294,14 @@ def get_or_create_ctx(
293
294
  newspec["metadata"]["project"] = (
294
295
  newspec["metadata"].get("project") or project or mlconf.default_project
295
296
  )
297
+
296
298
  newspec["metadata"].setdefault("labels", {})
297
- newspec["metadata"]["labels"] = {
298
- mlrun_constants.MLRunInternalLabels.kind: RuntimeKinds.local
299
- }
299
+
300
+ # This function can also be called as a local run if it is not called within a function.
301
+ # It will create a local run, and the run kind must be local by default.
302
+ newspec["metadata"]["labels"].setdefault(
303
+ mlrun_constants.MLRunInternalLabels.kind, RuntimeKinds.local
304
+ )
300
305
 
301
306
  ctx = MLClientCtx.from_dict(
302
307
  newspec, rundb=out, autocommit=autocommit, tmp=tmp, host=socket.gethostname()
@@ -948,7 +953,7 @@ def wait_for_pipeline_completion(
948
953
  _wait_for_pipeline_completion,
949
954
  )
950
955
  else:
951
- client = Client(namespace=namespace)
956
+ client = get_client(namespace=namespace)
952
957
  resp = client.wait_for_run_completion(run_id, timeout)
953
958
  if resp:
954
959
  resp = resp.to_dict()
@@ -1009,7 +1014,7 @@ def get_pipeline(
1009
1014
  )
1010
1015
 
1011
1016
  else:
1012
- client = Client(namespace=namespace)
1017
+ client = get_client(namespace=namespace)
1013
1018
  resp = client.get_run(run_id)
1014
1019
  if resp:
1015
1020
  resp = resp.to_dict()
mlrun/runtimes/base.py CHANGED
@@ -426,13 +426,19 @@ class BaseRuntime(ModelObj):
426
426
  reset_on_run=reset_on_run,
427
427
  )
428
428
 
429
- def _get_db_run(self, task: RunObject = None):
429
+ def _get_db_run(
430
+ self,
431
+ task: RunObject = None,
432
+ run_format: mlrun.common.formatters.RunFormat = mlrun.common.formatters.RunFormat.full,
433
+ ):
430
434
  if self._get_db() and task:
431
435
  project = task.metadata.project
432
436
  uid = task.metadata.uid
433
437
  iter = task.metadata.iteration
434
438
  try:
435
- return self._get_db().read_run(uid, project, iter=iter)
439
+ return self._get_db().read_run(
440
+ uid, project, iter=iter, format_=run_format
441
+ )
436
442
  except mlrun.db.RunDBError:
437
443
  return None
438
444
  if task:
@@ -549,13 +555,14 @@ class BaseRuntime(ModelObj):
549
555
  self,
550
556
  resp: dict = None,
551
557
  task: RunObject = None,
552
- err=None,
558
+ err: Union[Exception, str] = None,
559
+ run_format: mlrun.common.formatters.RunFormat = mlrun.common.formatters.RunFormat.full,
553
560
  ) -> typing.Optional[dict]:
554
561
  """update the task state in the DB"""
555
562
  was_none = False
556
563
  if resp is None and task:
557
564
  was_none = True
558
- resp = self._get_db_run(task)
565
+ resp = self._get_db_run(task, run_format)
559
566
 
560
567
  if not resp:
561
568
  self.store_run(task)
mlrun/serving/server.py CHANGED
@@ -383,6 +383,16 @@ def v2_serving_handler(context, event, get_body=False):
383
383
  if event.body == b"":
384
384
  event.body = None
385
385
 
386
+ # original path is saved in stream_path so it can be used by explicit ack, but path is reset to / as a
387
+ # workaround for NUC-178
388
+ event.stream_path = event.path
389
+ if hasattr(event, "trigger") and event.trigger.kind in (
390
+ "kafka",
391
+ "kafka-cluster",
392
+ "v3ioStream",
393
+ ):
394
+ event.path = "/"
395
+
386
396
  return context._server.run(event, context, get_body)
387
397
 
388
398
 
mlrun/serving/states.py CHANGED
@@ -832,6 +832,35 @@ class QueueStep(BaseStep):
832
832
  def async_object(self):
833
833
  return self._async_object
834
834
 
835
+ def to(
836
+ self,
837
+ class_name: Union[str, StepToDict] = None,
838
+ name: str = None,
839
+ handler: str = None,
840
+ graph_shape: str = None,
841
+ function: str = None,
842
+ full_event: bool = None,
843
+ input_path: str = None,
844
+ result_path: str = None,
845
+ **class_args,
846
+ ):
847
+ if not function:
848
+ name = get_name(name, class_name)
849
+ raise mlrun.errors.MLRunInvalidArgumentError(
850
+ f"step '{name}' must specify a function, because it follows a queue step"
851
+ )
852
+ return super().to(
853
+ class_name,
854
+ name,
855
+ handler,
856
+ graph_shape,
857
+ function,
858
+ full_event,
859
+ input_path,
860
+ result_path,
861
+ **class_args,
862
+ )
863
+
835
864
  def run(self, event, *args, **kwargs):
836
865
  data = event.body
837
866
  if not data:
mlrun/utils/helpers.py CHANGED
@@ -674,6 +674,8 @@ def parse_artifact_uri(uri, default_project=""):
674
674
  raise ValueError(
675
675
  f"illegal store path '{uri}', iteration must be integer value"
676
676
  )
677
+ else:
678
+ iteration = 0
677
679
  return (
678
680
  group_dict["project"] or default_project,
679
681
  group_dict["key"],
@@ -1314,6 +1316,7 @@ def format_run(run: PipelineRun, with_project=False) -> dict:
1314
1316
  "scheduled_at",
1315
1317
  "finished_at",
1316
1318
  "description",
1319
+ "experiment_id",
1317
1320
  ]
1318
1321
 
1319
1322
  if with_project:
@@ -20,9 +20,9 @@ import traceback
20
20
  import typing
21
21
  from concurrent.futures import ThreadPoolExecutor
22
22
 
23
- import kfp
24
23
  import mlrun_pipelines.common.ops
25
24
  import mlrun_pipelines.models
25
+ import mlrun_pipelines.utils
26
26
 
27
27
  import mlrun.common.constants as mlrun_constants
28
28
  import mlrun.common.runtimes.constants
@@ -484,13 +484,7 @@ class NotificationPusher(_NotificationPusherBase):
484
484
  def _get_workflow_manifest(
485
485
  workflow_id: str,
486
486
  ) -> typing.Optional[mlrun_pipelines.models.PipelineManifest]:
487
- kfp_url = mlrun.mlconf.resolve_kfp_url(mlrun.mlconf.namespace)
488
- if not kfp_url:
489
- raise mlrun.errors.MLRunNotFoundError(
490
- "KubeFlow Pipelines is not configured"
491
- )
492
-
493
- kfp_client = kfp.Client(host=kfp_url)
487
+ kfp_client = mlrun_pipelines.utils.get_client(mlrun.mlconf)
494
488
 
495
489
  # arbitrary timeout of 5 seconds, the workflow should be done by now
496
490
  kfp_run = kfp_client.wait_for_run_completion(workflow_id, 5)
@@ -1,4 +1,4 @@
1
1
  {
2
- "git_commit": "e2058b393890ec83e5a2a1ed99b4dfbc7d806b4e",
3
- "version": "1.7.0-rc23"
2
+ "git_commit": "a0adb214a48c9a8ba1e379d27e0f62bd20fd55a1",
3
+ "version": "1.7.0-rc25"
4
4
  }
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: mlrun
3
- Version: 1.7.0rc23
3
+ Version: 1.7.0rc25
4
4
  Summary: Tracking and config of machine learning runs
5
5
  Home-page: https://github.com/mlrun/mlrun
6
6
  Author: Yaron Haviv
@@ -43,7 +43,7 @@ Requires-Dist: semver ~=3.0
43
43
  Requires-Dist: dependency-injector ~=4.41
44
44
  Requires-Dist: fsspec <2024.4,>=2023.9.2
45
45
  Requires-Dist: v3iofs ~=0.1.17
46
- Requires-Dist: storey ~=1.7.17
46
+ Requires-Dist: storey ~=1.7.20
47
47
  Requires-Dist: inflection ~=0.5.0
48
48
  Requires-Dist: python-dotenv ~=0.17.0
49
49
  Requires-Dist: setuptools ~=69.1