mlrun 1.7.0rc14__py3-none-any.whl → 1.7.0rc16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.
Files changed (107)
  1. mlrun/__init__.py +10 -1
  2. mlrun/__main__.py +18 -109
  3. mlrun/{runtimes/mpijob/v1alpha1.py → alerts/__init__.py} +2 -16
  4. mlrun/alerts/alert.py +141 -0
  5. mlrun/artifacts/__init__.py +8 -3
  6. mlrun/artifacts/base.py +36 -253
  7. mlrun/artifacts/dataset.py +9 -190
  8. mlrun/artifacts/manager.py +20 -41
  9. mlrun/artifacts/model.py +8 -140
  10. mlrun/artifacts/plots.py +14 -375
  11. mlrun/common/schemas/__init__.py +4 -2
  12. mlrun/common/schemas/alert.py +46 -4
  13. mlrun/common/schemas/api_gateway.py +4 -0
  14. mlrun/common/schemas/artifact.py +15 -0
  15. mlrun/common/schemas/auth.py +2 -0
  16. mlrun/common/schemas/model_monitoring/__init__.py +8 -1
  17. mlrun/common/schemas/model_monitoring/constants.py +40 -4
  18. mlrun/common/schemas/model_monitoring/model_endpoints.py +73 -2
  19. mlrun/common/schemas/project.py +2 -0
  20. mlrun/config.py +7 -4
  21. mlrun/data_types/to_pandas.py +4 -4
  22. mlrun/datastore/base.py +41 -9
  23. mlrun/datastore/datastore_profile.py +54 -4
  24. mlrun/datastore/inmem.py +2 -2
  25. mlrun/datastore/sources.py +43 -2
  26. mlrun/datastore/store_resources.py +2 -6
  27. mlrun/datastore/targets.py +106 -39
  28. mlrun/db/base.py +23 -3
  29. mlrun/db/httpdb.py +101 -47
  30. mlrun/db/nopdb.py +20 -2
  31. mlrun/errors.py +5 -0
  32. mlrun/feature_store/__init__.py +0 -2
  33. mlrun/feature_store/api.py +12 -47
  34. mlrun/feature_store/feature_set.py +9 -0
  35. mlrun/feature_store/retrieval/base.py +9 -4
  36. mlrun/feature_store/retrieval/conversion.py +4 -4
  37. mlrun/feature_store/retrieval/dask_merger.py +2 -0
  38. mlrun/feature_store/retrieval/job.py +2 -0
  39. mlrun/feature_store/retrieval/local_merger.py +2 -0
  40. mlrun/feature_store/retrieval/spark_merger.py +5 -0
  41. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +5 -10
  42. mlrun/launcher/base.py +4 -3
  43. mlrun/launcher/client.py +1 -1
  44. mlrun/lists.py +4 -2
  45. mlrun/model.py +25 -11
  46. mlrun/model_monitoring/__init__.py +1 -1
  47. mlrun/model_monitoring/api.py +41 -18
  48. mlrun/model_monitoring/application.py +5 -305
  49. mlrun/model_monitoring/applications/__init__.py +11 -0
  50. mlrun/model_monitoring/applications/_application_steps.py +157 -0
  51. mlrun/model_monitoring/applications/base.py +282 -0
  52. mlrun/model_monitoring/applications/context.py +214 -0
  53. mlrun/model_monitoring/applications/evidently_base.py +211 -0
  54. mlrun/model_monitoring/applications/histogram_data_drift.py +132 -91
  55. mlrun/model_monitoring/applications/results.py +99 -0
  56. mlrun/model_monitoring/controller.py +3 -1
  57. mlrun/model_monitoring/db/__init__.py +2 -0
  58. mlrun/model_monitoring/db/stores/base/store.py +9 -36
  59. mlrun/model_monitoring/db/stores/sqldb/models/base.py +7 -6
  60. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +63 -110
  61. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +104 -187
  62. mlrun/model_monitoring/db/tsdb/__init__.py +71 -0
  63. mlrun/model_monitoring/db/tsdb/base.py +135 -0
  64. mlrun/model_monitoring/db/tsdb/v3io/__init__.py +15 -0
  65. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +117 -0
  66. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +404 -0
  67. mlrun/model_monitoring/db/v3io_tsdb_reader.py +134 -0
  68. mlrun/model_monitoring/evidently_application.py +6 -118
  69. mlrun/model_monitoring/helpers.py +1 -1
  70. mlrun/model_monitoring/model_endpoint.py +3 -2
  71. mlrun/model_monitoring/stream_processing.py +48 -213
  72. mlrun/model_monitoring/writer.py +101 -121
  73. mlrun/platforms/__init__.py +10 -9
  74. mlrun/platforms/iguazio.py +21 -202
  75. mlrun/projects/operations.py +11 -7
  76. mlrun/projects/pipelines.py +13 -76
  77. mlrun/projects/project.py +73 -45
  78. mlrun/render.py +11 -13
  79. mlrun/run.py +6 -41
  80. mlrun/runtimes/__init__.py +3 -3
  81. mlrun/runtimes/base.py +6 -6
  82. mlrun/runtimes/funcdoc.py +0 -28
  83. mlrun/runtimes/kubejob.py +2 -1
  84. mlrun/runtimes/local.py +1 -1
  85. mlrun/runtimes/mpijob/__init__.py +0 -20
  86. mlrun/runtimes/mpijob/v1.py +1 -1
  87. mlrun/runtimes/nuclio/api_gateway.py +75 -9
  88. mlrun/runtimes/nuclio/function.py +9 -35
  89. mlrun/runtimes/pod.py +16 -36
  90. mlrun/runtimes/remotesparkjob.py +1 -1
  91. mlrun/runtimes/sparkjob/spark3job.py +1 -1
  92. mlrun/runtimes/utils.py +1 -39
  93. mlrun/utils/helpers.py +72 -71
  94. mlrun/utils/notifications/notification/base.py +1 -1
  95. mlrun/utils/notifications/notification/slack.py +12 -5
  96. mlrun/utils/notifications/notification/webhook.py +1 -1
  97. mlrun/utils/notifications/notification_pusher.py +134 -14
  98. mlrun/utils/version/version.json +2 -2
  99. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/METADATA +4 -3
  100. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/RECORD +105 -95
  101. mlrun/kfpops.py +0 -865
  102. mlrun/platforms/other.py +0 -305
  103. /mlrun/{runtimes → common/runtimes}/constants.py +0 -0
  104. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/LICENSE +0 -0
  105. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/WHEEL +0 -0
  106. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/entry_points.txt +0 -0
  107. {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/top_level.txt +0 -0
mlrun/projects/pipelines.py CHANGED
@@ -20,9 +20,10 @@ import tempfile
 import typing
 import uuid

-import kfp.compiler
-from kfp import dsl
+import mlrun_pipelines.common.models
+import mlrun_pipelines.patcher
 from kfp.compiler import compiler
+from mlrun_pipelines.helpers import new_pipe_metadata

 import mlrun
 import mlrun.common.schemas
@@ -31,7 +32,6 @@ from mlrun.errors import err_to_str
 from mlrun.utils import (
     get_ui_url,
     logger,
-    new_pipe_metadata,
     normalize_workflow_name,
     retry_until_successful,
 )
@@ -301,72 +301,6 @@ def _enrich_kfp_pod_security_context(kfp_pod_template, function):
     }


-# When we run pipelines, the kfp.compile.Compile.compile() method takes the decorated function with @dsl.pipeline and
-# converts it to a k8s object. As part of the flow in the Compile.compile() method,
-# we call _create_and_write_workflow, which builds a dictionary from the workflow and then writes it to a file.
-# Unfortunately, the kfp sdk does not provide an API for configuring priority_class_name and other attributes.
-# I ran across the following problem when seeking for a method to set the priority_class_name:
-# https://github.com/kubeflow/pipelines/issues/3594
-# When we patch the _create_and_write_workflow, we can eventually obtain the dictionary right before we write it
-# to a file and enrich it with argo compatible fields, make sure you looking for the same argo version we use
-# https://github.com/argoproj/argo-workflows/blob/release-2.7/pkg/apis/workflow/v1alpha1/workflow_types.go
-def _create_enriched_mlrun_workflow(
-    self,
-    pipeline_func: typing.Callable,
-    pipeline_name: typing.Optional[str] = None,
-    pipeline_description: typing.Optional[str] = None,
-    params_list: typing.Optional[list[dsl.PipelineParam]] = None,
-    pipeline_conf: typing.Optional[dsl.PipelineConf] = None,
-):
-    """Call internal implementation of create_workflow and enrich with mlrun functions attributes"""
-    workflow = self._original_create_workflow(
-        pipeline_func, pipeline_name, pipeline_description, params_list, pipeline_conf
-    )
-    # We don't want to interrupt the original flow and don't know all the scenarios the function could be called.
-    # that's why we have try/except on all the code of the enrichment and also specific try/except for errors that
-    # we know can be raised.
-    try:
-        functions = []
-        if pipeline_context.functions:
-            try:
-                functions = pipeline_context.functions.values()
-            except Exception as err:
-                logger.debug(
-                    "Unable to retrieve project functions, not enriching workflow with mlrun",
-                    error=err_to_str(err),
-                )
-                return workflow
-
-        # enrich each pipeline step with your desire k8s attribute
-        for kfp_step_template in workflow["spec"]["templates"]:
-            if kfp_step_template.get("container"):
-                for function_obj in functions:
-                    # we condition within each function since the comparison between the function and
-                    # the kfp pod may change depending on the attribute type.
-                    _set_function_attribute_on_kfp_pod(
-                        kfp_step_template,
-                        function_obj,
-                        "PriorityClassName",
-                        "priority_class_name",
-                    )
-                    _enrich_kfp_pod_security_context(
-                        kfp_step_template,
-                        function_obj,
-                    )
-    except mlrun.errors.MLRunInvalidArgumentError:
-        raise
-    except Exception as err:
-        logger.debug(
-            "Something in the enrichment of kfp pods failed", error=err_to_str(err)
-        )
-    return workflow
-
-
-# patching function as class method
-kfp.compiler.Compiler._original_create_workflow = kfp.compiler.Compiler._create_workflow
-kfp.compiler.Compiler._create_workflow = _create_enriched_mlrun_workflow
-
-
 def get_db_function(project, key) -> mlrun.runtimes.BaseRuntime:
     project_instance, name, tag, hash_key = parse_versioned_object_uri(
         key, project.metadata.name
@@ -457,7 +391,10 @@ class _PipelineRunStatus:

     @property
     def state(self):
-        if self._state not in mlrun.run.RunStatuses.stable_statuses():
+        if (
+            self._state
+            not in mlrun_pipelines.common.models.RunStatuses.stable_statuses()
+        ):
             self._state = self._engine.get_state(self.run_id, self.project)
         return self._state

@@ -754,7 +691,7 @@ class _LocalRunner(_PipelineRunner):
         err = None
         try:
             workflow_handler(**workflow_spec.args)
-            state = mlrun.run.RunStatuses.succeeded
+            state = mlrun_pipelines.common.models.RunStatuses.succeeded
         except Exception as exc:
             err = exc
             logger.exception("Workflow run failed")
@@ -762,7 +699,7 @@ class _LocalRunner(_PipelineRunner):
                 f":x: Workflow {workflow_id} run failed!, error: {err_to_str(exc)}",
                 mlrun.common.schemas.NotificationSeverity.ERROR,
             )
-            state = mlrun.run.RunStatuses.failed
+            state = mlrun_pipelines.common.models.RunStatuses.failed
         mlrun.run.wait_for_runs_completion(pipeline_context.runs_map.values())
         project.notifiers.push_pipeline_run_results(
             pipeline_context.runs_map.values(), state=state
@@ -921,9 +858,9 @@ class _RemoteRunner(_PipelineRunner):
                 f":x: Workflow {workflow_name} run failed!, error: {err_to_str(exc)}",
                 mlrun.common.schemas.NotificationSeverity.ERROR,
             )
-            state = mlrun.run.RunStatuses.failed
+            state = mlrun_pipelines.common.models.RunStatuses.failed
         else:
-            state = mlrun.run.RunStatuses.succeeded
+            state = mlrun_pipelines.common.models.RunStatuses.succeeded
         project.notifiers.push_pipeline_start_message(
             project.metadata.name,
         )
@@ -1116,7 +1053,7 @@ def load_and_run(
     context.log_result(key="workflow_id", value=run.run_id)
     context.log_result(key="engine", value=run._engine.engine, commit=True)

-    if run.state == mlrun.run.RunStatuses.failed:
+    if run.state == mlrun_pipelines.common.models.RunStatuses.failed:
        raise RuntimeError(f"Workflow {workflow_log_message} failed") from run.exc

    if wait_for_completion:
@@ -1131,7 +1068,7 @@ def load_and_run(

     pipeline_state, _, _ = project.get_run_status(run)
     context.log_result(key="workflow_state", value=pipeline_state, commit=True)
-    if pipeline_state != mlrun.run.RunStatuses.succeeded:
+    if pipeline_state != mlrun_pipelines.common.models.RunStatuses.succeeded:
         raise RuntimeError(
             f"Workflow {workflow_log_message} failed, state={pipeline_state}"
         )
mlrun/projects/project.py CHANGED
@@ -31,23 +31,28 @@ from typing import Callable, Optional, Union
 import dotenv
 import git
 import git.exc
-import kfp
+import mlrun_pipelines.common.models
+import mlrun_pipelines.mounts
 import nuclio.utils
 import requests
 import yaml
+from mlrun_pipelines.models import PipelineNodeWrapper

 import mlrun.common.helpers
+import mlrun.common.schemas.artifact
 import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.db
 import mlrun.errors
 import mlrun.k8s_utils
+import mlrun.model_monitoring.applications as mm_app
 import mlrun.runtimes
 import mlrun.runtimes.nuclio.api_gateway
 import mlrun.runtimes.pod
 import mlrun.runtimes.utils
 import mlrun.serving
 import mlrun.utils.regex
-from mlrun.common.schemas import AlertConfig
+from mlrun.alerts.alert import AlertConfig
+from mlrun.common.schemas.alert import AlertTemplate
 from mlrun.datastore.datastore_profile import DatastoreProfile, DatastoreProfile2Json
 from mlrun.runtimes.nuclio.function import RemoteRuntime

@@ -56,14 +61,10 @@ from ..artifacts.manager import ArtifactManager, dict_to_artifact, extend_artifa
 from ..datastore import store_manager
 from ..features import Feature
 from ..model import EntrypointParam, ImageBuilder, ModelObj
-from ..model_monitoring.application import (
-    ModelMonitoringApplicationBase,
-)
 from ..run import code_to_function, get_object, import_function, new_function
 from ..secrets import SecretsStore
 from ..utils import (
     is_ipython,
-    is_legacy_artifact,
     is_relative_path,
     is_yaml_path,
     logger,
@@ -991,13 +992,9 @@ class ProjectSpec(ModelObj):
             if not isinstance(artifact, dict) and not hasattr(artifact, "to_dict"):
                 raise ValueError("artifacts must be a dict or class")
             if isinstance(artifact, dict):
-                # Support legacy artifacts
-                if is_legacy_artifact(artifact) or _is_imported_artifact(artifact):
-                    key = artifact.get("key")
-                else:
-                    key = artifact.get("metadata").get("key", "")
+                key = artifact.get("metadata", {}).get("key", "")
                 if not key:
-                    raise ValueError('artifacts "key" must be specified')
+                    raise ValueError('artifacts "metadata.key" must be specified')
             else:
                 key = artifact.key
                 artifact = artifact.to_dict()
@@ -1593,6 +1590,23 @@ class MlrunProject(ModelObj):
         )
         return item

+    def delete_artifact(
+        self,
+        item: Artifact,
+        deletion_strategy: mlrun.common.schemas.artifact.ArtifactsDeletionStrategies = (
+            mlrun.common.schemas.artifact.ArtifactsDeletionStrategies.metadata_only
+        ),
+        secrets: dict = None,
+    ):
+        """Delete an artifact object in the DB and optionally delete the artifact data
+
+        :param item: Artifact object (can be any type, such as dataset, model, feature store).
+        :param deletion_strategy: The artifact deletion strategy types.
+        :param secrets: Credentials needed to access the artifact data.
+        """
+        am = self._get_artifact_manager()
+        am.delete_artifact(item, deletion_strategy, secrets)
+
     def log_dataset(
         self,
         key,
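Note: the hunk above adds a `delete_artifact` method to `MlrunProject`. A minimal usage sketch (the project and artifact names are illustrative; `metadata_only` is the only strategy value visible in this diff, and it is the default):

    import mlrun
    import mlrun.common.schemas.artifact

    project = mlrun.get_or_create_project("my-project")
    artifact = project.get_artifact("my-dataset")  # a previously logged artifact

    # Delete only the DB record; other ArtifactsDeletionStrategies values may
    # also remove the backing data.
    project.delete_artifact(
        item=artifact,
        deletion_strategy=mlrun.common.schemas.artifact.ArtifactsDeletionStrategies.metadata_only,
    )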
@@ -1885,7 +1899,11 @@ class MlrunProject(ModelObj):
     def set_model_monitoring_function(
         self,
         func: typing.Union[str, mlrun.runtimes.BaseRuntime, None] = None,
-        application_class: typing.Union[str, ModelMonitoringApplicationBase] = None,
+        application_class: typing.Union[
+            str,
+            mm_app.ModelMonitoringApplicationBase,
+            mm_app.ModelMonitoringApplicationBaseV2,
+        ] = None,
         name: str = None,
         image: str = None,
         handler=None,
@@ -1923,11 +1941,6 @@ class MlrunProject(ModelObj):
                                   monitoring application's constructor.
         """

-        if name in mm_constants.MonitoringFunctionNames.list():
-            raise mlrun.errors.MLRunInvalidArgumentError(
-                f"An application cannot have the following names: "
-                f"{mm_constants.MonitoringFunctionNames.list()}"
-            )
         function_object: RemoteRuntime = None
         (
             resolved_function_name,
@@ -1953,7 +1966,7 @@ class MlrunProject(ModelObj):
     def create_model_monitoring_function(
         self,
         func: str = None,
-        application_class: typing.Union[str, ModelMonitoringApplicationBase] = None,
+        application_class: typing.Union[
+            str,
+            mm_app.ModelMonitoringApplicationBase,
+            mm_app.ModelMonitoringApplicationBaseV2,
+        ] = None,
         name: str = None,
         image: str = None,
         handler: str = None,
@@ -2006,7 +2023,10 @@ class MlrunProject(ModelObj):
         self,
         func: typing.Union[str, mlrun.runtimes.BaseRuntime, None] = None,
         application_class: typing.Union[
-            str, ModelMonitoringApplicationBase, None
+            str,
+            mm_app.ModelMonitoringApplicationBase,
+            mm_app.ModelMonitoringApplicationBaseV2,
+            None,
         ] = None,
         name: typing.Optional[str] = None,
         image: typing.Optional[str] = None,
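Note: with these signature changes, `set_model_monitoring_function`, `create_model_monitoring_function`, and `update_model_monitoring_function` also accept the new `ModelMonitoringApplicationBaseV2` classes from `mlrun.model_monitoring.applications` (and the client-side reserved-name check is dropped). A hedged sketch of registering an application by class name; the file and class names are illustrative:

    import mlrun

    project = mlrun.get_or_create_project("my-project")

    # "MyDriftApp" would be a user-defined subclass of
    # ModelMonitoringApplicationBaseV2 in monitoring_app.py.
    project.set_model_monitoring_function(
        func="monitoring_app.py",
        application_class="MyDriftApp",
        name="my-drift-app",
        image="mlrun/mlrun",
    )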
@@ -2869,7 +2889,7 @@ class MlrunProject(ModelObj):
                          (which will be converted to the class using its `from_crontab` constructor),
                          see this link for help:
                          https://apscheduler.readthedocs.io/en/3.x/modules/triggers/cron.html#module-apscheduler.triggers.cron
-                         for using the pre-defined workflow's schedule, set `schedule=True`
+                         For using the pre-defined workflow's schedule, set `schedule=True`
        :param timeout: Timeout in seconds to wait for pipeline completion (watch will be activated)
        :param source: Source to use instead of the actual `project.spec.source` (used when engine is remote).
                       Can be a one of:
@@ -2880,10 +2900,11 @@ class MlrunProject(ModelObj):
                       For other engines the source is used to validate that the code is up-to-date.
        :param cleanup_ttl:
                       Pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the
-                      Workflow and all its resources are deleted)
+                      workflow and all its resources are deleted)
        :param notifications:
                       List of notifications to send for workflow completion
-       :returns: Run id
+
+       :returns: ~py:class:`~mlrun.projects.pipelines._PipelineRunStatus` instance
        """

        arguments = arguments or {}
@@ -2957,7 +2978,7 @@ class MlrunProject(ModelObj):
             notifications=notifications,
         )
         # run is None when scheduling
-        if run and run.state == mlrun.run.RunStatuses.failed:
+        if run and run.state == mlrun_pipelines.common.models.RunStatuses.failed:
             return run
         if not workflow_spec.schedule:
             # Failure and schedule messages already logged
@@ -3134,7 +3155,7 @@ class MlrunProject(ModelObj):
         notifications: list[mlrun.model.Notification] = None,
         returns: Optional[list[Union[str, dict[str, str]]]] = None,
         builder_env: Optional[dict] = None,
-    ) -> typing.Union[mlrun.model.RunObject, kfp.dsl.ContainerOp]:
+    ) -> typing.Union[mlrun.model.RunObject, PipelineNodeWrapper]:
         """Run a local or remote task as part of a local/kubeflow pipeline

         example (use with project)::
@@ -3190,7 +3211,7 @@ class MlrunProject(ModelObj):
                             artifact type can be given there. The artifact key must appear in the dictionary as
                             "key": "the_key".
        :param builder_env: env vars dict for source archive config/credentials e.g. builder_env={"GIT_TOKEN": token}
-       :return: MLRun RunObject or KubeFlow containerOp
+       :return: MLRun RunObject or PipelineNodeWrapper
        """
        return run_function(
            function,
@@ -3233,7 +3254,7 @@ class MlrunProject(ModelObj):
         requirements_file: str = None,
         extra_args: str = None,
         force_build: bool = False,
-    ) -> typing.Union[BuildStatus, kfp.dsl.ContainerOp]:
+    ) -> typing.Union[BuildStatus, PipelineNodeWrapper]:
         """deploy ML function, build container with its dependencies

         :param function: name of the function (in the project) or function object
@@ -3344,7 +3365,6 @@ class MlrunProject(ModelObj):
         image: str = None,
         set_as_default: bool = True,
         with_mlrun: bool = None,
-        skip_deployed: bool = False,
         base_image: str = None,
         commands: list = None,
         secret_name: str = None,
@@ -3355,7 +3375,7 @@ class MlrunProject(ModelObj):
         requirements_file: str = None,
         extra_args: str = None,
         target_dir: str = None,
-    ) -> typing.Union[BuildStatus, kfp.dsl.ContainerOp]:
+    ) -> typing.Union[BuildStatus, PipelineNodeWrapper]:
         """Builder docker image for the project, based on the project's build config. Parameters allow to override
         the build config.
         If the project has a source configured and pull_at_runtime is not configured, this source will be cloned to the
@@ -3365,7 +3385,6 @@ class MlrunProject(ModelObj):
                             used. If not set, the `mlconf.default_project_image_name` value will be used
        :param set_as_default: set `image` to be the project's default image (default False)
        :param with_mlrun: add the current mlrun package to the container build
-       :param skip_deployed: *Deprecated* parameter is ignored
        :param base_image: base image name/path (commands and source code will be added to it) defaults to
                           mlrun.mlconf.default_base_image
        :param commands: list of docker build (RUN) commands e.g. ['pip install pandas']
@@ -3390,14 +3409,6 @@ class MlrunProject(ModelObj):
             base_image=base_image,
         )

-        if skip_deployed:
-            warnings.warn(
-                "The 'skip_deployed' parameter is deprecated and will be removed in 1.7.0. "
-                "This parameter is ignored.",
-                # TODO: remove in 1.7.0
-                FutureWarning,
-            )
-
         if not overwrite_build_params:
             # TODO: change overwrite_build_params default to True in 1.8.0
             warnings.warn(
@@ -3475,7 +3486,7 @@ class MlrunProject(ModelObj):
         verbose: bool = None,
         builder_env: dict = None,
         mock: bool = None,
-    ) -> typing.Union[DeployStatus, kfp.dsl.ContainerOp]:
+    ) -> typing.Union[DeployStatus, PipelineNodeWrapper]:
         """deploy real-time (nuclio based) functions

         :param function: name of the function (in the project) or function object
@@ -3660,9 +3671,7 @@ class MlrunProject(ModelObj):
        :returns: List of function objects.
        """

-        model_monitoring_labels_list = [
-            f"{mm_constants.ModelMonitoringAppLabel.KEY}={mm_constants.ModelMonitoringAppLabel.VAL}"
-        ]
+        model_monitoring_labels_list = [str(mm_constants.ModelMonitoringAppLabel())]
         if labels:
             model_monitoring_labels_list += labels
         return self.list_functions(
@@ -3889,7 +3898,9 @@ class MlrunProject(ModelObj):

         mlrun.db.get_run_db().delete_api_gateway(name=name, project=self.name)

-    def store_alert_config(self, alert_data: AlertConfig, alert_name=None):
+    def store_alert_config(
+        self, alert_data: AlertConfig, alert_name=None
+    ) -> AlertConfig:
         """
         Create/modify an alert.
         :param alert_data: The data of the alert.
@@ -3899,7 +3910,7 @@ class MlrunProject(ModelObj):
         db = mlrun.db.get_run_db(secrets=self._secrets)
         if alert_name is None:
             alert_name = alert_data.name
-        return db.store_alert_config(alert_name, alert_data.dict(), self.metadata.name)
+        return db.store_alert_config(alert_name, alert_data, project=self.metadata.name)

     def get_alert_config(self, alert_name: str) -> AlertConfig:
         """
@@ -3910,7 +3921,7 @@ class MlrunProject(ModelObj):
         db = mlrun.db.get_run_db(secrets=self._secrets)
         return db.get_alert_config(alert_name, self.metadata.name)

-    def list_alerts_configs(self):
+    def list_alerts_configs(self) -> list[AlertConfig]:
         """
         Retrieve list of alerts of a project.
         :return: All the alerts objects of the project.
@@ -3956,6 +3967,23 @@ class MlrunProject(ModelObj):
             alert_name = alert_data.name
         db.reset_alert_config(alert_name, self.metadata.name)

+    def get_alert_template(self, template_name: str) -> AlertTemplate:
+        """
+        Retrieve a specific alert template.
+        :param template_name: The name of the template to retrieve.
+        :return: The template object.
+        """
+        db = mlrun.db.get_run_db(secrets=self._secrets)
+        return db.get_alert_template(template_name)
+
+    def list_alert_templates(self) -> list[AlertTemplate]:
+        """
+        Retrieve list of all alert templates.
+        :return: All the alert template objects in the database.
+        """
+        db = mlrun.db.get_run_db(secrets=self._secrets)
+        return db.list_alert_templates()
+
     def _run_authenticated_git_action(
         self,
         action: Callable,
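Note: together with the new `mlrun/alerts/alert.py` module listed above, these hunks expose alert templates on the project object, and `store_alert_config` now takes and returns an `AlertConfig` object. A minimal sketch of the new calls; the template name is an assumption for illustration, so prefer a name returned by `list_alert_templates()`:

    import mlrun

    project = mlrun.get_or_create_project("my-project")

    templates = project.list_alert_templates()  # list[AlertTemplate]
    print(templates)

    # Fetch a single template by name ("JobFailed" is illustrative).
    template = project.get_alert_template("JobFailed")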
mlrun/render.py CHANGED
@@ -121,16 +121,8 @@ def artifacts_html(
     html = ""

     for artifact in artifacts:
-        # TODO: remove this in 1.7.0 once we no longer support legacy format
-        if mlrun.utils.is_legacy_artifact(artifact):
-            attribute_value = artifact.get(attribute_name)
-        else:
-            attribute_value = artifact["spec"].get(attribute_name)
-
-        if mlrun.utils.is_legacy_artifact(artifact):
-            key = artifact["key"]
-        else:
-            key = artifact["metadata"]["key"]
+        attribute_value = artifact["spec"].get(attribute_name)
+        key = artifact["metadata"]["key"]

         if not attribute_value:
             mlrun.utils.logger.warning(
@@ -404,12 +396,18 @@ def runs_to_html(
         df.drop("labels", axis=1, inplace=True)
         df.drop("inputs", axis=1, inplace=True)
         df.drop("artifacts", axis=1, inplace=True)
+        df.drop("artifact_uris", axis=1, inplace=True)
     else:
         df["labels"] = df["labels"].apply(dict_html)
         df["inputs"] = df["inputs"].apply(inputs_html)
-        df["artifacts"] = df["artifacts"].apply(
-            lambda artifacts: artifacts_html(artifacts, "target_path"),
-        )
+        if df["artifact_uris"][0]:
+            df["artifact_uris"] = df["artifact_uris"].apply(dict_html)
+            df.drop("artifacts", axis=1, inplace=True)
+        else:
+            df["artifacts"] = df["artifacts"].apply(
+                lambda artifacts: artifacts_html(artifacts, "target_path"),
+            )
+            df.drop("artifact_uris", axis=1, inplace=True)

     def expand_error(x):
         if x["state"] == "error":
mlrun/run.py CHANGED
@@ -29,11 +29,13 @@ from typing import Optional, Union
 import nuclio
 import yaml
 from kfp import Client
+from mlrun_pipelines.common.models import RunStatuses
+from mlrun_pipelines.common.ops import format_summary_from_kfp_run, show_kfp_run
+from mlrun_pipelines.models import PipelineRun

 import mlrun.common.schemas
 import mlrun.errors
 import mlrun.utils.helpers
-from mlrun.kfpops import format_summary_from_kfp_run, show_kfp_run

 from .common.helpers import parse_versioned_object_uri
 from .config import config as mlconf
@@ -47,7 +49,6 @@ from .runtimes import (
     KubejobRuntime,
     LocalRuntime,
     MpiRuntimeV1,
-    MpiRuntimeV1Alpha1,
     RemoteRuntime,
     RemoteSparkRuntime,
     RuntimeKinds,
@@ -69,41 +70,6 @@ from .utils import (
 )


-class RunStatuses:
-    succeeded = "Succeeded"
-    failed = "Failed"
-    skipped = "Skipped"
-    error = "Error"
-    running = "Running"
-
-    @staticmethod
-    def all():
-        return [
-            RunStatuses.succeeded,
-            RunStatuses.failed,
-            RunStatuses.skipped,
-            RunStatuses.error,
-            RunStatuses.running,
-        ]
-
-    @staticmethod
-    def stable_statuses():
-        return [
-            RunStatuses.succeeded,
-            RunStatuses.failed,
-            RunStatuses.skipped,
-            RunStatuses.error,
-        ]
-
-    @staticmethod
-    def transient_statuses():
-        return [
-            status
-            for status in RunStatuses.all()
-            if status not in RunStatuses.stable_statuses()
-        ]
-
-
 def function_to_module(code="", workdir=None, secrets=None, silent=False):
     """Load code, notebook or mlrun function as .py module
     this function can import a local/remote py file or notebook
606
572
  ignored_tags: Optional[str] = None,
607
573
  requirements_file: Optional[str] = "",
608
574
  ) -> Union[
609
- MpiRuntimeV1Alpha1,
610
575
  MpiRuntimeV1,
611
576
  RemoteRuntime,
612
577
  ServingRuntime,
@@ -1023,7 +988,7 @@ def get_pipeline(
1023
988
  :param project: the project of the pipeline run
1024
989
  :param remote: read kfp data from mlrun service (default=True)
1025
990
 
1026
- :return: kfp run dict
991
+ :return: kfp run
1027
992
  """
1028
993
  namespace = namespace or mlconf.namespace
1029
994
  if remote:
@@ -1047,7 +1012,7 @@ def get_pipeline(
1047
1012
  not format_
1048
1013
  or format_ == mlrun.common.schemas.PipelinesFormat.summary.value
1049
1014
  ):
1050
- resp = format_summary_from_kfp_run(resp)
1015
+ resp = format_summary_from_kfp_run(PipelineRun(resp))
1051
1016
 
1052
1017
  show_kfp_run(resp)
1053
1018
  return resp
@@ -1150,7 +1115,7 @@ def wait_for_runs_completion(
1150
1115
  running = []
1151
1116
  for run in runs:
1152
1117
  state = run.state()
1153
- if state in mlrun.runtimes.constants.RunStates.terminal_states():
1118
+ if state in mlrun.common.runtimes.constants.RunStates.terminal_states():
1154
1119
  completed.append(run)
1155
1120
  else:
1156
1121
  running.append(run)
mlrun/runtimes/__init__.py CHANGED
@@ -30,13 +30,13 @@ __all__ = [

 from mlrun.runtimes.utils import resolve_spark_operator_version

+from ..common.runtimes.constants import MPIJobCRDVersions
 from .base import BaseRuntime, RunError, RuntimeClassMode  # noqa
-from .constants import MPIJobCRDVersions
 from .daskjob import DaskCluster  # noqa
 from .databricks_job.databricks_runtime import DatabricksRuntime
 from .kubejob import KubejobRuntime, KubeResource  # noqa
 from .local import HandlerRuntime, LocalRuntime  # noqa
-from .mpijob import MpiRuntimeContainer, MpiRuntimeV1, MpiRuntimeV1Alpha1  # noqa
+from .mpijob import MpiRuntimeV1  # noqa
 from .nuclio import (
     RemoteRuntime,
     ServingRuntime,
@@ -264,7 +264,7 @@ class RuntimeKinds:

 def get_runtime_class(kind: str):
     if kind == RuntimeKinds.mpijob:
-        return MpiRuntimeContainer.selector()
+        return MpiRuntimeV1

     if kind == RuntimeKinds.spark:
         return Spark3Runtime
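Note: with `MpiRuntimeV1Alpha1` and `MpiRuntimeContainer` removed, the "mpijob" kind now always resolves to the v1 MPIJob runtime. A quick sketch of the resolved behavior:

    import mlrun.runtimes

    # Per the get_runtime_class change above, "mpijob" maps directly to
    # MpiRuntimeV1 with no CRD-version selection step.
    cls = mlrun.runtimes.get_runtime_class("mpijob")
    assert cls is mlrun.runtimes.MpiRuntimeV1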
mlrun/runtimes/base.py CHANGED
@@ -21,6 +21,7 @@ from os import environ
 from typing import Callable, Optional, Union

 import requests.exceptions
+from mlrun_pipelines.common.ops import mlrun_op
 from nuclio.build import mlrun_footer

 import mlrun.common.constants
@@ -37,7 +38,6 @@ from mlrun.utils.helpers import generate_object_uri, verify_field_regex
 from ..config import config
 from ..datastore import store_manager
 from ..errors import err_to_str
-from ..kfpops import mlrun_op
 from ..lists import RunList
 from ..model import BaseMetadata, HyperParamOptions, ImageBuilder, ModelObj, RunObject
 from ..utils import (
@@ -469,7 +469,7 @@ class BaseRuntime(ModelObj):
     def _store_function(self, runspec, meta, db):
         meta.labels["kind"] = self.kind
         mlrun.runtimes.utils.enrich_run_labels(
-            meta.labels, [mlrun.runtimes.constants.RunLabels.owner]
+            meta.labels, [mlrun.common.runtimes.constants.RunLabels.owner]
         )
         if runspec.spec.output_path:
             runspec.spec.output_path = runspec.spec.output_path.replace(
@@ -580,9 +580,9 @@ class BaseRuntime(ModelObj):

         elif (
             not was_none
-            and last_state != mlrun.runtimes.constants.RunStates.completed
+            and last_state != mlrun.common.runtimes.constants.RunStates.completed
             and last_state
-            not in mlrun.runtimes.constants.RunStates.error_and_abortion_states()
+            not in mlrun.common.runtimes.constants.RunStates.error_and_abortion_states()
         ):
             try:
                 runtime_cls = mlrun.runtimes.get_runtime_class(kind)
@@ -707,11 +707,11 @@ class BaseRuntime(ModelObj):
                            "key": "the_key".
        :param auto_build: when set to True and the function require build it will be built on the first
                           function run, use only if you dont plan on changing the build config between runs
-       :return: KubeFlow containerOp
+       :return: mlrun_pipelines.models.PipelineNodeWrapper
        """

        # if the function contain KFP PipelineParams (futures) pass the full spec to the
-       # ContainerOp this way KFP will substitute the params with previous step outputs
+       # PipelineNodeWrapper this way KFP will substitute the params with previous step outputs
        if use_db and not self._has_pipeline_param():
            # if the same function is built as part of the pipeline we do not use the versioned function
            # rather the latest function w the same tag so we can pick up the updated image/status