mlrun 1.7.0rc43__py3-none-any.whl → 1.7.0rc56__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic. See the package registry's advisory page for more details.

Files changed (68)
  1. mlrun/__main__.py +4 -2
  2. mlrun/artifacts/manager.py +3 -1
  3. mlrun/common/formatters/__init__.py +1 -0
  4. mlrun/{model_monitoring/application.py → common/formatters/feature_set.py} +20 -6
  5. mlrun/common/formatters/run.py +3 -0
  6. mlrun/common/schemas/__init__.py +1 -0
  7. mlrun/common/schemas/alert.py +11 -11
  8. mlrun/common/schemas/auth.py +5 -0
  9. mlrun/common/schemas/client_spec.py +0 -1
  10. mlrun/common/schemas/model_monitoring/__init__.py +2 -1
  11. mlrun/common/schemas/model_monitoring/constants.py +23 -9
  12. mlrun/common/schemas/model_monitoring/model_endpoints.py +24 -47
  13. mlrun/common/schemas/notification.py +12 -2
  14. mlrun/common/schemas/workflow.py +10 -2
  15. mlrun/config.py +28 -21
  16. mlrun/data_types/data_types.py +6 -1
  17. mlrun/datastore/base.py +4 -4
  18. mlrun/datastore/s3.py +12 -9
  19. mlrun/datastore/storeytargets.py +9 -6
  20. mlrun/db/base.py +3 -0
  21. mlrun/db/httpdb.py +28 -16
  22. mlrun/db/nopdb.py +24 -4
  23. mlrun/errors.py +7 -1
  24. mlrun/execution.py +40 -7
  25. mlrun/feature_store/api.py +1 -0
  26. mlrun/feature_store/retrieval/spark_merger.py +7 -7
  27. mlrun/frameworks/_common/plan.py +3 -3
  28. mlrun/frameworks/_ml_common/plan.py +1 -1
  29. mlrun/frameworks/parallel_coordinates.py +2 -3
  30. mlrun/launcher/client.py +6 -6
  31. mlrun/model.py +29 -0
  32. mlrun/model_monitoring/api.py +1 -12
  33. mlrun/model_monitoring/applications/__init__.py +1 -2
  34. mlrun/model_monitoring/applications/_application_steps.py +5 -1
  35. mlrun/model_monitoring/applications/base.py +2 -182
  36. mlrun/model_monitoring/applications/context.py +2 -9
  37. mlrun/model_monitoring/applications/evidently_base.py +0 -74
  38. mlrun/model_monitoring/applications/histogram_data_drift.py +2 -2
  39. mlrun/model_monitoring/applications/results.py +4 -4
  40. mlrun/model_monitoring/controller.py +46 -209
  41. mlrun/model_monitoring/db/stores/base/store.py +1 -0
  42. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +15 -1
  43. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +12 -0
  44. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +17 -16
  45. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +49 -39
  46. mlrun/model_monitoring/helpers.py +13 -15
  47. mlrun/model_monitoring/writer.py +3 -1
  48. mlrun/projects/operations.py +11 -8
  49. mlrun/projects/pipelines.py +35 -16
  50. mlrun/projects/project.py +52 -24
  51. mlrun/render.py +3 -3
  52. mlrun/runtimes/daskjob.py +1 -1
  53. mlrun/runtimes/kubejob.py +6 -6
  54. mlrun/runtimes/nuclio/api_gateway.py +12 -0
  55. mlrun/runtimes/nuclio/application/application.py +3 -3
  56. mlrun/runtimes/nuclio/function.py +41 -0
  57. mlrun/runtimes/nuclio/serving.py +2 -2
  58. mlrun/runtimes/pod.py +19 -13
  59. mlrun/serving/server.py +2 -0
  60. mlrun/utils/helpers.py +62 -16
  61. mlrun/utils/version/version.json +2 -2
  62. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc56.dist-info}/METADATA +126 -44
  63. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc56.dist-info}/RECORD +67 -68
  64. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc56.dist-info}/WHEEL +1 -1
  65. mlrun/model_monitoring/evidently_application.py +0 -20
  66. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc56.dist-info}/LICENSE +0 -0
  67. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc56.dist-info}/entry_points.txt +0 -0
  68. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc56.dist-info}/top_level.txt +0 -0
mlrun/datastore/s3.py CHANGED
@@ -36,6 +36,7 @@ class S3Store(DataStore):
36
36
 
37
37
  access_key_id = self._get_secret_or_env("AWS_ACCESS_KEY_ID")
38
38
  secret_key = self._get_secret_or_env("AWS_SECRET_ACCESS_KEY")
39
+ token_file = self._get_secret_or_env("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE")
39
40
  endpoint_url = self._get_secret_or_env("S3_ENDPOINT_URL")
40
41
  force_non_anonymous = self._get_secret_or_env("S3_NON_ANONYMOUS")
41
42
  profile_name = self._get_secret_or_env("AWS_PROFILE")
@@ -94,14 +95,15 @@ class S3Store(DataStore):
94
95
  self.s3 = boto3.resource(
95
96
  "s3", region_name=region, endpoint_url=endpoint_url
96
97
  )
97
- # If not using credentials, boto will still attempt to sign the requests, and will fail any operations
98
- # due to no credentials found. These commands disable signing and allow anonymous mode (same as
99
- # anon in the storage_options when working with fsspec).
100
- from botocore.handlers import disable_signing
101
-
102
- self.s3.meta.client.meta.events.register(
103
- "choose-signer.s3.*", disable_signing
104
- )
98
+ if not token_file:
99
+ # If not using credentials, boto will still attempt to sign the requests, and will fail any operations
100
+ # due to no credentials found. These commands disable signing and allow anonymous mode (same as
101
+ # anon in the storage_options when working with fsspec).
102
+ from botocore.handlers import disable_signing
103
+
104
+ self.s3.meta.client.meta.events.register(
105
+ "choose-signer.s3.*", disable_signing
106
+ )
105
107
 
106
108
  def get_spark_options(self):
107
109
  res = {}
@@ -139,6 +141,7 @@ class S3Store(DataStore):
139
141
  endpoint_url = self._get_secret_or_env("S3_ENDPOINT_URL")
140
142
  access_key_id = self._get_secret_or_env("AWS_ACCESS_KEY_ID")
141
143
  secret = self._get_secret_or_env("AWS_SECRET_ACCESS_KEY")
144
+ token_file = self._get_secret_or_env("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE")
142
145
 
143
146
  if self._temp_credentials:
144
147
  access_key_id = self._temp_credentials["AccessKeyId"]
@@ -148,7 +151,7 @@ class S3Store(DataStore):
148
151
  token = None
149
152
 
150
153
  storage_options = dict(
151
- anon=not (force_non_anonymous or (access_key_id and secret)),
154
+ anon=not (force_non_anonymous or (access_key_id and secret) or token_file),
152
155
  key=access_key_id,
153
156
  secret=secret,
154
157
  token=token,
@@ -19,6 +19,7 @@ import mlrun
19
19
  import mlrun.model_monitoring.helpers
20
20
  from mlrun.datastore.base import DataStore
21
21
 
22
+ from ..platforms.iguazio import parse_path
22
23
  from .utils import (
23
24
  parse_kafka_url,
24
25
  )
@@ -82,15 +83,17 @@ class StreamStoreyTarget(storey.StreamTarget):
82
83
  def __init__(self, *args, **kwargs):
83
84
  args = list(args)
84
85
 
85
- path = args[0] if args else kwargs.get("stream_path")
86
- endpoint, storage_options = get_url_and_storage_options(path)
86
+ uri = args[0] if args else kwargs.get("stream_path")
87
87
 
88
- if not path:
88
+ if not uri:
89
89
  raise mlrun.errors.MLRunInvalidArgumentError("StreamTarget requires a path")
90
90
 
91
+ _, storage_options = get_url_and_storage_options(uri)
92
+ endpoint, path = parse_path(uri)
93
+
91
94
  access_key = storage_options.get("v3io_access_key")
92
- storage = (
93
- V3ioDriver(webapi=endpoint or mlrun.mlconf.v3io_api, access_key=access_key),
95
+ storage = V3ioDriver(
96
+ webapi=endpoint or mlrun.mlconf.v3io_api, access_key=access_key
94
97
  )
95
98
 
96
99
  if storage_options:
@@ -98,7 +101,7 @@ class StreamStoreyTarget(storey.StreamTarget):
98
101
  if args:
99
102
  args[0] = endpoint
100
103
  if "stream_path" in kwargs:
101
- kwargs["stream_path"] = endpoint
104
+ kwargs["stream_path"] = path
102
105
 
103
106
  super().__init__(*args, **kwargs)
104
107
 
mlrun/db/base.py CHANGED
@@ -395,6 +395,9 @@ class RunDBInterface(ABC):
395
395
  partition_order: Union[
396
396
  mlrun.common.schemas.OrderType, str
397
397
  ] = mlrun.common.schemas.OrderType.desc,
398
+ format_: Union[
399
+ str, mlrun.common.formatters.FeatureSetFormat
400
+ ] = mlrun.common.formatters.FeatureSetFormat.full,
398
401
  ) -> list[dict]:
399
402
  pass
400
403
 
mlrun/db/httpdb.py CHANGED
@@ -525,10 +525,6 @@ class HTTPRunDB(RunDBInterface):
525
525
  server_cfg.get("external_platform_tracking")
526
526
  or config.external_platform_tracking
527
527
  )
528
- config.model_endpoint_monitoring.store_type = (
529
- server_cfg.get("model_endpoint_monitoring_store_type")
530
- or config.model_endpoint_monitoring.store_type
531
- )
532
528
  config.model_endpoint_monitoring.endpoint_store_connection = (
533
529
  server_cfg.get("model_endpoint_monitoring_endpoint_store_connection")
534
530
  or config.model_endpoint_monitoring.endpoint_store_connection
@@ -1374,20 +1370,14 @@ class HTTPRunDB(RunDBInterface):
1374
1370
  :returns: :py:class:`~mlrun.common.schemas.GroupedByProjectRuntimeResourcesOutput` listing the runtime resources
1375
1371
  that were removed.
1376
1372
  """
1377
- if grace_period is None:
1378
- grace_period = config.runtime_resources_deletion_grace_period
1379
- logger.info(
1380
- "Using default grace period for runtime resources deletion",
1381
- grace_period=grace_period,
1382
- )
1383
-
1384
1373
  params = {
1385
1374
  "label-selector": label_selector,
1386
1375
  "kind": kind,
1387
1376
  "object-id": object_id,
1388
1377
  "force": force,
1389
- "grace-period": grace_period,
1390
1378
  }
1379
+ if grace_period is not None:
1380
+ params["grace-period"] = grace_period
1391
1381
  error = "Failed deleting runtime resources"
1392
1382
  project_path = project if project else "*"
1393
1383
  response = self.api_call(
@@ -2245,6 +2235,9 @@ class HTTPRunDB(RunDBInterface):
2245
2235
  partition_order: Union[
2246
2236
  mlrun.common.schemas.OrderType, str
2247
2237
  ] = mlrun.common.schemas.OrderType.desc,
2238
+ format_: Union[
2239
+ str, mlrun.common.formatters.FeatureSetFormat
2240
+ ] = mlrun.common.formatters.FeatureSetFormat.full,
2248
2241
  ) -> list[FeatureSet]:
2249
2242
  """Retrieve a list of feature-sets matching the criteria provided.
2250
2243
 
@@ -2262,6 +2255,9 @@ class HTTPRunDB(RunDBInterface):
2262
2255
  :param partition_sort_by: What field to sort the results by, within each partition defined by `partition_by`.
2263
2256
  Currently the only allowed value are `created` and `updated`.
2264
2257
  :param partition_order: Order of sorting within partitions - `asc` or `desc`. Default is `desc`.
2258
+ :param format_: Format of the results. Possible values are:
2259
+ - ``minimal`` - Return minimal feature set objects, not including stats and preview for each feature set.
2260
+ - ``full`` - Return full feature set objects.
2265
2261
  :returns: List of matching :py:class:`~mlrun.feature_store.FeatureSet` objects.
2266
2262
  """
2267
2263
 
@@ -2274,6 +2270,7 @@ class HTTPRunDB(RunDBInterface):
2274
2270
  "entity": entities or [],
2275
2271
  "feature": features or [],
2276
2272
  "label": labels or [],
2273
+ "format": format_,
2277
2274
  }
2278
2275
  if partition_by:
2279
2276
  params.update(
@@ -2757,7 +2754,7 @@ class HTTPRunDB(RunDBInterface):
2757
2754
  deletion_strategy: Union[
2758
2755
  str, mlrun.common.schemas.DeletionStrategy
2759
2756
  ] = mlrun.common.schemas.DeletionStrategy.default(),
2760
- ):
2757
+ ) -> None:
2761
2758
  """Delete a project.
2762
2759
 
2763
2760
  :param name: Name of the project to delete.
@@ -2776,7 +2773,7 @@ class HTTPRunDB(RunDBInterface):
2776
2773
  "DELETE", f"projects/{name}", error_message, headers=headers, version="v2"
2777
2774
  )
2778
2775
  if response.status_code == http.HTTPStatus.ACCEPTED:
2779
- logger.info("Project is being deleted", project_name=name)
2776
+ logger.info("Waiting for project to be deleted", project_name=name)
2780
2777
  background_task = mlrun.common.schemas.BackgroundTask(**response.json())
2781
2778
  background_task = self._wait_for_background_task_to_reach_terminal_state(
2782
2779
  background_task.metadata.name
@@ -2786,10 +2783,17 @@ class HTTPRunDB(RunDBInterface):
2786
2783
  == mlrun.common.schemas.BackgroundTaskState.succeeded
2787
2784
  ):
2788
2785
  logger.info("Project deleted", project_name=name)
2789
- return
2786
+ elif (
2787
+ background_task.status.state
2788
+ == mlrun.common.schemas.BackgroundTaskState.failed
2789
+ ):
2790
+ logger.error(
2791
+ "Project deletion failed",
2792
+ project_name=name,
2793
+ error=background_task.status.error,
2794
+ )
2790
2795
  elif response.status_code == http.HTTPStatus.NO_CONTENT:
2791
2796
  logger.info("Project deleted", project_name=name)
2792
- return
2793
2797
 
2794
2798
  def store_project(
2795
2799
  self,
@@ -4193,6 +4197,9 @@ class HTTPRunDB(RunDBInterface):
4193
4197
  :param event_data: The data of the event.
4194
4198
  :param project: The project that the event belongs to.
4195
4199
  """
4200
+ if mlrun.mlconf.alerts.mode == mlrun.common.schemas.alert.AlertsModes.disabled:
4201
+ logger.warning("Alerts are disabled, event will not be generated")
4202
+
4196
4203
  project = project or config.default_project
4197
4204
  endpoint_path = f"projects/{project}/events/{name}"
4198
4205
  error_message = f"post event {project}/events/{name}"
@@ -4219,6 +4226,11 @@ class HTTPRunDB(RunDBInterface):
4219
4226
  if not alert_data:
4220
4227
  raise mlrun.errors.MLRunInvalidArgumentError("Alert data must be provided")
4221
4228
 
4229
+ if mlrun.mlconf.alerts.mode == mlrun.common.schemas.alert.AlertsModes.disabled:
4230
+ logger.warning(
4231
+ "Alerts are disabled, alert will still be stored but will not be triggered"
4232
+ )
4233
+
4222
4234
  project = project or config.default_project
4223
4235
  endpoint_path = f"projects/{project}/alerts/{alert_name}"
4224
4236
  error_message = f"put alert {project}/alerts/{alert_name}"
mlrun/db/nopdb.py CHANGED
@@ -21,6 +21,7 @@ import mlrun.common.formatters
21
21
  import mlrun.common.runtimes.constants
22
22
  import mlrun.common.schemas
23
23
  import mlrun.errors
24
+ import mlrun.lists
24
25
 
25
26
  from ..config import config
26
27
  from ..utils import logger
@@ -73,6 +74,22 @@ class NopDB(RunDBInterface):
73
74
  def abort_run(self, uid, project="", iter=0, timeout=45, status_text=""):
74
75
  pass
75
76
 
77
+ def list_runtime_resources(
78
+ self,
79
+ project: Optional[str] = None,
80
+ label_selector: Optional[str] = None,
81
+ kind: Optional[str] = None,
82
+ object_id: Optional[str] = None,
83
+ group_by: Optional[
84
+ mlrun.common.schemas.ListRuntimeResourcesGroupByField
85
+ ] = None,
86
+ ) -> Union[
87
+ mlrun.common.schemas.RuntimeResourcesOutput,
88
+ mlrun.common.schemas.GroupedByJobRuntimeResourcesOutput,
89
+ mlrun.common.schemas.GroupedByProjectRuntimeResourcesOutput,
90
+ ]:
91
+ return []
92
+
76
93
  def read_run(
77
94
  self,
78
95
  uid,
@@ -108,7 +125,7 @@ class NopDB(RunDBInterface):
108
125
  max_partitions: int = 0,
109
126
  with_notifications: bool = False,
110
127
  ):
111
- pass
128
+ return mlrun.lists.RunList()
112
129
 
113
130
  def del_run(self, uid, project="", iter=0):
114
131
  pass
@@ -149,7 +166,7 @@ class NopDB(RunDBInterface):
149
166
  format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
150
167
  limit: int = None,
151
168
  ):
152
- pass
169
+ return mlrun.lists.ArtifactList()
153
170
 
154
171
  def del_artifact(
155
172
  self,
@@ -181,7 +198,7 @@ class NopDB(RunDBInterface):
181
198
  def list_functions(
182
199
  self, name=None, project="", tag="", labels=None, since=None, until=None
183
200
  ):
184
- pass
201
+ return []
185
202
 
186
203
  def tag_objects(
187
204
  self,
@@ -309,6 +326,9 @@ class NopDB(RunDBInterface):
309
326
  partition_order: Union[
310
327
  mlrun.common.schemas.OrderType, str
311
328
  ] = mlrun.common.schemas.OrderType.desc,
329
+ format_: Union[
330
+ str, mlrun.common.formatters.FeatureSetFormat
331
+ ] = mlrun.common.formatters.FeatureSetFormat.full,
312
332
  ) -> list[dict]:
313
333
  pass
314
334
 
@@ -421,7 +441,7 @@ class NopDB(RunDBInterface):
421
441
  ] = mlrun.common.formatters.PipelineFormat.metadata_only,
422
442
  page_size: int = None,
423
443
  ) -> mlrun.common.schemas.PipelinesOutput:
424
- pass
444
+ return mlrun.common.schemas.PipelinesOutput(runs=[], total_size=0)
425
445
 
426
446
  def create_project_secrets(
427
447
  self,
mlrun/errors.py CHANGED
@@ -140,7 +140,13 @@ def err_to_str(err):
140
140
  error_strings.append(err_msg)
141
141
  err = err.__cause__
142
142
 
143
- return ", caused by: ".join(error_strings)
143
+ err_msg = ", caused by: ".join(error_strings)
144
+
145
+ # in case the error string is longer than 32k, we truncate it
146
+ # the truncation takes the first 16k, then the last 16k characters
147
+ if len(err_msg) > 32_000:
148
+ err_msg = err_msg[:16_000] + "...truncated..." + err_msg[-16_000:]
149
+ return err_msg
144
150
 
145
151
 
146
152
  # Specific Errors
mlrun/execution.py CHANGED
@@ -24,6 +24,7 @@ from dateutil import parser
24
24
 
25
25
  import mlrun
26
26
  import mlrun.common.constants as mlrun_constants
27
+ import mlrun.common.formatters
27
28
  from mlrun.artifacts import ModelArtifact
28
29
  from mlrun.datastore.store_resources import get_store_resource
29
30
  from mlrun.errors import MLRunInvalidArgumentError
@@ -634,7 +635,9 @@ class MLClientCtx:
634
635
  :param viewer: Kubeflow viewer type
635
636
  :param target_path: Absolute target path (instead of using artifact_path + local_path)
636
637
  :param src_path: Deprecated, use local_path
637
- :param upload: Upload to datastore (default is True)
638
+ :param upload: Whether to upload the artifact to the datastore. If not provided, and the `local_path`
639
+ is not a directory, upload occurs by default. Directories are uploaded only when this
640
+ flag is explicitly set to `True`.
638
641
  :param labels: A set of key/value labels to tag the artifact with
639
642
  :param format: Optional, format to use (e.g. csv, parquet, ..)
640
643
  :param db_key: The key to use in the artifact DB table, by default its run name + '_' + key
@@ -924,12 +927,42 @@ class MLClientCtx:
924
927
  updates, self._uid, self.project, iter=self._iteration
925
928
  )
926
929
 
927
- def get_notifications(self):
928
- """Get the list of notifications"""
929
- return [
930
- mlrun.model.Notification.from_dict(notification)
931
- for notification in self._notifications
932
- ]
930
+ def get_notifications(self, unmask_secret_params=False):
931
+ """
932
+ Get the list of notifications
933
+
934
+ :param unmask_secret_params: Used as a workaround for sending notification from workflow-runner.
935
+ When used, if the notification will be saved again a new secret will be created.
936
+ """
937
+
938
+ # Get the full notifications from the DB since the run context does not contain the params due to bloating
939
+ run = self._rundb.read_run(
940
+ self.uid, format_=mlrun.common.formatters.RunFormat.notifications
941
+ )
942
+
943
+ notifications = []
944
+ for notification in run["spec"]["notifications"]:
945
+ notification: mlrun.model.Notification = mlrun.model.Notification.from_dict(
946
+ notification
947
+ )
948
+ # Fill the secret params from the project secret. We cannot use the server side internal secret mechanism
949
+ # here as it is the client side.
950
+ # TODO: This is a workaround to allow the notification to get the secret params from project secret
951
+ # instead of getting them from the internal project secret that should be mounted.
952
+ # We should mount the internal project secret that was created to the workflow-runner
953
+ # and get the secret from there.
954
+ if unmask_secret_params:
955
+ try:
956
+ notification.enrich_unmasked_secret_params_from_project_secret()
957
+ notifications.append(notification)
958
+ except mlrun.errors.MLRunValueError:
959
+ logger.warning(
960
+ "Failed to fill secret params from project secret for notification."
961
+ "Skip this notification.",
962
+ notification=notification.name,
963
+ )
964
+
965
+ return notifications
933
966
 
934
967
  def to_dict(self):
935
968
  """Convert the run context to a dictionary"""
@@ -1051,6 +1051,7 @@ def _ingest_with_spark(
1051
1051
 
1052
1052
  spark = (
1053
1053
  pyspark.sql.SparkSession.builder.appName(session_name)
1054
+ .config("spark.driver.memory", "2g")
1054
1055
  .config("spark.sql.session.timeZone", "UTC")
1055
1056
  .getOrCreate()
1056
1057
  )
@@ -188,9 +188,13 @@ class SparkFeatureMerger(BaseMerger):
188
188
 
189
189
  if self.spark is None:
190
190
  # create spark context
191
- self.spark = SparkSession.builder.appName(
192
- f"vector-merger-{self.vector.metadata.name}"
193
- ).getOrCreate()
191
+ self.spark = (
192
+ SparkSession.builder.appName(
193
+ f"vector-merger-{self.vector.metadata.name}"
194
+ )
195
+ .config("spark.driver.memory", "2g")
196
+ .getOrCreate()
197
+ )
194
198
 
195
199
  def _get_engine_df(
196
200
  self,
@@ -202,10 +206,6 @@ class SparkFeatureMerger(BaseMerger):
202
206
  time_column=None,
203
207
  additional_filters=None,
204
208
  ):
205
- mlrun.utils.helpers.additional_filters_warning(
206
- additional_filters, self.__class__
207
- )
208
-
209
209
  source_kwargs = {}
210
210
  if feature_set.spec.passthrough:
211
211
  if not feature_set.spec.source:
@@ -11,12 +11,12 @@
11
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
- #
14
+
15
15
  from abc import ABC, abstractmethod
16
16
 
17
17
  import mlrun
18
18
  from mlrun.artifacts import Artifact
19
- from mlrun.utils.helpers import is_ipython
19
+ from mlrun.utils.helpers import is_jupyter
20
20
 
21
21
 
22
22
  class Plan(ABC):
@@ -84,7 +84,7 @@ class Plan(ABC):
84
84
  return
85
85
 
86
86
  # Call the correct display method according to the kernel:
87
- if is_ipython:
87
+ if is_jupyter:
88
88
  self._gui_display()
89
89
  else:
90
90
  self._cli_display()
@@ -16,7 +16,7 @@ import json
16
16
  from abc import ABC, abstractmethod
17
17
  from enum import Enum
18
18
 
19
- from IPython.core.display import HTML, display
19
+ from IPython.display import HTML, display
20
20
 
21
21
  import mlrun
22
22
 
@@ -18,8 +18,7 @@ from typing import Union
18
18
 
19
19
  import numpy as np
20
20
  import pandas as pd
21
- from IPython.core.display import HTML
22
- from IPython.display import display
21
+ from IPython.display import HTML, display
23
22
  from pandas.api.types import is_numeric_dtype, is_string_dtype
24
23
 
25
24
  import mlrun
@@ -216,7 +215,7 @@ def _show_and_export_html(html: str, show=None, filename=None, runs_list=None):
216
215
  fp.write("</body></html>")
217
216
  else:
218
217
  fp.write(html)
219
- if show or (show is None and mlrun.utils.is_ipython):
218
+ if show or (show is None and mlrun.utils.is_jupyter):
220
219
  display(HTML(html))
221
220
  if runs_list and len(runs_list) <= max_table_rows:
222
221
  display(HTML(html_table))
mlrun/launcher/client.py CHANGED
@@ -14,7 +14,7 @@
14
14
  import abc
15
15
  from typing import Optional
16
16
 
17
- import IPython
17
+ import IPython.display
18
18
 
19
19
  import mlrun.common.constants as mlrun_constants
20
20
  import mlrun.errors
@@ -22,7 +22,7 @@ import mlrun.launcher.base as launcher
22
22
  import mlrun.lists
23
23
  import mlrun.model
24
24
  import mlrun.runtimes
25
- from mlrun.utils import logger
25
+ import mlrun.utils
26
26
 
27
27
 
28
28
  class ClientBaseLauncher(launcher.BaseLauncher, abc.ABC):
@@ -128,10 +128,10 @@ class ClientBaseLauncher(launcher.BaseLauncher, abc.ABC):
128
128
  if result:
129
129
  results_tbl.append(result)
130
130
  else:
131
- logger.info("no returned result (job may still be in progress)")
131
+ mlrun.utils.logger.info("no returned result (job may still be in progress)")
132
132
  results_tbl.append(run.to_dict())
133
133
 
134
- if mlrun.utils.is_ipython and mlrun.mlconf.ipython_widget:
134
+ if mlrun.utils.is_jupyter and mlrun.mlconf.ipython_widget:
135
135
  results_tbl.show()
136
136
  print()
137
137
  ui_url = mlrun.utils.get_ui_url(project, uid)
@@ -147,9 +147,9 @@ class ClientBaseLauncher(launcher.BaseLauncher, abc.ABC):
147
147
  project_flag = f"-p {project}" if project else ""
148
148
  info_cmd = f"mlrun get run {uid} {project_flag}"
149
149
  logs_cmd = f"mlrun logs {uid} {project_flag}"
150
- logger.info(
150
+ mlrun.utils.logger.info(
151
151
  "To track results use the CLI", info_cmd=info_cmd, logs_cmd=logs_cmd
152
152
  )
153
153
  ui_url = mlrun.utils.get_ui_url(project, uid)
154
154
  if ui_url:
155
- logger.info("Or click for UI", ui_url=ui_url)
155
+ mlrun.utils.logger.info("Or click for UI", ui_url=ui_url)
mlrun/model.py CHANGED
@@ -774,6 +774,23 @@ class Notification(ModelObj):
774
774
 
775
775
  notification_class.validate_params(secret_params | params)
776
776
 
777
+ def enrich_unmasked_secret_params_from_project_secret(self):
778
+ """
779
+ Fill the notification secret params from the project secret.
780
+ We are using this function instead of unmask_secret_params_from_project_secret when we run inside the
781
+ workflow runner pod that doesn't have access to the k8s secrets (but have access to the project secret)
782
+ """
783
+ secret = self.secret_params.get("secret")
784
+ if secret:
785
+ secret_value = mlrun.get_secret_or_env(secret)
786
+ if secret_value:
787
+ try:
788
+ self.secret_params = json.loads(secret_value)
789
+ except ValueError as exc:
790
+ raise mlrun.errors.MLRunValueError(
791
+ "Failed to parse secret value"
792
+ ) from exc
793
+
777
794
  @staticmethod
778
795
  def validate_notification_uniqueness(notifications: list["Notification"]):
779
796
  """Validate that all notifications in the list are unique by name"""
@@ -2044,6 +2061,8 @@ class DataSource(ModelObj):
2044
2061
  ]
2045
2062
  kind = None
2046
2063
 
2064
+ _fields_to_serialize = ["start_time", "end_time"]
2065
+
2047
2066
  def __init__(
2048
2067
  self,
2049
2068
  name: str = None,
@@ -2072,6 +2091,16 @@ class DataSource(ModelObj):
2072
2091
  def set_secrets(self, secrets):
2073
2092
  self._secrets = secrets
2074
2093
 
2094
+ def _serialize_field(
2095
+ self, struct: dict, field_name: str = None, strip: bool = False
2096
+ ) -> typing.Any:
2097
+ value = super()._serialize_field(struct, field_name, strip)
2098
+ # We pull the field from self and not from struct because it was excluded from the struct when looping over
2099
+ # the fields to save.
2100
+ if field_name in ("start_time", "end_time") and isinstance(value, datetime):
2101
+ return value.isoformat()
2102
+ return value
2103
+
2075
2104
 
2076
2105
  class DataTargetBase(ModelObj):
2077
2106
  """data target spec, specify a destination for the feature set data"""
@@ -24,7 +24,6 @@ import mlrun.artifacts
24
24
  import mlrun.common.helpers
25
25
  import mlrun.common.schemas.model_monitoring.constants as mm_constants
26
26
  import mlrun.feature_store
27
- import mlrun.model_monitoring.application
28
27
  import mlrun.model_monitoring.applications as mm_app
29
28
  import mlrun.serving
30
29
  from mlrun.data_types.infer import InferOptions, get_df_stats
@@ -561,8 +560,7 @@ def _create_model_monitoring_function_base(
561
560
  func: typing.Union[str, None] = None,
562
561
  application_class: typing.Union[
563
562
  str,
564
- mlrun.model_monitoring.application.ModelMonitoringApplicationBase,
565
- mm_app.ModelMonitoringApplicationBaseV2,
563
+ mm_app.ModelMonitoringApplicationBase,
566
564
  None,
567
565
  ] = None,
568
566
  name: typing.Optional[str] = None,
@@ -576,15 +574,6 @@ def _create_model_monitoring_function_base(
576
574
  Note: this is an internal API only.
577
575
  This function does not set the labels or mounts v3io.
578
576
  """
579
- if isinstance(
580
- application_class,
581
- mlrun.model_monitoring.application.ModelMonitoringApplicationBase,
582
- ):
583
- warnings.warn(
584
- "The `ModelMonitoringApplicationBase` class is deprecated from version 1.7.0, "
585
- "please use `ModelMonitoringApplicationBaseV2`. It will be removed in 1.9.0.",
586
- FutureWarning,
587
- )
588
577
  if name in mm_constants._RESERVED_FUNCTION_NAMES:
589
578
  raise mlrun.errors.MLRunInvalidArgumentError(
590
579
  "An application cannot have the following names: "
@@ -13,12 +13,11 @@
13
13
  # limitations under the License.
14
14
  #
15
15
 
16
- from .base import ModelMonitoringApplicationBase, ModelMonitoringApplicationBaseV2
16
+ from .base import ModelMonitoringApplicationBase
17
17
  from .context import MonitoringApplicationContext
18
18
  from .evidently_base import (
19
19
  _HAS_EVIDENTLY,
20
20
  SUPPORTED_EVIDENTLY_VERSION,
21
21
  EvidentlyModelMonitoringApplicationBase,
22
- EvidentlyModelMonitoringApplicationBaseV2,
23
22
  )
24
23
  from .results import ModelMonitoringApplicationMetric, ModelMonitoringApplicationResult
@@ -13,6 +13,7 @@
13
13
  # limitations under the License.
14
14
 
15
15
  import json
16
+ import traceback
16
17
  from typing import Any, Optional, Union
17
18
 
18
19
  import mlrun.common.schemas.alert as alert_objects
@@ -161,7 +162,10 @@ class _ApplicationErrorHandler(StepToDict):
161
162
  :param event: Application event.
162
163
  """
163
164
 
164
- logger.error(f"Error in application step: {event}")
165
+ exception_with_trace = "".join(
166
+ traceback.format_exception(None, event.error, event.error.__traceback__)
167
+ )
168
+ logger.error(f"Error in application step: {exception_with_trace}")
165
169
 
166
170
  event_data = alert_objects.Event(
167
171
  kind=alert_objects.EventKind.MM_APP_FAILED,