mlrun 1.8.0rc21__py3-none-any.whl → 1.8.0rc26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic. Click here for more details.

Files changed (47)
  1. mlrun/__init__.py +37 -3
  2. mlrun/alerts/alert.py +1 -0
  3. mlrun/artifacts/document.py +78 -36
  4. mlrun/common/formatters/feature_set.py +1 -0
  5. mlrun/common/schemas/alert.py +3 -0
  6. mlrun/common/schemas/client_spec.py +0 -1
  7. mlrun/common/schemas/model_monitoring/constants.py +27 -9
  8. mlrun/common/schemas/workflow.py +1 -0
  9. mlrun/config.py +39 -6
  10. mlrun/datastore/datastore_profile.py +58 -16
  11. mlrun/datastore/sources.py +7 -1
  12. mlrun/datastore/vectorstore.py +20 -1
  13. mlrun/db/base.py +11 -0
  14. mlrun/db/httpdb.py +21 -9
  15. mlrun/db/nopdb.py +10 -0
  16. mlrun/errors.py +4 -0
  17. mlrun/execution.py +15 -6
  18. mlrun/launcher/client.py +2 -2
  19. mlrun/launcher/local.py +5 -1
  20. mlrun/model_monitoring/applications/_application_steps.py +3 -1
  21. mlrun/model_monitoring/controller.py +266 -103
  22. mlrun/model_monitoring/db/tsdb/__init__.py +11 -23
  23. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +5 -2
  24. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +8 -8
  25. mlrun/model_monitoring/helpers.py +16 -10
  26. mlrun/model_monitoring/stream_processing.py +85 -35
  27. mlrun/package/context_handler.py +1 -1
  28. mlrun/package/packagers_manager.py +4 -18
  29. mlrun/projects/pipelines.py +2 -2
  30. mlrun/projects/project.py +123 -38
  31. mlrun/runtimes/nuclio/serving.py +2 -2
  32. mlrun/runtimes/sparkjob/spark3job.py +1 -1
  33. mlrun/secrets.py +1 -1
  34. mlrun/serving/server.py +11 -3
  35. mlrun/serving/states.py +65 -8
  36. mlrun/serving/v2_serving.py +16 -8
  37. mlrun/utils/helpers.py +81 -21
  38. mlrun/utils/notifications/notification/base.py +6 -1
  39. mlrun/utils/notifications/notification/slack.py +5 -1
  40. mlrun/utils/notifications/notification_pusher.py +13 -4
  41. mlrun/utils/version/version.json +2 -2
  42. {mlrun-1.8.0rc21.dist-info → mlrun-1.8.0rc26.dist-info}/METADATA +33 -16
  43. {mlrun-1.8.0rc21.dist-info → mlrun-1.8.0rc26.dist-info}/RECORD +47 -47
  44. {mlrun-1.8.0rc21.dist-info → mlrun-1.8.0rc26.dist-info}/WHEEL +1 -1
  45. {mlrun-1.8.0rc21.dist-info → mlrun-1.8.0rc26.dist-info}/LICENSE +0 -0
  46. {mlrun-1.8.0rc21.dist-info → mlrun-1.8.0rc26.dist-info}/entry_points.txt +0 -0
  47. {mlrun-1.8.0rc21.dist-info → mlrun-1.8.0rc26.dist-info}/top_level.txt +0 -0
@@ -67,43 +67,31 @@ class ObjectTSDBFactory(enum.Enum):
67
67
  def get_tsdb_connector(
68
68
  project: str,
69
69
  secret_provider: typing.Optional[typing.Callable[[str], str]] = None,
70
- tsdb_connection_string: typing.Optional[str] = None,
71
- **kwargs,
70
+ profile: typing.Optional[mlrun.datastore.datastore_profile.DatastoreProfile] = None,
72
71
  ) -> TSDBConnector:
73
72
  """
74
73
  Get TSDB connector object.
75
74
  :param project: The name of the project.
76
75
  :param secret_provider: An optional secret provider to get the connection string secret.
77
- :param tsdb_connection_string: An optional explicit connection string to the TSDB.
76
+ :param profile: An optional profile to initialize the TSDB connector from.
78
77
 
79
78
  :return: `TSDBConnector` object. The main goal of this object is to handle different operations on the
80
79
  TSDB connector such as updating drift metrics or write application record result.
81
80
  :raise: `MLRunInvalidMMStoreTypeError` if the user didn't provide TSDB connection
82
81
  or the provided TSDB connection is invalid.
83
82
  """
84
-
85
- try:
86
- profile = mlrun.model_monitoring.helpers._get_tsdb_profile(
87
- project=project, secret_provider=secret_provider
88
- )
89
- except mlrun.errors.MLRunNotFoundError:
90
- profile = None
91
-
92
- tsdb_connection_string = (
93
- tsdb_connection_string
94
- or mlrun.model_monitoring.helpers.get_tsdb_connection_string(
95
- secret_provider=secret_provider
96
- )
83
+ profile = profile or mlrun.model_monitoring.helpers._get_tsdb_profile(
84
+ project=project, secret_provider=secret_provider
97
85
  )
98
-
99
- if tsdb_connection_string and tsdb_connection_string.startswith("taosws"):
100
- tsdb_connector_type = mlrun.common.schemas.model_monitoring.TSDBTarget.TDEngine
101
- kwargs["connection_string"] = tsdb_connection_string
102
- elif tsdb_connection_string and tsdb_connection_string == "v3io":
103
- tsdb_connector_type = mlrun.common.schemas.model_monitoring.TSDBTarget.V3IO_TSDB
104
- elif isinstance(profile, mlrun.datastore.datastore_profile.DatastoreProfileV3io):
86
+ kwargs = {}
87
+ if isinstance(profile, mlrun.datastore.datastore_profile.DatastoreProfileV3io):
105
88
  tsdb_connector_type = mlrun.common.schemas.model_monitoring.TSDBTarget.V3IO_TSDB
106
89
  kwargs["v3io_access_key"] = profile.v3io_access_key
90
+ elif isinstance(
91
+ profile, mlrun.datastore.datastore_profile.TDEngineDatastoreProfile
92
+ ):
93
+ tsdb_connector_type = mlrun.common.schemas.model_monitoring.TSDBTarget.TDEngine
94
+ kwargs["connection_string"] = profile.dsn()
107
95
  else:
108
96
  raise mlrun.errors.MLRunInvalidMMStoreTypeError(
109
97
  "You must provide a valid tsdb store connection by using "
@@ -145,8 +145,11 @@ class TDEngineConnector(TSDBConnector):
145
145
 
146
146
  create_table_sql = table._create_subtable_sql(subtable=table_name, values=event)
147
147
 
148
+ # we need the string values to be sent to the connection, not the enum
149
+ columns = {str(key): str(val) for key, val in table.columns.items()}
150
+
148
151
  insert_statement = Statement(
149
- columns=table.columns,
152
+ columns=columns,
150
153
  subtable=table_name,
151
154
  values=event,
152
155
  )
@@ -188,7 +191,7 @@ class TDEngineConnector(TSDBConnector):
188
191
  graph.add_step(
189
192
  "mlrun.model_monitoring.db.tsdb.tdengine.stream_graph_steps.ProcessBeforeTDEngine",
190
193
  name="ProcessBeforeTDEngine",
191
- after="MapFeatureNames",
194
+ after="FilterNOP",
192
195
  )
193
196
 
194
197
  def apply_tdengine_target(name, after):
@@ -135,7 +135,7 @@ class V3IOTSDBConnector(TSDBConnector):
135
135
  monitoring_predictions_full_path = (
136
136
  mlrun.mlconf.get_model_monitoring_file_target_path(
137
137
  project=self.project,
138
- kind=mm_schemas.FileTargetKind.PREDICTIONS,
138
+ kind=mm_schemas.V3IOTSDBTables.PREDICTIONS,
139
139
  )
140
140
  )
141
141
  (
@@ -145,7 +145,7 @@ class V3IOTSDBConnector(TSDBConnector):
145
145
  ) = mlrun.common.model_monitoring.helpers.parse_model_endpoint_store_prefix(
146
146
  monitoring_predictions_full_path
147
147
  )
148
- self.tables[mm_schemas.FileTargetKind.PREDICTIONS] = monitoring_predictions_path
148
+ self.tables[mm_schemas.V3IOTSDBTables.PREDICTIONS] = monitoring_predictions_path
149
149
 
150
150
  def create_tables(self) -> None:
151
151
  """
@@ -204,7 +204,7 @@ class V3IOTSDBConnector(TSDBConnector):
204
204
  }
205
205
  ],
206
206
  name=EventFieldType.LATENCY,
207
- after="MapFeatureNames",
207
+ after="FilterNOP",
208
208
  step_name="Aggregates",
209
209
  table=".",
210
210
  key_field=EventFieldType.ENDPOINT_ID,
@@ -225,8 +225,8 @@ class V3IOTSDBConnector(TSDBConnector):
225
225
  graph.add_step(
226
226
  "storey.TSDBTarget",
227
227
  name="tsdb_predictions",
228
- after="MapFeatureNames",
229
- path=f"{self.container}/{self.tables[mm_schemas.FileTargetKind.PREDICTIONS]}",
228
+ after="FilterNOP",
229
+ path=f"{self.container}/{self.tables[mm_schemas.V3IOTSDBTables.PREDICTIONS]}",
230
230
  rate="1/s",
231
231
  time_col=mm_schemas.EventFieldType.TIMESTAMP,
232
232
  container=self.container,
@@ -740,7 +740,7 @@ class V3IOTSDBConnector(TSDBConnector):
740
740
  "both or neither of `aggregation_window` and `agg_funcs` must be provided"
741
741
  )
742
742
  df = self._get_records(
743
- table=mm_schemas.FileTargetKind.PREDICTIONS,
743
+ table=mm_schemas.V3IOTSDBTables.PREDICTIONS,
744
744
  start=start,
745
745
  end=end,
746
746
  columns=[mm_schemas.EventFieldType.ESTIMATED_PREDICTION_COUNT],
@@ -782,7 +782,7 @@ class V3IOTSDBConnector(TSDBConnector):
782
782
  filter_query = self._get_endpoint_filter(endpoint_id=endpoint_ids)
783
783
  start, end = self._get_start_end(start, end)
784
784
  df = self._get_records(
785
- table=mm_schemas.FileTargetKind.PREDICTIONS,
785
+ table=mm_schemas.V3IOTSDBTables.PREDICTIONS,
786
786
  start=start,
787
787
  end=end,
788
788
  filter_query=filter_query,
@@ -919,7 +919,7 @@ class V3IOTSDBConnector(TSDBConnector):
919
919
  start = start or (mlrun.utils.datetime_now() - timedelta(hours=24))
920
920
  start, end = self._get_start_end(start, end)
921
921
  df = self._get_records(
922
- table=mm_schemas.FileTargetKind.PREDICTIONS,
922
+ table=mm_schemas.V3IOTSDBTables.PREDICTIONS,
923
923
  start=start,
924
924
  end=end,
925
925
  columns=[mm_schemas.EventFieldType.LATENCY],
@@ -109,7 +109,7 @@ def filter_results_by_regex(
109
109
  result_name_filters=validated_filters,
110
110
  ):
111
111
  filtered_metrics_names.append(existing_result_name)
112
- return filtered_metrics_names
112
+ return list(set(filtered_metrics_names))
113
113
 
114
114
 
115
115
  def get_stream_path(
@@ -117,6 +117,7 @@ def get_stream_path(
117
117
  function_name: str = mm_constants.MonitoringFunctionNames.STREAM,
118
118
  stream_uri: Optional[str] = None,
119
119
  secret_provider: Optional[Callable[[str], str]] = None,
120
+ profile: Optional[mlrun.datastore.datastore_profile.DatastoreProfile] = None,
120
121
  ) -> str:
121
122
  """
122
123
  Get stream path from the project secret. If wasn't set, take it from the system configurations
@@ -126,20 +127,25 @@ def get_stream_path(
126
127
  :param stream_uri: Stream URI. If provided, it will be used instead of the one from the project's secret.
127
128
  :param secret_provider: Optional secret provider to get the connection string secret.
128
129
  If not set, the env vars are used.
130
+ :param profile: Optional datastore profile of the stream (V3IO/KafkaSource profile).
129
131
  :return: Monitoring stream path to the relevant application.
130
132
  """
131
133
 
132
- try:
133
- profile = _get_stream_profile(project=project, secret_provider=secret_provider)
134
- except mlrun.errors.MLRunNotFoundError:
135
- profile = None
134
+ profile = profile or _get_stream_profile(
135
+ project=project, secret_provider=secret_provider
136
+ )
136
137
 
137
138
  if isinstance(profile, mlrun.datastore.datastore_profile.DatastoreProfileV3io):
138
139
  stream_uri = "v3io"
139
-
140
- stream_uri = stream_uri or mlrun.get_secret_or_env(
141
- key=mm_constants.ProjectSecretKeys.STREAM_PATH, secret_provider=secret_provider
142
- )
140
+ elif isinstance(
141
+ profile, mlrun.datastore.datastore_profile.DatastoreProfileKafkaSource
142
+ ):
143
+ stream_uri = f"kafka://{profile.brokers[0]}"
144
+ else:
145
+ raise mlrun.errors.MLRunValueError(
146
+ f"Received an unexpected stream profile type: {type(profile)}\n"
147
+ "Expects `DatastoreProfileV3io` or `DatastoreProfileKafkaSource`."
148
+ )
143
149
 
144
150
  if not stream_uri or stream_uri == "v3io":
145
151
  stream_uri = mlrun.mlconf.get_model_monitoring_file_target_path(
@@ -273,7 +279,7 @@ def _get_profile(
273
279
  )
274
280
  if not profile_name:
275
281
  raise mlrun.errors.MLRunNotFoundError(
276
- f"Not found `{profile_name_key}` profile name"
282
+ f"Not found `{profile_name_key}` profile name for project '{project}'"
277
283
  )
278
284
  return mlrun.datastore.datastore_profile.datastore_profile_read(
279
285
  url=f"ds://{profile_name}", project_name=project, secrets=secret_provider
@@ -12,7 +12,6 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
 
15
- import collections
16
15
  import datetime
17
16
  import os
18
17
  import typing
@@ -29,11 +28,14 @@ import mlrun.model_monitoring.db
29
28
  import mlrun.serving.states
30
29
  import mlrun.utils
31
30
  from mlrun.common.schemas.model_monitoring.constants import (
31
+ ControllerEvent,
32
+ ControllerEventKind,
32
33
  EndpointType,
33
34
  EventFieldType,
34
35
  FileTargetKind,
35
36
  ProjectSecretKeys,
36
37
  )
38
+ from mlrun.datastore import parse_kafka_url
37
39
  from mlrun.model_monitoring.db import TSDBConnector
38
40
  from mlrun.utils import logger
39
41
 
@@ -88,7 +90,9 @@ class EventStreamProcessor:
88
90
  self.v3io_framesd = v3io_framesd or mlrun.mlconf.v3io_framesd
89
91
  self.v3io_api = v3io_api or mlrun.mlconf.v3io_api
90
92
 
91
- self.v3io_access_key = v3io_access_key or os.environ.get("V3IO_ACCESS_KEY")
93
+ self.v3io_access_key = v3io_access_key or mlrun.get_secret_or_env(
94
+ "V3IO_ACCESS_KEY"
95
+ )
92
96
  self.model_monitoring_access_key = (
93
97
  model_monitoring_access_key
94
98
  or os.environ.get(ProjectSecretKeys.ACCESS_KEY)
@@ -118,6 +122,7 @@ class EventStreamProcessor:
118
122
  self,
119
123
  fn: mlrun.runtimes.ServingRuntime,
120
124
  tsdb_connector: TSDBConnector,
125
+ controller_stream_uri: str,
121
126
  ) -> None:
122
127
  """
123
128
  Apply monitoring serving graph to a given serving function. The following serving graph includes about 4 main
@@ -146,6 +151,8 @@ class EventStreamProcessor:
146
151
 
147
152
  :param fn: A serving function.
148
153
  :param tsdb_connector: Time series database connector.
154
+ :param controller_stream_uri: The controller stream URI. Runs on server api pod so needed to be provided as
155
+ input
149
156
  """
150
157
 
151
158
  graph = typing.cast(
@@ -209,6 +216,20 @@ class EventStreamProcessor:
209
216
  )
210
217
 
211
218
  apply_map_feature_names()
219
+ # split the graph between event with error vs valid event
220
+ graph.add_step(
221
+ "storey.Filter",
222
+ "FilterNOP",
223
+ after="MapFeatureNames",
224
+ _fn="(event.get('kind', " ") != 'nop_event')",
225
+ )
226
+ graph.add_step(
227
+ "storey.Filter",
228
+ "ForwardNOP",
229
+ after="MapFeatureNames",
230
+ _fn="(event.get('kind', " ") == 'nop_event')",
231
+ )
232
+
212
233
  tsdb_connector.apply_monitoring_stream_steps(
213
234
  graph=graph,
214
235
  aggregate_windows=self.aggregate_windows,
@@ -221,7 +242,7 @@ class EventStreamProcessor:
221
242
  graph.add_step(
222
243
  "ProcessBeforeParquet",
223
244
  name="ProcessBeforeParquet",
224
- after="MapFeatureNames",
245
+ after="FilterNOP",
225
246
  _fn="(event)",
226
247
  )
227
248
 
@@ -248,6 +269,44 @@ class EventStreamProcessor:
248
269
 
249
270
  apply_parquet_target()
250
271
 
272
+ # controller branch
273
+ def apply_push_controller_stream(stream_uri: str):
274
+ if stream_uri.startswith("v3io://"):
275
+ graph.add_step(
276
+ ">>",
277
+ "controller_stream_v3io",
278
+ path=stream_uri,
279
+ sharding_func=ControllerEvent.ENDPOINT_ID,
280
+ access_key=self.v3io_access_key,
281
+ after="ForwardNOP",
282
+ )
283
+ elif stream_uri.startswith("kafka://"):
284
+ topic, brokers = parse_kafka_url(stream_uri)
285
+ logger.info(
286
+ "Controller stream uri for kafka",
287
+ stream_uri=stream_uri,
288
+ topic=topic,
289
+ brokers=brokers,
290
+ )
291
+ if isinstance(brokers, list):
292
+ path = f"kafka://{brokers[0]}/{topic}"
293
+ elif isinstance(brokers, str):
294
+ path = f"kafka://{brokers}/{topic}"
295
+ else:
296
+ raise mlrun.errors.MLRunInvalidArgumentError(
297
+ "Brokers must be a list or str check controller stream uri"
298
+ )
299
+ graph.add_step(
300
+ ">>",
301
+ "controller_stream_kafka",
302
+ path=path,
303
+ kafka_brokers=brokers,
304
+ _sharding_func=ControllerEvent.ENDPOINT_ID,
305
+ after="ForwardNOP",
306
+ )
307
+
308
+ apply_push_controller_stream(controller_stream_uri)
309
+
251
310
 
252
311
  class ProcessBeforeParquet(mlrun.feature_store.steps.MapClass):
253
312
  def __init__(self, **kwargs):
@@ -313,14 +372,14 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
313
372
  self.first_request: dict[str, str] = dict()
314
373
  self.last_request: dict[str, str] = dict()
315
374
 
316
- # Number of errors (value) per endpoint (key)
317
- self.error_count: dict[str, int] = collections.defaultdict(int)
318
-
319
375
  # Set of endpoints in the current events
320
376
  self.endpoints: set[str] = set()
321
377
 
322
378
  def do(self, full_event):
323
379
  event = full_event.body
380
+ if event.get(ControllerEvent.KIND, "") == ControllerEventKind.NOP_EVENT:
381
+ logger.info("Skipped nop event inside of ProcessEndpointEvent", event=event)
382
+ return storey.Event(body=[event])
324
383
  # Getting model version and function uri from event
325
384
  # and use them for retrieving the endpoint_id
326
385
  function_uri = full_event.body.get(EventFieldType.FUNCTION_URI)
@@ -354,10 +413,9 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
354
413
  predictions = event.get("resp", {}).get("outputs")
355
414
 
356
415
  if not self.is_valid(
357
- endpoint_id,
358
- is_not_none,
359
- timestamp,
360
- ["when"],
416
+ validation_function=is_not_none,
417
+ field=timestamp,
418
+ dict_path=["when"],
361
419
  ):
362
420
  return None
363
421
 
@@ -369,31 +427,27 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
369
427
  self.last_request[endpoint_id] = timestamp
370
428
 
371
429
  if not self.is_valid(
372
- endpoint_id,
373
- is_not_none,
374
- request_id,
375
- ["request", "id"],
430
+ validation_function=is_not_none,
431
+ field=request_id,
432
+ dict_path=["request", "id"],
376
433
  ):
377
434
  return None
378
435
  if not self.is_valid(
379
- endpoint_id,
380
- is_not_none,
381
- latency,
382
- ["microsec"],
436
+ validation_function=is_not_none,
437
+ field=latency,
438
+ dict_path=["microsec"],
383
439
  ):
384
440
  return None
385
441
  if not self.is_valid(
386
- endpoint_id,
387
- is_not_none,
388
- features,
389
- ["request", "inputs"],
442
+ validation_function=is_not_none,
443
+ field=features,
444
+ dict_path=["request", "inputs"],
390
445
  ):
391
446
  return None
392
447
  if not self.is_valid(
393
- endpoint_id,
394
- is_not_none,
395
- predictions,
396
- ["resp", "outputs"],
448
+ validation_function=is_not_none,
449
+ field=predictions,
450
+ dict_path=["resp", "outputs"],
397
451
  ):
398
452
  return None
399
453
 
@@ -451,7 +505,6 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
451
505
  EventFieldType.LAST_REQUEST_TIMESTAMP: mlrun.utils.enrich_datetime_with_tz_info(
452
506
  self.last_request[endpoint_id]
453
507
  ).timestamp(),
454
- EventFieldType.ERROR_COUNT: self.error_count[endpoint_id],
455
508
  EventFieldType.LABELS: event.get(EventFieldType.LABELS, {}),
456
509
  EventFieldType.METRICS: event.get(EventFieldType.METRICS, {}),
457
510
  EventFieldType.ENTITIES: event.get("request", {}).get(
@@ -482,7 +535,7 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
482
535
  .flat_dict()
483
536
  )
484
537
 
485
- # If model endpoint found, get first_request, last_request and error_count values
538
+ # If model endpoint found, get first_request & last_request values
486
539
  if endpoint_record:
487
540
  first_request = endpoint_record.get(EventFieldType.FIRST_REQUEST)
488
541
 
@@ -493,24 +546,18 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
493
546
  if last_request:
494
547
  self.last_request[endpoint_id] = last_request
495
548
 
496
- error_count = endpoint_record.get(EventFieldType.ERROR_COUNT)
497
-
498
- if error_count:
499
- self.error_count[endpoint_id] = int(error_count)
500
-
501
549
  # add endpoint to endpoints set
502
550
  self.endpoints.add(endpoint_id)
503
551
 
504
552
  def is_valid(
505
553
  self,
506
- endpoint_id: str,
507
554
  validation_function,
508
555
  field: typing.Any,
509
556
  dict_path: list[str],
510
557
  ):
511
558
  if validation_function(field, dict_path):
512
559
  return True
513
- self.error_count[endpoint_id] += 1
560
+
514
561
  return False
515
562
 
516
563
  @staticmethod
@@ -589,6 +636,9 @@ class MapFeatureNames(mlrun.feature_store.steps.MapClass):
589
636
  return None
590
637
 
591
638
  def do(self, event: dict):
639
+ if event.get(ControllerEvent.KIND, "") == ControllerEventKind.NOP_EVENT:
640
+ logger.info("Skipped nop event inside of MapFeatureNames", event=event)
641
+ return event
592
642
  endpoint_id = event[EventFieldType.ENDPOINT_ID]
593
643
 
594
644
  feature_values = event[EventFieldType.FEATURES]
@@ -50,7 +50,7 @@ class ContextHandler:
50
50
  "numpy",
51
51
  ]
52
52
  # Optional packagers to be collected at initialization time:
53
- _EXTENDED_PACKAGERS = [] # TODO: Create "matplotlib", "plotly", "bokeh" packagers.
53
+ _EXTENDED_PACKAGERS = [] # TODO: Create "matplotlib", "plotly", packagers.
54
54
  # Optional packagers from the `mlrun.frameworks` package:
55
55
  _MLRUN_FRAMEWORKS_PACKAGERS = [] # TODO: Create frameworks packagers.
56
56
  # Default priority values for packagers:
@@ -667,16 +667,9 @@ class PackagersManager:
667
667
  data_item=data_item,
668
668
  instructions={},
669
669
  )
670
- except Exception as exception:
670
+ except Exception:
671
671
  # Could not unpack as the reduced type hint, collect the exception and go to the next one:
672
- exception_string = "".join(
673
- traceback.format_exception(
674
- etype=type(exception),
675
- value=exception,
676
- tb=exception.__traceback__,
677
- )
678
- )
679
- found_packagers.append((packager, exception_string))
672
+ found_packagers.append((packager, traceback.format_exc()))
680
673
  # Reduce the type hint list and continue:
681
674
  possible_type_hints = TypeHintUtils.reduce_type_hint(
682
675
  type_hint=possible_type_hints
@@ -692,15 +685,8 @@ class PackagersManager:
692
685
  artifact_type=None,
693
686
  instructions={},
694
687
  )
695
- except Exception as exception:
696
- exception_string = "".join(
697
- traceback.format_exception(
698
- etype=type(exception),
699
- value=exception,
700
- tb=exception.__traceback__,
701
- )
702
- )
703
- found_packagers.append((self._default_packager, exception_string))
688
+ except Exception:
689
+ found_packagers.append((self._default_packager, traceback.format_exc()))
704
690
 
705
691
  # The method did not return until this point, raise an error:
706
692
  raise MLRunPackageUnpackingError(
@@ -31,7 +31,7 @@ import mlrun_pipelines.patcher
31
31
  import mlrun_pipelines.utils
32
32
  from mlrun.errors import err_to_str
33
33
  from mlrun.utils import (
34
- get_ui_url,
34
+ get_workflow_url,
35
35
  logger,
36
36
  normalize_workflow_name,
37
37
  retry_until_successful,
@@ -1225,7 +1225,7 @@ def notify_scheduled_workflow_failure(
1225
1225
  notification_pusher = mlrun.utils.notifications.CustomNotificationPusher(
1226
1226
  ["slack"]
1227
1227
  )
1228
- url = get_ui_url(project_name, context_uid)
1228
+ url = get_workflow_url(project_name, context_uid)
1229
1229
  link = f"<{url}|*view workflow job details*>"
1230
1230
  message = (
1231
1231
  f":x: Failed to run scheduled workflow {workflow_name} "