mlrun 1.7.0rc16__py3-none-any.whl → 1.7.0rc18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of mlrun has been flagged as a potentially problematic release in the registry.

Files changed (65)
  1. mlrun/alerts/alert.py +27 -24
  2. mlrun/artifacts/manager.py +5 -1
  3. mlrun/artifacts/model.py +1 -1
  4. mlrun/common/runtimes/constants.py +3 -0
  5. mlrun/common/schemas/__init__.py +8 -2
  6. mlrun/common/schemas/alert.py +49 -10
  7. mlrun/common/schemas/client_spec.py +1 -0
  8. mlrun/common/schemas/function.py +4 -0
  9. mlrun/common/schemas/model_monitoring/__init__.py +3 -1
  10. mlrun/common/schemas/model_monitoring/constants.py +21 -1
  11. mlrun/common/schemas/model_monitoring/grafana.py +9 -5
  12. mlrun/common/schemas/model_monitoring/model_endpoints.py +17 -6
  13. mlrun/common/schemas/project.py +3 -1
  14. mlrun/config.py +9 -3
  15. mlrun/data_types/to_pandas.py +5 -5
  16. mlrun/datastore/datastore.py +6 -2
  17. mlrun/datastore/redis.py +2 -2
  18. mlrun/datastore/s3.py +5 -0
  19. mlrun/datastore/sources.py +111 -6
  20. mlrun/datastore/targets.py +2 -2
  21. mlrun/db/base.py +6 -2
  22. mlrun/db/httpdb.py +22 -3
  23. mlrun/db/nopdb.py +10 -3
  24. mlrun/errors.py +6 -0
  25. mlrun/feature_store/retrieval/conversion.py +5 -5
  26. mlrun/feature_store/retrieval/job.py +3 -2
  27. mlrun/feature_store/retrieval/spark_merger.py +2 -1
  28. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +2 -2
  29. mlrun/lists.py +2 -0
  30. mlrun/model.py +8 -6
  31. mlrun/model_monitoring/db/stores/base/store.py +16 -3
  32. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +44 -43
  33. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +190 -91
  34. mlrun/model_monitoring/db/tsdb/__init__.py +35 -6
  35. mlrun/model_monitoring/db/tsdb/base.py +25 -18
  36. mlrun/model_monitoring/db/tsdb/tdengine/__init__.py +15 -0
  37. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +207 -0
  38. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +45 -0
  39. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +231 -0
  40. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +103 -64
  41. mlrun/model_monitoring/db/v3io_tsdb_reader.py +217 -16
  42. mlrun/model_monitoring/helpers.py +32 -0
  43. mlrun/model_monitoring/stream_processing.py +7 -4
  44. mlrun/model_monitoring/writer.py +19 -14
  45. mlrun/package/utils/_formatter.py +2 -2
  46. mlrun/projects/project.py +40 -11
  47. mlrun/render.py +8 -5
  48. mlrun/runtimes/__init__.py +1 -0
  49. mlrun/runtimes/databricks_job/databricks_wrapper.py +1 -1
  50. mlrun/runtimes/nuclio/api_gateway.py +97 -77
  51. mlrun/runtimes/nuclio/application/application.py +160 -7
  52. mlrun/runtimes/nuclio/function.py +18 -12
  53. mlrun/track/tracker.py +2 -1
  54. mlrun/utils/async_http.py +25 -5
  55. mlrun/utils/helpers.py +28 -3
  56. mlrun/utils/logger.py +11 -6
  57. mlrun/utils/notifications/notification/slack.py +27 -7
  58. mlrun/utils/notifications/notification_pusher.py +45 -41
  59. mlrun/utils/version/version.json +2 -2
  60. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/METADATA +8 -3
  61. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/RECORD +65 -61
  62. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/LICENSE +0 -0
  63. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/WHEEL +0 -0
  64. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/entry_points.txt +0 -0
  65. {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/top_level.txt +0 -0
mlrun/datastore/sources.py CHANGED
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import json
+import math
+import operator
 import os
 import warnings
 from base64 import b64encode
@@ -178,7 +180,7 @@ class CSVSource(BaseSourceDriver):
         self,
         name: str = "",
         path: str = None,
-        attributes: dict[str, str] = None,
+        attributes: dict[str, object] = None,
         key_field: str = None,
         schedule: str = None,
         parse_dates: Union[None, int, str, list[int], list[str]] = None,
@@ -305,7 +307,7 @@ class ParquetSource(BaseSourceDriver):
         self,
         name: str = "",
         path: str = None,
-        attributes: dict[str, str] = None,
+        attributes: dict[str, object] = None,
         key_field: str = None,
         time_field: str = None,
         schedule: str = None,
@@ -313,6 +315,10 @@ class ParquetSource(BaseSourceDriver):
         end_time: Optional[Union[datetime, str]] = None,
         additional_filters: Optional[list[tuple]] = None,
     ):
+        if additional_filters:
+            attributes = copy(attributes) or {}
+            attributes["additional_filters"] = additional_filters
+        self.validate_additional_filters(additional_filters)
         super().__init__(
             name,
             path,
@@ -323,7 +329,6 @@ class ParquetSource(BaseSourceDriver):
             start_time,
             end_time,
         )
-        self.additional_filters = additional_filters

     @property
     def start_time(self):
@@ -341,6 +346,10 @@ class ParquetSource(BaseSourceDriver):
     def end_time(self, end_time):
         self._end_time = self._convert_to_datetime(end_time)

+    @property
+    def additional_filters(self):
+        return self.attributes.get("additional_filters")
+
     @staticmethod
     def _convert_to_datetime(time):
         if time and isinstance(time, str):
@@ -350,6 +359,25 @@ class ParquetSource(BaseSourceDriver):
         else:
             return time

+    @staticmethod
+    def validate_additional_filters(additional_filters):
+        if not additional_filters:
+            return
+        for filter_tuple in additional_filters:
+            if not filter_tuple:
+                continue
+            col_name, op, value = filter_tuple
+            if isinstance(value, float) and math.isnan(value):
+                raise mlrun.errors.MLRunInvalidArgumentError(
+                    "using NaN in additional_filters is not supported"
+                )
+            elif isinstance(value, (list, tuple, set)):
+                for sub_value in value:
+                    if isinstance(sub_value, float) and math.isnan(sub_value):
+                        raise mlrun.errors.MLRunInvalidArgumentError(
+                            "using NaN in additional_filters is not supported"
+                        )
+
     def to_step(
         self,
         key_field=None,
@@ -361,13 +389,12 @@ class ParquetSource(BaseSourceDriver):
     ):
         import storey

-        attributes = self.attributes or {}
+        attributes = copy(self.attributes)
+        attributes.pop("additional_filters", None)
         if context:
             attributes["context"] = context
-
         data_item = mlrun.store_manager.object(self.path)
         store, path, url = mlrun.store_manager.get_or_create_store(self.path)
-
         return storey.ParquetSource(
             paths=url,  # unlike self.path, it already has store:// replaced
             key_field=self.key_field or key_field,
@@ -412,6 +439,84 @@ class ParquetSource(BaseSourceDriver):
             **reader_args,
         )

+    def _build_spark_additional_filters(self, column_types: dict):
+        if not self.additional_filters:
+            return None
+        from pyspark.sql.functions import col, isnan, lit
+
+        operators = {
+            "==": operator.eq,
+            "=": operator.eq,
+            ">": operator.gt,
+            "<": operator.lt,
+            ">=": operator.ge,
+            "<=": operator.le,
+            "!=": operator.ne,
+        }
+
+        spark_filter = None
+        new_filter = lit(True)
+        for filter_tuple in self.additional_filters:
+            if not filter_tuple:
+                continue
+            col_name, op, value = filter_tuple
+            if op.lower() in ("in", "not in") and isinstance(value, (list, tuple, set)):
+                none_exists = False
+                value = list(value)
+                for sub_value in value:
+                    if sub_value is None:
+                        value.remove(sub_value)
+                        none_exists = True
+                if none_exists:
+                    filter_nan = column_types[col_name] not in ("timestamp", "date")
+                    if value:
+                        if op.lower() == "in":
+                            new_filter = (
+                                col(col_name).isin(value) | col(col_name).isNull()
+                            )
+                            if filter_nan:
+                                new_filter = new_filter | isnan(col(col_name))
+
+                        else:
+                            new_filter = (
+                                ~col(col_name).isin(value) & ~col(col_name).isNull()
+                            )
+                            if filter_nan:
+                                new_filter = new_filter & ~isnan(col(col_name))
+                    else:
+                        if op.lower() == "in":
+                            new_filter = col(col_name).isNull()
+                            if filter_nan:
+                                new_filter = new_filter | isnan(col(col_name))
+                        else:
+                            new_filter = ~col(col_name).isNull()
+                            if filter_nan:
+                                new_filter = new_filter & ~isnan(col(col_name))
+            else:
+                if op.lower() == "in":
+                    new_filter = col(col_name).isin(value)
+                elif op.lower() == "not in":
+                    new_filter = ~col(col_name).isin(value)
+                elif op in operators:
+                    new_filter = operators[op](col(col_name), value)
+                else:
+                    raise mlrun.errors.MLRunInvalidArgumentError(
+                        f"unsupported filter operator: {op}"
+                    )
+            if spark_filter is not None:
+                spark_filter = spark_filter & new_filter
+            else:
+                spark_filter = new_filter
+        return spark_filter
+
+    def _filter_spark_df(self, df, time_field=None, columns=None):
+        spark_additional_filters = self._build_spark_additional_filters(
+            column_types=dict(df.dtypes)
+        )
+        if spark_additional_filters is not None:
+            df = df.filter(spark_additional_filters)
+        return super()._filter_spark_df(df=df, time_field=time_field, columns=columns)
+

 class BigQuerySource(BaseSourceDriver):
     """
mlrun/datastore/targets.py CHANGED
@@ -2134,7 +2134,7 @@ class SQLTarget(BaseStoreTarget):
                 raise ValueError(f"Table named {table_name} is not exist")

             elif not table_exists and create_table:
-                TYPE_TO_SQL_TYPE = {
+                type_to_sql_type = {
                     int: sqlalchemy.Integer,
                     str: sqlalchemy.String(self.attributes.get("varchar_len")),
                     datetime.datetime: sqlalchemy.dialects.mysql.DATETIME(fsp=6),
@@ -2147,7 +2147,7 @@ class SQLTarget(BaseStoreTarget):
             # creat new table with the given name
             columns = []
             for col, col_type in self.schema.items():
-                col_type_sql = TYPE_TO_SQL_TYPE.get(col_type)
+                col_type_sql = type_to_sql_type.get(col_type)
                 if col_type_sql is None:
                     raise TypeError(
                         f"'{col_type}' unsupported type for column '{col}'"
                     )
mlrun/db/base.py CHANGED
@@ -17,6 +17,7 @@ from abc import ABC, abstractmethod
 from typing import Optional, Union

 import mlrun.alerts
+import mlrun.common.runtimes.constants
 import mlrun.common.schemas
 import mlrun.model_monitoring

@@ -63,7 +64,10 @@ class RunDBInterface(ABC):
         uid: Optional[Union[str, list[str]]] = None,
         project: Optional[str] = None,
         labels: Optional[Union[str, list[str]]] = None,
-        state: Optional[str] = None,
+        state: Optional[
+            mlrun.common.runtimes.constants.RunStates
+        ] = None,  # Backward compatibility
+        states: Optional[list[mlrun.common.runtimes.constants.RunStates]] = None,
         sort: bool = True,
         last: int = 0,
         iter: bool = False,
@@ -629,8 +633,8 @@ class RunDBInterface(ABC):
     @abstractmethod
     def store_api_gateway(
         self,
-        project: str,
         api_gateway: mlrun.common.schemas.APIGateway,
+        project: str = None,
     ):
         pass

mlrun/db/httpdb.py CHANGED
@@ -30,6 +30,7 @@ import semver
 from mlrun_pipelines.utils import compile_pipeline

 import mlrun
+import mlrun.common.runtimes
 import mlrun.common.schemas
 import mlrun.common.types
 import mlrun.model_monitoring.model_endpoint
@@ -530,6 +531,10 @@ class HTTPRunDB(RunDBInterface):
             server_cfg.get("model_endpoint_monitoring_endpoint_store_connection")
             or config.model_endpoint_monitoring.endpoint_store_connection
         )
+        config.model_endpoint_monitoring.tsdb_connection = (
+            server_cfg.get("model_monitoring_tsdb_connection")
+            or config.model_endpoint_monitoring.tsdb_connection
+        )
         config.packagers = server_cfg.get("packagers") or config.packagers
         server_data_prefixes = server_cfg.get("feature_store_data_prefixes") or {}
         for prefix in ["default", "nosql", "redisnosql"]:
@@ -752,7 +757,10 @@ class HTTPRunDB(RunDBInterface):
         uid: Optional[Union[str, list[str]]] = None,
         project: Optional[str] = None,
         labels: Optional[Union[str, list[str]]] = None,
-        state: Optional[str] = None,
+        state: Optional[
+            mlrun.common.runtimes.constants.RunStates
+        ] = None,  # Backward compatibility
+        states: typing.Optional[list[mlrun.common.runtimes.constants.RunStates]] = None,
         sort: bool = True,
         last: int = 0,
         iter: bool = False,
@@ -790,7 +798,8 @@ class HTTPRunDB(RunDBInterface):
        :param labels: A list of labels to filter by. Label filters work by either filtering a specific value
                       of a label (i.e. list("key=value")) or by looking for the existence of a given
                       key (i.e. "key").
-       :param state: List only runs whose state is specified.
+       :param state: Deprecated - List only runs whose state is specified (will be removed in 1.9.0)
+       :param states: List only runs whose state is one of the provided states.
        :param sort: Whether to sort the result according to their start time. Otherwise, results will be
                     returned by their internal order in the DB (order will not be guaranteed).
        :param last: Deprecated - currently not used (will be removed in 1.8.0).
@@ -826,11 +835,19 @@ class HTTPRunDB(RunDBInterface):
                 FutureWarning,
             )

+        if state:
+            # TODO: Remove this in 1.9.0
+            warnings.warn(
+                "'state' is deprecated and will be removed in 1.9.0. Use 'states' instead.",
+                FutureWarning,
+            )
+
         if (
             not name
             and not uid
             and not labels
             and not state
+            and not states
             and not last
             and not start_time_from
             and not start_time_to
@@ -849,7 +866,9 @@ class HTTPRunDB(RunDBInterface):
             "name": name,
             "uid": uid,
             "label": labels or [],
-            "state": state,
+            "state": mlrun.utils.helpers.as_list(state)
+            if state is not None
+            else states or None,
             "sort": bool2str(sort),
             "iter": bool2str(iter),
             "start_time_from": datetime_to_iso(start_time_from),
mlrun/db/nopdb.py CHANGED
@@ -17,6 +17,7 @@ import datetime
 from typing import Optional, Union

 import mlrun.alerts
+import mlrun.common.runtimes.constants
 import mlrun.common.schemas
 import mlrun.errors

@@ -80,7 +81,10 @@ class NopDB(RunDBInterface):
         uid: Optional[Union[str, list[str]]] = None,
         project: Optional[str] = None,
         labels: Optional[Union[str, list[str]]] = None,
-        state: Optional[str] = None,
+        state: Optional[
+            mlrun.common.runtimes.constants.RunStates
+        ] = None,  # Backward compatibility
+        states: Optional[list[mlrun.common.runtimes.constants.RunStates]] = None,
         sort: bool = True,
         last: int = 0,
         iter: bool = False,
@@ -520,8 +524,11 @@ class NopDB(RunDBInterface):

     def store_api_gateway(
         self,
-        project: str,
-        api_gateway: mlrun.runtimes.nuclio.APIGateway,
+        api_gateway: Union[
+            mlrun.common.schemas.APIGateway,
+            mlrun.runtimes.nuclio.api_gateway.APIGateway,
+        ],
+        project: str = None,
     ) -> mlrun.common.schemas.APIGateway:
         pass

mlrun/errors.py CHANGED
@@ -155,6 +155,10 @@ class MLRunNotFoundError(MLRunHTTPStatusError):
     error_status_code = HTTPStatus.NOT_FOUND.value


+class MLRunPaginationEndOfResultsError(MLRunNotFoundError):
+    pass
+
+
 class MLRunBadRequestError(MLRunHTTPStatusError):
     error_status_code = HTTPStatus.BAD_REQUEST.value

@@ -240,3 +244,5 @@ STATUS_ERRORS = {
     HTTPStatus.SERVICE_UNAVAILABLE.value: MLRunServiceUnavailableError,
     HTTPStatus.NOT_IMPLEMENTED.value: MLRunNotImplementedServerError,
 }
+
+EXPECTED_ERRORS = (MLRunPaginationEndOfResultsError,)
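
MLRunPaginationEndOfResultsError subclasses MLRunNotFoundError, and EXPECTED_ERRORS marks it as an anticipated outcome rather than a failure, which lets callers and logging layers treat it as a normal stop signal. A hedged sketch of client-side use; fetch_page here is a hypothetical callable, not an mlrun API:

    import mlrun.errors

    def drain_pages(fetch_page):
        """Yield pages until the server signals end-of-results."""
        page_token = None
        while True:
            try:
                page, page_token = fetch_page(page_token)  # hypothetical pagination call
            except mlrun.errors.MLRunPaginationEndOfResultsError:
                break  # expected terminal condition, not an error
            yield page
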
mlrun/data_types/to_pandas.py CHANGED
@@ -168,10 +168,10 @@ class PandasConversionMixin:
         column_counter = Counter(self.columns)

         dtype = [None] * len(self.schema)
-        for fieldIdx, field in enumerate(self.schema):
+        for field_idx, field in enumerate(self.schema):
             # For duplicate column name, we use `iloc` to access it.
             if column_counter[field.name] > 1:
-                pandas_col = pdf.iloc[:, fieldIdx]
+                pandas_col = pdf.iloc[:, field_idx]
             else:
                 pandas_col = pdf[field.name]

@@ -187,12 +187,12 @@ class PandasConversionMixin:
                 and field.nullable
                 and pandas_col.isnull().any()
             ):
-                dtype[fieldIdx] = pandas_type
+                dtype[field_idx] = pandas_type
             # Ensure we fall back to nullable numpy types, even when whole column is null:
             if isinstance(field.dataType, IntegralType) and pandas_col.isnull().any():
-                dtype[fieldIdx] = np.float64
+                dtype[field_idx] = np.float64
             if isinstance(field.dataType, BooleanType) and pandas_col.isnull().any():
-                dtype[fieldIdx] = object
+                dtype[field_idx] = object

         df = pd.DataFrame()
         for index, t in enumerate(dtype):
mlrun/feature_store/retrieval/job.py CHANGED
@@ -198,7 +198,8 @@ import mlrun.feature_store.retrieval
from mlrun.datastore.targets import get_target_driver
def merge_handler(context, vector_uri, target, entity_rows=None,
                  entity_timestamp_column=None, drop_columns=None, with_indexes=None, query=None,
-                  engine_args=None, order_by=None, start_time=None, end_time=None, timestamp_for_filtering=None):
+                  engine_args=None, order_by=None, start_time=None, end_time=None, timestamp_for_filtering=None,
+                  additional_filters=None):
    vector = context.get_store_resource(vector_uri)
    store_target = get_target_driver(target, vector)
    if entity_rows:
@@ -208,7 +209,7 @@ def merge_handler(context, vector_uri, target, entity_rows=None,
    merger = mlrun.feature_store.retrieval.{{{engine}}}(vector, **(engine_args or {}))
    merger.start(entity_rows, entity_timestamp_column, store_target, drop_columns, with_indexes=with_indexes,
                 query=query, order_by=order_by, start_time=start_time, end_time=end_time,
-                 timestamp_for_filtering=timestamp_for_filtering)
+                 timestamp_for_filtering=timestamp_for_filtering, additional_filters=additional_filters)

    target = vector.status.targets[store_target.name].to_dict()
    context.log_result('feature_vector', vector.uri)
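
merge_handler is the code template that remote merge jobs execute; the extra parameter threads additional_filters from the job spec through to merger.start(). Assuming the offline feature-service API exposes the same parameter, as this plumbing suggests (the vector name and filter below are hypothetical), usage would look roughly like:

    import mlrun.feature_store as fstore

    vector = fstore.FeatureVector("my-vector", ["stocks.*"])  # hypothetical vector
    resp = fstore.get_offline_features(
        vector,
        additional_filters=[("exchange", "not in", ["OTC"])],
    )
    df = resp.to_dataframe()
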
mlrun/feature_store/retrieval/spark_merger.py CHANGED
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+
 import pandas as pd
 import semver

@@ -252,13 +253,13 @@ class SparkFeatureMerger(BaseMerger):
             # handling case where there are multiple feature sets and user creates vector where
             # entity_timestamp_column is from a specific feature set (can't be entity timestamp)
             source_driver = mlrun.datastore.sources.source_kind_to_driver[source_kind]
-
             source = source_driver(
                 name=self.vector.metadata.name,
                 path=source_path,
                 time_field=time_column,
                 start_time=start_time,
                 end_time=end_time,
+                additional_filters=additional_filters,
                 **source_kwargs,
             )

mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py CHANGED
@@ -648,13 +648,13 @@ class TensorboardLogger(Logger, Generic[DLTypes.WeightType]):
         if isinstance(value, list):
             if len(value) == 0:
                 return ""
-            text = "\n" + yaml.dump(value)
+            text = "\n" + yaml.safe_dump(value)
             text = " \n".join([" " * tabs + line for line in text.splitlines()])
             return text
         if isinstance(value, dict):
             if len(value) == 0:
                 return ""
-            text = yaml.dump(value)
+            text = yaml.safe_dump(value)
             text = " \n".join(
                 [" " * tabs + "- " + line for line in text.splitlines()]
             )
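
Switching from yaml.dump to yaml.safe_dump restricts serialization to standard YAML types: safe_dump raises instead of emitting !!python/object tags for arbitrary Python objects, which is the safer behavior for log text. A small illustration of the difference:

    import yaml

    class Weights:
        pass

    print(yaml.dump({"obj": Weights()}))  # emits a !!python/object tag

    try:
        yaml.safe_dump({"obj": Weights()})
    except yaml.representer.RepresenterError as err:
        print("refused:", err)  # safe_dump only handles standard YAML types
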
mlrun/lists.py CHANGED
@@ -29,6 +29,7 @@ list_header = [
     "iter",
     "start",
     "state",
+    "kind",
     "name",
     "labels",
     "inputs",
@@ -57,6 +58,7 @@ class RunList(list):
             get_in(run, "metadata.iteration", ""),
             get_in(run, "status.start_time", ""),
             get_in(run, "status.state", ""),
+            get_in(run, "step_kind", get_in(run, "kind", "")),
             get_in(run, "metadata.name", ""),
             get_in(run, "metadata.labels", ""),
             get_in(run, "spec.inputs", ""),
mlrun/model.py CHANGED
@@ -681,10 +681,14 @@ class Notification(ModelObj):

     def __init__(
         self,
-        kind=None,
+        kind: mlrun.common.schemas.notification.NotificationKind = (
+            mlrun.common.schemas.notification.NotificationKind.slack
+        ),
         name=None,
         message=None,
-        severity=None,
+        severity: mlrun.common.schemas.notification.NotificationSeverity = (
+            mlrun.common.schemas.notification.NotificationSeverity.INFO
+        ),
         when=None,
         condition=None,
         secret_params=None,
@@ -693,12 +697,10 @@ class Notification(ModelObj):
         sent_time=None,
         reason=None,
     ):
-        self.kind = kind or mlrun.common.schemas.notification.NotificationKind.slack
+        self.kind = kind
         self.name = name or ""
         self.message = message or ""
-        self.severity = (
-            severity or mlrun.common.schemas.notification.NotificationSeverity.INFO
-        )
+        self.severity = severity
         self.when = when or ["completed"]
         self.condition = condition or ""
         self.secret_params = secret_params or {}
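
The Notification defaults move from or-fallbacks in the constructor body to typed defaults in the signature, so the slack kind and INFO severity are now visible in the API itself. An equivalent explicit construction (the name and trigger values are hypothetical):

    import mlrun
    from mlrun.common.schemas.notification import NotificationKind, NotificationSeverity

    notification = mlrun.model.Notification(
        kind=NotificationKind.slack,         # the signature default
        severity=NotificationSeverity.INFO,  # the signature default
        name="on-failure",                   # hypothetical
        when=["error"],
    )
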
mlrun/model_monitoring/db/stores/base/store.py CHANGED
@@ -15,7 +15,7 @@
 import typing
 from abc import ABC, abstractmethod

-import mlrun.common.schemas.model_monitoring.constants as mm_constants
+import mlrun.common.schemas.model_monitoring as mm_schemas


 class StoreBase(ABC):
@@ -115,8 +115,8 @@ class StoreBase(ABC):
     def write_application_event(
         self,
         event: dict[str, typing.Any],
-        kind: mm_constants.WriterEventKind = mm_constants.WriterEventKind.RESULT,
-    ):
+        kind: mm_schemas.WriterEventKind = mm_schemas.WriterEventKind.RESULT,
+    ) -> None:
         """
         Write a new event in the target table.

@@ -157,3 +157,16 @@ class StoreBase(ABC):

         """
         pass
+
+    @abstractmethod
+    def get_model_endpoint_metrics(
+        self, endpoint_id: str, type: mm_schemas.ModelEndpointMonitoringMetricType
+    ) -> list[mm_schemas.ModelEndpointMonitoringMetric]:
+        """
+        Get the model monitoring results and metrics of the requested model endpoint.
+
+        :param: endpoint_id: The model endpoint identifier.
+        :param: type:        The type of the requested metrics ("result" or "metric").
+
+        :return: A list of the available metrics.
+        """