mlrun 1.7.0rc17__py3-none-any.whl → 1.7.0rc18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (55)
  1. mlrun/alerts/alert.py +1 -1
  2. mlrun/artifacts/manager.py +5 -1
  3. mlrun/common/runtimes/constants.py +3 -0
  4. mlrun/common/schemas/__init__.py +1 -1
  5. mlrun/common/schemas/alert.py +31 -9
  6. mlrun/common/schemas/client_spec.py +1 -0
  7. mlrun/common/schemas/function.py +4 -0
  8. mlrun/common/schemas/model_monitoring/__init__.py +3 -1
  9. mlrun/common/schemas/model_monitoring/constants.py +20 -1
  10. mlrun/common/schemas/model_monitoring/grafana.py +9 -5
  11. mlrun/common/schemas/model_monitoring/model_endpoints.py +17 -6
  12. mlrun/config.py +2 -0
  13. mlrun/data_types/to_pandas.py +5 -5
  14. mlrun/datastore/datastore.py +6 -2
  15. mlrun/datastore/redis.py +2 -2
  16. mlrun/datastore/s3.py +5 -0
  17. mlrun/datastore/sources.py +111 -6
  18. mlrun/datastore/targets.py +2 -2
  19. mlrun/db/base.py +5 -1
  20. mlrun/db/httpdb.py +22 -3
  21. mlrun/db/nopdb.py +5 -1
  22. mlrun/errors.py +6 -0
  23. mlrun/feature_store/retrieval/conversion.py +5 -5
  24. mlrun/feature_store/retrieval/job.py +3 -2
  25. mlrun/feature_store/retrieval/spark_merger.py +2 -1
  26. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +2 -2
  27. mlrun/model_monitoring/db/stores/base/store.py +16 -3
  28. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +44 -43
  29. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +190 -91
  30. mlrun/model_monitoring/db/tsdb/__init__.py +35 -6
  31. mlrun/model_monitoring/db/tsdb/base.py +25 -18
  32. mlrun/model_monitoring/db/tsdb/tdengine/__init__.py +15 -0
  33. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +207 -0
  34. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +45 -0
  35. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +231 -0
  36. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +73 -72
  37. mlrun/model_monitoring/db/v3io_tsdb_reader.py +217 -16
  38. mlrun/model_monitoring/helpers.py +32 -0
  39. mlrun/model_monitoring/stream_processing.py +7 -4
  40. mlrun/model_monitoring/writer.py +18 -13
  41. mlrun/package/utils/_formatter.py +2 -2
  42. mlrun/projects/project.py +33 -8
  43. mlrun/render.py +8 -5
  44. mlrun/runtimes/databricks_job/databricks_wrapper.py +1 -1
  45. mlrun/utils/async_http.py +25 -5
  46. mlrun/utils/helpers.py +20 -1
  47. mlrun/utils/notifications/notification/slack.py +27 -7
  48. mlrun/utils/notifications/notification_pusher.py +38 -40
  49. mlrun/utils/version/version.json +2 -2
  50. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc18.dist-info}/METADATA +7 -2
  51. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc18.dist-info}/RECORD +55 -51
  52. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc18.dist-info}/LICENSE +0 -0
  53. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc18.dist-info}/WHEEL +0 -0
  54. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc18.dist-info}/entry_points.txt +0 -0
  55. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc18.dist-info}/top_level.txt +0 -0
mlrun/db/httpdb.py CHANGED
@@ -30,6 +30,7 @@ import semver
 from mlrun_pipelines.utils import compile_pipeline

 import mlrun
+import mlrun.common.runtimes
 import mlrun.common.schemas
 import mlrun.common.types
 import mlrun.model_monitoring.model_endpoint
@@ -530,6 +531,10 @@ class HTTPRunDB(RunDBInterface):
             server_cfg.get("model_endpoint_monitoring_endpoint_store_connection")
             or config.model_endpoint_monitoring.endpoint_store_connection
         )
+        config.model_endpoint_monitoring.tsdb_connection = (
+            server_cfg.get("model_monitoring_tsdb_connection")
+            or config.model_endpoint_monitoring.tsdb_connection
+        )
         config.packagers = server_cfg.get("packagers") or config.packagers
         server_data_prefixes = server_cfg.get("feature_store_data_prefixes") or {}
         for prefix in ["default", "nosql", "redisnosql"]:
@@ -752,7 +757,10 @@ class HTTPRunDB(RunDBInterface):
         uid: Optional[Union[str, list[str]]] = None,
         project: Optional[str] = None,
         labels: Optional[Union[str, list[str]]] = None,
-        state: Optional[str] = None,
+        state: Optional[
+            mlrun.common.runtimes.constants.RunStates
+        ] = None,  # Backward compatibility
+        states: typing.Optional[list[mlrun.common.runtimes.constants.RunStates]] = None,
         sort: bool = True,
         last: int = 0,
         iter: bool = False,
@@ -790,7 +798,8 @@ class HTTPRunDB(RunDBInterface):
         :param labels: A list of labels to filter by. Label filters work by either filtering a specific value
             of a label (i.e. list("key=value")) or by looking for the existence of a given
             key (i.e. "key").
-        :param state: List only runs whose state is specified.
+        :param state: Deprecated - List only runs whose state is specified (will be removed in 1.9.0)
+        :param states: List only runs whose state is one of the provided states.
         :param sort: Whether to sort the result according to their start time. Otherwise, results will be
             returned by their internal order in the DB (order will not be guaranteed).
         :param last: Deprecated - currently not used (will be removed in 1.8.0).
@@ -826,11 +835,19 @@ class HTTPRunDB(RunDBInterface):
                 FutureWarning,
             )

+        if state:
+            # TODO: Remove this in 1.9.0
+            warnings.warn(
+                "'state' is deprecated and will be removed in 1.9.0. Use 'states' instead.",
+                FutureWarning,
+            )
+
         if (
             not name
             and not uid
             and not labels
             and not state
+            and not states
             and not last
             and not start_time_from
             and not start_time_to
@@ -849,7 +866,9 @@ class HTTPRunDB(RunDBInterface):
             "name": name,
             "uid": uid,
             "label": labels or [],
-            "state": state,
+            "state": mlrun.utils.helpers.as_list(state)
+            if state is not None
+            else states or None,
             "sort": bool2str(sort),
             "iter": bool2str(iter),
             "start_time_from": datetime_to_iso(start_time_from),
mlrun/db/nopdb.py CHANGED
@@ -17,6 +17,7 @@ import datetime
 from typing import Optional, Union

 import mlrun.alerts
+import mlrun.common.runtimes.constants
 import mlrun.common.schemas
 import mlrun.errors

@@ -80,7 +81,10 @@ class NopDB(RunDBInterface):
         uid: Optional[Union[str, list[str]]] = None,
         project: Optional[str] = None,
         labels: Optional[Union[str, list[str]]] = None,
-        state: Optional[str] = None,
+        state: Optional[
+            mlrun.common.runtimes.constants.RunStates
+        ] = None,  # Backward compatibility
+        states: Optional[list[mlrun.common.runtimes.constants.RunStates]] = None,
         sort: bool = True,
         last: int = 0,
         iter: bool = False,
mlrun/errors.py CHANGED
@@ -155,6 +155,10 @@ class MLRunNotFoundError(MLRunHTTPStatusError):
     error_status_code = HTTPStatus.NOT_FOUND.value


+class MLRunPaginationEndOfResultsError(MLRunNotFoundError):
+    pass
+
+
 class MLRunBadRequestError(MLRunHTTPStatusError):
     error_status_code = HTTPStatus.BAD_REQUEST.value

@@ -240,3 +244,5 @@ STATUS_ERRORS = {
     HTTPStatus.SERVICE_UNAVAILABLE.value: MLRunServiceUnavailableError,
     HTTPStatus.NOT_IMPLEMENTED.value: MLRunNotImplementedServerError,
 }
+
+EXPECTED_ERRORS = (MLRunPaginationEndOfResultsError,)
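The new exception plus the `EXPECTED_ERRORS` tuple point at a pagination protocol in which requesting a page past the last one surfaces as a benign NOT_FOUND subtype. A hedged sketch of a consuming loop; `fetch_page` is a hypothetical callable, not an mlrun API:

    import mlrun.errors

    def drain_pages(fetch_page):
        # Yield pages until the server signals the end of the result set.
        page = 0
        while True:
            try:
                yield fetch_page(page)
            except mlrun.errors.MLRunPaginationEndOfResultsError:
                # Listed in EXPECTED_ERRORS, so treat it as normal loop
                # termination rather than a failure worth logging.
                break
            page += 1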
mlrun/feature_store/retrieval/conversion.py CHANGED
@@ -168,10 +168,10 @@ class PandasConversionMixin:
         column_counter = Counter(self.columns)

         dtype = [None] * len(self.schema)
-        for fieldIdx, field in enumerate(self.schema):
+        for field_idx, field in enumerate(self.schema):
             # For duplicate column name, we use `iloc` to access it.
             if column_counter[field.name] > 1:
-                pandas_col = pdf.iloc[:, fieldIdx]
+                pandas_col = pdf.iloc[:, field_idx]
             else:
                 pandas_col = pdf[field.name]

@@ -187,12 +187,12 @@ class PandasConversionMixin:
                 and field.nullable
                 and pandas_col.isnull().any()
             ):
-                dtype[fieldIdx] = pandas_type
+                dtype[field_idx] = pandas_type
             # Ensure we fall back to nullable numpy types, even when whole column is null:
             if isinstance(field.dataType, IntegralType) and pandas_col.isnull().any():
-                dtype[fieldIdx] = np.float64
+                dtype[field_idx] = np.float64
             if isinstance(field.dataType, BooleanType) and pandas_col.isnull().any():
-                dtype[fieldIdx] = object
+                dtype[field_idx] = object

         df = pd.DataFrame()
         for index, t in enumerate(dtype):
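The `np.float64` fallback above exists because plain numpy integer dtypes cannot represent missing values, so an integral Spark column containing nulls must be promoted when it lands in pandas. A two-line demonstration of the behavior being guarded against:

    import pandas as pd

    s = pd.Series([1, 2, None])
    print(s.dtype)  # float64 -- NaN forces promotion out of int64,
                    # which is exactly why dtype[field_idx] = np.float64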
mlrun/feature_store/retrieval/job.py CHANGED
@@ -198,7 +198,8 @@ import mlrun.feature_store.retrieval
 from mlrun.datastore.targets import get_target_driver
 def merge_handler(context, vector_uri, target, entity_rows=None,
                   entity_timestamp_column=None, drop_columns=None, with_indexes=None, query=None,
-                  engine_args=None, order_by=None, start_time=None, end_time=None, timestamp_for_filtering=None):
+                  engine_args=None, order_by=None, start_time=None, end_time=None, timestamp_for_filtering=None,
+                  additional_filters=None):
     vector = context.get_store_resource(vector_uri)
     store_target = get_target_driver(target, vector)
     if entity_rows:
@@ -208,7 +209,7 @@ def merge_handler(context, vector_uri, target, entity_rows=None,
     merger = mlrun.feature_store.retrieval.{{{engine}}}(vector, **(engine_args or {}))
     merger.start(entity_rows, entity_timestamp_column, store_target, drop_columns, with_indexes=with_indexes,
                  query=query, order_by=order_by, start_time=start_time, end_time=end_time,
-                 timestamp_for_filtering=timestamp_for_filtering)
+                 timestamp_for_filtering=timestamp_for_filtering, additional_filters=additional_filters)

     target = vector.status.targets[store_target.name].to_dict()
     context.log_result('feature_vector', vector.uri)
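This hunk threads a new `additional_filters` argument through the generated `merge_handler` into the merger (and, per the spark_merger.py hunk below, onward into the source driver). A hedged sketch of what the user-facing call might look like; the `(column, op, value)` tuple format mirrors parquet-style filters and is an assumption, as is `get_offline_features` accepting the argument in this release:

    import mlrun.feature_store as fstore

    vector = fstore.get_feature_vector("store://feature-vectors/my-project/my-vector")
    resp = fstore.get_offline_features(
        vector,
        additional_filters=[("age", ">", 30)],  # assumed filter-tuple format
    )
    df = resp.to_dataframe()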
mlrun/feature_store/retrieval/spark_merger.py CHANGED
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+
 import pandas as pd
 import semver

@@ -252,13 +253,13 @@ class SparkFeatureMerger(BaseMerger):
         # handling case where there are multiple feature sets and user creates vector where
         # entity_timestamp_column is from a specific feature set (can't be entity timestamp)
         source_driver = mlrun.datastore.sources.source_kind_to_driver[source_kind]
-
         source = source_driver(
             name=self.vector.metadata.name,
             path=source_path,
             time_field=time_column,
             start_time=start_time,
             end_time=end_time,
+            additional_filters=additional_filters,
             **source_kwargs,
         )

mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py CHANGED
@@ -648,13 +648,13 @@ class TensorboardLogger(Logger, Generic[DLTypes.WeightType]):
         if isinstance(value, list):
             if len(value) == 0:
                 return ""
-            text = "\n" + yaml.dump(value)
+            text = "\n" + yaml.safe_dump(value)
             text = " \n".join([" " * tabs + line for line in text.splitlines()])
             return text
         if isinstance(value, dict):
             if len(value) == 0:
                 return ""
-            text = yaml.dump(value)
+            text = yaml.safe_dump(value)
             text = " \n".join(
                 [" " * tabs + "- " + line for line in text.splitlines()]
             )
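Swapping `yaml.dump` for `yaml.safe_dump` hardens the logger: the safe dumper refuses arbitrary Python objects instead of emitting `!!python/object` tags that an unsafe load elsewhere could turn back into live objects. A quick illustration of the difference:

    import yaml

    class Custom:
        pass

    print(yaml.dump(Custom()))
    # "!!python/object:__main__.Custom {}" -- a tag an unsafe loader could instantiate

    try:
        yaml.safe_dump(Custom())
    except yaml.representer.RepresenterError:
        print("safe_dump only accepts plain types (str, int, list, dict, ...)")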
mlrun/model_monitoring/db/stores/base/store.py CHANGED
@@ -15,7 +15,7 @@
 import typing
 from abc import ABC, abstractmethod

-import mlrun.common.schemas.model_monitoring.constants as mm_constants
+import mlrun.common.schemas.model_monitoring as mm_schemas


 class StoreBase(ABC):
@@ -115,8 +115,8 @@ class StoreBase(ABC):
     def write_application_event(
         self,
         event: dict[str, typing.Any],
-        kind: mm_constants.WriterEventKind = mm_constants.WriterEventKind.RESULT,
-    ):
+        kind: mm_schemas.WriterEventKind = mm_schemas.WriterEventKind.RESULT,
+    ) -> None:
         """
         Write a new event in the target table.

@@ -157,3 +157,16 @@ class StoreBase(ABC):

         """
         pass
+
+    @abstractmethod
+    def get_model_endpoint_metrics(
+        self, endpoint_id: str, type: mm_schemas.ModelEndpointMonitoringMetricType
+    ) -> list[mm_schemas.ModelEndpointMonitoringMetric]:
+        """
+        Get the model monitoring results and metrics of the requested model endpoint.
+
+        :param: endpoint_id: The model endpoint identifier.
+        :param: type: The type of the requested metrics ("result" or "metric").
+
+        :return: A list of the available metrics.
+        """
mlrun/model_monitoring/db/stores/sqldb/sql_store.py CHANGED
@@ -21,7 +21,7 @@ import pandas as pd
 import sqlalchemy

 import mlrun.common.model_monitoring.helpers
-import mlrun.common.schemas.model_monitoring as mm_constants
+import mlrun.common.schemas.model_monitoring as mm_schemas
 import mlrun.model_monitoring.db
 import mlrun.model_monitoring.db.stores.sqldb.models
 import mlrun.model_monitoring.helpers
@@ -71,7 +71,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
                 connection_string=self._sql_connection_string
             )
         )
-        self._tables[mm_constants.EventFieldType.MODEL_ENDPOINTS] = (
+        self._tables[mm_schemas.EventFieldType.MODEL_ENDPOINTS] = (
             self.ModelEndpointsTable
         )

@@ -81,7 +81,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
                 connection_string=self._sql_connection_string
             )
         )
-        self._tables[mm_constants.FileTargetKind.APP_RESULTS] = (
+        self._tables[mm_schemas.FileTargetKind.APP_RESULTS] = (
             self.ApplicationResultsTable
         )

@@ -89,7 +89,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         self.MonitoringSchedulesTable = mlrun.model_monitoring.db.stores.sqldb.models._get_monitoring_schedules_table(
             connection_string=self._sql_connection_string
         )
-        self._tables[mm_constants.FileTargetKind.MONITORING_SCHEDULES] = (
+        self._tables[mm_schemas.FileTargetKind.MONITORING_SCHEDULES] = (
             self.MonitoringSchedulesTable
         )

@@ -182,12 +182,12 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """

         # Adjust timestamps fields
-        endpoint[mm_constants.EventFieldType.FIRST_REQUEST] = (endpoint)[
-            mm_constants.EventFieldType.LAST_REQUEST
+        endpoint[mm_schemas.EventFieldType.FIRST_REQUEST] = (endpoint)[
+            mm_schemas.EventFieldType.LAST_REQUEST
         ] = mlrun.utils.datetime_now()

         self._write(
-            table=mm_constants.EventFieldType.MODEL_ENDPOINTS,
+            table=mm_schemas.EventFieldType.MODEL_ENDPOINTS,
             event=endpoint,
         )

@@ -204,9 +204,9 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """
         self._init_model_endpoints_table()

-        attributes.pop(mm_constants.EventFieldType.ENDPOINT_ID, None)
+        attributes.pop(mm_schemas.EventFieldType.ENDPOINT_ID, None)

-        filter_endpoint = {mm_constants.EventFieldType.UID: endpoint_id}
+        filter_endpoint = {mm_schemas.EventFieldType.UID: endpoint_id}

         self._update(
             attributes=attributes, table=self.ModelEndpointsTable, **filter_endpoint
@@ -220,7 +220,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """
         self._init_model_endpoints_table()

-        filter_endpoint = {mm_constants.EventFieldType.UID: endpoint_id}
+        filter_endpoint = {mm_schemas.EventFieldType.UID: endpoint_id}
         # Delete the model endpoint record using sqlalchemy ORM
         self._delete(table=self.ModelEndpointsTable, **filter_endpoint)

@@ -240,7 +240,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         self._init_model_endpoints_table()

         # Get the model endpoint record using sqlalchemy ORM
-        filter_endpoint = {mm_constants.EventFieldType.UID: endpoint_id}
+        filter_endpoint = {mm_schemas.EventFieldType.UID: endpoint_id}
         endpoint_record = self._get(table=self.ModelEndpointsTable, **filter_endpoint)

         if not endpoint_record:
@@ -292,32 +292,32 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=mm_constants.EventFieldType.MODEL,
+                key_filter=mm_schemas.EventFieldType.MODEL,
                 filtered_values=[model],
             )
         if function:
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=mm_constants.EventFieldType.FUNCTION,
+                key_filter=mm_schemas.EventFieldType.FUNCTION,
                 filtered_values=[function],
             )
         if uids:
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=mm_constants.EventFieldType.UID,
+                key_filter=mm_schemas.EventFieldType.UID,
                 filtered_values=uids,
                 combined=False,
             )
         if top_level:
-            node_ep = str(mm_constants.EndpointType.NODE_EP.value)
-            router_ep = str(mm_constants.EndpointType.ROUTER.value)
+            node_ep = str(mm_schemas.EndpointType.NODE_EP.value)
+            router_ep = str(mm_schemas.EndpointType.ROUTER.value)
             endpoint_types = [node_ep, router_ep]
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=mm_constants.EventFieldType.ENDPOINT_TYPE,
+                key_filter=mm_schemas.EventFieldType.ENDPOINT_TYPE,
                 filtered_values=endpoint_types,
                 combined=False,
             )
@@ -338,8 +338,8 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
     def write_application_event(
         self,
         event: dict[str, typing.Any],
-        kind: mm_constants.WriterEventKind = mm_constants.WriterEventKind.RESULT,
-    ):
+        kind: mm_schemas.WriterEventKind = mm_schemas.WriterEventKind.RESULT,
+    ) -> None:
         """
         Write a new application event in the target table.

@@ -349,16 +349,14 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         :param kind: The type of the event, can be either "result" or "metric".
         """

-        if kind == mm_constants.WriterEventKind.METRIC:
+        if kind == mm_schemas.WriterEventKind.METRIC:
             # TODO : Implement the logic for writing metrics to MySQL
             return

         self._init_application_results_table()

         application_filter_dict = {
-            mm_constants.EventFieldType.UID: self._generate_application_result_uid(
-                event
-            )
+            mm_schemas.EventFieldType.UID: self._generate_application_result_uid(event)
         }

         application_record = self._get(
@@ -367,11 +365,11 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         if application_record:
             self._convert_to_datetime(
                 event=event,
-                key=mm_constants.WriterEvent.START_INFER_TIME,
+                key=mm_schemas.WriterEvent.START_INFER_TIME,
             )
             self._convert_to_datetime(
                 event=event,
-                key=mm_constants.WriterEvent.END_INFER_TIME,
+                key=mm_schemas.WriterEvent.END_INFER_TIME,
             )
             # Update an existing application result
             self._update(
@@ -381,12 +379,12 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
             )
         else:
             # Write a new application result
-            event[mm_constants.EventFieldType.UID] = application_filter_dict[
-                mm_constants.EventFieldType.UID
+            event[mm_schemas.EventFieldType.UID] = application_filter_dict[
+                mm_schemas.EventFieldType.UID
             ]

             self._write(
-                table=mm_constants.FileTargetKind.APP_RESULTS,
+                table=mm_schemas.FileTargetKind.APP_RESULTS,
                 event=event,
             )

@@ -398,11 +396,11 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
     @staticmethod
     def _generate_application_result_uid(event: dict[str, typing.Any]) -> str:
         return (
-            event[mm_constants.WriterEvent.ENDPOINT_ID]
+            event[mm_schemas.WriterEvent.ENDPOINT_ID]
             + "_"
-            + event[mm_constants.WriterEvent.APPLICATION_NAME]
+            + event[mm_schemas.WriterEvent.APPLICATION_NAME]
             + "_"
-            + event[mm_constants.ResultData.RESULT_NAME]
+            + event[mm_schemas.ResultData.RESULT_NAME]
         )

     def get_last_analyzed(self, endpoint_id: str, application_name: str) -> int:
@@ -452,17 +450,17 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         if not monitoring_schedule_record:
             # Add a new record with empty last analyzed value
             self._write(
-                table=mm_constants.FileTargetKind.MONITORING_SCHEDULES,
+                table=mm_schemas.FileTargetKind.MONITORING_SCHEDULES,
                 event={
-                    mm_constants.SchedulingKeys.UID: uuid.uuid4().hex,
-                    mm_constants.SchedulingKeys.APPLICATION_NAME: application_name,
-                    mm_constants.SchedulingKeys.ENDPOINT_ID: endpoint_id,
-                    mm_constants.SchedulingKeys.LAST_ANALYZED: last_analyzed,
+                    mm_schemas.SchedulingKeys.UID: uuid.uuid4().hex,
+                    mm_schemas.SchedulingKeys.APPLICATION_NAME: application_name,
+                    mm_schemas.SchedulingKeys.ENDPOINT_ID: endpoint_id,
+                    mm_schemas.SchedulingKeys.LAST_ANALYZED: last_analyzed,
                 },
             )

         self._update(
-            attributes={mm_constants.SchedulingKeys.LAST_ANALYZED: last_analyzed},
+            attributes={mm_schemas.SchedulingKeys.LAST_ANALYZED: last_analyzed},
             table=self.MonitoringSchedulesTable,
             **application_filter_dict,
         )
@@ -558,7 +556,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):

         # Convert endpoint labels into dictionary
         endpoint_labels = json.loads(
-            endpoint_dict.get(mm_constants.EventFieldType.LABELS)
+            endpoint_dict.get(mm_schemas.EventFieldType.LABELS)
         )

         for label in labels:
@@ -585,11 +583,9 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         )
         application_filter_dict = {}
         if endpoint_id:
-            application_filter_dict[mm_constants.SchedulingKeys.ENDPOINT_ID] = (
-                endpoint_id
-            )
+            application_filter_dict[mm_schemas.SchedulingKeys.ENDPOINT_ID] = endpoint_id
         if application_name:
-            application_filter_dict[mm_constants.SchedulingKeys.APPLICATION_NAME] = (
+            application_filter_dict[mm_schemas.SchedulingKeys.APPLICATION_NAME] = (
                 application_name
             )
         return application_filter_dict
@@ -603,7 +599,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         endpoints = self.list_model_endpoints()

         for endpoint_dict in endpoints:
-            endpoint_id = endpoint_dict[mm_constants.EventFieldType.UID]
+            endpoint_id = endpoint_dict[mm_schemas.EventFieldType.UID]

             # Delete last analyzed records
             self._delete_last_analyzed(endpoint_id=endpoint_id)
@@ -613,3 +609,8 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):

             # Delete model endpoint record
             self.delete_model_endpoint(endpoint_id=endpoint_id)
+
+    def get_model_endpoint_metrics(
+        self, endpoint_id: str, type: mm_schemas.ModelEndpointMonitoringMetricType
+    ) -> list[mm_schemas.ModelEndpointMonitoringMetric]:
+        raise NotImplementedError
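Note that the SQL backend only stubs the new method for now, so callers that may run against SQL need a guard. A hedged sketch; the `RESULT` member is assumed from the docstring's "result"/"metric" wording:

    import mlrun.common.schemas.model_monitoring as mm_schemas

    def endpoint_results_or_empty(store, endpoint_id: str):
        try:
            return store.get_model_endpoint_metrics(
                endpoint_id, mm_schemas.ModelEndpointMonitoringMetricType.RESULT
            )
        except NotImplementedError:
            # As of rc18 the SQL store raises NotImplementedError here
            return []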