mlrun 1.8.0rc4__py3-none-any.whl → 1.8.0rc7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mlrun has been flagged as potentially problematic; consult the package registry's advisory page for more details.

Files changed (75)
  1. mlrun/__init__.py +5 -3
  2. mlrun/alerts/alert.py +129 -2
  3. mlrun/artifacts/__init__.py +1 -1
  4. mlrun/artifacts/base.py +12 -1
  5. mlrun/artifacts/document.py +59 -38
  6. mlrun/common/constants.py +1 -0
  7. mlrun/common/model_monitoring/__init__.py +0 -2
  8. mlrun/common/model_monitoring/helpers.py +0 -28
  9. mlrun/common/schemas/__init__.py +2 -4
  10. mlrun/common/schemas/alert.py +80 -1
  11. mlrun/common/schemas/artifact.py +4 -0
  12. mlrun/common/schemas/client_spec.py +0 -1
  13. mlrun/common/schemas/model_monitoring/__init__.py +0 -6
  14. mlrun/common/schemas/model_monitoring/constants.py +11 -9
  15. mlrun/common/schemas/model_monitoring/model_endpoints.py +77 -149
  16. mlrun/common/schemas/notification.py +6 -0
  17. mlrun/common/schemas/project.py +3 -0
  18. mlrun/config.py +2 -3
  19. mlrun/datastore/datastore_profile.py +57 -17
  20. mlrun/datastore/sources.py +1 -2
  21. mlrun/datastore/vectorstore.py +67 -59
  22. mlrun/db/base.py +29 -19
  23. mlrun/db/factory.py +0 -3
  24. mlrun/db/httpdb.py +224 -161
  25. mlrun/db/nopdb.py +36 -17
  26. mlrun/execution.py +46 -32
  27. mlrun/feature_store/api.py +1 -0
  28. mlrun/model.py +7 -0
  29. mlrun/model_monitoring/__init__.py +3 -2
  30. mlrun/model_monitoring/api.py +55 -53
  31. mlrun/model_monitoring/applications/_application_steps.py +4 -2
  32. mlrun/model_monitoring/applications/base.py +165 -6
  33. mlrun/model_monitoring/applications/context.py +88 -37
  34. mlrun/model_monitoring/applications/evidently_base.py +0 -1
  35. mlrun/model_monitoring/applications/histogram_data_drift.py +3 -7
  36. mlrun/model_monitoring/controller.py +43 -37
  37. mlrun/model_monitoring/db/__init__.py +0 -2
  38. mlrun/model_monitoring/db/tsdb/base.py +2 -1
  39. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +2 -1
  40. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +43 -0
  41. mlrun/model_monitoring/helpers.py +79 -66
  42. mlrun/model_monitoring/stream_processing.py +83 -270
  43. mlrun/model_monitoring/writer.py +1 -10
  44. mlrun/projects/pipelines.py +37 -1
  45. mlrun/projects/project.py +171 -74
  46. mlrun/run.py +40 -0
  47. mlrun/runtimes/nuclio/function.py +7 -6
  48. mlrun/runtimes/nuclio/serving.py +9 -2
  49. mlrun/serving/routers.py +158 -145
  50. mlrun/serving/server.py +6 -0
  51. mlrun/serving/states.py +21 -7
  52. mlrun/serving/v2_serving.py +70 -61
  53. mlrun/utils/helpers.py +14 -30
  54. mlrun/utils/notifications/notification/mail.py +36 -9
  55. mlrun/utils/notifications/notification_pusher.py +43 -18
  56. mlrun/utils/version/version.json +2 -2
  57. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc7.dist-info}/METADATA +5 -4
  58. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc7.dist-info}/RECORD +62 -75
  59. mlrun/common/schemas/model_monitoring/model_endpoint_v2.py +0 -149
  60. mlrun/model_monitoring/db/stores/__init__.py +0 -136
  61. mlrun/model_monitoring/db/stores/base/__init__.py +0 -15
  62. mlrun/model_monitoring/db/stores/base/store.py +0 -154
  63. mlrun/model_monitoring/db/stores/sqldb/__init__.py +0 -13
  64. mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +0 -46
  65. mlrun/model_monitoring/db/stores/sqldb/models/base.py +0 -93
  66. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +0 -47
  67. mlrun/model_monitoring/db/stores/sqldb/models/sqlite.py +0 -25
  68. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +0 -408
  69. mlrun/model_monitoring/db/stores/v3io_kv/__init__.py +0 -13
  70. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +0 -464
  71. mlrun/model_monitoring/model_endpoint.py +0 -120
  72. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc7.dist-info}/LICENSE +0 -0
  73. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc7.dist-info}/WHEEL +0 -0
  74. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc7.dist-info}/entry_points.txt +0 -0
  75. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc7.dist-info}/top_level.txt +0 -0
@@ -19,7 +19,7 @@ import os
19
19
  from collections.abc import Iterator
20
20
  from contextlib import AbstractContextManager
21
21
  from types import TracebackType
22
- from typing import Any, NamedTuple, Optional, cast
22
+ from typing import NamedTuple, Optional, cast
23
23
 
24
24
  import nuclio_sdk
25
25
 
@@ -27,6 +27,7 @@ import mlrun
27
27
  import mlrun.common.schemas.model_monitoring.constants as mm_constants
28
28
  import mlrun.feature_store as fstore
29
29
  import mlrun.model_monitoring
30
+ from mlrun.common.schemas import EndpointType
30
31
  from mlrun.datastore import get_stream_pusher
31
32
  from mlrun.errors import err_to_str
32
33
  from mlrun.model_monitoring.db._schedules import ModelMonitoringSchedulesFile
@@ -65,7 +66,7 @@ class _BatchWindow:
65
66
  self._start = self._get_last_analyzed()
66
67
 
67
68
  def _get_saved_last_analyzed(self) -> Optional[int]:
68
- return self._db.get_application_time(self._application)
69
+ return cast(int, self._db.get_application_time(self._application))
69
70
 
70
71
  def _update_last_analyzed(self, last_analyzed: int) -> None:
71
72
  self._db.update_application_time(
@@ -161,18 +162,20 @@ class _BatchWindowGenerator(AbstractContextManager):
161
162
  )
162
163
 
163
164
  @classmethod
164
- def _get_last_updated_time(cls, last_request: str, has_stream: bool) -> int:
165
+ def _get_last_updated_time(
166
+ cls, last_request: datetime.datetime, not_batch_endpoint: bool
167
+ ) -> int:
165
168
  """
166
169
  Get the last updated time of a model endpoint.
167
170
  """
168
171
  last_updated = int(
169
- cls._date_string2timestamp(last_request)
172
+ last_request.timestamp()
170
173
  - cast(
171
174
  float,
172
175
  mlrun.mlconf.model_endpoint_monitoring.parquet_batching_timeout_secs,
173
176
  )
174
177
  )
175
- if not has_stream:
178
+ if not not_batch_endpoint:
176
179
  # If the endpoint does not have a stream, `last_updated` should be
177
180
  # the minimum between the current time and the last updated time.
178
181
  # This compensates for the bumping mechanism - see
@@ -183,17 +186,13 @@ class _BatchWindowGenerator(AbstractContextManager):
183
186
  )
184
187
  return last_updated
185
188
 
186
- @staticmethod
187
- def _date_string2timestamp(date_string: str) -> int:
188
- return int(datetime.datetime.fromisoformat(date_string).timestamp())
189
-
190
189
  def get_intervals(
191
190
  self,
192
191
  *,
193
192
  application: str,
194
- first_request: str,
195
- last_request: str,
196
- has_stream: bool,
193
+ first_request: datetime.datetime,
194
+ last_request: datetime.datetime,
195
+ not_batch_endpoint: bool,
197
196
  ) -> Iterator[_Interval]:
198
197
  """
199
198
  Get the batch window for a specific endpoint and application.
@@ -204,8 +203,8 @@ class _BatchWindowGenerator(AbstractContextManager):
204
203
  schedules_file=self._schedules_file,
205
204
  application=application,
206
205
  timedelta_seconds=self._timedelta,
207
- last_updated=self._get_last_updated_time(last_request, has_stream),
208
- first_request=self._date_string2timestamp(first_request),
206
+ last_updated=self._get_last_updated_time(last_request, not_batch_endpoint),
207
+ first_request=int(first_request.timestamp()),
209
208
  )
210
209
  yield from batch_window.get_intervals()
211
210
 
@@ -235,8 +234,6 @@ class MonitoringApplicationController:
235
234
 
236
235
  logger.debug(f"Initializing {self.__class__.__name__}", project=self.project)
237
236
 
238
- self.db = mlrun.model_monitoring.get_store_object(project=self.project)
239
-
240
237
  self._window_length = _get_window_length()
241
238
 
242
239
  self.model_monitoring_access_key = self._get_model_monitoring_access_key()
@@ -253,19 +250,16 @@ class MonitoringApplicationController:
253
250
  return access_key
254
251
 
255
252
  @staticmethod
256
- def _should_monitor_endpoint(endpoint: dict[str, Any]) -> bool:
253
+ def _should_monitor_endpoint(endpoint: mlrun.common.schemas.ModelEndpoint) -> bool:
257
254
  return (
258
- # Is the model endpoint active?
259
- endpoint[mm_constants.EventFieldType.ACTIVE]
260
255
  # Is the model endpoint monitored?
261
- and endpoint[mm_constants.EventFieldType.MONITORING_MODE]
262
- == mm_constants.ModelMonitoringMode.enabled
256
+ endpoint.status.monitoring_mode == mm_constants.ModelMonitoringMode.enabled
263
257
  # Was the model endpoint called? I.e., are the first and last requests nonempty?
264
- and endpoint[mm_constants.EventFieldType.FIRST_REQUEST]
265
- and endpoint[mm_constants.EventFieldType.LAST_REQUEST]
258
+ and endpoint.status.first_request
259
+ and endpoint.status.last_request
266
260
  # Is the model endpoint not a router endpoint? Router endpoint has no feature stats
267
- and int(endpoint[mm_constants.EventFieldType.ENDPOINT_TYPE])
268
- != mm_constants.EndpointType.ROUTER
261
+ and endpoint.metadata.endpoint_type.value
262
+ != mm_constants.EndpointType.ROUTER.value
269
263
  )
270
264
 
271
265
  def run(self) -> None:
@@ -281,7 +275,10 @@ class MonitoringApplicationController:
281
275
  logger.info("Start running monitoring controller")
282
276
  try:
283
277
  applications_names = []
284
- endpoints = self.db.list_model_endpoints(include_stats=True)
278
+ endpoints_list = mlrun.db.get_run_db().list_model_endpoints(
279
+ project=self.project, tsdb_metrics=True
280
+ )
281
+ endpoints = endpoints_list.endpoints
285
282
  if not endpoints:
286
283
  logger.info("No model endpoints found", project=self.project)
287
284
  return
@@ -333,12 +330,19 @@ class MonitoringApplicationController:
333
330
  model_monitoring_access_key=self.model_monitoring_access_key,
334
331
  storage_options=self.storage_options,
335
332
  )
333
+ else:
334
+ logger.debug(
335
+ "Skipping endpoint, not ready or not suitable for monitoring",
336
+ endpoint_id=endpoint.metadata.uid,
337
+ endpoint_name=endpoint.metadata.name,
338
+ )
339
+ logger.info("Finished running monitoring controller")
336
340
 
337
341
  @classmethod
338
342
  def model_endpoint_process(
339
343
  cls,
340
344
  project: str,
341
- endpoint: dict,
345
+ endpoint: mlrun.common.schemas.ModelEndpoint,
342
346
  applications_names: list[str],
343
347
  window_length: int,
344
348
  model_monitoring_access_key: str,
@@ -356,11 +360,11 @@ class MonitoringApplicationController:
356
360
  :param model_monitoring_access_key: (str) Access key to apply the model monitoring process.
357
361
  :param storage_options: (dict) Storage options for reading the infer parquet files.
358
362
  """
359
- endpoint_id = endpoint[mm_constants.EventFieldType.UID]
360
- has_stream = endpoint[mm_constants.EventFieldType.STREAM_PATH] != ""
361
- m_fs = fstore.get_feature_set(
362
- endpoint[mm_constants.EventFieldType.FEATURE_SET_URI]
363
+ endpoint_id = endpoint.metadata.uid
364
+ not_batch_endpoint = not (
365
+ endpoint.metadata.endpoint_type == EndpointType.BATCH_EP
363
366
  )
367
+ m_fs = fstore.get_feature_set(endpoint.spec.monitoring_feature_set_uri)
364
368
  try:
365
369
  with _BatchWindowGenerator(
366
370
  project=project, endpoint_id=endpoint_id, window_length=window_length
@@ -371,11 +375,9 @@ class MonitoringApplicationController:
371
375
  end_infer_time,
372
376
  ) in batch_window_generator.get_intervals(
373
377
  application=application,
374
- first_request=endpoint[
375
- mm_constants.EventFieldType.FIRST_REQUEST
376
- ],
377
- last_request=endpoint[mm_constants.EventFieldType.LAST_REQUEST],
378
- has_stream=has_stream,
378
+ first_request=endpoint.status.first_request,
379
+ last_request=endpoint.status.last_request,
380
+ not_batch_endpoint=not_batch_endpoint,
379
381
  ):
380
382
  df = m_fs.to_dataframe(
381
383
  start_time=start_infer_time,
@@ -401,15 +403,17 @@ class MonitoringApplicationController:
401
403
  start_infer_time=start_infer_time,
402
404
  end_infer_time=end_infer_time,
403
405
  endpoint_id=endpoint_id,
406
+ endpoint_name=endpoint.metadata.name,
404
407
  project=project,
405
408
  applications_names=[application],
406
409
  model_monitoring_access_key=model_monitoring_access_key,
407
410
  )
411
+ logger.info("Finished processing endpoint", endpoint_id=endpoint_id)
408
412
 
409
413
  except Exception:
410
414
  logger.exception(
411
415
  "Encountered an exception",
412
- endpoint_id=endpoint[mm_constants.EventFieldType.UID],
416
+ endpoint_id=endpoint.metadata.uid,
413
417
  )
414
418
 
415
419
  @staticmethod
@@ -417,6 +421,7 @@ class MonitoringApplicationController:
417
421
  start_infer_time: datetime.datetime,
418
422
  end_infer_time: datetime.datetime,
419
423
  endpoint_id: str,
424
+ endpoint_name: str,
420
425
  project: str,
421
426
  applications_names: list[str],
422
427
  model_monitoring_access_key: str,
@@ -440,6 +445,7 @@ class MonitoringApplicationController:
440
445
  sep=" ", timespec="microseconds"
441
446
  ),
442
447
  mm_constants.ApplicationEvent.ENDPOINT_ID: endpoint_id,
448
+ mm_constants.ApplicationEvent.ENDPOINT_NAME: endpoint_name,
443
449
  mm_constants.ApplicationEvent.OUTPUT_STREAM_URI: get_stream_path(
444
450
  project=project,
445
451
  function_name=mm_constants.MonitoringFunctionNames.WRITER,
@@ -12,7 +12,5 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
 
15
- from .stores import ObjectStoreFactory, get_store_object
16
- from .stores.base import StoreBase
17
15
  from .tsdb import get_tsdb_connector
18
16
  from .tsdb.base import TSDBConnector
@@ -47,7 +47,7 @@ class TSDBConnector(ABC):
47
47
  self.project = project
48
48
 
49
49
  @abstractmethod
50
- def apply_monitoring_stream_steps(self, graph) -> None:
50
+ def apply_monitoring_stream_steps(self, graph, **kwargs) -> None:
51
51
  """
52
52
  Apply TSDB steps on the provided monitoring graph. Throughout these steps, the graph stores live data of
53
53
  different key metric dictionaries. This data is being used by the monitoring dashboards in
@@ -294,6 +294,7 @@ class TSDBConnector(ABC):
294
294
  ) -> pd.DataFrame:
295
295
  """
296
296
  Fetches data from the predictions TSDB table and returns the average latency for each specified endpoint
297
+ in the provided time range, which by default is the last 24 hours.
297
298
 
298
299
  :param endpoint_ids: A list of model endpoint identifiers.
299
300
  :param start: The start time for the query.
@@ -164,7 +164,7 @@ class TDEngineConnector(TSDBConnector):
164
164
  def _convert_to_datetime(val: typing.Union[str, datetime]) -> datetime:
165
165
  return datetime.fromisoformat(val) if isinstance(val, str) else val
166
166
 
167
- def apply_monitoring_stream_steps(self, graph):
167
+ def apply_monitoring_stream_steps(self, graph, **kwarg):
168
168
  """
169
169
  Apply TSDB steps on the provided monitoring graph. Throughout these steps, the graph stores live data of
170
170
  different key metric dictionaries. This data is being used by the monitoring dashboards in
@@ -701,6 +701,7 @@ class TDEngineConnector(TSDBConnector):
701
701
  endpoint_ids = (
702
702
  endpoint_ids if isinstance(endpoint_ids, list) else [endpoint_ids]
703
703
  )
704
+ start = start or (mlrun.utils.datetime_now() - timedelta(hours=24))
704
705
  start, end = self._get_start_end(start, end)
705
706
  df = self._get_records(
706
707
  table=self.tables[mm_schemas.TDEngineSuperTables.PREDICTIONS].super_table,
@@ -168,6 +168,9 @@ class V3IOTSDBConnector(TSDBConnector):
168
168
  tsdb_batching_max_events: int = 1000,
169
169
  tsdb_batching_timeout_secs: int = 30,
170
170
  sample_window: int = 10,
171
+ aggregate_windows: Optional[list[str]] = None,
172
+ aggregate_period: str = "1m",
173
+ **kwarg,
171
174
  ):
172
175
  """
173
176
  Apply TSDB steps on the provided monitoring graph. Throughout these steps, the graph stores live data of
@@ -178,7 +181,40 @@ class V3IOTSDBConnector(TSDBConnector):
178
181
  - endpoint_features (Prediction and feature names and values)
179
182
  - custom_metrics (user-defined metrics)
180
183
  """
184
+ aggregate_windows = aggregate_windows or ["5m", "1h"]
181
185
 
186
+ # Calculate number of predictions and average latency
187
+ def apply_storey_aggregations():
188
+ # Calculate number of predictions for each window (5 min and 1 hour by default)
189
+ graph.add_step(
190
+ class_name="storey.AggregateByKey",
191
+ aggregates=[
192
+ {
193
+ "name": EventFieldType.LATENCY,
194
+ "column": EventFieldType.LATENCY,
195
+ "operations": ["count", "avg"],
196
+ "windows": aggregate_windows,
197
+ "period": aggregate_period,
198
+ }
199
+ ],
200
+ name=EventFieldType.LATENCY,
201
+ after="MapFeatureNames",
202
+ step_name="Aggregates",
203
+ table=".",
204
+ key_field=EventFieldType.ENDPOINT_ID,
205
+ )
206
+ # Calculate average latency time for each window (5 min and 1 hour by default)
207
+ graph.add_step(
208
+ class_name="storey.Rename",
209
+ mapping={
210
+ "latency_count_5m": mm_schemas.EventLiveStats.PREDICTIONS_COUNT_5M,
211
+ "latency_count_1h": mm_schemas.EventLiveStats.PREDICTIONS_COUNT_1H,
212
+ },
213
+ name="Rename",
214
+ after=EventFieldType.LATENCY,
215
+ )
216
+
217
+ apply_storey_aggregations()
182
218
  # Write latency per prediction, labeled by endpoint ID only
183
219
  graph.add_step(
184
220
  "storey.TSDBTarget",
@@ -853,6 +889,7 @@ class V3IOTSDBConnector(TSDBConnector):
853
889
  endpoint_ids = (
854
890
  endpoint_ids if isinstance(endpoint_ids, list) else [endpoint_ids]
855
891
  )
892
+ start = start or (mlrun.utils.datetime_now() - timedelta(hours=24))
856
893
  start, end = self._get_start_end(start, end)
857
894
  df = self._get_records(
858
895
  table=mm_schemas.FileTargetKind.PREDICTIONS,
@@ -864,4 +901,10 @@ class V3IOTSDBConnector(TSDBConnector):
864
901
  )
865
902
  if not df.empty:
866
903
  df.dropna(inplace=True)
904
+ df.rename(
905
+ columns={
906
+ f"avg({mm_schemas.EventFieldType.LATENCY})": f"avg_{mm_schemas.EventFieldType.LATENCY}"
907
+ },
908
+ inplace=True,
909
+ )
867
910
  return df.reset_index(drop=True)
@@ -24,6 +24,9 @@ if typing.TYPE_CHECKING:
24
24
  from mlrun.db.base import RunDBInterface
25
25
  from mlrun.projects import MlrunProject
26
26
 
27
+ from fnmatch import fnmatchcase
28
+ from typing import Optional
29
+
27
30
  import mlrun
28
31
  import mlrun.artifacts
29
32
  import mlrun.common.model_monitoring.helpers
@@ -31,11 +34,11 @@ import mlrun.common.schemas.model_monitoring.constants as mm_constants
31
34
  import mlrun.data_types.infer
32
35
  import mlrun.model_monitoring
33
36
  import mlrun.utils.helpers
37
+ from mlrun.common.schemas import ModelEndpoint
34
38
  from mlrun.common.schemas.model_monitoring.model_endpoints import (
35
39
  ModelEndpointMonitoringMetric,
36
40
  _compose_full_name,
37
41
  )
38
- from mlrun.model_monitoring.model_endpoint import ModelEndpoint
39
42
  from mlrun.utils import logger
40
43
 
41
44
 
@@ -45,6 +48,70 @@ class _BatchDict(typing.TypedDict):
45
48
  days: int
46
49
 
47
50
 
51
+ def _is_results_regex_match(
52
+ existing_result_name: Optional[str],
53
+ result_name_filters: Optional[list[str]],
54
+ ) -> bool:
55
+ if existing_result_name.count(".") != 3 or any(
56
+ part == "" for part in existing_result_name.split(".")
57
+ ):
58
+ logger.warning(
59
+ f"_is_results_regex_match: existing_result_name illegal, will be ignored."
60
+ f" existing_result_name: {existing_result_name}"
61
+ )
62
+ return False
63
+ existing_result_name = ".".join(existing_result_name.split(".")[i] for i in [1, 3])
64
+ for result_name_filter in result_name_filters:
65
+ if fnmatchcase(existing_result_name, result_name_filter):
66
+ return True
67
+ return False
68
+
69
+
70
+ def filter_results_by_regex(
71
+ existing_result_names: Optional[list[str]] = None,
72
+ result_name_filters: Optional[list[str]] = None,
73
+ ) -> list[str]:
74
+ """
75
+ Filter a list of existing result names by a list of filters.
76
+
77
+ This function returns only the results that match the filters provided. If no filters are given,
78
+ it returns all results. Invalid inputs are ignored.
79
+
80
+ :param existing_result_names: List of existing results' fully qualified names (FQNs)
81
+ in the format: endpoint_id.app_name.type.name.
82
+ Example: mep1.app1.result.metric1
83
+ :param result_name_filters: List of filters in the format: app.result_name.
84
+ Wildcards can be used, such as app.result* or *.result
85
+
86
+ :return: List of FQNs of the matching results
87
+ """
88
+
89
+ if not result_name_filters:
90
+ return existing_result_names
91
+
92
+ if not existing_result_names:
93
+ return []
94
+
95
+ # filters validations
96
+ validated_filters = []
97
+ for result_name_filter in result_name_filters:
98
+ if result_name_filter.count(".") != 1:
99
+ logger.warning(
100
+ f"filter_results_by_regex: result_name_filter illegal, will be ignored."
101
+ f"Filter: {result_name_filter}"
102
+ )
103
+ else:
104
+ validated_filters.append(result_name_filter)
105
+ filtered_metrics_names = []
106
+ for existing_result_name in existing_result_names:
107
+ if _is_results_regex_match(
108
+ existing_result_name=existing_result_name,
109
+ result_name_filters=validated_filters,
110
+ ):
111
+ filtered_metrics_names.append(existing_result_name)
112
+ return filtered_metrics_names
113
+
114
+
48
115
  def get_stream_path(
49
116
  project: str,
50
117
  function_name: str = mm_constants.MonitoringFunctionNames.STREAM,
@@ -162,24 +229,6 @@ def get_monitoring_drift_measures_data(project: str, endpoint_id: str) -> "DataI
162
229
  )
163
230
 
164
231
 
165
- def get_connection_string(
166
- secret_provider: typing.Optional[typing.Callable[[str], str]] = None,
167
- ) -> str:
168
- """Get endpoint store connection string from the project secret. If wasn't set, take it from the system
169
- configurations.
170
-
171
- :param secret_provider: An optional secret provider to get the connection string secret.
172
-
173
- :return: Valid SQL connection string.
174
-
175
- """
176
-
177
- return mlrun.get_secret_or_env(
178
- key=mm_constants.ProjectSecretKeys.ENDPOINT_STORE_CONNECTION,
179
- secret_provider=secret_provider,
180
- )
181
-
182
-
183
232
  def get_tsdb_connection_string(
184
233
  secret_provider: typing.Optional[typing.Callable[[str], str]] = None,
185
234
  ) -> str:
@@ -252,19 +301,24 @@ def update_model_endpoint_last_request(
252
301
  :param current_request: current request time
253
302
  :param db: DB interface.
254
303
  """
255
- is_model_server_endpoint = model_endpoint.spec.stream_path != ""
256
- if is_model_server_endpoint:
257
- current_request = current_request.isoformat()
304
+ is_batch_endpoint = (
305
+ model_endpoint.metadata.endpoint_type == mm_constants.EndpointType.BATCH_EP
306
+ )
307
+ if not is_batch_endpoint:
258
308
  logger.info(
259
309
  "Update model endpoint last request time (EP with serving)",
260
310
  project=project,
261
311
  endpoint_id=model_endpoint.metadata.uid,
312
+ name=model_endpoint.metadata.name,
313
+ function_name=model_endpoint.spec.function_name,
262
314
  last_request=model_endpoint.status.last_request,
263
315
  current_request=current_request,
264
316
  )
265
317
  db.patch_model_endpoint(
266
318
  project=project,
267
319
  endpoint_id=model_endpoint.metadata.uid,
320
+ name=model_endpoint.metadata.name,
321
+ function_name=model_endpoint.spec.function_name,
268
322
  attributes={mm_constants.EventFieldType.LAST_REQUEST: current_request},
269
323
  )
270
324
  else: # model endpoint without any serving function - close the window "manually"
@@ -283,7 +337,7 @@ def update_model_endpoint_last_request(
283
337
  + datetime.timedelta(
284
338
  seconds=mlrun.mlconf.model_endpoint_monitoring.parquet_batching_timeout_secs
285
339
  )
286
- ).isoformat()
340
+ )
287
341
  logger.info(
288
342
  "Bumping model endpoint last request time (EP without serving)",
289
343
  project=project,
@@ -295,6 +349,8 @@ def update_model_endpoint_last_request(
295
349
  db.patch_model_endpoint(
296
350
  project=project,
297
351
  endpoint_id=model_endpoint.metadata.uid,
352
+ name=model_endpoint.metadata.name,
353
+ function_name=model_endpoint.spec.function_name,
298
354
  attributes={mm_constants.EventFieldType.LAST_REQUEST: bumped_last_request},
299
355
  )
300
356
 
@@ -336,17 +392,6 @@ def calculate_inputs_statistics(
336
392
  return inputs_statistics
337
393
 
338
394
 
339
- def get_endpoint_record(
340
- project: str,
341
- endpoint_id: str,
342
- secret_provider: typing.Optional[typing.Callable[[str], str]] = None,
343
- ) -> dict[str, typing.Any]:
344
- model_endpoint_store = mlrun.model_monitoring.get_store_object(
345
- project=project, secret_provider=secret_provider
346
- )
347
- return model_endpoint_store.get_model_endpoint(endpoint_id=endpoint_id)
348
-
349
-
350
395
  def get_result_instance_fqn(
351
396
  model_endpoint_id: str, app_name: str, result_name: str
352
397
  ) -> str:
@@ -386,38 +431,6 @@ def get_invocations_metric(project: str) -> ModelEndpointMonitoringMetric:
386
431
  )
387
432
 
388
433
 
389
- def enrich_model_endpoint_with_model_uri(
390
- model_endpoint: ModelEndpoint,
391
- model_obj: mlrun.artifacts.ModelArtifact,
392
- ):
393
- """
394
- Enrich the model endpoint object with the model uri from the model object. We will use a unique reference
395
- to the model object that includes the project, db_key, iter, and tree.
396
- In addition, we verify that the model object is of type `ModelArtifact`.
397
-
398
- :param model_endpoint: An object representing the model endpoint that will be enriched with the model uri.
399
- :param model_obj: An object representing the model artifact.
400
-
401
- :raise: `MLRunInvalidArgumentError` if the model object is not of type `ModelArtifact`.
402
- """
403
- mlrun.utils.helpers.verify_field_of_type(
404
- field_name="model_endpoint.spec.model_uri",
405
- field_value=model_obj,
406
- expected_type=mlrun.artifacts.ModelArtifact,
407
- )
408
-
409
- # Update model_uri with a unique reference to handle future changes
410
- model_artifact_uri = mlrun.utils.helpers.generate_artifact_uri(
411
- project=model_endpoint.metadata.project,
412
- key=model_obj.db_key,
413
- iter=model_obj.iter,
414
- tree=model_obj.tree,
415
- )
416
- model_endpoint.spec.model_uri = mlrun.datastore.get_store_uri(
417
- kind=mlrun.utils.helpers.StorePrefix.Model, uri=model_artifact_uri
418
- )
419
-
420
-
421
434
  def _get_monitoring_schedules_folder_path(project: str) -> str:
422
435
  return typing.cast(
423
436
  str,