mlrun 1.7.0rc17__py3-none-any.whl → 1.7.0rc19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (90)
  1. mlrun/__main__.py +5 -2
  2. mlrun/alerts/alert.py +1 -1
  3. mlrun/artifacts/manager.py +5 -1
  4. mlrun/common/constants.py +64 -3
  5. mlrun/common/formatters/__init__.py +16 -0
  6. mlrun/common/formatters/base.py +59 -0
  7. mlrun/common/formatters/function.py +41 -0
  8. mlrun/common/runtimes/constants.py +32 -4
  9. mlrun/common/schemas/__init__.py +1 -2
  10. mlrun/common/schemas/alert.py +31 -9
  11. mlrun/common/schemas/api_gateway.py +52 -0
  12. mlrun/common/schemas/client_spec.py +1 -0
  13. mlrun/common/schemas/frontend_spec.py +1 -0
  14. mlrun/common/schemas/function.py +4 -0
  15. mlrun/common/schemas/model_monitoring/__init__.py +9 -4
  16. mlrun/common/schemas/model_monitoring/constants.py +22 -8
  17. mlrun/common/schemas/model_monitoring/grafana.py +9 -5
  18. mlrun/common/schemas/model_monitoring/model_endpoints.py +17 -6
  19. mlrun/config.py +9 -2
  20. mlrun/data_types/to_pandas.py +5 -5
  21. mlrun/datastore/datastore.py +6 -2
  22. mlrun/datastore/redis.py +2 -2
  23. mlrun/datastore/s3.py +5 -0
  24. mlrun/datastore/sources.py +106 -7
  25. mlrun/datastore/store_resources.py +5 -1
  26. mlrun/datastore/targets.py +5 -4
  27. mlrun/datastore/utils.py +42 -0
  28. mlrun/db/base.py +5 -1
  29. mlrun/db/httpdb.py +22 -3
  30. mlrun/db/nopdb.py +5 -1
  31. mlrun/errors.py +6 -0
  32. mlrun/execution.py +16 -6
  33. mlrun/feature_store/ingestion.py +7 -6
  34. mlrun/feature_store/retrieval/conversion.py +5 -5
  35. mlrun/feature_store/retrieval/job.py +7 -3
  36. mlrun/feature_store/retrieval/spark_merger.py +2 -1
  37. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +2 -2
  38. mlrun/frameworks/parallel_coordinates.py +2 -1
  39. mlrun/frameworks/tf_keras/__init__.py +4 -1
  40. mlrun/launcher/client.py +4 -2
  41. mlrun/launcher/local.py +8 -2
  42. mlrun/launcher/remote.py +8 -2
  43. mlrun/model.py +5 -1
  44. mlrun/model_monitoring/db/stores/__init__.py +0 -2
  45. mlrun/model_monitoring/db/stores/base/store.py +16 -4
  46. mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +43 -21
  47. mlrun/model_monitoring/db/stores/sqldb/models/base.py +32 -2
  48. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +25 -5
  49. mlrun/model_monitoring/db/stores/sqldb/models/sqlite.py +5 -0
  50. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +235 -166
  51. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +190 -91
  52. mlrun/model_monitoring/db/tsdb/__init__.py +35 -6
  53. mlrun/model_monitoring/db/tsdb/base.py +232 -38
  54. mlrun/model_monitoring/db/tsdb/helpers.py +30 -0
  55. mlrun/model_monitoring/db/tsdb/tdengine/__init__.py +15 -0
  56. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +240 -0
  57. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +45 -0
  58. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +397 -0
  59. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +292 -104
  60. mlrun/model_monitoring/helpers.py +45 -0
  61. mlrun/model_monitoring/stream_processing.py +7 -4
  62. mlrun/model_monitoring/writer.py +50 -20
  63. mlrun/package/utils/_formatter.py +2 -2
  64. mlrun/projects/operations.py +8 -5
  65. mlrun/projects/pipelines.py +42 -15
  66. mlrun/projects/project.py +55 -14
  67. mlrun/render.py +8 -5
  68. mlrun/runtimes/base.py +2 -1
  69. mlrun/runtimes/databricks_job/databricks_wrapper.py +1 -1
  70. mlrun/runtimes/local.py +4 -1
  71. mlrun/runtimes/nuclio/api_gateway.py +32 -8
  72. mlrun/runtimes/nuclio/application/application.py +3 -3
  73. mlrun/runtimes/nuclio/function.py +1 -4
  74. mlrun/runtimes/utils.py +5 -6
  75. mlrun/serving/server.py +2 -1
  76. mlrun/utils/async_http.py +25 -5
  77. mlrun/utils/helpers.py +28 -7
  78. mlrun/utils/logger.py +28 -1
  79. mlrun/utils/notifications/notification/__init__.py +14 -9
  80. mlrun/utils/notifications/notification/slack.py +27 -7
  81. mlrun/utils/notifications/notification_pusher.py +47 -42
  82. mlrun/utils/v3io_clients.py +0 -1
  83. mlrun/utils/version/version.json +2 -2
  84. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/METADATA +9 -4
  85. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/RECORD +89 -82
  86. mlrun/model_monitoring/db/v3io_tsdb_reader.py +0 -134
  87. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/LICENSE +0 -0
  88. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/WHEEL +0 -0
  89. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/entry_points.txt +0 -0
  90. {mlrun-1.7.0rc17.dist-info → mlrun-1.7.0rc19.dist-info}/top_level.txt +0 -0
--- a/mlrun/model_monitoring/db/stores/sqldb/sql_store.py
+++ b/mlrun/model_monitoring/db/stores/sqldb/sql_store.py
@@ -19,13 +19,17 @@ import uuid

 import pandas as pd
 import sqlalchemy
+import sqlalchemy.exc
+import sqlalchemy.orm
+from sqlalchemy.sql.elements import BinaryExpression

 import mlrun.common.model_monitoring.helpers
-import mlrun.common.schemas.model_monitoring as mm_constants
+import mlrun.common.schemas.model_monitoring as mm_schemas
 import mlrun.model_monitoring.db
 import mlrun.model_monitoring.db.stores.sqldb.models
 import mlrun.model_monitoring.helpers
 from mlrun.common.db.sql_session import create_session, get_engine
+from mlrun.utils import datetime_now, logger


 class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
@@ -35,7 +39,6 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
     data, the user needs to provide a valid connection string for the database.
     """

-    _engine = None
     _tables = {}

     def __init__(
@@ -63,6 +66,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
     def _init_tables(self):
         self._init_model_endpoints_table()
         self._init_application_results_table()
+        self._init_application_metrics_table()
         self._init_monitoring_schedules_table()

     def _init_model_endpoints_table(self):
@@ -71,83 +75,91 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
                 connection_string=self._sql_connection_string
             )
         )
-        self._tables[mm_constants.EventFieldType.MODEL_ENDPOINTS] = (
+        self._tables[mm_schemas.EventFieldType.MODEL_ENDPOINTS] = (
             self.ModelEndpointsTable
         )

     def _init_application_results_table(self):
-        self.ApplicationResultsTable = (
+        self.application_results_table = (
             mlrun.model_monitoring.db.stores.sqldb.models._get_application_result_table(
                 connection_string=self._sql_connection_string
             )
         )
-        self._tables[mm_constants.FileTargetKind.APP_RESULTS] = (
-            self.ApplicationResultsTable
+        self._tables[mm_schemas.FileTargetKind.APP_RESULTS] = (
+            self.application_results_table
+        )
+
+    def _init_application_metrics_table(self) -> None:
+        self.application_metrics_table = mlrun.model_monitoring.db.stores.sqldb.models._get_application_metrics_table(
+            connection_string=self._sql_connection_string
+        )
+        self._tables[mm_schemas.FileTargetKind.APP_METRICS] = (
+            self.application_metrics_table
         )

     def _init_monitoring_schedules_table(self):
         self.MonitoringSchedulesTable = mlrun.model_monitoring.db.stores.sqldb.models._get_monitoring_schedules_table(
             connection_string=self._sql_connection_string
         )
-        self._tables[mm_constants.FileTargetKind.MONITORING_SCHEDULES] = (
+        self._tables[mm_schemas.FileTargetKind.MONITORING_SCHEDULES] = (
             self.MonitoringSchedulesTable
         )

-    def _write(self, table: str, event: dict[str, typing.Any]):
+    def _write(self, table_name: str, event: dict[str, typing.Any]) -> None:
         """
         Create a new record in the SQL table.

-        :param table: Target table name.
-        :param event: Event dictionary that will be written into the DB.
+        :param table_name: Target table name.
+        :param event:      Event dictionary that will be written into the DB.
         """
-
         with self._engine.connect() as connection:
             # Convert the result into a pandas Dataframe and write it into the database
             event_df = pd.DataFrame([event])
-
-            event_df.to_sql(table, con=connection, index=False, if_exists="append")
+            event_df.to_sql(table_name, con=connection, index=False, if_exists="append")

     def _update(
         self,
         attributes: dict[str, typing.Any],
         table: sqlalchemy.orm.decl_api.DeclarativeMeta,
-        **filtered_values,
-    ):
+        criteria: list[BinaryExpression],
+    ) -> None:
         """
         Update a record in the SQL table.

         :param attributes: Dictionary of attributes that will be used for update the record. Note that the keys
                            of the attributes dictionary should exist in the SQL table.
         :param table:      SQLAlchemy declarative table.
-
+        :param criteria:   A list of binary expressions that filter the query.
         """
-        filter_query_ = []
-        for _filter in filtered_values:
-            filter_query_.append(f"{_filter} = '{filtered_values[_filter]}'")
-
         with create_session(dsn=self._sql_connection_string) as session:
             # Generate and commit the update session query
-            session.query(table).filter(sqlalchemy.sql.text(*filter_query_)).update(
-                attributes, synchronize_session=False
-            )
+            session.query(
+                table  # pyright: ignore[reportOptionalCall]
+            ).filter(*criteria).update(attributes, synchronize_session=False)
             session.commit()

-    def _get(self, table: sqlalchemy.orm.decl_api.DeclarativeMeta, **filtered_values):
+    def _get(
+        self,
+        table: sqlalchemy.orm.decl_api.DeclarativeMeta,
+        criteria: list[BinaryExpression],
+    ):
         """
         Get a record from the SQL table.

-        param table: SQLAlchemy declarative table.
+        param table:     SQLAlchemy declarative table.
+        :param criteria: A list of binary expressions that filter the query.
         """
-
-        filter_query_ = []
-        for _filter in filtered_values:
-            filter_query_.append(f"{_filter} = '{filtered_values[_filter]}'")
         with create_session(dsn=self._sql_connection_string) as session:
             try:
+                logger.debug(
+                    "Querying the DB",
+                    table=table.__name__,
+                    criteria=[str(criterion) for criterion in criteria],
+                )
                 # Generate the get query
                 return (
-                    session.query(table)
-                    .filter(sqlalchemy.sql.text(*filter_query_))
+                    session.query(table)  # pyright: ignore[reportOptionalCall]
+                    .filter(*criteria)
                     .one_or_none()
                 )
             except sqlalchemy.exc.ProgrammingError:
@@ -156,21 +168,21 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
                 return

     def _delete(
-        self, table: sqlalchemy.orm.decl_api.DeclarativeMeta, **filtered_values
-    ):
+        self,
+        table: sqlalchemy.orm.decl_api.DeclarativeMeta,
+        criteria: list[BinaryExpression],
+    ) -> None:
         """
         Delete records from the SQL table.

-        param table: SQLAlchemy declarative table.
+        param table:     SQLAlchemy declarative table.
+        :param criteria: A list of binary expressions that filter the query.
         """
-        filter_query_ = []
-        for _filter in filtered_values:
-            filter_query_.append(f"{_filter} = '{filtered_values[_filter]}'")
         with create_session(dsn=self._sql_connection_string) as session:
             # Generate and commit the delete query
-            session.query(table).filter(sqlalchemy.sql.text(*filter_query_)).delete(
-                synchronize_session=False
-            )
+            session.query(
+                table  # pyright: ignore[reportOptionalCall]
+            ).filter(*criteria).delete(synchronize_session=False)
             session.commit()

     def write_model_endpoint(self, endpoint: dict[str, typing.Any]):
@@ -182,13 +194,12 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """

         # Adjust timestamps fields
-        endpoint[mm_constants.EventFieldType.FIRST_REQUEST] = (endpoint)[
-            mm_constants.EventFieldType.LAST_REQUEST
-        ] = mlrun.utils.datetime_now()
+        endpoint[mm_schemas.EventFieldType.FIRST_REQUEST] = (endpoint)[
+            mm_schemas.EventFieldType.LAST_REQUEST
+        ] = datetime_now()

         self._write(
-            table=mm_constants.EventFieldType.MODEL_ENDPOINTS,
-            event=endpoint,
+            table_name=mm_schemas.EventFieldType.MODEL_ENDPOINTS, event=endpoint
         )

     def update_model_endpoint(
@@ -204,25 +215,26 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """
         self._init_model_endpoints_table()

-        attributes.pop(mm_constants.EventFieldType.ENDPOINT_ID, None)
-
-        filter_endpoint = {mm_constants.EventFieldType.UID: endpoint_id}
+        attributes.pop(mm_schemas.EventFieldType.ENDPOINT_ID, None)

         self._update(
-            attributes=attributes, table=self.ModelEndpointsTable, **filter_endpoint
+            attributes=attributes,
+            table=self.ModelEndpointsTable,
+            criteria=[self.ModelEndpointsTable.uid == endpoint_id],
         )

-    def delete_model_endpoint(self, endpoint_id: str):
+    def delete_model_endpoint(self, endpoint_id: str) -> None:
         """
         Deletes the SQL record of a given model endpoint id.

         :param endpoint_id: The unique id of the model endpoint.
         """
         self._init_model_endpoints_table()
-
-        filter_endpoint = {mm_constants.EventFieldType.UID: endpoint_id}
         # Delete the model endpoint record using sqlalchemy ORM
-        self._delete(table=self.ModelEndpointsTable, **filter_endpoint)
+        self._delete(
+            table=self.ModelEndpointsTable,
+            criteria=[self.ModelEndpointsTable.uid == endpoint_id],
+        )

     def get_model_endpoint(
         self,
@@ -239,9 +251,11 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """
         self._init_model_endpoints_table()

-        # Get the model endpoint record using sqlalchemy ORM
-        filter_endpoint = {mm_constants.EventFieldType.UID: endpoint_id}
-        endpoint_record = self._get(table=self.ModelEndpointsTable, **filter_endpoint)
+        # Get the model endpoint record
+        endpoint_record = self._get(
+            table=self.ModelEndpointsTable,
+            criteria=[self.ModelEndpointsTable.uid == endpoint_id],
+        )

         if not endpoint_record:
             raise mlrun.errors.MLRunNotFoundError(f"Endpoint {endpoint_id} not found")
@@ -277,7 +291,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         endpoint_list = []

         model_endpoints_table = (
-            self.ModelEndpointsTable.__table__  # pyright: ignore[reportGeneralTypeIssues]
+            self.ModelEndpointsTable.__table__  # pyright: ignore[reportAttributeAccessIssue]
         )

         # Get the model endpoints records using sqlalchemy ORM
@@ -292,32 +306,32 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=mm_constants.EventFieldType.MODEL,
+                key_filter=mm_schemas.EventFieldType.MODEL,
                 filtered_values=[model],
             )
         if function:
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=mm_constants.EventFieldType.FUNCTION,
+                key_filter=mm_schemas.EventFieldType.FUNCTION,
                 filtered_values=[function],
             )
         if uids:
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=mm_constants.EventFieldType.UID,
+                key_filter=mm_schemas.EventFieldType.UID,
                 filtered_values=uids,
                 combined=False,
             )
         if top_level:
-            node_ep = str(mm_constants.EndpointType.NODE_EP.value)
-            router_ep = str(mm_constants.EndpointType.ROUTER.value)
+            node_ep = str(mm_schemas.EndpointType.NODE_EP.value)
+            router_ep = str(mm_schemas.EndpointType.ROUTER.value)
             endpoint_types = [node_ep, router_ep]
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=mm_constants.EventFieldType.ENDPOINT_TYPE,
+                key_filter=mm_schemas.EventFieldType.ENDPOINT_TYPE,
                 filtered_values=endpoint_types,
                 combined=False,
             )
@@ -338,73 +352,86 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
     def write_application_event(
         self,
         event: dict[str, typing.Any],
-        kind: mm_constants.WriterEventKind = mm_constants.WriterEventKind.RESULT,
-    ):
+        kind: mm_schemas.WriterEventKind = mm_schemas.WriterEventKind.RESULT,
+    ) -> None:
         """
         Write a new application event in the target table.

-        :param event: An event dictionary that represents the application result, should be corresponded to the
-                      schema defined in the :py:class:`~mm_constants.constants.WriterEvent`
-                      object.
+        :param event: An event dictionary that represents the application result or metric,
+                      should be corresponded to the schema defined in the
+                      :py:class:`~mm_constants.constants.WriterEvent` object.
         :param kind: The type of the event, can be either "result" or "metric".
         """

-        if kind == mm_constants.WriterEventKind.METRIC:
-            # TODO : Implement the logic for writing metrics to MySQL
-            return
-
-        self._init_application_results_table()
+        if kind == mm_schemas.WriterEventKind.METRIC:
+            self._init_application_metrics_table()
+            table = self.application_metrics_table
+            table_name = mm_schemas.FileTargetKind.APP_METRICS
+        elif kind == mm_schemas.WriterEventKind.RESULT:
+            self._init_application_results_table()
+            table = self.application_results_table
+            table_name = mm_schemas.FileTargetKind.APP_RESULTS
+        else:
+            raise ValueError(f"Invalid {kind = }")

-        application_filter_dict = {
-            mm_constants.EventFieldType.UID: self._generate_application_result_uid(
-                event
-            )
-        }
+        application_result_uid = self._generate_application_result_uid(event, kind=kind)
+        criteria = [table.uid == application_result_uid]

-        application_record = self._get(
-            table=self.ApplicationResultsTable, **application_filter_dict
-        )
+        application_record = self._get(table=table, criteria=criteria)
         if application_record:
             self._convert_to_datetime(
-                event=event,
-                key=mm_constants.WriterEvent.START_INFER_TIME,
+                event=event, key=mm_schemas.WriterEvent.START_INFER_TIME
             )
             self._convert_to_datetime(
-                event=event,
-                key=mm_constants.WriterEvent.END_INFER_TIME,
+                event=event, key=mm_schemas.WriterEvent.END_INFER_TIME
             )
             # Update an existing application result
-            self._update(
-                attributes=event,
-                table=self.ApplicationResultsTable,
-                **application_filter_dict,
-            )
+            self._update(attributes=event, table=table, criteria=criteria)
         else:
             # Write a new application result
-            event[mm_constants.EventFieldType.UID] = application_filter_dict[
-                mm_constants.EventFieldType.UID
-            ]
-
-            self._write(
-                table=mm_constants.FileTargetKind.APP_RESULTS,
-                event=event,
-            )
+            event[mm_schemas.EventFieldType.UID] = application_result_uid
+            self._write(table_name=table_name, event=event)

     @staticmethod
-    def _convert_to_datetime(event: dict[str, typing.Any], key: str):
+    def _convert_to_datetime(event: dict[str, typing.Any], key: str) -> None:
         if isinstance(event[key], str):
             event[key] = datetime.datetime.fromisoformat(event[key])
+        event[key] = event[key].astimezone(tz=datetime.timezone.utc)

     @staticmethod
-    def _generate_application_result_uid(event: dict[str, typing.Any]) -> str:
-        return (
-            event[mm_constants.WriterEvent.ENDPOINT_ID]
-            + "_"
-            + event[mm_constants.WriterEvent.APPLICATION_NAME]
-            + "_"
-            + event[mm_constants.ResultData.RESULT_NAME]
+    def _generate_application_result_uid(
+        event: dict[str, typing.Any],
+        kind: mm_schemas.WriterEventKind = mm_schemas.WriterEventKind.RESULT,
+    ) -> str:
+        if kind == mm_schemas.WriterEventKind.RESULT:
+            name = event[mm_schemas.ResultData.RESULT_NAME]
+        else:
+            name = event[mm_schemas.MetricData.METRIC_NAME]
+        return "_".join(
+            [
+                event[mm_schemas.WriterEvent.ENDPOINT_ID],
+                event[mm_schemas.WriterEvent.APPLICATION_NAME],
+                name,
+            ]
         )

+    @staticmethod
+    def _get_filter_criteria(
+        *,
+        table: sqlalchemy.orm.decl_api.DeclarativeMeta,
+        endpoint_id: str,
+        application_name: typing.Optional[str] = None,
+    ) -> list[BinaryExpression]:
+        """
+        Return the filter criteria for the given endpoint_id and application_name.
+        Note: the table object must include the relevant columns:
+        `endpoint_id` and `application_name`.
+        """
+        criteria = [table.endpoint_id == endpoint_id]
+        if application_name is not None:
+            criteria.append(table.application_name == application_name)
+        return criteria
+
     def get_last_analyzed(self, endpoint_id: str, application_name: str) -> int:
         """
         Get the last analyzed time for the provided model endpoint and application.
@@ -414,14 +441,15 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):

         :return: Timestamp as a Unix time.
         :raise: MLRunNotFoundError if last analyzed value is not found.
-
         """
         self._init_monitoring_schedules_table()
-        application_filter_dict = self.filter_endpoint_and_application_name(
-            endpoint_id=endpoint_id, application_name=application_name
-        )
         monitoring_schedule_record = self._get(
-            table=self.MonitoringSchedulesTable, **application_filter_dict
+            table=self.MonitoringSchedulesTable,
+            criteria=self._get_filter_criteria(
+                table=self.MonitoringSchedulesTable,
+                endpoint_id=endpoint_id,
+                application_name=application_name,
+            ),
         )
         if not monitoring_schedule_record:
             raise mlrun.errors.MLRunNotFoundError(
@@ -443,51 +471,67 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """
         self._init_monitoring_schedules_table()

-        application_filter_dict = self.filter_endpoint_and_application_name(
-            endpoint_id=endpoint_id, application_name=application_name
+        criteria = self._get_filter_criteria(
+            table=self.MonitoringSchedulesTable,
+            endpoint_id=endpoint_id,
+            application_name=application_name,
         )
         monitoring_schedule_record = self._get(
-            table=self.MonitoringSchedulesTable, **application_filter_dict
+            table=self.MonitoringSchedulesTable, criteria=criteria
         )
         if not monitoring_schedule_record:
-            # Add a new record with empty last analyzed value
+            # Add a new record with last analyzed value
             self._write(
-                table=mm_constants.FileTargetKind.MONITORING_SCHEDULES,
+                table_name=mm_schemas.FileTargetKind.MONITORING_SCHEDULES,
                 event={
-                    mm_constants.SchedulingKeys.UID: uuid.uuid4().hex,
-                    mm_constants.SchedulingKeys.APPLICATION_NAME: application_name,
-                    mm_constants.SchedulingKeys.ENDPOINT_ID: endpoint_id,
-                    mm_constants.SchedulingKeys.LAST_ANALYZED: last_analyzed,
+                    mm_schemas.SchedulingKeys.UID: uuid.uuid4().hex,
+                    mm_schemas.SchedulingKeys.APPLICATION_NAME: application_name,
+                    mm_schemas.SchedulingKeys.ENDPOINT_ID: endpoint_id,
+                    mm_schemas.SchedulingKeys.LAST_ANALYZED: last_analyzed,
                 },
             )

         self._update(
-            attributes={mm_constants.SchedulingKeys.LAST_ANALYZED: last_analyzed},
+            attributes={mm_schemas.SchedulingKeys.LAST_ANALYZED: last_analyzed},
             table=self.MonitoringSchedulesTable,
-            **application_filter_dict,
+            criteria=criteria,
         )

-    def _delete_last_analyzed(self, endpoint_id: str = "", application_name: str = ""):
+    def _delete_last_analyzed(
+        self, endpoint_id: str, application_name: typing.Optional[str] = None
+    ) -> None:
         self._init_monitoring_schedules_table()
-
-        application_filter_dict = self.filter_endpoint_and_application_name(
-            endpoint_id=endpoint_id, application_name=application_name
+        criteria = self._get_filter_criteria(
+            table=self.MonitoringSchedulesTable,
+            endpoint_id=endpoint_id,
+            application_name=application_name,
         )
-
         # Delete the model endpoint record using sqlalchemy ORM
-        self._delete(table=self.MonitoringSchedulesTable, **application_filter_dict)
+        self._delete(table=self.MonitoringSchedulesTable, criteria=criteria)

     def _delete_application_result(
-        self, endpoint_id: str = "", application_name: str = ""
-    ):
+        self, endpoint_id: str, application_name: typing.Optional[str] = None
+    ) -> None:
         self._init_application_results_table()
-
-        application_filter_dict = self.filter_endpoint_and_application_name(
-            endpoint_id=endpoint_id, application_name=application_name
+        criteria = self._get_filter_criteria(
+            table=self.application_results_table,
+            endpoint_id=endpoint_id,
+            application_name=application_name,
        )
-
-        # Delete the model endpoint record using sqlalchemy ORM
-        self._delete(table=self.ApplicationResultsTable, **application_filter_dict)
+        # Delete the relevant records from the results table
+        self._delete(table=self.application_results_table, criteria=criteria)
+
+    def _delete_application_metrics(
+        self, endpoint_id: str, application_name: typing.Optional[str] = None
+    ) -> None:
+        self._init_application_metrics_table()
+        criteria = self._get_filter_criteria(
+            table=self.application_metrics_table,
+            endpoint_id=endpoint_id,
+            application_name=application_name,
+        )
+        # Delete the relevant records from the metrics table
+        self._delete(table=self.application_metrics_table, criteria=criteria)

     def _create_tables_if_not_exist(self):
         self._init_tables()
@@ -495,9 +539,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         for table in self._tables:
             # Create table if not exist. The `metadata` contains the `ModelEndpointsTable`
             if not self._engine.has_table(table):
-                self._tables[table].metadata.create_all(  # pyright: ignore[reportGeneralTypeIssues]
-                    bind=self._engine
-                )
+                self._tables[table].metadata.create_all(bind=self._engine)

     @staticmethod
     def _filter_values(
@@ -558,7 +600,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):

         # Convert endpoint labels into dictionary
         endpoint_labels = json.loads(
-            endpoint_dict.get(mm_constants.EventFieldType.LABELS)
+            endpoint_dict.get(mm_schemas.EventFieldType.LABELS)
         )

         for label in labels:
@@ -574,42 +616,69 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):

         return True

-    @staticmethod
-    def filter_endpoint_and_application_name(
-        endpoint_id: str, application_name: str
-    ) -> dict[str, str]:
-        """Generate a dictionary filter for endpoint id and application name"""
-        if not endpoint_id and not application_name:
-            raise mlrun.errors.MLRunBadRequestError(
-                "Please provide a valid endpoint_id and/or application_name"
-            )
-        application_filter_dict = {}
-        if endpoint_id:
-            application_filter_dict[mm_constants.SchedulingKeys.ENDPOINT_ID] = (
-                endpoint_id
-            )
-        if application_name:
-            application_filter_dict[mm_constants.SchedulingKeys.APPLICATION_NAME] = (
-                application_name
-            )
-        return application_filter_dict
-
-    def delete_model_endpoints_resources(self):
+    def delete_model_endpoints_resources(self) -> None:
         """
-        Delete all model endpoints resources in both SQL and the time series DB.
-
+        Delete all the model monitoring resources of the project in the SQL tables.
         """
-
         endpoints = self.list_model_endpoints()
+        logger.debug("Deleting model monitoring resources", project=self.project)

         for endpoint_dict in endpoints:
-            endpoint_id = endpoint_dict[mm_constants.EventFieldType.UID]
+            endpoint_id = endpoint_dict[mm_schemas.EventFieldType.UID]

             # Delete last analyzed records
             self._delete_last_analyzed(endpoint_id=endpoint_id)

-            # Delete application results records
+            # Delete application results and metrics records
             self._delete_application_result(endpoint_id=endpoint_id)
+            self._delete_application_metrics(endpoint_id=endpoint_id)

             # Delete model endpoint record
             self.delete_model_endpoint(endpoint_id=endpoint_id)
+
+    def get_model_endpoint_metrics(
+        self, endpoint_id: str, type: mm_schemas.ModelEndpointMonitoringMetricType
+    ) -> list[mm_schemas.ModelEndpointMonitoringMetric]:
+        """
+        Fetch the model endpoint metrics or results (according to `type`) for the
+        requested endpoint.
+        """
+        logger.debug(
+            "Fetching metrics for model endpoint",
+            project=self.project,
+            endpoint_id=endpoint_id,
+            type=type,
+        )
+        if type == mm_schemas.ModelEndpointMonitoringMetricType.METRIC:
+            self._init_application_metrics_table()
+            table = self.application_metrics_table
+            name_col = mm_schemas.MetricData.METRIC_NAME
+        else:
+            self._init_application_results_table()
+            table = self.application_results_table
+            name_col = mm_schemas.ResultData.RESULT_NAME
+
+        # Note: the block below does not use self._get, as we need here all the
+        # results, not only `one_or_none`.
+        with sqlalchemy.orm.Session(self._engine) as session:
+            metric_rows = (
+                session.query(table)  # pyright: ignore[reportOptionalCall]
+                .filter(table.endpoint_id == endpoint_id)
+                .all()
+            )
+
+        return [
+            mm_schemas.ModelEndpointMonitoringMetric(
+                project=self.project,
+                app=metric_row.application_name,
+                type=type,
+                name=getattr(metric_row, name_col),
+                full_name=mlrun.model_monitoring.helpers._compose_full_name(
+                    project=self.project,
+                    app=metric_row.application_name,
+                    type=type,
+                    name=getattr(metric_row, name_col),
+                ),
+            )
+            for metric_row in metric_rows
+        ]
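
Note on the refactor shown above: the store's internal _get/_update/_delete helpers now receive `criteria` as a list of SQLAlchemy BinaryExpression objects built from column comparisons (for example, ModelEndpointsTable.uid == endpoint_id) instead of interpolating values into sqlalchemy.sql.text() strings. The sketch below only illustrates that general SQLAlchemy pattern under stated assumptions; the ApplicationResult table, its columns, and build_criteria are hypothetical names for illustration, not mlrun's actual models or API.

# Minimal sketch of criteria-based filtering, assuming SQLAlchemy >= 1.4.
# Table and column names are hypothetical; they only mirror the shape used above.
import typing

import sqlalchemy
import sqlalchemy.orm
from sqlalchemy.sql.elements import BinaryExpression

Base = sqlalchemy.orm.declarative_base()


class ApplicationResult(Base):
    __tablename__ = "application_results"
    uid = sqlalchemy.Column(sqlalchemy.String(64), primary_key=True)
    endpoint_id = sqlalchemy.Column(sqlalchemy.String(64))
    application_name = sqlalchemy.Column(sqlalchemy.String(64))


def build_criteria(
    endpoint_id: str, application_name: typing.Optional[str] = None
) -> list[BinaryExpression]:
    # Column comparisons yield BinaryExpression objects that SQLAlchemy binds as
    # parameters, unlike the old f-string filters passed to sqlalchemy.sql.text().
    criteria = [ApplicationResult.endpoint_id == endpoint_id]
    if application_name is not None:
        criteria.append(ApplicationResult.application_name == application_name)
    return criteria


engine = sqlalchemy.create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

with sqlalchemy.orm.Session(engine) as session:
    session.add(
        ApplicationResult(uid="ep1_app1_res1", endpoint_id="ep1", application_name="app1")
    )
    session.commit()
    # Unpack the criteria list into filter(), as the refactored store methods do.
    rows = session.query(ApplicationResult).filter(*build_criteria("ep1", "app1")).all()
    print([row.uid for row in rows])  # -> ['ep1_app1_res1']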