mlrun 1.7.0rc25__py3-none-any.whl → 1.7.0rc29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (67)
  1. mlrun/__main__.py +7 -7
  2. mlrun/alerts/alert.py +13 -1
  3. mlrun/artifacts/manager.py +5 -0
  4. mlrun/common/constants.py +2 -2
  5. mlrun/common/formatters/base.py +9 -9
  6. mlrun/common/schemas/alert.py +4 -8
  7. mlrun/common/schemas/api_gateway.py +7 -0
  8. mlrun/common/schemas/constants.py +3 -0
  9. mlrun/common/schemas/model_monitoring/__init__.py +1 -0
  10. mlrun/common/schemas/model_monitoring/constants.py +27 -12
  11. mlrun/common/schemas/model_monitoring/model_endpoints.py +0 -12
  12. mlrun/common/schemas/schedule.py +1 -1
  13. mlrun/config.py +16 -9
  14. mlrun/datastore/azure_blob.py +2 -1
  15. mlrun/datastore/base.py +1 -5
  16. mlrun/datastore/datastore.py +3 -3
  17. mlrun/datastore/google_cloud_storage.py +6 -2
  18. mlrun/datastore/inmem.py +1 -1
  19. mlrun/datastore/snowflake_utils.py +3 -1
  20. mlrun/datastore/sources.py +26 -11
  21. mlrun/datastore/store_resources.py +2 -0
  22. mlrun/datastore/targets.py +60 -25
  23. mlrun/db/base.py +11 -0
  24. mlrun/db/httpdb.py +47 -33
  25. mlrun/db/nopdb.py +11 -1
  26. mlrun/errors.py +4 -0
  27. mlrun/execution.py +18 -10
  28. mlrun/feature_store/retrieval/spark_merger.py +2 -1
  29. mlrun/launcher/local.py +2 -2
  30. mlrun/model.py +30 -0
  31. mlrun/model_monitoring/api.py +6 -52
  32. mlrun/model_monitoring/applications/histogram_data_drift.py +4 -1
  33. mlrun/model_monitoring/db/stores/__init__.py +21 -9
  34. mlrun/model_monitoring/db/stores/base/store.py +39 -1
  35. mlrun/model_monitoring/db/stores/sqldb/models/base.py +9 -7
  36. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +4 -2
  37. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +34 -79
  38. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +19 -27
  39. mlrun/model_monitoring/db/tsdb/__init__.py +19 -14
  40. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +4 -2
  41. mlrun/model_monitoring/helpers.py +9 -5
  42. mlrun/model_monitoring/writer.py +1 -5
  43. mlrun/projects/operations.py +1 -0
  44. mlrun/projects/project.py +76 -76
  45. mlrun/render.py +10 -5
  46. mlrun/run.py +2 -2
  47. mlrun/runtimes/daskjob.py +7 -1
  48. mlrun/runtimes/local.py +24 -7
  49. mlrun/runtimes/nuclio/function.py +20 -0
  50. mlrun/runtimes/pod.py +5 -29
  51. mlrun/serving/routers.py +75 -59
  52. mlrun/serving/server.py +1 -0
  53. mlrun/serving/v2_serving.py +8 -1
  54. mlrun/utils/helpers.py +46 -2
  55. mlrun/utils/logger.py +36 -2
  56. mlrun/utils/notifications/notification/base.py +4 -0
  57. mlrun/utils/notifications/notification/git.py +21 -0
  58. mlrun/utils/notifications/notification/slack.py +8 -0
  59. mlrun/utils/notifications/notification/webhook.py +41 -1
  60. mlrun/utils/notifications/notification_pusher.py +2 -2
  61. mlrun/utils/version/version.json +2 -2
  62. {mlrun-1.7.0rc25.dist-info → mlrun-1.7.0rc29.dist-info}/METADATA +11 -6
  63. {mlrun-1.7.0rc25.dist-info → mlrun-1.7.0rc29.dist-info}/RECORD +67 -67
  64. {mlrun-1.7.0rc25.dist-info → mlrun-1.7.0rc29.dist-info}/WHEEL +1 -1
  65. {mlrun-1.7.0rc25.dist-info → mlrun-1.7.0rc29.dist-info}/LICENSE +0 -0
  66. {mlrun-1.7.0rc25.dist-info → mlrun-1.7.0rc29.dist-info}/entry_points.txt +0 -0
  67. {mlrun-1.7.0rc25.dist-info → mlrun-1.7.0rc29.dist-info}/top_level.txt +0 -0

mlrun/model_monitoring/api.py

@@ -47,8 +47,8 @@ def get_or_create_model_endpoint(
     function_name: str = "",
     context: mlrun.MLClientCtx = None,
     sample_set_statistics: dict[str, typing.Any] = None,
-    drift_threshold: float = None,
-    possible_drift_threshold: float = None,
+    drift_threshold: typing.Optional[float] = None,
+    possible_drift_threshold: typing.Optional[float] = None,
     monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.disabled,
     db_session=None,
 ) -> ModelEndpoint:
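The hunk above is an annotation fix that accompanies the deprecation of the two drift-threshold parameters in the hunks below: a parameter whose default is None should be typed typing.Optional[float] rather than float. A minimal standalone illustration, not mlrun code; the function name and the 0.7 fallback are made up:

import typing

def set_threshold(drift_threshold: typing.Optional[float] = None) -> float:
    # fall back to an illustrative default when the caller passes nothing
    return 0.7 if drift_threshold is None else drift_threshold

print(set_threshold())     # 0.7
print(set_threshold(0.5))  # 0.5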
@@ -69,14 +69,14 @@ def get_or_create_model_endpoint(
                                      full function hash.
     :param sample_set_statistics:    Dictionary of sample set statistics that will be used as a reference data for
                                      the new model endpoint (applicable only to new endpoint_id).
-    :param drift_threshold:          The threshold of which to mark drifts (applicable only to new endpoint_id).
-    :param possible_drift_threshold: The threshold of which to mark possible drifts (applicable only to new
+    :param drift_threshold:          (deprecated) The threshold of which to mark drifts (applicable only to new
+                                     endpoint_id).
+    :param possible_drift_threshold: (deprecated) The threshold of which to mark possible drifts (applicable only to new
                                      endpoint_id).
     :param monitoring_mode:          If enabled, apply model monitoring features on the provided endpoint id
                                      (applicable only to new endpoint_id).
     :param db_session:               A runtime session that manages the current dialog with the database.

-
     :return: A ModelEndpoint object
     """

@@ -98,8 +98,6 @@ def get_or_create_model_endpoint(
             model_endpoint=model_endpoint,
             model_path=model_path,
             sample_set_statistics=sample_set_statistics,
-            drift_threshold=drift_threshold,
-            possible_drift_threshold=possible_drift_threshold,
         )

     except mlrun.errors.MLRunNotFoundError:
@@ -113,8 +111,6 @@ def get_or_create_model_endpoint(
             function_name=function_name,
             context=context,
             sample_set_statistics=sample_set_statistics,
-            drift_threshold=drift_threshold,
-            possible_drift_threshold=possible_drift_threshold,
             monitoring_mode=monitoring_mode,
         )
     return model_endpoint
@@ -241,9 +237,7 @@ def _model_endpoint_validations(
     model_endpoint: ModelEndpoint,
     model_path: str = "",
     sample_set_statistics: dict[str, typing.Any] = None,
-    drift_threshold: float = None,
-    possible_drift_threshold: float = None,
-):
+) -> None:
     """
     Validate that provided model endpoint configurations match the stored fields of the provided `ModelEndpoint`
     object. Usually, this method is called by `get_or_create_model_endpoint()` in cases that the model endpoint
@@ -257,11 +251,6 @@ def _model_endpoint_validations(
                                      is forbidden to provide a different reference data to that model endpoint.
                                      In case of discrepancy between the provided `sample_set_statistics` and the
                                      `model_endpoints.spec.feature_stats`, a warning will be presented to the user.
-    :param drift_threshold:          The threshold of which to mark drifts. Should be similar to the drift threshold
-                                     that has already assigned to the current model endpoint.
-    :param possible_drift_threshold: The threshold of which to mark possible drifts. Should be similar to the possible
-                                     drift threshold that has already assigned to the current model endpoint.
-
     """
     # Model path
     if model_path and model_endpoint.spec.model_uri != model_path:
@@ -280,28 +269,6 @@ def _model_endpoint_validations(
                 "Provided sample set statistics is different from the registered statistics. "
                 "If new sample set statistics is to be used, new model endpoint should be created"
             )
-    # drift and possible drift thresholds
-    if drift_threshold:
-        current_drift_threshold = model_endpoint.spec.monitor_configuration.get(
-            mm_constants.EventFieldType.DRIFT_DETECTED_THRESHOLD,
-            mlrun.mlconf.model_endpoint_monitoring.drift_thresholds.default.drift_detected,
-        )
-        if current_drift_threshold != drift_threshold:
-            raise mlrun.errors.MLRunInvalidArgumentError(
-                f"Cannot change existing drift threshold. Expected {current_drift_threshold}, got {drift_threshold} "
-                f"Please update drift threshold or generate a new model endpoint record"
-            )
-
-    if possible_drift_threshold:
-        current_possible_drift_threshold = model_endpoint.spec.monitor_configuration.get(
-            mm_constants.EventFieldType.POSSIBLE_DRIFT_THRESHOLD,
-            mlrun.mlconf.model_endpoint_monitoring.drift_thresholds.default.possible_drift,
-        )
-        if current_possible_drift_threshold != possible_drift_threshold:
-            raise mlrun.errors.MLRunInvalidArgumentError(
-                f"Cannot change existing possible drift threshold. Expected {current_possible_drift_threshold}, "
-                f"got {possible_drift_threshold}. Please update drift threshold or generate a new model endpoint record"
-            )


 def write_monitoring_df(
@@ -354,8 +321,6 @@ def _generate_model_endpoint(
     function_name: str,
     context: mlrun.MLClientCtx,
     sample_set_statistics: dict[str, typing.Any],
-    drift_threshold: float,
-    possible_drift_threshold: float,
     monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.disabled,
 ) -> ModelEndpoint:
     """
@@ -374,8 +339,6 @@ def _generate_model_endpoint(
     :param sample_set_statistics:    Dictionary of sample set statistics that will be used as a reference data for
                                      the current model endpoint. Will be stored under
                                      `model_endpoint.status.feature_stats`.
-    :param drift_threshold:          The threshold of which to mark drifts.
-    :param possible_drift_threshold: The threshold of which to mark possible drifts.

     :return `mlrun.model_monitoring.model_endpoint.ModelEndpoint` object.
     """
@@ -393,15 +356,6 @@ def _generate_model_endpoint(
     model_endpoint.spec.model_uri = model_path
     model_endpoint.spec.model = model_endpoint_name
     model_endpoint.spec.model_class = "drift-analysis"
-    if drift_threshold:
-        model_endpoint.spec.monitor_configuration[
-            mm_constants.EventFieldType.DRIFT_DETECTED_THRESHOLD
-        ] = drift_threshold
-    if possible_drift_threshold:
-        model_endpoint.spec.monitor_configuration[
-            mm_constants.EventFieldType.POSSIBLE_DRIFT_THRESHOLD
-        ] = possible_drift_threshold
-
     model_endpoint.spec.monitoring_mode = monitoring_mode
     model_endpoint.status.first_request = model_endpoint.status.last_request = (
         datetime_now().isoformat()

mlrun/model_monitoring/applications/histogram_data_drift.py

@@ -195,7 +195,10 @@ class HistogramDataDriftApplication(ModelMonitoringApplicationBaseV2):
                EventFieldType.CURRENT_STATS: json.dumps(
                    monitoring_context.sample_df_stats
                ),
-               EventFieldType.DRIFT_MEASURES: metrics_per_feature.T.to_json(),
+               EventFieldType.DRIFT_MEASURES: json.dumps(
+                   metrics_per_feature.T.to_dict()
+                   | {metric.name: metric.value for metric in metrics}
+               ),
                EventFieldType.DRIFT_STATUS: status.value,
            },
        )
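The drift-measures payload is now built with json.dumps over a merged dict instead of DataFrame.to_json(), so the per-feature metrics and the aggregated metrics land in one JSON document. A self-contained sketch of the same construction, with made-up feature names and values (this is not the mlrun code itself):

import json
import pandas as pd

# per-feature metrics: features as the index, metric names as the columns
metrics_per_feature = pd.DataFrame(
    {"hellinger": [0.12, 0.34], "tvd": [0.10, 0.28]},
    index=["feature_a", "feature_b"],
)
# aggregated metrics that could not be embedded in the plain DataFrame JSON
general_metrics = {"hellinger_mean": 0.23, "tvd_mean": 0.19}

# transpose -> {feature: {metric: value}}, then merge via the dict-union operator (PEP 584, Python 3.9+)
drift_measures = json.dumps(metrics_per_feature.T.to_dict() | general_metrics)
print(drift_measures)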

mlrun/model_monitoring/db/stores/__init__.py

@@ -63,7 +63,7 @@ class ObjectStoreFactory(enum.Enum):
         :param value: Provided enum (invalid) value.
         """
         valid_values = list(cls.__members__.keys())
-        raise mlrun.errors.MLRunInvalidArgumentError(
+        raise mlrun.errors.MLRunInvalidMMStoreType(
             f"{value} is not a valid endpoint store, please choose a valid value: %{valid_values}."
         )

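Invalid store names flow through the enum's _missing_ hook, which now raises the dedicated MLRunInvalidMMStoreType error instead of the generic invalid-argument one. A standalone sketch of that pattern; the StoreKind enum and its members below are hypothetical stand-ins, and plain ValueError is used in place of the mlrun error class:

import enum

class StoreKind(enum.Enum):
    v3io_nosql = "v3io-nosql"
    sql = "sql"

    @classmethod
    def _missing_(cls, value):
        # called by Enum when lookup-by-value fails; raise a domain-specific error
        valid_values = list(cls.__members__.keys())
        raise ValueError(f"{value} is not a valid endpoint store, choose one of: {valid_values}")

print(StoreKind("sql"))  # StoreKind.sql
# StoreKind("mongodb")   # raises ValueError listing the valid members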
@@ -88,21 +88,28 @@ def get_model_endpoint_store(
 def get_store_object(
     project: str,
     secret_provider: typing.Optional[typing.Callable[[str], str]] = None,
+    store_connection_string: typing.Optional[str] = None,
     **kwargs,
 ) -> StoreBase:
     """
     Generate a store object. If a connection string is provided, the store type will be updated according to the
     connection string. Currently, the supported store types are SQL and v3io-nosql.

-    :param project:         The name of the project.
-    :param secret_provider: An optional secret provider to get the connection string secret.
+    :param project:                  The name of the project.
+    :param secret_provider:          An optional secret provider to get the connection string secret.
+    :param store_connection_string:  Optional explicit connection string of the store.

     :return: `StoreBase` object. Using this object, the user can apply different operations such as write, update, get
-             and delete a model endpoint record.
+             and delete a model endpoint record.
+    :raise:  `MLRunInvalidMMStoreType` if the user didn't provide store connection
+             or the provided store connection is invalid.
     """

-    store_connection_string = mlrun.model_monitoring.helpers.get_connection_string(
-        secret_provider=secret_provider
+    store_connection_string = (
+        store_connection_string
+        or mlrun.model_monitoring.helpers.get_connection_string(
+            secret_provider=secret_provider
+        )
     )

     if store_connection_string and (
@@ -111,10 +118,15 @@ def get_store_object(
     ):
         store_type = mlrun.common.schemas.model_monitoring.ModelEndpointTarget.SQL
         kwargs["store_connection_string"] = store_connection_string
+    elif store_connection_string and store_connection_string == "v3io":
+        store_type = (
+            mlrun.common.schemas.model_monitoring.ModelEndpointTarget.V3IO_NOSQL
+        )
     else:
-        # Set the default store type if no connection has been set
-        store_type = mlrun.mlconf.model_endpoint_monitoring.store_type
-
+        raise mlrun.errors.MLRunInvalidMMStoreType(
+            "You must provide a valid store connection by using "
+            "set_model_monitoring_credentials API."
+        )
     # Get store type value from ObjectStoreFactory enum class
     store_type_fact = ObjectStoreFactory(store_type)

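With this change there is no silent fallback to a default store type: the connection string must come either from the new store_connection_string argument or from the project secrets (set via set_model_monitoring_credentials), otherwise MLRunInvalidMMStoreType is raised. A hedged usage sketch; the DSN is a placeholder and any extra keyword arguments a concrete store might need are omitted:

import mlrun.errors
from mlrun.model_monitoring.db.stores import get_store_object

try:
    store = get_store_object(
        project="my-project",
        # placeholder DSN; a MySQL/SQLite connection string selects the SQL store,
        # the literal "v3io" selects the v3io-nosql store
        store_connection_string="mysql+pymysql://user:password@db-host:3306/mlrun_mm",
    )
except mlrun.errors.MLRunInvalidMMStoreType as exc:
    print(f"No valid model monitoring store connection: {exc}")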

mlrun/model_monitoring/db/stores/base/store.py

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import json
 import typing
 from abc import ABC, abstractmethod

@@ -170,3 +170,41 @@ class StoreBase(ABC):

         :return: A list of the available metrics.
         """
+
+    @staticmethod
+    def _validate_labels(
+        endpoint_dict: dict,
+        labels: list,
+    ) -> bool:
+        """Validate that the model endpoint dictionary has the provided labels. There are 2 possible cases:
+        1 - Labels were provided as a list of key-values pairs (e.g. ['label_1=value_1', 'label_2=value_2']): Validate
+        that each pair exist in the endpoint dictionary.
+        2 - Labels were provided as a list of key labels (e.g. ['label_1', 'label_2']): Validate that each key exist in
+        the endpoint labels dictionary.
+
+        :param endpoint_dict: Dictionary of the model endpoint records.
+        :param labels:        List of dictionary of required labels.
+
+        :return: True if the labels exist in the endpoint labels dictionary, otherwise False.
+        """
+
+        # Convert endpoint labels into dictionary
+        endpoint_labels = json.loads(
+            endpoint_dict.get(mm_schemas.EventFieldType.LABELS)
+        )
+
+        for label in labels:
+            # Case 1 - label is a key=value pair
+            if "=" in label:
+                lbl, value = list(map(lambda x: x.strip(), label.split("=")))
+                if lbl not in endpoint_labels or str(endpoint_labels[lbl]) != value:
+                    return False
+            # Case 2 - label is just a key
+            else:
+                if label not in endpoint_labels:
+                    return False
+
+        return True
+
+    def create_tables(self):
+        pass
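The label matching that used to live in the SQL store is now a StoreBase static helper (its SQL-specific copy is removed further down in sql_store.py). A standalone illustration of the matching rules, using a made-up "labels" key and a hypothetical has_labels function rather than the real EventFieldType.LABELS constant:

import json

# a stored endpoint record keeps its labels as a JSON string
endpoint_dict = {"labels": json.dumps({"env": "prod", "team": "ds"})}
endpoint_labels = json.loads(endpoint_dict["labels"])

def has_labels(endpoint_labels: dict, labels: list) -> bool:
    for label in labels:
        if "=" in label:
            # "key=value" must match both the key and the stringified value
            key, value = (part.strip() for part in label.split("="))
            if key not in endpoint_labels or str(endpoint_labels[key]) != value:
                return False
        elif label not in endpoint_labels:
            # a bare key only needs to exist
            return False
    return True

print(has_labels(endpoint_labels, ["env=prod", "team"]))  # True
print(has_labels(endpoint_labels, ["env=staging"]))       # False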

mlrun/model_monitoring/db/stores/sqldb/models/base.py

@@ -11,8 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 from sqlalchemy import (
-    TIMESTAMP,
+    DATETIME,
+    TIMESTAMP,  # TODO: migrate to DATETIME, see ML-6921
     Boolean,
     Column,
     Float,
@@ -90,11 +92,11 @@ class ModelEndpointsBaseTable(BaseModel):
     metrics = Column(EventFieldType.METRICS, Text)
     first_request = Column(
         EventFieldType.FIRST_REQUEST,
-        TIMESTAMP(timezone=True),
+        TIMESTAMP(timezone=True),  # TODO: migrate to DATETIME, see ML-6921
     )
     last_request = Column(
         EventFieldType.LAST_REQUEST,
-        TIMESTAMP(timezone=True),
+        TIMESTAMP(timezone=True),  # TODO: migrate to DATETIME, see ML-6921
     )


@@ -122,11 +124,11 @@ class ApplicationResultBaseTable(BaseModel):

     start_infer_time = Column(
         WriterEvent.START_INFER_TIME,
-        TIMESTAMP(timezone=True),
+        DATETIME(timezone=True),
     )
     end_infer_time = Column(
         WriterEvent.END_INFER_TIME,
-        TIMESTAMP(timezone=True),
+        DATETIME(timezone=True),
     )

     result_status = Column(ResultData.RESULT_STATUS, String(10))
@@ -152,11 +154,11 @@ class ApplicationMetricsBaseTable(BaseModel):
     )
     start_infer_time = Column(
         WriterEvent.START_INFER_TIME,
-        TIMESTAMP(timezone=True),
+        DATETIME(timezone=True),
     )
     end_infer_time = Column(
         WriterEvent.END_INFER_TIME,
-        TIMESTAMP(timezone=True),
+        DATETIME(timezone=True),
     )
     metric_name = Column(
         MetricData.METRIC_NAME,

mlrun/model_monitoring/db/stores/sqldb/models/mysql.py

@@ -34,10 +34,12 @@ Base = declarative_base()
 class ModelEndpointsTable(Base, ModelEndpointsBaseTable):
     first_request = Column(
         EventFieldType.FIRST_REQUEST,
+        # TODO: migrate to DATETIME, see ML-6921
         sqlalchemy.dialects.mysql.TIMESTAMP(fsp=3, timezone=True),
     )
     last_request = Column(
         EventFieldType.LAST_REQUEST,
+        # TODO: migrate to DATETIME, see ML-6921
         sqlalchemy.dialects.mysql.TIMESTAMP(fsp=3, timezone=True),
     )

@@ -52,11 +54,11 @@ class _ApplicationResultOrMetric:

     start_infer_time = Column(
         WriterEvent.START_INFER_TIME,
-        sqlalchemy.dialects.mysql.TIMESTAMP(fsp=3, timezone=True),
+        sqlalchemy.dialects.mysql.DATETIME(fsp=3, timezone=True),
     )
     end_infer_time = Column(
         WriterEvent.END_INFER_TIME,
-        sqlalchemy.dialects.mysql.TIMESTAMP(fsp=3, timezone=True),
+        sqlalchemy.dialects.mysql.DATETIME(fsp=3, timezone=True),
     )

     @declared_attr
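The infer-time columns move from MySQL TIMESTAMP to DATETIME, while the model endpoint request columns stay on TIMESTAMP with a TODO. MySQL TIMESTAMP is limited to the 1970-2038 range and is converted through the session time zone, whereas DATETIME(fsp=3) stores the value verbatim with millisecond precision. A small sketch that just compiles the two SQLAlchemy types for the MySQL dialect (the expected outputs are shown in comments):

from sqlalchemy.dialects import mysql

# fsp=3 keeps millisecond precision in both cases; timezone=True is accepted by both
# type constructors but only TIMESTAMP is timezone-converted on the server side
print(mysql.TIMESTAMP(fsp=3, timezone=True).compile(dialect=mysql.dialect()))  # TIMESTAMP(3)
print(mysql.DATETIME(fsp=3, timezone=True).compile(dialect=mysql.dialect()))   # DATETIME(3)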

mlrun/model_monitoring/db/stores/sqldb/sql_store.py

@@ -13,7 +13,6 @@
 # limitations under the License.

 import datetime
-import json
 import typing
 import uuid

@@ -21,6 +20,7 @@ import pandas as pd
 import sqlalchemy
 import sqlalchemy.exc
 import sqlalchemy.orm
+from sqlalchemy.engine import make_url
 from sqlalchemy.sql.elements import BinaryExpression

 import mlrun.common.model_monitoring.helpers
@@ -62,6 +62,10 @@ class SQLStoreBase(StoreBase):

         self._sql_connection_string = kwargs.get("store_connection_string")
         self._engine = get_engine(dsn=self._sql_connection_string)
+        self._init_tables()
+
+    def create_tables(self):
+        self._create_tables_if_not_exist()

     def _init_tables(self):
         self._init_model_endpoints_table()
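Table wiring is now eager: the constructor calls _init_tables(), and physical table creation is an explicit create_tables() call (a no-op pass on the StoreBase base class) instead of a side effect of the first failed query. A hedged sketch of how a caller would provision the tables, reusing the get_store_object call shown earlier; the DSN is still a placeholder:

from mlrun.model_monitoring.db.stores import get_store_object

store = get_store_object(
    project="my-project",
    store_connection_string="mysql+pymysql://user:password@db-host:3306/mlrun_mm",  # placeholder
)
# explicitly create the model endpoints / app results / app metrics / schedules tables;
# on a non-SQL store this call is simply a no-op
store.create_tables()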
@@ -70,13 +74,13 @@ class SQLStoreBase(StoreBase):
         self._init_monitoring_schedules_table()

     def _init_model_endpoints_table(self):
-        self.ModelEndpointsTable = (
+        self.model_endpoints_table = (
             mlrun.model_monitoring.db.stores.sqldb.models._get_model_endpoints_table(
                 connection_string=self._sql_connection_string
             )
         )
         self._tables[mm_schemas.EventFieldType.MODEL_ENDPOINTS] = (
-            self.ModelEndpointsTable
+            self.model_endpoints_table
         )

     def _init_application_results_table(self):
@@ -150,22 +154,17 @@ class SQLStoreBase(StoreBase):
         :param criteria: A list of binary expressions that filter the query.
         """
         with create_session(dsn=self._sql_connection_string) as session:
-            try:
-                logger.debug(
-                    "Querying the DB",
-                    table=table.__name__,
-                    criteria=[str(criterion) for criterion in criteria],
-                )
-                # Generate the get query
-                return (
-                    session.query(table)  # pyright: ignore[reportOptionalCall]
-                    .filter(*criteria)
-                    .one_or_none()
-                )
-            except sqlalchemy.exc.ProgrammingError:
-                # Probably table doesn't exist, try to create tables
-                self._create_tables_if_not_exist()
-                return
+            logger.debug(
+                "Querying the DB",
+                table=table.__name__,
+                criteria=[str(criterion) for criterion in criteria],
+            )
+            # Generate the get query
+            return (
+                session.query(table)  # pyright: ignore[reportOptionalCall]
+                .filter(*criteria)
+                .one_or_none()
+            )

     def _delete(
         self,
@@ -213,14 +212,13 @@ class SQLStoreBase(StoreBase):
         of the attributes dictionary should exist in the SQL table.

         """
-        self._init_model_endpoints_table()

         attributes.pop(mm_schemas.EventFieldType.ENDPOINT_ID, None)

         self._update(
             attributes=attributes,
-            table=self.ModelEndpointsTable,
-            criteria=[self.ModelEndpointsTable.uid == endpoint_id],
+            table=self.model_endpoints_table,
+            criteria=[self.model_endpoints_table.uid == endpoint_id],
         )

     def delete_model_endpoint(self, endpoint_id: str) -> None:
@@ -229,11 +227,10 @@ class SQLStoreBase(StoreBase):

         :param endpoint_id: The unique id of the model endpoint.
         """
-        self._init_model_endpoints_table()
         # Delete the model endpoint record using sqlalchemy ORM
         self._delete(
-            table=self.ModelEndpointsTable,
-            criteria=[self.ModelEndpointsTable.uid == endpoint_id],
+            table=self.model_endpoints_table,
+            criteria=[self.model_endpoints_table.uid == endpoint_id],
         )

     def get_model_endpoint(
@@ -249,12 +246,11 @@ class SQLStoreBase(StoreBase):

         :raise MLRunNotFoundError: If the model endpoints table was not found or the model endpoint id was not found.
         """
-        self._init_model_endpoints_table()

         # Get the model endpoint record
         endpoint_record = self._get(
-            table=self.ModelEndpointsTable,
-            criteria=[self.ModelEndpointsTable.uid == endpoint_id],
+            table=self.model_endpoints_table,
+            criteria=[self.model_endpoints_table.uid == endpoint_id],
         )

         if not endpoint_record:
@@ -286,23 +282,22 @@ class SQLStoreBase(StoreBase):

         :return: A list of model endpoint dictionaries.
         """
-        self._init_model_endpoints_table()
         # Generate an empty model endpoints that will be filled afterwards with model endpoint dictionaries
         endpoint_list = []

         model_endpoints_table = (
-            self.ModelEndpointsTable.__table__  # pyright: ignore[reportAttributeAccessIssue]
+            self.model_endpoints_table.__table__  # pyright: ignore[reportAttributeAccessIssue]
         )
-
         # Get the model endpoints records using sqlalchemy ORM
         with create_session(dsn=self._sql_connection_string) as session:
             # Generate the list query
-            query = session.query(self.ModelEndpointsTable).filter_by(
+            query = session.query(self.model_endpoints_table).filter_by(
                 project=self.project
             )

             # Apply filters
             if model:
+                model = model if ":" in model else f"{model}:latest"
                 query = self._filter_values(
                     query=query,
                     model_endpoints_table=model_endpoints_table,
@@ -310,11 +305,12 @@ class SQLStoreBase(StoreBase):
                     filtered_values=[model],
                 )
             if function:
+                function_uri = f"{self.project}/{function}"
                 query = self._filter_values(
                     query=query,
                     model_endpoints_table=model_endpoints_table,
-                    key_filter=mm_schemas.EventFieldType.FUNCTION,
-                    filtered_values=[function],
+                    key_filter=mm_schemas.EventFieldType.FUNCTION_URI,
+                    filtered_values=[function_uri],
                 )
             if uids:
                 query = self._filter_values(
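list_model_endpoints now normalizes its filters before querying: a model name given without a tag is matched as "<name>:latest", and a function name is expanded to the stored "<project>/<function>" URI and matched against the function_uri column rather than the bare function name. A tiny standalone sketch of the normalization, with made-up values:

project, model, function = "my-project", "churn-model", "serving"

model = model if ":" in model else f"{model}:latest"
function_uri = f"{project}/{function}"

print(model)         # churn-model:latest
print(function_uri)  # my-project/serving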
@@ -364,11 +360,9 @@ class SQLStoreBase(StoreBase):
         """

         if kind == mm_schemas.WriterEventKind.METRIC:
-            self._init_application_metrics_table()
             table = self.application_metrics_table
             table_name = mm_schemas.FileTargetKind.APP_METRICS
         elif kind == mm_schemas.WriterEventKind.RESULT:
-            self._init_application_results_table()
             table = self.application_results_table
             table_name = mm_schemas.FileTargetKind.APP_RESULTS
         else:
@@ -442,7 +436,6 @@ class SQLStoreBase(StoreBase):
         :return: Timestamp as a Unix time.
         :raise: MLRunNotFoundError if last analyzed value is not found.
         """
-        self._init_monitoring_schedules_table()
         monitoring_schedule_record = self._get(
             table=self.MonitoringSchedulesTable,
             criteria=self._get_filter_criteria(
@@ -469,8 +462,6 @@ class SQLStoreBase(StoreBase):
         :param last_analyzed: Timestamp as a Unix time that represents the last analyzed time of a certain
                               application and model endpoint.
         """
-        self._init_monitoring_schedules_table()
-
         criteria = self._get_filter_criteria(
             table=self.MonitoringSchedulesTable,
             endpoint_id=endpoint_id,
@@ -500,7 +491,6 @@ class SQLStoreBase(StoreBase):
     def _delete_last_analyzed(
         self, endpoint_id: str, application_name: typing.Optional[str] = None
     ) -> None:
-        self._init_monitoring_schedules_table()
         criteria = self._get_filter_criteria(
             table=self.MonitoringSchedulesTable,
             endpoint_id=endpoint_id,
@@ -512,7 +502,6 @@ class SQLStoreBase(StoreBase):
     def _delete_application_result(
         self, endpoint_id: str, application_name: typing.Optional[str] = None
     ) -> None:
-        self._init_application_results_table()
         criteria = self._get_filter_criteria(
             table=self.application_results_table,
             endpoint_id=endpoint_id,
@@ -524,7 +513,6 @@ class SQLStoreBase(StoreBase):
     def _delete_application_metrics(
        self, endpoint_id: str, application_name: typing.Optional[str] = None
     ) -> None:
-        self._init_application_metrics_table()
         criteria = self._get_filter_criteria(
             table=self.application_metrics_table,
             endpoint_id=endpoint_id,
@@ -538,8 +526,12 @@ class SQLStoreBase(StoreBase):

         for table in self._tables:
             # Create table if not exist. The `metadata` contains the `ModelEndpointsTable`
+            db_name = make_url(self._sql_connection_string).database
             if not self._engine.has_table(table):
+                logger.info(f"Creating table {table} on {db_name} db.")
                 self._tables[table].metadata.create_all(bind=self._engine)
+            else:
+                logger.info(f"Table {table} already exists on {db_name} db.")

     @staticmethod
     def _filter_values(
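The new logging pulls the database name out of the DSN with SQLAlchemy's make_url, which parses a connection string without connecting to anything. A quick standalone sketch (the DSN is a placeholder):

from sqlalchemy.engine import make_url

url = make_url("mysql+pymysql://user:password@db-host:3306/mlrun_mm")
print(url.database)    # mlrun_mm
print(url.drivername)  # mysql+pymysql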
@@ -581,41 +573,6 @@ class SQLStoreBase(StoreBase):
         # Apply AND operator on the SQL query object with the filters tuple
         return query.filter(sqlalchemy.and_(*filter_query))

-    @staticmethod
-    def _validate_labels(
-        endpoint_dict: dict,
-        labels: list,
-    ) -> bool:
-        """Validate that the model endpoint dictionary has the provided labels. There are 2 possible cases:
-        1 - Labels were provided as a list of key-values pairs (e.g. ['label_1=value_1', 'label_2=value_2']): Validate
-        that each pair exist in the endpoint dictionary.
-        2 - Labels were provided as a list of key labels (e.g. ['label_1', 'label_2']): Validate that each key exist in
-        the endpoint labels dictionary.
-
-        :param endpoint_dict: Dictionary of the model endpoint records.
-        :param labels:        List of dictionary of required labels.
-
-        :return: True if the labels exist in the endpoint labels dictionary, otherwise False.
-        """
-
-        # Convert endpoint labels into dictionary
-        endpoint_labels = json.loads(
-            endpoint_dict.get(mm_schemas.EventFieldType.LABELS)
-        )
-
-        for label in labels:
-            # Case 1 - label is a key=value pair
-            if "=" in label:
-                lbl, value = list(map(lambda x: x.strip(), label.split("=")))
-                if lbl not in endpoint_labels or str(endpoint_labels[lbl]) != value:
-                    return False
-            # Case 2 - label is just a key
-            else:
-                if label not in endpoint_labels:
-                    return False
-
-        return True
-
     def delete_model_endpoints_resources(self) -> None:
         """
@@ -650,11 +607,9 @@ class SQLStoreBase(StoreBase):
             type=type,
         )
         if type == mm_schemas.ModelEndpointMonitoringMetricType.METRIC:
-            self._init_application_metrics_table()
             table = self.application_metrics_table
             name_col = mm_schemas.MetricData.METRIC_NAME
         else:
-            self._init_application_results_table()
             table = self.application_results_table
             name_col = mm_schemas.ResultData.RESULT_NAME