mlrun-1.7.0rc16-py3-none-any.whl → mlrun-1.7.0rc18-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of mlrun might be problematic.
- mlrun/alerts/alert.py +27 -24
- mlrun/artifacts/manager.py +5 -1
- mlrun/artifacts/model.py +1 -1
- mlrun/common/runtimes/constants.py +3 -0
- mlrun/common/schemas/__init__.py +8 -2
- mlrun/common/schemas/alert.py +49 -10
- mlrun/common/schemas/client_spec.py +1 -0
- mlrun/common/schemas/function.py +4 -0
- mlrun/common/schemas/model_monitoring/__init__.py +3 -1
- mlrun/common/schemas/model_monitoring/constants.py +21 -1
- mlrun/common/schemas/model_monitoring/grafana.py +9 -5
- mlrun/common/schemas/model_monitoring/model_endpoints.py +17 -6
- mlrun/common/schemas/project.py +3 -1
- mlrun/config.py +9 -3
- mlrun/data_types/to_pandas.py +5 -5
- mlrun/datastore/datastore.py +6 -2
- mlrun/datastore/redis.py +2 -2
- mlrun/datastore/s3.py +5 -0
- mlrun/datastore/sources.py +111 -6
- mlrun/datastore/targets.py +2 -2
- mlrun/db/base.py +6 -2
- mlrun/db/httpdb.py +22 -3
- mlrun/db/nopdb.py +10 -3
- mlrun/errors.py +6 -0
- mlrun/feature_store/retrieval/conversion.py +5 -5
- mlrun/feature_store/retrieval/job.py +3 -2
- mlrun/feature_store/retrieval/spark_merger.py +2 -1
- mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +2 -2
- mlrun/lists.py +2 -0
- mlrun/model.py +8 -6
- mlrun/model_monitoring/db/stores/base/store.py +16 -3
- mlrun/model_monitoring/db/stores/sqldb/sql_store.py +44 -43
- mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +190 -91
- mlrun/model_monitoring/db/tsdb/__init__.py +35 -6
- mlrun/model_monitoring/db/tsdb/base.py +25 -18
- mlrun/model_monitoring/db/tsdb/tdengine/__init__.py +15 -0
- mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +207 -0
- mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +45 -0
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +231 -0
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +103 -64
- mlrun/model_monitoring/db/v3io_tsdb_reader.py +217 -16
- mlrun/model_monitoring/helpers.py +32 -0
- mlrun/model_monitoring/stream_processing.py +7 -4
- mlrun/model_monitoring/writer.py +19 -14
- mlrun/package/utils/_formatter.py +2 -2
- mlrun/projects/project.py +40 -11
- mlrun/render.py +8 -5
- mlrun/runtimes/__init__.py +1 -0
- mlrun/runtimes/databricks_job/databricks_wrapper.py +1 -1
- mlrun/runtimes/nuclio/api_gateway.py +97 -77
- mlrun/runtimes/nuclio/application/application.py +160 -7
- mlrun/runtimes/nuclio/function.py +18 -12
- mlrun/track/tracker.py +2 -1
- mlrun/utils/async_http.py +25 -5
- mlrun/utils/helpers.py +28 -3
- mlrun/utils/logger.py +11 -6
- mlrun/utils/notifications/notification/slack.py +27 -7
- mlrun/utils/notifications/notification_pusher.py +45 -41
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/METADATA +8 -3
- {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/RECORD +65 -61
- {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/WHEEL +0 -0
- {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc16.dist-info → mlrun-1.7.0rc18.dist-info}/top_level.txt +0 -0
mlrun/model_monitoring/db/stores/sqldb/sql_store.py

@@ -21,7 +21,7 @@ import pandas as pd
 import sqlalchemy
 
 import mlrun.common.model_monitoring.helpers
-import mlrun.common.schemas.model_monitoring as
+import mlrun.common.schemas.model_monitoring as mm_schemas
 import mlrun.model_monitoring.db
 import mlrun.model_monitoring.db.stores.sqldb.models
 import mlrun.model_monitoring.helpers
@@ -71,7 +71,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
                 connection_string=self._sql_connection_string
             )
         )
-        self._tables[
+        self._tables[mm_schemas.EventFieldType.MODEL_ENDPOINTS] = (
             self.ModelEndpointsTable
         )
 
@@ -81,7 +81,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
                 connection_string=self._sql_connection_string
             )
         )
-        self._tables[
+        self._tables[mm_schemas.FileTargetKind.APP_RESULTS] = (
             self.ApplicationResultsTable
         )
 
@@ -89,7 +89,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         self.MonitoringSchedulesTable = mlrun.model_monitoring.db.stores.sqldb.models._get_monitoring_schedules_table(
             connection_string=self._sql_connection_string
         )
-        self._tables[
+        self._tables[mm_schemas.FileTargetKind.MONITORING_SCHEDULES] = (
             self.MonitoringSchedulesTable
         )
 
@@ -182,12 +182,12 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """
 
         # Adjust timestamps fields
-        endpoint[
-
+        endpoint[mm_schemas.EventFieldType.FIRST_REQUEST] = (endpoint)[
+            mm_schemas.EventFieldType.LAST_REQUEST
         ] = mlrun.utils.datetime_now()
 
         self._write(
-            table=
+            table=mm_schemas.EventFieldType.MODEL_ENDPOINTS,
             event=endpoint,
         )
 
@@ -204,9 +204,9 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """
         self._init_model_endpoints_table()
 
-        attributes.pop(
+        attributes.pop(mm_schemas.EventFieldType.ENDPOINT_ID, None)
 
-        filter_endpoint = {
+        filter_endpoint = {mm_schemas.EventFieldType.UID: endpoint_id}
 
         self._update(
             attributes=attributes, table=self.ModelEndpointsTable, **filter_endpoint
@@ -220,7 +220,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         """
         self._init_model_endpoints_table()
 
-        filter_endpoint = {
+        filter_endpoint = {mm_schemas.EventFieldType.UID: endpoint_id}
         # Delete the model endpoint record using sqlalchemy ORM
         self._delete(table=self.ModelEndpointsTable, **filter_endpoint)
 
@@ -240,7 +240,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         self._init_model_endpoints_table()
 
         # Get the model endpoint record using sqlalchemy ORM
-        filter_endpoint = {
+        filter_endpoint = {mm_schemas.EventFieldType.UID: endpoint_id}
         endpoint_record = self._get(table=self.ModelEndpointsTable, **filter_endpoint)
 
         if not endpoint_record:
@@ -292,32 +292,32 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=
+                key_filter=mm_schemas.EventFieldType.MODEL,
                 filtered_values=[model],
             )
         if function:
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=
+                key_filter=mm_schemas.EventFieldType.FUNCTION,
                 filtered_values=[function],
             )
         if uids:
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=
+                key_filter=mm_schemas.EventFieldType.UID,
                 filtered_values=uids,
                 combined=False,
             )
         if top_level:
-            node_ep = str(
-            router_ep = str(
+            node_ep = str(mm_schemas.EndpointType.NODE_EP.value)
+            router_ep = str(mm_schemas.EndpointType.ROUTER.value)
             endpoint_types = [node_ep, router_ep]
             query = self._filter_values(
                 query=query,
                 model_endpoints_table=model_endpoints_table,
-                key_filter=
+                key_filter=mm_schemas.EventFieldType.ENDPOINT_TYPE,
                 filtered_values=endpoint_types,
                 combined=False,
             )
@@ -338,8 +338,8 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
     def write_application_event(
         self,
         event: dict[str, typing.Any],
-        kind:
-    ):
+        kind: mm_schemas.WriterEventKind = mm_schemas.WriterEventKind.RESULT,
+    ) -> None:
         """
         Write a new application event in the target table.
 
@@ -349,16 +349,14 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         :param kind: The type of the event, can be either "result" or "metric".
         """
 
-        if kind ==
+        if kind == mm_schemas.WriterEventKind.METRIC:
             # TODO : Implement the logic for writing metrics to MySQL
             return
 
         self._init_application_results_table()
 
         application_filter_dict = {
-
-                event
-            )
+            mm_schemas.EventFieldType.UID: self._generate_application_result_uid(event)
         }
 
         application_record = self._get(
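For context, a minimal usage sketch of the updated `write_application_event` signature (illustrative only, not part of the diff; `store` stands in for an existing `SQLStoreBase` subclass instance and the field values are hypothetical):

```python
import mlrun.common.schemas.model_monitoring as mm_schemas


def write_drift_result(store) -> None:
    """Sketch: write one application result through the updated API."""
    result_event = {
        mm_schemas.WriterEvent.ENDPOINT_ID: "ep-1234",        # hypothetical values
        mm_schemas.WriterEvent.APPLICATION_NAME: "drift-app",
        mm_schemas.ResultData.RESULT_NAME: "data_drift",
        # ...remaining WriterEvent / ResultData fields elided...
    }
    # `kind` now defaults to WriterEventKind.RESULT and the method returns None;
    # METRIC events are currently a no-op for the SQL store (see the TODO above).
    store.write_application_event(event=result_event)
```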
@@ -367,11 +365,11 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         if application_record:
             self._convert_to_datetime(
                 event=event,
-                key=
+                key=mm_schemas.WriterEvent.START_INFER_TIME,
             )
             self._convert_to_datetime(
                 event=event,
-                key=
+                key=mm_schemas.WriterEvent.END_INFER_TIME,
             )
             # Update an existing application result
             self._update(
@@ -381,12 +379,12 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
             )
         else:
             # Write a new application result
-            event[
-
+            event[mm_schemas.EventFieldType.UID] = application_filter_dict[
+                mm_schemas.EventFieldType.UID
             ]
 
             self._write(
-                table=
+                table=mm_schemas.FileTargetKind.APP_RESULTS,
                 event=event,
             )
 
@@ -398,11 +396,11 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
     @staticmethod
     def _generate_application_result_uid(event: dict[str, typing.Any]) -> str:
         return (
-            event[
+            event[mm_schemas.WriterEvent.ENDPOINT_ID]
             + "_"
-            + event[
+            + event[mm_schemas.WriterEvent.APPLICATION_NAME]
             + "_"
-            + event[
+            + event[mm_schemas.ResultData.RESULT_NAME]
         )
 
     def get_last_analyzed(self, endpoint_id: str, application_name: str) -> int:
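For reference, the application-result uid built by `_generate_application_result_uid` is simply the three event fields joined by underscores; a tiny sketch with hypothetical values:

```python
# Layout of the uid used to upsert application results (values are hypothetical):
#   "<endpoint_id>_<application_name>_<result_name>"
uid = "_".join(["ep-1234", "drift-app", "data_drift"])
assert uid == "ep-1234_drift-app_data_drift"
```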
@@ -452,17 +450,17 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         if not monitoring_schedule_record:
             # Add a new record with empty last analyzed value
             self._write(
-                table=
+                table=mm_schemas.FileTargetKind.MONITORING_SCHEDULES,
                 event={
-
-
-
-
+                    mm_schemas.SchedulingKeys.UID: uuid.uuid4().hex,
+                    mm_schemas.SchedulingKeys.APPLICATION_NAME: application_name,
+                    mm_schemas.SchedulingKeys.ENDPOINT_ID: endpoint_id,
+                    mm_schemas.SchedulingKeys.LAST_ANALYZED: last_analyzed,
                 },
             )
 
         self._update(
-            attributes={
+            attributes={mm_schemas.SchedulingKeys.LAST_ANALYZED: last_analyzed},
             table=self.MonitoringSchedulesTable,
             **application_filter_dict,
         )
@@ -558,7 +556,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
 
         # Convert endpoint labels into dictionary
         endpoint_labels = json.loads(
-            endpoint_dict.get(
+            endpoint_dict.get(mm_schemas.EventFieldType.LABELS)
         )
 
         for label in labels:
@@ -585,11 +583,9 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         )
         application_filter_dict = {}
         if endpoint_id:
-            application_filter_dict[
-                endpoint_id
-            )
+            application_filter_dict[mm_schemas.SchedulingKeys.ENDPOINT_ID] = endpoint_id
         if application_name:
-            application_filter_dict[
+            application_filter_dict[mm_schemas.SchedulingKeys.APPLICATION_NAME] = (
                 application_name
             )
         return application_filter_dict
@@ -603,7 +599,7 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
         endpoints = self.list_model_endpoints()
 
         for endpoint_dict in endpoints:
-            endpoint_id = endpoint_dict[
+            endpoint_id = endpoint_dict[mm_schemas.EventFieldType.UID]
 
             # Delete last analyzed records
             self._delete_last_analyzed(endpoint_id=endpoint_id)
@@ -613,3 +609,8 @@ class SQLStoreBase(mlrun.model_monitoring.db.StoreBase):
 
         # Delete model endpoint record
         self.delete_model_endpoint(endpoint_id=endpoint_id)
+
+    def get_model_endpoint_metrics(
+        self, endpoint_id: str, type: mm_schemas.ModelEndpointMonitoringMetricType
+    ) -> list[mm_schemas.ModelEndpointMonitoringMetric]:
+        raise NotImplementedError