mlrun-1.7.0rc14-py3-none-any.whl → mlrun-1.7.0rc16-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mlrun/__init__.py +10 -1
- mlrun/__main__.py +18 -109
- mlrun/{runtimes/mpijob/v1alpha1.py → alerts/__init__.py} +2 -16
- mlrun/alerts/alert.py +141 -0
- mlrun/artifacts/__init__.py +8 -3
- mlrun/artifacts/base.py +36 -253
- mlrun/artifacts/dataset.py +9 -190
- mlrun/artifacts/manager.py +20 -41
- mlrun/artifacts/model.py +8 -140
- mlrun/artifacts/plots.py +14 -375
- mlrun/common/schemas/__init__.py +4 -2
- mlrun/common/schemas/alert.py +46 -4
- mlrun/common/schemas/api_gateway.py +4 -0
- mlrun/common/schemas/artifact.py +15 -0
- mlrun/common/schemas/auth.py +2 -0
- mlrun/common/schemas/model_monitoring/__init__.py +8 -1
- mlrun/common/schemas/model_monitoring/constants.py +40 -4
- mlrun/common/schemas/model_monitoring/model_endpoints.py +73 -2
- mlrun/common/schemas/project.py +2 -0
- mlrun/config.py +7 -4
- mlrun/data_types/to_pandas.py +4 -4
- mlrun/datastore/base.py +41 -9
- mlrun/datastore/datastore_profile.py +54 -4
- mlrun/datastore/inmem.py +2 -2
- mlrun/datastore/sources.py +43 -2
- mlrun/datastore/store_resources.py +2 -6
- mlrun/datastore/targets.py +106 -39
- mlrun/db/base.py +23 -3
- mlrun/db/httpdb.py +101 -47
- mlrun/db/nopdb.py +20 -2
- mlrun/errors.py +5 -0
- mlrun/feature_store/__init__.py +0 -2
- mlrun/feature_store/api.py +12 -47
- mlrun/feature_store/feature_set.py +9 -0
- mlrun/feature_store/retrieval/base.py +9 -4
- mlrun/feature_store/retrieval/conversion.py +4 -4
- mlrun/feature_store/retrieval/dask_merger.py +2 -0
- mlrun/feature_store/retrieval/job.py +2 -0
- mlrun/feature_store/retrieval/local_merger.py +2 -0
- mlrun/feature_store/retrieval/spark_merger.py +5 -0
- mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +5 -10
- mlrun/launcher/base.py +4 -3
- mlrun/launcher/client.py +1 -1
- mlrun/lists.py +4 -2
- mlrun/model.py +25 -11
- mlrun/model_monitoring/__init__.py +1 -1
- mlrun/model_monitoring/api.py +41 -18
- mlrun/model_monitoring/application.py +5 -305
- mlrun/model_monitoring/applications/__init__.py +11 -0
- mlrun/model_monitoring/applications/_application_steps.py +157 -0
- mlrun/model_monitoring/applications/base.py +282 -0
- mlrun/model_monitoring/applications/context.py +214 -0
- mlrun/model_monitoring/applications/evidently_base.py +211 -0
- mlrun/model_monitoring/applications/histogram_data_drift.py +132 -91
- mlrun/model_monitoring/applications/results.py +99 -0
- mlrun/model_monitoring/controller.py +3 -1
- mlrun/model_monitoring/db/__init__.py +2 -0
- mlrun/model_monitoring/db/stores/base/store.py +9 -36
- mlrun/model_monitoring/db/stores/sqldb/models/base.py +7 -6
- mlrun/model_monitoring/db/stores/sqldb/sql_store.py +63 -110
- mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +104 -187
- mlrun/model_monitoring/db/tsdb/__init__.py +71 -0
- mlrun/model_monitoring/db/tsdb/base.py +135 -0
- mlrun/model_monitoring/db/tsdb/v3io/__init__.py +15 -0
- mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +117 -0
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +404 -0
- mlrun/model_monitoring/db/v3io_tsdb_reader.py +134 -0
- mlrun/model_monitoring/evidently_application.py +6 -118
- mlrun/model_monitoring/helpers.py +1 -1
- mlrun/model_monitoring/model_endpoint.py +3 -2
- mlrun/model_monitoring/stream_processing.py +48 -213
- mlrun/model_monitoring/writer.py +101 -121
- mlrun/platforms/__init__.py +10 -9
- mlrun/platforms/iguazio.py +21 -202
- mlrun/projects/operations.py +11 -7
- mlrun/projects/pipelines.py +13 -76
- mlrun/projects/project.py +73 -45
- mlrun/render.py +11 -13
- mlrun/run.py +6 -41
- mlrun/runtimes/__init__.py +3 -3
- mlrun/runtimes/base.py +6 -6
- mlrun/runtimes/funcdoc.py +0 -28
- mlrun/runtimes/kubejob.py +2 -1
- mlrun/runtimes/local.py +1 -1
- mlrun/runtimes/mpijob/__init__.py +0 -20
- mlrun/runtimes/mpijob/v1.py +1 -1
- mlrun/runtimes/nuclio/api_gateway.py +75 -9
- mlrun/runtimes/nuclio/function.py +9 -35
- mlrun/runtimes/pod.py +16 -36
- mlrun/runtimes/remotesparkjob.py +1 -1
- mlrun/runtimes/sparkjob/spark3job.py +1 -1
- mlrun/runtimes/utils.py +1 -39
- mlrun/utils/helpers.py +72 -71
- mlrun/utils/notifications/notification/base.py +1 -1
- mlrun/utils/notifications/notification/slack.py +12 -5
- mlrun/utils/notifications/notification/webhook.py +1 -1
- mlrun/utils/notifications/notification_pusher.py +134 -14
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/METADATA +4 -3
- {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/RECORD +105 -95
- mlrun/kfpops.py +0 -865
- mlrun/platforms/other.py +0 -305
- /mlrun/{runtimes → common/runtimes}/constants.py +0 -0
- {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/WHEEL +0 -0
- {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc14.dist-info → mlrun-1.7.0rc16.dist-info}/top_level.txt +0 -0
mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py (new)
@@ -0,0 +1,117 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import mlrun.feature_store.steps
+from mlrun.common.schemas.model_monitoring import (
+    EventFieldType,
+    EventKeyMetrics,
+    EventLiveStats,
+)
+
+
+class ProcessBeforeTSDB(mlrun.feature_store.steps.MapClass):
+    def __init__(self, **kwargs):
+        """
+        Process the data before writing to TSDB. This step creates a dictionary that includes 3 different dictionaries
+        that each one of them contains important details and stats about the events:
+        1. base_metrics: stats about the average latency and the amount of predictions over time. It is based on
+           storey.AggregateByKey which was executed in step 5.
+        2. endpoint_features: feature names and values along with the prediction names and value.
+        3. custom_metric (opt): optional metrics provided by the user.
+        :returns: Dictionary of 2-3 dictionaries that contains stats and details about the events.
+        """
+        super().__init__(**kwargs)
+
+    def do(self, event):
+        # Compute prediction per second
+        event[EventLiveStats.PREDICTIONS_PER_SECOND] = (
+            float(event[EventLiveStats.PREDICTIONS_COUNT_5M]) / 300
+        )
+        base_fields = [
+            EventFieldType.TIMESTAMP,
+            EventFieldType.ENDPOINT_ID,
+            EventFieldType.ENDPOINT_TYPE,
+        ]
+
+        # Getting event timestamp and endpoint_id
+        base_event = {k: event[k] for k in base_fields}
+
+        # base_metrics includes the stats about the average latency and the amount of predictions over time
+        base_metrics = {
+            EventFieldType.RECORD_TYPE: EventKeyMetrics.BASE_METRICS,
+            EventLiveStats.PREDICTIONS_PER_SECOND: event[
+                EventLiveStats.PREDICTIONS_PER_SECOND
+            ],
+            EventLiveStats.PREDICTIONS_COUNT_5M: event[
+                EventLiveStats.PREDICTIONS_COUNT_5M
+            ],
+            EventLiveStats.PREDICTIONS_COUNT_1H: event[
+                EventLiveStats.PREDICTIONS_COUNT_1H
+            ],
+            EventLiveStats.LATENCY_AVG_5M: event[EventLiveStats.LATENCY_AVG_5M],
+            EventLiveStats.LATENCY_AVG_1H: event[EventLiveStats.LATENCY_AVG_1H],
+            **base_event,
+        }
+
+        # endpoint_features includes the event values of each feature and prediction
+        endpoint_features = {
+            EventFieldType.RECORD_TYPE: EventKeyMetrics.ENDPOINT_FEATURES,
+            **event[EventFieldType.NAMED_PREDICTIONS],
+            **event[EventFieldType.NAMED_FEATURES],
+            **base_event,
+        }
+        # Create a dictionary that includes both base_metrics and endpoint_features
+        processed = {
+            EventKeyMetrics.BASE_METRICS: base_metrics,
+            EventKeyMetrics.ENDPOINT_FEATURES: endpoint_features,
+        }
+
+        # If metrics provided, add another dictionary of custom_metrics values
+        if event[EventFieldType.METRICS]:
+            processed[EventKeyMetrics.CUSTOM_METRICS] = {
+                EventFieldType.RECORD_TYPE: EventKeyMetrics.CUSTOM_METRICS,
+                **event[EventFieldType.METRICS],
+                **base_event,
+            }
+
+        return processed
+
+
+class FilterAndUnpackKeys(mlrun.feature_store.steps.MapClass):
+    def __init__(self, keys, **kwargs):
+        """
+        Create unpacked event dictionary based on provided key metrics (base_metrics, endpoint_features,
+        or custom_metric). Please note that the next step of the TSDB target requires an unpacked dictionary.
+        :param keys: list of key metrics.
+        :returns: An unpacked dictionary of event filtered by the provided key metrics.
+        """
+        super().__init__(**kwargs)
+        self.keys = keys
+
+    def do(self, event):
+        # Keep only the relevant dictionary based on the provided keys
+        new_event = {}
+        for key in self.keys:
+            if key in event:
+                new_event[key] = event[key]
+
+        # Create unpacked dictionary
+        unpacked = {}
+        for key in new_event.keys():
+            if key in self.keys:
+                unpacked = {**unpacked, **new_event[key]}
+            else:
+                unpacked[key] = new_event[key]
+        return unpacked if unpacked else None
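
To make the behavior of the two new steps concrete, here is a minimal standalone sketch of the filter-and-unpack logic, with plain dicts standing in for the mlrun enum constants (the field names below are illustrative, not the actual enum values):

    def filter_and_unpack(event, keys):
        # Keep only the per-record-type dictionaries selected by `keys`
        new_event = {k: event[k] for k in keys if k in event}
        # Merge their contents into one flat dict, as the TSDB target expects
        unpacked = {}
        for value in new_event.values():
            unpacked.update(value)
        return unpacked or None

    # Shape of the dict produced by ProcessBeforeTSDB (illustrative values)
    processed = {
        "base_metrics": {"predictions_per_second": 0.4, "latency_avg_5m": 12.3},
        "endpoint_features": {"feature_0": 1.5, "prediction": 0},
    }
    print(filter_and_unpack(processed, ["base_metrics"]))
    # {'predictions_per_second': 0.4, 'latency_avg_5m': 12.3}
    print(filter_and_unpack(processed, ["custom_metrics"]))
    # None -- such events are dropped downstream by the storey.Filter step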
mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py (new)
@@ -0,0 +1,404 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+
+import pandas as pd
+import v3io_frames.client
+import v3io_frames.errors
+from v3io.dataplane import Client as V3IOClient
+from v3io_frames.frames_pb2 import IGNORE
+
+import mlrun.common.model_monitoring
+import mlrun.common.schemas.model_monitoring as mm_constants
+import mlrun.feature_store.steps
+import mlrun.utils.v3io_clients
+from mlrun.model_monitoring.db import TSDBConnector
+from mlrun.utils import logger
+
+_TSDB_BE = "tsdb"
+_TSDB_RATE = "1/s"
+
+
+class V3IOTSDBConnector(TSDBConnector):
+    """
+    Handles the TSDB operations when the TSDB connector is of type V3IO. To manage these operations we use the V3IO
+    Frames client, which provides an API for executing commands on the V3IO TSDB table.
+    """
+
+    def __init__(
+        self,
+        project: str,
+        access_key: str = None,
+        container: str = "users",
+        v3io_framesd: str = None,
+        create_table: bool = False,
+    ):
+        super().__init__(project=project)
+        self.access_key = access_key or mlrun.mlconf.get_v3io_access_key()
+
+        self.container = container
+
+        self.v3io_framesd = v3io_framesd or mlrun.mlconf.v3io_framesd
+        self._frames_client: v3io_frames.client.ClientBase = (
+            self._get_v3io_frames_client(self.container)
+        )
+        self._v3io_client: V3IOClient = mlrun.utils.v3io_clients.get_v3io_client(
+            endpoint=mlrun.mlconf.v3io_api,
+        )
+
+        self._init_tables_path()
+
+        if create_table:
+            self.create_tsdb_application_tables()
+
+    def _init_tables_path(self):
+        self.tables = {}
+
+        events_table_full_path = mlrun.mlconf.get_model_monitoring_file_target_path(
+            project=self.project,
+            kind=mm_constants.FileTargetKind.EVENTS,
+        )
+        (
+            _,
+            _,
+            events_path,
+        ) = mlrun.common.model_monitoring.helpers.parse_model_endpoint_store_prefix(
+            events_table_full_path
+        )
+        self.tables[mm_constants.MonitoringTSDBTables.EVENTS] = events_path
+
+        monitoring_application_full_path = (
+            mlrun.mlconf.get_model_monitoring_file_target_path(
+                project=self.project,
+                kind=mm_constants.FileTargetKind.MONITORING_APPLICATION,
+            )
+        )
+        (
+            _,
+            _,
+            monitoring_application_path,
+        ) = mlrun.common.model_monitoring.helpers.parse_model_endpoint_store_prefix(
+            monitoring_application_full_path
+        )
+        self.tables[mm_constants.MonitoringTSDBTables.APP_RESULTS] = (
+            monitoring_application_path + mm_constants.MonitoringTSDBTables.APP_RESULTS
+        )
+        self.tables[mm_constants.MonitoringTSDBTables.METRICS] = (
+            monitoring_application_path + mm_constants.MonitoringTSDBTables.METRICS
+        )
+
+    def create_tsdb_application_tables(self):
+        """
+        Create the application tables using the TSDB connector. At the moment we support 2 types of application tables:
+        - app_results: a detailed result that includes status, kind, extra data, etc.
+        - metrics: a basic key value that represents a single numeric metric.
+        """
+        application_tables = [
+            mm_constants.MonitoringTSDBTables.APP_RESULTS,
+            mm_constants.MonitoringTSDBTables.METRICS,
+        ]
+        for table in application_tables:
+            logger.info("Creating table in V3IO TSDB", table=table)
+            self._frames_client.create(
+                backend=_TSDB_BE,
+                table=self.tables[table],
+                if_exists=IGNORE,
+                rate=_TSDB_RATE,
+            )
+
+    def apply_monitoring_stream_steps(
+        self,
+        graph,
+        tsdb_batching_max_events: int = 10,
+        tsdb_batching_timeout_secs: int = 300,
+    ):
+        """
+        Apply TSDB steps on the provided monitoring graph. Throughout these steps, the graph stores live data of
+        different key metric dictionaries. This data is being used by the monitoring dashboards in
+        Grafana. Results can be found under v3io:///users/pipelines/project-name/model-endpoints/events/.
+        In that case, we generate 3 different key metric dictionaries:
+        - base_metrics (average latency and predictions over time)
+        - endpoint_features (prediction and feature names and values)
+        - custom_metrics (user-defined metrics)
+        """
+
+        # Before writing data to TSDB, create dictionary of 2-3 dictionaries that contains
+        # stats and details about the events
+
+        def apply_process_before_tsdb():
+            graph.add_step(
+                "mlrun.model_monitoring.db.tsdb.v3io.stream_graph_steps.ProcessBeforeTSDB",
+                name="ProcessBeforeTSDB",
+                after="sample",
+            )
+
+        apply_process_before_tsdb()
+
+        # Unpack keys from each dictionary and write to TSDB target
+        def apply_filter_and_unpacked_keys(name, keys):
+            graph.add_step(
+                "mlrun.model_monitoring.db.tsdb.v3io.stream_graph_steps.FilterAndUnpackKeys",
+                name=name,
+                after="ProcessBeforeTSDB",
+                keys=[keys],
+            )
+
+        def apply_tsdb_target(name, after):
+            graph.add_step(
+                "storey.TSDBTarget",
+                name=name,
+                after=after,
+                path=f"{self.container}/{self.tables[mm_constants.MonitoringTSDBTables.EVENTS]}",
+                rate="10/m",
+                time_col=mm_constants.EventFieldType.TIMESTAMP,
+                container=self.container,
+                v3io_frames=self.v3io_framesd,
+                infer_columns_from_data=True,
+                index_cols=[
+                    mm_constants.EventFieldType.ENDPOINT_ID,
+                    mm_constants.EventFieldType.RECORD_TYPE,
+                    mm_constants.EventFieldType.ENDPOINT_TYPE,
+                ],
+                max_events=tsdb_batching_max_events,
+                flush_after_seconds=tsdb_batching_timeout_secs,
+                key=mm_constants.EventFieldType.ENDPOINT_ID,
+            )
+
+        # unpacked base_metrics dictionary
+        apply_filter_and_unpacked_keys(
+            name="FilterAndUnpackKeys1",
+            keys=mm_constants.EventKeyMetrics.BASE_METRICS,
+        )
+        apply_tsdb_target(name="tsdb1", after="FilterAndUnpackKeys1")
+
+        # unpacked endpoint_features dictionary
+        apply_filter_and_unpacked_keys(
+            name="FilterAndUnpackKeys2",
+            keys=mm_constants.EventKeyMetrics.ENDPOINT_FEATURES,
+        )
+        apply_tsdb_target(name="tsdb2", after="FilterAndUnpackKeys2")
+
+        # unpacked custom_metrics dictionary. In addition, use storey.Filter to remove None values
+        apply_filter_and_unpacked_keys(
+            name="FilterAndUnpackKeys3",
+            keys=mm_constants.EventKeyMetrics.CUSTOM_METRICS,
+        )
+
+        def apply_storey_filter():
+            graph.add_step(
+                "storey.Filter",
+                "FilterNotNone",
+                after="FilterAndUnpackKeys3",
+                _fn="(event is not None)",
+            )
+
+        apply_storey_filter()
+        apply_tsdb_target(name="tsdb3", after="FilterNotNone")
+
+    def write_application_event(
+        self,
+        event: dict,
+        kind: mm_constants.WriterEventKind = mm_constants.WriterEventKind.RESULT,
+    ):
+        """Write a single result or metric to TSDB"""
+
+        event[mm_constants.WriterEvent.END_INFER_TIME] = (
+            datetime.datetime.fromisoformat(
+                event[mm_constants.WriterEvent.END_INFER_TIME]
+            )
+        )
+
+        if kind == mm_constants.WriterEventKind.METRIC:
+            # TODO: Implement the logic for writing metrics to V3IO TSDB
+            return
+
+        del event[mm_constants.ResultData.RESULT_EXTRA_DATA]
+        try:
+            self._frames_client.write(
+                backend=_TSDB_BE,
+                table=self.tables[mm_constants.MonitoringTSDBTables.APP_RESULTS],
+                dfs=pd.DataFrame.from_records([event]),
+                index_cols=[
+                    mm_constants.WriterEvent.END_INFER_TIME,
+                    mm_constants.WriterEvent.ENDPOINT_ID,
+                    mm_constants.WriterEvent.APPLICATION_NAME,
+                    mm_constants.ResultData.RESULT_NAME,
+                ],
+            )
+            logger.info(
+                "Updated V3IO TSDB successfully",
+                table=self.tables[mm_constants.MonitoringTSDBTables.APP_RESULTS],
+            )
+        except v3io_frames.errors.Error as err:
+            logger.warn(
+                "Could not write drift measures to TSDB",
+                err=err,
+                table=self.tables[mm_constants.MonitoringTSDBTables.APP_RESULTS],
+                event=event,
+            )
+
+            raise mlrun.errors.MLRunRuntimeError(
+                f"Failed to write application result to TSDB: {err}"
+            )
+
+    def delete_tsdb_resources(self, table: str = None):
+        if table:
+            # Delete a specific table
+            tables = [table]
+        else:
+            # Delete all tables
+            tables = mm_constants.MonitoringTSDBTables.list()
+        for table in tables:
+            try:
+                self._frames_client.delete(
+                    backend=mlrun.common.schemas.model_monitoring.TimeSeriesConnector.TSDB,
+                    table=table,
+                )
+            except v3io_frames.errors.DeleteError as e:
+                logger.warning(
+                    f"Failed to delete TSDB table '{table}'",
+                    err=mlrun.errors.err_to_str(e),
+                )
+
+        # Final cleanup of tsdb path
+        tsdb_path = self._get_v3io_source_directory()
+        tsdb_path.replace("://u", ":///u")
+        store, _, _ = mlrun.store_manager.get_or_create_store(tsdb_path)
+        store.rm(tsdb_path, recursive=True)
+
+    def get_model_endpoint_real_time_metrics(
+        self,
+        endpoint_id: str,
+        metrics: list[str],
+        start: str = "now-1h",
+        end: str = "now",
+    ) -> dict[str, list[tuple[str, float]]]:
+        """
+        Getting real time metrics from the TSDB. There are pre-defined metrics for model endpoints such as
+        `predictions_per_second` and `latency_avg_5m` but also custom metrics defined by the user. Note that these
+        metrics are being calculated by the model monitoring stream pod.
+        :param endpoint_id: The unique id of the model endpoint.
+        :param metrics:     A list of real-time metrics to return for the model endpoint.
+        :param start:       The start time of the metrics. Can be represented by a string containing an RFC 3339
+                            time, a Unix timestamp in milliseconds, a relative time (`'now'` or
+                            `'now-[0-9]+[mhd]'`, where `m` = minutes, `h` = hours, `'d'` = days, and
+                            `'s'` = seconds), or 0 for the earliest time.
+        :param end:         The end time of the metrics. Can be represented by a string containing an RFC 3339
+                            time, a Unix timestamp in milliseconds, a relative time (`'now'` or
+                            `'now-[0-9]+[mhd]'`, where `m` = minutes, `h` = hours, `'d'` = days, and
+                            `'s'` = seconds), or 0 for the earliest time.
+        :return:            A dictionary of metrics in which the key is a metric name and the value is a list of
+                            tuples that includes timestamps and the values.
+        """
+
+        if not metrics:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "Metric names must be provided"
+            )
+
+        metrics_mapping = {}
+
+        try:
+            data = self.get_records(
+                table=mm_constants.MonitoringTSDBTables.EVENTS,
+                columns=["endpoint_id", *metrics],
+                filter_query=f"endpoint_id=='{endpoint_id}'",
+                start=start,
+                end=end,
+            )
+
+            # Fill the metrics mapping dictionary with the metric name and values
+            data_dict = data.to_dict()
+            for metric in metrics:
+                metric_data = data_dict.get(metric)
+                if metric_data is None:
+                    continue
+
+                values = [
+                    (str(timestamp), value) for timestamp, value in metric_data.items()
+                ]
+                metrics_mapping[metric] = values
+
+        except v3io_frames.errors.Error as err:
+            logger.warn("Failed to read tsdb", err=err, endpoint=endpoint_id)
+
+        return metrics_mapping
+
+    def get_records(
+        self,
+        table: str,
+        columns: list[str] = None,
+        filter_query: str = "",
+        start: str = "now-1h",
+        end: str = "now",
+    ) -> pd.DataFrame:
+        """
+        Getting records from V3IO TSDB data collection.
+        :param table:        Path to the collection to query.
+        :param columns:      Columns to include in the result.
+        :param filter_query: V3IO filter expression. The expected filter expression includes different conditions,
+                             divided by ' AND '.
+        :param start:        The start time of the metrics. Can be represented by a string containing an RFC 3339
+                             time, a Unix timestamp in milliseconds, a relative time (`'now'` or
+                             `'now-[0-9]+[mhd]'`, where `m` = minutes, `h` = hours, `'d'` = days, and
+                             `'s'` = seconds), or 0 for the earliest time.
+        :param end:          The end time of the metrics. Can be represented by a string containing an RFC 3339
+                             time, a Unix timestamp in milliseconds, a relative time (`'now'` or
+                             `'now-[0-9]+[mhd]'`, where `m` = minutes, `h` = hours, `'d'` = days, and
+                             `'s'` = seconds), or 0 for the earliest time.
+        :return:             DataFrame with the provided attributes from the data collection.
+        :raise:              MLRunNotFoundError if the provided table wasn't found.
+        """
+        if table not in self.tables:
+            raise mlrun.errors.MLRunNotFoundError(
+                f"Table '{table}' does not exist in the tables list of the TSDB connector. "
+                f"Available tables: {list(self.tables.keys())}"
+            )
+        return self._frames_client.read(
+            backend=mlrun.common.schemas.model_monitoring.TimeSeriesConnector.TSDB,
+            table=self.tables[table],
+            columns=columns,
+            filter=filter_query,
+            start=start,
+            end=end,
+        )
+
+    def _get_v3io_source_directory(self) -> str:
+        """
+        Get the V3IO source directory for the current project. Usually the source directory will
+        be under 'v3io:///users/pipelines/<project>'
+
+        :return: The V3IO source directory for the current project.
+        """
+        events_table_full_path = mlrun.mlconf.get_model_monitoring_file_target_path(
+            project=self.project,
+            kind=mm_constants.FileTargetKind.EVENTS,
+        )
+
+        # Generate the main directory with the V3IO resources
+        source_directory = (
+            mlrun.common.model_monitoring.helpers.parse_model_endpoint_project_prefix(
+                events_table_full_path, self.project
+            )
+        )
+
+        return source_directory
+
+    @staticmethod
+    def _get_v3io_frames_client(v3io_container: str) -> v3io_frames.client.ClientBase:
+        return mlrun.utils.v3io_clients.get_frames_client(
+            address=mlrun.mlconf.v3io_framesd,
+            container=v3io_container,
+        )
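
A hedged usage sketch of the new connector, based only on the signatures above (it assumes a V3IO-backed MLRun deployment; the project name and endpoint id are illustrative):

    from mlrun.model_monitoring.db.tsdb.v3io.v3io_connector import V3IOTSDBConnector

    # create_table=True creates the app_results and metrics tables if missing
    connector = V3IOTSDBConnector(project="my-project", create_table=True)

    # Read the last hour of stream-computed metrics for one endpoint
    metrics = connector.get_model_endpoint_real_time_metrics(
        endpoint_id="abc123",
        metrics=["predictions_per_second", "latency_avg_5m"],
        start="now-1h",
        end="now",
    )
    for name, values in metrics.items():
        print(name, values[:3])  # list of (timestamp, value) tuples per metric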
mlrun/model_monitoring/db/v3io_tsdb_reader.py (new)
@@ -0,0 +1,134 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: Move this module into the TSDB abstraction once it is in.
+
+from datetime import datetime
+from io import StringIO
+
+import pandas as pd
+
+import mlrun
+import mlrun.common.schemas.model_monitoring.constants as mm_constants
+import mlrun.model_monitoring.writer as mm_writer
+import mlrun.utils.v3io_clients
+from mlrun.common.schemas.model_monitoring.model_endpoints import (
+    ModelEndpointMonitoringMetric,
+    ModelEndpointMonitoringMetricType,
+    ModelEndpointMonitoringResultNoData,
+    ModelEndpointMonitoringResultValues,
+    _compose_full_name,
+    _ModelEndpointMonitoringResultValuesBase,
+)
+from mlrun.model_monitoring.db.stores.v3io_kv.kv_store import KVStoreBase
+from mlrun.model_monitoring.db.tsdb.v3io.v3io_connector import _TSDB_BE
+from mlrun.utils import logger
+
+
+def _get_sql_query(endpoint_id: str, names: list[tuple[str, str]]) -> str:
+    with StringIO() as query:
+        query.write(
+            f"SELECT * FROM '{mm_constants.MonitoringTSDBTables.APP_RESULTS}' "
+            f"WHERE {mm_writer.WriterEvent.ENDPOINT_ID}='{endpoint_id}'"
+        )
+        if names:
+            query.write(" AND (")
+
+            for i, (app_name, result_name) in enumerate(names):
+                sub_cond = (
+                    f"({mm_writer.WriterEvent.APPLICATION_NAME}='{app_name}' "
+                    f"AND {mm_writer.ResultData.RESULT_NAME}='{result_name}')"
+                )
+                if i != 0:  # not first sub condition
+                    query.write(" OR ")
+                query.write(sub_cond)
+
+            query.write(")")
+
+        query.write(";")
+        return query.getvalue()
+
+
+def _get_result_kind(result_df: pd.DataFrame) -> mm_constants.ResultKindApp:
+    kind_series = result_df[mm_writer.ResultData.RESULT_KIND]
+    unique_kinds = kind_series.unique()
+    if len(unique_kinds) > 1:
+        logger.warning(
+            "The result has more than one kind",
+            kinds=list(unique_kinds),
+            application_name=result_df[mm_writer.WriterEvent.APPLICATION_NAME],
+            result_name=result_df[mm_writer.ResultData.RESULT_NAME],
+        )
+    return unique_kinds[0]
+
+
+def read_data(
+    *,
+    project: str,
+    endpoint_id: str,
+    start: datetime,
+    end: datetime,
+    metrics: list[ModelEndpointMonitoringMetric],
+) -> list[_ModelEndpointMonitoringResultValuesBase]:
+    client = mlrun.utils.v3io_clients.get_frames_client(
+        address=mlrun.mlconf.v3io_framesd,
+        container=KVStoreBase.get_v3io_monitoring_apps_container(project),
+    )
+    df: pd.DataFrame = client.read(
+        backend=_TSDB_BE,
+        query=_get_sql_query(
+            endpoint_id, [(metric.app, metric.name) for metric in metrics]
+        ),
+        start=start,
+        end=end,
+    )
+
+    metrics_without_data = {metric.full_name: metric for metric in metrics}
+
+    metrics_values: list[_ModelEndpointMonitoringResultValuesBase] = []
+    if not df.empty:
+        grouped = df.groupby(
+            [mm_writer.WriterEvent.APPLICATION_NAME, mm_writer.ResultData.RESULT_NAME],
+            observed=False,
+        )
+    else:
+        grouped = []
+    for (app_name, result_name), sub_df in grouped:
+        result_kind = _get_result_kind(sub_df)
+        full_name = _compose_full_name(project=project, app=app_name, name=result_name)
+        metrics_values.append(
+            ModelEndpointMonitoringResultValues(
+                full_name=full_name,
+                type=ModelEndpointMonitoringMetricType.RESULT,
+                result_kind=result_kind,
+                values=list(
+                    zip(
+                        sub_df.index,
+                        sub_df[mm_writer.ResultData.RESULT_VALUE],
+                        sub_df[mm_writer.ResultData.RESULT_STATUS],
+                    )
+                ),  # pyright: ignore[reportArgumentType]
+            )
+        )
+        del metrics_without_data[full_name]
+
+    for metric in metrics_without_data.values():
+        metrics_values.append(
+            ModelEndpointMonitoringResultNoData(
+                full_name=metric.full_name,
+                type=ModelEndpointMonitoringMetricType.RESULT,
+            )
+        )
+
+    return metrics_values
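
For reference, a standalone replica of the query construction in _get_sql_query above, with the mlrun enum values replaced by illustrative literals (the real table and column names come from mm_constants and mm_writer, so the strings below are assumptions):

    from io import StringIO

    def sql_query(endpoint_id, names):
        # Mirrors _get_sql_query: one endpoint filter, plus an OR-chain of
        # (application_name, result_name) pairs
        with StringIO() as query:
            query.write(
                f"SELECT * FROM 'app-results' WHERE endpoint_id='{endpoint_id}'"
            )
            if names:
                query.write(" AND (")
                for i, (app_name, result_name) in enumerate(names):
                    if i != 0:
                        query.write(" OR ")
                    query.write(
                        f"(application_name='{app_name}' AND result_name='{result_name}')"
                    )
                query.write(")")
            query.write(";")
            return query.getvalue()

    print(sql_query("ep1", [("app_a", "drift"), ("app_b", "bias")]))
    # prints (one line):
    # SELECT * FROM 'app-results' WHERE endpoint_id='ep1' AND
    #   ((application_name='app_a' AND result_name='drift') OR
    #    (application_name='app_b' AND result_name='bias'));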