mlrun-1.7.0rc18-py3-none-any.whl → mlrun-1.7.0rc19-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mlrun might be problematic.
- mlrun/__main__.py +5 -2
- mlrun/common/constants.py +64 -3
- mlrun/common/formatters/__init__.py +16 -0
- mlrun/common/formatters/base.py +59 -0
- mlrun/common/formatters/function.py +41 -0
- mlrun/common/runtimes/constants.py +29 -4
- mlrun/common/schemas/__init__.py +0 -1
- mlrun/common/schemas/api_gateway.py +52 -0
- mlrun/common/schemas/frontend_spec.py +1 -0
- mlrun/common/schemas/model_monitoring/__init__.py +6 -3
- mlrun/common/schemas/model_monitoring/constants.py +2 -7
- mlrun/config.py +7 -2
- mlrun/datastore/sources.py +16 -22
- mlrun/datastore/store_resources.py +5 -1
- mlrun/datastore/targets.py +3 -2
- mlrun/datastore/utils.py +42 -0
- mlrun/execution.py +16 -6
- mlrun/feature_store/ingestion.py +7 -6
- mlrun/feature_store/retrieval/job.py +4 -1
- mlrun/frameworks/parallel_coordinates.py +2 -1
- mlrun/frameworks/tf_keras/__init__.py +4 -1
- mlrun/launcher/client.py +4 -2
- mlrun/launcher/local.py +8 -2
- mlrun/launcher/remote.py +8 -2
- mlrun/model.py +5 -1
- mlrun/model_monitoring/db/stores/__init__.py +0 -2
- mlrun/model_monitoring/db/stores/base/store.py +1 -2
- mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +43 -21
- mlrun/model_monitoring/db/stores/sqldb/models/base.py +32 -2
- mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +25 -5
- mlrun/model_monitoring/db/stores/sqldb/models/sqlite.py +5 -0
- mlrun/model_monitoring/db/stores/sqldb/sql_store.py +207 -139
- mlrun/model_monitoring/db/tsdb/__init__.py +1 -1
- mlrun/model_monitoring/db/tsdb/base.py +225 -38
- mlrun/model_monitoring/db/tsdb/helpers.py +30 -0
- mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +48 -15
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +182 -16
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +229 -42
- mlrun/model_monitoring/helpers.py +13 -0
- mlrun/model_monitoring/writer.py +36 -11
- mlrun/projects/operations.py +8 -5
- mlrun/projects/pipelines.py +42 -15
- mlrun/projects/project.py +22 -6
- mlrun/runtimes/base.py +2 -1
- mlrun/runtimes/local.py +4 -1
- mlrun/runtimes/nuclio/api_gateway.py +32 -8
- mlrun/runtimes/nuclio/application/application.py +3 -3
- mlrun/runtimes/nuclio/function.py +1 -4
- mlrun/runtimes/utils.py +5 -6
- mlrun/serving/server.py +2 -1
- mlrun/utils/helpers.py +8 -6
- mlrun/utils/logger.py +28 -1
- mlrun/utils/notifications/notification/__init__.py +14 -9
- mlrun/utils/notifications/notification_pusher.py +10 -3
- mlrun/utils/v3io_clients.py +0 -1
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc18.dist-info → mlrun-1.7.0rc19.dist-info}/METADATA +3 -3
- {mlrun-1.7.0rc18.dist-info → mlrun-1.7.0rc19.dist-info}/RECORD +62 -59
- mlrun/model_monitoring/db/v3io_tsdb_reader.py +0 -335
- {mlrun-1.7.0rc18.dist-info → mlrun-1.7.0rc19.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc18.dist-info → mlrun-1.7.0rc19.dist-info}/WHEEL +0 -0
- {mlrun-1.7.0rc18.dist-info → mlrun-1.7.0rc19.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc18.dist-info → mlrun-1.7.0rc19.dist-info}/top_level.txt +0 -0
mlrun/model_monitoring/db/tsdb/base.py

@@ -11,15 +11,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
-
 
 import typing
-from abc import ABC
+from abc import ABC, abstractmethod
+from datetime import datetime
 
 import pandas as pd
 
-import mlrun.common.schemas.model_monitoring
+import mlrun.common.schemas.model_monitoring as mm_schemas
+import mlrun.model_monitoring.db.tsdb.helpers
+import mlrun.model_monitoring.helpers
+from mlrun.utils import logger
 
 
 class TSDBConnector(ABC):
@@ -59,7 +61,7 @@ class TSDBConnector(ABC):
     def write_application_event(
         self,
         event: dict,
-        kind:
+        kind: mm_schemas.WriterEventKind = mm_schemas.WriterEventKind.RESULT,
     ) -> None:
         """
         Write a single application or metric to TSDB.
@@ -100,39 +102,6 @@ class TSDBConnector(ABC):
         """
         pass
 
-    def get_records(
-        self,
-        table: str,
-        start: str,
-        end: str,
-        columns: typing.Optional[list[str]] = None,
-        filter_query: str = "",
-    ) -> pd.DataFrame:
-        """
-        Getting records from TSDB data collection.
-        :param table:        Table name, e.g. 'metrics', 'app_results'.
-        :param start:        The start time of the metrics.
-                             If using V3IO, can be represented by a string containing an RFC 3339 time, a Unix
-                             timestamp in milliseconds, a relative time (`'now'` or `'now-[0-9]+[mhd]'`, where
-                             `m` = minutes, `h` = hours, `'d'` = days, and `'s'` = seconds), or 0 for the earliest
-                             time.
-                             If using TDEngine, can be represented by datetime.
-        :param end:          The end time of the metrics.
-                             If using V3IO, can be represented by a string containing an RFC 3339 time, a Unix
-                             timestamp in milliseconds, a relative time (`'now'` or `'now-[0-9]+[mhd]'`, where
-                             `m` = minutes, `h` = hours, `'d'` = days, and `'s'` = seconds), or 0 for the earliest
-                             time.
-                             If using TDEngine, can be represented by datetime.
-        :param columns:      Columns to include in the result.
-        :param filter_query: Optional filter expression as a string. The filter structure depends on the TSDB
-                             connector type.
-
-
-        :return: DataFrame with the provided attributes from the data collection.
-        :raise:  MLRunNotFoundError if the provided table wasn't found.
-        """
-        pass
-
     def create_tables(self) -> None:
         """
         Create the TSDB tables using the TSDB connector. At the moment we support 3 types of tables:
@@ -140,3 +109,221 @@ class TSDBConnector(ABC):
         - metrics: a basic key value that represents a numeric metric.
         - predictions: latency of each prediction.
         """
+
+    @abstractmethod
+    def read_metrics_data(
+        self,
+        *,
+        endpoint_id: str,
+        start: datetime,
+        end: datetime,
+        metrics: list[mm_schemas.ModelEndpointMonitoringMetric],
+        type: typing.Literal["metrics", "results"],
+    ) -> typing.Union[
+        list[
+            typing.Union[
+                mm_schemas.ModelEndpointMonitoringResultValues,
+                mm_schemas.ModelEndpointMonitoringMetricNoData,
+            ],
+        ],
+        list[
+            typing.Union[
+                mm_schemas.ModelEndpointMonitoringMetricValues,
+                mm_schemas.ModelEndpointMonitoringMetricNoData,
+            ],
+        ],
+    ]:
+        """
+        Read metrics OR results from the TSDB and return as a list.
+
+        :param endpoint_id: The model endpoint identifier.
+        :param start:       The start time of the query.
+        :param end:         The end time of the query.
+        :param metrics:     The list of metrics to get the values for.
+        :param type:        "metrics" or "results" - the type of each item in metrics.
+        :return:            A list of result values or a list of metric values.
+        """
+
+    @abstractmethod
+    def read_predictions(
+        self,
+        *,
+        endpoint_id: str,
+        start: datetime,
+        end: datetime,
+        aggregation_window: typing.Optional[str] = None,
+        agg_funcs: typing.Optional[list[str]] = None,
+        limit: typing.Optional[int] = None,
+    ) -> typing.Union[
+        mm_schemas.ModelEndpointMonitoringMetricValues,
+        mm_schemas.ModelEndpointMonitoringMetricNoData,
+    ]:
+        """
+        Read the "invocations" metric for the provided model endpoint in the given time range,
+        and return the metric values if any, otherwise signify with the "no data" object.
+
+        :param endpoint_id:        The model endpoint identifier.
+        :param start:              The start time of the query.
+        :param end:                The end time of the query.
+        :param aggregation_window: On what time window length should the invocations be aggregated. If provided,
+                                   the `agg_funcs` must be provided as well. Provided as a string in the format of '1m',
+                                   '1h', etc.
+        :param agg_funcs:          List of aggregation functions to apply on the invocations. If provided, the
+                                   `aggregation_window` must be provided as well. Provided as a list of strings in
+                                   the format of ['sum', 'avg', 'count', ...]
+        :param limit:              The maximum number of records to return.
+
+        :raise mlrun.errors.MLRunInvalidArgumentError: If only one of `aggregation_window` and `agg_funcs` is provided.
+        :return:                   Metric values object or no data object.
+        """
+
+    @abstractmethod
+    def read_prediction_metric_for_endpoint_if_exists(
+        self, endpoint_id: str
+    ) -> typing.Optional[mm_schemas.ModelEndpointMonitoringMetric]:
+        """
+        Read the "invocations" metric for the provided model endpoint, and return the metric object
+        if it exists.
+
+        :param endpoint_id: The model endpoint identifier.
+        :return:            `None` if the invocations metric does not exist, otherwise return the
+                            corresponding metric object.
+        """
+
+    @staticmethod
+    def df_to_metrics_values(
+        *,
+        df: pd.DataFrame,
+        metrics: list[mm_schemas.ModelEndpointMonitoringMetric],
+        project: str,
+    ) -> list[
+        typing.Union[
+            mm_schemas.ModelEndpointMonitoringMetricValues,
+            mm_schemas.ModelEndpointMonitoringMetricNoData,
+        ]
+    ]:
+        """
+        Parse a time-indexed DataFrame of metrics from the TSDB into a list of
+        metrics values per distinct results.
+        When a metric is not found in the DataFrame, it is represented in a no-data object.
+        """
+        metrics_without_data = {metric.full_name: metric for metric in metrics}
+
+        metrics_values: list[
+            typing.Union[
+                mm_schemas.ModelEndpointMonitoringMetricValues,
+                mm_schemas.ModelEndpointMonitoringMetricNoData,
+            ]
+        ] = []
+        if not df.empty:
+            grouped = df.groupby(
+                [
+                    mm_schemas.WriterEvent.APPLICATION_NAME,
+                    mm_schemas.MetricData.METRIC_NAME,
+                ],
+                observed=False,
+            )
+        else:
+            logger.debug("No metrics", missing_metrics=metrics_without_data.keys())
+            grouped = []
+        for (app_name, name), sub_df in grouped:
+            full_name = mlrun.model_monitoring.helpers._compose_full_name(
+                project=project,
+                app=app_name,
+                name=name,
+                type=mm_schemas.ModelEndpointMonitoringMetricType.METRIC,
+            )
+            metrics_values.append(
+                mm_schemas.ModelEndpointMonitoringMetricValues(
+                    full_name=full_name,
+                    values=list(
+                        zip(
+                            sub_df.index,
+                            sub_df[mm_schemas.MetricData.METRIC_VALUE],
+                        )
+                    ),  # pyright: ignore[reportArgumentType]
+                )
+            )
+            del metrics_without_data[full_name]
+
+        for metric in metrics_without_data.values():
+            metrics_values.append(
+                mm_schemas.ModelEndpointMonitoringMetricNoData(
+                    full_name=metric.full_name,
+                    type=mm_schemas.ModelEndpointMonitoringMetricType.METRIC,
+                )
+            )
+
+        return metrics_values
+
+    @staticmethod
+    def df_to_results_values(
+        *,
+        df: pd.DataFrame,
+        metrics: list[mm_schemas.ModelEndpointMonitoringMetric],
+        project: str,
+    ) -> list[
+        typing.Union[
+            mm_schemas.ModelEndpointMonitoringResultValues,
+            mm_schemas.ModelEndpointMonitoringMetricNoData,
+        ]
+    ]:
+        """
+        Parse a time-indexed DataFrame of results from the TSDB into a list of
+        results values per distinct results.
+        When a result is not found in the DataFrame, it is represented in no-data object.
+        """
+        metrics_without_data = {metric.full_name: metric for metric in metrics}
+
+        metrics_values: list[
+            typing.Union[
+                mm_schemas.ModelEndpointMonitoringResultValues,
+                mm_schemas.ModelEndpointMonitoringMetricNoData,
+            ]
+        ] = []
+        if not df.empty:
+            grouped = df.groupby(
+                [
+                    mm_schemas.WriterEvent.APPLICATION_NAME,
+                    mm_schemas.ResultData.RESULT_NAME,
+                ],
+                observed=False,
+            )
+        else:
+            grouped = []
+            logger.debug("No results", missing_results=metrics_without_data.keys())
+        for (app_name, name), sub_df in grouped:
+            result_kind = mlrun.model_monitoring.db.tsdb.helpers._get_result_kind(
+                sub_df
+            )
+            full_name = mlrun.model_monitoring.helpers._compose_full_name(
+                project=project, app=app_name, name=name
+            )
+            metrics_values.append(
+                mm_schemas.ModelEndpointMonitoringResultValues(
+                    full_name=full_name,
+                    result_kind=result_kind,
+                    values=list(
+                        zip(
+                            sub_df.index,
+                            sub_df[mm_schemas.ResultData.RESULT_VALUE],
+                            sub_df[mm_schemas.ResultData.RESULT_STATUS],
+                        )
+                    ),  # pyright: ignore[reportArgumentType]
+                )
+            )
+            del metrics_without_data[full_name]
+
+        for metric in metrics_without_data.values():
+            if metric.full_name == mlrun.model_monitoring.helpers.get_invocations_fqn(
+                project
+            ):
+                continue
+            metrics_values.append(
+                mm_schemas.ModelEndpointMonitoringMetricNoData(
+                    full_name=metric.full_name,
+                    type=mm_schemas.ModelEndpointMonitoringMetricType.RESULT,
+                )
+            )
+
+        return metrics_values
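
Editor's note: the practical effect of this hunk is that the read API (read_metrics_data, read_predictions, read_prediction_metric_for_endpoint_if_exists) is now part of the abstract TSDBConnector contract, while the generic get_records method is removed. Below is a minimal, editor-added sketch of what a custom connector would now have to provide; StubTSDBConnector and its trivial bodies are hypothetical and not part of the package, and it assumes the unchanged base-class constructor still stores the project name on self.project.

# Hypothetical stub connector, for illustration only: it satisfies the new
# abstract read API by reporting "no data" for everything. Real connectors
# (V3IO, TDEngine) query their backends here instead.
import mlrun.common.schemas.model_monitoring as mm_schemas
import mlrun.model_monitoring.helpers
from mlrun.model_monitoring.db import TSDBConnector


class StubTSDBConnector(TSDBConnector):  # hypothetical example class
    def read_metrics_data(self, *, endpoint_id, start, end, metrics, type):
        # Report every requested metric/result as having no data.
        metric_type = (
            mm_schemas.ModelEndpointMonitoringMetricType.METRIC
            if type == "metrics"
            else mm_schemas.ModelEndpointMonitoringMetricType.RESULT
        )
        return [
            mm_schemas.ModelEndpointMonitoringMetricNoData(
                full_name=metric.full_name, type=metric_type
            )
            for metric in metrics
        ]

    def read_predictions(
        self, *, endpoint_id, start, end,
        aggregation_window=None, agg_funcs=None, limit=None,
    ):
        # No invocations recorded in this stub backend.
        return mm_schemas.ModelEndpointMonitoringMetricNoData(
            full_name=mlrun.model_monitoring.helpers.get_invocations_fqn(self.project),
            type=mm_schemas.ModelEndpointMonitoringMetricType.METRIC,
        )

    def read_prediction_metric_for_endpoint_if_exists(self, endpoint_id):
        return None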
mlrun/model_monitoring/db/tsdb/helpers.py (new file)

@@ -0,0 +1,30 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import pandas as pd
+
+import mlrun.common.schemas.model_monitoring as mm_schemas
+from mlrun.utils import logger
+
+
+def _get_result_kind(result_df: pd.DataFrame) -> mm_schemas.ResultKindApp:
+    kind_series = result_df[mm_schemas.ResultData.RESULT_KIND]
+    unique_kinds = kind_series.unique()
+    if len(unique_kinds) > 1:
+        logger.warning(
+            "The result has more than one kind",
+            kinds=list(unique_kinds),
+            application_name=result_df[mm_schemas.WriterEvent.APPLICATION_NAME],
+            result_name=result_df[mm_schemas.ResultData.RESULT_NAME],
+        )
+    return unique_kinds[0]
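
Editor's note: the new helper is what df_to_results_values above uses to recover the kind of each grouped result. The sketch below is an editor-added illustration of the expected input shape; the application and result names and the integer kind values are made-up sample data (in practice the column holds the ResultKindApp values written by the monitoring writer), while the column constants are the same mm_schemas members the helper itself reads.

# Editor-added illustration, not part of the diff.
import pandas as pd

import mlrun.common.schemas.model_monitoring as mm_schemas
from mlrun.model_monitoring.db.tsdb.helpers import _get_result_kind

result_df = pd.DataFrame(
    {
        mm_schemas.WriterEvent.APPLICATION_NAME: ["my-app", "my-app"],
        mm_schemas.ResultData.RESULT_NAME: ["drift-score", "drift-score"],
        mm_schemas.ResultData.RESULT_KIND: [0, 0],  # sample kind values
    }
)

kind = _get_result_kind(result_df)  # -> 0; logs a warning if the kinds disagree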
mlrun/model_monitoring/db/tsdb/tdengine/schemas.py

@@ -15,7 +15,7 @@
 import datetime
 from dataclasses import dataclass
 from io import StringIO
-from typing import Union
+from typing import Optional, Union
 
 import mlrun.common.schemas.model_monitoring as mm_schemas
 import mlrun.common.types
@@ -95,7 +95,7 @@ class TDEngineSchema:
         values: dict[str, Union[str, int, float, datetime.datetime]],
     ) -> str:
         values = " AND ".join(
-            f"{val}
+            f"{val} LIKE '{values[val]}'" for val in self.tags if val in values
         )
         if not values:
             raise mlrun.errors.MLRunInvalidArgumentError(
@@ -114,7 +114,7 @@ class TDEngineSchema:
         values: dict[str, Union[str, int, float, datetime.datetime]],
     ) -> str:
         values = " AND ".join(
-            f"{val}
+            f"{val} LIKE '{values[val]}'" for val in self.tags if val in values
         )
         if not values:
             raise mlrun.errors.MLRunInvalidArgumentError(
@@ -125,33 +125,65 @@ class TDEngineSchema:
     @staticmethod
     def _get_records_query(
         table: str,
-        start:
-        end:
+        start: datetime,
+        end: datetime,
         columns_to_filter: list[str] = None,
-        filter_query: str =
+        filter_query: Optional[str] = None,
+        interval: Optional[str] = None,
+        limit: int = 0,
+        agg_funcs: Optional[list] = None,
+        sliding_window_step: Optional[str] = None,
         timestamp_column: str = "time",
         database: str = _MODEL_MONITORING_DATABASE,
     ) -> str:
+        if agg_funcs and not columns_to_filter:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "`columns_to_filter` must be provided when using aggregate functions"
+            )
+
+        # if aggregate function or interval is provided, the other must be provided as well
+        if interval and not agg_funcs:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "`agg_funcs` must be provided when using interval"
+            )
+
+        if sliding_window_step and not interval:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "`interval` must be provided when using sliding window"
+            )
+
         with StringIO() as query:
             query.write("SELECT ")
-            if
+            if interval:
+                query.write("_wstart, _wend, ")
+            if agg_funcs:
+                query.write(
+                    ", ".join(
+                        [f"{a}({col})" for a in agg_funcs for col in columns_to_filter]
+                    )
+                )
+            elif columns_to_filter:
                 query.write(", ".join(columns_to_filter))
             else:
                 query.write("*")
-            query.write(f"
+            query.write(f" FROM {database}.{table}")
 
             if any([filter_query, start, end]):
-                query.write("
+                query.write(" WHERE ")
                 if filter_query:
-                    query.write(f"{filter_query}
+                    query.write(f"{filter_query} AND ")
                 if start:
-                    query.write(f"{timestamp_column} >= '{start}'" + "
+                    query.write(f"{timestamp_column} >= '{start}'" + " AND ")
                 if end:
                     query.write(f"{timestamp_column} <= '{end}'")
-
-
-
-
+            if interval:
+                query.write(f" INTERVAL({interval})")
+            if sliding_window_step:
+                query.write(f" SLIDING({sliding_window_step})")
+            if limit:
+                query.write(f" LIMIT {limit}")
+            query.write(";")
+            return query.getvalue()
 
 
 @dataclass
@@ -170,6 +202,7 @@ class AppResultTable(TDEngineSchema):
         mm_schemas.WriterEvent.ENDPOINT_ID: _TDEngineColumn.BINARY_64,
         mm_schemas.WriterEvent.APPLICATION_NAME: _TDEngineColumn.BINARY_64,
         mm_schemas.ResultData.RESULT_NAME: _TDEngineColumn.BINARY_64,
+        mm_schemas.ResultData.RESULT_KIND: _TDEngineColumn.INT,
     }
     database = _MODEL_MONITORING_DATABASE
 
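
Editor's note: with the new interval, agg_funcs, sliding_window_step and limit arguments, the query builder can now emit TDengine window queries. The sketch below is editor-added and only illustrates the builder's output for sample inputs; the table name "predictions", database "test_db", endpoint id "ep-1" and the time range are example values, not constants from the package.

# Editor-added sketch, not part of the diff.
from datetime import datetime

import mlrun.model_monitoring.db.tsdb.tdengine.schemas as tdengine_schemas

sql = tdengine_schemas.TDEngineSchema._get_records_query(
    table="predictions",
    start=datetime(2024, 1, 1),
    end=datetime(2024, 1, 2),
    columns_to_filter=["latency"],
    filter_query="endpoint_id='ep-1'",
    interval="10m",
    agg_funcs=["avg"],
    sliding_window_step="5m",
    limit=100,
    database="test_db",
)
# Expected output, per the builder above:
# SELECT _wstart, _wend, avg(latency) FROM test_db.predictions
#   WHERE endpoint_id='ep-1' AND time >= '2024-01-01 00:00:00'
#   AND time <= '2024-01-02 00:00:00' INTERVAL(10m) SLIDING(5m) LIMIT 100;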
mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py

@@ -11,18 +11,18 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
 import typing
+from datetime import datetime
 
 import pandas as pd
 import taosws
 
 import mlrun.common.schemas.model_monitoring as mm_schemas
-import mlrun.model_monitoring.db
 import mlrun.model_monitoring.db.tsdb.tdengine.schemas as tdengine_schemas
 import mlrun.model_monitoring.db.tsdb.tdengine.stream_graph_steps
 from mlrun.model_monitoring.db import TSDBConnector
+from mlrun.model_monitoring.helpers import get_invocations_fqn
 from mlrun.utils import logger
 
 
@@ -184,37 +184,59 @@ class TDEngineConnector(TSDBConnector):
         # Not implemented, use get_records() instead
         pass
 
-    def
+    def _get_records(
         self,
         table: str,
-        start:
-        end:
+        start: datetime,
+        end: datetime,
         columns: typing.Optional[list[str]] = None,
-        filter_query: str =
+        filter_query: typing.Optional[str] = None,
+        interval: typing.Optional[str] = None,
+        agg_funcs: typing.Optional[list] = None,
+        limit: typing.Optional[int] = None,
+        sliding_window_step: typing.Optional[str] = None,
         timestamp_column: str = mm_schemas.EventFieldType.TIME,
     ) -> pd.DataFrame:
         """
         Getting records from TSDB data collection.
-        :param table:
-        :param
-        :param
-
-        :param
-        :param
-
+        :param table:               Either a supertable or a subtable name.
+        :param start:               The start time of the metrics.
+        :param end:                 The end time of the metrics.
+        :param columns:             Columns to include in the result.
+        :param filter_query:        Optional filter expression as a string. TDengine supports SQL-like syntax.
+        :param interval:            The interval to aggregate the data by. Note that if interval is provided,
+                                    `agg_funcs` must bg provided as well. Provided as a string in the format of '1m',
+                                    '1h', etc.
+        :param agg_funcs:           The aggregation functions to apply on the columns. Note that if `agg_funcs` is
+                                    provided, `interval` must bg provided as well. Provided as a list of strings in
+                                    the format of ['sum', 'avg', 'count', ...].
+        :param limit:               The maximum number of records to return.
+        :param sliding_window_step: The time step for which the time window moves forward. Note that if
+                                    `sliding_window_step` is provided, interval must be provided as well. Provided
+                                    as a string in the format of '1m', '1h', etc.
+        :param timestamp_column:    The column name that holds the timestamp index.
 
         :return: DataFrame with the provided attributes from the data collection.
         :raise: MLRunInvalidArgumentError if query the provided table failed.
         """
 
-
+        project_condition = f"project = '{self.project}'"
+        filter_query = (
+            f"{filter_query} AND {project_condition}"
+            if filter_query
+            else project_condition
+        )
 
         full_query = tdengine_schemas.TDEngineSchema._get_records_query(
             table=table,
-            columns_to_filter=columns,
-            filter_query=filter_query,
             start=start,
             end=end,
+            columns_to_filter=columns,
+            filter_query=filter_query,
+            interval=interval,
+            limit=limit,
+            agg_funcs=agg_funcs,
+            sliding_window_step=sliding_window_step,
             timestamp_column=timestamp_column,
            database=self.database,
         )
@@ -229,3 +251,147 @@ class TDEngineConnector(TSDBConnector):
             columns.append(column.name())
 
         return pd.DataFrame(query_result, columns=columns)
+
+    def read_metrics_data(
+        self,
+        *,
+        endpoint_id: str,
+        start: datetime,
+        end: datetime,
+        metrics: list[mm_schemas.ModelEndpointMonitoringMetric],
+        type: typing.Literal["metrics", "results"],
+    ) -> typing.Union[
+        list[
+            typing.Union[
+                mm_schemas.ModelEndpointMonitoringResultValues,
+                mm_schemas.ModelEndpointMonitoringMetricNoData,
+            ],
+        ],
+        list[
+            typing.Union[
+                mm_schemas.ModelEndpointMonitoringMetricValues,
+                mm_schemas.ModelEndpointMonitoringMetricNoData,
+            ],
+        ],
+    ]:
+        if type == "metrics":
+            table = mm_schemas.TDEngineSuperTables.METRICS
+            name = mm_schemas.MetricData.METRIC_NAME
+            df_handler = self.df_to_metrics_values
+        elif type == "results":
+            table = mm_schemas.TDEngineSuperTables.APP_RESULTS
+            name = mm_schemas.ResultData.RESULT_NAME
+            df_handler = self.df_to_results_values
+        else:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                f"Invalid type {type}, must be either 'metrics' or 'results'."
+            )
+
+        metrics_condition = " OR ".join(
+            [
+                f"({mm_schemas.WriterEvent.APPLICATION_NAME} = '{metric.app}' AND {name} = '{metric.name}')"
+                for metric in metrics
+            ]
+        )
+        filter_query = f"endpoint_id='{endpoint_id}' AND ({metrics_condition})"
+
+        df = self._get_records(
+            table=table,
+            start=start,
+            end=end,
+            filter_query=filter_query,
+            timestamp_column=mm_schemas.WriterEvent.END_INFER_TIME,
+        )
+
+        df[mm_schemas.WriterEvent.END_INFER_TIME] = pd.to_datetime(
+            df[mm_schemas.WriterEvent.END_INFER_TIME]
+        )
+        df.set_index(mm_schemas.WriterEvent.END_INFER_TIME, inplace=True)
+
+        logger.debug(
+            "Converting a DataFrame to a list of metrics or results values",
+            table=table,
+            project=self.project,
+            endpoint_id=endpoint_id,
+            is_empty=df.empty,
+        )
+
+        return df_handler(df=df, metrics=metrics, project=self.project)
+
+    def read_predictions(
+        self,
+        *,
+        endpoint_id: str,
+        start: datetime,
+        end: datetime,
+        aggregation_window: typing.Optional[str] = None,
+        agg_funcs: typing.Optional[list] = None,
+        limit: typing.Optional[int] = None,
+    ) -> typing.Union[
+        mm_schemas.ModelEndpointMonitoringMetricValues,
+        mm_schemas.ModelEndpointMonitoringMetricNoData,
+    ]:
+        if (agg_funcs and not aggregation_window) or (
+            aggregation_window and not agg_funcs
+        ):
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "both or neither of `aggregation_window` and `agg_funcs` must be provided"
+            )
+        df = self._get_records(
+            table=mm_schemas.TDEngineSuperTables.PREDICTIONS,
+            start=start,
+            end=end,
+            columns=[mm_schemas.EventFieldType.LATENCY],
+            filter_query=f"endpoint_id='{endpoint_id}'",
+            agg_funcs=agg_funcs,
+            interval=aggregation_window,
+            limit=limit,
+        )
+
+        full_name = get_invocations_fqn(self.project)
+
+        if df.empty:
+            return mm_schemas.ModelEndpointMonitoringMetricNoData(
+                full_name=full_name,
+                type=mm_schemas.ModelEndpointMonitoringMetricType.METRIC,
+            )
+
+        if aggregation_window:
+            # _wend column, which represents the end time of each window, will be used as the time index
+            df["_wend"] = pd.to_datetime(df["_wend"])
+            df.set_index("_wend", inplace=True)
+
+        latency_column = (
+            f"{agg_funcs[0]}({mm_schemas.EventFieldType.LATENCY})"
+            if agg_funcs
+            else mm_schemas.EventFieldType.LATENCY
+        )
+
+        return mm_schemas.ModelEndpointMonitoringMetricValues(
+            full_name=full_name,
+            values=list(
+                zip(
+                    df.index,
+                    df[latency_column],
+                )
+            ),  # pyright: ignore[reportArgumentType]
+        )
+
+    def read_prediction_metric_for_endpoint_if_exists(
+        self, endpoint_id: str
+    ) -> typing.Optional[mm_schemas.ModelEndpointMonitoringMetric]:
+        # Read just one record, because we just want to check if there is any data for this endpoint_id
+        predictions = self.read_predictions(
+            endpoint_id=endpoint_id,
+            start=datetime.min,
+            end=mlrun.utils.now_date(),
+            limit=1,
+        )
+        if predictions:
+            return mm_schemas.ModelEndpointMonitoringMetric(
+                project=self.project,
+                app=mm_schemas.SpecialApps.MLRUN_INFRA,
+                type=mm_schemas.ModelEndpointMonitoringMetricType.METRIC,
+                name=mm_schemas.PredictionsQueryConstants.INVOCATIONS,
+                full_name=get_invocations_fqn(self.project),
+            )
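
Editor's note: the sketch below is an editor-added usage example of the new read API on the TDEngine connector, not part of the diff. It assumes an already-constructed TDEngineConnector instance (its constructor is unchanged by this diff and not shown here); the endpoint id and the 10-minute aggregation window are example values.

# Editor-added sketch: count invocations per 10-minute window over the last day
# for one model endpoint, using the new read_predictions API.
from datetime import timedelta

import mlrun.common.schemas.model_monitoring as mm_schemas
import mlrun.utils
from mlrun.model_monitoring.db.tsdb.tdengine.tdengine_connector import TDEngineConnector


def print_daily_invocations(connector: TDEngineConnector, endpoint_id: str) -> None:
    end = mlrun.utils.now_date()
    start = end - timedelta(days=1)
    invocations = connector.read_predictions(
        endpoint_id=endpoint_id,
        start=start,
        end=end,
        aggregation_window="10m",  # must be passed together with agg_funcs
        agg_funcs=["count"],
    )
    if isinstance(invocations, mm_schemas.ModelEndpointMonitoringMetricNoData):
        print(f"no predictions recorded for {endpoint_id}")
    else:
        # each entry pairs a window-end timestamp with the aggregated count
        print(invocations.full_name, invocations.values)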