mlrun 1.7.0rc28__py3-none-any.whl → 1.7.0rc55__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mlrun might be problematic.
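The comparison can be reproduced locally by downloading both wheels and diffing their unpacked trees. A minimal sketch, assuming pip is on PATH and both versions are still available on PyPI (the helper name below is illustrative, not any registry's tooling):

import pathlib
import subprocess
import zipfile


def fetch_and_unpack(spec: str, dest: str) -> pathlib.Path:
    """Download one wheel with pip (no dependencies) and unpack it for diffing."""
    subprocess.run(["pip", "download", spec, "--no-deps", "-d", dest], check=True)
    wheel = next(pathlib.Path(dest).glob("*.whl"))
    out = pathlib.Path(dest) / "unpacked"
    with zipfile.ZipFile(wheel) as zf:
        zf.extractall(out)
    return out


old = fetch_and_unpack("mlrun==1.7.0rc28", "rc28")
new = fetch_and_unpack("mlrun==1.7.0rc55", "rc55")
# then, e.g.: diff -ruN rc28/unpacked rc55/unpacked

The per-file added/removed line counts follow.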
- mlrun/__main__.py +4 -2
- mlrun/alerts/alert.py +75 -8
- mlrun/artifacts/base.py +1 -0
- mlrun/artifacts/manager.py +9 -2
- mlrun/common/constants.py +4 -1
- mlrun/common/db/sql_session.py +3 -2
- mlrun/common/formatters/__init__.py +1 -0
- mlrun/common/formatters/artifact.py +1 -0
- mlrun/{model_monitoring/application.py → common/formatters/feature_set.py} +20 -6
- mlrun/common/formatters/run.py +3 -0
- mlrun/common/helpers.py +0 -1
- mlrun/common/schemas/__init__.py +3 -1
- mlrun/common/schemas/alert.py +15 -12
- mlrun/common/schemas/api_gateway.py +6 -6
- mlrun/common/schemas/auth.py +5 -0
- mlrun/common/schemas/client_spec.py +0 -1
- mlrun/common/schemas/common.py +7 -4
- mlrun/common/schemas/frontend_spec.py +7 -0
- mlrun/common/schemas/function.py +7 -0
- mlrun/common/schemas/model_monitoring/__init__.py +4 -3
- mlrun/common/schemas/model_monitoring/constants.py +41 -26
- mlrun/common/schemas/model_monitoring/model_endpoints.py +23 -47
- mlrun/common/schemas/notification.py +69 -12
- mlrun/common/schemas/project.py +45 -12
- mlrun/common/schemas/workflow.py +10 -2
- mlrun/common/types.py +1 -0
- mlrun/config.py +91 -35
- mlrun/data_types/data_types.py +6 -1
- mlrun/data_types/spark.py +2 -2
- mlrun/data_types/to_pandas.py +57 -25
- mlrun/datastore/__init__.py +1 -0
- mlrun/datastore/alibaba_oss.py +3 -2
- mlrun/datastore/azure_blob.py +125 -37
- mlrun/datastore/base.py +42 -21
- mlrun/datastore/datastore.py +4 -2
- mlrun/datastore/datastore_profile.py +1 -1
- mlrun/datastore/dbfs_store.py +3 -7
- mlrun/datastore/filestore.py +1 -3
- mlrun/datastore/google_cloud_storage.py +85 -29
- mlrun/datastore/inmem.py +4 -1
- mlrun/datastore/redis.py +1 -0
- mlrun/datastore/s3.py +25 -12
- mlrun/datastore/sources.py +76 -4
- mlrun/datastore/spark_utils.py +30 -0
- mlrun/datastore/storeytargets.py +151 -0
- mlrun/datastore/targets.py +102 -131
- mlrun/datastore/v3io.py +1 -0
- mlrun/db/base.py +15 -6
- mlrun/db/httpdb.py +57 -28
- mlrun/db/nopdb.py +29 -5
- mlrun/errors.py +20 -3
- mlrun/execution.py +46 -5
- mlrun/feature_store/api.py +25 -1
- mlrun/feature_store/common.py +6 -11
- mlrun/feature_store/feature_vector.py +3 -1
- mlrun/feature_store/retrieval/job.py +4 -1
- mlrun/feature_store/retrieval/spark_merger.py +10 -39
- mlrun/feature_store/steps.py +8 -0
- mlrun/frameworks/_common/plan.py +3 -3
- mlrun/frameworks/_ml_common/plan.py +1 -1
- mlrun/frameworks/parallel_coordinates.py +2 -3
- mlrun/frameworks/sklearn/mlrun_interface.py +13 -3
- mlrun/k8s_utils.py +48 -2
- mlrun/launcher/client.py +6 -6
- mlrun/launcher/local.py +2 -2
- mlrun/model.py +215 -34
- mlrun/model_monitoring/api.py +38 -24
- mlrun/model_monitoring/applications/__init__.py +1 -2
- mlrun/model_monitoring/applications/_application_steps.py +60 -29
- mlrun/model_monitoring/applications/base.py +2 -174
- mlrun/model_monitoring/applications/context.py +197 -70
- mlrun/model_monitoring/applications/evidently_base.py +11 -85
- mlrun/model_monitoring/applications/histogram_data_drift.py +21 -16
- mlrun/model_monitoring/applications/results.py +4 -4
- mlrun/model_monitoring/controller.py +110 -282
- mlrun/model_monitoring/db/stores/__init__.py +8 -3
- mlrun/model_monitoring/db/stores/base/store.py +3 -0
- mlrun/model_monitoring/db/stores/sqldb/models/base.py +9 -7
- mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +18 -3
- mlrun/model_monitoring/db/stores/sqldb/sql_store.py +43 -23
- mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +48 -35
- mlrun/model_monitoring/db/tsdb/__init__.py +7 -2
- mlrun/model_monitoring/db/tsdb/base.py +147 -15
- mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +94 -55
- mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +0 -3
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +144 -38
- mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +44 -3
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +246 -57
- mlrun/model_monitoring/helpers.py +70 -50
- mlrun/model_monitoring/stream_processing.py +96 -195
- mlrun/model_monitoring/writer.py +13 -5
- mlrun/package/packagers/default_packager.py +2 -2
- mlrun/projects/operations.py +16 -8
- mlrun/projects/pipelines.py +126 -115
- mlrun/projects/project.py +286 -129
- mlrun/render.py +3 -3
- mlrun/run.py +38 -19
- mlrun/runtimes/__init__.py +19 -8
- mlrun/runtimes/base.py +4 -1
- mlrun/runtimes/daskjob.py +1 -1
- mlrun/runtimes/funcdoc.py +1 -1
- mlrun/runtimes/kubejob.py +6 -6
- mlrun/runtimes/local.py +12 -5
- mlrun/runtimes/nuclio/api_gateway.py +68 -8
- mlrun/runtimes/nuclio/application/application.py +307 -70
- mlrun/runtimes/nuclio/function.py +63 -14
- mlrun/runtimes/nuclio/serving.py +10 -10
- mlrun/runtimes/pod.py +25 -19
- mlrun/runtimes/remotesparkjob.py +2 -5
- mlrun/runtimes/sparkjob/spark3job.py +16 -17
- mlrun/runtimes/utils.py +34 -0
- mlrun/serving/routers.py +2 -5
- mlrun/serving/server.py +37 -19
- mlrun/serving/states.py +30 -3
- mlrun/serving/v2_serving.py +44 -35
- mlrun/track/trackers/mlflow_tracker.py +5 -0
- mlrun/utils/async_http.py +1 -1
- mlrun/utils/db.py +18 -0
- mlrun/utils/helpers.py +150 -36
- mlrun/utils/http.py +1 -1
- mlrun/utils/notifications/notification/__init__.py +0 -1
- mlrun/utils/notifications/notification/webhook.py +8 -1
- mlrun/utils/notifications/notification_pusher.py +1 -1
- mlrun/utils/v3io_clients.py +2 -2
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/METADATA +153 -66
- {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/RECORD +131 -134
- {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/WHEEL +1 -1
- mlrun/feature_store/retrieval/conversion.py +0 -271
- mlrun/model_monitoring/controller_handler.py +0 -37
- mlrun/model_monitoring/evidently_application.py +0 -20
- mlrun/model_monitoring/prometheus.py +0 -216
- {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc28.dist-info → mlrun-1.7.0rc55.dist-info}/top_level.txt +0 -0
--- mlrun/feature_store/retrieval/conversion.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# Copyright 2024 Iguazio
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import warnings
-from collections import Counter
-
-# Copied from https://github.com/apache/spark/blob/v3.2.3/python/pyspark/sql/pandas/conversion.py, with
-# np.bool -> bool and np.object -> object fix backported from pyspark v3.3.3.
-
-
-class PandasConversionMixin:
-    """
-    Min-in for the conversion from Spark to pandas. Currently, only :class:`DataFrame`
-    can use this class.
-    """
-
-    def toPandas(self):
-        """
-        Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
-
-        This is only available if Pandas is installed and available.
-
-        .. versionadded:: 1.3.0
-
-        Notes
-        -----
-        This method should only be used if the resulting Pandas's :class:`DataFrame` is
-        expected to be small, as all the data is loaded into the driver's memory.
-
-        Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.
-
-        Examples
-        --------
-        >>> df.toPandas()  # doctest: +SKIP
-           age   name
-        0    2  Alice
-        1    5    Bob
-        """
-        from pyspark.sql.dataframe import DataFrame
-
-        assert isinstance(self, DataFrame)
-
-        from pyspark.sql.pandas.utils import require_minimum_pandas_version
-
-        require_minimum_pandas_version()
-
-        import numpy as np
-        import pandas as pd
-        from pyspark.sql.types import (
-            BooleanType,
-            IntegralType,
-            MapType,
-            TimestampType,
-        )
-
-        timezone = self.sql_ctx._conf.sessionLocalTimeZone()
-
-        if self.sql_ctx._conf.arrowPySparkEnabled():
-            use_arrow = True
-            try:
-                from pyspark.sql.pandas.types import to_arrow_schema
-                from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
-
-                require_minimum_pyarrow_version()
-                to_arrow_schema(self.schema)
-            except Exception as e:
-                if self.sql_ctx._conf.arrowPySparkFallbackEnabled():
-                    msg = (
-                        "toPandas attempted Arrow optimization because "
-                        "'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, "
-                        f"failed by the reason below:\n {e}\n"
-                        "Attempting non-optimization as "
-                        "'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to "
-                        "true."
-                    )
-                    warnings.warn(msg)
-                    use_arrow = False
-                else:
-                    msg = (
-                        "toPandas attempted Arrow optimization because "
-                        "'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
-                        "reached the error below and will not continue because automatic fallback "
-                        "with 'spark.sql.execution.arrow.pyspark.fallback.enabled' has been set to "
-                        f"false.\n {e}"
-                    )
-                    warnings.warn(msg)
-                    raise
-
-            # Try to use Arrow optimization when the schema is supported and the required version
-            # of PyArrow is found, if 'spark.sql.execution.arrow.pyspark.enabled' is enabled.
-            if use_arrow:
-                try:
-                    import pyarrow
-                    from pyspark.sql.pandas.types import (
-                        _check_series_localize_timestamps,
-                        _convert_map_items_to_dict,
-                    )
-
-                    # Rename columns to avoid duplicated column names.
-                    tmp_column_names = [f"col_{i}" for i in range(len(self.columns))]
-                    self_destruct = self.sql_ctx._conf.arrowPySparkSelfDestructEnabled()
-                    batches = self.toDF(*tmp_column_names)._collect_as_arrow(
-                        split_batches=self_destruct
-                    )
-                    if len(batches) > 0:
-                        table = pyarrow.Table.from_batches(batches)
-                        # Ensure only the table has a reference to the batches, so that
-                        # self_destruct (if enabled) is effective
-                        del batches
-                        # Pandas DataFrame created from PyArrow uses datetime64[ns] for date type
-                        # values, but we should use datetime.date to match the behavior with when
-                        # Arrow optimization is disabled.
-                        pandas_options = {"date_as_object": True}
-                        if self_destruct:
-                            # Configure PyArrow to use as little memory as possible:
-                            # self_destruct - free columns as they are converted
-                            # split_blocks - create a separate Pandas block for each column
-                            # use_threads - convert one column at a time
-                            pandas_options.update(
-                                {
-                                    "self_destruct": True,
-                                    "split_blocks": True,
-                                    "use_threads": False,
-                                }
-                            )
-                        pdf = table.to_pandas(**pandas_options)
-                        # Rename back to the original column names.
-                        pdf.columns = self.columns
-                        for field in self.schema:
-                            if isinstance(field.dataType, TimestampType):
-                                pdf[field.name] = _check_series_localize_timestamps(
-                                    pdf[field.name], timezone
-                                )
-                            elif isinstance(field.dataType, MapType):
-                                pdf[field.name] = _convert_map_items_to_dict(
-                                    pdf[field.name]
-                                )
-                        return pdf
-                    else:
-                        return pd.DataFrame.from_records([], columns=self.columns)
-                except Exception as e:
-                    # We might have to allow fallback here as well but multiple Spark jobs can
-                    # be executed. So, simply fail in this case for now.
-                    msg = (
-                        "toPandas attempted Arrow optimization because "
-                        "'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has "
-                        "reached the error below and can not continue. Note that "
-                        "'spark.sql.execution.arrow.pyspark.fallback.enabled' does not have an "
-                        "effect on failures in the middle of "
-                        f"computation.\n {e}"
-                    )
-                    warnings.warn(msg)
-                    raise
-
-        # Below is toPandas without Arrow optimization.
-        pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
-        column_counter = Counter(self.columns)
-
-        dtype = [None] * len(self.schema)
-        for field_idx, field in enumerate(self.schema):
-            # For duplicate column name, we use `iloc` to access it.
-            if column_counter[field.name] > 1:
-                pandas_col = pdf.iloc[:, field_idx]
-            else:
-                pandas_col = pdf[field.name]
-
-            pandas_type = PandasConversionMixin._to_corrected_pandas_type(
-                field.dataType
-            )
-            # SPARK-21766: if an integer field is nullable and has null values, it can be
-            # inferred by pandas as float column. Once we convert the column with NaN back
-            # to integer type e.g., np.int16, we will hit exception. So we use the inferred
-            # float type, not the corrected type from the schema in this case.
-            if pandas_type is not None and not (
-                isinstance(field.dataType, IntegralType)
-                and field.nullable
-                and pandas_col.isnull().any()
-            ):
-                dtype[field_idx] = pandas_type
-            # Ensure we fall back to nullable numpy types, even when whole column is null:
-            if isinstance(field.dataType, IntegralType) and pandas_col.isnull().any():
-                dtype[field_idx] = np.float64
-            if isinstance(field.dataType, BooleanType) and pandas_col.isnull().any():
-                dtype[field_idx] = object
-
-        df = pd.DataFrame()
-        for index, t in enumerate(dtype):
-            column_name = self.schema[index].name
-
-            # For duplicate column name, we use `iloc` to access it.
-            if column_counter[column_name] > 1:
-                series = pdf.iloc[:, index]
-            else:
-                series = pdf[column_name]
-
-            if t is not None:
-                series = series.astype(t, copy=False)
-
-            # `insert` API makes copy of data, we only do it for Series of duplicate column names.
-            # `pdf.iloc[:, index] = pdf.iloc[:, index]...` doesn't always work because `iloc` could
-            # return a view or a copy depending by context.
-            if column_counter[column_name] > 1:
-                df.insert(index, column_name, series, allow_duplicates=True)
-            else:
-                df[column_name] = series
-
-        pdf = df
-
-        if timezone is None:
-            return pdf
-        else:
-            from pyspark.sql.pandas.types import (
-                _check_series_convert_timestamps_local_tz,
-            )
-
-            for field in self.schema:
-                # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
-                if isinstance(field.dataType, TimestampType):
-                    pdf[field.name] = _check_series_convert_timestamps_local_tz(
-                        pdf[field.name], timezone
-                    )
-            return pdf
-
-    @staticmethod
-    def _to_corrected_pandas_type(dt):
-        """
-        When converting Spark SQL records to Pandas :class:`DataFrame`, the inferred data type
-        may be wrong. This method gets the corrected data type for Pandas if that type may be
-        inferred incorrectly.
-        """
-        import numpy as np
-        from pyspark.sql.types import (
-            BooleanType,
-            ByteType,
-            DoubleType,
-            FloatType,
-            IntegerType,
-            LongType,
-            ShortType,
-            TimestampType,
-        )
-
-        if type(dt) == ByteType:
-            return np.int8
-        elif type(dt) == ShortType:
-            return np.int16
-        elif type(dt) == IntegerType:
-            return np.int32
-        elif type(dt) == LongType:
-            return np.int64
-        elif type(dt) == FloatType:
-            return np.float32
-        elif type(dt) == DoubleType:
-            return np.float64
-        elif type(dt) == BooleanType:
-            return bool
-        elif type(dt) == TimestampType:
-            return np.datetime64
-        else:
-            return None
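For context, a mixin like the one above is typically wired in by patching it onto pyspark's DataFrame class, so that toPandas() picks up the backported dtype fixes. A minimal sketch, assuming pyspark and pandas are installed; the monkey-patch shown is illustrative and not necessarily how mlrun applied it:

from pyspark.sql import DataFrame, SparkSession

# Illustrative only: route DataFrame.toPandas through the mixin's implementation
# (assumes the PandasConversionMixin class from the removed file is in scope).
DataFrame.toPandas = PandasConversionMixin.toPandas

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])
print(df.toPandas())  # same output as the docstring example above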
--- mlrun/model_monitoring/controller_handler.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2023 Iguazio
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import nuclio
-
-import mlrun
-from mlrun.model_monitoring.controller import MonitoringApplicationController
-
-
-def handler(context: nuclio.Context, event: nuclio.Event) -> None:
-    """
-    Run model monitoring application processor
-
-    :param context: the Nuclio context
-    :param event:   trigger event
-    """
-    context.user_data.monitor_app_controller.run(event)
-
-
-def init_context(context):
-    mlrun_context = mlrun.get_or_create_ctx("model_monitoring_controller")
-    mlrun_context.logger.info("Initialize monitoring app controller")
-    monitor_app_controller = MonitoringApplicationController(
-        mlrun_context=mlrun_context,
-        project=mlrun_context.project,
-    )
-    setattr(context.user_data, "monitor_app_controller", monitor_app_controller)
--- mlrun/model_monitoring/evidently_application.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2023 Iguazio
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# TODO : delete this file in 1.9.0
-from mlrun.model_monitoring.applications import (  # noqa: F401
-    _HAS_EVIDENTLY,
-    SUPPORTED_EVIDENTLY_VERSION,
-    EvidentlyModelMonitoringApplicationBase,
-)
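The file above was only a backward-compatibility re-export shim, so its removal drops no functionality; the import it forwarded to was already the direct path in rc28 (assuming the evidently extras are installed):

from mlrun.model_monitoring.applications import (
    EvidentlyModelMonitoringApplicationBase,  # the shim's own target
)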
--- mlrun/model_monitoring/prometheus.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# Copyright 2023 Iguazio
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import prometheus_client
-
-from mlrun.common.schemas.model_monitoring import EventFieldType, PrometheusMetric
-
-# Memory path for Prometheus registry file
-_registry_path = "/tmp/prom-reg.txt"
-
-# Initializing Promethues metric collector registry
-_registry: prometheus_client.CollectorRegistry = prometheus_client.CollectorRegistry()
-
-# The following real-time metrics are being updated through the monitoring stream graph steps
-_prediction_counter: prometheus_client.Counter = prometheus_client.Counter(
-    name=PrometheusMetric.PREDICTIONS_TOTAL,
-    documentation="Counter for total predictions",
-    registry=_registry,
-    labelnames=[
-        EventFieldType.PROJECT,
-        EventFieldType.ENDPOINT_ID,
-        EventFieldType.MODEL,
-        EventFieldType.ENDPOINT_TYPE,
-    ],
-)
-_model_latency: prometheus_client.Summary = prometheus_client.Summary(
-    name=PrometheusMetric.MODEL_LATENCY_SECONDS,
-    documentation="Summary for for model latency",
-    registry=_registry,
-    labelnames=[
-        EventFieldType.PROJECT,
-        EventFieldType.ENDPOINT_ID,
-        EventFieldType.MODEL,
-        EventFieldType.ENDPOINT_TYPE,
-    ],
-)
-_income_features: prometheus_client.Gauge = prometheus_client.Gauge(
-    name=PrometheusMetric.INCOME_FEATURES,
-    documentation="Samples of features and predictions",
-    registry=_registry,
-    labelnames=[
-        EventFieldType.PROJECT,
-        EventFieldType.ENDPOINT_ID,
-        EventFieldType.METRIC,
-    ],
-)
-_error_counter: prometheus_client.Counter = prometheus_client.Counter(
-    name=PrometheusMetric.ERRORS_TOTAL,
-    documentation="Counter for total errors",
-    registry=_registry,
-    labelnames=[
-        EventFieldType.PROJECT,
-        EventFieldType.ENDPOINT_ID,
-        EventFieldType.MODEL,
-    ],
-)
-
-# The following metrics are being updated through the model monitoring batch job
-_batch_metrics: prometheus_client.Gauge = prometheus_client.Gauge(
-    name=PrometheusMetric.DRIFT_METRICS,
-    documentation="Results from the batch drift analysis",
-    registry=_registry,
-    labelnames=[
-        EventFieldType.PROJECT,
-        EventFieldType.ENDPOINT_ID,
-        EventFieldType.METRIC,
-    ],
-)
-_drift_status: prometheus_client.Enum = prometheus_client.Enum(
-    name=PrometheusMetric.DRIFT_STATUS,
-    documentation="Drift status of the model endpoint",
-    registry=_registry,
-    states=["NO_DRIFT", "DRIFT_DETECTED", "POSSIBLE_DRIFT"],
-    labelnames=[EventFieldType.PROJECT, EventFieldType.ENDPOINT_ID],
-)
-
-
-def _write_registry(func):
-    def wrapper(*args, **kwargs):
-        global _registry
-        """A wrapper function to update the registry file each time a metric has been updated"""
-        func(*args, **kwargs)
-        prometheus_client.write_to_textfile(path=_registry_path, registry=_registry)
-
-    return wrapper
-
-
-@_write_registry
-def write_predictions_and_latency_metrics(
-    project: str, endpoint_id: str, latency: int, model_name: str, endpoint_type: int
-):
-    """
-    Update the prediction counter and the latency value of the provided model endpoint within Prometheus registry.
-    Please note that while the prediction counter is ALWAYS increasing by 1,the latency summary metric is being
-    increased by the event latency time. Grafana dashboard will query the average latency time by dividing the total
-    latency value by the total amount of predictions.
-
-    :param project:       Project name.
-    :param endpoint_id:   Model endpoint unique id.
-    :param latency:       Latency time (microsecond) in which the event has been processed through the model server.
-    :param model_name:    Model name which will be used by Grafana for displaying the results by model.
-    :param endpoint_type: Endpoint type that is represented by an int (possible values: 1,2,3) corresponding to the
-                          Enum class :py:class:`~mlrun.common.schemas.model_monitoring.EndpointType`.
-    """
-
-    # Increase the prediction counter by 1
-    _prediction_counter.labels(
-        project=project,
-        endpoint_id=endpoint_id,
-        model=model_name,
-        endpoint_type=endpoint_type,
-    ).inc(1)
-
-    # Increase the latency value according to the provided latency of the current event
-    _model_latency.labels(
-        project=project,
-        endpoint_id=endpoint_id,
-        model=model_name,
-        endpoint_type=endpoint_type,
-    ).observe(latency)
-
-
-@_write_registry
-def write_income_features(project: str, endpoint_id: str, features: dict[str, float]):
-    """Update a sample of features.
-
-    :param project:     Project name.
-    :param endpoint_id: Model endpoint unique id.
-    :param features:    Dictionary in which the key is a feature name and the value is a float number.
-
-
-    """
-
-    for metric in features:
-        _income_features.labels(
-            project=project, endpoint_id=endpoint_id, metric=metric
-        ).set(value=features[metric])
-
-
-@_write_registry
-def write_drift_metrics(project: str, endpoint_id: str, metric: str, value: float):
-    """Update drift metrics that have been calculated through the monitoring batch job
-
-    :param project:     Project name.
-    :param endpoint_id: Model endpoint unique id.
-    :param metric:      Metric name (e.g. TVD, Hellinger).
-    :param value:       Metric value as a float.
-
-    """
-
-    _batch_metrics.labels(project=project, endpoint_id=endpoint_id, metric=metric).set(
-        value=value
-    )
-
-
-@_write_registry
-def write_drift_status(project: str, endpoint_id: str, drift_status: str):
-    """
-    Update the drift status enum for a specific model endpoint.
-
-    :param project:      Project name.
-    :param endpoint_id:  Model endpoint unique id.
-    :param drift_status: Drift status value, can be one of the following: 'NO_DRIFT', 'DRIFT_DETECTED', or
-                         'POSSIBLE_DRIFT'.
-    """
-
-    _drift_status.labels(project=project, endpoint_id=endpoint_id).state(drift_status)
-
-
-@_write_registry
-def write_errors(project: str, endpoint_id: str, model_name: str):
-    """
-    Update the error counter for a specific model endpoint.
-
-    :param project:     Project name.
-    :param endpoint_id: Model endpoint unique id.
-    :param model_name:  Model name. Will be used by Grafana to show the amount of errors per model by time.
-    """
-
-    _error_counter.labels(
-        project=project, endpoint_id=endpoint_id, model=model_name
-    ).inc(1)
-
-
-def get_registry() -> str:
-    """Returns the parsed registry file according to the exposition format of Prometheus."""
-
-    # Read the registry file (note that the text is stored in UTF-8 format)
-    f = open(_registry_path)
-    lines = f.read()
-    f.close()
-
-    # Reset part of the metrics to avoid a repeating scraping of the same value
-    clean_metrics()
-
-    return lines
-
-
-@_write_registry
-def clean_metrics():
-    """Clean the income features values. As these results are relevant only for a certain timestamp, we will remove
-    them from the global registry after they have been scraped by Prometheus."""
-
-    _income_features.clear()
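The removed module followed prometheus_client's textfile pattern: a process-local registry is rewritten to a text file in the Prometheus exposition format on every metric update, and get_registry served that file back to a scraper. A minimal self-contained sketch of the same pattern; the metric name, labels, and path below are illustrative, not mlrun's:

import prometheus_client

registry = prometheus_client.CollectorRegistry()
predictions = prometheus_client.Counter(
    name="predictions_total",  # illustrative name
    documentation="Counter for total predictions",
    labelnames=["project", "endpoint_id"],
    registry=registry,
)

predictions.labels(project="demo", endpoint_id="ep-1").inc()
# Persist the registry in exposition format, as the _write_registry decorator did above
prometheus_client.write_to_textfile(path="/tmp/example-reg.txt", registry=registry)
print(open("/tmp/example-reg.txt").read())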