mlrun 1.5.0rc12__py3-none-any.whl → 1.5.0rc13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic. Click here for more details.

Files changed (45)
  1. mlrun/__main__.py +31 -2
  2. mlrun/api/api/endpoints/functions.py +110 -52
  3. mlrun/api/crud/model_monitoring/deployment.py +208 -38
  4. mlrun/api/crud/model_monitoring/helpers.py +19 -6
  5. mlrun/api/crud/model_monitoring/model_endpoints.py +14 -1
  6. mlrun/api/db/sqldb/db.py +3 -1
  7. mlrun/api/utils/builder.py +2 -4
  8. mlrun/common/model_monitoring/helpers.py +19 -5
  9. mlrun/common/schemas/model_monitoring/constants.py +69 -0
  10. mlrun/common/schemas/model_monitoring/model_endpoints.py +10 -0
  11. mlrun/config.py +30 -12
  12. mlrun/datastore/__init__.py +1 -0
  13. mlrun/datastore/sources.py +4 -30
  14. mlrun/datastore/targets.py +68 -31
  15. mlrun/db/httpdb.py +20 -6
  16. mlrun/feature_store/api.py +3 -31
  17. mlrun/feature_store/feature_vector.py +1 -1
  18. mlrun/feature_store/retrieval/base.py +8 -3
  19. mlrun/launcher/remote.py +3 -3
  20. mlrun/lists.py +11 -0
  21. mlrun/model_monitoring/__init__.py +0 -1
  22. mlrun/model_monitoring/api.py +1 -1
  23. mlrun/model_monitoring/application.py +313 -0
  24. mlrun/model_monitoring/batch_application.py +526 -0
  25. mlrun/model_monitoring/batch_application_handler.py +32 -0
  26. mlrun/model_monitoring/evidently_application.py +89 -0
  27. mlrun/model_monitoring/helpers.py +39 -3
  28. mlrun/model_monitoring/stores/kv_model_endpoint_store.py +37 -0
  29. mlrun/model_monitoring/tracking_policy.py +4 -4
  30. mlrun/model_monitoring/writer.py +37 -0
  31. mlrun/projects/pipelines.py +38 -4
  32. mlrun/projects/project.py +257 -43
  33. mlrun/run.py +5 -2
  34. mlrun/runtimes/__init__.py +2 -0
  35. mlrun/runtimes/function.py +2 -1
  36. mlrun/utils/helpers.py +12 -0
  37. mlrun/utils/http.py +3 -0
  38. mlrun/utils/version/version.json +2 -2
  39. {mlrun-1.5.0rc12.dist-info → mlrun-1.5.0rc13.dist-info}/METADATA +5 -5
  40. {mlrun-1.5.0rc12.dist-info → mlrun-1.5.0rc13.dist-info}/RECORD +45 -40
  41. /mlrun/model_monitoring/{model_monitoring_batch.py → batch.py} +0 -0
  42. {mlrun-1.5.0rc12.dist-info → mlrun-1.5.0rc13.dist-info}/LICENSE +0 -0
  43. {mlrun-1.5.0rc12.dist-info → mlrun-1.5.0rc13.dist-info}/WHEEL +0 -0
  44. {mlrun-1.5.0rc12.dist-info → mlrun-1.5.0rc13.dist-info}/entry_points.txt +0 -0
  45. {mlrun-1.5.0rc12.dist-info → mlrun-1.5.0rc13.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,313 @@
1
+ # Copyright 2023 Iguazio
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #
15
+
16
+ import dataclasses
17
+ import json
18
+ from typing import Any, Dict, List, Tuple, Union
19
+
20
+ import numpy as np
21
+ import pandas as pd
22
+
23
+ import mlrun.common.helpers
24
+ import mlrun.common.schemas.model_monitoring
25
+ import mlrun.utils.v3io_clients
26
+ from mlrun.datastore import get_stream_pusher
27
+ from mlrun.datastore.targets import ParquetTarget
28
+ from mlrun.model_monitoring.helpers import get_stream_path
29
+ from mlrun.serving.utils import StepToDict
30
+ from mlrun.utils import logger
31
+
32
+
33
@dataclasses.dataclass
class ModelMonitoringApplicationResult:
    """
    Result produced by a custom model monitoring application.

    :param application_name:  (str) Name of the model monitoring application.
    :param endpoint_id:       (str) ID of the monitored model endpoint.
    :param schedule_time:     (pd.Timestamp) Timestamp of the monitoring schedule.
    :param result_name:       (str) Name of the application result.
    :param result_value:      (float) Value of the application result.
    :param result_kind:       (ResultKindApp) Kind of application result.
    :param result_status:     (ResultStatusApp) Status of the application result.
    :param result_extra_data: (dict) Extra data associated with the application result.
    """

    application_name: str
    endpoint_id: str
    schedule_time: pd.Timestamp
    result_name: str
    result_value: float
    result_kind: mlrun.common.schemas.model_monitoring.constants.ResultKindApp
    result_status: mlrun.common.schemas.model_monitoring.constants.ResultStatusApp
    result_extra_data: dict

    def to_dict(self):
        """
        Convert the result into the dictionary format expected by the writer.

        :returns: (dict) Dictionary representation of the result.
        """
        writer_event = mlrun.common.schemas.model_monitoring.constants.WriterEvent
        return {
            writer_event.APPLICATION_NAME: self.application_name,
            writer_event.ENDPOINT_ID: self.endpoint_id,
            # Serialize the timestamp as "YYYY-MM-DD HH:MM:SS.ffffff"
            writer_event.SCHEDULE_TIME: self.schedule_time.isoformat(
                sep=" ", timespec="microseconds"
            ),
            writer_event.RESULT_NAME: self.result_name,
            writer_event.RESULT_VALUE: self.result_value,
            # Enum members are stored by their underlying value
            writer_event.RESULT_KIND: self.result_kind.value,
            writer_event.RESULT_STATUS: self.result_status.value,
            # Extra data is an arbitrary dict - serialize it to a JSON string
            writer_event.RESULT_EXTRA_DATA: json.dumps(self.result_extra_data),
        }
78
+
79
+
80
class ModelMonitoringApplication(StepToDict):
    """
    Base class representing a model monitoring application. Subclass this to create
    custom monitoring logic.

    Example of a very simple custom application::

        # mlrun: start-code
        class MyApp(ModelMonitoringApplication):

            def run_application(
                self,
                sample_df_stats: pd.DataFrame,
                feature_stats: pd.DataFrame,
                sample_df: pd.DataFrame,
                schedule_time: pd.Timestamp,
                latest_request: pd.Timestamp,
                endpoint_id: str,
                output_stream_uri: str,
            ) -> typing.Union[ModelMonitoringApplicationResult, typing.List[ModelMonitoringApplicationResult]
            ]:
                self.context.log_artifact(TableArtifact("sample_df_stats", df=sample_df_stats))
                return ModelMonitoringApplicationResult(
                    self.name,
                    endpoint_id,
                    schedule_time,
                    result_name="data_drift_test",
                    result_value=0.5,
                    result_kind=mlrun.common.schemas.model_monitoring.constants.ResultKindApp.data_drift,
                    result_status=mlrun.common.schemas.model_monitoring.constants.ResultStatusApp.detected,
                    result_extra_data={})

        # mlrun: end-code
    """

    kind = "monitoring_application"

    def do(self, event: Dict[str, Any]):
        """
        Process the monitoring event and return application results.

        :param event: (dict) The monitoring event to process.
        :returns: (List[ModelMonitoringApplicationResult]) The application results.
        """
        resolved_args = self._resolve_event(event)
        # Create the logging context once; `hasattr` guards the first call,
        # where `self.context` does not exist yet.
        context_ready = hasattr(self, "context") and isinstance(
            self.context, mlrun.MLClientCtx
        )
        if not context_ready:
            self._lazy_init(app_name=resolved_args[0])
        return self.run_application(*resolved_args)

    def _lazy_init(self, app_name: str):
        # Bind an MLRun context for artifact logging, keyed by the app name.
        self.context = self._create_context_for_logging(app_name=app_name)

    def run_application(
        self,
        application_name: str,
        sample_df_stats: pd.DataFrame,
        feature_stats: pd.DataFrame,
        sample_df: pd.DataFrame,
        schedule_time: pd.Timestamp,
        latest_request: pd.Timestamp,
        endpoint_id: str,
        output_stream_uri: str,
    ) -> Union[
        ModelMonitoringApplicationResult, List[ModelMonitoringApplicationResult]
    ]:
        """
        Implement this method with your custom monitoring logic.

        :param application_name:  (str) The application name.
        :param sample_df_stats:   (pd.DataFrame) The new sample distribution DataFrame.
        :param feature_stats:     (pd.DataFrame) The train sample distribution DataFrame.
        :param sample_df:         (pd.DataFrame) The new sample DataFrame.
        :param schedule_time:     (pd.Timestamp) Timestamp of the monitoring schedule.
        :param latest_request:    (pd.Timestamp) Timestamp of the latest request on this endpoint_id.
        :param endpoint_id:       (str) ID of the monitored model endpoint.
        :param output_stream_uri: (str) URI of the output stream for results.

        :returns: (ModelMonitoringApplicationResult) or
                  (List[ModelMonitoringApplicationResult]) of the application results.
        """
        raise NotImplementedError

    @staticmethod
    def _resolve_event(
        event: Dict[str, Any],
    ) -> Tuple[
        str,
        pd.DataFrame,
        pd.DataFrame,
        pd.DataFrame,
        pd.Timestamp,
        pd.Timestamp,
        str,
        str,
    ]:
        """
        Convert the incoming event into a single tuple used to pass the event
        arguments to the running application.

        :param event: Dictionary with all the incoming data.

        :return: A tuple of:
                 [0] = (str) application name
                 [1] = (pd.DataFrame) current input statistics
                 [2] = (pd.DataFrame) train statistics
                 [3] = (pd.DataFrame) current input data
                 [4] = (pd.Timestamp) timestamp of batch schedule time
                 [5] = (pd.Timestamp) timestamp of the latest request
                 [6] = (str) endpoint id
                 [7] = (str) output stream uri
        """
        app_event = mlrun.common.schemas.model_monitoring.constants.ApplicationEvent
        application_name = event[app_event.APPLICATION_NAME]
        # Statistics arrive as JSON-encoded histogram dicts
        current_stats = ModelMonitoringApplication._dict_to_histogram(
            json.loads(event[app_event.CURRENT_STATS])
        )
        feature_stats = ModelMonitoringApplication._dict_to_histogram(
            json.loads(event[app_event.FEATURE_STATS])
        )
        # The raw sample is referenced by a parquet path rather than inlined
        sample_df = ParquetTarget(path=event[app_event.SAMPLE_PARQUET_PATH]).as_df()
        schedule_time = pd.Timestamp(event[app_event.SCHEDULE_TIME])
        latest_request = pd.Timestamp(event[app_event.LAST_REQUEST])
        return (
            application_name,
            current_stats,
            feature_stats,
            sample_df,
            schedule_time,
            latest_request,
            event[app_event.ENDPOINT_ID],
            event[app_event.OUTPUT_STREAM_URI],
        )

    @staticmethod
    def _create_context_for_logging(app_name: str):
        # Dedicated context so applications can log artifacts under a
        # recognizable "<app>-logger" run.
        return mlrun.get_or_create_ctx(
            f"{app_name}-logger",
            upload_artifacts=True,
            labels={"workflow": "model-monitoring-app-logger"},
        )

    @staticmethod
    def _dict_to_histogram(histogram_dict: Dict[str, Dict[str, Any]]) -> pd.DataFrame:
        """
        Convert a histogram dictionary to a pandas DataFrame with feature
        histograms as columns.

        :param histogram_dict: Histogram dictionary.

        :returns: Histogram dataframe.
        """
        # Normalize each feature's counts into a probability distribution;
        # features without a "hist" entry are skipped.
        feature_histograms = {
            feature: np.array(stats["hist"][0]) / stats["count"]
            for feature, stats in histogram_dict.items()
            if "hist" in stats
        }
        return pd.DataFrame(feature_histograms)
262
+
263
+
264
class PushToMonitoringWriter(StepToDict):
    """Serving-graph step that pushes application results to the monitoring writer stream."""

    kind = "monitoring_application_stream_pusher"

    def __init__(
        self,
        project: str = None,
        writer_application_name: str = None,
        stream_uri: str = None,
        name: str = None,
    ):
        """
        Class for pushing application results to the monitoring writer stream.

        :param project:                 Project name.
        :param writer_application_name: Writer application name.
        :param stream_uri:              Stream URI for pushing results. When not provided,
                                        it is resolved from the project and application name.
        :param name:                    Name of the PushToMonitoringWriter
                                        instance, defaults to PushToMonitoringWriter.
        """
        self.project = project
        self.application_name_to_push = writer_application_name
        self.stream_uri = stream_uri or get_stream_path(
            project=self.project, application_name=self.application_name_to_push
        )
        # The stream pusher is created lazily on the first `do` call
        self.output_stream = None
        self.name = name or "PushToMonitoringWriter"

    def do(
        self,
        event: Union[
            ModelMonitoringApplicationResult, List[ModelMonitoringApplicationResult]
        ],
    ):
        """
        Push application results to the monitoring writer stream.

        :param event: Monitoring result(s) to push.
        """
        self._lazy_init()
        # Normalize a single result into a list so both cases share one path.
        # Use the builtin `list` for the check: `typing.List` as an isinstance
        # argument is a deprecated idiom.
        if not isinstance(event, list):
            event = [event]
        for result in event:
            data = result.to_dict()
            logger.info(f"Pushing data = {data} \n to stream = {self.stream_uri}")
            # Push each result as its own single-record batch
            self.output_stream.push([data])

    def _lazy_init(self):
        # Create the output stream pusher only once, on first use.
        if self.output_stream is None:
            self.output_stream = get_stream_pusher(
                self.stream_uri,
            )