mlrun 1.7.0rc13__py3-none-any.whl → 1.7.0rc21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (156)
  1. mlrun/__init__.py +10 -1
  2. mlrun/__main__.py +23 -111
  3. mlrun/alerts/__init__.py +15 -0
  4. mlrun/alerts/alert.py +144 -0
  5. mlrun/api/schemas/__init__.py +4 -3
  6. mlrun/artifacts/__init__.py +8 -3
  7. mlrun/artifacts/base.py +36 -253
  8. mlrun/artifacts/dataset.py +9 -190
  9. mlrun/artifacts/manager.py +46 -42
  10. mlrun/artifacts/model.py +9 -141
  11. mlrun/artifacts/plots.py +14 -375
  12. mlrun/common/constants.py +65 -3
  13. mlrun/common/formatters/__init__.py +19 -0
  14. mlrun/{runtimes/mpijob/v1alpha1.py → common/formatters/artifact.py} +6 -14
  15. mlrun/common/formatters/base.py +113 -0
  16. mlrun/common/formatters/function.py +46 -0
  17. mlrun/common/formatters/pipeline.py +53 -0
  18. mlrun/common/formatters/project.py +51 -0
  19. mlrun/{runtimes → common/runtimes}/constants.py +32 -4
  20. mlrun/common/schemas/__init__.py +10 -5
  21. mlrun/common/schemas/alert.py +92 -11
  22. mlrun/common/schemas/api_gateway.py +56 -0
  23. mlrun/common/schemas/artifact.py +15 -5
  24. mlrun/common/schemas/auth.py +2 -0
  25. mlrun/common/schemas/client_spec.py +1 -0
  26. mlrun/common/schemas/frontend_spec.py +1 -0
  27. mlrun/common/schemas/function.py +4 -0
  28. mlrun/common/schemas/model_monitoring/__init__.py +15 -3
  29. mlrun/common/schemas/model_monitoring/constants.py +58 -7
  30. mlrun/common/schemas/model_monitoring/grafana.py +9 -5
  31. mlrun/common/schemas/model_monitoring/model_endpoints.py +86 -2
  32. mlrun/common/schemas/pipeline.py +0 -9
  33. mlrun/common/schemas/project.py +6 -11
  34. mlrun/common/types.py +1 -0
  35. mlrun/config.py +36 -8
  36. mlrun/data_types/to_pandas.py +9 -9
  37. mlrun/datastore/base.py +41 -9
  38. mlrun/datastore/datastore.py +6 -2
  39. mlrun/datastore/datastore_profile.py +56 -4
  40. mlrun/datastore/hdfs.py +5 -0
  41. mlrun/datastore/inmem.py +2 -2
  42. mlrun/datastore/redis.py +2 -2
  43. mlrun/datastore/s3.py +5 -0
  44. mlrun/datastore/sources.py +147 -7
  45. mlrun/datastore/store_resources.py +7 -7
  46. mlrun/datastore/targets.py +129 -9
  47. mlrun/datastore/utils.py +42 -0
  48. mlrun/datastore/v3io.py +1 -1
  49. mlrun/db/auth_utils.py +152 -0
  50. mlrun/db/base.py +55 -11
  51. mlrun/db/httpdb.py +346 -107
  52. mlrun/db/nopdb.py +52 -10
  53. mlrun/errors.py +11 -0
  54. mlrun/execution.py +24 -9
  55. mlrun/feature_store/__init__.py +0 -2
  56. mlrun/feature_store/api.py +12 -47
  57. mlrun/feature_store/feature_set.py +9 -0
  58. mlrun/feature_store/feature_vector.py +8 -0
  59. mlrun/feature_store/ingestion.py +7 -6
  60. mlrun/feature_store/retrieval/base.py +9 -4
  61. mlrun/feature_store/retrieval/conversion.py +9 -9
  62. mlrun/feature_store/retrieval/dask_merger.py +2 -0
  63. mlrun/feature_store/retrieval/job.py +9 -3
  64. mlrun/feature_store/retrieval/local_merger.py +2 -0
  65. mlrun/feature_store/retrieval/spark_merger.py +16 -0
  66. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +7 -12
  67. mlrun/frameworks/parallel_coordinates.py +2 -1
  68. mlrun/frameworks/tf_keras/__init__.py +4 -1
  69. mlrun/k8s_utils.py +10 -11
  70. mlrun/launcher/base.py +4 -3
  71. mlrun/launcher/client.py +5 -3
  72. mlrun/launcher/local.py +8 -2
  73. mlrun/launcher/remote.py +8 -2
  74. mlrun/lists.py +6 -2
  75. mlrun/model.py +62 -20
  76. mlrun/model_monitoring/__init__.py +1 -1
  77. mlrun/model_monitoring/api.py +41 -18
  78. mlrun/model_monitoring/application.py +5 -305
  79. mlrun/model_monitoring/applications/__init__.py +11 -0
  80. mlrun/model_monitoring/applications/_application_steps.py +157 -0
  81. mlrun/model_monitoring/applications/base.py +280 -0
  82. mlrun/model_monitoring/applications/context.py +214 -0
  83. mlrun/model_monitoring/applications/evidently_base.py +211 -0
  84. mlrun/model_monitoring/applications/histogram_data_drift.py +132 -91
  85. mlrun/model_monitoring/applications/results.py +99 -0
  86. mlrun/model_monitoring/controller.py +3 -1
  87. mlrun/model_monitoring/db/__init__.py +2 -0
  88. mlrun/model_monitoring/db/stores/__init__.py +0 -2
  89. mlrun/model_monitoring/db/stores/base/store.py +22 -37
  90. mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +43 -21
  91. mlrun/model_monitoring/db/stores/sqldb/models/base.py +39 -8
  92. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +27 -7
  93. mlrun/model_monitoring/db/stores/sqldb/models/sqlite.py +5 -0
  94. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +246 -224
  95. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +232 -216
  96. mlrun/model_monitoring/db/tsdb/__init__.py +100 -0
  97. mlrun/model_monitoring/db/tsdb/base.py +329 -0
  98. mlrun/model_monitoring/db/tsdb/helpers.py +30 -0
  99. mlrun/model_monitoring/db/tsdb/tdengine/__init__.py +15 -0
  100. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +240 -0
  101. mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py +45 -0
  102. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +397 -0
  103. mlrun/model_monitoring/db/tsdb/v3io/__init__.py +15 -0
  104. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +117 -0
  105. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +636 -0
  106. mlrun/model_monitoring/evidently_application.py +6 -118
  107. mlrun/model_monitoring/helpers.py +46 -1
  108. mlrun/model_monitoring/model_endpoint.py +3 -2
  109. mlrun/model_monitoring/stream_processing.py +57 -216
  110. mlrun/model_monitoring/writer.py +134 -124
  111. mlrun/package/utils/_formatter.py +2 -2
  112. mlrun/platforms/__init__.py +10 -9
  113. mlrun/platforms/iguazio.py +21 -202
  114. mlrun/projects/operations.py +19 -12
  115. mlrun/projects/pipelines.py +103 -109
  116. mlrun/projects/project.py +377 -137
  117. mlrun/render.py +15 -14
  118. mlrun/run.py +16 -47
  119. mlrun/runtimes/__init__.py +6 -3
  120. mlrun/runtimes/base.py +8 -7
  121. mlrun/runtimes/databricks_job/databricks_wrapper.py +1 -1
  122. mlrun/runtimes/funcdoc.py +0 -28
  123. mlrun/runtimes/kubejob.py +2 -1
  124. mlrun/runtimes/local.py +5 -2
  125. mlrun/runtimes/mpijob/__init__.py +0 -20
  126. mlrun/runtimes/mpijob/v1.py +1 -1
  127. mlrun/runtimes/nuclio/api_gateway.py +440 -208
  128. mlrun/runtimes/nuclio/application/application.py +170 -8
  129. mlrun/runtimes/nuclio/function.py +39 -49
  130. mlrun/runtimes/pod.py +21 -41
  131. mlrun/runtimes/remotesparkjob.py +9 -3
  132. mlrun/runtimes/sparkjob/spark3job.py +1 -1
  133. mlrun/runtimes/utils.py +6 -45
  134. mlrun/serving/server.py +2 -1
  135. mlrun/serving/states.py +53 -2
  136. mlrun/serving/v2_serving.py +5 -1
  137. mlrun/track/tracker.py +2 -1
  138. mlrun/utils/async_http.py +25 -5
  139. mlrun/utils/helpers.py +107 -75
  140. mlrun/utils/logger.py +39 -7
  141. mlrun/utils/notifications/notification/__init__.py +14 -9
  142. mlrun/utils/notifications/notification/base.py +1 -1
  143. mlrun/utils/notifications/notification/slack.py +61 -13
  144. mlrun/utils/notifications/notification/webhook.py +1 -1
  145. mlrun/utils/notifications/notification_pusher.py +147 -16
  146. mlrun/utils/regex.py +9 -0
  147. mlrun/utils/v3io_clients.py +0 -1
  148. mlrun/utils/version/version.json +2 -2
  149. {mlrun-1.7.0rc13.dist-info → mlrun-1.7.0rc21.dist-info}/METADATA +14 -6
  150. {mlrun-1.7.0rc13.dist-info → mlrun-1.7.0rc21.dist-info}/RECORD +154 -133
  151. mlrun/kfpops.py +0 -865
  152. mlrun/platforms/other.py +0 -305
  153. {mlrun-1.7.0rc13.dist-info → mlrun-1.7.0rc21.dist-info}/LICENSE +0 -0
  154. {mlrun-1.7.0rc13.dist-info → mlrun-1.7.0rc21.dist-info}/WHEEL +0 -0
  155. {mlrun-1.7.0rc13.dist-info → mlrun-1.7.0rc21.dist-info}/entry_points.txt +0 -0
  156. {mlrun-1.7.0rc13.dist-info → mlrun-1.7.0rc21.dist-info}/top_level.txt +0 -0
@@ -22,9 +22,10 @@ import pandas as pd
 
 import mlrun.artifacts
 import mlrun.common.helpers
-import mlrun.common.schemas.model_monitoring.constants as mm_consts
+import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.feature_store
 import mlrun.model_monitoring.application
+import mlrun.model_monitoring.applications as mm_app
 import mlrun.serving
 from mlrun.data_types.infer import InferOptions, get_df_stats
 from mlrun.utils import datetime_now, logger
@@ -48,7 +49,7 @@ def get_or_create_model_endpoint(
     sample_set_statistics: dict[str, typing.Any] = None,
     drift_threshold: float = None,
     possible_drift_threshold: float = None,
-    monitoring_mode: mm_consts.ModelMonitoringMode = mm_consts.ModelMonitoringMode.disabled,
+    monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.disabled,
     db_session=None,
 ) -> ModelEndpoint:
     """
@@ -128,7 +129,7 @@ def record_results(
     context: typing.Optional[mlrun.MLClientCtx] = None,
     infer_results_df: typing.Optional[pd.DataFrame] = None,
     sample_set_statistics: typing.Optional[dict[str, typing.Any]] = None,
-    monitoring_mode: mm_consts.ModelMonitoringMode = mm_consts.ModelMonitoringMode.enabled,
+    monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.enabled,
     # Deprecated arguments:
     drift_threshold: typing.Optional[float] = None,
     possible_drift_threshold: typing.Optional[float] = None,
@@ -282,7 +283,7 @@ def _model_endpoint_validations(
     # drift and possible drift thresholds
     if drift_threshold:
         current_drift_threshold = model_endpoint.spec.monitor_configuration.get(
-            mm_consts.EventFieldType.DRIFT_DETECTED_THRESHOLD,
+            mm_constants.EventFieldType.DRIFT_DETECTED_THRESHOLD,
             mlrun.mlconf.model_endpoint_monitoring.drift_thresholds.default.drift_detected,
         )
         if current_drift_threshold != drift_threshold:
@@ -293,7 +294,7 @@ def _model_endpoint_validations(
 
     if possible_drift_threshold:
         current_possible_drift_threshold = model_endpoint.spec.monitor_configuration.get(
-            mm_consts.EventFieldType.POSSIBLE_DRIFT_THRESHOLD,
+            mm_constants.EventFieldType.POSSIBLE_DRIFT_THRESHOLD,
             mlrun.mlconf.model_endpoint_monitoring.drift_thresholds.default.possible_drift,
         )
         if current_possible_drift_threshold != possible_drift_threshold:
@@ -332,14 +333,14 @@ def write_monitoring_df(
     )
 
     # Modify the DataFrame to the required structure that will be used later by the monitoring batch job
-    if mm_consts.EventFieldType.TIMESTAMP not in infer_results_df.columns:
+    if mm_constants.EventFieldType.TIMESTAMP not in infer_results_df.columns:
         # Initialize timestamp column with the current time
-        infer_results_df[mm_consts.EventFieldType.TIMESTAMP] = infer_datetime
+        infer_results_df[mm_constants.EventFieldType.TIMESTAMP] = infer_datetime
 
     # `endpoint_id` is the monitoring feature set entity and therefore it should be defined as the df index before
     # the ingest process
-    infer_results_df[mm_consts.EventFieldType.ENDPOINT_ID] = endpoint_id
-    infer_results_df.set_index(mm_consts.EventFieldType.ENDPOINT_ID, inplace=True)
+    infer_results_df[mm_constants.EventFieldType.ENDPOINT_ID] = endpoint_id
+    infer_results_df.set_index(mm_constants.EventFieldType.ENDPOINT_ID, inplace=True)
 
     monitoring_feature_set.ingest(source=infer_results_df, overwrite=False)
 
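A minimal standalone sketch (not part of the diff) of the DataFrame normalization that write_monitoring_df performs in the hunk above; the literal column names "timestamp" and "endpoint_id" are assumptions standing in for the mm_constants.EventFieldType values.

    import datetime

    import pandas as pd

    # Hypothetical inference results and metadata
    infer_results_df = pd.DataFrame({"feature_a": [0.1, 0.2], "prediction": [1, 0]})
    infer_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
    endpoint_id = "dummy-endpoint-id"

    # Fill the timestamp column with the inference time when it is missing
    if "timestamp" not in infer_results_df.columns:
        infer_results_df["timestamp"] = infer_datetime

    # Make the endpoint id the index before ingesting into the monitoring feature set
    infer_results_df["endpoint_id"] = endpoint_id
    infer_results_df.set_index("endpoint_id", inplace=True)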
@@ -355,7 +356,7 @@ def _generate_model_endpoint(
     sample_set_statistics: dict[str, typing.Any],
     drift_threshold: float,
     possible_drift_threshold: float,
-    monitoring_mode: mm_consts.ModelMonitoringMode = mm_consts.ModelMonitoringMode.disabled,
+    monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.disabled,
 ) -> ModelEndpoint:
     """
     Write a new model endpoint record.
@@ -394,11 +395,11 @@ def _generate_model_endpoint(
     model_endpoint.spec.model_class = "drift-analysis"
     if drift_threshold:
         model_endpoint.spec.monitor_configuration[
-            mm_consts.EventFieldType.DRIFT_DETECTED_THRESHOLD
+            mm_constants.EventFieldType.DRIFT_DETECTED_THRESHOLD
         ] = drift_threshold
     if possible_drift_threshold:
         model_endpoint.spec.monitor_configuration[
-            mm_consts.EventFieldType.POSSIBLE_DRIFT_THRESHOLD
+            mm_constants.EventFieldType.POSSIBLE_DRIFT_THRESHOLD
         ] = possible_drift_threshold
 
     model_endpoint.spec.monitoring_mode = monitoring_mode
@@ -589,7 +590,10 @@ def _create_model_monitoring_function_base(
     project: str,
     func: typing.Union[str, None] = None,
     application_class: typing.Union[
-        str, mlrun.model_monitoring.application.ModelMonitoringApplicationBase, None
+        str,
+        mlrun.model_monitoring.application.ModelMonitoringApplicationBase,
+        mm_app.ModelMonitoringApplicationBaseV2,
+        None,
     ] = None,
     name: typing.Optional[str] = None,
     image: typing.Optional[str] = None,
@@ -602,6 +606,20 @@
     Note: this is an internal API only.
     This function does not set the labels or mounts v3io.
     """
+    if isinstance(
+        application_class,
+        mlrun.model_monitoring.application.ModelMonitoringApplicationBase,
+    ):
+        warnings.warn(
+            "The `ModelMonitoringApplicationBase` class is deprecated from version 1.7.0, "
+            "please use `ModelMonitoringApplicationBaseV2`. It will be removed in 1.9.0.",
+            FutureWarning,
+        )
+    if name in mm_constants.MonitoringFunctionNames.list():
+        raise mlrun.errors.MLRunInvalidArgumentError(
+            f"An application cannot have the following names: "
+            f"{mm_constants.MonitoringFunctionNames.list()}"
+        )
     if func is None:
         func = ""
     func_obj = typing.cast(
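The hunk above starts emitting a FutureWarning when a deprecated ModelMonitoringApplicationBase application is passed. A hedged sketch of how a caller might escalate that warning to an error (for example in CI) so deprecated applications are caught before the planned 1.9.0 removal:

    import warnings

    # Turn FutureWarnings into errors so deprecated ModelMonitoringApplicationBase
    # usage fails fast instead of only logging a warning.
    with warnings.catch_warnings():
        warnings.simplefilter("error", category=FutureWarning)
        # ... build the model monitoring application function here ...
        pass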
@@ -618,14 +636,19 @@
         ),
     )
     graph = func_obj.set_topology(mlrun.serving.states.StepKinds.flow)
+    prepare_step = graph.to(
+        class_name="mlrun.model_monitoring.applications._application_steps._PrepareMonitoringEvent",
+        name="PrepareMonitoringEvent",
+        application_name=name,
+    )
     if isinstance(application_class, str):
-        first_step = graph.to(class_name=application_class, **application_kwargs)
+        app_step = prepare_step.to(class_name=application_class, **application_kwargs)
     else:
-        first_step = graph.to(class_name=application_class)
-    first_step.to(
-        class_name="mlrun.model_monitoring.application.PushToMonitoringWriter",
+        app_step = prepare_step.to(class_name=application_class)
+    app_step.to(
+        class_name="mlrun.model_monitoring.applications._application_steps._PushToMonitoringWriter",
         name="PushToMonitoringWriter",
         project=project,
-        writer_application_name=mm_consts.MonitoringFunctionNames.WRITER,
+        writer_application_name=mm_constants.MonitoringFunctionNames.WRITER,
     ).respond()
     return func_obj
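A hedged sketch of the three-step serving flow that _create_model_monitoring_function_base now wires (prepare step, application step, writer step). The project, function, and application class names are hypothetical; only the step class paths come from the hunk above.

    import mlrun

    fn = mlrun.code_to_function(
        name="my-monitoring-app", project="my-project", kind="serving", filename="app.py"
    )
    graph = fn.set_topology("flow")
    prepare = graph.to(
        class_name="mlrun.model_monitoring.applications._application_steps._PrepareMonitoringEvent",
        name="PrepareMonitoringEvent",
        application_name="my-monitoring-app",
    )
    app_step = prepare.to(class_name="MyApp")  # hypothetical application class
    app_step.to(
        class_name="mlrun.model_monitoring.applications._application_steps._PushToMonitoringWriter",
        name="PushToMonitoringWriter",
        project="my-project",
    ).respond()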
@@ -12,308 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import dataclasses
-import json
-import re
-from abc import ABC, abstractmethod
-from typing import Any, Optional, Union, cast
-
-import numpy as np
-import pandas as pd
-
-import mlrun.common.helpers
-import mlrun.common.model_monitoring.helpers
-import mlrun.common.schemas.model_monitoring.constants as mm_constant
-import mlrun.utils.v3io_clients
-from mlrun.datastore import get_stream_pusher
-from mlrun.datastore.targets import ParquetTarget
-from mlrun.model_monitoring.helpers import get_stream_path
-from mlrun.serving.utils import StepToDict
-from mlrun.utils import logger
-
-
-@dataclasses.dataclass
-class ModelMonitoringApplicationResult:
-    """
-    Class representing the result of a custom model monitoring application.
-
-    :param name: (str) Name of the application result. This name must be
-        unique for each metric in a single application
-        (name must be of the format [a-zA-Z_][a-zA-Z0-9_]*).
-    :param value: (float) Value of the application result.
-    :param kind: (ResultKindApp) Kind of application result.
-    :param status: (ResultStatusApp) Status of the application result.
-    :param extra_data: (dict) Extra data associated with the application result.
-    """
-
-    name: str
-    value: float
-    kind: mm_constant.ResultKindApp
-    status: mm_constant.ResultStatusApp
-    extra_data: dict = dataclasses.field(default_factory=dict)
-
-    def __post_init__(self):
-        pat = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
-        if not re.fullmatch(pat, self.name):
-            raise mlrun.errors.MLRunInvalidArgumentError(
-                "Attribute name must be of the format [a-zA-Z_][a-zA-Z0-9_]*"
-            )
-
-    def to_dict(self):
-        """
-        Convert the object to a dictionary format suitable for writing.
-
-        :returns: (dict) Dictionary representation of the result.
-        """
-        return {
-            mm_constant.WriterEvent.RESULT_NAME: self.name,
-            mm_constant.WriterEvent.RESULT_VALUE: self.value,
-            mm_constant.WriterEvent.RESULT_KIND: self.kind,
-            mm_constant.WriterEvent.RESULT_STATUS: self.status,
-            mm_constant.WriterEvent.RESULT_EXTRA_DATA: json.dumps(self.extra_data),
-        }
-
-
-class ModelMonitoringApplicationBase(StepToDict, ABC):
-    """
-    A base class for a model monitoring application.
-    Inherit from this class to create a custom model monitoring application.
-
-    example for very simple custom application::
-        # mlrun: start-code
-        class MyApp(ApplicationBase):
-            def do_tracking(
-                self,
-                sample_df_stats: mlrun.common.model_monitoring.helpers.FeatureStats,
-                feature_stats: mlrun.common.model_monitoring.helpers.FeatureStats,
-                start_infer_time: pd.Timestamp,
-                end_infer_time: pd.Timestamp,
-                schedule_time: pd.Timestamp,
-                latest_request: pd.Timestamp,
-                endpoint_id: str,
-                output_stream_uri: str,
-            ) -> ModelMonitoringApplicationResult:
-                self.context.log_artifact(
-                    TableArtifact(
-                        "sample_df_stats", df=self.dict_to_histogram(sample_df_stats)
-                    )
-                )
-                return ModelMonitoringApplicationResult(
-                    name="data_drift_test",
-                    value=0.5,
-                    kind=mm_constant.ResultKindApp.data_drift,
-                    status=mm_constant.ResultStatusApp.detected,
-                )
-
-
-        # mlrun: end-code
-    """
-
-    kind = "monitoring_application"
-
-    def do(
-        self, event: dict[str, Any]
-    ) -> tuple[list[ModelMonitoringApplicationResult], dict]:
-        """
-        Process the monitoring event and return application results.
-
-        :param event: (dict) The monitoring event to process.
-        :returns: (list[ModelMonitoringApplicationResult], dict) The application results
-            and the original event for the application.
-        """
-        resolved_event = self._resolve_event(event)
-        if not (
-            hasattr(self, "context") and isinstance(self.context, mlrun.MLClientCtx)
-        ):
-            self._lazy_init(app_name=resolved_event[0])
-        results = self.do_tracking(*resolved_event)
-        results = results if isinstance(results, list) else [results]
-        return results, event
-
-    def _lazy_init(self, app_name: str):
-        self.context = cast(
-            mlrun.MLClientCtx, self._create_context_for_logging(app_name=app_name)
-        )
-
-    @abstractmethod
-    def do_tracking(
-        self,
-        application_name: str,
-        sample_df_stats: mlrun.common.model_monitoring.helpers.FeatureStats,
-        feature_stats: mlrun.common.model_monitoring.helpers.FeatureStats,
-        sample_df: pd.DataFrame,
-        start_infer_time: pd.Timestamp,
-        end_infer_time: pd.Timestamp,
-        latest_request: pd.Timestamp,
-        endpoint_id: str,
-        output_stream_uri: str,
-    ) -> Union[
-        ModelMonitoringApplicationResult, list[ModelMonitoringApplicationResult]
-    ]:
-        """
-        Implement this method with your custom monitoring logic.
-
-        :param application_name: (str) the app name
-        :param sample_df_stats: (FeatureStats) The new sample distribution dictionary.
-        :param feature_stats: (FeatureStats) The train sample distribution dictionary.
-        :param sample_df: (pd.DataFrame) The new sample DataFrame.
-        :param start_infer_time: (pd.Timestamp) Start time of the monitoring schedule.
-        :param end_infer_time: (pd.Timestamp) End time of the monitoring schedule.
-        :param latest_request: (pd.Timestamp) Timestamp of the latest request on this endpoint_id.
-        :param endpoint_id: (str) ID of the monitored model endpoint
-        :param output_stream_uri: (str) URI of the output stream for results
-
-        :returns: (ModelMonitoringApplicationResult) or
-            (list[ModelMonitoringApplicationResult]) of the application results.
-        """
-        raise NotImplementedError
-
-    @classmethod
-    def _resolve_event(
-        cls,
-        event: dict[str, Any],
-    ) -> tuple[
-        str,
-        mlrun.common.model_monitoring.helpers.FeatureStats,
-        mlrun.common.model_monitoring.helpers.FeatureStats,
-        pd.DataFrame,
-        pd.Timestamp,
-        pd.Timestamp,
-        pd.Timestamp,
-        str,
-        str,
-    ]:
-        """
-        Converting the event into a single tuple that will be used for passing the event arguments to the running
-        application
-
-        :param event: dictionary with all the incoming data
-
-        :return: A tuple of:
-            [0] = (str) application name
-            [1] = (dict) current input statistics
-            [2] = (dict) train statistics
-            [3] = (pd.DataFrame) current input data
-            [4] = (pd.Timestamp) start time of the monitoring schedule
-            [5] = (pd.Timestamp) end time of the monitoring schedule
-            [6] = (pd.Timestamp) timestamp of the latest request
-            [7] = (str) endpoint id
-            [8] = (str) output stream uri
-        """
-        start_time = pd.Timestamp(event[mm_constant.ApplicationEvent.START_INFER_TIME])
-        end_time = pd.Timestamp(event[mm_constant.ApplicationEvent.END_INFER_TIME])
-        return (
-            event[mm_constant.ApplicationEvent.APPLICATION_NAME],
-            json.loads(event[mm_constant.ApplicationEvent.CURRENT_STATS]),
-            json.loads(event[mm_constant.ApplicationEvent.FEATURE_STATS]),
-            ParquetTarget(
-                path=event[mm_constant.ApplicationEvent.SAMPLE_PARQUET_PATH]
-            ).as_df(
-                start_time=start_time,
-                end_time=end_time,
-                time_column=mm_constant.FeatureSetFeatures.time_stamp(),
-            ),
-            start_time,
-            end_time,
-            pd.Timestamp(event[mm_constant.ApplicationEvent.LAST_REQUEST]),
-            event[mm_constant.ApplicationEvent.ENDPOINT_ID],
-            event[mm_constant.ApplicationEvent.OUTPUT_STREAM_URI],
-        )
-
-    @staticmethod
-    def _create_context_for_logging(app_name: str):
-        context = mlrun.get_or_create_ctx(
-            f"{app_name}-logger",
-            upload_artifacts=True,
-            labels={"workflow": "model-monitoring-app-logger"},
-        )
-        return context
-
-    @staticmethod
-    def dict_to_histogram(
-        histogram_dict: mlrun.common.model_monitoring.helpers.FeatureStats,
-    ) -> pd.DataFrame:
-        """
-        Convert histogram dictionary to pandas DataFrame with feature histograms as columns
-
-        :param histogram_dict: Histogram dictionary
-
-        :returns: Histogram dataframe
-        """
-
-        # Create a dictionary with feature histograms as values
-        histograms = {}
-        for feature, stats in histogram_dict.items():
-            if "hist" in stats:
-                # Normalize to probability distribution of each feature
-                histograms[feature] = np.array(stats["hist"][0]) / stats["count"]
-
-        # Convert the dictionary to pandas DataFrame
-        histograms = pd.DataFrame(histograms)
-
-        return histograms
-
-
-class PushToMonitoringWriter(StepToDict):
-    kind = "monitoring_application_stream_pusher"
-
-    def __init__(
-        self,
-        project: Optional[str] = None,
-        writer_application_name: Optional[str] = None,
-        stream_uri: Optional[str] = None,
-        name: Optional[str] = None,
-    ):
-        """
-        Class for pushing application results to the monitoring writer stream.
-
-        :param project: Project name.
-        :param writer_application_name: Writer application name.
-        :param stream_uri: Stream URI for pushing results.
-        :param name: Name of the PushToMonitoringWriter
-            instance default to PushToMonitoringWriter.
-        """
-        self.project = project
-        self.application_name_to_push = writer_application_name
-        self.stream_uri = stream_uri or get_stream_path(
-            project=self.project, function_name=self.application_name_to_push
-        )
-        self.output_stream = None
-        self.name = name or "PushToMonitoringWriter"
-
-    def do(self, event: tuple[list[ModelMonitoringApplicationResult], dict]) -> None:
-        """
-        Push application results to the monitoring writer stream.
-
-        :param event: Monitoring result(s) to push and the original event from the controller.
-        """
-        self._lazy_init()
-        application_results, application_event = event
-        metadata = {
-            mm_constant.WriterEvent.APPLICATION_NAME: application_event[
-                mm_constant.ApplicationEvent.APPLICATION_NAME
-            ],
-            mm_constant.WriterEvent.ENDPOINT_ID: application_event[
-                mm_constant.ApplicationEvent.ENDPOINT_ID
-            ],
-            mm_constant.WriterEvent.START_INFER_TIME: application_event[
-                mm_constant.ApplicationEvent.START_INFER_TIME
-            ],
-            mm_constant.WriterEvent.END_INFER_TIME: application_event[
-                mm_constant.ApplicationEvent.END_INFER_TIME
-            ],
-            mm_constant.WriterEvent.CURRENT_STATS: json.dumps(
-                application_event[mm_constant.ApplicationEvent.CURRENT_STATS]
-            ),
-        }
-        for result in application_results:
-            data = result.to_dict()
-            data.update(metadata)
-            logger.info(f"Pushing data = {data} \n to stream = {self.stream_uri}")
-            self.output_stream.push([data])
-
-    def _lazy_init(self):
-        if self.output_stream is None:
-            self.output_stream = get_stream_pusher(
-                self.stream_uri,
-            )
+# TODO : delete this file in 1.9.0
+from mlrun.model_monitoring.applications import (  # noqa: F401
+    ModelMonitoringApplicationBase,
+    ModelMonitoringApplicationResult,
+)
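After this change the old mlrun.model_monitoring.application module is only a re-export shim, so existing imports keep resolving until the planned 1.9.0 removal. A minimal sketch:

    # Both paths resolve to the same class; the old module merely re-exports it.
    from mlrun.model_monitoring.application import ModelMonitoringApplicationBase as OldPath
    from mlrun.model_monitoring.applications import ModelMonitoringApplicationBase as NewPath

    assert OldPath is NewPath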
@@ -11,3 +11,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+#
+
+from .base import ModelMonitoringApplicationBase, ModelMonitoringApplicationBaseV2
+from .context import MonitoringApplicationContext
+from .evidently_base import (
+    _HAS_EVIDENTLY,
+    SUPPORTED_EVIDENTLY_VERSION,
+    EvidentlyModelMonitoringApplicationBase,
+    EvidentlyModelMonitoringApplicationBaseV2,
+)
+from .results import ModelMonitoringApplicationMetric, ModelMonitoringApplicationResult
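A hedged sketch of constructing one of the newly exported result objects, assuming the new results module keeps the constructor fields of the removed dataclass shown earlier in this diff (name, value, kind, status):

    import mlrun.common.schemas.model_monitoring.constants as mm_constants
    from mlrun.model_monitoring.applications import ModelMonitoringApplicationResult

    result = ModelMonitoringApplicationResult(
        name="data_drift_test",  # must match [a-zA-Z_][a-zA-Z0-9_]*
        value=0.5,
        kind=mm_constants.ResultKindApp.data_drift,
        status=mm_constants.ResultStatusApp.detected,
    )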
@@ -0,0 +1,157 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import typing
+from typing import Optional
+
+import mlrun.common.helpers
+import mlrun.common.model_monitoring.helpers
+import mlrun.common.schemas.model_monitoring.constants as mm_constant
+import mlrun.datastore
+import mlrun.utils.v3io_clients
+from mlrun.model_monitoring.helpers import get_stream_path
+from mlrun.serving.utils import StepToDict
+from mlrun.utils import logger
+
+from .context import MonitoringApplicationContext
+from .results import ModelMonitoringApplicationMetric, ModelMonitoringApplicationResult
+
+
+class _PushToMonitoringWriter(StepToDict):
+    kind = "monitoring_application_stream_pusher"
+
+    def __init__(
+        self,
+        project: Optional[str] = None,
+        writer_application_name: Optional[str] = None,
+        stream_uri: Optional[str] = None,
+        name: Optional[str] = None,
+    ):
+        """
+        Class for pushing application results to the monitoring writer stream.
+
+        :param project: Project name.
+        :param writer_application_name: Writer application name.
+        :param stream_uri: Stream URI for pushing results.
+        :param name: Name of the PushToMonitoringWriter
+            instance default to PushToMonitoringWriter.
+        """
+        self.project = project
+        self.application_name_to_push = writer_application_name
+        self.stream_uri = stream_uri or get_stream_path(
+            project=self.project, function_name=self.application_name_to_push
+        )
+        self.output_stream = None
+        self.name = name or "PushToMonitoringWriter"
+
+    def do(
+        self,
+        event: tuple[
+            list[
+                typing.Union[
+                    ModelMonitoringApplicationResult, ModelMonitoringApplicationMetric
+                ]
+            ],
+            MonitoringApplicationContext,
+        ],
+    ) -> None:
+        """
+        Push application results to the monitoring writer stream.
+
+        :param event: Monitoring result(s) to push and the original event from the controller.
+        """
+        self._lazy_init()
+        application_results, application_context = event
+        writer_event = {
+            mm_constant.WriterEvent.APPLICATION_NAME: application_context.application_name,
+            mm_constant.WriterEvent.ENDPOINT_ID: application_context.endpoint_id,
+            mm_constant.WriterEvent.START_INFER_TIME: application_context.start_infer_time.isoformat(
+                sep=" ", timespec="microseconds"
+            ),
+            mm_constant.WriterEvent.END_INFER_TIME: application_context.end_infer_time.isoformat(
+                sep=" ", timespec="microseconds"
+            ),
+        }
+        for result in application_results:
+            data = result.to_dict()
+            if isinstance(result, ModelMonitoringApplicationResult):
+                writer_event[mm_constant.WriterEvent.EVENT_KIND] = (
+                    mm_constant.WriterEventKind.RESULT
+                )
+                data[mm_constant.ResultData.CURRENT_STATS] = json.dumps(
+                    application_context.sample_df_stats
+                )
+                writer_event[mm_constant.WriterEvent.DATA] = json.dumps(data)
+            else:
+                writer_event[mm_constant.WriterEvent.EVENT_KIND] = (
+                    mm_constant.WriterEventKind.METRIC
+                )
+                writer_event[mm_constant.WriterEvent.DATA] = json.dumps(data)
+
+            writer_event[mm_constant.WriterEvent.EVENT_KIND] = (
+                mm_constant.WriterEventKind.RESULT
+                if isinstance(result, ModelMonitoringApplicationResult)
+                else mm_constant.WriterEventKind.METRIC
+            )
+            logger.info(
+                f"Pushing data = {writer_event} \n to stream = {self.stream_uri}"
+            )
+            self.output_stream.push([writer_event])
+
+    def _lazy_init(self):
+        if self.output_stream is None:
+            self.output_stream = mlrun.datastore.get_stream_pusher(
+                self.stream_uri,
+            )
+
+
+class _PrepareMonitoringEvent(StepToDict):
+    def __init__(self, application_name: str):
+        """
+        Class for preparing the application event for the application step.
+
+        :param application_name: Application name.
+        """
+
+        self.context = self._create_mlrun_context(application_name)
+        self.model_endpoints = {}
+
+    def do(self, event: dict[str, dict]) -> MonitoringApplicationContext:
+        """
+        Prepare the application event for the application step.
+
+        :param event: Application event.
+        :return: Application event.
+        """
+        if not event.get("mlrun_context"):
+            application_context = MonitoringApplicationContext().from_dict(
+                event,
+                context=self.context,
+                model_endpoint_dict=self.model_endpoints,
+            )
+        else:
+            application_context = MonitoringApplicationContext().from_dict(event)
+        self.model_endpoints.setdefault(
+            application_context.endpoint_id, application_context.model_endpoint
+        )
+        return application_context
+
+    @staticmethod
+    def _create_mlrun_context(app_name: str):
+        context = mlrun.get_or_create_ctx(
+            f"{app_name}-logger",
+            upload_artifacts=True,
+        )
+        context.__class__ = MonitoringApplicationContext
+        return context
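For orientation, a hedged illustration of a single element that _PushToMonitoringWriter.do pushes to the writer stream; the literal keys stand in for the mm_constant.WriterEvent members and all values are placeholders:

    writer_event = {
        "application_name": "my-monitoring-app",
        "endpoint_id": "dummy-endpoint-id",
        "start_infer_time": "2024-01-01 00:00:00.000000",
        "end_infer_time": "2024-01-01 01:00:00.000000",
        "event_kind": "result",  # or "metric" for ModelMonitoringApplicationMetric
        "data": "{...}",  # JSON-serialized result payload (results also embed current stats)
    }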