mlrun 1.8.0rc4__py3-none-any.whl → 1.8.0rc6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic; see the registry's advisory page for more details.

Files changed (69)
  1. mlrun/__init__.py +4 -3
  2. mlrun/alerts/alert.py +129 -2
  3. mlrun/artifacts/__init__.py +1 -1
  4. mlrun/artifacts/base.py +12 -1
  5. mlrun/artifacts/document.py +59 -38
  6. mlrun/common/model_monitoring/__init__.py +0 -2
  7. mlrun/common/model_monitoring/helpers.py +0 -28
  8. mlrun/common/schemas/__init__.py +1 -4
  9. mlrun/common/schemas/alert.py +3 -0
  10. mlrun/common/schemas/artifact.py +4 -0
  11. mlrun/common/schemas/client_spec.py +0 -1
  12. mlrun/common/schemas/model_monitoring/__init__.py +0 -6
  13. mlrun/common/schemas/model_monitoring/constants.py +11 -9
  14. mlrun/common/schemas/model_monitoring/model_endpoints.py +77 -149
  15. mlrun/common/schemas/notification.py +6 -0
  16. mlrun/config.py +0 -2
  17. mlrun/datastore/datastore_profile.py +57 -17
  18. mlrun/datastore/vectorstore.py +67 -59
  19. mlrun/db/base.py +22 -18
  20. mlrun/db/factory.py +0 -3
  21. mlrun/db/httpdb.py +122 -150
  22. mlrun/db/nopdb.py +33 -17
  23. mlrun/execution.py +43 -29
  24. mlrun/model.py +7 -0
  25. mlrun/model_monitoring/__init__.py +3 -2
  26. mlrun/model_monitoring/api.py +40 -43
  27. mlrun/model_monitoring/applications/_application_steps.py +4 -2
  28. mlrun/model_monitoring/applications/base.py +65 -6
  29. mlrun/model_monitoring/applications/context.py +64 -33
  30. mlrun/model_monitoring/applications/evidently_base.py +0 -1
  31. mlrun/model_monitoring/applications/histogram_data_drift.py +2 -6
  32. mlrun/model_monitoring/controller.py +43 -37
  33. mlrun/model_monitoring/db/__init__.py +0 -2
  34. mlrun/model_monitoring/db/tsdb/base.py +2 -1
  35. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +2 -1
  36. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +43 -0
  37. mlrun/model_monitoring/helpers.py +12 -66
  38. mlrun/model_monitoring/stream_processing.py +83 -270
  39. mlrun/model_monitoring/writer.py +1 -10
  40. mlrun/projects/project.py +87 -74
  41. mlrun/runtimes/nuclio/function.py +7 -6
  42. mlrun/runtimes/nuclio/serving.py +7 -1
  43. mlrun/serving/routers.py +158 -145
  44. mlrun/serving/server.py +6 -0
  45. mlrun/serving/states.py +2 -0
  46. mlrun/serving/v2_serving.py +69 -60
  47. mlrun/utils/helpers.py +14 -30
  48. mlrun/utils/notifications/notification/mail.py +36 -9
  49. mlrun/utils/notifications/notification_pusher.py +34 -13
  50. mlrun/utils/version/version.json +2 -2
  51. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc6.dist-info}/METADATA +5 -4
  52. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc6.dist-info}/RECORD +56 -69
  53. mlrun/common/schemas/model_monitoring/model_endpoint_v2.py +0 -149
  54. mlrun/model_monitoring/db/stores/__init__.py +0 -136
  55. mlrun/model_monitoring/db/stores/base/__init__.py +0 -15
  56. mlrun/model_monitoring/db/stores/base/store.py +0 -154
  57. mlrun/model_monitoring/db/stores/sqldb/__init__.py +0 -13
  58. mlrun/model_monitoring/db/stores/sqldb/models/__init__.py +0 -46
  59. mlrun/model_monitoring/db/stores/sqldb/models/base.py +0 -93
  60. mlrun/model_monitoring/db/stores/sqldb/models/mysql.py +0 -47
  61. mlrun/model_monitoring/db/stores/sqldb/models/sqlite.py +0 -25
  62. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +0 -408
  63. mlrun/model_monitoring/db/stores/v3io_kv/__init__.py +0 -13
  64. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +0 -464
  65. mlrun/model_monitoring/model_endpoint.py +0 -120
  66. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc6.dist-info}/LICENSE +0 -0
  67. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc6.dist-info}/WHEEL +0 -0
  68. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc6.dist-info}/entry_points.txt +0 -0
  69. {mlrun-1.8.0rc4.dist-info → mlrun-1.8.0rc6.dist-info}/top_level.txt +0 -0
mlrun/execution.py CHANGED
@@ -17,7 +17,7 @@ import os
17
17
  import uuid
18
18
  import warnings
19
19
  from copy import deepcopy
20
- from typing import Optional, Union
20
+ from typing import Optional, Union, cast
21
21
 
22
22
  import numpy as np
23
23
  import yaml
@@ -42,6 +42,7 @@ from .features import Feature
42
42
  from .model import HyperParamOptions
43
43
  from .secrets import SecretsStore
44
44
  from .utils import (
45
+ Logger,
45
46
  RunKeys,
46
47
  dict_to_json,
47
48
  dict_to_yaml,
@@ -158,7 +159,7 @@ class MLClientCtx:
158
159
  return self._project
159
160
 
160
161
  @property
161
- def logger(self):
162
+ def logger(self) -> Logger:
162
163
  """Built-in logger interface
163
164
 
164
165
  Example::
@@ -628,7 +629,7 @@ class MLClientCtx:
628
629
  format=None,
629
630
  db_key=None,
630
631
  **kwargs,
631
- ):
632
+ ) -> Artifact:
632
633
  """Log an output artifact and optionally upload it to datastore
633
634
 
634
635
  Example::
@@ -698,7 +699,7 @@ class MLClientCtx:
698
699
  extra_data=None,
699
700
  label_column: Optional[str] = None,
700
701
  **kwargs,
701
- ):
702
+ ) -> DatasetArtifact:
702
703
  """Log a dataset artifact and optionally upload it to datastore
703
704
 
704
705
  If the dataset exists with the same key and tag, it will be overwritten.
@@ -736,7 +737,7 @@ class MLClientCtx:
736
737
  :param db_key: The key to use in the artifact DB table, by default its run name + '_' + key
737
738
  db_key=False will not register it in the artifacts table
738
739
 
739
- :returns: Artifact object
740
+ :returns: Dataset artifact object
740
741
  """
741
742
  ds = DatasetArtifact(
742
743
  key,
@@ -749,16 +750,19 @@ class MLClientCtx:
749
750
  **kwargs,
750
751
  )
751
752
 
752
- item = self._artifacts_manager.log_artifact(
753
- self,
754
- ds,
755
- local_path=local_path,
756
- artifact_path=extend_artifact_path(artifact_path, self.artifact_path),
757
- target_path=target_path,
758
- tag=tag,
759
- upload=upload,
760
- db_key=db_key,
761
- labels=labels,
753
+ item = cast(
754
+ DatasetArtifact,
755
+ self._artifacts_manager.log_artifact(
756
+ self,
757
+ ds,
758
+ local_path=local_path,
759
+ artifact_path=extend_artifact_path(artifact_path, self.artifact_path),
760
+ target_path=target_path,
761
+ tag=tag,
762
+ upload=upload,
763
+ db_key=db_key,
764
+ labels=labels,
765
+ ),
762
766
  )
763
767
  self._update_run()
764
768
  return item
@@ -786,7 +790,7 @@ class MLClientCtx:
786
790
  extra_data=None,
787
791
  db_key=None,
788
792
  **kwargs,
789
- ):
793
+ ) -> ModelArtifact:
790
794
  """Log a model artifact and optionally upload it to datastore
791
795
 
792
796
  Example::
@@ -828,7 +832,7 @@ class MLClientCtx:
828
832
  :param db_key: The key to use in the artifact DB table, by default its run name + '_' + key
829
833
  db_key=False will not register it in the artifacts table
830
834
 
831
- :returns: Artifact object
835
+ :returns: Model artifact object
832
836
  """
833
837
 
834
838
  if training_set is not None and inputs:
@@ -855,14 +859,17 @@ class MLClientCtx:
855
859
  if training_set is not None:
856
860
  model.infer_from_df(training_set, label_column)
857
861
 
858
- item = self._artifacts_manager.log_artifact(
859
- self,
860
- model,
861
- artifact_path=extend_artifact_path(artifact_path, self.artifact_path),
862
- tag=tag,
863
- upload=upload,
864
- db_key=db_key,
865
- labels=labels,
862
+ item = cast(
863
+ ModelArtifact,
864
+ self._artifacts_manager.log_artifact(
865
+ self,
866
+ model,
867
+ artifact_path=extend_artifact_path(artifact_path, self.artifact_path),
868
+ tag=tag,
869
+ upload=upload,
870
+ db_key=db_key,
871
+ labels=labels,
872
+ ),
866
873
  )
867
874
  self._update_run()
868
875
  return item
@@ -870,28 +877,35 @@ class MLClientCtx:
870
877
  def log_document(
871
878
  self,
872
879
  key: str,
880
+ tag: str = "",
881
+ local_path: str = "",
873
882
  artifact_path: Optional[str] = None,
874
883
  document_loader: DocumentLoaderSpec = DocumentLoaderSpec(),
875
- tag: str = "",
876
884
  upload: Optional[bool] = False,
877
885
  labels: Optional[dict[str, str]] = None,
886
+ target_path: Optional[str] = None,
878
887
  **kwargs,
879
888
  ) -> DocumentArtifact:
880
889
  """
881
890
  Log a document as an artifact.
882
891
 
883
892
  :param key: Artifact key
884
- :param target_path: Path to the local file
885
- :param artifact_path: Target path for artifact storage
886
- :param document_loader: Spec to use to load the artifact as langchain document
887
893
  :param tag: Version tag
894
+ :param local_path: path to the local file we upload, will also be use
895
+ as the destination subpath (under "artifact_path")
896
+ :param artifact_path: Target artifact path (when not using the default)
897
+ to define a subpath under the default location use:
898
+ `artifact_path=context.artifact_subpath('data')`
899
+ :param document_loader: Spec to use to load the artifact as langchain document
888
900
  :param upload: Whether to upload the artifact
889
901
  :param labels: Key-value labels
902
+ :param target_path: Path to the local file
890
903
  :param kwargs: Additional keyword arguments
891
904
  :return: DocumentArtifact object
892
905
  """
893
906
  doc_artifact = DocumentArtifact(
894
907
  key=key,
908
+ original_source=local_path or target_path,
895
909
  document_loader=document_loader,
896
910
  **kwargs,
897
911
  )
mlrun/model.py CHANGED
@@ -117,6 +117,8 @@ class ModelObj:
117
117
  # If one of the attributes is a third party object that has to_dict method (such as k8s objects), then
118
118
  # add it to the object's _fields_to_serialize attribute and handle it in the _serialize_field method.
119
119
  if hasattr(field_value, "to_dict"):
120
+ # TODO: Allow passing fields to exclude from the parent object to the child object
121
+ # e.g.: run.to_dict(exclude=["status.artifacts"])
120
122
  field_value = field_value.to_dict(strip=strip)
121
123
  if self._is_valid_field_value_for_serialization(
122
124
  field_name, field_value, strip
@@ -443,6 +445,7 @@ class Credentials(ModelObj):
443
445
  class BaseMetadata(ModelObj):
444
446
  _default_fields_to_strip = ModelObj._default_fields_to_strip + [
445
447
  "hash",
448
+ "uid",
446
449
  # Below are environment specific fields, no need to keep when stripping
447
450
  "namespace",
448
451
  "project",
@@ -465,10 +468,12 @@ class BaseMetadata(ModelObj):
465
468
  categories=None,
466
469
  updated=None,
467
470
  credentials=None,
471
+ uid=None,
468
472
  ):
469
473
  self.name = name
470
474
  self.tag = tag
471
475
  self.hash = hash
476
+ self.uid = uid
472
477
  self.namespace = namespace
473
478
  self.project = project or ""
474
479
  self.labels = labels or {}
@@ -1267,6 +1272,8 @@ class RunSpec(ModelObj):
1267
1272
  class RunStatus(ModelObj):
1268
1273
  """Run status"""
1269
1274
 
1275
+ _default_fields_to_strip = ModelObj._default_fields_to_strip + ["artifacts"]
1276
+
1270
1277
  def __init__(
1271
1278
  self,
1272
1279
  state=None,
@@ -14,7 +14,8 @@
14
14
 
15
15
  # for backwards compatibility
16
16
 
17
- from .db import get_store_object, get_tsdb_connector
17
+ from mlrun.common.schemas import ModelEndpoint, ModelEndpointList
18
+
19
+ from .db import get_tsdb_connector
18
20
  from .helpers import get_stream_path
19
- from .model_endpoint import ModelEndpoint
20
21
  from .tracking_policy import TrackingPolicy
@@ -26,11 +26,14 @@ import mlrun.common.schemas.model_monitoring.constants as mm_constants
26
26
  import mlrun.feature_store
27
27
  import mlrun.model_monitoring.applications as mm_app
28
28
  import mlrun.serving
29
+ from mlrun.common.schemas import ModelEndpoint
30
+ from mlrun.common.schemas.model_monitoring import (
31
+ FunctionURI,
32
+ )
29
33
  from mlrun.data_types.infer import InferOptions, get_df_stats
30
34
  from mlrun.utils import datetime_now, logger
31
35
 
32
36
  from .helpers import update_model_endpoint_last_request
33
- from .model_endpoint import ModelEndpoint
34
37
 
35
38
  # A union of all supported dataset types:
36
39
  DatasetType = typing.Union[
@@ -46,8 +49,6 @@ def get_or_create_model_endpoint(
46
49
  function_name: str = "",
47
50
  context: mlrun.MLClientCtx = None,
48
51
  sample_set_statistics: typing.Optional[dict[str, typing.Any]] = None,
49
- drift_threshold: typing.Optional[float] = None,
50
- possible_drift_threshold: typing.Optional[float] = None,
51
52
  monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.disabled,
52
53
  db_session=None,
53
54
  ) -> ModelEndpoint:
@@ -68,10 +69,6 @@ def get_or_create_model_endpoint(
68
69
  full function hash.
69
70
  :param sample_set_statistics: Dictionary of sample set statistics that will be used as a reference data for
70
71
  the new model endpoint (applicable only to new endpoint_id).
71
- :param drift_threshold: (deprecated) The threshold of which to mark drifts (applicable only to new
72
- endpoint_id).
73
- :param possible_drift_threshold: (deprecated) The threshold of which to mark possible drifts (applicable only to new
74
- endpoint_id).
75
72
  :param monitoring_mode: If enabled, apply model monitoring features on the provided endpoint id
76
73
  (applicable only to new endpoint_id).
77
74
  :param db_session: A runtime session that manages the current dialog with the database.
@@ -79,18 +76,15 @@ def get_or_create_model_endpoint(
79
76
  :return: A ModelEndpoint object
80
77
  """
81
78
 
82
- if not endpoint_id:
83
- # Generate a new model endpoint id based on the project name and model name
84
- endpoint_id = hashlib.sha1(
85
- f"{project}_{model_endpoint_name}".encode()
86
- ).hexdigest()
87
-
88
79
  if not db_session:
89
80
  # Generate a runtime database
90
81
  db_session = mlrun.get_run_db()
91
82
  try:
92
83
  model_endpoint = db_session.get_model_endpoint(
93
- project=project, endpoint_id=endpoint_id
84
+ project=project,
85
+ name=model_endpoint_name,
86
+ endpoint_id=endpoint_id,
87
+ function_name=function_name,
94
88
  )
95
89
  # If other fields provided, validate that they are correspond to the existing model endpoint data
96
90
  _model_endpoint_validations(
@@ -104,7 +98,6 @@ def get_or_create_model_endpoint(
104
98
  model_endpoint = _generate_model_endpoint(
105
99
  project=project,
106
100
  db_session=db_session,
107
- endpoint_id=endpoint_id,
108
101
  model_path=model_path,
109
102
  model_endpoint_name=model_endpoint_name,
110
103
  function_name=function_name,
@@ -208,13 +201,13 @@ def record_results(
208
201
  monitoring_mode=monitoring_mode,
209
202
  db_session=db,
210
203
  )
211
- logger.debug("Model endpoint", endpoint=model_endpoint.to_dict())
204
+ logger.debug("Model endpoint", endpoint=model_endpoint)
212
205
 
213
206
  timestamp = datetime_now()
214
207
  if infer_results_df is not None:
215
208
  # Write the monitoring parquet to the relevant model endpoint context
216
209
  write_monitoring_df(
217
- feature_set_uri=model_endpoint.status.monitoring_feature_set_uri,
210
+ feature_set_uri=model_endpoint.spec.monitoring_feature_set_uri,
218
211
  infer_datetime=timestamp,
219
212
  endpoint_id=model_endpoint.metadata.uid,
220
213
  infer_results_df=infer_results_df,
@@ -278,7 +271,7 @@ def _model_endpoint_validations(
278
271
  # Feature stats
279
272
  if (
280
273
  sample_set_statistics
281
- and sample_set_statistics != model_endpoint.status.feature_stats
274
+ and sample_set_statistics != model_endpoint.spec.feature_stats
282
275
  ):
283
276
  logger.warning(
284
277
  "Provided sample set statistics is different from the registered statistics. "
@@ -330,7 +323,6 @@ def write_monitoring_df(
330
323
  def _generate_model_endpoint(
331
324
  project: str,
332
325
  db_session,
333
- endpoint_id: str,
334
326
  model_path: str,
335
327
  model_endpoint_name: str,
336
328
  function_name: str,
@@ -344,7 +336,6 @@ def _generate_model_endpoint(
344
336
  :param project: Project name.
345
337
 
346
338
  :param db_session: A session that manages the current dialog with the database.
347
- :param endpoint_id: Model endpoint unique ID.
348
339
  :param model_path: The model Store path.
349
340
  :param model_endpoint_name: Model endpoint name will be presented under the new model endpoint.
350
341
  :param function_name: If a new model endpoint is created, use this function name for generating the
@@ -357,32 +348,38 @@ def _generate_model_endpoint(
357
348
 
358
349
  :return `mlrun.model_monitoring.model_endpoint.ModelEndpoint` object.
359
350
  """
360
- model_endpoint = ModelEndpoint()
361
- model_endpoint.metadata.project = project
362
- model_endpoint.metadata.uid = endpoint_id
363
- if function_name:
364
- model_endpoint.spec.function_uri = project + "/" + function_name
365
- elif not context:
366
- raise mlrun.errors.MLRunInvalidArgumentError(
367
- "Please provide either a function name or a valid MLRun context"
351
+ if not function_name and context:
352
+ function_name = FunctionURI.from_string(
353
+ context.to_dict()["spec"]["function"]
354
+ ).function
355
+ model_obj = None
356
+ if model_path:
357
+ model_obj: mlrun.artifacts.ModelArtifact = (
358
+ mlrun.datastore.store_resources.get_store_resource(
359
+ model_path, db=db_session
360
+ )
368
361
  )
369
- else:
370
- model_endpoint.spec.function_uri = context.to_dict()["spec"]["function"]
371
- model_endpoint.spec.model_uri = model_path
372
- model_endpoint.spec.model = model_endpoint_name
373
- model_endpoint.spec.model_class = "drift-analysis"
374
- model_endpoint.spec.monitoring_mode = monitoring_mode
375
- model_endpoint.status.first_request = model_endpoint.status.last_request = (
376
- datetime_now().isoformat()
377
- )
378
- if sample_set_statistics:
379
- model_endpoint.status.feature_stats = sample_set_statistics
380
-
381
- db_session.create_model_endpoint(
382
- project=project, endpoint_id=endpoint_id, model_endpoint=model_endpoint
362
+ current_time = datetime_now()
363
+ model_endpoint = mlrun.common.schemas.ModelEndpoint(
364
+ metadata=mlrun.common.schemas.ModelEndpointMetadata(
365
+ project=project,
366
+ name=model_endpoint_name,
367
+ endpoint_type=mlrun.common.schemas.model_monitoring.EndpointType.BATCH_EP,
368
+ ),
369
+ spec=mlrun.common.schemas.ModelEndpointSpec(
370
+ function_name=function_name,
371
+ model_name=model_obj.metadata.key if model_path else None,
372
+ model_uid=model_obj.metadata.uid if model_path else None,
373
+ model_class="drift-analysis",
374
+ ),
375
+ status=mlrun.common.schemas.ModelEndpointStatus(
376
+ monitoring_mode=monitoring_mode,
377
+ first_request=current_time,
378
+ last_request=current_time,
379
+ ),
383
380
  )
384
381
 
385
- return db_session.get_model_endpoint(project=project, endpoint_id=endpoint_id)
382
+ return db_session.create_model_endpoint(model_endpoint=model_endpoint)
386
383
 
387
384
 
388
385
  def get_sample_set_statistics(
@@ -16,6 +16,7 @@ import json
16
16
  import traceback
17
17
  from typing import Any, Optional, Union
18
18
 
19
+ import mlrun.common.schemas
19
20
  import mlrun.common.schemas.alert as alert_objects
20
21
  import mlrun.common.schemas.model_monitoring.constants as mm_constant
21
22
  import mlrun.datastore
@@ -81,6 +82,7 @@ class _PushToMonitoringWriter(StepToDict):
81
82
  self._lazy_init()
82
83
  application_results, application_context = event
83
84
  writer_event = {
85
+ mm_constant.WriterEvent.ENDPOINT_NAME: application_context.endpoint_name,
84
86
  mm_constant.WriterEvent.APPLICATION_NAME: application_context.application_name,
85
87
  mm_constant.WriterEvent.ENDPOINT_ID: application_context.endpoint_id,
86
88
  mm_constant.WriterEvent.START_INFER_TIME: application_context.start_infer_time.isoformat(
@@ -125,7 +127,7 @@ class _PrepareMonitoringEvent(StepToDict):
125
127
  """
126
128
  self.graph_context = context
127
129
  self.application_name = application_name
128
- self.model_endpoints: dict[str, mlrun.model_monitoring.ModelEndpoint] = {}
130
+ self.model_endpoints: dict[str, mlrun.common.schemas.ModelEndpoint] = {}
129
131
 
130
132
  def do(self, event: dict[str, Any]) -> MonitoringApplicationContext:
131
133
  """
@@ -135,10 +137,10 @@ class _PrepareMonitoringEvent(StepToDict):
135
137
  :return: Application context.
136
138
  """
137
139
  application_context = MonitoringApplicationContext(
138
- graph_context=self.graph_context,
139
140
  application_name=self.application_name,
140
141
  event=event,
141
142
  model_endpoint_dict=self.model_endpoints,
143
+ graph_context=self.graph_context,
142
144
  )
143
145
 
144
146
  self.model_endpoints.setdefault(
@@ -13,8 +13,9 @@
13
13
  # limitations under the License.
14
14
 
15
15
  from abc import ABC, abstractmethod
16
- from typing import Any, Union
16
+ from typing import Any, Optional, Union, cast
17
17
 
18
+ import mlrun
18
19
  import mlrun.model_monitoring.applications.context as mm_context
19
20
  import mlrun.model_monitoring.applications.results as mm_results
20
21
  from mlrun.serving.utils import MonitoringApplicationToDict
@@ -22,12 +23,12 @@ from mlrun.serving.utils import MonitoringApplicationToDict
22
23
 
23
24
  class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
24
25
  """
25
- A base class for a model monitoring application.
26
+ The base class for a model monitoring application.
26
27
  Inherit from this class to create a custom model monitoring application.
27
28
 
28
- example for very simple custom application::
29
+ For example, :code:`MyApp` below is a simplistic custom application::
29
30
 
30
- class MyApp(ApplicationBase):
31
+ class MyApp(ModelMonitoringApplicationBase):
31
32
  def do_tracking(
32
33
  self,
33
34
  monitoring_context: mm_context.MonitoringApplicationContext,
@@ -43,8 +44,6 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
43
44
  kind=mm_constant.ResultKindApp.data_drift,
44
45
  status=mm_constant.ResultStatusApp.detected,
45
46
  )
46
-
47
-
48
47
  """
49
48
 
50
49
  kind = "monitoring_application"
@@ -62,6 +61,7 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
62
61
  ]:
63
62
  """
64
63
  Process the monitoring event and return application results & metrics.
64
+ Note: this method is internal and should not be called directly or overridden.
65
65
 
66
66
  :param monitoring_context: (MonitoringApplicationContext) The monitoring application context.
67
67
  :returns: A tuple of:
@@ -80,6 +80,65 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
80
80
  results = results if isinstance(results, list) else [results]
81
81
  return results, monitoring_context
82
82
 
83
+ def _handler(self, context: "mlrun.MLClientCtx"):
84
+ """
85
+ A custom handler that wraps the application's logic implemented in
86
+ :py:meth:`~mlrun.model_monitoring.applications.ModelMonitoringApplicationBase.do_tracking`
87
+ for an MLRun job.
88
+ This method should not be called directly.
89
+ """
90
+ monitoring_context = mm_context.MonitoringApplicationContext(
91
+ event={},
92
+ application_name=self.__class__.__name__,
93
+ logger=context.logger,
94
+ artifacts_logger=context,
95
+ )
96
+ result = self.do_tracking(monitoring_context)
97
+ return result
98
+
99
+ @classmethod
100
+ def evaluate(
101
+ cls,
102
+ func_path: Optional[str] = None,
103
+ func_name: Optional[str] = None,
104
+ tag: Optional[str] = None,
105
+ run_local: bool = True,
106
+ ) -> "mlrun.RunObject":
107
+ """
108
+ Call this function to run the application's
109
+ :py:meth:`~mlrun.model_monitoring.applications.ModelMonitoringApplicationBase.do_tracking`
110
+ model monitoring logic as a :py:class:`~mlrun.runtimes.KubejobRuntime`, which is an MLRun function.
111
+
112
+ :param func_path: The path to the function. If not passed, the current notebook is used.
113
+ :param func_name: The name of the function. If not passed, the class name is used.
114
+ :param tag: An optional tag for the function.
115
+ :param run_local: Whether to run the function locally or remotely.
116
+
117
+ :returns: The output of the
118
+ :py:meth:`~mlrun.model_monitoring.applications.ModelMonitoringApplicationBase.do_tracking`
119
+ method wrapped in a :py:class:`~mlrun.model.RunObject`.
120
+ """
121
+ if not run_local:
122
+ raise NotImplementedError # ML-8360
123
+
124
+ project = cast("mlrun.MlrunProject", mlrun.get_current_project())
125
+ class_name = cls.__name__
126
+ name = func_name if func_name is not None else class_name
127
+ handler = f"{class_name}::{cls._handler.__name__}"
128
+
129
+ job = cast(
130
+ mlrun.runtimes.KubejobRuntime,
131
+ project.set_function(
132
+ func=func_path,
133
+ name=name,
134
+ kind=mlrun.runtimes.KubejobRuntime.kind,
135
+ handler=handler,
136
+ tag=tag,
137
+ ),
138
+ )
139
+ run_result = job.run(local=run_local)
140
+ return run_result
141
+
83
142
  @abstractmethod
84
143
  def do_tracking(
85
144
  self,