mlrun 1.10.0rc14__py3-none-any.whl → 1.10.0rc15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (41)
  1. mlrun/artifacts/base.py +0 -31
  2. mlrun/artifacts/manager.py +0 -5
  3. mlrun/common/schemas/__init__.py +1 -0
  4. mlrun/common/schemas/model_monitoring/__init__.py +1 -0
  5. mlrun/common/schemas/model_monitoring/functions.py +1 -1
  6. mlrun/common/schemas/model_monitoring/model_endpoints.py +10 -0
  7. mlrun/config.py +1 -1
  8. mlrun/datastore/model_provider/model_provider.py +42 -14
  9. mlrun/datastore/model_provider/openai_provider.py +96 -15
  10. mlrun/db/base.py +14 -0
  11. mlrun/db/httpdb.py +42 -9
  12. mlrun/db/nopdb.py +8 -0
  13. mlrun/model_monitoring/__init__.py +1 -0
  14. mlrun/model_monitoring/applications/base.py +176 -20
  15. mlrun/model_monitoring/db/_schedules.py +84 -24
  16. mlrun/model_monitoring/db/tsdb/base.py +72 -1
  17. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +7 -1
  18. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +37 -0
  19. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +25 -0
  20. mlrun/model_monitoring/helpers.py +26 -4
  21. mlrun/projects/project.py +26 -6
  22. mlrun/runtimes/daskjob.py +6 -0
  23. mlrun/runtimes/mpijob/abstract.py +6 -0
  24. mlrun/runtimes/mpijob/v1.py +6 -0
  25. mlrun/runtimes/nuclio/application/application.py +2 -0
  26. mlrun/runtimes/nuclio/function.py +6 -0
  27. mlrun/runtimes/nuclio/serving.py +12 -11
  28. mlrun/runtimes/pod.py +21 -0
  29. mlrun/runtimes/remotesparkjob.py +6 -0
  30. mlrun/runtimes/sparkjob/spark3job.py +6 -0
  31. mlrun/serving/server.py +95 -26
  32. mlrun/serving/states.py +16 -0
  33. mlrun/utils/helpers.py +36 -12
  34. mlrun/utils/retryer.py +15 -2
  35. mlrun/utils/version/version.json +2 -2
  36. {mlrun-1.10.0rc14.dist-info → mlrun-1.10.0rc15.dist-info}/METADATA +2 -7
  37. {mlrun-1.10.0rc14.dist-info → mlrun-1.10.0rc15.dist-info}/RECORD +41 -41
  38. {mlrun-1.10.0rc14.dist-info → mlrun-1.10.0rc15.dist-info}/WHEEL +0 -0
  39. {mlrun-1.10.0rc14.dist-info → mlrun-1.10.0rc15.dist-info}/entry_points.txt +0 -0
  40. {mlrun-1.10.0rc14.dist-info → mlrun-1.10.0rc15.dist-info}/licenses/LICENSE +0 -0
  41. {mlrun-1.10.0rc14.dist-info → mlrun-1.10.0rc15.dist-info}/top_level.txt +0 -0
mlrun/artifacts/base.py CHANGED
@@ -16,7 +16,6 @@ import os
 import pathlib
 import tempfile
 import typing
-import warnings
 import zipfile
 
 import yaml
@@ -876,36 +875,6 @@ def generate_target_path(item: Artifact, artifact_path, producer):
     return f"{artifact_path}{item.key}{suffix}"
 
 
-# TODO: Remove once data migration v5 is obsolete
-def convert_legacy_artifact_to_new_format(
-    legacy_artifact: dict,
-) -> Artifact:
-    """Converts a legacy artifact to a new format.
-    :param legacy_artifact: The legacy artifact to convert.
-    :return: The converted artifact.
-    """
-    artifact_key = legacy_artifact.get("key", "")
-    artifact_tag = legacy_artifact.get("tag", "")
-    if artifact_tag:
-        artifact_key = f"{artifact_key}:{artifact_tag}"
-    # TODO: Remove once data migration v5 is obsolete
-    warnings.warn(
-        f"Converting legacy artifact '{artifact_key}' to new format. This will not be supported in MLRun 1.10.0. "
-        f"Make sure to save the artifact/project in the new format.",
-        FutureWarning,
-    )
-
-    artifact = mlrun.artifacts.artifact_types.get(
-        legacy_artifact.get("kind", "artifact"), mlrun.artifacts.Artifact
-    )()
-
-    artifact.metadata = artifact.metadata.from_dict(legacy_artifact)
-    artifact.spec = artifact.spec.from_dict(legacy_artifact)
-    artifact.status = artifact.status.from_dict(legacy_artifact)
-
-    return artifact
-
-
 def fill_artifact_object_hash(object_dict, iteration=None, producer_id=None):
     # remove artifact related fields before calculating hash
     object_dict.setdefault("metadata", {})
mlrun/artifacts/manager.py CHANGED
@@ -110,11 +110,6 @@ class ArtifactProducer:
 
 def dict_to_artifact(struct: dict) -> Artifact:
     kind = struct.get("kind", "")
-
-    # TODO: Remove once data migration v5 is obsolete
-    if mlrun.utils.is_legacy_artifact(struct):
-        return mlrun.artifacts.base.convert_legacy_artifact_to_new_format(struct)
-
     artifact_class = artifact_types[kind]
     return artifact_class.from_dict(struct)
 
mlrun/common/schemas/__init__.py CHANGED
@@ -147,6 +147,7 @@ from .model_monitoring import (
     GrafanaTable,
     ModelEndpoint,
     ModelEndpointCreationStrategy,
+    ModelEndpointDriftValues,
     ModelEndpointList,
     ModelEndpointMetadata,
     ModelEndpointSchema,
mlrun/common/schemas/model_monitoring/__init__.py CHANGED
@@ -59,6 +59,7 @@ from .model_endpoints import (
     Features,
     FeatureValues,
     ModelEndpoint,
+    ModelEndpointDriftValues,
     ModelEndpointList,
     ModelEndpointMetadata,
     ModelEndpointMonitoringMetric,
mlrun/common/schemas/model_monitoring/functions.py CHANGED
@@ -64,5 +64,5 @@ class FunctionSummary(BaseModel):
             updated_time=func_dict["metadata"].get("updated"),
             status=func_dict["status"].get("state"),
             base_period=base_period,
-            stats=stats,
+            stats=stats or {},
         )
mlrun/common/schemas/model_monitoring/model_endpoints.py CHANGED
@@ -352,6 +352,16 @@ class ApplicationMetricRecord(ApplicationBaseRecord):
     type: Literal["metric"] = "metric"
 
 
+class _DriftBin(NamedTuple):
+    timestamp: datetime
+    count_suspected: int
+    count_detected: int
+
+
+class ModelEndpointDriftValues(BaseModel):
+    values: list[_DriftBin]
+
+
 def _mapping_attributes(
     model_class: type[Model],
     flattened_dictionary: dict,
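
The new schema is small enough to exercise directly. A minimal sketch (class names come from the hunk above; the sample timestamps and counts are invented, and it assumes pydantic's usual coercion of plain tuples into NamedTuple fields):

```python
# Hypothetical usage of the new drift schema: each bin is
# (timestamp, count_suspected, count_detected).
from datetime import datetime, timezone

from mlrun.common.schemas.model_monitoring import ModelEndpointDriftValues

drift = ModelEndpointDriftValues(
    values=[
        (datetime(2025, 1, 1, 10, 0, tzinfo=timezone.utc), 2, 0),  # sample bin
        (datetime(2025, 1, 1, 11, 0, tzinfo=timezone.utc), 1, 3),  # sample bin
    ]
)
print(drift.values[0].count_suspected)  # -> 2
```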
mlrun/config.py CHANGED
@@ -193,7 +193,7 @@ default_config = {
     },
     "v3io_framesd": "http://framesd:8080",
     "model_providers": {
-        "openai_default_model": "gpt-4",
+        "openai_default_model": "gpt-4o",
     },
     # default node selector to be applied to all functions - json string base64 encoded format
     "default_function_node_selector": "e30=",
mlrun/datastore/model_provider/model_provider.py CHANGED
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from collections.abc import Awaitable
-from typing import Callable, Optional, TypeVar, Union
+from typing import Any, Callable, Optional, TypeVar, Union
 
 import mlrun.errors
 from mlrun.datastore.remote_client import (
@@ -56,9 +56,16 @@ class ModelProvider(BaseRemoteClient):
         )
         self.default_invoke_kwargs = default_invoke_kwargs or {}
         self._client = None
-        self._default_operation = None
         self._async_client = None
-        self._default_async_operation = None
+
+    def get_client_options(self) -> dict:
+        """
+        Returns a dictionary containing credentials and configuration
+        options required for client creation.
+
+        :return: A dictionary with client-specific settings.
+        """
+        return {}
 
     def load_client(self) -> None:
         """
@@ -68,8 +75,6 @@ class ModelProvider(BaseRemoteClient):
         Subclasses should override this method to:
         - Create and configure the provider-specific client instance.
         - Assign the client instance to self._client.
-        - Define a default operation callable (e.g., a method to invoke model completions)
-          and assign it to self._default_operation.
         """
 
         raise NotImplementedError("load_client method is not implemented")
@@ -122,39 +127,62 @@ class ModelProvider(BaseRemoteClient):
         """
         raise NotImplementedError("invoke method is not implemented")
 
-    def customized_invoke(
+    def custom_invoke(
         self, operation: Optional[Callable[..., T]] = None, **invoke_kwargs
     ) -> Optional[T]:
-        raise NotImplementedError("customized_invoke method is not implemented")
+        """
+        Invokes a model operation from a provider (e.g., OpenAI, Hugging Face, etc.) with the given keyword arguments.
+
+        Useful for dynamically calling model methods like text generation, chat completions, or image generation.
+        The operation must be a callable that accepts keyword arguments.
+
+        :param operation: A callable representing the model operation (e.g., a client method).
+        :param invoke_kwargs: Keyword arguments to pass to the operation.
+        :return: The full response returned by the operation.
+        """
+        raise NotImplementedError("custom_invoke method is not implemented")
 
     @property
-    def client(self):
+    def client(self) -> Any:
         return self._client
 
     @property
-    def model(self):
+    def model(self) -> Optional[str]:
         return None
 
-    def get_invoke_kwargs(self, invoke_kwargs):
+    def get_invoke_kwargs(self, invoke_kwargs) -> dict:
         kwargs = self.default_invoke_kwargs.copy()
         kwargs.update(invoke_kwargs)
         return kwargs
 
     @property
-    def async_client(self):
+    def async_client(self) -> Any:
         if not self.support_async:
             raise mlrun.errors.MLRunInvalidArgumentError(
                 f"{self.__class__.__name__} does not support async operations"
             )
         return self._async_client
 
-    async def async_customized_invoke(self, **kwargs):
-        raise NotImplementedError("async_customized_invoke is not implemented")
+    async def async_custom_invoke(
+        self, operation: Optional[Callable[..., Awaitable[T]]], **invoke_kwargs
+    ) -> Optional[T]:
+        """
+        Asynchronously invokes a model operation from a provider (e.g., OpenAI, Hugging Face, etc.)
+        with the given keyword arguments.
+
+        The operation must be an async callable (e.g., a method from an async client) that accepts keyword arguments.
+
+        :param operation: An async callable representing the model operation (e.g., an async_client method).
+        :param invoke_kwargs: Keyword arguments to pass to the operation.
+        :return: The full response returned by the awaited operation.
+        """
+        raise NotImplementedError("async_custom_invoke is not implemented")
 
     async def async_invoke(
         self,
         messages: Optional[list[dict]] = None,
         as_str: bool = False,
         **invoke_kwargs,
-    ) -> Awaitable[str]:
+    ) -> Optional[str]:
+        """Async version of `invoke`. See `invoke` for full documentation."""
        raise NotImplementedError("async_invoke is not implemented")
mlrun/datastore/model_provider/openai_provider.py CHANGED
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+from collections.abc import Awaitable
 from typing import Callable, Optional, TypeVar, Union
 
 import mlrun
@@ -33,6 +33,8 @@ class OpenAIProvider(ModelProvider):
     operations tailored to the OpenAI API.
     """
 
+    support_async = True
+
     def __init__(
         self,
         parent,
@@ -67,7 +69,7 @@ class OpenAIProvider(ModelProvider):
         return endpoint, subpath
 
     @property
-    def model(self):
+    def model(self) -> Optional[str]:
         return self.endpoint
 
     def load_client(self) -> None:
@@ -76,23 +78,20 @@ class OpenAIProvider(ModelProvider):
 
         This method imports the `OpenAI` class from the `openai` package, instantiates
         a client with the given keyword arguments (`self.options`), and assigns it to
-        `self._client`.
-
-        It also sets the default operation to `self.client.chat.completions.create`, which is
-        typically used for invoking chat-based model completions.
+        `self._client` and `self._async_client`.
 
         Raises:
             ImportError: If the `openai` package is not installed.
         """
         try:
-            from openai import OpenAI  # noqa
+            from openai import OpenAI, AsyncOpenAI  # noqa
 
             self._client = OpenAI(**self.options)
-            self._default_operation = self.client.chat.completions.create
+            self._async_client = AsyncOpenAI(**self.options)
         except ImportError as exc:
             raise ImportError("openai package is not installed") from exc
 
-    def get_client_options(self):
+    def get_client_options(self) -> dict:
         res = dict(
             api_key=self._get_secret_or_env("OPENAI_API_KEY"),
             organization=self._get_secret_or_env("OPENAI_ORG_ID"),
@@ -103,14 +102,69 @@
         )
         return self._sanitize_options(res)
 
-    def customized_invoke(
+    def custom_invoke(
         self, operation: Optional[Callable[..., T]] = None, **invoke_kwargs
     ) -> Optional[T]:
+        """
+        OpenAI-specific implementation of `ModelProvider.custom_invoke`.
+
+        Invokes an OpenAI model operation using the sync client. For full details, see
+        `ModelProvider.custom_invoke`.
+
+        Example:
+        ```python
+        result = openai_model_provider.custom_invoke(
+            openai_model_provider.client.images.generate,
+            prompt="A futuristic cityscape at sunset",
+            n=1,
+            size="1024x1024",
+        )
+        ```
+
+        :param operation: Same as ModelProvider.custom_invoke.
+        :param invoke_kwargs: Same as ModelProvider.custom_invoke.
+        :return: Same as ModelProvider.custom_invoke.
+        """
         invoke_kwargs = self.get_invoke_kwargs(invoke_kwargs)
         if operation:
             return operation(**invoke_kwargs, model=self.model)
         else:
-            return self._default_operation(**invoke_kwargs, model=self.model)
+            return self.client.chat.completions.create(
+                **invoke_kwargs, model=self.model
+            )
+
+    async def async_custom_invoke(
+        self,
+        operation: Optional[Callable[..., Awaitable[T]]] = None,
+        **invoke_kwargs,
+    ) -> Optional[T]:
+        """
+        OpenAI-specific implementation of `ModelProvider.async_custom_invoke`.
+
+        Invokes an OpenAI model operation using the async client. For full details, see
+        `ModelProvider.async_custom_invoke`.
+
+        Example:
+        ```python
+        result = await openai_model_provider.async_custom_invoke(
+            openai_model_provider.async_client.images.generate,
+            prompt="A futuristic cityscape at sunset",
+            n=1,
+            size="1024x1024",
+        )
+        ```
+
+        :param operation: Same as ModelProvider.async_custom_invoke.
+        :param invoke_kwargs: Same as ModelProvider.async_custom_invoke.
+        :return: Same as ModelProvider.async_custom_invoke.
+        """
+        invoke_kwargs = self.get_invoke_kwargs(invoke_kwargs)
+        if operation:
+            return await operation(**invoke_kwargs, model=self.model)
+        else:
+            return await self.async_client.chat.completions.create(
+                **invoke_kwargs, model=self.model
+            )
 
     def invoke(
         self,
@@ -133,12 +187,39 @@
 
         :param invoke_kwargs:
             Same as ModelProvider.invoke.
+        :return: Same as ModelProvider.invoke.
 
         """
-        invoke_kwargs = self.get_invoke_kwargs(invoke_kwargs)
-        response = self._default_operation(
-            model=self.endpoint, messages=messages, **invoke_kwargs
-        )
+        response = self.custom_invoke(messages=messages, **invoke_kwargs)
+        if as_str:
+            return response.choices[0].message.content
+        return response
+
+    async def async_invoke(
+        self,
+        messages: Optional[list[dict]] = None,
+        as_str: bool = False,
+        **invoke_kwargs,
+    ) -> str:
+        """
+        OpenAI-specific implementation of `ModelProvider.async_invoke`.
+        Invokes an OpenAI model operation using the async client.
+        For full details, see `ModelProvider.async_invoke`.
+
+        :param messages: Same as ModelProvider.async_invoke.
+        :param as_str: bool
+            If `True`, returns only the main content of the first response
+            (`response.choices[0].message.content`).
+            If `False`, returns the full awaited response object, whose type depends on
+            the specific OpenAI SDK operation used (e.g., chat completion, completion, etc.).
+        :param invoke_kwargs:
+            Same as ModelProvider.async_invoke.
+        :return: Same as ModelProvider.async_invoke.
+        """
+        response = await self.async_custom_invoke(messages=messages, **invoke_kwargs)
         if as_str:
             return response.choices[0].message.content
         return response
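
Putting the sync and async entry points together, a usage sketch (it assumes `provider` is an already-constructed OpenAIProvider with `load_client()` called and `OPENAI_API_KEY` available; the prompt is invented):

```python
import asyncio

messages = [{"role": "user", "content": "Summarize MLRun in one sentence."}]

# Sync path: routed through custom_invoke -> client.chat.completions.create.
text = provider.invoke(messages=messages, as_str=True)

# Async path: routed through async_custom_invoke on the AsyncOpenAI client.
async def main() -> str:
    return await provider.async_invoke(messages=messages, as_str=True)

print(text)
print(asyncio.run(main()))
```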
mlrun/db/base.py CHANGED
@@ -638,6 +638,11 @@ class RunDBInterface(ABC):
     ):
         pass
 
+    def wait_for_background_task_to_reach_terminal_state(
+        self, name: str, project: str = ""
+    ) -> mlrun.common.schemas.BackgroundTask:
+        pass
+
     @abstractmethod
     def retry_pipeline(
         self,
@@ -1145,3 +1150,12 @@
     @abstractmethod
     def get_project_summary(self, project: str) -> mlrun.common.schemas.ProjectSummary:
         pass
+
+    @abstractmethod
+    def get_drift_over_time(
+        self,
+        project: str,
+        start: Optional[datetime.datetime] = None,
+        end: Optional[datetime.datetime] = None,
+    ) -> mlrun.common.schemas.model_monitoring.ModelEndpointDriftValues:
+        pass
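
The rename makes waiting on server-side background tasks part of the public run-DB surface. A hedged usage sketch (the task and project names are placeholders):

```python
import mlrun

db = mlrun.get_run_db()
# Block until the named background task reaches a terminal state.
task = db.wait_for_background_task_to_reach_terminal_state(
    "my-background-task",  # placeholder, e.g. taken from an earlier API response
    project="my-project",
)
print(task.status.state)  # e.g. "succeeded" or "failed"
```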
mlrun/db/httpdb.py CHANGED
@@ -757,7 +757,7 @@ class HTTPRunDB(RunDBInterface):
         )
         if response.status_code == http.HTTPStatus.ACCEPTED:
             background_task = mlrun.common.schemas.BackgroundTask(**response.json())
-            return self._wait_for_background_task_to_reach_terminal_state(
+            return self.wait_for_background_task_to_reach_terminal_state(
                 background_task.metadata.name, project=project
             )
         return None
@@ -784,7 +784,7 @@ class HTTPRunDB(RunDBInterface):
         )
         if response.status_code == http.HTTPStatus.ACCEPTED:
             background_task = mlrun.common.schemas.BackgroundTask(**response.json())
-            background_task = self._wait_for_background_task_to_reach_terminal_state(
+            background_task = self.wait_for_background_task_to_reach_terminal_state(
                 background_task.metadata.name, project=project
             )
             if (
@@ -839,7 +839,7 @@ class HTTPRunDB(RunDBInterface):
         )
         if response.status_code == http.HTTPStatus.ACCEPTED:
             background_task = mlrun.common.schemas.BackgroundTask(**response.json())
-            background_task = self._wait_for_background_task_to_reach_terminal_state(
+            background_task = self.wait_for_background_task_to_reach_terminal_state(
                 background_task.metadata.name, project=project
             )
             if (
@@ -1485,7 +1485,7 @@ class HTTPRunDB(RunDBInterface):
             "Function is being deleted", project_name=project, function_name=name
         )
         background_task = mlrun.common.schemas.BackgroundTask(**response.json())
-        background_task = self._wait_for_background_task_to_reach_terminal_state(
+        background_task = self.wait_for_background_task_to_reach_terminal_state(
             background_task.metadata.name, project=project
         )
         if (
@@ -3274,7 +3274,7 @@ class HTTPRunDB(RunDBInterface):
         if response.status_code == http.HTTPStatus.ACCEPTED:
             logger.info("Waiting for project to be deleted", project_name=name)
             background_task = mlrun.common.schemas.BackgroundTask(**response.json())
-            background_task = self._wait_for_background_task_to_reach_terminal_state(
+            background_task = self.wait_for_background_task_to_reach_terminal_state(
                 background_task.metadata.name
             )
             if (
@@ -3387,7 +3387,7 @@ class HTTPRunDB(RunDBInterface):
             _verify_project_in_terminal_state,
         )
 
-    def _wait_for_background_task_to_reach_terminal_state(
+    def wait_for_background_task_to_reach_terminal_state(
         self, name: str, project: str = ""
     ) -> mlrun.common.schemas.BackgroundTask:
         def _verify_background_task_in_terminal_state():
@@ -3408,6 +3408,7 @@ class HTTPRunDB(RunDBInterface):
             logger,
             False,
             _verify_background_task_in_terminal_state,
+            fatal_exceptions=(mlrun.errors.MLRunAccessDeniedError,),
         )
 
     def create_project_secrets(
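
The new `fatal_exceptions` argument short-circuits the retry loop for errors that cannot succeed on retry, such as access denied. A self-contained illustration of the semantics (not mlrun's actual retryer API):

```python
import time


def retry_until_successful(backoff, timeout, func, fatal_exceptions=(), **kwargs):
    """Retry func until it succeeds or timeout elapses; fatal errors abort at once."""
    deadline = time.monotonic() + timeout
    while True:
        try:
            return func(**kwargs)
        except fatal_exceptions:
            raise  # e.g. an access-denied error: retrying cannot help
        except Exception:
            if time.monotonic() >= deadline:
                raise
            time.sleep(backoff)
```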
@@ -4082,7 +4083,7 @@ class HTTPRunDB(RunDBInterface):
             **response.json()
         ).background_tasks
         for task in background_tasks:
-            task = self._wait_for_background_task_to_reach_terminal_state(
+            task = self.wait_for_background_task_to_reach_terminal_state(
                 task.metadata.name, project=project
             )
             if (
@@ -4119,7 +4120,7 @@ class HTTPRunDB(RunDBInterface):
             **response.json()
         ).background_tasks
         for task in background_tasks:
-            task = self._wait_for_background_task_to_reach_terminal_state(
+            task = self.wait_for_background_task_to_reach_terminal_state(
                 task.metadata.name, project=project
             )
             if (
@@ -5158,6 +5159,38 @@ class HTTPRunDB(RunDBInterface):
         response = self.api_call("GET", endpoint_path, error_message)
         return mlrun.common.schemas.ProjectSummary(**response.json())
 
+    def get_drift_over_time(
+        self,
+        project: str,
+        start: Optional[datetime] = None,
+        end: Optional[datetime] = None,
+    ) -> mlrun.common.schemas.model_monitoring.ModelEndpointDriftValues:
+        """
+        Get drift counts over time for the project.
+
+        This method returns a list of tuples, each representing a time interval (at a granularity set by the
+        duration of the given time range) and the number of suspected drifts and detected drifts in that
+        interval. For a range of 6 hours or less, the granularity is 10 minutes; for a range of 6 to 72 hours,
+        the granularity is 1 hour; and for a range of more than 72 hours, the granularity is 24 hours.
+
+        :param project: The name of the project for which to retrieve drift counts.
+        :param start: Start time of the range to retrieve drift counts from.
+        :param end: End time of the range to retrieve drift counts from.
+
+        :return: A ModelEndpointDriftValues object containing the drift counts over time.
+        """
+        endpoint_path = f"projects/{project}/model-endpoints/drift-over-time"
+        error_message = f"Failed retrieving drift data for {project}"
+        response = self.api_call(
+            method="GET",
+            path=endpoint_path,
+            error=error_message,
+            params={"start": start, "end": end},
+        )
+        return mlrun.common.schemas.model_monitoring.ModelEndpointDriftValues(
+            **response.json()
+        )
+
     @staticmethod
     def _parse_labels(
         labels: Optional[Union[str, dict[str, Optional[str]], list[str]]],
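
A usage sketch for the new endpoint (the project name is a placeholder; per the docstring above, a 6-hour range yields 10-minute bins):

```python
from datetime import datetime, timedelta, timezone

import mlrun

db = mlrun.get_run_db()
end = datetime.now(timezone.utc)
drift = db.get_drift_over_time(
    "my-project",  # placeholder project name
    start=end - timedelta(hours=6),  # 6-hour range -> 10-minute granularity
    end=end,
)
for ts, suspected, detected in drift.values:
    print(ts.isoformat(), suspected, detected)
```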
@@ -5478,7 +5511,7 @@ class HTTPRunDB(RunDBInterface):
     def _wait_for_background_task_from_response(self, response):
         if response.status_code == http.HTTPStatus.ACCEPTED:
             background_task = mlrun.common.schemas.BackgroundTask(**response.json())
-            return self._wait_for_background_task_to_reach_terminal_state(
+            return self.wait_for_background_task_to_reach_terminal_state(
                 background_task.metadata.name
             )
         return None
mlrun/db/nopdb.py CHANGED
@@ -980,3 +980,11 @@ class NopDB(RunDBInterface):
 
     def get_project_summary(self, project: str):
         pass
+
+    def get_drift_over_time(
+        self,
+        project: str,
+        start: Optional[datetime.datetime] = None,
+        end: Optional[datetime.datetime] = None,
+    ) -> mlrun.common.schemas.model_monitoring.ModelEndpointDriftValues:
+        pass
mlrun/model_monitoring/__init__.py CHANGED
@@ -15,4 +15,5 @@
 from mlrun.common.schemas import ModelEndpoint, ModelEndpointList
 
 from .db import get_tsdb_connector
+from .db._schedules import delete_model_monitoring_schedules_user_folder
 from .helpers import get_stream_path
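
The re-export makes the schedules cleanup helper importable from the package root:

```python
# Now available at the package top level (previously only via the db._schedules module):
from mlrun.model_monitoring import delete_model_monitoring_schedules_user_folder
```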