mlrun 1.10.0rc16__py3-none-any.whl → 1.10.0rc42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (98)
  1. mlrun/__init__.py +22 -2
  2. mlrun/artifacts/document.py +6 -1
  3. mlrun/artifacts/llm_prompt.py +21 -15
  4. mlrun/artifacts/model.py +3 -3
  5. mlrun/common/constants.py +9 -0
  6. mlrun/common/formatters/artifact.py +1 -0
  7. mlrun/common/model_monitoring/helpers.py +86 -0
  8. mlrun/common/schemas/__init__.py +2 -0
  9. mlrun/common/schemas/auth.py +2 -0
  10. mlrun/common/schemas/function.py +10 -0
  11. mlrun/common/schemas/hub.py +30 -18
  12. mlrun/common/schemas/model_monitoring/__init__.py +2 -0
  13. mlrun/common/schemas/model_monitoring/constants.py +30 -6
  14. mlrun/common/schemas/model_monitoring/functions.py +13 -4
  15. mlrun/common/schemas/model_monitoring/model_endpoints.py +11 -0
  16. mlrun/common/schemas/pipeline.py +1 -1
  17. mlrun/common/schemas/serving.py +3 -0
  18. mlrun/common/schemas/workflow.py +1 -0
  19. mlrun/common/secrets.py +22 -1
  20. mlrun/config.py +32 -10
  21. mlrun/datastore/__init__.py +11 -3
  22. mlrun/datastore/azure_blob.py +162 -47
  23. mlrun/datastore/datastore.py +9 -4
  24. mlrun/datastore/datastore_profile.py +61 -5
  25. mlrun/datastore/model_provider/huggingface_provider.py +363 -0
  26. mlrun/datastore/model_provider/mock_model_provider.py +87 -0
  27. mlrun/datastore/model_provider/model_provider.py +211 -74
  28. mlrun/datastore/model_provider/openai_provider.py +243 -71
  29. mlrun/datastore/s3.py +24 -2
  30. mlrun/datastore/storeytargets.py +2 -3
  31. mlrun/datastore/utils.py +15 -3
  32. mlrun/db/base.py +27 -19
  33. mlrun/db/httpdb.py +57 -48
  34. mlrun/db/nopdb.py +25 -10
  35. mlrun/execution.py +55 -13
  36. mlrun/hub/__init__.py +15 -0
  37. mlrun/hub/module.py +181 -0
  38. mlrun/k8s_utils.py +105 -16
  39. mlrun/launcher/base.py +13 -6
  40. mlrun/launcher/local.py +2 -0
  41. mlrun/model.py +9 -3
  42. mlrun/model_monitoring/api.py +66 -27
  43. mlrun/model_monitoring/applications/__init__.py +1 -1
  44. mlrun/model_monitoring/applications/base.py +372 -136
  45. mlrun/model_monitoring/applications/context.py +2 -4
  46. mlrun/model_monitoring/applications/results.py +4 -7
  47. mlrun/model_monitoring/controller.py +239 -101
  48. mlrun/model_monitoring/db/_schedules.py +36 -13
  49. mlrun/model_monitoring/db/_stats.py +4 -3
  50. mlrun/model_monitoring/db/tsdb/base.py +29 -9
  51. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +4 -5
  52. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +154 -50
  53. mlrun/model_monitoring/db/tsdb/tdengine/writer_graph_steps.py +51 -0
  54. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +17 -4
  55. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +245 -51
  56. mlrun/model_monitoring/helpers.py +28 -5
  57. mlrun/model_monitoring/stream_processing.py +45 -14
  58. mlrun/model_monitoring/writer.py +220 -1
  59. mlrun/platforms/__init__.py +3 -2
  60. mlrun/platforms/iguazio.py +7 -3
  61. mlrun/projects/operations.py +6 -1
  62. mlrun/projects/pipelines.py +2 -2
  63. mlrun/projects/project.py +128 -45
  64. mlrun/run.py +94 -17
  65. mlrun/runtimes/__init__.py +18 -0
  66. mlrun/runtimes/base.py +14 -6
  67. mlrun/runtimes/daskjob.py +1 -0
  68. mlrun/runtimes/local.py +5 -2
  69. mlrun/runtimes/mounts.py +20 -2
  70. mlrun/runtimes/nuclio/__init__.py +1 -0
  71. mlrun/runtimes/nuclio/application/application.py +147 -17
  72. mlrun/runtimes/nuclio/function.py +70 -27
  73. mlrun/runtimes/nuclio/serving.py +85 -4
  74. mlrun/runtimes/pod.py +213 -21
  75. mlrun/runtimes/utils.py +49 -9
  76. mlrun/secrets.py +54 -13
  77. mlrun/serving/remote.py +79 -6
  78. mlrun/serving/routers.py +23 -41
  79. mlrun/serving/server.py +211 -40
  80. mlrun/serving/states.py +536 -156
  81. mlrun/serving/steps.py +62 -0
  82. mlrun/serving/system_steps.py +136 -81
  83. mlrun/serving/v2_serving.py +9 -10
  84. mlrun/utils/helpers.py +212 -82
  85. mlrun/utils/logger.py +3 -1
  86. mlrun/utils/notifications/notification/base.py +18 -0
  87. mlrun/utils/notifications/notification/git.py +2 -4
  88. mlrun/utils/notifications/notification/slack.py +2 -4
  89. mlrun/utils/notifications/notification/webhook.py +2 -5
  90. mlrun/utils/notifications/notification_pusher.py +1 -1
  91. mlrun/utils/version/version.json +2 -2
  92. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/METADATA +44 -45
  93. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/RECORD +97 -92
  94. mlrun/api/schemas/__init__.py +0 -259
  95. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/WHEEL +0 -0
  96. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/entry_points.txt +0 -0
  97. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/licenses/LICENSE +0 -0
  98. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/top_level.txt +0 -0
mlrun/datastore/model_provider/openai_provider.py CHANGED
@@ -11,13 +11,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import inspect
 from collections.abc import Awaitable
-from typing import Callable, Optional, TypeVar, Union
+from typing import TYPE_CHECKING, Any, Callable, Optional, Union

 import mlrun
-from mlrun.datastore.model_provider.model_provider import ModelProvider
+from mlrun.datastore.model_provider.model_provider import (
+    InvokeResponseFormat,
+    ModelProvider,
+    UsageResponseKeys,
+)
+from mlrun.datastore.utils import accepts_param

-T = TypeVar("T")
+if TYPE_CHECKING:
+    from openai._models import BaseModel  # noqa
+    from openai.types.chat.chat_completion import ChatCompletion


 class OpenAIProvider(ModelProvider):
@@ -34,6 +42,7 @@ class OpenAIProvider(ModelProvider):
     """

     support_async = True
+    response_class = None

     def __init__(
         self,
@@ -58,7 +67,31 @@ class OpenAIProvider(ModelProvider):
             default_invoke_kwargs=default_invoke_kwargs,
         )
         self.options = self.get_client_options()
-        self.load_client()
+
+    @classmethod
+    def _import_response_class(cls) -> None:
+        if not cls.response_class:
+            try:
+                from openai.types.chat.chat_completion import ChatCompletion
+            except ImportError as exc:
+                raise ImportError("openai package is not installed") from exc
+            cls.response_class = ChatCompletion
+
+    @staticmethod
+    def _extract_string_output(response: "ChatCompletion") -> str:
+        """
+        Extracts the text content of the first choice from an OpenAI ChatCompletion response.
+        Only supports responses with a single choice. Raises an error if multiple choices exist.
+
+        :param response: The ChatCompletion response from OpenAI.
+        :return: The text content of the first message in the response.
+        :raises MLRunInvalidArgumentError: If the response contains more than one choice.
+        """
+        if len(response.choices) != 1:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "OpenAIProvider: extracting string from response is only supported for single-response outputs"
+            )
+        return response.choices[0].message.content

     @classmethod
     def parse_endpoint_and_path(cls, endpoint, subpath) -> (str, str):
@@ -69,28 +102,57 @@ class OpenAIProvider(ModelProvider):
         return endpoint, subpath

     @property
-    def model(self) -> Optional[str]:
-        return self.endpoint
+    def client(self) -> Any:
+        """
+        Lazily return the synchronous OpenAI client.

-    def load_client(self) -> None:
+        If the client has not been initialized yet, it will be created
+        by calling `load_client`.
         """
-        Initializes the OpenAI SDK client using the provided options.
+        self.load_client()
+        return self._client

-        This method imports the `OpenAI` class from the `openai` package, instantiates
-        a client with the given keyword arguments (`self.options`), and assigns it to
-        `self._client` and `self._async_client`.
+    def load_client(self) -> None:
+        """
+        Lazily initialize the synchronous OpenAI client.

-        Raises:
-            ImportError: If the `openai` package is not installed.
+        The client is created only if it does not already exist.
+        Raises ImportError if the openai package is not installed.
         """
+        if self._client:
+            return
         try:
-            from openai import OpenAI, AsyncOpenAI  # noqa
+            from openai import OpenAI  # noqa

             self._client = OpenAI(**self.options)
-            self._async_client = AsyncOpenAI(**self.options)
         except ImportError as exc:
             raise ImportError("openai package is not installed") from exc

+    def load_async_client(self) -> None:
+        """
+        Lazily initialize the asynchronous OpenAI client.
+
+        The client is created only if it does not already exist.
+        Raises ImportError if the openai package is not installed.
+        """
+        if not self._async_client:
+            try:
+                from openai import AsyncOpenAI  # noqa
+
+                self._async_client = AsyncOpenAI(**self.options)
+            except ImportError as exc:
+                raise ImportError("openai package is not installed") from exc
+
+    @property
+    def async_client(self) -> Any:
+        """
+        Return the asynchronous OpenAI client, creating it on first access.
+
+        The client is lazily initialized via `load_async_client`.
+        """
+        self.load_async_client()
+        return self._async_client
+
     def get_client_options(self) -> dict:
         res = dict(
             api_key=self._get_secret_or_env("OPENAI_API_KEY"),
@@ -103,123 +165,233 @@ class OpenAIProvider(ModelProvider):
         return self._sanitize_options(res)

     def custom_invoke(
-        self, operation: Optional[Callable[..., T]] = None, **invoke_kwargs
-    ) -> Optional[T]:
+        self, operation: Optional[Callable] = None, **invoke_kwargs
+    ) -> Union["ChatCompletion", "BaseModel"]:
         """
-        OpenAI-specific implementation of `ModelProvider.custom_invoke`.
+        Invokes a model operation from the OpenAI client with the given keyword arguments.

-        Invokes an OpenAI model operation using the sync client. For full details, see
-        `ModelProvider.custom_invoke`.
+        This method provides flexibility to either:
+        - Call a specific OpenAI client operation (e.g., `client.images.generate`).
+        - Default to `chat.completions.create` when no operation is provided.
+
+        The operation must be a callable that accepts keyword arguments. If the callable
+        does not accept a `model` parameter, it will be omitted from the call.

         Example:
         ```python
-        result = openai_model_provider.invoke(
+        result = openai_model_provider.custom_invoke(
            openai_model_provider.client.images.generate,
            prompt="A futuristic cityscape at sunset",
            n=1,
            size="1024x1024",
        )
        ```
-        :param operation: Same as ModelProvider.custom_invoke.
-        :param invoke_kwargs: Same as ModelProvider.custom_invoke.
-        :return: Same as ModelProvider.custom_invoke.

+        :param operation: A callable representing the OpenAI operation to invoke.
+                          If not provided, defaults to `client.chat.completions.create`.
+
+        :param invoke_kwargs: Additional keyword arguments to pass to the operation.
+                              These are merged with `default_invoke_kwargs` and may
+                              include parameters such as `temperature`, `max_tokens`,
+                              or `messages`.
+
+        :return: The full response returned by the operation, typically
+                 an OpenAI `ChatCompletion` or other OpenAI SDK model.
         """
+
         invoke_kwargs = self.get_invoke_kwargs(invoke_kwargs)
+        model_kwargs = {"model": invoke_kwargs.pop("model", None) or self.model}
+
         if operation:
-            return operation(**invoke_kwargs, model=self.model)
+            if not callable(operation):
+                raise mlrun.errors.MLRunInvalidArgumentError(
+                    "OpenAI custom_invoke operation must be a callable"
+                )
+            if not accepts_param(operation, "model"):
+                model_kwargs = {}
+            return operation(**invoke_kwargs, **model_kwargs)
         else:
-            return self.client.chat.completions.create(
-                **invoke_kwargs, model=self.model
-            )
+            return self.client.chat.completions.create(**invoke_kwargs, **model_kwargs)

     async def async_custom_invoke(
         self,
-        operation: Optional[Callable[..., Awaitable[T]]] = None,
+        operation: Optional[Callable[..., Awaitable[Any]]] = None,
         **invoke_kwargs,
-    ) -> Optional[T]:
+    ) -> Union["ChatCompletion", "BaseModel"]:
         """
-        OpenAI-specific implementation of `ModelProvider.async_custom_invoke`.
+        Asynchronously invokes a model operation from the OpenAI client with the given keyword arguments.
+
+        This method provides flexibility to either:
+        - Call a specific async OpenAI client operation (e.g., `async_client.images.generate`).
+        - Default to `chat.completions.create` when no operation is provided.

-        Invokes an OpenAI model operation using the async client. For full details, see
-        `ModelProvider.async_custom_invoke`.
+        The operation must be an async callable that accepts keyword arguments.
+        If the callable does not accept a `model` parameter, it will be omitted from the call.

         Example:
         ```python
-        result = openai_model_provider.invoke(
+        result = await openai_model_provider.async_custom_invoke(
            openai_model_provider.async_client.images.generate,
            prompt="A futuristic cityscape at sunset",
            n=1,
            size="1024x1024",
        )
        ```
-        :param operation: Same as ModelProvider.async_custom_invoke.
-        :param invoke_kwargs: Same as ModelProvider.async_custom_invoke.
-        :return: Same as ModelProvider.async_custom_invoke.
+
+        :param operation: An async callable representing the OpenAI operation to invoke.
+                          If not provided, defaults to `async_client.chat.completions.create`.
+
+        :param invoke_kwargs: Additional keyword arguments to pass to the operation.
+                              These are merged with `default_invoke_kwargs` and may
+                              include parameters such as `temperature`, `max_tokens`,
+                              or `messages`.
+
+        :return: The full response returned by the awaited operation,
+                 typically an OpenAI `ChatCompletion` or other OpenAI SDK model.

         """
         invoke_kwargs = self.get_invoke_kwargs(invoke_kwargs)
+        model_kwargs = {"model": invoke_kwargs.pop("model", None) or self.model}
         if operation:
-            return await operation(**invoke_kwargs, model=self.model)
+            if not inspect.iscoroutinefunction(operation):
+                raise mlrun.errors.MLRunInvalidArgumentError(
+                    "OpenAI async_custom_invoke operation must be a coroutine function"
+                )
+            if not accepts_param(operation, "model"):
+                model_kwargs = {}
+            return await operation(**invoke_kwargs, **model_kwargs)
         else:
             return await self.async_client.chat.completions.create(
-                **invoke_kwargs, model=self.model
+                **invoke_kwargs, **model_kwargs
             )

+    def _response_handler(
+        self,
+        response: "ChatCompletion",
+        invoke_response_format: InvokeResponseFormat = InvokeResponseFormat.FULL,
+        **kwargs,
+    ) -> ["ChatCompletion", str, dict[str, Any]]:
+        if InvokeResponseFormat.is_str_response(invoke_response_format.value):
+            str_response = self._extract_string_output(response)
+            if invoke_response_format == InvokeResponseFormat.STRING:
+                return str_response
+            if invoke_response_format == InvokeResponseFormat.USAGE:
+                usage = response.to_dict()["usage"]
+                response = {
+                    UsageResponseKeys.ANSWER: str_response,
+                    UsageResponseKeys.USAGE: usage,
+                }
+        return response
+
     def invoke(
         self,
-        messages: Optional[list[dict]] = None,
-        as_str: bool = False,
+        messages: list[dict],
+        invoke_response_format: InvokeResponseFormat = InvokeResponseFormat.FULL,
         **invoke_kwargs,
-    ) -> Optional[Union[str, T]]:
+    ) -> Union[dict[str, Any], str, "ChatCompletion"]:
         """
         OpenAI-specific implementation of `ModelProvider.invoke`.
-        Invokes an OpenAI model operation using the sync client.
-        For full details, see `ModelProvider.invoke`.
+        Invokes an OpenAI model operation using the synchronous client.
+
+        :param messages:
+            A list of dictionaries representing the conversation history or input messages.
+            Each dictionary should follow the format::
+                {
+                    "role": "system" | "user" | "assistant",
+                    "content": "Message content as a string",
+                }
+
+            Example:
+
+            .. code-block:: json
+
+                [
+                    {"role": "system", "content": "You are a helpful assistant."},
+                    {"role": "user", "content": "What is the capital of France?"}
+                ]
+
+            Defaults to None if no messages are provided.
+
+        :param invoke_response_format:
+            Specifies the format of the returned response. Options:

-        :param messages: Same as ModelProvider.invoke.
+            - "string": Returns only the generated text content, taken from a single response.
+            - "usage": Combines the generated text with metadata (e.g., token usage), returning a dictionary::

-        :param as_str: bool
-            If `True`, returns only the main content of the first response
-            (`response.choices[0].message.content`).
-            If `False`, returns the full response object, whose type depends on
-            the specific OpenAI SDK operation used (e.g., chat completion, completion, etc.).
+              .. code-block:: json
+                  {
+                      "answer": "<generated_text>",
+                      "usage": <ChatCompletion>.to_dict()["usage"]
+                  }
+
+            - "full": Returns the full OpenAI `ChatCompletion` object.

         :param invoke_kwargs:
-            Same as ModelProvider.invoke.
-        :return: Same as ModelProvider.invoke.
+            Additional keyword arguments passed to the OpenAI client.

+        :return:
+            A string, dictionary, or `ChatCompletion` object, depending on `invoke_response_format`.
         """
+
         response = self.custom_invoke(messages=messages, **invoke_kwargs)
-        if as_str:
-            return response.choices[0].message.content
-        return response
+        return self._response_handler(
+            messages=messages,
+            invoke_response_format=invoke_response_format,
+            response=response,
+        )

     async def async_invoke(
         self,
-        messages: Optional[list[dict]] = None,
-        as_str: bool = False,
+        messages: list[dict],
+        invoke_response_format=InvokeResponseFormat.FULL,
         **invoke_kwargs,
-    ) -> str:
+    ) -> Union[str, "ChatCompletion", dict]:
         """
         OpenAI-specific implementation of `ModelProvider.async_invoke`.
-        Invokes an OpenAI model operation using the async client.
-        For full details, see `ModelProvider.async_invoke`.
+        Invokes an OpenAI model operation using the asynchronous client.
+
+        :param messages:
+            A list of dictionaries representing the conversation history or input messages.
+            Each dictionary should follow the format::
+                {
+                    "role": "system" | "user" | "assistant",
+                    "content": "Message content as a string",
+                }
+
+            Example:
+
+            .. code-block:: json

-        :param messages: Same as ModelProvider.async_invoke.
+                [
+                    {"role": "system", "content": "You are a helpful assistant."},
+                    {"role": "user", "content": "What is the capital of France?"}
+                ]

-        :param as_str: bool
-            If `True`, returns only the main content of the first response
-            (`response.choices[0].message.content`).
-            If `False`, returns the full awaited response object, whose type depends on
-            the specific OpenAI SDK operation used (e.g., chat completion, completion, etc.).
+            Defaults to None if no messages are provided.
+
+        :param invoke_response_format:
+            Specifies the format of the returned response. Options:
+
+            - "string": Returns only the generated text content, taken from a single response.
+            - "usage": Combines the generated text with metadata (e.g., token usage), returning a dictionary::
+
+            .. code-block:: json
+                {
+                    "answer": "<generated_text>",
+                    "usage": <ChatCompletion>.to_dict()["usage"]
+                }
+
+            - "full": Returns the full OpenAI `ChatCompletion` object.

         :param invoke_kwargs:
-            Same as ModelProvider.async_invoke.
-        :returns Same as ModelProvider.async_invoke.
+            Additional keyword arguments passed to the OpenAI client.

+        :return:
+            A string, dictionary, or `ChatCompletion` object, depending on `invoke_response_format`.
         """
         response = await self.async_custom_invoke(messages=messages, **invoke_kwargs)
-        if as_str:
-            return response.choices[0].message.content
-        return response
+        return self._response_handler(
+            messages=messages,
+            invoke_response_format=invoke_response_format,
+            response=response,
+        )
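
The practical upshot of this file's changes: the boolean `as_str` flag on `invoke`/`async_invoke` is replaced by the three-valued `invoke_response_format` enum, and the OpenAI clients are now created lazily on first access. A minimal sketch of the new call patterns, assuming a provider instance is already constructed (constructor arguments are omitted here and are not part of this diff):

```python
from mlrun.datastore.model_provider.model_provider import InvokeResponseFormat

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of France?"},
]

# provider = OpenAIProvider(...)  # hypothetical construction, args omitted

# FULL (the default): the raw ChatCompletion object, as before
completion = provider.invoke(messages=messages)

# STRING: only the generated text; raises MLRunInvalidArgumentError
# if the response carries more than one choice
text = provider.invoke(
    messages=messages, invoke_response_format=InvokeResponseFormat.STRING
)

# USAGE: {"answer": <text>, "usage": <ChatCompletion>.to_dict()["usage"]}
answer_and_usage = provider.invoke(
    messages=messages, invoke_response_format=InvokeResponseFormat.USAGE
)
```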
mlrun/datastore/s3.py CHANGED
@@ -13,6 +13,7 @@
 # limitations under the License.

 import time
+import warnings
 from typing import Optional
 from urllib.parse import urlparse

@@ -28,6 +29,27 @@ from .base import DataStore, FileStats, make_datastore_schema_sanitizer
 class S3Store(DataStore):
     using_bucket = True

+    # TODO: Remove this in 1.12.0
+    def _get_endpoint_url_with_deprecation_warning(self):
+        """Get S3 endpoint URL with backward compatibility for deprecated S3_ENDPOINT_URL"""
+        # First try the new environment variable
+        endpoint_url = self._get_secret_or_env("AWS_ENDPOINT_URL_S3")
+        if endpoint_url:
+            return endpoint_url
+
+        # Check for deprecated environment variable
+        deprecated_endpoint_url = self._get_secret_or_env("S3_ENDPOINT_URL")
+        if deprecated_endpoint_url:
+            warnings.warn(
+                "S3_ENDPOINT_URL is deprecated in 1.10.0 and will be removed in 1.12.0, "
+                "use AWS_ENDPOINT_URL_S3 instead.",
+                # TODO: Remove this in 1.12.0
+                FutureWarning,
+            )
+            return deprecated_endpoint_url
+
+        return None
+
     def __init__(
         self, parent, schema, name, endpoint="", secrets: Optional[dict] = None
     ):
@@ -41,7 +63,7 @@ class S3Store(DataStore):
         access_key_id = self._get_secret_or_env("AWS_ACCESS_KEY_ID")
         secret_key = self._get_secret_or_env("AWS_SECRET_ACCESS_KEY")
         token_file = self._get_secret_or_env("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE")
-        endpoint_url = self._get_secret_or_env("S3_ENDPOINT_URL")
+        endpoint_url = self._get_endpoint_url_with_deprecation_warning()
         force_non_anonymous = self._get_secret_or_env("S3_NON_ANONYMOUS")
         profile_name = self._get_secret_or_env("AWS_PROFILE")
         assume_role_arn = self._get_secret_or_env("MLRUN_AWS_ROLE_ARN")
@@ -159,7 +181,7 @@ class S3Store(DataStore):
     def get_storage_options(self):
        force_non_anonymous = self._get_secret_or_env("S3_NON_ANONYMOUS")
        profile = self._get_secret_or_env("AWS_PROFILE")
-       endpoint_url = self._get_secret_or_env("S3_ENDPOINT_URL")
+       endpoint_url = self._get_endpoint_url_with_deprecation_warning()
        access_key_id = self._get_secret_or_env("AWS_ACCESS_KEY_ID")
        secret = self._get_secret_or_env("AWS_SECRET_ACCESS_KEY")
        token_file = self._get_secret_or_env("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE")
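
For users of custom S3-compatible endpoints, the lookup order is now `AWS_ENDPOINT_URL_S3` first, falling back to the deprecated `S3_ENDPOINT_URL` with a `FutureWarning` until its removal in 1.12.0. A hedged migration sketch (the MinIO endpoint value is illustrative, not from the diff):

```python
import os

# Deprecated: still honored through the 1.10/1.11 line, but emits FutureWarning
os.environ["S3_ENDPOINT_URL"] = "http://minio.example.com:9000"

# Preferred as of 1.10.0; takes precedence when both are set
os.environ["AWS_ENDPOINT_URL_S3"] = "http://minio.example.com:9000"
```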
mlrun/datastore/storeytargets.py CHANGED
@@ -18,10 +18,9 @@ from mergedeep import merge
 from storey import V3ioDriver

 import mlrun
-import mlrun.model_monitoring.helpers
 from mlrun.datastore.base import DataStore
 from mlrun.datastore.datastore_profile import (
-    DatastoreProfileKafkaSource,
+    DatastoreProfileKafkaStream,
     DatastoreProfileKafkaTarget,
     DatastoreProfileTDEngine,
     datastore_profile_read,
@@ -138,7 +137,7 @@ class KafkaStoreyTarget(storey.KafkaTarget):
         datastore_profile = datastore_profile_read(path)
         if not isinstance(
             datastore_profile,
-            (DatastoreProfileKafkaSource, DatastoreProfileKafkaTarget),
+            (DatastoreProfileKafkaStream, DatastoreProfileKafkaTarget),
         ):
             raise mlrun.errors.MLRunInvalidArgumentError(
                 f"Unsupported datastore profile type: {type(datastore_profile)}"
            )
mlrun/datastore/utils.py CHANGED
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import inspect
 import math
 import tarfile
 import tempfile
@@ -319,7 +320,13 @@ def parse_url(url):
     parsed_url = urlparse(url)
     schema = parsed_url.scheme.lower()
     endpoint = parsed_url.hostname
-    if endpoint:
+
+    # Special handling for WASBS URLs to preserve container information
+    if schema in ["wasbs", "wasb"] and parsed_url.netloc and "@" in parsed_url.netloc:
+        # For wasbs://container@host format, preserve the full netloc as endpoint
+        # This allows the datastore to extract container later
+        endpoint = parsed_url.netloc
+    elif endpoint:
         # HACK - urlparse returns the hostname after in lower case - we want the original case:
         # the hostname is a substring of the netloc, in which it's the original case, so we find the indexes of the
         # hostname in the netloc and take it from there
@@ -330,6 +337,11 @@ def parse_url(url):
         endpoint = netloc[
             hostname_index_in_netloc : hostname_index_in_netloc + len(lower_hostname)
         ]
-    if parsed_url.port:
-        endpoint += f":{parsed_url.port}"
+        if parsed_url.port:
+            endpoint += f":{parsed_url.port}"
     return schema, endpoint, parsed_url
+
+
+def accepts_param(func: callable, param_name):
+    sig = inspect.signature(func)
+    return param_name in sig.parameters
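
Illustrative expectations for the two `utils` changes above, written as a hedged sketch (the sample callables and URL are invented for demonstration, not taken from mlrun's tests):

```python
from mlrun.datastore.utils import accepts_param, parse_url

def with_model(messages, model=None): ...
def without_model(messages): ...

# accepts_param inspects the signature, so custom_invoke can decide
# whether to forward a `model` keyword
assert accepts_param(with_model, "model")
assert not accepts_param(without_model, "model")

# wasbs://container@host URLs now keep the full netloc as the endpoint,
# so the Azure datastore can recover the container name later
schema, endpoint, _ = parse_url(
    "wasbs://mycontainer@myaccount.blob.core.windows.net/some/key"
)
assert schema == "wasbs"
assert endpoint == "mycontainer@myaccount.blob.core.windows.net"
```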
mlrun/db/base.py CHANGED
@@ -16,8 +16,6 @@ import datetime
 from abc import ABC, abstractmethod
 from typing import Literal, Optional, Union

-from deprecated import deprecated
-
 import mlrun.alerts
 import mlrun.common
 import mlrun.common.formatters
@@ -445,23 +443,6 @@ class RunDBInterface(ABC):
     ) -> dict:
         pass

-    # TODO: remove in 1.10.0
-    @deprecated(
-        version="1.7.0",
-        reason="'list_features' will be removed in 1.10.0, use 'list_features_v2' instead",
-        category=FutureWarning,
-    )
-    @abstractmethod
-    def list_features(
-        self,
-        project: str,
-        name: Optional[str] = None,
-        tag: Optional[str] = None,
-        entities: Optional[list[str]] = None,
-        labels: Optional[Union[str, dict[str, Optional[str]], list[str]]] = None,
-    ) -> mlrun.common.schemas.FeaturesOutput:
-        pass
-
     @abstractmethod
     def list_features_v2(
         self,
@@ -741,6 +722,9 @@ class RunDBInterface(ABC):
         tsdb_metrics: bool = False,
         metric_list: Optional[list[str]] = None,
         top_level: bool = False,
+        modes: Optional[
+            Union[mm_constants.EndpointMode, list[mm_constants.EndpointMode]]
+        ] = None,
         uids: Optional[list[str]] = None,
         latest_only: bool = False,
     ) -> mlrun.common.schemas.ModelEndpointList:
@@ -792,6 +776,7 @@ class RunDBInterface(ABC):
         item_name: Optional[str] = None,
         tag: Optional[str] = None,
         version: Optional[str] = None,
+        item_type: mlrun.common.schemas.hub.HubSourceType = mlrun.common.schemas.hub.HubSourceType.functions,
     ):
         pass

@@ -810,6 +795,7 @@ class RunDBInterface(ABC):
         version: Optional[str] = None,
         tag: Optional[str] = None,
         force_refresh: bool = False,
+        object_type: mlrun.common.schemas.hub.HubSourceType = mlrun.common.schemas.hub.HubSourceType.functions,
     ):
         pass

@@ -821,6 +807,19 @@ class RunDBInterface(ABC):
         version: Optional[str] = None,
         tag: str = "latest",
         force_refresh: bool = False,
+        item_type: mlrun.common.schemas.hub.HubSourceType = mlrun.common.schemas.hub.HubSourceType.functions,
+    ):
+        pass
+
+    @abstractmethod
+    def get_hub_asset(
+        self,
+        source_name: str,
+        item_name: str,
+        asset_name: str,
+        version: Optional[str] = None,
+        tag: str = "latest",
+        item_type: mlrun.common.schemas.hub.HubSourceType = mlrun.common.schemas.hub.HubSourceType.functions,
     ):
         pass

@@ -1129,6 +1128,15 @@ class RunDBInterface(ABC):
     ) -> None:
         pass

+    @abstractmethod
+    def delete_model_monitoring_metrics(
+        self,
+        project: str,
+        application_name: str,
+        endpoint_ids: Optional[list[str]] = None,
+    ) -> None:
+        pass
+
     @abstractmethod
     def get_monitoring_function_summaries(
         self,
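
Because `RunDBInterface` is an ABC, every concrete DB implementation (the diff touches `httpdb` and `nopdb` accordingly) must now provide the new abstract methods. A hedged stub of the two additions, with signatures taken from the diff above and placeholder bodies:

```python
from typing import Optional

import mlrun.common.schemas
from mlrun.db.base import RunDBInterface


class MyRunDB(RunDBInterface):
    # ... all other abstract methods omitted for brevity ...

    def get_hub_asset(
        self,
        source_name: str,
        item_name: str,
        asset_name: str,
        version: Optional[str] = None,
        tag: str = "latest",
        item_type: mlrun.common.schemas.hub.HubSourceType = mlrun.common.schemas.hub.HubSourceType.functions,
    ):
        raise NotImplementedError  # placeholder body

    def delete_model_monitoring_metrics(
        self,
        project: str,
        application_name: str,
        endpoint_ids: Optional[list[str]] = None,
    ) -> None:
        raise NotImplementedError  # placeholder body
```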