mlrun 1.10.0rc16__py3-none-any.whl → 1.10.0rc42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (98)
  1. mlrun/__init__.py +22 -2
  2. mlrun/artifacts/document.py +6 -1
  3. mlrun/artifacts/llm_prompt.py +21 -15
  4. mlrun/artifacts/model.py +3 -3
  5. mlrun/common/constants.py +9 -0
  6. mlrun/common/formatters/artifact.py +1 -0
  7. mlrun/common/model_monitoring/helpers.py +86 -0
  8. mlrun/common/schemas/__init__.py +2 -0
  9. mlrun/common/schemas/auth.py +2 -0
  10. mlrun/common/schemas/function.py +10 -0
  11. mlrun/common/schemas/hub.py +30 -18
  12. mlrun/common/schemas/model_monitoring/__init__.py +2 -0
  13. mlrun/common/schemas/model_monitoring/constants.py +30 -6
  14. mlrun/common/schemas/model_monitoring/functions.py +13 -4
  15. mlrun/common/schemas/model_monitoring/model_endpoints.py +11 -0
  16. mlrun/common/schemas/pipeline.py +1 -1
  17. mlrun/common/schemas/serving.py +3 -0
  18. mlrun/common/schemas/workflow.py +1 -0
  19. mlrun/common/secrets.py +22 -1
  20. mlrun/config.py +32 -10
  21. mlrun/datastore/__init__.py +11 -3
  22. mlrun/datastore/azure_blob.py +162 -47
  23. mlrun/datastore/datastore.py +9 -4
  24. mlrun/datastore/datastore_profile.py +61 -5
  25. mlrun/datastore/model_provider/huggingface_provider.py +363 -0
  26. mlrun/datastore/model_provider/mock_model_provider.py +87 -0
  27. mlrun/datastore/model_provider/model_provider.py +211 -74
  28. mlrun/datastore/model_provider/openai_provider.py +243 -71
  29. mlrun/datastore/s3.py +24 -2
  30. mlrun/datastore/storeytargets.py +2 -3
  31. mlrun/datastore/utils.py +15 -3
  32. mlrun/db/base.py +27 -19
  33. mlrun/db/httpdb.py +57 -48
  34. mlrun/db/nopdb.py +25 -10
  35. mlrun/execution.py +55 -13
  36. mlrun/hub/__init__.py +15 -0
  37. mlrun/hub/module.py +181 -0
  38. mlrun/k8s_utils.py +105 -16
  39. mlrun/launcher/base.py +13 -6
  40. mlrun/launcher/local.py +2 -0
  41. mlrun/model.py +9 -3
  42. mlrun/model_monitoring/api.py +66 -27
  43. mlrun/model_monitoring/applications/__init__.py +1 -1
  44. mlrun/model_monitoring/applications/base.py +372 -136
  45. mlrun/model_monitoring/applications/context.py +2 -4
  46. mlrun/model_monitoring/applications/results.py +4 -7
  47. mlrun/model_monitoring/controller.py +239 -101
  48. mlrun/model_monitoring/db/_schedules.py +36 -13
  49. mlrun/model_monitoring/db/_stats.py +4 -3
  50. mlrun/model_monitoring/db/tsdb/base.py +29 -9
  51. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +4 -5
  52. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +154 -50
  53. mlrun/model_monitoring/db/tsdb/tdengine/writer_graph_steps.py +51 -0
  54. mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py +17 -4
  55. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +245 -51
  56. mlrun/model_monitoring/helpers.py +28 -5
  57. mlrun/model_monitoring/stream_processing.py +45 -14
  58. mlrun/model_monitoring/writer.py +220 -1
  59. mlrun/platforms/__init__.py +3 -2
  60. mlrun/platforms/iguazio.py +7 -3
  61. mlrun/projects/operations.py +6 -1
  62. mlrun/projects/pipelines.py +2 -2
  63. mlrun/projects/project.py +128 -45
  64. mlrun/run.py +94 -17
  65. mlrun/runtimes/__init__.py +18 -0
  66. mlrun/runtimes/base.py +14 -6
  67. mlrun/runtimes/daskjob.py +1 -0
  68. mlrun/runtimes/local.py +5 -2
  69. mlrun/runtimes/mounts.py +20 -2
  70. mlrun/runtimes/nuclio/__init__.py +1 -0
  71. mlrun/runtimes/nuclio/application/application.py +147 -17
  72. mlrun/runtimes/nuclio/function.py +70 -27
  73. mlrun/runtimes/nuclio/serving.py +85 -4
  74. mlrun/runtimes/pod.py +213 -21
  75. mlrun/runtimes/utils.py +49 -9
  76. mlrun/secrets.py +54 -13
  77. mlrun/serving/remote.py +79 -6
  78. mlrun/serving/routers.py +23 -41
  79. mlrun/serving/server.py +211 -40
  80. mlrun/serving/states.py +536 -156
  81. mlrun/serving/steps.py +62 -0
  82. mlrun/serving/system_steps.py +136 -81
  83. mlrun/serving/v2_serving.py +9 -10
  84. mlrun/utils/helpers.py +212 -82
  85. mlrun/utils/logger.py +3 -1
  86. mlrun/utils/notifications/notification/base.py +18 -0
  87. mlrun/utils/notifications/notification/git.py +2 -4
  88. mlrun/utils/notifications/notification/slack.py +2 -4
  89. mlrun/utils/notifications/notification/webhook.py +2 -5
  90. mlrun/utils/notifications/notification_pusher.py +1 -1
  91. mlrun/utils/version/version.json +2 -2
  92. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/METADATA +44 -45
  93. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/RECORD +97 -92
  94. mlrun/api/schemas/__init__.py +0 -259
  95. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/WHEEL +0 -0
  96. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/entry_points.txt +0 -0
  97. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/licenses/LICENSE +0 -0
  98. {mlrun-1.10.0rc16.dist-info → mlrun-1.10.0rc42.dist-info}/top_level.txt +0 -0
mlrun/k8s_utils.py CHANGED
@@ -26,6 +26,10 @@ from .config import config as mlconfig
 
 _running_inside_kubernetes_cluster = None
 
+K8sObj = typing.Union[kubernetes.client.V1Affinity, kubernetes.client.V1Toleration]
+SanitizedK8sObj = dict[str, typing.Any]
+K8sObjList = typing.Union[list[K8sObj], list[SanitizedK8sObj]]
+
 
 def is_running_inside_kubernetes_cluster():
     global _running_inside_kubernetes_cluster
@@ -232,6 +236,54 @@ def validate_node_selectors(
     return True
 
 
+def sanitize_k8s_objects(
+    k8s_objects: typing.Union[None, K8sObjList, SanitizedK8sObj, K8sObj],
+) -> typing.Union[list[SanitizedK8sObj], SanitizedK8sObj]:
+    """Convert K8s objects to dicts. Handles single objects or lists."""
+    api_client = kubernetes.client.ApiClient()
+    if not k8s_objects:
+        return k8s_objects
+
+    def _sanitize_k8s_object(k8s_obj):
+        return (
+            api_client.sanitize_for_serialization(k8s_obj)
+            if hasattr(k8s_obj, "to_dict")
+            else k8s_obj
+        )
+
+    return (
+        [_sanitize_k8s_object(k8s_obj) for k8s_obj in k8s_objects]
+        if isinstance(k8s_objects, list)
+        else _sanitize_k8s_object(k8s_objects)
+    )
+
+
+def sanitize_scheduling_configuration(
+    tolerations: typing.Optional[list[kubernetes.client.V1Toleration]] = None,
+    affinity: typing.Optional[kubernetes.client.V1Affinity] = None,
+) -> tuple[
+    typing.Optional[list[dict]],
+    typing.Optional[dict],
+]:
+    """
+    Sanitizes pod scheduling configuration for serialization.
+
+    Takes affinity and tolerations and converts them to
+    JSON-serializable dictionaries using the Kubernetes API client's
+    sanitization method.
+
+    Args:
+        affinity: Pod affinity/anti-affinity rules
+        tolerations: List of toleration rules
+
+    Returns:
+        Tuple of (sanitized_affinity, sanitized_tolerations)
+        - affinity: Sanitized dict representation or None
+        - tolerations: List of sanitized dict representations or None
+    """
+    return sanitize_k8s_objects(tolerations), sanitize_k8s_objects(affinity)
+
+
 def enrich_preemption_mode(
     preemption_mode: typing.Optional[str],
     node_selector: dict[str, str],
@@ -269,8 +321,8 @@ def enrich_preemption_mode(
     )
 
     enriched_node_selector = copy.deepcopy(node_selector or {})
-    enriched_tolerations = copy.deepcopy(tolerations or [])
-    enriched_affinity = copy.deepcopy(affinity)
+    enriched_tolerations = _safe_copy_tolerations(tolerations or [])
+    enriched_affinity = _safe_copy_affinity(affinity)
     preemptible_tolerations = generate_preemptible_tolerations()
 
     if handler := _get_mode_handler(preemption_mode):
@@ -288,6 +340,57 @@ def enrich_preemption_mode(
     )
 
 
+def _safe_copy_tolerations(
+    tolerations: list[kubernetes.client.V1Toleration],
+) -> list[kubernetes.client.V1Toleration]:
+    """
+    Safely copy a list of V1Toleration objects without mutating the originals.
+
+    Explicitly reconstructs V1Toleration objects instead of using deepcopy() to avoid
+    serialization errors with K8s client objects that contain threading primitives
+    and non-copyable elements like RLock objects.
+
+    Args:
+        tolerations: List of V1Toleration objects to copy
+
+    Returns:
+        New list containing copied V1Toleration objects with identical field values"""
+    return [
+        kubernetes.client.V1Toleration(
+            effect=toleration.effect,
+            key=toleration.key,
+            value=toleration.value,
+            operator=toleration.operator,
+            toleration_seconds=toleration.toleration_seconds,
+        )
+        for toleration in tolerations
+    ]
+
+
+def _safe_copy_affinity(
+    affinity: kubernetes.client.V1Affinity,
+) -> kubernetes.client.V1Affinity:
+    """
+    Safely create a deep copy of a V1Affinity object.
+
+    Uses K8s API client serialization/deserialization instead of deepcopy() to avoid
+    errors with threading primitives and complex internal structures in K8s objects.
+    Serializes to dict then deserializes back to a clean V1Affinity object.
+
+    Args:
+        affinity: V1Affinity object to copy, or None
+
+    Returns:
+        New V1Affinity object with identical field values, or None if input was None
+    """
+    if not affinity:
+        return None
+    api_client = kubernetes.client.ApiClient()
+    # Convert to dict then back to object properly
+    affinity_dict = api_client.sanitize_for_serialization(affinity)
+    return api_client._ApiClient__deserialize(affinity_dict, "V1Affinity")
+
+
 def _get_mode_handler(mode: str):
     return {
         mlrun.common.schemas.PreemptionModes.prevent: _handle_prevent_mode,
@@ -367,20 +470,6 @@ def _handle_allow_mode(
     list[kubernetes.client.V1Toleration],
     typing.Optional[kubernetes.client.V1Affinity],
 ]:
-    for op in [
-        mlrun.common.schemas.NodeSelectorOperator.node_selector_op_not_in.value,
-        mlrun.common.schemas.NodeSelectorOperator.node_selector_op_in.value,
-    ]:
-        affinity = _prune_affinity_node_selector_requirement(
-            generate_preemptible_node_selector_requirements(op),
-            affinity=affinity,
-        )
-
-    node_selector = _prune_node_selector(
-        mlconfig.get_preemptible_node_selector(),
-        enriched_node_selector=node_selector,
-    )
-
     tolerations = _merge_tolerations(tolerations, preemptible_tolerations)
     return node_selector, tolerations, affinity
 
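A minimal usage sketch of the helpers added above (values are illustrative, not mlrun defaults). Note that, per the implementation, sanitize_scheduling_configuration returns the sanitized tolerations first and the sanitized affinity second, even though its docstring lists the opposite order.

    # Sketch only: exercising the new module-level helpers in mlrun/k8s_utils.py.
    import kubernetes.client

    from mlrun.k8s_utils import sanitize_k8s_objects, sanitize_scheduling_configuration

    # Illustrative toleration; any V1Toleration is handled the same way.
    toleration = kubernetes.client.V1Toleration(
        key="example.com/spot", operator="Equal", value="true", effect="NoSchedule"
    )

    # A single object or a list of objects becomes plain, JSON-serializable dicts.
    sanitized_list = sanitize_k8s_objects([toleration])

    # Per the code above, the returned tuple is (tolerations, affinity).
    sanitized_tolerations, sanitized_affinity = sanitize_scheduling_configuration(
        tolerations=[toleration], affinity=None
    )
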
mlrun/launcher/base.py CHANGED
@@ -157,6 +157,19 @@ class BaseLauncher(abc.ABC):
     ]:
         mlrun.utils.helpers.warn_on_deprecated_image(image)
 
+        # Raise an error if retry is configured for a runtime that doesn't support retries.
+        # For local runs, we intentionally skip this validation and allow the run to proceed, since they are typically
+        # used for debugging purposes, and in such cases we avoid blocking their execution.
+        if (
+            not mlrun.runtimes.RuntimeKinds.is_local_runtime(runtime.kind)
+            and run.spec.retry.count
+            and runtime.kind not in mlrun.runtimes.RuntimeKinds.retriable_runtimes()
+        ):
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                f"Retry is not supported for {runtime.kind} runtime, supported runtimes are: "
+                f"{mlrun.runtimes.RuntimeKinds.retriable_runtimes()}"
+            )
+
     @staticmethod
     def _validate_output_path(
         runtime: "mlrun.runtimes.BaseRuntime",
@@ -268,12 +281,6 @@ class BaseLauncher(abc.ABC):
 
         run.metadata.name = mlrun.utils.normalize_name(
             name=name or run.metadata.name or def_name,
-            # if name or runspec.metadata.name are set then it means that is user defined name and we want to warn the
-            # user that the passed name needs to be set without underscore, if its not user defined but rather enriched
-            # from the handler(function) name then we replace the underscore without warning the user.
-            # most of the time handlers will have `_` in the handler name (python convention is to separate function
-            # words with `_`), therefore we don't want to be noisy when normalizing the run name
-            verbose=bool(name or run.metadata.name),
         )
         mlrun.utils.verify_field_regex(
            "run.metadata.name", run.metadata.name, mlrun.utils.regex.run_name
mlrun/launcher/local.py CHANGED
@@ -243,6 +243,8 @@ class ClientLocalLauncher(launcher.ClientBaseLauncher):
 
         # if the handler has module prefix force "local" (vs "handler") runtime
         kind = "local" if isinstance(handler, str) and "." in handler else ""
+
+        # Create temporary local function for execution
         fn = mlrun.new_function(meta.name, command=command, args=args, kind=kind)
         fn.metadata = meta
         setattr(fn, "_is_run_local", True)
mlrun/model.py CHANGED
@@ -29,6 +29,7 @@ import pydantic.v1.error_wrappers
 import mlrun
 import mlrun.common.constants as mlrun_constants
 import mlrun.common.schemas.notification
+import mlrun.common.secrets
 import mlrun.utils.regex
 
 from .utils import (
@@ -667,7 +668,7 @@ class ImageBuilder(ModelObj):
         """
         requirements = requirements or []
         self._verify_list(requirements, "requirements")
-        resolved_requirements = self._resolve_requirements(
+        resolved_requirements = self.resolve_requirements(
             requirements, requirements_file
         )
         requirements = self.requirements or [] if not overwrite else []
@@ -680,7 +681,7 @@ class ImageBuilder(ModelObj):
         self.requirements = requirements
 
     @staticmethod
-    def _resolve_requirements(requirements: list, requirements_file: str = "") -> list:
+    def resolve_requirements(requirements: list, requirements_file: str = "") -> list:
         requirements = requirements or []
         requirements_to_resolve = []
 
@@ -1616,7 +1617,12 @@ class RunTemplate(ModelObj):
 
         :returns: The RunTemplate object
         """
-
+        if kind == "azure_vault" and isinstance(source, dict):
+            candidate_secret_name = (source.get("k8s_secret") or "").strip()
+            if candidate_secret_name:
+                mlrun.common.secrets.validate_not_forbidden_secret(
+                    candidate_secret_name
+                )
         if kind == "vault" and isinstance(source, list):
             source = {"project": self.metadata.project, "secrets": source}
 
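With _resolve_requirements promoted to the public static method resolve_requirements above (it is reused by the model-monitoring hub flow later in this diff), a minimal sketch of calling it directly; the requirements-file path is illustrative:

    # Sketch only: the renamed public static method; "requirements.txt" is an illustrative path.
    from mlrun.model import ImageBuilder

    resolved = ImageBuilder.resolve_requirements(
        requirements=["pandas>=2.0"], requirements_file="requirements.txt"
    )
    print(resolved)
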
mlrun/model_monitoring/api.py CHANGED
@@ -18,9 +18,8 @@ from datetime import datetime
 
 import numpy as np
 import pandas as pd
+from deprecated import deprecated
 
-import mlrun.artifacts
-import mlrun.common.helpers
 import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.datastore.base
 import mlrun.feature_store
@@ -31,8 +30,9 @@ from mlrun.common.schemas.model_monitoring import (
     FunctionURI,
 )
 from mlrun.data_types.infer import InferOptions, get_df_stats
-from mlrun.utils import datetime_now, logger
+from mlrun.utils import check_if_hub_uri, datetime_now, logger, merge_requirements
 
+from ..common.schemas.hub import HubModuleType
 from .helpers import update_model_endpoint_last_request
 
 # A union of all supported dataset types:
@@ -47,6 +47,14 @@ DatasetType = typing.Union[
 ]
 
 
+# TODO: Remove this in 1.12.0
+@deprecated(
+    version="1.10.0",
+    reason="This function is deprecated and will be removed in 1.12. You can generate a model endpoint by either "
+    "deploying a monitored serving function as a real-time service or running it as an offline job. "
+    "To retrieve model endpoints, use `project.list_model_endpoints()`",
+    category=FutureWarning,
+)
 def get_or_create_model_endpoint(
     project: str,
     model_endpoint_name: str,
@@ -69,8 +77,8 @@
     :param model_endpoint_name: If a new model endpoint is created, the model endpoint name will be presented
                                 under this endpoint (applicable only to new endpoint_id).
     :param model_path:          The model store path (applicable only to new endpoint_id).
-    :param endpoint_id:         Model endpoint unique ID. If not exist in DB, will generate a new record based
-                                on the provided `endpoint_id`.
+    :param endpoint_id:         Model endpoint unique ID. If not exist in DB, will generate a new record with a
+                                newly generated ID.
     :param function_name:       If a new model endpoint is created, use this function name.
     :param function_tag:        If a new model endpoint is created, use this function tag.
     :param context:             MLRun context. If `function_name` not provided, use the context to generate the
@@ -93,25 +101,26 @@
         function_name = FunctionURI.from_string(
             context.to_dict()["spec"]["function"]
         ).function
-    try:
-        model_endpoint = db_session.get_model_endpoint(
-            project=project,
-            name=model_endpoint_name,
-            endpoint_id=endpoint_id,
-            function_name=function_name,
-            function_tag=function_tag or "latest",
-            feature_analysis=feature_analysis,
-        )
-        # If other fields provided, validate that they are correspond to the existing model endpoint data
-        _model_endpoint_validations(
-            model_endpoint=model_endpoint,
-            model_path=model_path,
-            sample_set_statistics=sample_set_statistics,
-        )
+    if endpoint_id or function_name:
+        try:
+            model_endpoint = db_session.get_model_endpoint(
+                project=project,
+                name=model_endpoint_name,
+                endpoint_id=endpoint_id,
+                function_name=function_name,
+                function_tag=function_tag or "latest",
+                feature_analysis=feature_analysis,
+            )
+            # If other fields provided, validate that they are correspond to the existing model endpoint data
+            _model_endpoint_validations(
+                model_endpoint=model_endpoint,
+                model_path=model_path,
+                sample_set_statistics=sample_set_statistics,
+            )
 
-    except (mlrun.errors.MLRunNotFoundError, mlrun.errors.MLRunInvalidArgumentError):
-        # Create a new model endpoint with the provided details
-        pass
+        except mlrun.errors.MLRunNotFoundError:
+            # Create a new model endpoint with the provided details
+            pass
     if not model_endpoint:
         model_endpoint = _generate_model_endpoint(
             project=project,
@@ -125,6 +134,13 @@
     return model_endpoint
 
 
+# TODO: Remove this in 1.12.0
+@deprecated(
+    version="1.10.0",
+    reason="This function is deprecated and will be removed in 1.12. "
+    "Instead, run a monitored serving function as a job",
+    category=FutureWarning,
+)
 def record_results(
     project: str,
     model_path: str,
@@ -146,8 +162,8 @@
     :param model_path:          The model Store path.
     :param model_endpoint_name: If a new model endpoint is generated, the model endpoint name will be presented
                                 under this endpoint.
-    :param endpoint_id:         Model endpoint unique ID. If not exist in DB, will generate a new record based
-                                on the provided `endpoint_id`.
+    :param endpoint_id:         Model endpoint unique ID. If not exist in DB, will generate a new record with a
+                                newly generated ID.
    :param function_name:       If a new model endpoint is created, use this function name for generating the
                                function URI.
    :param context:             MLRun context. Note that the context is required generating the model endpoint.
@@ -238,6 +254,7 @@
         key=model_obj.key,
         iter=model_obj.iter,
         tree=model_obj.tree,
+        uid=model_obj.uid,
     )
 
     # Enrich the uri schema with the store prefix
@@ -327,12 +344,15 @@
 
     :return `mlrun.common.schemas.ModelEndpoint` object.
     """
+
     current_time = datetime_now()
     model_endpoint = mlrun.common.schemas.ModelEndpoint(
         metadata=mlrun.common.schemas.ModelEndpointMetadata(
             project=project,
             name=model_endpoint_name,
             endpoint_type=mlrun.common.schemas.model_monitoring.EndpointType.BATCH_EP,
+            # Due to backwards compatibility, this endpoint will be created as a legacy batch endpoint.
+            mode=mlrun.common.schemas.model_monitoring.EndpointMode.BATCH_LEGACY,
         ),
         spec=mlrun.common.schemas.ModelEndpointSpec(
             function_name=function_name or "function",
@@ -529,8 +549,9 @@
     name: typing.Optional[str] = None,
     image: typing.Optional[str] = None,
     tag: typing.Optional[str] = None,
-    requirements: typing.Union[str, list[str], None] = None,
+    requirements: typing.Union[list[str], None] = None,
     requirements_file: str = "",
+    local_path: typing.Optional[str] = None,
     **application_kwargs,
 ) -> mlrun.runtimes.ServingRuntime:
     """
@@ -538,12 +559,30 @@
     This function does not set the labels or mounts v3io.
     """
     if name in mm_constants._RESERVED_FUNCTION_NAMES:
-        raise mlrun.errors.MLRunInvalidArgumentError(
+        raise mlrun.errors.MLRunValueError(
            "An application cannot have the following names: "
            f"{mm_constants._RESERVED_FUNCTION_NAMES}"
         )
+    _, has_valid_suffix, suffix = mlrun.utils.helpers.ensure_batch_job_suffix(name)
+    if name and not has_valid_suffix:
+        raise mlrun.errors.MLRunValueError(
+            f"Model monitoring application names cannot end with `{suffix}`"
+        )
     if func is None:
         func = ""
+    if check_if_hub_uri(func):
+        hub_module = mlrun.get_hub_module(url=func, local_path=local_path)
+        if hub_module.kind != HubModuleType.monitoring_app:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "The provided module is not a monitoring application"
+            )
+        requirements = mlrun.model.ImageBuilder.resolve_requirements(
+            requirements, requirements_file
+        )
+        requirements = merge_requirements(
+            reqs_priority=requirements, reqs_secondary=hub_module.requirements
+        )
+        func = hub_module.get_module_file_path()
     func_obj = typing.cast(
         mlrun.runtimes.ServingRuntime,
         mlrun.code_to_function(
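
get_or_create_model_endpoint and record_results are now deprecated above in favor of running a monitored serving function and listing endpoints from the project; a minimal sketch of the retrieval path the deprecation message points to (project name and context path are illustrative):

    # Sketch only: the replacement suggested by the deprecation messages above.
    import mlrun

    project = mlrun.get_or_create_project("my-project", context="./")
    model_endpoints = project.list_model_endpoints()
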
mlrun/model_monitoring/applications/__init__.py CHANGED
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .base import ModelMonitoringApplicationBase
+from .base import ExistingDataHandling, ModelMonitoringApplicationBase
 from .context import MonitoringApplicationContext
 from .results import ModelMonitoringApplicationMetric, ModelMonitoringApplicationResult
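
The one-line change above re-exports ExistingDataHandling next to the application base class; a minimal import sketch (how applications consume ExistingDataHandling is not shown in this diff):

    # Sketch only: names exposed by mlrun.model_monitoring.applications after this change.
    from mlrun.model_monitoring.applications import (
        ExistingDataHandling,
        ModelMonitoringApplicationBase,
        MonitoringApplicationContext,
    )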