mlrun 1.10.0rc38__py3-none-any.whl → 1.10.0rc41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic; consult the registry's advisory for details.

Files changed (32)
  1. mlrun/artifacts/document.py +6 -1
  2. mlrun/common/constants.py +6 -0
  3. mlrun/common/model_monitoring/helpers.py +1 -1
  4. mlrun/common/schemas/model_monitoring/constants.py +0 -2
  5. mlrun/common/secrets.py +22 -1
  6. mlrun/launcher/local.py +2 -0
  7. mlrun/model.py +7 -1
  8. mlrun/model_monitoring/api.py +3 -2
  9. mlrun/model_monitoring/applications/base.py +6 -3
  10. mlrun/model_monitoring/applications/context.py +1 -0
  11. mlrun/model_monitoring/db/tsdb/base.py +2 -4
  12. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +8 -9
  13. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +154 -76
  14. mlrun/projects/project.py +15 -2
  15. mlrun/run.py +7 -0
  16. mlrun/runtimes/__init__.py +18 -0
  17. mlrun/runtimes/base.py +3 -0
  18. mlrun/runtimes/local.py +5 -2
  19. mlrun/runtimes/mounts.py +5 -0
  20. mlrun/runtimes/nuclio/application/application.py +2 -0
  21. mlrun/runtimes/nuclio/function.py +2 -0
  22. mlrun/runtimes/nuclio/serving.py +67 -4
  23. mlrun/runtimes/pod.py +59 -10
  24. mlrun/serving/states.py +45 -21
  25. mlrun/utils/helpers.py +77 -2
  26. mlrun/utils/version/version.json +2 -2
  27. {mlrun-1.10.0rc38.dist-info → mlrun-1.10.0rc41.dist-info}/METADATA +3 -3
  28. {mlrun-1.10.0rc38.dist-info → mlrun-1.10.0rc41.dist-info}/RECORD +32 -32
  29. {mlrun-1.10.0rc38.dist-info → mlrun-1.10.0rc41.dist-info}/WHEEL +0 -0
  30. {mlrun-1.10.0rc38.dist-info → mlrun-1.10.0rc41.dist-info}/entry_points.txt +0 -0
  31. {mlrun-1.10.0rc38.dist-info → mlrun-1.10.0rc41.dist-info}/licenses/LICENSE +0 -0
  32. {mlrun-1.10.0rc38.dist-info → mlrun-1.10.0rc41.dist-info}/top_level.txt +0 -0
mlrun/runtimes/local.py CHANGED
@@ -29,12 +29,12 @@ from os import environ, remove
29
29
  from pathlib import Path
30
30
  from subprocess import PIPE, Popen
31
31
  from sys import executable
32
+ from typing import Optional
32
33
 
33
34
  from nuclio import Event
34
35
 
35
36
  import mlrun
36
37
  import mlrun.common.constants as mlrun_constants
37
- import mlrun.common.runtimes.constants
38
38
  from mlrun.lists import RunList
39
39
 
40
40
  from ..errors import err_to_str
@@ -201,9 +201,12 @@ class LocalRuntime(BaseRuntime, ParallelRunner):
201
201
  kind = "local"
202
202
  _is_remote = False
203
203
 
204
- def to_job(self, image=""):
204
+ def to_job(self, image="", func_name: Optional[str] = None):
205
205
  struct = self.to_dict()
206
206
  obj = KubejobRuntime.from_dict(struct)
207
+ obj.kind = "job" # Ensure kind is set to 'job' for KubejobRuntime
208
+ if func_name:
209
+ obj.metadata.name = func_name
207
210
  if image:
208
211
  obj.spec.image = image
209
212
  return obj
mlrun/runtimes/mounts.py CHANGED
@@ -17,6 +17,8 @@ import typing
17
17
  import warnings
18
18
  from collections import namedtuple
19
19
 
20
+ import mlrun.common.secrets
21
+ import mlrun.errors
20
22
  from mlrun.config import config
21
23
  from mlrun.config import config as mlconf
22
24
  from mlrun.errors import MLRunInvalidArgumentError
@@ -412,6 +414,9 @@ def mount_secret(
412
414
  the specified paths, and unlisted keys will not be
413
415
  present."""
414
416
 
417
+ if secret_name:
418
+ mlrun.common.secrets.validate_not_forbidden_secret(secret_name.strip())
419
+
415
420
  def _mount_secret(runtime: "KubeResource"):
416
421
  # Define the secret volume source
417
422
  secret_volume_source = {
@@ -400,6 +400,8 @@ class ApplicationRuntime(RemoteRuntime):
400
400
 
401
401
  :return: The default API gateway URL if created or True if the function is ready (deployed)
402
402
  """
403
+ mlrun.utils.helpers.validate_function_name(self.metadata.name)
404
+
403
405
  if (self.requires_build() and not self.spec.image) or force_build:
404
406
  self._fill_credentials()
405
407
  self._build_application_image(
@@ -655,6 +655,8 @@ class RemoteRuntime(KubeResource):
655
655
  if tag:
656
656
  self.metadata.tag = tag
657
657
 
658
+ mlrun.utils.helpers.validate_function_name(self.metadata.name)
659
+
658
660
  # Attempt auto-mounting, before sending to remote build
659
661
  self.try_auto_mount_based_on_config()
660
662
  self._fill_credentials()
@@ -23,6 +23,7 @@ from nuclio import KafkaTrigger
23
23
 
24
24
  import mlrun
25
25
  import mlrun.common.schemas as schemas
26
+ import mlrun.common.secrets
26
27
  import mlrun.datastore.datastore_profile as ds_profile
27
28
  from mlrun.datastore import get_kafka_brokers_from_dict, parse_kafka_url
28
29
  from mlrun.model import ObjectList
@@ -635,7 +636,12 @@ class ServingRuntime(RemoteRuntime):
635
636
 
636
637
  :returns: The Runtime (function) object
637
638
  """
638
-
639
+ if kind == "azure_vault" and isinstance(source, dict):
640
+ candidate_secret_name = (source.get("k8s_secret") or "").strip()
641
+ if candidate_secret_name:
642
+ mlrun.common.secrets.validate_not_forbidden_secret(
643
+ candidate_secret_name
644
+ )
639
645
  if kind == "vault" and isinstance(source, list):
640
646
  source = {"project": self.metadata.project, "secrets": source}
641
647
 
@@ -659,6 +665,9 @@ class ServingRuntime(RemoteRuntime):
659
665
  :param builder_env: env vars dict for source archive config/credentials e.g. builder_env={"GIT_TOKEN": token}
660
666
  :param force_build: set True for force building the image
661
667
  """
668
+ # Validate function name before deploying to k8s
669
+ mlrun.utils.helpers.validate_function_name(self.metadata.name)
670
+
662
671
  load_mode = self.spec.load_mode
663
672
  if load_mode and load_mode not in ["sync", "async"]:
664
673
  raise ValueError(f"illegal model loading mode {load_mode}")
@@ -855,8 +864,20 @@ class ServingRuntime(RemoteRuntime):
855
864
  )
856
865
  self._mock_server = self.to_mock_server()
857
866
 
858
- def to_job(self) -> KubejobRuntime:
859
- """Convert this ServingRuntime to a KubejobRuntime, so that the graph can be run as a standalone job."""
867
+ def to_job(self, func_name: Optional[str] = None) -> KubejobRuntime:
868
+ """Convert this ServingRuntime to a KubejobRuntime, so that the graph can be run as a standalone job.
869
+
870
+ Args:
871
+ func_name: Optional custom name for the job function. If not provided, automatically
872
+ appends '-batch' suffix to the serving function name to prevent database collision.
873
+
874
+ Returns:
875
+ KubejobRuntime configured to execute the serving graph as a batch job.
876
+
877
+ Note:
878
+ The job will have a different name than the serving function to prevent database collision.
879
+ The original serving function remains unchanged and can still be invoked after running the job.
880
+ """
860
881
  if self.spec.function_refs:
861
882
  raise mlrun.errors.MLRunInvalidArgumentError(
862
883
  f"Cannot convert function '{self.metadata.name}' to a job because it has child functions"
@@ -890,8 +911,50 @@ class ServingRuntime(RemoteRuntime):
890
911
  parameters=self.spec.parameters,
891
912
  graph=self.spec.graph,
892
913
  )
914
+
915
+ job_metadata = deepcopy(self.metadata)
916
+ original_name = job_metadata.name
917
+
918
+ if func_name:
919
+ # User provided explicit job name
920
+ job_metadata.name = func_name
921
+ logger.debug(
922
+ "Creating job from serving function with custom name",
923
+ new_name=func_name,
924
+ )
925
+ else:
926
+ job_metadata.name, was_renamed, suffix = (
927
+ mlrun.utils.helpers.ensure_batch_job_suffix(job_metadata.name)
928
+ )
929
+
930
+ # Check if the resulting name exceeds Kubernetes length limit
931
+ if (
932
+ len(job_metadata.name)
933
+ > mlrun.common.constants.K8S_DNS_1123_LABEL_MAX_LENGTH
934
+ ):
935
+ raise mlrun.errors.MLRunInvalidArgumentError(
936
+ f"Cannot convert serving function '{original_name}' to batch job: "
937
+ f"the resulting name '{job_metadata.name}' ({len(job_metadata.name)} characters) "
938
+ f"exceeds Kubernetes limit of {mlrun.common.constants.K8S_DNS_1123_LABEL_MAX_LENGTH} characters. "
939
+ f"Please provide a custom name via the func_name parameter, "
940
+ f"with at most {mlrun.common.constants.K8S_DNS_1123_LABEL_MAX_LENGTH} characters."
941
+ )
942
+
943
+ if was_renamed:
944
+ logger.info(
945
+ "Creating job from serving function (auto-appended suffix to prevent collision)",
946
+ new_name=job_metadata.name,
947
+ suffix=suffix,
948
+ )
949
+ else:
950
+ logger.debug(
951
+ "Creating job from serving function (name already has suffix)",
952
+ name=original_name,
953
+ suffix=suffix,
954
+ )
955
+
893
956
  job = KubejobRuntime(
894
957
  spec=spec,
895
- metadata=self.metadata,
958
+ metadata=job_metadata,
896
959
  )
897
960
  return job
mlrun/runtimes/pod.py CHANGED
@@ -20,12 +20,14 @@ import typing
20
20
  import warnings
21
21
  from collections.abc import Iterable
22
22
  from enum import Enum
23
+ from typing import Optional
23
24
 
24
25
  import dotenv
25
26
  import kubernetes.client as k8s_client
26
27
  from kubernetes.client import V1Volume, V1VolumeMount
27
28
 
28
29
  import mlrun.common.constants
30
+ import mlrun.common.secrets
29
31
  import mlrun.errors
30
32
  import mlrun.runtimes.mounts
31
33
  import mlrun.utils.regex
@@ -708,19 +710,45 @@ class KubeResource(BaseRuntime):
708
710
  def spec(self, spec):
709
711
  self._spec = self._verify_dict(spec, "spec", KubeResourceSpec)
710
712
 
711
- def set_env_from_secret(self, name, secret=None, secret_key=None):
712
- """set pod environment var from secret"""
713
- secret_key = secret_key or name
713
+ def set_env_from_secret(
714
+ self,
715
+ name: str,
716
+ secret: Optional[str] = None,
717
+ secret_key: Optional[str] = None,
718
+ ):
719
+ """
720
+ Set an environment variable from a Kubernetes Secret.
721
+ Client-side guard forbids MLRun internal auth/project secrets; no-op on API.
722
+ """
723
+ mlrun.common.secrets.validate_not_forbidden_secret(secret)
724
+ key = secret_key or name
714
725
  value_from = k8s_client.V1EnvVarSource(
715
- secret_key_ref=k8s_client.V1SecretKeySelector(name=secret, key=secret_key)
726
+ secret_key_ref=k8s_client.V1SecretKeySelector(name=secret, key=key)
716
727
  )
717
- return self._set_env(name, value_from=value_from)
728
+ return self._set_env(name=name, value_from=value_from)
718
729
 
719
- def set_env(self, name, value=None, value_from=None):
720
- """set pod environment var from value"""
721
- if value is not None:
722
- return self._set_env(name, value=str(value))
723
- return self._set_env(name, value_from=value_from)
730
+ def set_env(
731
+ self,
732
+ name: str,
733
+ value: Optional[str] = None,
734
+ value_from: Optional[typing.Any] = None,
735
+ ):
736
+ """
737
+ Set an environment variable.
738
+ If value comes from a Secret, validate on client-side only.
739
+ """
740
+ if value_from is not None:
741
+ secret_name = self._extract_secret_name_from_value_from(
742
+ value_from=value_from
743
+ )
744
+ if secret_name:
745
+ mlrun.common.secrets.validate_not_forbidden_secret(secret_name)
746
+ return self._set_env(name=name, value_from=value_from)
747
+
748
+ # Plain literal value path
749
+ return self._set_env(
750
+ name=name, value=(str(value) if value is not None else None)
751
+ )
724
752
 
725
753
  def with_annotations(self, annotations: dict):
726
754
  """set a key/value annotations in the metadata of the pod"""
@@ -1366,6 +1394,27 @@ class KubeResource(BaseRuntime):
1366
1394
 
1367
1395
  return self.status.state
1368
1396
 
1397
+ @staticmethod
1398
+ def _extract_secret_name_from_value_from(
1399
+ value_from: typing.Any,
1400
+ ) -> Optional[str]:
1401
+ """Extract secret name from a V1EnvVarSource or dict representation."""
1402
+ if isinstance(value_from, k8s_client.V1EnvVarSource):
1403
+ if value_from.secret_key_ref:
1404
+ return value_from.secret_key_ref.name
1405
+ elif isinstance(value_from, dict):
1406
+ value_from = (
1407
+ value_from.get("valueFrom")
1408
+ or value_from.get("value_from")
1409
+ or value_from
1410
+ )
1411
+ secret_key_ref = (value_from or {}).get("secretKeyRef") or (
1412
+ value_from or {}
1413
+ ).get("secret_key_ref")
1414
+ if isinstance(secret_key_ref, dict):
1415
+ return secret_key_ref.get("name")
1416
+ return None
1417
+
1369
1418
 
1370
1419
  def _resolve_if_type_sanitized(attribute_name, attribute):
1371
1420
  attribute_config = sanitized_attributes[attribute_name]
mlrun/serving/states.py CHANGED
@@ -591,15 +591,14 @@ class BaseStep(ModelObj):
591
591
  root.get_shared_model_by_artifact_uri(model_artifact_uri)
592
592
  )
593
593
 
594
- if not shared_runnable_name:
595
- if not actual_shared_name:
596
- raise GraphError(
597
- f"Can't find shared model for {name} model endpoint"
598
- )
599
- else:
600
- step.class_args[schemas.ModelRunnerStepData.MODELS][name][
601
- schemas.ModelsData.MODEL_PARAMETERS.value
602
- ]["shared_runnable_name"] = actual_shared_name
594
+ if not actual_shared_name:
595
+ raise GraphError(
596
+ f"Can't find shared model named {shared_runnable_name}"
597
+ )
598
+ elif not shared_runnable_name:
599
+ step.class_args[schemas.ModelRunnerStepData.MODELS][name][
600
+ schemas.ModelsData.MODEL_PARAMETERS.value
601
+ ]["shared_runnable_name"] = actual_shared_name
603
602
  elif actual_shared_name != shared_runnable_name:
604
603
  raise GraphError(
605
604
  f"Model endpoint {name} shared runnable name mismatch: "
@@ -1664,6 +1663,8 @@ class ModelRunnerStep(MonitoredStep):
1664
1663
 
1665
1664
  Note ModelRunnerStep can only be added to a graph that has the flow topology and running with async engine.
1666
1665
 
1666
+ Note see config_pool_resource method documentation for default number of max threads and max processes.
1667
+
1667
1668
  :param model_selector: ModelSelector instance whose select() method will be used to select models to run on each
1668
1669
  event. Optional. If not passed, all models will be run.
1669
1670
  :param raise_exception: If True, an error will be raised when model selection fails or if one of the models raised
@@ -1676,7 +1677,12 @@ class ModelRunnerStep(MonitoredStep):
1676
1677
  """
1677
1678
 
1678
1679
  kind = "model_runner"
1679
- _dict_fields = MonitoredStep._dict_fields + ["_shared_proxy_mapping"]
1680
+ _dict_fields = MonitoredStep._dict_fields + [
1681
+ "_shared_proxy_mapping",
1682
+ "max_processes",
1683
+ "max_threads",
1684
+ "pool_factor",
1685
+ ]
1680
1686
 
1681
1687
  def __init__(
1682
1688
  self,
@@ -1687,6 +1693,10 @@ class ModelRunnerStep(MonitoredStep):
1687
1693
  raise_exception: bool = True,
1688
1694
  **kwargs,
1689
1695
  ):
1696
+ self.max_processes = None
1697
+ self.max_threads = None
1698
+ self.pool_factor = None
1699
+
1690
1700
  if isinstance(model_selector, ModelSelector) and model_selector_parameters:
1691
1701
  raise mlrun.errors.MLRunInvalidArgumentError(
1692
1702
  "Cannot provide a model_selector object as argument to `model_selector` and also provide "
@@ -1748,6 +1758,7 @@ class ModelRunnerStep(MonitoredStep):
1748
1758
  2. Create a new model endpoint with the same name and set it to `latest`.
1749
1759
 
1750
1760
  :param override: bool allow override existing model on the current ModelRunnerStep.
1761
+ :raise GraphError: when the shared model is not found in the root flow step shared models.
1751
1762
  """
1752
1763
  model_class, model_params = (
1753
1764
  "mlrun.serving.Model",
@@ -1865,14 +1876,6 @@ class ModelRunnerStep(MonitoredStep):
1865
1876
  otherwise block the main event loop thread.
1866
1877
  * "asyncio" – To run in an asyncio task. This is appropriate for I/O tasks that use asyncio, allowing the
1867
1878
  event loop to continue running while waiting for a response.
1868
- * "shared_executor" – Reuses an external executor (typically managed by the flow or context) to execute the
1869
- runnable. Should be used only if you have multiply `ParallelExecution` in the same flow and especially
1870
- useful when:
1871
- - You want to share a heavy resource like a large model loaded onto a GPU.
1872
- - You want to centralize task scheduling or coordination for multiple lightweight tasks.
1873
- - You aim to minimize overhead from creating new executors or processes/threads per runnable.
1874
- The runnable is expected to be pre-initialized and reused across events, enabling efficient use of
1875
- memory and hardware accelerators.
1876
1879
  * "naive" – To run in the main event loop. This is appropriate only for trivial computation and/or file I/O.
1877
1880
  It means that the runnable will not actually be run in parallel to anything else.
1878
1881
 
@@ -2093,6 +2096,24 @@ class ModelRunnerStep(MonitoredStep):
2093
2096
  "Monitoring data must be a dictionary."
2094
2097
  )
2095
2098
 
2099
+ def configure_pool_resource(
2100
+ self,
2101
+ max_processes: Optional[int] = None,
2102
+ max_threads: Optional[int] = None,
2103
+ pool_factor: Optional[int] = None,
2104
+ ) -> None:
2105
+ """
2106
+ Configure the resource limits for the shared models in the graph.
2107
+
2108
+ :param max_processes: Maximum number of processes to spawn (excluding dedicated processes).
2109
+ Defaults to the number of CPUs or 16 if undetectable.
2110
+ :param max_threads: Maximum number of threads to spawn. Defaults to 32.
2111
+ :param pool_factor: Multiplier to scale the number of process/thread workers per runnable. Defaults to 1.
2112
+ """
2113
+ self.max_processes = max_processes
2114
+ self.max_threads = max_threads
2115
+ self.pool_factor = pool_factor
2116
+
2096
2117
  def init_object(self, context, namespace, mode="sync", reset=False, **extra_kwargs):
2097
2118
  self.context = context
2098
2119
  if not self._is_local_function(context):
@@ -2141,6 +2162,9 @@ class ModelRunnerStep(MonitoredStep):
2141
2162
  shared_proxy_mapping=self._shared_proxy_mapping or None,
2142
2163
  name=self.name,
2143
2164
  context=context,
2165
+ max_processes=self.max_processes,
2166
+ max_threads=self.max_threads,
2167
+ pool_factor=self.pool_factor,
2144
2168
  )
2145
2169
 
2146
2170
 
@@ -2983,7 +3007,7 @@ class RootFlowStep(FlowStep):
2983
3007
 
2984
3008
  def get_shared_model_by_artifact_uri(
2985
3009
  self, artifact_uri: str
2986
- ) -> Optional[tuple[str, str, dict]]:
3010
+ ) -> Union[tuple[str, str, dict], tuple[None, None, None]]:
2987
3011
  """
2988
3012
  Get a shared model by its artifact URI.
2989
3013
  :param artifact_uri: The artifact URI of the model.
@@ -2992,9 +3016,9 @@ class RootFlowStep(FlowStep):
2992
3016
  for model_name, (model_class, model_params) in self.shared_models.items():
2993
3017
  if model_params.get("artifact_uri") == artifact_uri:
2994
3018
  return model_name, model_class, model_params
2995
- return None
3019
+ return None, None, None
2996
3020
 
2997
- def config_pool_resource(
3021
+ def configure_shared_pool_resource(
2998
3022
  self,
2999
3023
  max_processes: Optional[int] = None,
3000
3024
  max_threads: Optional[int] = None,
mlrun/utils/helpers.py CHANGED
@@ -253,6 +253,40 @@ def verify_field_regex(
253
253
  return False
254
254
 
255
255
 
256
+ def validate_function_name(name: str) -> None:
257
+ """
258
+ Validate that a function name conforms to Kubernetes DNS-1123 label requirements.
259
+
260
+ Function names for Kubernetes resources must:
261
+ - Be lowercase alphanumeric characters or '-'
262
+ - Start and end with an alphanumeric character
263
+ - Be at most 63 characters long
264
+
265
+ This validation should be called AFTER normalize_name() has been applied.
266
+
267
+ Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names
268
+
269
+ :param name: The function name to validate (after normalization)
270
+ :raises MLRunInvalidArgumentError: If the function name is invalid for Kubernetes
271
+ """
272
+ if not name:
273
+ return
274
+
275
+ verify_field_regex(
276
+ "function.metadata.name",
277
+ name,
278
+ mlrun.utils.regex.dns_1123_label,
279
+ raise_on_failure=True,
280
+ log_message=(
281
+ f"Function name '{name}' is invalid. "
282
+ "Kubernetes function names must be DNS-1123 labels: "
283
+ "lowercase alphanumeric characters or '-', "
284
+ "starting and ending with an alphanumeric character, "
285
+ "and at most 63 characters long."
286
+ ),
287
+ )
288
+
289
+
256
290
  def validate_builder_source(
257
291
  source: str, pull_at_runtime: bool = False, workdir: Optional[str] = None
258
292
  ):
@@ -476,6 +510,40 @@ def normalize_name(name: str):
476
510
  return name.lower()
477
511
 
478
512
 
513
+ def ensure_batch_job_suffix(
514
+ function_name: typing.Optional[str],
515
+ ) -> tuple[typing.Optional[str], bool, str]:
516
+ """
517
+ Ensure that a function name has the batch job suffix appended to prevent database collision.
518
+
519
+ This helper is used by to_job() methods in runtimes that convert online functions (serving, local)
520
+ to batch processing jobs. The suffix prevents the job from overwriting the original function in
521
+ the database when both are stored with the same (project, name) key.
522
+
523
+ :param function_name: The original function name (can be None or empty string)
524
+
525
+ :return: A tuple of (modified_name, was_renamed, suffix) where:
526
+ - modified_name: The function name with the batch suffix (if not already present),
527
+ or empty string if input was empty
528
+ - was_renamed: True if the suffix was added, False if it was already present or if name was empty/None
529
+ - suffix: The suffix value that was used (or would have been used)
530
+
531
+ """
532
+ suffix = mlrun_constants.RESERVED_BATCH_JOB_SUFFIX
533
+
534
+ # Handle None or empty string
535
+ if not function_name:
536
+ return function_name, False, suffix
537
+
538
+ if not function_name.endswith(suffix):
539
+ return (
540
+ f"{function_name}{suffix}",
541
+ True,
542
+ suffix,
543
+ )
544
+ return function_name, False, suffix
545
+
546
+
479
547
  class LogBatchWriter:
480
548
  def __init__(self, func, batch=16, maxtime=5):
481
549
  self.batch = batch
@@ -970,8 +1038,15 @@ def enrich_image_url(
970
1038
  else:
971
1039
  image_url = "mlrun/mlrun"
972
1040
 
973
- if is_mlrun_image and tag and ":" not in image_url:
974
- image_url = f"{image_url}:{tag}"
1041
+ if is_mlrun_image and tag:
1042
+ if ":" not in image_url:
1043
+ image_url = f"{image_url}:{tag}"
1044
+ elif enrich_kfp_python_version:
1045
+ # For mlrun-kfp >= 1.10.0-rc0, append python suffix to existing tag
1046
+ python_suffix = resolve_image_tag_suffix(
1047
+ mlrun_version, client_python_version
1048
+ )
1049
+ image_url = f"{image_url}{python_suffix}" if python_suffix else image_url
975
1050
 
976
1051
  registry = (
977
1052
  config.images_registry if is_mlrun_image else config.vendor_images_registry
@@ -1,4 +1,4 @@
1
1
  {
2
- "git_commit": "cc5c5639d721f37d6a1d0d0b7cf9f853f38e4707",
3
- "version": "1.10.0-rc38"
2
+ "git_commit": "09d6e7ada4324bf80961e0d54f9fd9857852fe53",
3
+ "version": "1.10.0-rc41"
4
4
  }
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mlrun
3
- Version: 1.10.0rc38
3
+ Version: 1.10.0rc41
4
4
  Summary: Tracking and config of machine learning runs
5
5
  Home-page: https://github.com/mlrun/mlrun
6
6
  Author: Yaron Haviv
@@ -113,7 +113,7 @@ Requires-Dist: apscheduler<4,>=3.11; extra == "api"
113
113
  Requires-Dist: objgraph~=3.6; extra == "api"
114
114
  Requires-Dist: igz-mgmt~=0.4.1; extra == "api"
115
115
  Requires-Dist: humanfriendly~=10.0; extra == "api"
116
- Requires-Dist: fastapi~=0.116.0; extra == "api"
116
+ Requires-Dist: fastapi~=0.120.0; extra == "api"
117
117
  Requires-Dist: sqlalchemy~=2.0; extra == "api"
118
118
  Requires-Dist: sqlalchemy-utils~=0.41.2; extra == "api"
119
119
  Requires-Dist: pymysql~=1.1; extra == "api"
@@ -203,7 +203,7 @@ Requires-Dist: dask~=2023.12.1; python_version < "3.11" and extra == "complete-a
203
203
  Requires-Dist: databricks-sdk~=0.20.0; extra == "complete-api"
204
204
  Requires-Dist: distributed==2024.8; python_version >= "3.11" and extra == "complete-api"
205
205
  Requires-Dist: distributed~=2023.12.1; python_version < "3.11" and extra == "complete-api"
206
- Requires-Dist: fastapi~=0.116.0; extra == "complete-api"
206
+ Requires-Dist: fastapi~=0.120.0; extra == "complete-api"
207
207
  Requires-Dist: gcsfs<=2025.7.0,>=2025.5.1; extra == "complete-api"
208
208
  Requires-Dist: google-cloud-bigquery-storage~=2.17; extra == "complete-api"
209
209
  Requires-Dist: google-cloud-bigquery[bqstorage,pandas]==3.14.1; extra == "complete-api"