mlrun 1.10.0rc22__py3-none-any.whl → 1.10.0rc23__py3-none-any.whl

This diff shows the content of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects only the changes between those published versions.

Potentially problematic release.

mlrun/common/schemas/model_monitoring/functions.py CHANGED
@@ -54,12 +54,21 @@ class FunctionSummary(BaseModel):
 
         return cls(
             type=func_type,
-            name=func_dict["metadata"]["name"],
+            name=func_dict["metadata"]["name"]
+            if func_type != FunctionsType.APPLICATION
+            else func_dict["spec"]
+            .get("graph", {})
+            .get("steps", {})
+            .get("PrepareMonitoringEvent", {})
+            .get("class_args", {})
+            .get("application_name"),
             application_class=""
             if func_type != FunctionsType.APPLICATION
-            else func_dict["spec"]["graph"]["steps"]["PushToMonitoringWriter"]["after"][
-                0
-            ],
+            else func_dict["spec"]
+            .get("graph", {})
+            .get("steps", {})
+            .get("PushToMonitoringWriter", {})
+            .get("after", [None])[0],
             project_name=func_dict["metadata"]["project"],
             updated_time=func_dict["metadata"].get("updated"),
             status=func_dict["status"].get("state"),
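
A minimal sketch of what the new name lookup does for application-type functions, run against a hypothetical serialized function dict (illustration only); the chained .get() calls fall back to None instead of raising KeyError when part of the graph spec is missing:

func_dict = {
    "metadata": {"name": "my-app-function"},
    "spec": {
        "graph": {
            "steps": {
                "PrepareMonitoringEvent": {
                    "class_args": {"application_name": "my-app"}
                }
            }
        }
    },
}

# Mirrors the lookup added above; returns None if any level is absent.
application_name = (
    func_dict["spec"]
    .get("graph", {})
    .get("steps", {})
    .get("PrepareMonitoringEvent", {})
    .get("class_args", {})
    .get("application_name")
)
print(application_name)  # my-app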
mlrun/datastore/datastore.py CHANGED
@@ -39,6 +39,7 @@ from .base import DataItem, DataStore, HttpStore
 from .filestore import FileStore
 from .inmem import InMemoryStore
 from .model_provider.huggingface_provider import HuggingFaceProvider
+from .model_provider.mock_model_provider import MockModelProvider
 from .model_provider.openai_provider import OpenAIProvider
 from .store_resources import get_store_resource, is_store_uri
 from .v3io import V3ioStore
@@ -103,7 +104,11 @@ def schema_to_store(schema) -> DataStore.__subclasses__():
 def schema_to_model_provider(
     schema: str, raise_missing_schema_exception=True
 ) -> type[ModelProvider]:
-    schema_dict = {"openai": OpenAIProvider, "huggingface": HuggingFaceProvider}
+    schema_dict = {
+        "openai": OpenAIProvider,
+        "huggingface": HuggingFaceProvider,
+        "mock": MockModelProvider,
+    }
     provider_class = schema_dict.get(schema, None)
     if not provider_class:
         if raise_missing_schema_exception:
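
With the new "mock" entry, resolving that schema returns the MockModelProvider class. A minimal sketch, assuming schema_to_model_provider is imported from mlrun.datastore.datastore (the module patched above; the import path is an assumption):

from mlrun.datastore.datastore import schema_to_model_provider  # assumed import path

provider_cls = schema_to_model_provider("mock")
print(provider_cls.__name__)  # MockModelProvider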
mlrun/datastore/model_provider/mock_model_provider.py ADDED
@@ -0,0 +1,87 @@
+# Copyright 2023 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Optional, Union
+
+import mlrun
+from mlrun.datastore.model_provider.model_provider import (
+    InvokeResponseFormat,
+    ModelProvider,
+    UsageResponseKeys,
+)
+
+
+class MockModelProvider(ModelProvider):
+    support_async = False
+
+    def __init__(
+        self,
+        parent,
+        kind,
+        name,
+        endpoint="",
+        secrets: Optional[dict] = None,
+        default_invoke_kwargs: Optional[dict] = None,
+    ):
+        super().__init__(
+            parent=parent, name=name, kind=kind, endpoint=endpoint, secrets=secrets
+        )
+        self.default_invoke_kwargs = default_invoke_kwargs or {}
+        self._client = None
+        self._async_client = None
+
+    @staticmethod
+    def _extract_string_output(response: Any) -> str:
+        """
+        Extracts string response from response object
+        """
+        pass
+
+    def load_client(self) -> None:
+        """
+        Initializes the SDK client for the model provider with the given keyword arguments
+        and assigns it to an instance attribute (e.g., self._client).
+
+        Subclasses should override this method to:
+        - Create and configure the provider-specific client instance.
+        - Assign the client instance to self._client.
+        """
+
+        pass
+
+    def invoke(
+        self,
+        messages: Union[list[dict], Any],
+        invoke_response_format: InvokeResponseFormat = InvokeResponseFormat.FULL,
+        **invoke_kwargs,
+    ) -> Union[str, dict[str, Any], Any]:
+        if invoke_response_format == InvokeResponseFormat.STRING:
+            return (
+                "You are using a mock model provider, no actual inference is performed."
+            )
+        elif invoke_response_format == InvokeResponseFormat.FULL:
+            return {
+                UsageResponseKeys.USAGE: {"prompt_tokens": 0, "completion_tokens": 0},
+                UsageResponseKeys.ANSWER: "You are using a mock model provider, no actual inference is performed.",
+                "extra": {},
+            }
+        elif invoke_response_format == InvokeResponseFormat.USAGE:
+            return {
+                UsageResponseKeys.ANSWER: "You are using a mock model provider, no actual inference is performed.",
+                UsageResponseKeys.USAGE: {"prompt_tokens": 0, "completion_tokens": 0},
+            }
+        else:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                f"Unsupported invoke response format: {invoke_response_format}"
+            )
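
A rough usage sketch of the new mock provider added above. Constructing it directly with parent=None is an assumption for illustration (how the ModelProvider base class uses parent and kind is not shown in this diff); invoke() never calls a real model and returns canned answers:

from mlrun.datastore.model_provider.mock_model_provider import MockModelProvider
from mlrun.datastore.model_provider.model_provider import InvokeResponseFormat

provider = MockModelProvider(parent=None, kind="mock", name="mock-provider")  # parent=None: assumption

# STRING format: only the fixed answer text.
print(provider.invoke(
    messages=[{"role": "user", "content": "hello"}],
    invoke_response_format=InvokeResponseFormat.STRING,
))

# FULL format: a dict with zeroed usage counters, the answer, and an empty "extra".
print(provider.invoke(
    messages=[{"role": "user", "content": "hello"}],
    invoke_response_format=InvokeResponseFormat.FULL,
))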
mlrun/k8s_utils.py CHANGED
@@ -26,6 +26,10 @@ from .config import config as mlconfig
 
 _running_inside_kubernetes_cluster = None
 
+K8sObj = typing.Union[kubernetes.client.V1Affinity, kubernetes.client.V1Toleration]
+SanitizedK8sObj = dict[str, typing.Any]
+K8sObjList = typing.Union[list[K8sObj], list[SanitizedK8sObj]]
+
 
 def is_running_inside_kubernetes_cluster():
     global _running_inside_kubernetes_cluster
@@ -232,6 +236,54 @@ def validate_node_selectors(
     return True
 
 
+def sanitize_k8s_objects(
+    k8s_objects: typing.Union[None, K8sObjList, SanitizedK8sObj, K8sObj],
+) -> typing.Union[list[SanitizedK8sObj], SanitizedK8sObj]:
+    """Convert K8s objects to dicts. Handles single objects or lists."""
+    api_client = kubernetes.client.ApiClient()
+    if not k8s_objects:
+        return k8s_objects
+
+    def _sanitize_k8s_object(k8s_obj):
+        return (
+            api_client.sanitize_for_serialization(k8s_obj)
+            if hasattr(k8s_obj, "to_dict")
+            else k8s_obj
+        )
+
+    return (
+        [_sanitize_k8s_object(k8s_obj) for k8s_obj in k8s_objects]
+        if isinstance(k8s_objects, list)
+        else _sanitize_k8s_object(k8s_objects)
+    )
+
+
+def sanitize_scheduling_configuration(
+    tolerations: typing.Optional[list[kubernetes.client.V1Toleration]] = None,
+    affinity: typing.Optional[kubernetes.client.V1Affinity] = None,
+) -> tuple[
+    typing.Optional[list[dict]],
+    typing.Optional[dict],
+]:
+    """
+    Sanitizes pod scheduling configuration for serialization.
+
+    Takes affinity and tolerations and converts them to
+    JSON-serializable dictionaries using the Kubernetes API client's
+    sanitization method.
+
+    Args:
+        affinity: Pod affinity/anti-affinity rules
+        tolerations: List of toleration rules
+
+    Returns:
+        Tuple of (sanitized_affinity, sanitized_tolerations)
+        - affinity: Sanitized dict representation or None
+        - tolerations: List of sanitized dict representations or None
+    """
+    return sanitize_k8s_objects(tolerations), sanitize_k8s_objects(affinity)
+
+
 def enrich_preemption_mode(
     preemption_mode: typing.Optional[str],
     node_selector: dict[str, str],
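
A small sketch of the new helper with plain kubernetes client objects; note that the returned tuple follows the return statement above (sanitized tolerations first, then affinity), and a falsy input such as None is passed through unchanged:

import kubernetes

from mlrun.k8s_utils import sanitize_scheduling_configuration

toleration = kubernetes.client.V1Toleration(
    key="preemptible", operator="Equal", value="true", effect="NoSchedule"
)

sanitized_tolerations, sanitized_affinity = sanitize_scheduling_configuration(
    tolerations=[toleration], affinity=None
)
print(sanitized_tolerations)  # [{'key': 'preemptible', 'operator': 'Equal', ...}]
print(sanitized_affinity)     # None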
@@ -269,8 +321,8 @@ def enrich_preemption_mode(
     )
 
     enriched_node_selector = copy.deepcopy(node_selector or {})
-    enriched_tolerations = copy.deepcopy(tolerations or [])
-    enriched_affinity = copy.deepcopy(affinity)
+    enriched_tolerations = _safe_copy_tolerations(tolerations or [])
+    enriched_affinity = _safe_copy_affinity(affinity)
     preemptible_tolerations = generate_preemptible_tolerations()
 
     if handler := _get_mode_handler(preemption_mode):
@@ -288,6 +340,57 @@
     )
 
 
+def _safe_copy_tolerations(
+    tolerations: list[kubernetes.client.V1Toleration],
+) -> list[kubernetes.client.V1Toleration]:
+    """
+    Safely copy a list of V1Toleration objects without mutating the originals.
+
+    Explicitly reconstructs V1Toleration objects instead of using deepcopy() to avoid
+    serialization errors with K8s client objects that contain threading primitives
+    and non-copyable elements like RLock objects.
+
+    Args:
+        tolerations: List of V1Toleration objects to copy
+
+    Returns:
+        New list containing copied V1Toleration objects with identical field values"""
+    return [
+        kubernetes.client.V1Toleration(
+            effect=toleration.effect,
+            key=toleration.key,
+            value=toleration.value,
+            operator=toleration.operator,
+            toleration_seconds=toleration.toleration_seconds,
+        )
+        for toleration in tolerations
+    ]
+
+
+def _safe_copy_affinity(
+    affinity: kubernetes.client.V1Affinity,
+) -> kubernetes.client.V1Affinity:
+    """
+    Safely create a deep copy of a V1Affinity object.
+
+    Uses K8s API client serialization/deserialization instead of deepcopy() to avoid
+    errors with threading primitives and complex internal structures in K8s objects.
+    Serializes to dict then deserializes back to a clean V1Affinity object.
+
+    Args:
+        affinity: V1Affinity object to copy, or None
+
+    Returns:
+        New V1Affinity object with identical field values, or None if input was None
+    """
+    if not affinity:
+        return None
+    api_client = kubernetes.client.ApiClient()
+    # Convert to dict then back to object properly
+    affinity_dict = api_client.sanitize_for_serialization(affinity)
+    return api_client._ApiClient__deserialize(affinity_dict, "V1Affinity")
+
+
 def _get_mode_handler(mode: str):
     return {
         mlrun.common.schemas.PreemptionModes.prevent: _handle_prevent_mode,
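
A minimal sketch of the copy helper above (a private function, shown here only to illustrate the behavior): the copy carries the same field values but is a distinct object, so later enrichment does not mutate the caller's tolerations:

import kubernetes

from mlrun.k8s_utils import _safe_copy_tolerations  # private helper shown above

original = kubernetes.client.V1Toleration(key="preemptible", operator="Exists")
(copied,) = _safe_copy_tolerations([original])

print(copied.key, copied.operator)  # preemptible Exists
print(copied is original)           # False: a freshly built V1Toleration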
mlrun/model_monitoring/controller.py CHANGED
@@ -859,7 +859,7 @@ class MonitoringApplicationController:
         for endpoint in endpoints:
             last_request = last_request_dict.get(endpoint.metadata.uid, None)
             if isinstance(last_request, float):
-                last_request = pd.to_datetime(last_request, unit="s", utc=True)
+                last_request = pd.to_datetime(last_request, unit="ms", utc=True)
             endpoint.status.last_request = (
                 last_request or endpoint.status.last_request
            )
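
The controller change above switches the epoch interpretation of last_request from seconds to milliseconds. A worked example of the difference, assuming the stored float is an epoch-milliseconds value (the value below is hypothetical):

import pandas as pd

last_request = 1_700_000_000_000.0  # hypothetical timestamp stored in milliseconds

# New behavior: interpreted as milliseconds since the epoch.
print(pd.to_datetime(last_request, unit="ms", utc=True))  # 2023-11-14 22:13:20+00:00

# With unit="s" the same float would be read as ~1.7 trillion seconds, i.e. tens of
# thousands of years in the future, beyond the nanosecond datetime range pandas supports.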
mlrun/serving/server.py CHANGED
@@ -17,8 +17,10 @@ __all__ = ["GraphServer", "create_graph_server", "GraphContext", "MockEvent"]
 import asyncio
 import base64
 import copy
+import importlib
 import json
 import os
+import pathlib
 import socket
 import traceback
 import uuid
@@ -572,19 +574,34 @@ async def async_execute_graph(
     nest_under_inputs: bool,
 ) -> list[Any]:
     spec = mlrun.utils.get_serving_spec()
-
-    namespace = {}
+    modname = None
     code = os.getenv("MLRUN_EXEC_CODE")
     if code:
         code = base64.b64decode(code).decode("utf-8")
-        exec(code, namespace)
+        with open("user_code.py", "w") as fp:
+            fp.write(code)
+        modname = "user_code"
     else:
         # TODO: find another way to get the local file path, or ensure that MLRUN_EXEC_CODE
         # gets set in local flow and not just in the remote pod
-        source_filename = spec.get("filename", None)
-        if source_filename:
-            with open(source_filename) as f:
-                exec(f.read(), namespace)
+        source_file_path = spec.get("filename", None)
+        if source_file_path:
+            source_file_path_object = pathlib.Path(source_file_path).resolve()
+            current_dir_path_object = pathlib.Path(".").resolve()
+            if not source_file_path_object.is_relative_to(current_dir_path_object):
+                raise mlrun.errors.MLRunRuntimeError(
+                    f"Source file path '{source_file_path}' is not under the current working directory "
+                    f"(which is required when running with local=True)"
+                )
+            relative_path_to_source_file = source_file_path_object.relative_to(
+                current_dir_path_object
+            )
+            modname = ".".join(relative_path_to_source_file.with_suffix("").parts)
+
+    namespace = {}
+    if modname:
+        mod = importlib.import_module(modname)
+        namespace = mod.__dict__
 
     server = GraphServer.from_dict(spec)
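
A small sketch of how the new server code above turns a source file path (relative to the working directory) into an importable dotted module name; the example path is hypothetical:

import pathlib

source_file_path = "graphs/my_pipeline.py"  # hypothetical file under the current directory

modname = ".".join(pathlib.Path(source_file_path).with_suffix("").parts)
print(modname)  # graphs.my_pipeline -> loadable via importlib.import_module(modname)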
 
mlrun/serving/states.py CHANGED
@@ -546,8 +546,8 @@ class BaseStep(ModelObj):
         # Update model endpoints names in the root step
         root.update_model_endpoints_names(step_model_endpoints_names)
 
-    @staticmethod
     def _verify_shared_models(
+        self,
         root: "RootFlowStep",
         step: "ModelRunnerStep",
         step_model_endpoints_names: list[str],
@@ -576,15 +576,17 @@ class BaseStep(ModelObj):
             prefix, _ = mlrun.datastore.parse_store_uri(model_artifact_uri)
             # if the model artifact is a prompt, we need to get the model URI
             # to ensure that the shared runnable name is correct
+            llm_artifact_uri = None
             if prefix == mlrun.utils.StorePrefix.LLMPrompt:
                 llm_artifact, _ = mlrun.store_manager.get_store_artifact(
                     model_artifact_uri
                 )
+                llm_artifact_uri = llm_artifact.uri
                 model_artifact_uri = mlrun.utils.remove_tag_from_artifact_uri(
                     llm_artifact.spec.parent_uri
                 )
-            actual_shared_name = root.get_shared_model_name_by_artifact_uri(
-                model_artifact_uri
+            actual_shared_name, shared_model_class, shared_model_params = (
+                root.get_shared_model_by_artifact_uri(model_artifact_uri)
             )
 
             if not shared_runnable_name:
@@ -596,15 +598,20 @@ class BaseStep(ModelObj):
                 step.class_args[schemas.ModelRunnerStepData.MODELS][name][
                     schemas.ModelsData.MODEL_PARAMETERS.value
                 ]["shared_runnable_name"] = actual_shared_name
-                shared_models.append(actual_shared_name)
             elif actual_shared_name != shared_runnable_name:
                 raise GraphError(
                     f"Model endpoint {name} shared runnable name mismatch: "
                     f"expected {actual_shared_name}, got {shared_runnable_name}"
                 )
-            else:
-                shared_models.append(actual_shared_name)
-
+            shared_models.append(actual_shared_name)
+            self._edit_proxy_model_data(
+                step,
+                name,
+                actual_shared_name,
+                shared_model_params,
+                shared_model_class,
+                llm_artifact_uri or model_artifact_uri,
+            )
         undefined_shared_models = list(
             set(shared_models) - set(root.shared_models.keys())
         )
@@ -613,6 +620,52 @@ class BaseStep(ModelObj):
                 f"The following shared models are not defined in the graph: {undefined_shared_models}."
             )
 
+    @staticmethod
+    def _edit_proxy_model_data(
+        step: "ModelRunnerStep",
+        name: str,
+        actual_shared_name: str,
+        shared_model_params: dict,
+        shared_model_class: Any,
+        artifact: Union[ModelArtifact, LLMPromptArtifact, str],
+    ):
+        monitoring_data = step.class_args.setdefault(
+            schemas.ModelRunnerStepData.MONITORING_DATA, {}
+        )
+
+        # edit monitoring data according to the shared model parameters
+        monitoring_data[name][schemas.MonitoringData.INPUT_PATH] = shared_model_params[
+            "input_path"
+        ]
+        monitoring_data[name][schemas.MonitoringData.RESULT_PATH] = shared_model_params[
+            "result_path"
+        ]
+        monitoring_data[name][schemas.MonitoringData.INPUTS] = shared_model_params[
+            "inputs"
+        ]
+        monitoring_data[name][schemas.MonitoringData.OUTPUTS] = shared_model_params[
+            "outputs"
+        ]
+        monitoring_data[name][schemas.MonitoringData.MODEL_CLASS] = (
+            shared_model_class
+            if isinstance(shared_model_class, str)
+            else shared_model_class.__class__.__name__
+        )
+        if actual_shared_name and actual_shared_name not in step._shared_proxy_mapping:
+            step._shared_proxy_mapping[actual_shared_name] = {
+                name: artifact.uri
+                if isinstance(artifact, (ModelArtifact, LLMPromptArtifact))
+                else artifact
+            }
+        elif actual_shared_name:
+            step._shared_proxy_mapping[actual_shared_name].update(
+                {
+                    name: artifact.uri
+                    if isinstance(artifact, (ModelArtifact, LLMPromptArtifact))
+                    else artifact
+                }
+            )
+
 
 class TaskStep(BaseStep):
     """task execution step, runs a class or handler"""
@@ -1116,6 +1169,7 @@ class Model(storey.ParallelExecutionRunnable, ModelObj):
         self.invocation_artifact: Optional[LLMPromptArtifact] = None
         self.model_artifact: Optional[ModelArtifact] = None
         self.model_provider: Optional[ModelProvider] = None
+        self._artifact_were_loaded = False
 
     def __init_subclass__(cls):
         super().__init_subclass__()
@@ -1136,12 +1190,14 @@ class Model(storey.ParallelExecutionRunnable, ModelObj):
         )
 
     def _load_artifacts(self) -> None:
-        artifact = self._get_artifact_object()
-        if isinstance(artifact, LLMPromptArtifact):
-            self.invocation_artifact = artifact
-            self.model_artifact = self.invocation_artifact.model_artifact
-        else:
-            self.model_artifact = artifact
+        if not self._artifact_were_loaded:
+            artifact = self._get_artifact_object()
+            if isinstance(artifact, LLMPromptArtifact):
+                self.invocation_artifact = artifact
+                self.model_artifact = self.invocation_artifact.model_artifact
+            else:
+                self.model_artifact = artifact
+            self._artifact_were_loaded = True
 
     def _get_artifact_object(
         self, proxy_uri: Optional[str] = None
@@ -1259,6 +1315,8 @@ class LLModel(Model):
                             will be stored.
        """
 
+    _dict_fields = Model._dict_fields + ["result_path", "input_path"]
+
    def __init__(
        self,
        name: str,
@@ -1283,8 +1341,9 @@ class LLModel(Model):
        model_configuration: Optional[dict] = None,
        **kwargs,
    ) -> Any:
+        llm_prompt_artifact = kwargs.get("llm_prompt_artifact")
        if isinstance(
-            self.invocation_artifact, mlrun.artifacts.LLMPromptArtifact
+            llm_prompt_artifact, mlrun.artifacts.LLMPromptArtifact
        ) and isinstance(self.model_provider, ModelProvider):
            logger.debug(
                "Invoking model provider",
@@ -1310,7 +1369,7 @@ class LLModel(Model):
            logger.warning(
                "LLModel invocation artifact or model provider not set, skipping prediction",
                model_name=self.name,
-                invocation_artifact_type=type(self.invocation_artifact).__name__,
+                invocation_artifact_type=type(llm_prompt_artifact).__name__,
                model_provider_type=type(self.model_provider).__name__,
            )
            return body
@@ -1322,8 +1381,9 @@ class LLModel(Model):
        model_configuration: Optional[dict] = None,
        **kwargs,
    ) -> Any:
+        llm_prompt_artifact = kwargs.get("llm_prompt_artifact")
        if isinstance(
-            self.invocation_artifact, mlrun.artifacts.LLMPromptArtifact
+            llm_prompt_artifact, mlrun.artifacts.LLMPromptArtifact
        ) and isinstance(self.model_provider, ModelProvider):
            logger.debug(
                "Async invoking model provider",
@@ -1349,13 +1409,16 @@ class LLModel(Model):
            logger.warning(
                "LLModel invocation artifact or model provider not set, skipping async prediction",
                model_name=self.name,
-                invocation_artifact_type=type(self.invocation_artifact).__name__,
+                invocation_artifact_type=type(llm_prompt_artifact).__name__,
                model_provider_type=type(self.model_provider).__name__,
            )
            return body
 
    def run(self, body: Any, path: str, origin_name: Optional[str] = None) -> Any:
-        messages, model_configuration = self.enrich_prompt(body, origin_name)
+        llm_prompt_artifact = self._get_invocation_artifact(origin_name)
+        messages, model_configuration = self.enrich_prompt(
+            body, origin_name, llm_prompt_artifact
+        )
        logger.info(
            "Calling LLModel predict",
            model_name=self.name,
@@ -1363,13 +1426,19 @@ class LLModel(Model):
            messages_len=len(messages) if messages else 0,
        )
        return self.predict(
-            body, messages=messages, model_configuration=model_configuration
+            body,
+            messages=messages,
+            model_configuration=model_configuration,
+            llm_prompt_artifact=llm_prompt_artifact,
        )
 
    async def run_async(
        self, body: Any, path: str, origin_name: Optional[str] = None
    ) -> Any:
-        messages, model_configuration = self.enrich_prompt(body, origin_name)
+        llm_prompt_artifact = self._get_invocation_artifact(origin_name)
+        messages, model_configuration = self.enrich_prompt(
+            body, origin_name, llm_prompt_artifact
+        )
        logger.info(
            "Calling LLModel async predict",
            model_name=self.name,
@@ -1377,26 +1446,23 @@ class LLModel(Model):
            messages_len=len(messages) if messages else 0,
        )
        return await self.predict_async(
-            body, messages=messages, model_configuration=model_configuration
+            body,
+            messages=messages,
+            model_configuration=model_configuration,
+            llm_prompt_artifact=llm_prompt_artifact,
        )
 
    def enrich_prompt(
-        self, body: dict, origin_name: str
+        self,
+        body: dict,
+        origin_name: str,
+        llm_prompt_artifact: Optional[LLMPromptArtifact] = None,
    ) -> Union[tuple[list[dict], dict], tuple[None, None]]:
        logger.info(
            "Enriching prompt",
            model_name=self.name,
            model_endpoint_name=origin_name,
        )
-        if origin_name and self.shared_proxy_mapping:
-            llm_prompt_artifact = self.shared_proxy_mapping.get(origin_name)
-            if isinstance(llm_prompt_artifact, str):
-                llm_prompt_artifact = self._get_artifact_object(llm_prompt_artifact)
-                self.shared_proxy_mapping[origin_name] = llm_prompt_artifact
-        else:
-            llm_prompt_artifact = (
-                self.invocation_artifact or self._get_artifact_object()
-            )
        if not llm_prompt_artifact or not (
            llm_prompt_artifact and isinstance(llm_prompt_artifact, LLMPromptArtifact)
        ):
@@ -1448,6 +1514,27 @@ class LLModel(Model):
        )
        return prompt_template, model_configuration
 
+    def _get_invocation_artifact(
+        self, origin_name: Optional[str] = None
+    ) -> Union[LLMPromptArtifact, None]:
+        """
+        Get the LLMPromptArtifact object for this model.
+
+        :param proxy_uri: Optional; URI to the proxy artifact.
+        :return: LLMPromptArtifact object or None if not found.
+        """
+        if origin_name and self.shared_proxy_mapping:
+            llm_prompt_artifact = self.shared_proxy_mapping.get(origin_name)
+            if isinstance(llm_prompt_artifact, str):
+                llm_prompt_artifact = self._get_artifact_object(llm_prompt_artifact)
+                self.shared_proxy_mapping[origin_name] = llm_prompt_artifact
+        elif self._artifact_were_loaded:
+            llm_prompt_artifact = self.invocation_artifact
+        else:
+            self._load_artifacts()
+            llm_prompt_artifact = self.invocation_artifact
+        return llm_prompt_artifact
+
 
 class ModelSelector(ModelObj):
     """Used to select which models to run on each event."""
@@ -1615,10 +1702,6 @@ class ModelRunnerStep(MonitoredStep):
        model_endpoint_creation_strategy: Optional[
            schemas.ModelEndpointCreationStrategy
        ] = schemas.ModelEndpointCreationStrategy.INPLACE,
-        inputs: Optional[list[str]] = None,
-        outputs: Optional[list[str]] = None,
-        input_path: Optional[str] = None,
-        result_path: Optional[str] = None,
        override: bool = False,
    ) -> None:
        """
@@ -1641,17 +1724,6 @@
           1. If model endpoints with the same name exist, preserve them.
           2. Create a new model endpoint with the same name and set it to `latest`.
 
-        :param inputs: list of the model inputs (e.g. features) ,if provided will override the inputs
-                       that been configured in the model artifact, please note that those inputs need to
-                       be equal in length and order to the inputs that model_class predict method expects
-        :param outputs: list of the model outputs (e.g. labels) ,if provided will override the outputs
-                        that been configured in the model artifact, please note that those outputs need to
-                        be equal to the model_class predict method outputs (length, and order)
-        :param input_path: input path inside the user event, expect scopes to be defined by dot notation
-                           (e.g "inputs.my_model_inputs"). expects list or dictionary type object in path.
-        :param result_path: result path inside the user output event, expect scopes to be defined by dot
-                            notation (e.g "outputs.my_model_outputs") expects list or dictionary type object
-                            in path.
        :param override: bool allow override existing model on the current ModelRunnerStep.
        """
        model_class, model_params = (
@@ -1669,11 +1741,21 @@
                "model_artifact must be a string, ModelArtifact or LLMPromptArtifact"
            )
        root = self._extract_root_step()
+        shared_model_params = {}
        if isinstance(root, RootFlowStep):
-            shared_model_name = (
-                shared_model_name
-                or root.get_shared_model_name_by_artifact_uri(model_artifact_uri)
+            actual_shared_model_name, shared_model_class, shared_model_params = (
+                root.get_shared_model_by_artifact_uri(model_artifact_uri)
            )
+            if not actual_shared_model_name or (
+                shared_model_name and actual_shared_model_name != shared_model_name
+            ):
+                raise GraphError(
+                    f"ModelRunnerStep can only add proxy models that were added to the root flow step, "
+                    f"model {shared_model_name} is not in the shared models."
+                )
+            elif not shared_model_name:
+                shared_model_name = actual_shared_model_name
+            model_params["shared_runnable_name"] = shared_model_name
            if not root.shared_models or (
                root.shared_models
                and shared_model_name
@@ -1683,13 +1765,27 @@
                    f"ModelRunnerStep can only add proxy models that were added to the root flow step, "
                    f"model {shared_model_name} is not in the shared models."
                )
-            if shared_model_name not in self._shared_proxy_mapping:
+            monitoring_data = self.class_args.get(
+                schemas.ModelRunnerStepData.MONITORING_DATA, {}
+            )
+            monitoring_data.setdefault(endpoint_name, {})[
+                schemas.MonitoringData.MODEL_CLASS
+            ] = (
+                shared_model_class
+                if isinstance(shared_model_class, str)
+                else shared_model_class.__class__.__name__
+            )
+            self.class_args[schemas.ModelRunnerStepData.MONITORING_DATA] = (
+                monitoring_data
+            )
+
+            if shared_model_name and shared_model_name not in self._shared_proxy_mapping:
                self._shared_proxy_mapping[shared_model_name] = {
                    endpoint_name: model_artifact.uri
                    if isinstance(model_artifact, (ModelArtifact, LLMPromptArtifact))
                    else model_artifact
                }
-            else:
+            elif override and shared_model_name:
                self._shared_proxy_mapping[shared_model_name].update(
                    {
                        endpoint_name: model_artifact.uri
@@ -1704,11 +1800,11 @@
            model_artifact=model_artifact,
            labels=labels,
            model_endpoint_creation_strategy=model_endpoint_creation_strategy,
+            inputs=shared_model_params.get("inputs"),
+            outputs=shared_model_params.get("outputs"),
+            input_path=shared_model_params.get("input_path"),
+            result_path=shared_model_params.get("result_path"),
            override=override,
-            inputs=inputs,
-            outputs=outputs,
-            input_path=input_path,
-            result_path=result_path,
            **model_params,
        )
 
@@ -2742,6 +2838,10 @@ class RootFlowStep(FlowStep):
        model_class: Union[str, Model],
        execution_mechanism: Union[str, ParallelExecutionMechanisms],
        model_artifact: Union[str, ModelArtifact],
+        inputs: Optional[list[str]] = None,
+        outputs: Optional[list[str]] = None,
+        input_path: Optional[str] = None,
+        result_path: Optional[str] = None,
        override: bool = False,
        **model_parameters,
    ) -> None:
@@ -2771,6 +2871,19 @@
                                    It means that the runnable will not actually be run in parallel to anything else.
 
        :param model_artifact: model artifact or mlrun model artifact uri
+        :param inputs: list of the model inputs (e.g. features) ,if provided will override the inputs
+                       that been configured in the model artifact, please note that those inputs need
+                       to be equal in length and order to the inputs that model_class
+                       predict method expects
+        :param outputs: list of the model outputs (e.g. labels) ,if provided will override the outputs
+                        that been configured in the model artifact, please note that those outputs need
+                        to be equal to the model_class
+                        predict method outputs (length, and order)
+        :param input_path: input path inside the user event, expect scopes to be defined by dot notation
+                           (e.g "inputs.my_model_inputs"). expects list or dictionary type object in path.
+        :param result_path: result path inside the user output event, expect scopes to be defined by dot
+                            notation (e.g "outputs.my_model_outputs") expects list or dictionary type object
+                            in path.
        :param override: bool allow override existing model on the current ModelRunnerStep.
        :param model_parameters: Parameters for model instantiation
        """
@@ -2778,6 +2891,14 @@
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Cannot provide a model object as argument to `model_class` and also provide `model_parameters`."
            )
+        if type(model_class) is LLModel or (
+            isinstance(model_class, str) and model_class == LLModel.__name__
+        ):
+            if outputs:
+                warnings.warn(
+                    "LLModel with existing outputs detected, overriding to default"
+                )
+            outputs = UsageResponseKeys.fields()
 
        if execution_mechanism == ParallelExecutionMechanisms.shared_executor:
            raise mlrun.errors.MLRunInvalidArgumentError(
@@ -2805,6 +2926,14 @@
                "Inconsistent name for the added model."
            )
        model_parameters["name"] = name
+        model_parameters["inputs"] = inputs or model_parameters.get("inputs", [])
+        model_parameters["outputs"] = outputs or model_parameters.get("outputs", [])
+        model_parameters["input_path"] = input_path or model_parameters.get(
+            "input_path"
+        )
+        model_parameters["result_path"] = result_path or model_parameters.get(
+            "result_path"
+        )
 
        if name in self.shared_models and not override:
            raise mlrun.errors.MLRunInvalidArgumentError(
@@ -2819,7 +2948,9 @@
        self.shared_models[name] = (model_class, model_parameters)
        self.shared_models_mechanism[name] = execution_mechanism
 
-    def get_shared_model_name_by_artifact_uri(self, artifact_uri: str) -> Optional[str]:
+    def get_shared_model_by_artifact_uri(
+        self, artifact_uri: str
+    ) -> Optional[tuple[str, str, dict]]:
        """
        Get a shared model by its artifact URI.
        :param artifact_uri: The artifact URI of the model.
@@ -2827,7 +2958,7 @@
        """
        for model_name, (model_class, model_params) in self.shared_models.items():
            if model_params.get("artifact_uri") == artifact_uri:
-                return model_name
+                return model_name, model_class, model_params
        return None
 
    def config_pool_resource(
@@ -2997,12 +3128,10 @@ def _add_graphviz_router(graph, step, source=None, **kwargs):
        graph.edge(step.fullname, route.fullname)
 
 
-def _add_graphviz_model_runner(graph, step, source=None):
+def _add_graphviz_model_runner(graph, step, source=None, is_monitored=False):
    if source:
        graph.node("_start", source.name, shape=source.shape, style="filled")
        graph.edge("_start", step.fullname)
-
-    is_monitored = step._extract_root_step().track_models
    m_cell = '<FONT POINT-SIZE="9">🄼</FONT>' if is_monitored else ""
 
    number_of_models = len(
@@ -3041,6 +3170,7 @@ def _add_graphviz_flow(
        allow_empty=True
    )
    graph.node("_start", source.name, shape=source.shape, style="filled")
+    is_monitored = step.track_models if isinstance(step, RootFlowStep) else False
    for start_step in start_steps:
        graph.edge("_start", start_step.fullname)
    for child in step.get_children():
@@ -3049,7 +3179,7 @@
            with graph.subgraph(name="cluster_" + child.fullname) as sg:
                _add_graphviz_router(sg, child)
        elif kind == StepKinds.model_runner:
-            _add_graphviz_model_runner(graph, child)
+            _add_graphviz_model_runner(graph, child, is_monitored=is_monitored)
        else:
            graph.node(child.fullname, label=child.name, shape=child.get_shape())
            _add_edges(child.after or [], step, graph, child)
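
Among the states.py changes above, RootFlowStep.get_shared_model_name_by_artifact_uri is replaced by get_shared_model_by_artifact_uri, which returns a (name, class, parameters) triple instead of just the name. A minimal sketch of the new unpacking, assuming root is a RootFlowStep with a shared model registered for the given artifact URI:

# root: RootFlowStep, model_artifact_uri: str -- both assumed to exist in context
actual_shared_name, shared_model_class, shared_model_params = (
    root.get_shared_model_by_artifact_uri(model_artifact_uri)
)

# The parameters now carry the monitoring fields that moved from ModelRunnerStep
# to the shared-model registration (inputs, outputs, input_path, result_path).
print(shared_model_params.get("inputs"), shared_model_params.get("result_path"))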
@@ -1,4 +1,4 @@
 {
-    "git_commit": "3d39fb8737492c2c49c896ace2a390c8adfd66e6",
-    "version": "1.10.0-rc22"
+    "git_commit": "492ba7e7e40ca97c91a65058a403b6582387ea67",
+    "version": "1.10.0-rc23"
 }
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mlrun
-Version: 1.10.0rc22
+Version: 1.10.0rc23
 Summary: Tracking and config of machine learning runs
 Home-page: https://github.com/mlrun/mlrun
 Author: Yaron Haviv
@@ -45,7 +45,7 @@ Requires-Dist: semver~=3.0
 Requires-Dist: dependency-injector~=4.41
 Requires-Dist: fsspec<2024.7,>=2023.9.2
 Requires-Dist: v3iofs~=0.1.17
-Requires-Dist: storey~=1.10.10
+Requires-Dist: storey~=1.10.11
 Requires-Dist: inflection~=0.5.0
 Requires-Dist: python-dotenv~=1.0
 Requires-Dist: setuptools>=75.2
@@ -4,7 +4,7 @@ mlrun/config.py,sha256=XAAb68MwEHpuPddPMtKBULtFk0hI9YC25DniYQk1DKk,72853
 mlrun/errors.py,sha256=bAk0t_qmCxQSPNK0TugOAfA5R6f0G6OYvEvXUWSJ_5U,9062
 mlrun/execution.py,sha256=wkmT1k0QROgGJFMBIsYUsJaqEF2bkqaYVzp_ZQb527Q,58814
 mlrun/features.py,sha256=jMEXo6NB36A6iaxNEJWzdtYwUmglYD90OIKTIEeWhE8,15841
-mlrun/k8s_utils.py,sha256=mMnGyouHoJC93ZD2KGf9neJM1pD7mR9IXLnHOEwYVTQ,21469
+mlrun/k8s_utils.py,sha256=QeeTCmPvVfYMF8Y2Ws6SML2ihO3axdnmp7rTUuPYMrg,25043
 mlrun/lists.py,sha256=OlaV2QIFUzmenad9kxNJ3k4whlDyxI3zFbGwr6vpC5Y,8561
 mlrun/model.py,sha256=wHtM8LylSOEFk6Hxl95CVm8DOPhofjsANYdIvKHH6dw,88956
 mlrun/render.py,sha256=5DlhD6JtzHgmj5RVlpaYiHGhX84Q7qdi4RCEUj2UMgw,13195
@@ -75,7 +75,7 @@ mlrun/common/schemas/tag.py,sha256=1wqEiAujsElojWb3qmuyfcaLFjXSNAAQdafkDx7fkn0,8
 mlrun/common/schemas/workflow.py,sha256=Y-FHJnxs5c86yetuOAPdEJPkne__tLPCxjSXSb4lrjo,2541
 mlrun/common/schemas/model_monitoring/__init__.py,sha256=FqFiFIDcylquQdY0XTBamB5kMzMrMFEpVYM_ecsVfLg,1925
 mlrun/common/schemas/model_monitoring/constants.py,sha256=5Frul4YrJQZvUIOE4T2Tp8I6GjklFD7EyRIOR6YqsPo,13726
-mlrun/common/schemas/model_monitoring/functions.py,sha256=GpfSGp05D87wEKemECD3USL368pvnAM2WfS-nef5qOg,2210
+mlrun/common/schemas/model_monitoring/functions.py,sha256=Ej8ChjmMZq1HP32THNABoktQHN1mdlkSqKbofxu10i4,2536
 mlrun/common/schemas/model_monitoring/grafana.py,sha256=THQlLfPBevBksta8p5OaIsBaJtsNSXexLvHrDxOaVns,2095
 mlrun/common/schemas/model_monitoring/model_endpoints.py,sha256=Bl08bnM5DnWJsj4gZhCDD49PDg5y7mPnrsD2fKBE7BI,13316
 mlrun/data_types/__init__.py,sha256=wdxGS1PTnaKXiNZ7PYGxxo86OifHH7NYoArIjDJksLA,1054
@@ -87,7 +87,7 @@ mlrun/datastore/__init__.py,sha256=K8lPO3nVQTk14tbJMUS8nbtwhJw1PBzvQ4UI1T5exFo,6
 mlrun/datastore/alibaba_oss.py,sha256=E0t0-e9Me2t2Mux2LWdC9riOG921TgNjhoy897JJX7o,4932
 mlrun/datastore/azure_blob.py,sha256=3LG7tOTwT97ZFBmyq-sfAIe5_SkuFgisRQtipv4kKUw,12779
 mlrun/datastore/base.py,sha256=yLdnFCL2k_rcasdbxXjnQr7Lwm-A79LnW9AITtn9-p4,25450
-mlrun/datastore/datastore.py,sha256=gOlMyPDelD9CRieoraDPYf1NNig_GrQRuuQxLmRq8Bo,13298
+mlrun/datastore/datastore.py,sha256=F9NdQFwyAHgjKFSQ1mcLZBuxNqXXesNMjtIVj03L5Gk,13422
 mlrun/datastore/datastore_profile.py,sha256=Y4VtaatIK4UXuTdpffCkAcsCBSxj5KOgnX7KlL-Yds8,23803
 mlrun/datastore/dbfs_store.py,sha256=CJwst1598qxiu63-Qa0c3e5E8LjeCv1XbMyWI7A6irY,6560
 mlrun/datastore/filestore.py,sha256=OcykjzhbUAZ6_Cb9bGAXRL2ngsOpxXSb4rR0lyogZtM,3773
@@ -109,6 +109,7 @@ mlrun/datastore/v3io.py,sha256=sMn5473k_bXyIJovNf0rahbVHRmO0YPdOwIhbs06clg,8201
 mlrun/datastore/vectorstore.py,sha256=k-yom5gfw20hnVG0Rg7aBEehuXwvAloZwn0cx0VGals,11708
 mlrun/datastore/model_provider/__init__.py,sha256=kXGBqhLN0rlAx0kTXhozGzFsIdSqW0uTSKMmsLgq_is,569
 mlrun/datastore/model_provider/huggingface_provider.py,sha256=c8t7kZ1ZbjZpbyRmwLNz_eqrfwRXmVs_sf6F1s_H2xg,11594
+mlrun/datastore/model_provider/mock_model_provider.py,sha256=uIgGP3yZtLDLS-2WMyH20SGfrpodpyxyIw4WYTpHhUg,3059
 mlrun/datastore/model_provider/model_provider.py,sha256=3F-iWkxfOI8ypgzJw1I8ZkSXF6xYaqCZf5BMQhG46Fo,11098
 mlrun/datastore/model_provider/openai_provider.py,sha256=KgbP8M4VnbWf9Yh5iG2g3qvXEoLmwWyeL1iTWqwFyWI,11406
 mlrun/datastore/wasbfs/__init__.py,sha256=s5Ul-0kAhYqFjKDR2X0O2vDGDbLQQduElb32Ev56Te4,1343
@@ -226,7 +227,7 @@ mlrun/launcher/local.py,sha256=3gv-IQYoIChSmRaZ0vLUh0Tu26oLMCx9GbBYh4fWygQ,12161
 mlrun/launcher/remote.py,sha256=zFXE52Cq_7EkC8lfNKT0ceIbye0CfFiundF7O1YU4Xw,7810
 mlrun/model_monitoring/__init__.py,sha256=qDQnncjya9XPTlfvGyfWsZWiXc-glGZrrNja-5QmCZk,782
 mlrun/model_monitoring/api.py,sha256=G8mI2iJm7cptTVue7dl9qMD6oY8_uxnEoVLz93DFQq4,27003
-mlrun/model_monitoring/controller.py,sha256=XDbDnASFeYaIiqW4unSJPYgJfMvJjs5tPfI5kHRAdg0,43646
+mlrun/model_monitoring/controller.py,sha256=sXUdEPG678DYmiVNm-LfJHcsiBkjZqpSTbG8hqxWxX0,43647
 mlrun/model_monitoring/features_drift_table.py,sha256=c6GpKtpOJbuT1u5uMWDL_S-6N4YPOmlktWMqPme3KFY,25308
 mlrun/model_monitoring/helpers.py,sha256=0xhIYKzhaBrgyjLiA_ekCZsXzi3GBXpLyG40Bhj-PTY,23596
 mlrun/model_monitoring/stream_processing.py,sha256=bryYO3D0cC10MAQ-liHxUZ79MrL-VFXCb7KNyj6bl-8,34655
@@ -311,9 +312,9 @@ mlrun/serving/__init__.py,sha256=nriJAcVn5aatwU03T7SsE6ngJEGTxr3wIGt4WuvCCzY,139
 mlrun/serving/merger.py,sha256=pfOQoozUyObCTpqXAMk94PmhZefn4bBrKufO3MKnkAc,6193
 mlrun/serving/remote.py,sha256=Igha2FipK3-6rV_PZ1K464kTbiTu8rhc6SMm-HiEJ6o,18817
 mlrun/serving/routers.py,sha256=SmBOlHn7rT2gWTa-W8f16UB0UthgIFc4D1cPOZAA9ss,54003
-mlrun/serving/server.py,sha256=t5nME4nnoubuyQxD_LM_kGtfEKMM6ccgxalmvLYekiw,39513
+mlrun/serving/server.py,sha256=_Wju0myvP-VccyQm9VwNpsZUUiHpRh22WkQbBzd6Z2o,40343
 mlrun/serving/serving_wrapper.py,sha256=UL9hhWCfMPcTJO_XrkvNaFvck1U1E7oS8trTZyak0cA,835
-mlrun/serving/states.py,sha256=S_U0UmzosXZdP7IWGdGkvnLtKTbYWllCwvU931pJy-g,132727
+mlrun/serving/states.py,sha256=HXXpXi9hekUbMhH-0JTdm3l-iIx2giqq3-pAE7owG00,138334
 mlrun/serving/system_steps.py,sha256=kGaQ2OXsdluthXm_15G-f98caj3n04hq6LTIEBjzLM0,19426
 mlrun/serving/utils.py,sha256=Zbfqm8TKNcTE8zRBezVBzpvR2WKeKeIRN7otNIaiYEc,4170
 mlrun/serving/v1_serving.py,sha256=c6J_MtpE-Tqu00-6r4eJOCO6rUasHDal9W2eBIcrl50,11853
@@ -347,11 +348,11 @@ mlrun/utils/notifications/notification/mail.py,sha256=ZyJ3eqd8simxffQmXzqd3bgbAq
 mlrun/utils/notifications/notification/slack.py,sha256=kfhogR5keR7Zjh0VCjJNK3NR5_yXT7Cv-x9GdOUW4Z8,7294
 mlrun/utils/notifications/notification/webhook.py,sha256=zxh8CAlbPnTazsk6r05X5TKwqUZVOH5KBU2fJbzQlG4,5330
 mlrun/utils/version/__init__.py,sha256=YnzE6tlf24uOQ8y7Z7l96QLAI6-QEii7-77g8ynmzy0,613
-mlrun/utils/version/version.json,sha256=7GQMeZhchhq-LX2SDzTItv1--Cau-tncYi8aRaFNVso,90
+mlrun/utils/version/version.json,sha256=2qBVEU7lBKiv9zvHtV0mCeHZbaKZ8YTV9JnkwWpuEZc,90
 mlrun/utils/version/version.py,sha256=M2hVhRrgkN3SxacZHs3ZqaOsqAA7B6a22ne324IQ1HE,1877
-mlrun-1.10.0rc22.dist-info/licenses/LICENSE,sha256=zTiv1CxWNkOk1q8eJS1G_8oD4gWpWLwWxj_Agcsi8Os,11337
-mlrun-1.10.0rc22.dist-info/METADATA,sha256=ixX_HZZXQGsTQLgnratXkSNOu2aEybe1cX-hrKMU4wo,26272
-mlrun-1.10.0rc22.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-mlrun-1.10.0rc22.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
-mlrun-1.10.0rc22.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
-mlrun-1.10.0rc22.dist-info/RECORD,,
+mlrun-1.10.0rc23.dist-info/licenses/LICENSE,sha256=zTiv1CxWNkOk1q8eJS1G_8oD4gWpWLwWxj_Agcsi8Os,11337
+mlrun-1.10.0rc23.dist-info/METADATA,sha256=xCwJjSygfymK3oJbuGfETNBukBcLNpKbCKDMSL9UNjg,26272
+mlrun-1.10.0rc23.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mlrun-1.10.0rc23.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
+mlrun-1.10.0rc23.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
+mlrun-1.10.0rc23.dist-info/RECORD,,