mlrun 1.10.0rc37__py3-none-any.whl → 1.10.0rc38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic; see the package registry's advisory page for more details.

@@ -55,14 +55,12 @@ class TDEngineConnector(TSDBConnector):
55
55
  """
56
56
 
57
57
  type: str = mm_schemas.TSDBTarget.TDEngine
58
- database = f"{tdengine_schemas._MODEL_MONITORING_DATABASE}_{mlrun.mlconf.system_id}"
59
58
 
60
59
  def __init__(
61
60
  self,
62
61
  project: str,
63
62
  profile: DatastoreProfile,
64
63
  timestamp_precision: TDEngineTimestampPrecision = TDEngineTimestampPrecision.MICROSECOND,
65
- **kwargs,
66
64
  ):
67
65
  super().__init__(project=project)
68
66
 
@@ -72,6 +70,15 @@ class TDEngineConnector(TSDBConnector):
72
70
  timestamp_precision
73
71
  )
74
72
 
73
+ if not mlrun.mlconf.system_id:
74
+ raise mlrun.errors.MLRunInvalidArgumentError(
75
+ "system_id is not set in mlrun.mlconf. "
76
+ "TDEngineConnector requires system_id to be configured for database name construction. "
77
+ "Please ensure MLRun configuration is properly loaded before creating TDEngineConnector."
78
+ )
79
+ self.database = (
80
+ f"{tdengine_schemas._MODEL_MONITORING_DATABASE}_{mlrun.mlconf.system_id}"
81
+ )
75
82
  self._init_super_tables()
76
83
 
77
84
  @property
mlrun/run.py CHANGED
@@ -118,7 +118,25 @@ def function_to_module(code="", workdir=None, secrets=None, silent=False):
118
118
  raise ValueError("nothing to run, specify command or function")
119
119
 
120
120
  command = os.path.join(workdir or "", command)
121
- mod_name = mlrun.utils.helpers.get_module_name_from_path(command)
121
+
122
+ source_file_path_object, working_dir_path_object = (
123
+ mlrun.utils.helpers.get_source_and_working_dir_paths(command)
124
+ )
125
+ if source_file_path_object.is_relative_to(working_dir_path_object):
126
+ mod_name = mlrun.utils.helpers.get_relative_module_name_from_path(
127
+ source_file_path_object, working_dir_path_object
128
+ )
129
+ elif source_file_path_object.is_relative_to(
130
+ pathlib.Path(tempfile.gettempdir()).resolve()
131
+ ):
132
+ mod_name = Path(command).stem
133
+ else:
134
+ raise mlrun.errors.MLRunRuntimeError(
135
+ f"Cannot run source file '{command}': it must be located either under the current working "
136
+ f"directory ('{working_dir_path_object}') or the system temporary directory ('{tempfile.gettempdir()}'). "
137
+ f"This is required when running with local=True."
138
+ )
139
+
122
140
  spec = imputil.spec_from_file_location(mod_name, command)
123
141
  if spec is None:
124
142
  raise OSError(f"cannot import from {command!r}")
@@ -1224,6 +1224,18 @@ class RemoteRuntime(KubeResource):
1224
1224
  # try to infer the invocation url from the internal and if not exists, use external.
1225
1225
  # $$$$ we do not want to use the external invocation url (e.g.: ingress, nodePort, etc.)
1226
1226
 
1227
+ # if none of urls is set, function was deployed with watch=False
1228
+ # and status wasn't fetched with Nuclio
1229
+ # _get_state fetches the state and updates url
1230
+ if (
1231
+ not self.status.address
1232
+ and not self.status.internal_invocation_urls
1233
+ and not self.status.external_invocation_urls
1234
+ ):
1235
+ state, _, _ = self._get_state()
1236
+ if state not in ["ready", "scaledToZero"]:
1237
+ logger.warning(f"Function is in the {state} state")
1238
+
1227
1239
  # prefer internal invocation url if running inside k8s cluster
1228
1240
  if (
1229
1241
  not force_external_address
mlrun/serving/server.py CHANGED
@@ -23,6 +23,7 @@ import os
23
23
  import socket
24
24
  import traceback
25
25
  import uuid
26
+ from collections import defaultdict
26
27
  from datetime import datetime, timezone
27
28
  from typing import Any, Optional, Union
28
29
 
@@ -50,7 +51,7 @@ from ..datastore.store_resources import ResourceCache
50
51
  from ..errors import MLRunInvalidArgumentError
51
52
  from ..execution import MLClientCtx
52
53
  from ..model import ModelObj
53
- from ..utils import get_caller_globals, get_module_name_from_path
54
+ from ..utils import get_caller_globals, get_relative_module_name_from_path
54
55
  from .states import (
55
56
  FlowStep,
56
57
  MonitoredStep,
@@ -522,10 +523,6 @@ def add_system_steps_to_graph(
522
523
  monitor_flow_step.after = [
523
524
  step_name,
524
525
  ]
525
- context.logger.info_with(
526
- "Server graph after adding system steps",
527
- graph=str(graph.steps),
528
- )
529
526
  return graph
530
527
 
531
528
 
@@ -583,7 +580,7 @@ async def async_execute_graph(
583
580
  batch_size: Optional[int],
584
581
  read_as_lists: bool,
585
582
  nest_under_inputs: bool,
586
- ) -> list[Any]:
583
+ ) -> None:
587
584
  # Validate that data parameter is a DataItem and not passed via params
588
585
  if not isinstance(data, DataItem):
589
586
  raise MLRunInvalidArgumentError(
@@ -593,7 +590,7 @@ async def async_execute_graph(
593
590
  f"while 'inputs' is for data files that need to be loaded. "
594
591
  f"Example: run_function(..., inputs={{'data': 'path/to/data.csv'}}, params={{other_config: value}})"
595
592
  )
596
-
593
+ run_call_count = 0
597
594
  spec = mlrun.utils.get_serving_spec()
598
595
  modname = None
599
596
  code = os.getenv("MLRUN_EXEC_CODE")
@@ -607,7 +604,17 @@ async def async_execute_graph(
607
604
  # gets set in local flow and not just in the remote pod
608
605
  source_file_path = spec.get("filename", None)
609
606
  if source_file_path:
610
- modname = get_module_name_from_path(source_file_path)
607
+ source_file_path_object, working_dir_path_object = (
608
+ mlrun.utils.helpers.get_source_and_working_dir_paths(source_file_path)
609
+ )
610
+ if not source_file_path_object.is_relative_to(working_dir_path_object):
611
+ raise mlrun.errors.MLRunRuntimeError(
612
+ f"Source file path '{source_file_path}' is not under the current working directory "
613
+ f"(which is required when running with local=True)"
614
+ )
615
+ modname = get_relative_module_name_from_path(
616
+ source_file_path_object, working_dir_path_object
617
+ )
611
618
 
612
619
  namespace = {}
613
620
  if modname:
@@ -682,7 +689,6 @@ async def async_execute_graph(
682
689
 
683
690
  if config.log_level.lower() == "debug":
684
691
  server.verbose = True
685
- context.logger.info_with("Initializing states", namespace=namespace)
686
692
  kwargs = {}
687
693
  if hasattr(context, "is_mock"):
688
694
  kwargs["is_mock"] = context.is_mock
@@ -700,6 +706,7 @@ async def async_execute_graph(
700
706
  context.logger.info(server.to_yaml())
701
707
 
702
708
  async def run(body):
709
+ nonlocal run_call_count
703
710
  event = storey.Event(id=index, body=body)
704
711
  if timestamp_column:
705
712
  if batching:
@@ -714,6 +721,7 @@ async def async_execute_graph(
714
721
  f"Event body '{body}' did not contain timestamp column '{timestamp_column}'"
715
722
  )
716
723
  event._original_timestamp = body[timestamp_column]
724
+ run_call_count += 1
717
725
  return await server.run(event, context)
718
726
 
719
727
  if batching and not batch_size:
@@ -771,7 +779,31 @@ async def async_execute_graph(
771
779
  model_endpoint_uids=model_endpoint_uids,
772
780
  )
773
781
 
774
- return responses
782
+ # log the results as artifacts
783
+ num_of_meps_in_the_graph = len(server.graph.model_endpoints_names)
784
+ artifact_path = None
785
+ if (
786
+ "{{run.uid}}" not in context.artifact_path
787
+ ): # TODO: delete when IG-22841 is resolved
788
+ artifact_path = "+/{{run.uid}}" # will be concatenated to the context's path in extend_artifact_path
789
+ if num_of_meps_in_the_graph <= 1:
790
+ context.log_dataset(
791
+ "prediction", df=pd.DataFrame(responses), artifact_path=artifact_path
792
+ )
793
+ else:
794
+ # turn this list of samples into a dict of lists, one per model endpoint
795
+ grouped = defaultdict(list)
796
+ for sample in responses:
797
+ for model_name, features in sample.items():
798
+ grouped[model_name].append(features)
799
+ # create a dataframe per model endpoint and log it
800
+ for model_name, features in grouped.items():
801
+ context.log_dataset(
802
+ f"prediction_{model_name}",
803
+ df=pd.DataFrame(features),
804
+ artifact_path=artifact_path,
805
+ )
806
+ context.log_result("num_rows", run_call_count)
775
807
 
776
808
 
777
809
  def _is_inside_asyncio_loop():
mlrun/serving/states.py CHANGED
@@ -522,7 +522,9 @@ class BaseStep(ModelObj):
522
522
 
523
523
  root = self._extract_root_step()
524
524
 
525
- if not isinstance(root, RootFlowStep):
525
+ if not isinstance(root, RootFlowStep) or (
526
+ isinstance(root, RootFlowStep) and root.engine != "async"
527
+ ):
526
528
  raise GraphError(
527
529
  "ModelRunnerStep can be added to 'Flow' topology graph only"
528
530
  )
@@ -1148,6 +1150,7 @@ class Model(storey.ParallelExecutionRunnable, ModelObj):
1148
1150
  "artifact_uri",
1149
1151
  "shared_runnable_name",
1150
1152
  "shared_proxy_mapping",
1153
+ "execution_mechanism",
1151
1154
  ]
1152
1155
  kind = "model"
1153
1156
 
@@ -1170,6 +1173,7 @@ class Model(storey.ParallelExecutionRunnable, ModelObj):
1170
1173
  self.model_artifact: Optional[ModelArtifact] = None
1171
1174
  self.model_provider: Optional[ModelProvider] = None
1172
1175
  self._artifact_were_loaded = False
1176
+ self._execution_mechanism = None
1173
1177
 
1174
1178
  def __init_subclass__(cls):
1175
1179
  super().__init_subclass__()
@@ -1189,6 +1193,20 @@ class Model(storey.ParallelExecutionRunnable, ModelObj):
1189
1193
  raise_missing_schema_exception=False,
1190
1194
  )
1191
1195
 
1196
+ # Check if the relevant predict method is implemented when trying to initialize the model
1197
+ if self._execution_mechanism == storey.ParallelExecutionMechanisms.asyncio:
1198
+ if self.__class__.predict_async is Model.predict_async:
1199
+ raise mlrun.errors.ModelRunnerError(
1200
+ f"{self.name} is running with {self._execution_mechanism} execution_mechanism but predict_async() "
1201
+ f"is not implemented"
1202
+ )
1203
+ else:
1204
+ if self.__class__.predict is Model.predict:
1205
+ raise mlrun.errors.ModelRunnerError(
1206
+ f"{self.name} is running with {self._execution_mechanism} execution_mechanism but predict() "
1207
+ f"is not implemented"
1208
+ )
1209
+
1192
1210
  def _load_artifacts(self) -> None:
1193
1211
  if not self._artifact_were_loaded:
1194
1212
  artifact = self._get_artifact_object()
@@ -1219,11 +1237,11 @@ class Model(storey.ParallelExecutionRunnable, ModelObj):
1219
1237
 
1220
1238
  def predict(self, body: Any, **kwargs) -> Any:
1221
1239
  """Override to implement prediction logic. If the logic requires asyncio, override predict_async() instead."""
1222
- return body
1240
+ raise NotImplementedError("predict() method not implemented")
1223
1241
 
1224
1242
  async def predict_async(self, body: Any, **kwargs) -> Any:
1225
1243
  """Override to implement prediction logic if the logic requires asyncio."""
1226
- return body
1244
+ raise NotImplementedError("predict_async() method not implemented")
1227
1245
 
1228
1246
  def run(self, body: Any, path: str, origin_name: Optional[str] = None) -> Any:
1229
1247
  return self.predict(body)
@@ -1644,6 +1662,8 @@ class ModelRunnerStep(MonitoredStep):
1644
1662
  Note when ModelRunnerStep is used in a graph, MLRun automatically imports
1645
1663
  the default language model class (LLModel) during function deployment.
1646
1664
 
1665
+ Note ModelRunnerStep can only be added to a graph that has the flow topology and running with async engine.
1666
+
1647
1667
  :param model_selector: ModelSelector instance whose select() method will be used to select models to run on each
1648
1668
  event. Optional. If not passed, all models will be run.
1649
1669
  :param raise_exception: If True, an error will be raised when model selection fails or if one of the models raised
@@ -2091,24 +2111,28 @@ class ModelRunnerStep(MonitoredStep):
2091
2111
  )
2092
2112
  model_objects = []
2093
2113
  for model, model_params in models.values():
2114
+ model_name = model_params.get("name")
2094
2115
  model_params[schemas.MonitoringData.INPUT_PATH] = (
2095
2116
  self.class_args.get(
2096
2117
  mlrun.common.schemas.ModelRunnerStepData.MONITORING_DATA, {}
2097
2118
  )
2098
- .get(model_params.get("name"), {})
2119
+ .get(model_name, {})
2099
2120
  .get(schemas.MonitoringData.INPUT_PATH)
2100
2121
  )
2101
2122
  model_params[schemas.MonitoringData.RESULT_PATH] = (
2102
2123
  self.class_args.get(
2103
2124
  mlrun.common.schemas.ModelRunnerStepData.MONITORING_DATA, {}
2104
2125
  )
2105
- .get(model_params.get("name"), {})
2126
+ .get(model_name, {})
2106
2127
  .get(schemas.MonitoringData.RESULT_PATH)
2107
2128
  )
2108
2129
  model = get_class(model, namespace).from_dict(
2109
2130
  model_params, init_with_params=True
2110
2131
  )
2111
2132
  model._raise_exception = False
2133
+ model._execution_mechanism = execution_mechanism_by_model_name.get(
2134
+ model_name
2135
+ )
2112
2136
  model_objects.append(model)
2113
2137
  self._async_object = ModelRunner(
2114
2138
  model_selector=model_selector,
@@ -3018,6 +3042,7 @@ class RootFlowStep(FlowStep):
3018
3042
  model_params, init_with_params=True
3019
3043
  )
3020
3044
  model._raise_exception = False
3045
+ model._execution_mechanism = self._shared_models_mechanism[model.name]
3021
3046
  self.context.executor.add_runnable(
3022
3047
  model, self._shared_models_mechanism[model.name]
3023
3048
  )
mlrun/utils/helpers.py CHANGED
@@ -2464,15 +2464,16 @@ def merge_requirements(
2464
2464
  return [str(req) for req in merged.values()]
2465
2465
 
2466
2466
 
2467
- def get_module_name_from_path(source_file_path: str) -> str:
2467
+ def get_source_and_working_dir_paths(source_file_path) -> (pathlib.Path, pathlib.Path):
2468
2468
  source_file_path_object = pathlib.Path(source_file_path).resolve()
2469
- current_dir_path_object = pathlib.Path(".").resolve()
2470
- if not source_file_path_object.is_relative_to(current_dir_path_object):
2471
- raise mlrun.errors.MLRunRuntimeError(
2472
- f"Source file path '{source_file_path}' is not under the current working directory "
2473
- f"(which is required when running with local=True)"
2474
- )
2469
+ working_dir_path_object = pathlib.Path(".").resolve()
2470
+ return source_file_path_object, working_dir_path_object
2471
+
2472
+
2473
+ def get_relative_module_name_from_path(
2474
+ source_file_path_object, working_dir_path_object
2475
+ ) -> str:
2475
2476
  relative_path_to_source_file = source_file_path_object.relative_to(
2476
- current_dir_path_object
2477
+ working_dir_path_object
2477
2478
  )
2478
2479
  return ".".join(relative_path_to_source_file.with_suffix("").parts)
@@ -1,4 +1,4 @@
1
1
  {
2
- "git_commit": "5c0bf44084e089850e98e6255745822c5107d001",
3
- "version": "1.10.0-rc37"
2
+ "git_commit": "cc5c5639d721f37d6a1d0d0b7cf9f853f38e4707",
3
+ "version": "1.10.0-rc38"
4
4
  }
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mlrun
3
- Version: 1.10.0rc37
3
+ Version: 1.10.0rc38
4
4
  Summary: Tracking and config of machine learning runs
5
5
  Home-page: https://github.com/mlrun/mlrun
6
6
  Author: Yaron Haviv
@@ -8,7 +8,7 @@ mlrun/k8s_utils.py,sha256=zIacVyvsXrXVO-DdxAoGQOGEDWOGJEFJzYPhPVnn3z8,24548
8
8
  mlrun/lists.py,sha256=OlaV2QIFUzmenad9kxNJ3k4whlDyxI3zFbGwr6vpC5Y,8561
9
9
  mlrun/model.py,sha256=JxYWYfMvRMloVEsxfghjH8gq5vsVCVk-OJmHGhbPJuU,88954
10
10
  mlrun/render.py,sha256=5DlhD6JtzHgmj5RVlpaYiHGhX84Q7qdi4RCEUj2UMgw,13195
11
- mlrun/run.py,sha256=eXmu2C2Z-iWWRkyraYjOoM22lRfnyavOnskylHwPeV8,48948
11
+ mlrun/run.py,sha256=DRlXYf9C4ZJeGeXdbQxgeCKhoV2892u0v0yIfsyDSkA,49730
12
12
  mlrun/secrets.py,sha256=VFETVDJFZ0AGDivYjhYscO_YHnzeBnAebxlio7Svkq0,9633
13
13
  mlrun/alerts/__init__.py,sha256=0gtG1BG0DXxFrXegIkjbM1XEN4sP9ODo0ucXrNld1hU,601
14
14
  mlrun/alerts/alert.py,sha256=QQFZGydQbx9RvAaSiaH-ALQZVcDKQX5lgizqj_rXW2k,15948
@@ -252,7 +252,7 @@ mlrun/model_monitoring/db/tsdb/tdengine/__init__.py,sha256=vgBdsKaXUURKqIf3M0y4s
252
252
  mlrun/model_monitoring/db/tsdb/tdengine/schemas.py,sha256=TuWuaCZw8sV1gSwN2BPmW8Gzwe3dsRN__KkJB9lum00,13116
253
253
  mlrun/model_monitoring/db/tsdb/tdengine/stream_graph_steps.py,sha256=Uadj0UvAmln2MxDWod-kAzau1uNlqZh981rPhbUH_5M,2857
254
254
  mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connection.py,sha256=dtkaHaWKWERPXylEWMECeetwrz3rWl0P43AADcTjlls,9330
255
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=0ltrjvxsPEKeIgK7Lio9T2YXofIPdB_mGMOtdjI6_KY,53947
255
+ mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py,sha256=qq-2Slk9jHc18lUx00w4Oj5_EfnZXlmkvLXPDSbz1zY,54341
256
256
  mlrun/model_monitoring/db/tsdb/tdengine/writer_graph_steps.py,sha256=zMof6hUr0dsyor73pnOWkJP62INAvslHU0nUklbT-3w,2053
257
257
  mlrun/model_monitoring/db/tsdb/v3io/__init__.py,sha256=aL3bfmQsUQ-sbvKGdNihFj8gLCK3mSys0qDcXtYOwgc,616
258
258
  mlrun/model_monitoring/db/tsdb/v3io/stream_graph_steps.py,sha256=sNQFj6qyJx5eSBKRC3gyTc1cfh1l2IkRpPtuZwtzCW0,6844
@@ -303,7 +303,7 @@ mlrun/runtimes/mpijob/abstract.py,sha256=QjAG4OZ6JEQ58w5-qYNd6hUGwvaW8ynLtlr9jNf
303
303
  mlrun/runtimes/mpijob/v1.py,sha256=zSlRkiWHz4B3yht66sVf4mlfDs8YT9EnP9DfBLn5VNs,3372
304
304
  mlrun/runtimes/nuclio/__init__.py,sha256=osOVMN9paIOuUoOTizmkxMb_OXRP-SlPwXHJSSYK_wk,834
305
305
  mlrun/runtimes/nuclio/api_gateway.py,sha256=vH9ClKVP4Mb24rvA67xPuAvAhX-gAv6vVtjVxyplhdc,26969
306
- mlrun/runtimes/nuclio/function.py,sha256=6o9SndAkd-k4FyVr4ms_oWL6MuAWMnsrtrEd_fWfnDw,55488
306
+ mlrun/runtimes/nuclio/function.py,sha256=yL6PVyddDFEhXxY93g8YiywBERi-oqlpEw8IE3BVGmQ,56006
307
307
  mlrun/runtimes/nuclio/nuclio.py,sha256=sLK8KdGO1LbftlL3HqPZlFOFTAAuxJACZCVl1c0Ha6E,2942
308
308
  mlrun/runtimes/nuclio/serving.py,sha256=eXffwn6xTvEwC-HEk42DRxywOrin7RMUze3JWjeBxzA,36429
309
309
  mlrun/runtimes/nuclio/application/__init__.py,sha256=rRs5vasy_G9IyoTpYIjYDafGoL6ifFBKgBtsXn31Atw,614
@@ -315,9 +315,9 @@ mlrun/serving/__init__.py,sha256=nriJAcVn5aatwU03T7SsE6ngJEGTxr3wIGt4WuvCCzY,139
315
315
  mlrun/serving/merger.py,sha256=pfOQoozUyObCTpqXAMk94PmhZefn4bBrKufO3MKnkAc,6193
316
316
  mlrun/serving/remote.py,sha256=p29CBtKwbW_l8BzmNg3Uy__0eMf7_OubTMzga_S3EOA,22089
317
317
  mlrun/serving/routers.py,sha256=pu5jlSLI4Ml68YP_FMFDhhwPfLcT6lRu5yL5QDgXPHQ,52889
318
- mlrun/serving/server.py,sha256=7RiXZ1Nf6I_rwZUyTNqVaNEzQmYUTqKLjXXZEM1OwEc,40993
318
+ mlrun/serving/server.py,sha256=UIQON9ytG_4VUa4cMWZ8AxxBzGjBrhFhkC-FrvAHa_o,42593
319
319
  mlrun/serving/serving_wrapper.py,sha256=UL9hhWCfMPcTJO_XrkvNaFvck1U1E7oS8trTZyak0cA,835
320
- mlrun/serving/states.py,sha256=Q2Q7o0eJCvnonXd2-sfiv7zhCiyC6xthfW25nzf61KM,138976
320
+ mlrun/serving/states.py,sha256=zSaELw7et0tAHa0nZ2_zKTPdxqnXo4WOCwclULkN6no,140363
321
321
  mlrun/serving/steps.py,sha256=zbMgJnu-m4n7vhFRgZkCMMifIsCya-TzAj3Gjc-Fgnc,2193
322
322
  mlrun/serving/system_steps.py,sha256=BDCJn73h7cUT5AoSSm25Fjg4WwzcEpMQp-ZjMw9ogEc,20025
323
323
  mlrun/serving/utils.py,sha256=Zbfqm8TKNcTE8zRBezVBzpvR2WKeKeIRN7otNIaiYEc,4170
@@ -333,7 +333,7 @@ mlrun/utils/async_http.py,sha256=8Olx8TNNeXB07nEGwlqhEgFgnFAD71vBU_bqaA9JW-w,122
333
333
  mlrun/utils/azure_vault.py,sha256=IEFizrDGDbAaoWwDr1WoA88S_EZ0T--vjYtY-i0cvYQ,3450
334
334
  mlrun/utils/clones.py,sha256=qbAGyEbSvlewn3Tw_DpQZP9z6MGzFhSaZfI1CblX8Fg,7515
335
335
  mlrun/utils/condition_evaluator.py,sha256=-nGfRmZzivn01rHTroiGY4rqEv8T1irMyhzxEei-sKc,1897
336
- mlrun/utils/helpers.py,sha256=Cz3VR5aq3N6DinKd16HI9HGZSLqSmN9h4-EmnNyYGqQ,84369
336
+ mlrun/utils/helpers.py,sha256=zwaGatCEJphJEcTwvEOjiyGDWNSEDkzzYcR4IWsu378,84268
337
337
  mlrun/utils/http.py,sha256=5ZU2VpokaUM_DT3HBSqTm8xjUqTPjZN5fKkSIvKlTl0,8704
338
338
  mlrun/utils/logger.py,sha256=uaCgI_ezzaXf7nJDCy-1Nrjds8vSXqDbzmjmb3IyCQo,14864
339
339
  mlrun/utils/regex.py,sha256=FcRwWD8x9X3HLhCCU2F0AVKTFah784Pr7ZAe3a02jw8,5199
@@ -352,11 +352,11 @@ mlrun/utils/notifications/notification/mail.py,sha256=ZyJ3eqd8simxffQmXzqd3bgbAq
352
352
  mlrun/utils/notifications/notification/slack.py,sha256=wSu_7W0EnGLBNwIgWCYEeTP8j9SPAMPDBnfUcPnVZYA,7299
353
353
  mlrun/utils/notifications/notification/webhook.py,sha256=FM5-LQAKAVJKp37MRzR3SsejalcnpM6r_9Oe7znxZEA,5313
354
354
  mlrun/utils/version/__init__.py,sha256=YnzE6tlf24uOQ8y7Z7l96QLAI6-QEii7-77g8ynmzy0,613
355
- mlrun/utils/version/version.json,sha256=XVTb8THn_xGwKwbjV7Mu2mVTg-z6fmV6gXnUhrvfT6s,90
355
+ mlrun/utils/version/version.json,sha256=hHIsu87_e3inXvZwxyAJBc1LtA2aFWqipBV_Mhx1n5o,90
356
356
  mlrun/utils/version/version.py,sha256=M2hVhRrgkN3SxacZHs3ZqaOsqAA7B6a22ne324IQ1HE,1877
357
- mlrun-1.10.0rc37.dist-info/licenses/LICENSE,sha256=zTiv1CxWNkOk1q8eJS1G_8oD4gWpWLwWxj_Agcsi8Os,11337
358
- mlrun-1.10.0rc37.dist-info/METADATA,sha256=mZhr0TYnjpEVjpPTy6JYYXqs4dnp0q4X7Hu7TPQ5R8M,26104
359
- mlrun-1.10.0rc37.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
360
- mlrun-1.10.0rc37.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
361
- mlrun-1.10.0rc37.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
362
- mlrun-1.10.0rc37.dist-info/RECORD,,
357
+ mlrun-1.10.0rc38.dist-info/licenses/LICENSE,sha256=zTiv1CxWNkOk1q8eJS1G_8oD4gWpWLwWxj_Agcsi8Os,11337
358
+ mlrun-1.10.0rc38.dist-info/METADATA,sha256=OGc4TY9eDtdLJ9NU5R8OsR_go-Wcm1VheehWzXs5yeE,26104
359
+ mlrun-1.10.0rc38.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
360
+ mlrun-1.10.0rc38.dist-info/entry_points.txt,sha256=1Owd16eAclD5pfRCoJpYC2ZJSyGNTtUr0nCELMioMmU,46
361
+ mlrun-1.10.0rc38.dist-info/top_level.txt,sha256=NObLzw3maSF9wVrgSeYBv-fgnHkAJ1kEkh12DLdd5KM,6
362
+ mlrun-1.10.0rc38.dist-info/RECORD,,