mlrun-1.6.0rc35-py3-none-any.whl → mlrun-1.7.0rc2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun has been flagged as potentially problematic; see the advisory details for this release for more information.

Files changed (199)
  1. mlrun/__main__.py +3 -3
  2. mlrun/api/schemas/__init__.py +1 -1
  3. mlrun/artifacts/base.py +11 -6
  4. mlrun/artifacts/dataset.py +2 -2
  5. mlrun/artifacts/model.py +30 -24
  6. mlrun/artifacts/plots.py +2 -2
  7. mlrun/common/db/sql_session.py +5 -3
  8. mlrun/common/helpers.py +1 -2
  9. mlrun/common/schemas/artifact.py +3 -3
  10. mlrun/common/schemas/auth.py +3 -3
  11. mlrun/common/schemas/background_task.py +1 -1
  12. mlrun/common/schemas/client_spec.py +1 -1
  13. mlrun/common/schemas/feature_store.py +16 -16
  14. mlrun/common/schemas/frontend_spec.py +7 -7
  15. mlrun/common/schemas/function.py +1 -1
  16. mlrun/common/schemas/hub.py +4 -9
  17. mlrun/common/schemas/memory_reports.py +2 -2
  18. mlrun/common/schemas/model_monitoring/grafana.py +4 -4
  19. mlrun/common/schemas/model_monitoring/model_endpoints.py +14 -15
  20. mlrun/common/schemas/notification.py +4 -4
  21. mlrun/common/schemas/object.py +2 -2
  22. mlrun/common/schemas/pipeline.py +1 -1
  23. mlrun/common/schemas/project.py +3 -3
  24. mlrun/common/schemas/runtime_resource.py +8 -12
  25. mlrun/common/schemas/schedule.py +3 -3
  26. mlrun/common/schemas/tag.py +1 -2
  27. mlrun/common/schemas/workflow.py +2 -2
  28. mlrun/config.py +8 -4
  29. mlrun/data_types/to_pandas.py +1 -3
  30. mlrun/datastore/base.py +0 -28
  31. mlrun/datastore/datastore_profile.py +9 -9
  32. mlrun/datastore/filestore.py +0 -1
  33. mlrun/datastore/google_cloud_storage.py +1 -1
  34. mlrun/datastore/sources.py +7 -11
  35. mlrun/datastore/spark_utils.py +1 -2
  36. mlrun/datastore/targets.py +31 -31
  37. mlrun/datastore/utils.py +4 -6
  38. mlrun/datastore/v3io.py +70 -46
  39. mlrun/db/base.py +22 -23
  40. mlrun/db/httpdb.py +34 -34
  41. mlrun/db/nopdb.py +19 -19
  42. mlrun/errors.py +1 -1
  43. mlrun/execution.py +4 -4
  44. mlrun/feature_store/api.py +20 -21
  45. mlrun/feature_store/common.py +1 -1
  46. mlrun/feature_store/feature_set.py +28 -32
  47. mlrun/feature_store/feature_vector.py +24 -27
  48. mlrun/feature_store/retrieval/base.py +7 -7
  49. mlrun/feature_store/retrieval/conversion.py +2 -4
  50. mlrun/feature_store/steps.py +7 -15
  51. mlrun/features.py +5 -7
  52. mlrun/frameworks/_common/artifacts_library.py +9 -9
  53. mlrun/frameworks/_common/mlrun_interface.py +5 -5
  54. mlrun/frameworks/_common/model_handler.py +48 -48
  55. mlrun/frameworks/_common/plan.py +2 -3
  56. mlrun/frameworks/_common/producer.py +3 -4
  57. mlrun/frameworks/_common/utils.py +5 -5
  58. mlrun/frameworks/_dl_common/loggers/logger.py +6 -7
  59. mlrun/frameworks/_dl_common/loggers/mlrun_logger.py +9 -9
  60. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +16 -35
  61. mlrun/frameworks/_ml_common/artifacts_library.py +1 -2
  62. mlrun/frameworks/_ml_common/loggers/logger.py +3 -4
  63. mlrun/frameworks/_ml_common/loggers/mlrun_logger.py +4 -5
  64. mlrun/frameworks/_ml_common/model_handler.py +24 -24
  65. mlrun/frameworks/_ml_common/pkl_model_server.py +2 -2
  66. mlrun/frameworks/_ml_common/plan.py +1 -1
  67. mlrun/frameworks/_ml_common/plans/calibration_curve_plan.py +2 -3
  68. mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +2 -3
  69. mlrun/frameworks/_ml_common/plans/dataset_plan.py +3 -3
  70. mlrun/frameworks/_ml_common/plans/feature_importance_plan.py +3 -3
  71. mlrun/frameworks/_ml_common/plans/roc_curve_plan.py +4 -4
  72. mlrun/frameworks/_ml_common/utils.py +4 -4
  73. mlrun/frameworks/auto_mlrun/auto_mlrun.py +7 -7
  74. mlrun/frameworks/huggingface/model_server.py +4 -4
  75. mlrun/frameworks/lgbm/__init__.py +32 -32
  76. mlrun/frameworks/lgbm/callbacks/logging_callback.py +4 -5
  77. mlrun/frameworks/lgbm/callbacks/mlrun_logging_callback.py +4 -5
  78. mlrun/frameworks/lgbm/mlrun_interfaces/booster_mlrun_interface.py +1 -3
  79. mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +6 -6
  80. mlrun/frameworks/lgbm/model_handler.py +9 -9
  81. mlrun/frameworks/lgbm/model_server.py +6 -6
  82. mlrun/frameworks/lgbm/utils.py +5 -5
  83. mlrun/frameworks/onnx/dataset.py +8 -8
  84. mlrun/frameworks/onnx/mlrun_interface.py +3 -3
  85. mlrun/frameworks/onnx/model_handler.py +6 -6
  86. mlrun/frameworks/onnx/model_server.py +7 -7
  87. mlrun/frameworks/parallel_coordinates.py +2 -2
  88. mlrun/frameworks/pytorch/__init__.py +16 -16
  89. mlrun/frameworks/pytorch/callbacks/callback.py +4 -5
  90. mlrun/frameworks/pytorch/callbacks/logging_callback.py +17 -17
  91. mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +11 -11
  92. mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +23 -29
  93. mlrun/frameworks/pytorch/callbacks_handler.py +38 -38
  94. mlrun/frameworks/pytorch/mlrun_interface.py +20 -20
  95. mlrun/frameworks/pytorch/model_handler.py +17 -17
  96. mlrun/frameworks/pytorch/model_server.py +7 -7
  97. mlrun/frameworks/sklearn/__init__.py +12 -12
  98. mlrun/frameworks/sklearn/estimator.py +4 -4
  99. mlrun/frameworks/sklearn/metrics_library.py +14 -14
  100. mlrun/frameworks/sklearn/mlrun_interface.py +3 -6
  101. mlrun/frameworks/sklearn/model_handler.py +2 -2
  102. mlrun/frameworks/tf_keras/__init__.py +5 -5
  103. mlrun/frameworks/tf_keras/callbacks/logging_callback.py +14 -14
  104. mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +11 -11
  105. mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +19 -23
  106. mlrun/frameworks/tf_keras/mlrun_interface.py +7 -9
  107. mlrun/frameworks/tf_keras/model_handler.py +14 -14
  108. mlrun/frameworks/tf_keras/model_server.py +6 -6
  109. mlrun/frameworks/xgboost/__init__.py +12 -12
  110. mlrun/frameworks/xgboost/model_handler.py +6 -6
  111. mlrun/k8s_utils.py +4 -5
  112. mlrun/kfpops.py +2 -2
  113. mlrun/launcher/base.py +10 -10
  114. mlrun/launcher/local.py +8 -8
  115. mlrun/launcher/remote.py +7 -7
  116. mlrun/lists.py +3 -4
  117. mlrun/model.py +205 -55
  118. mlrun/model_monitoring/api.py +21 -24
  119. mlrun/model_monitoring/application.py +4 -4
  120. mlrun/model_monitoring/batch.py +17 -17
  121. mlrun/model_monitoring/controller.py +2 -1
  122. mlrun/model_monitoring/features_drift_table.py +44 -31
  123. mlrun/model_monitoring/prometheus.py +1 -4
  124. mlrun/model_monitoring/stores/kv_model_endpoint_store.py +11 -13
  125. mlrun/model_monitoring/stores/model_endpoint_store.py +9 -11
  126. mlrun/model_monitoring/stores/models/__init__.py +2 -2
  127. mlrun/model_monitoring/stores/sql_model_endpoint_store.py +11 -13
  128. mlrun/model_monitoring/stream_processing.py +16 -34
  129. mlrun/model_monitoring/tracking_policy.py +2 -1
  130. mlrun/package/__init__.py +6 -6
  131. mlrun/package/context_handler.py +5 -5
  132. mlrun/package/packager.py +7 -7
  133. mlrun/package/packagers/default_packager.py +6 -6
  134. mlrun/package/packagers/numpy_packagers.py +15 -15
  135. mlrun/package/packagers/pandas_packagers.py +5 -5
  136. mlrun/package/packagers/python_standard_library_packagers.py +10 -10
  137. mlrun/package/packagers_manager.py +18 -23
  138. mlrun/package/utils/_formatter.py +4 -4
  139. mlrun/package/utils/_pickler.py +2 -2
  140. mlrun/package/utils/_supported_format.py +4 -4
  141. mlrun/package/utils/log_hint_utils.py +2 -2
  142. mlrun/package/utils/type_hint_utils.py +4 -9
  143. mlrun/platforms/other.py +1 -2
  144. mlrun/projects/operations.py +5 -5
  145. mlrun/projects/pipelines.py +9 -9
  146. mlrun/projects/project.py +58 -46
  147. mlrun/render.py +1 -1
  148. mlrun/run.py +9 -9
  149. mlrun/runtimes/__init__.py +7 -4
  150. mlrun/runtimes/base.py +20 -23
  151. mlrun/runtimes/constants.py +5 -5
  152. mlrun/runtimes/daskjob.py +8 -8
  153. mlrun/runtimes/databricks_job/databricks_cancel_task.py +1 -1
  154. mlrun/runtimes/databricks_job/databricks_runtime.py +7 -7
  155. mlrun/runtimes/function_reference.py +1 -1
  156. mlrun/runtimes/local.py +1 -1
  157. mlrun/runtimes/mpijob/abstract.py +1 -2
  158. mlrun/runtimes/nuclio/__init__.py +20 -0
  159. mlrun/runtimes/{function.py → nuclio/function.py} +15 -16
  160. mlrun/runtimes/{nuclio.py → nuclio/nuclio.py} +6 -6
  161. mlrun/runtimes/{serving.py → nuclio/serving.py} +13 -12
  162. mlrun/runtimes/pod.py +95 -48
  163. mlrun/runtimes/remotesparkjob.py +1 -1
  164. mlrun/runtimes/sparkjob/spark3job.py +50 -33
  165. mlrun/runtimes/utils.py +1 -2
  166. mlrun/secrets.py +3 -3
  167. mlrun/serving/remote.py +0 -4
  168. mlrun/serving/routers.py +6 -6
  169. mlrun/serving/server.py +4 -4
  170. mlrun/serving/states.py +29 -0
  171. mlrun/serving/utils.py +3 -3
  172. mlrun/serving/v1_serving.py +6 -7
  173. mlrun/serving/v2_serving.py +50 -8
  174. mlrun/track/tracker_manager.py +3 -3
  175. mlrun/track/trackers/mlflow_tracker.py +1 -2
  176. mlrun/utils/async_http.py +5 -7
  177. mlrun/utils/azure_vault.py +1 -1
  178. mlrun/utils/clones.py +1 -2
  179. mlrun/utils/condition_evaluator.py +3 -3
  180. mlrun/utils/db.py +3 -3
  181. mlrun/utils/helpers.py +37 -119
  182. mlrun/utils/http.py +1 -4
  183. mlrun/utils/logger.py +49 -14
  184. mlrun/utils/notifications/notification/__init__.py +3 -3
  185. mlrun/utils/notifications/notification/base.py +2 -2
  186. mlrun/utils/notifications/notification/ipython.py +1 -1
  187. mlrun/utils/notifications/notification_pusher.py +8 -14
  188. mlrun/utils/retryer.py +207 -0
  189. mlrun/utils/singleton.py +1 -1
  190. mlrun/utils/v3io_clients.py +2 -3
  191. mlrun/utils/version/version.json +2 -2
  192. mlrun/utils/version/version.py +2 -6
  193. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/METADATA +9 -9
  194. mlrun-1.7.0rc2.dist-info/RECORD +315 -0
  195. mlrun-1.6.0rc35.dist-info/RECORD +0 -313
  196. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/LICENSE +0 -0
  197. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/WHEEL +0 -0
  198. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/entry_points.txt +0 -0
  199. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/top_level.txt +0 -0
@@ -12,7 +12,6 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  #
15
- from typing import List
16
15
 
17
16
  import mlrun
18
17
 
@@ -34,8 +33,8 @@ class MLRunLoggingCallback(LoggingCallback):
34
33
  def __init__(
35
34
  self,
36
35
  context: mlrun.MLClientCtx,
37
- dynamic_hyperparameters: List[str] = None,
38
- static_hyperparameters: List[str] = None,
36
+ dynamic_hyperparameters: list[str] = None,
37
+ static_hyperparameters: list[str] = None,
39
38
  logging_frequency: int = 100,
40
39
  ):
41
40
  """
@@ -55,7 +54,7 @@ class MLRunLoggingCallback(LoggingCallback):
55
54
  them and the results to MLRun). Two low frequency may slow the training time.
56
55
  Default: 100.
57
56
  """
58
- super(MLRunLoggingCallback, self).__init__(
57
+ super().__init__(
59
58
  dynamic_hyperparameters=dynamic_hyperparameters,
60
59
  static_hyperparameters=static_hyperparameters,
61
60
  )
@@ -75,7 +74,7 @@ class MLRunLoggingCallback(LoggingCallback):
75
74
  information check the `Callback` doc string.
76
75
  """
77
76
  # Log the results and parameters:
78
- super(MLRunLoggingCallback, self).__call__(env=env)
77
+ super().__call__(env=env)
79
78
 
80
79
  # Produce the artifacts (post iteration stage):
81
80
  if env.iteration % self._logging_frequency == 0:
@@ -43,6 +43,4 @@ class LGBMBoosterMLRunInterface(MLRunInterface, ABC):
43
43
  :param restoration: Restoration information tuple as returned from 'remove_interface' in order to add the
44
44
  interface in a certain state.
45
45
  """
46
- super(LGBMBoosterMLRunInterface, cls).add_interface(
47
- obj=obj, restoration=restoration
48
- )
46
+ super().add_interface(obj=obj, restoration=restoration)
@@ -14,7 +14,7 @@
14
14
  #
15
15
  from abc import ABC
16
16
  from types import ModuleType
17
- from typing import Callable, List, Tuple, Union
17
+ from typing import Callable, Union
18
18
 
19
19
  import lightgbm as lgb
20
20
 
@@ -88,7 +88,7 @@ class LGBMMLRunInterface(MLRunInterface, ABC):
88
88
  globals().update({"lightgbm": lgb, "lgb": lgb})
89
89
 
90
90
  # Add the interface to the provided lightgbm module:
91
- super(LGBMMLRunInterface, cls).add_interface(obj=obj, restoration=restoration)
91
+ super().add_interface(obj=obj, restoration=restoration)
92
92
 
93
93
  @staticmethod
94
94
  def mlrun_train(*args, **kwargs):
@@ -223,7 +223,7 @@ class LGBMMLRunInterface(MLRunInterface, ABC):
223
223
  pass
224
224
 
225
225
  @staticmethod
226
- def _parse_callbacks(callbacks: List[Callable]):
226
+ def _parse_callbacks(callbacks: list[Callable]):
227
227
  """
228
228
  Parse the callbacks passed to the training API functions of LightGBM for adding logging and enabling the MLRun
229
229
  callbacks API.
@@ -259,9 +259,9 @@ class LGBMMLRunInterface(MLRunInterface, ABC):
259
259
  @staticmethod
260
260
  def _post_train(
261
261
  booster: lgb.Booster,
262
- train_set: Tuple[MLTypes.DatasetType, Union[MLTypes.DatasetType, None]],
263
- validation_sets: List[
264
- Tuple[Tuple[MLTypes.DatasetType, Union[MLTypes.DatasetType, None]], str]
262
+ train_set: tuple[MLTypes.DatasetType, Union[MLTypes.DatasetType, None]],
263
+ validation_sets: list[
264
+ tuple[tuple[MLTypes.DatasetType, Union[MLTypes.DatasetType, None]], str]
265
265
  ],
266
266
  ):
267
267
  """
@@ -14,7 +14,7 @@
14
14
  #
15
15
  import os
16
16
  import pickle
17
- from typing import Dict, List, Union
17
+ from typing import Union
18
18
 
19
19
  import cloudpickle
20
20
  import lightgbm as lgb
@@ -56,8 +56,8 @@ class LGBMModelHandler(MLModelHandler):
56
56
  model_name: str = None,
57
57
  model_path: str = None,
58
58
  model: LGBMTypes.ModelType = None,
59
- modules_map: Union[Dict[str, Union[None, str, List[str]]], str] = None,
60
- custom_objects_map: Union[Dict[str, Union[str, List[str]]], str] = None,
59
+ modules_map: Union[dict[str, Union[None, str, list[str]]], str] = None,
60
+ custom_objects_map: Union[dict[str, Union[str, list[str]]], str] = None,
61
61
  custom_objects_directory: str = None,
62
62
  context: mlrun.MLClientCtx = None,
63
63
  model_format: str = ModelFormats.PKL,
@@ -139,7 +139,7 @@ class LGBMModelHandler(MLModelHandler):
139
139
  self._model_format = model_format
140
140
 
141
141
  # Set up the base handler class:
142
- super(LGBMModelHandler, self).__init__(
142
+ super().__init__(
143
143
  model=model,
144
144
  model_path=model_path,
145
145
  model_name=model_name,
@@ -152,8 +152,8 @@ class LGBMModelHandler(MLModelHandler):
152
152
 
153
153
  def set_labels(
154
154
  self,
155
- to_add: Dict[str, Union[str, int, float]] = None,
156
- to_remove: List[str] = None,
155
+ to_add: dict[str, Union[str, int, float]] = None,
156
+ to_remove: list[str] = None,
157
157
  ):
158
158
  """
159
159
  Update the labels dictionary of this model artifact. There are required labels that cannot be edited or removed.
@@ -162,7 +162,7 @@ class LGBMModelHandler(MLModelHandler):
162
162
  :param to_remove: A list of labels keys to remove.
163
163
  """
164
164
  # Update the user's labels:
165
- super(LGBMModelHandler, self).set_labels(to_add=to_add, to_remove=to_remove)
165
+ super().set_labels(to_add=to_add, to_remove=to_remove)
166
166
 
167
167
  # Set the required labels:
168
168
  self._labels[self._LabelKeys.MODEL_FORMAT] = self._model_format
@@ -193,7 +193,7 @@ class LGBMModelHandler(MLModelHandler):
193
193
 
194
194
  :return The saved model additional artifacts (if needed) dictionary if context is available and None otherwise.
195
195
  """
196
- super(LGBMModelHandler, self).save(output_path=output_path)
196
+ super().save(output_path=output_path)
197
197
 
198
198
  if isinstance(self._model, lgb.LGBMModel):
199
199
  return self._save_lgbmmodel()
@@ -204,7 +204,7 @@ class LGBMModelHandler(MLModelHandler):
204
204
  Load the specified model in this handler. Additional parameters for the class initializer can be passed via the
205
205
  kwargs dictionary.
206
206
  """
207
- super(LGBMModelHandler, self).load()
207
+ super().load()
208
208
 
209
209
  # ModelFormats.PKL - Load from a pkl file:
210
210
  if self._model_format == LGBMModelHandler.ModelFormats.PKL:
@@ -12,7 +12,7 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  #
15
- from typing import Any, Dict, List, Union
15
+ from typing import Any, Union
16
16
 
17
17
  import numpy as np
18
18
 
@@ -37,8 +37,8 @@ class LGBMModelServer(V2ModelServer):
37
37
  model_path: LGBMTypes.PathType = None,
38
38
  model_name: str = None,
39
39
  model_format: str = LGBMModelHandler.ModelFormats.PKL,
40
- modules_map: Union[Dict[str, Union[None, str, List[str]]], str] = None,
41
- custom_objects_map: Union[Dict[str, Union[str, List[str]]], str] = None,
40
+ modules_map: Union[dict[str, Union[None, str, list[str]]], str] = None,
41
+ custom_objects_map: Union[dict[str, Union[str, list[str]]], str] = None,
42
42
  custom_objects_directory: str = None,
43
43
  to_list: bool = True,
44
44
  protocol: str = None,
@@ -100,7 +100,7 @@ class LGBMModelServer(V2ModelServer):
100
100
  :param protocol: -
101
101
  :param class_args: -
102
102
  """
103
- super(LGBMModelServer, self).__init__(
103
+ super().__init__(
104
104
  context=context,
105
105
  name=name,
106
106
  model_path=model_path,
@@ -139,7 +139,7 @@ class LGBMModelServer(V2ModelServer):
139
139
  self._model_handler.load()
140
140
  self.model = self._model_handler.model
141
141
 
142
- def predict(self, request: Dict[str, Any]) -> Union[np.ndarray, list]:
142
+ def predict(self, request: dict[str, Any]) -> Union[np.ndarray, list]:
143
143
  """
144
144
  Infer the inputs through the model using MLRun's PyTorch interface and return its output. The inferred data will
145
145
  be read from the "inputs" key of the request.
@@ -158,7 +158,7 @@ class LGBMModelServer(V2ModelServer):
158
158
  # Return as list if required:
159
159
  return predictions if not self.to_list else predictions.tolist()
160
160
 
161
- def explain(self, request: Dict[str, Any]) -> str:
161
+ def explain(self, request: dict[str, Any]) -> str:
162
162
  """
163
163
  Return a string explaining what model is being served in this serving function and the function name.
164
164
 
@@ -12,7 +12,7 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  #
15
- from typing import List, Tuple, Union
15
+ from typing import Union
16
16
 
17
17
  import lightgbm as lgb
18
18
  import numpy as np
@@ -36,13 +36,13 @@ class LGBMTypes(MLTypes):
36
36
 
37
37
  # An evaluation result as packaged by the training in LightGBM:
38
38
  EvaluationResultType = Union[
39
- Tuple[str, str, float, bool], # As packaged in `lightgbm.train`
40
- Tuple[str, str, float, bool, float], # As packaged in `lightgbm.cv`
39
+ tuple[str, str, float, bool], # As packaged in `lightgbm.train`
40
+ tuple[str, str, float, bool, float], # As packaged in `lightgbm.cv`
41
41
  ]
42
42
 
43
43
  # Detailed type for the named tuple `CallbackEnv` passed during LightGBM's training for the callbacks:
44
- CallbackEnvType = Tuple[
45
- lgb.Booster, dict, int, int, int, List[EvaluationResultType]
44
+ CallbackEnvType = tuple[
45
+ lgb.Booster, dict, int, int, int, list[EvaluationResultType]
46
46
  ]
47
47
 
48
48
 
@@ -13,7 +13,7 @@
13
13
  # limitations under the License.
14
14
  #
15
15
  import math
16
- from typing import Callable, List, Tuple, Union
16
+ from typing import Callable, Union
17
17
 
18
18
  import numpy as np
19
19
 
@@ -25,11 +25,11 @@ class ONNXDataset:
25
25
 
26
26
  def __init__(
27
27
  self,
28
- x: Union[np.ndarray, List[np.ndarray]],
29
- y: Union[np.ndarray, List[np.ndarray]] = None,
28
+ x: Union[np.ndarray, list[np.ndarray]],
29
+ y: Union[np.ndarray, list[np.ndarray]] = None,
30
30
  batch_size: int = 1,
31
- x_transforms: List[Callable[[np.ndarray], np.ndarray]] = None,
32
- y_transforms: List[Callable[[np.ndarray], np.ndarray]] = None,
31
+ x_transforms: list[Callable[[np.ndarray], np.ndarray]] = None,
32
+ y_transforms: list[Callable[[np.ndarray], np.ndarray]] = None,
33
33
  is_batched_transforms: bool = False,
34
34
  ):
35
35
  """
@@ -71,7 +71,7 @@ class ONNXDataset:
71
71
  self._index = 0
72
72
  return self
73
73
 
74
- def __next__(self) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
74
+ def __next__(self) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray]]:
75
75
  """
76
76
  Get the next item in line (by the inner index) since calling '__iter__'. If ground truth was provided (y),
77
77
  a tuple of (x, y) will be returned. Otherwise x.
@@ -92,7 +92,7 @@ class ONNXDataset:
92
92
 
93
93
  def __getitem__(
94
94
  self, index: int
95
- ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
95
+ ) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray]]:
96
96
  """
97
97
  Get the item at the given index. If ground truth was provided, a tuple of (x, y) will be returned. Otherwise x.
98
98
 
@@ -155,7 +155,7 @@ class ONNXDataset:
155
155
  def _call_transforms(
156
156
  self,
157
157
  items: np.ndarray,
158
- transforms: List[Callable[[np.ndarray], np.ndarray]],
158
+ transforms: list[Callable[[np.ndarray], np.ndarray]],
159
159
  is_batched: bool,
160
160
  ):
161
161
  """
@@ -12,7 +12,7 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  #
15
- from typing import Callable, List
15
+ from typing import Callable
16
16
 
17
17
  import numpy as np
18
18
  import onnx
@@ -35,7 +35,7 @@ class ONNXMLRunInterface:
35
35
  def __init__(
36
36
  self,
37
37
  model: onnx.ModelProto,
38
- execution_providers: List[str] = None,
38
+ execution_providers: list[str] = None,
39
39
  context: mlrun.MLClientCtx = None,
40
40
  ):
41
41
  # Set the context:
@@ -74,7 +74,7 @@ class ONNXMLRunInterface:
74
74
  def evaluate(
75
75
  self,
76
76
  dataset: ONNXDataset,
77
- metrics: List[Callable[[np.ndarray, np.ndarray], float]],
77
+ metrics: list[Callable[[np.ndarray, np.ndarray], float]],
78
78
  ):
79
79
  pass
80
80
 
@@ -13,7 +13,7 @@
13
13
  # limitations under the License.
14
14
  #
15
15
  import os
16
- from typing import Dict, List, Union
16
+ from typing import Union
17
17
 
18
18
  import onnx
19
19
  import onnxoptimizer
@@ -60,7 +60,7 @@ class ONNXModelHandler(ModelHandler):
60
60
  :raise MLRunInvalidArgumentError: There was no model or model directory supplied.
61
61
  """
62
62
  # Setup the base handler class:
63
- super(ONNXModelHandler, self).__init__(
63
+ super().__init__(
64
64
  model=model,
65
65
  model_path=model_path,
66
66
  model_name=model_name,
@@ -71,7 +71,7 @@ class ONNXModelHandler(ModelHandler):
71
71
  # TODO: output_path won't work well with logging artifacts. Need to look into changing the logic of 'log_artifact'.
72
72
  def save(
73
73
  self, output_path: str = None, **kwargs
74
- ) -> Union[Dict[str, Artifact], None]:
74
+ ) -> Union[dict[str, Artifact], None]:
75
75
  """
76
76
  Save the handled model at the given output path. If a MLRun context is available, the saved model files will be
77
77
  logged and returned as artifacts.
@@ -81,7 +81,7 @@ class ONNXModelHandler(ModelHandler):
81
81
 
82
82
  :return The saved model additional artifacts (if needed) dictionary if context is available and None otherwise.
83
83
  """
84
- super(ONNXModelHandler, self).save(output_path=output_path)
84
+ super().save(output_path=output_path)
85
85
 
86
86
  # Set the output path:
87
87
  if output_path is None:
@@ -97,7 +97,7 @@ class ONNXModelHandler(ModelHandler):
97
97
  """
98
98
  Load the specified model in this handler.
99
99
  """
100
- super(ONNXModelHandler, self).load()
100
+ super().load()
101
101
 
102
102
  # Check that the model is well-formed:
103
103
  # TODO: Currently not working well with HuggingFace models so we skip it
@@ -106,7 +106,7 @@ class ONNXModelHandler(ModelHandler):
106
106
  # Load the ONNX model:
107
107
  self._model = onnx.load(self._model_file)
108
108
 
109
- def optimize(self, optimizations: List[str] = None, fixed_point: bool = False):
109
+ def optimize(self, optimizations: list[str] = None, fixed_point: bool = False):
110
110
  """
111
111
  Use ONNX optimizer to optimize the ONNX model. The optimizations supported can be seen by calling
112
112
  'onnxoptimizer.get_available_passes()'
@@ -12,7 +12,7 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  #
15
- from typing import Any, Dict, List, Tuple, Union
15
+ from typing import Any, Union
16
16
 
17
17
  import numpy as np
18
18
  import onnx
@@ -37,7 +37,7 @@ class ONNXModelServer(V2ModelServer):
37
37
  model: onnx.ModelProto = None,
38
38
  model_path: str = None,
39
39
  model_name: str = None,
40
- execution_providers: List[Union[str, Tuple[str, Dict[str, Any]]]] = None,
40
+ execution_providers: list[Union[str, tuple[str, dict[str, Any]]]] = None,
41
41
  protocol: str = None,
42
42
  **class_args,
43
43
  ):
@@ -76,7 +76,7 @@ class ONNXModelServer(V2ModelServer):
76
76
  :param protocol: -
77
77
  :param class_args: -
78
78
  """
79
- super(ONNXModelServer, self).__init__(
79
+ super().__init__(
80
80
  context=context,
81
81
  name=name,
82
82
  model_path=model_path,
@@ -98,8 +98,8 @@ class ONNXModelServer(V2ModelServer):
98
98
  # Prepare inference parameters:
99
99
  self._model_handler: ONNXModelHandler = None
100
100
  self._inference_session: onnxruntime.InferenceSession = None
101
- self._input_layers: List[str] = None
102
- self._output_layers: List[str] = None
101
+ self._input_layers: list[str] = None
102
+ self._output_layers: list[str] = None
103
103
 
104
104
  def load(self):
105
105
  """
@@ -134,7 +134,7 @@ class ONNXModelServer(V2ModelServer):
134
134
  output_layer.name for output_layer in self._inference_session.get_outputs()
135
135
  ]
136
136
 
137
- def predict(self, request: Dict[str, Any]) -> np.ndarray:
137
+ def predict(self, request: dict[str, Any]) -> np.ndarray:
138
138
  """
139
139
  Infer the inputs through the model using ONNXRunTime and return its output. The inferred data will be
140
140
  read from the "inputs" key of the request.
@@ -155,7 +155,7 @@ class ONNXModelServer(V2ModelServer):
155
155
  },
156
156
  )
157
157
 
158
- def explain(self, request: Dict[str, Any]) -> str:
158
+ def explain(self, request: dict[str, Any]) -> str:
159
159
  """
160
160
  Return a string explaining what model is being serve in this serving function and the function name.
161
161
 
@@ -14,7 +14,7 @@
14
14
  #
15
15
  import datetime
16
16
  import os
17
- from typing import List, Union
17
+ from typing import Union
18
18
 
19
19
  import numpy as np
20
20
  import pandas as pd
@@ -239,7 +239,7 @@ def _runs_list_to_df(runs_list, extend_iterations=False):
239
239
 
240
240
  @filter_warnings("ignore", FutureWarning)
241
241
  def compare_run_objects(
242
- runs_list: Union[mlrun.model.RunObject, List[mlrun.model.RunObject]],
242
+ runs_list: Union[mlrun.model.RunObject, list[mlrun.model.RunObject]],
243
243
  hide_identical: bool = True,
244
244
  exclude: list = None,
245
245
  show: bool = None,
@@ -13,7 +13,7 @@
13
13
  # limitations under the License.
14
14
  #
15
15
  # flake8: noqa - this is until we take care of the F401 violations with respect to __all__ & sphinx
16
- from typing import Any, Dict, List, Tuple, Union
16
+ from typing import Any, Union
17
17
 
18
18
  from torch.nn import Module
19
19
  from torch.optim import Optimizer
@@ -35,23 +35,23 @@ def train(
35
35
  loss_function: Module,
36
36
  optimizer: Optimizer,
37
37
  validation_set: DataLoader = None,
38
- metric_functions: List[PyTorchTypes.MetricFunctionType] = None,
38
+ metric_functions: list[PyTorchTypes.MetricFunctionType] = None,
39
39
  scheduler=None,
40
40
  scheduler_step_frequency: Union[int, float, str] = "epoch",
41
41
  epochs: int = 1,
42
42
  training_iterations: int = None,
43
43
  validation_iterations: int = None,
44
- callbacks_list: List[Callback] = None,
44
+ callbacks_list: list[Callback] = None,
45
45
  use_cuda: bool = True,
46
46
  use_horovod: bool = None,
47
47
  auto_log: bool = True,
48
48
  model_name: str = None,
49
- modules_map: Union[Dict[str, Union[None, str, List[str]]], str] = None,
50
- custom_objects_map: Union[Dict[str, Union[str, List[str]]], str] = None,
49
+ modules_map: Union[dict[str, Union[None, str, list[str]]], str] = None,
50
+ custom_objects_map: Union[dict[str, Union[str, list[str]]], str] = None,
51
51
  custom_objects_directory: str = None,
52
52
  tensorboard_directory: str = None,
53
- mlrun_callback_kwargs: Dict[str, Any] = None,
54
- tensorboard_callback_kwargs: Dict[str, Any] = None,
53
+ mlrun_callback_kwargs: dict[str, Any] = None,
54
+ tensorboard_callback_kwargs: dict[str, Any] = None,
55
55
  context: mlrun.MLClientCtx = None,
56
56
  ) -> PyTorchModelHandler:
57
57
  """
@@ -205,19 +205,19 @@ def evaluate(
205
205
  dataset: DataLoader,
206
206
  model: Module = None,
207
207
  loss_function: Module = None,
208
- metric_functions: List[PyTorchTypes.MetricFunctionType] = None,
208
+ metric_functions: list[PyTorchTypes.MetricFunctionType] = None,
209
209
  iterations: int = None,
210
- callbacks_list: List[Callback] = None,
210
+ callbacks_list: list[Callback] = None,
211
211
  use_cuda: bool = True,
212
212
  use_horovod: bool = False,
213
213
  auto_log: bool = True,
214
214
  model_name: str = None,
215
- modules_map: Union[Dict[str, Union[None, str, List[str]]], str] = None,
216
- custom_objects_map: Union[Dict[str, Union[str, List[str]]], str] = None,
215
+ modules_map: Union[dict[str, Union[None, str, list[str]]], str] = None,
216
+ custom_objects_map: Union[dict[str, Union[str, list[str]]], str] = None,
217
217
  custom_objects_directory: str = None,
218
- mlrun_callback_kwargs: Dict[str, Any] = None,
218
+ mlrun_callback_kwargs: dict[str, Any] = None,
219
219
  context: mlrun.MLClientCtx = None,
220
- ) -> Tuple[PyTorchModelHandler, List[PyTorchTypes.MetricValueType]]:
220
+ ) -> tuple[PyTorchModelHandler, list[PyTorchTypes.MetricValueType]]:
221
221
  """
222
222
  Use MLRun's PyTorch interface to evaluate the model with the given parameters. For more information and further
223
223
  options regarding the auto logging, see 'PyTorchMLRunInterface' documentation. Notice for auto-logging: In order to
@@ -343,9 +343,9 @@ def evaluate(
343
343
  def _parse_callbacks_kwargs(
344
344
  handler: PyTorchModelHandler,
345
345
  tensorboard_directory: Union[str, None],
346
- mlrun_callback_kwargs: Union[Dict[str, Any], None],
347
- tensorboard_callback_kwargs: Union[Dict[str, Any], None],
348
- ) -> Tuple[dict, dict]:
346
+ mlrun_callback_kwargs: Union[dict[str, Any], None],
347
+ tensorboard_callback_kwargs: Union[dict[str, Any], None],
348
+ ) -> tuple[dict, dict]:
349
349
  """
350
350
  Parse the given parameters into the MLRun and Tensorboard callbacks kwargs.
351
351
 
@@ -13,7 +13,6 @@
13
13
  # limitations under the License.
14
14
  #
15
15
  from abc import ABC, abstractmethod
16
- from typing import List
17
16
 
18
17
  from torch import Tensor
19
18
  from torch.nn import Module
@@ -68,7 +67,7 @@ class Callback(ABC):
68
67
  validation_set: DataLoader = None,
69
68
  loss_function: Module = None,
70
69
  optimizer: Optimizer = None,
71
- metric_functions: List[PyTorchTypes.MetricFunctionType] = None,
70
+ metric_functions: list[PyTorchTypes.MetricFunctionType] = None,
72
71
  scheduler=None,
73
72
  ):
74
73
  """
@@ -141,7 +140,7 @@ class Callback(ABC):
141
140
  pass
142
141
 
143
142
  def on_validation_end(
144
- self, loss_value: PyTorchTypes.MetricValueType, metric_values: List[float]
143
+ self, loss_value: PyTorchTypes.MetricValueType, metric_values: list[float]
145
144
  ) -> bool:
146
145
  """
147
146
  Before the validation (in a training case it will be per epoch) ends, this method will be called.
@@ -258,7 +257,7 @@ class Callback(ABC):
258
257
  """
259
258
  pass
260
259
 
261
- def on_train_metrics_end(self, metric_values: List[PyTorchTypes.MetricValueType]):
260
+ def on_train_metrics_end(self, metric_values: list[PyTorchTypes.MetricValueType]):
262
261
  """
263
262
  After the training calculation of the metrics, this method will be called.
264
263
 
@@ -273,7 +272,7 @@ class Callback(ABC):
273
272
  pass
274
273
 
275
274
  def on_validation_metrics_end(
276
- self, metric_values: List[PyTorchTypes.MetricValueType]
275
+ self, metric_values: list[PyTorchTypes.MetricValueType]
277
276
  ):
278
277
  """
279
278
  After the validating calculation of the metrics, this method will be called.