mlrun-1.6.0rc35-py3-none-any.whl → mlrun-1.7.0rc2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.
Files changed (199)
  1. mlrun/__main__.py +3 -3
  2. mlrun/api/schemas/__init__.py +1 -1
  3. mlrun/artifacts/base.py +11 -6
  4. mlrun/artifacts/dataset.py +2 -2
  5. mlrun/artifacts/model.py +30 -24
  6. mlrun/artifacts/plots.py +2 -2
  7. mlrun/common/db/sql_session.py +5 -3
  8. mlrun/common/helpers.py +1 -2
  9. mlrun/common/schemas/artifact.py +3 -3
  10. mlrun/common/schemas/auth.py +3 -3
  11. mlrun/common/schemas/background_task.py +1 -1
  12. mlrun/common/schemas/client_spec.py +1 -1
  13. mlrun/common/schemas/feature_store.py +16 -16
  14. mlrun/common/schemas/frontend_spec.py +7 -7
  15. mlrun/common/schemas/function.py +1 -1
  16. mlrun/common/schemas/hub.py +4 -9
  17. mlrun/common/schemas/memory_reports.py +2 -2
  18. mlrun/common/schemas/model_monitoring/grafana.py +4 -4
  19. mlrun/common/schemas/model_monitoring/model_endpoints.py +14 -15
  20. mlrun/common/schemas/notification.py +4 -4
  21. mlrun/common/schemas/object.py +2 -2
  22. mlrun/common/schemas/pipeline.py +1 -1
  23. mlrun/common/schemas/project.py +3 -3
  24. mlrun/common/schemas/runtime_resource.py +8 -12
  25. mlrun/common/schemas/schedule.py +3 -3
  26. mlrun/common/schemas/tag.py +1 -2
  27. mlrun/common/schemas/workflow.py +2 -2
  28. mlrun/config.py +8 -4
  29. mlrun/data_types/to_pandas.py +1 -3
  30. mlrun/datastore/base.py +0 -28
  31. mlrun/datastore/datastore_profile.py +9 -9
  32. mlrun/datastore/filestore.py +0 -1
  33. mlrun/datastore/google_cloud_storage.py +1 -1
  34. mlrun/datastore/sources.py +7 -11
  35. mlrun/datastore/spark_utils.py +1 -2
  36. mlrun/datastore/targets.py +31 -31
  37. mlrun/datastore/utils.py +4 -6
  38. mlrun/datastore/v3io.py +70 -46
  39. mlrun/db/base.py +22 -23
  40. mlrun/db/httpdb.py +34 -34
  41. mlrun/db/nopdb.py +19 -19
  42. mlrun/errors.py +1 -1
  43. mlrun/execution.py +4 -4
  44. mlrun/feature_store/api.py +20 -21
  45. mlrun/feature_store/common.py +1 -1
  46. mlrun/feature_store/feature_set.py +28 -32
  47. mlrun/feature_store/feature_vector.py +24 -27
  48. mlrun/feature_store/retrieval/base.py +7 -7
  49. mlrun/feature_store/retrieval/conversion.py +2 -4
  50. mlrun/feature_store/steps.py +7 -15
  51. mlrun/features.py +5 -7
  52. mlrun/frameworks/_common/artifacts_library.py +9 -9
  53. mlrun/frameworks/_common/mlrun_interface.py +5 -5
  54. mlrun/frameworks/_common/model_handler.py +48 -48
  55. mlrun/frameworks/_common/plan.py +2 -3
  56. mlrun/frameworks/_common/producer.py +3 -4
  57. mlrun/frameworks/_common/utils.py +5 -5
  58. mlrun/frameworks/_dl_common/loggers/logger.py +6 -7
  59. mlrun/frameworks/_dl_common/loggers/mlrun_logger.py +9 -9
  60. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +16 -35
  61. mlrun/frameworks/_ml_common/artifacts_library.py +1 -2
  62. mlrun/frameworks/_ml_common/loggers/logger.py +3 -4
  63. mlrun/frameworks/_ml_common/loggers/mlrun_logger.py +4 -5
  64. mlrun/frameworks/_ml_common/model_handler.py +24 -24
  65. mlrun/frameworks/_ml_common/pkl_model_server.py +2 -2
  66. mlrun/frameworks/_ml_common/plan.py +1 -1
  67. mlrun/frameworks/_ml_common/plans/calibration_curve_plan.py +2 -3
  68. mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +2 -3
  69. mlrun/frameworks/_ml_common/plans/dataset_plan.py +3 -3
  70. mlrun/frameworks/_ml_common/plans/feature_importance_plan.py +3 -3
  71. mlrun/frameworks/_ml_common/plans/roc_curve_plan.py +4 -4
  72. mlrun/frameworks/_ml_common/utils.py +4 -4
  73. mlrun/frameworks/auto_mlrun/auto_mlrun.py +7 -7
  74. mlrun/frameworks/huggingface/model_server.py +4 -4
  75. mlrun/frameworks/lgbm/__init__.py +32 -32
  76. mlrun/frameworks/lgbm/callbacks/logging_callback.py +4 -5
  77. mlrun/frameworks/lgbm/callbacks/mlrun_logging_callback.py +4 -5
  78. mlrun/frameworks/lgbm/mlrun_interfaces/booster_mlrun_interface.py +1 -3
  79. mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +6 -6
  80. mlrun/frameworks/lgbm/model_handler.py +9 -9
  81. mlrun/frameworks/lgbm/model_server.py +6 -6
  82. mlrun/frameworks/lgbm/utils.py +5 -5
  83. mlrun/frameworks/onnx/dataset.py +8 -8
  84. mlrun/frameworks/onnx/mlrun_interface.py +3 -3
  85. mlrun/frameworks/onnx/model_handler.py +6 -6
  86. mlrun/frameworks/onnx/model_server.py +7 -7
  87. mlrun/frameworks/parallel_coordinates.py +2 -2
  88. mlrun/frameworks/pytorch/__init__.py +16 -16
  89. mlrun/frameworks/pytorch/callbacks/callback.py +4 -5
  90. mlrun/frameworks/pytorch/callbacks/logging_callback.py +17 -17
  91. mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +11 -11
  92. mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +23 -29
  93. mlrun/frameworks/pytorch/callbacks_handler.py +38 -38
  94. mlrun/frameworks/pytorch/mlrun_interface.py +20 -20
  95. mlrun/frameworks/pytorch/model_handler.py +17 -17
  96. mlrun/frameworks/pytorch/model_server.py +7 -7
  97. mlrun/frameworks/sklearn/__init__.py +12 -12
  98. mlrun/frameworks/sklearn/estimator.py +4 -4
  99. mlrun/frameworks/sklearn/metrics_library.py +14 -14
  100. mlrun/frameworks/sklearn/mlrun_interface.py +3 -6
  101. mlrun/frameworks/sklearn/model_handler.py +2 -2
  102. mlrun/frameworks/tf_keras/__init__.py +5 -5
  103. mlrun/frameworks/tf_keras/callbacks/logging_callback.py +14 -14
  104. mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +11 -11
  105. mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +19 -23
  106. mlrun/frameworks/tf_keras/mlrun_interface.py +7 -9
  107. mlrun/frameworks/tf_keras/model_handler.py +14 -14
  108. mlrun/frameworks/tf_keras/model_server.py +6 -6
  109. mlrun/frameworks/xgboost/__init__.py +12 -12
  110. mlrun/frameworks/xgboost/model_handler.py +6 -6
  111. mlrun/k8s_utils.py +4 -5
  112. mlrun/kfpops.py +2 -2
  113. mlrun/launcher/base.py +10 -10
  114. mlrun/launcher/local.py +8 -8
  115. mlrun/launcher/remote.py +7 -7
  116. mlrun/lists.py +3 -4
  117. mlrun/model.py +205 -55
  118. mlrun/model_monitoring/api.py +21 -24
  119. mlrun/model_monitoring/application.py +4 -4
  120. mlrun/model_monitoring/batch.py +17 -17
  121. mlrun/model_monitoring/controller.py +2 -1
  122. mlrun/model_monitoring/features_drift_table.py +44 -31
  123. mlrun/model_monitoring/prometheus.py +1 -4
  124. mlrun/model_monitoring/stores/kv_model_endpoint_store.py +11 -13
  125. mlrun/model_monitoring/stores/model_endpoint_store.py +9 -11
  126. mlrun/model_monitoring/stores/models/__init__.py +2 -2
  127. mlrun/model_monitoring/stores/sql_model_endpoint_store.py +11 -13
  128. mlrun/model_monitoring/stream_processing.py +16 -34
  129. mlrun/model_monitoring/tracking_policy.py +2 -1
  130. mlrun/package/__init__.py +6 -6
  131. mlrun/package/context_handler.py +5 -5
  132. mlrun/package/packager.py +7 -7
  133. mlrun/package/packagers/default_packager.py +6 -6
  134. mlrun/package/packagers/numpy_packagers.py +15 -15
  135. mlrun/package/packagers/pandas_packagers.py +5 -5
  136. mlrun/package/packagers/python_standard_library_packagers.py +10 -10
  137. mlrun/package/packagers_manager.py +18 -23
  138. mlrun/package/utils/_formatter.py +4 -4
  139. mlrun/package/utils/_pickler.py +2 -2
  140. mlrun/package/utils/_supported_format.py +4 -4
  141. mlrun/package/utils/log_hint_utils.py +2 -2
  142. mlrun/package/utils/type_hint_utils.py +4 -9
  143. mlrun/platforms/other.py +1 -2
  144. mlrun/projects/operations.py +5 -5
  145. mlrun/projects/pipelines.py +9 -9
  146. mlrun/projects/project.py +58 -46
  147. mlrun/render.py +1 -1
  148. mlrun/run.py +9 -9
  149. mlrun/runtimes/__init__.py +7 -4
  150. mlrun/runtimes/base.py +20 -23
  151. mlrun/runtimes/constants.py +5 -5
  152. mlrun/runtimes/daskjob.py +8 -8
  153. mlrun/runtimes/databricks_job/databricks_cancel_task.py +1 -1
  154. mlrun/runtimes/databricks_job/databricks_runtime.py +7 -7
  155. mlrun/runtimes/function_reference.py +1 -1
  156. mlrun/runtimes/local.py +1 -1
  157. mlrun/runtimes/mpijob/abstract.py +1 -2
  158. mlrun/runtimes/nuclio/__init__.py +20 -0
  159. mlrun/runtimes/{function.py → nuclio/function.py} +15 -16
  160. mlrun/runtimes/{nuclio.py → nuclio/nuclio.py} +6 -6
  161. mlrun/runtimes/{serving.py → nuclio/serving.py} +13 -12
  162. mlrun/runtimes/pod.py +95 -48
  163. mlrun/runtimes/remotesparkjob.py +1 -1
  164. mlrun/runtimes/sparkjob/spark3job.py +50 -33
  165. mlrun/runtimes/utils.py +1 -2
  166. mlrun/secrets.py +3 -3
  167. mlrun/serving/remote.py +0 -4
  168. mlrun/serving/routers.py +6 -6
  169. mlrun/serving/server.py +4 -4
  170. mlrun/serving/states.py +29 -0
  171. mlrun/serving/utils.py +3 -3
  172. mlrun/serving/v1_serving.py +6 -7
  173. mlrun/serving/v2_serving.py +50 -8
  174. mlrun/track/tracker_manager.py +3 -3
  175. mlrun/track/trackers/mlflow_tracker.py +1 -2
  176. mlrun/utils/async_http.py +5 -7
  177. mlrun/utils/azure_vault.py +1 -1
  178. mlrun/utils/clones.py +1 -2
  179. mlrun/utils/condition_evaluator.py +3 -3
  180. mlrun/utils/db.py +3 -3
  181. mlrun/utils/helpers.py +37 -119
  182. mlrun/utils/http.py +1 -4
  183. mlrun/utils/logger.py +49 -14
  184. mlrun/utils/notifications/notification/__init__.py +3 -3
  185. mlrun/utils/notifications/notification/base.py +2 -2
  186. mlrun/utils/notifications/notification/ipython.py +1 -1
  187. mlrun/utils/notifications/notification_pusher.py +8 -14
  188. mlrun/utils/retryer.py +207 -0
  189. mlrun/utils/singleton.py +1 -1
  190. mlrun/utils/v3io_clients.py +2 -3
  191. mlrun/utils/version/version.json +2 -2
  192. mlrun/utils/version/version.py +2 -6
  193. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/METADATA +9 -9
  194. mlrun-1.7.0rc2.dist-info/RECORD +315 -0
  195. mlrun-1.6.0rc35.dist-info/RECORD +0 -313
  196. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/LICENSE +0 -0
  197. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/WHEEL +0 -0
  198. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/entry_points.txt +0 -0
  199. {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/top_level.txt +0 -0
mlrun/model_monitoring/stores/sql_model_endpoint_store.py CHANGED
@@ -80,7 +80,7 @@ class SQLModelEndpointStore(ModelEndpointStore):
             self.ModelEndpointsTable.__table__  # pyright: ignore[reportGeneralTypeIssues]
         )

-    def write_model_endpoint(self, endpoint: typing.Dict[str, typing.Any]):
+    def write_model_endpoint(self, endpoint: dict[str, typing.Any]):
         """
         Create a new endpoint record in the SQL table. This method also creates the model endpoints table within the
         SQL database if not exist.
@@ -105,7 +105,7 @@ class SQLModelEndpointStore(ModelEndpointStore):
         )

     def update_model_endpoint(
-        self, endpoint_id: str, attributes: typing.Dict[str, typing.Any]
+        self, endpoint_id: str, attributes: dict[str, typing.Any]
     ):
         """
         Update a model endpoint record with a given attributes.
@@ -145,7 +145,7 @@ class SQLModelEndpointStore(ModelEndpointStore):
     def get_model_endpoint(
         self,
         endpoint_id: str,
-    ) -> typing.Dict[str, typing.Any]:
+    ) -> dict[str, typing.Any]:
         """
         Get a single model endpoint record.

@@ -175,10 +175,10 @@ class SQLModelEndpointStore(ModelEndpointStore):
         self,
         model: str = None,
         function: str = None,
-        labels: typing.List[str] = None,
+        labels: list[str] = None,
         top_level: bool = None,
-        uids: typing.List = None,
-    ) -> typing.List[typing.Dict[str, typing.Any]]:
+        uids: list = None,
+    ) -> list[dict[str, typing.Any]]:
         """
         Returns a list of model endpoint dictionaries, supports filtering by model, function, labels or top level.
         By default, when no filters are applied, all available model endpoints for the given project will
@@ -262,7 +262,7 @@ class SQLModelEndpointStore(ModelEndpointStore):
         query: db.orm.query.Query,
         model_endpoints_table: db.Table,
         key_filter: str,
-        filtered_values: typing.List,
+        filtered_values: list,
         combined=True,
     ) -> db.orm.query.Query:
         """Filtering the SQL query object according to the provided filters.
@@ -300,7 +300,7 @@ class SQLModelEndpointStore(ModelEndpointStore):
     @staticmethod
     def _validate_labels(
         endpoint_dict: dict,
-        labels: typing.List,
+        labels: list,
     ) -> bool:
         """Validate that the model endpoint dictionary has the provided labels. There are 2 possible cases:
         1 - Labels were provided as a list of key-values pairs (e.g. ['label_1=value_1', 'label_2=value_2']): Validate
@@ -334,9 +334,7 @@ class SQLModelEndpointStore(ModelEndpointStore):

         return True

-    def delete_model_endpoints_resources(
-        self, endpoints: typing.List[typing.Dict[str, typing.Any]]
-    ):
+    def delete_model_endpoints_resources(self, endpoints: list[dict[str, typing.Any]]):
         """
         Delete all model endpoints resources in both SQL and the time series DB.

@@ -352,11 +350,11 @@ class SQLModelEndpointStore(ModelEndpointStore):
     def get_endpoint_real_time_metrics(
         self,
         endpoint_id: str,
-        metrics: typing.List[str],
+        metrics: list[str],
         start: str = "now-1h",
         end: str = "now",
         access_key: str = None,
-    ) -> typing.Dict[str, typing.List[typing.Tuple[str, float]]]:
+    ) -> dict[str, list[tuple[str, float]]]:
         """
         Getting metrics from the time series DB. There are pre-defined metrics for model endpoints such as
         `predictions_per_second` and `latency_avg_5m` but also custom metrics defined by the user.
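Note: the hunks in this file (and most of the typing churn throughout this diff) replace `typing.Dict`/`typing.List`/`typing.Tuple` annotations with the built-in generics standardized by PEP 585, usable since Python 3.9. A minimal sketch of the pattern (the function names and bodies are illustrative, not from mlrun):

    import typing

    # Before: parameterized aliases imported from the typing module.
    def get_endpoints_old(uids: typing.List[str]) -> typing.Dict[str, typing.Any]:
        return {uid: None for uid in uids}

    # After: the built-ins are subscripted directly; typing is still needed
    # for constructs without a builtin equivalent (Any, Optional, Union).
    def get_endpoints_new(uids: list[str]) -> dict[str, typing.Any]:
        return {uid: None for uid in uids}

Because some of these subscripted built-ins are evaluated at import time (e.g. the class attributes further down in this diff), the change effectively assumes a Python 3.9+ runtime.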
mlrun/model_monitoring/stream_processing.py CHANGED
@@ -49,7 +49,7 @@ class EventStreamProcessor:
         parquet_batching_timeout_secs: int,
         parquet_target: str,
         sample_window: int = 10,
-        aggregate_windows: typing.Optional[typing.List[str]] = None,
+        aggregate_windows: typing.Optional[list[str]] = None,
         aggregate_period: str = "30s",
         model_monitoring_access_key: str = None,
     ):
@@ -629,14 +629,14 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
         self.project: str = project

         # First and last requests timestamps (value) of each endpoint (key)
-        self.first_request: typing.Dict[str, str] = dict()
-        self.last_request: typing.Dict[str, str] = dict()
+        self.first_request: dict[str, str] = dict()
+        self.last_request: dict[str, str] = dict()

         # Number of errors (value) per endpoint (key)
-        self.error_count: typing.Dict[str, int] = collections.defaultdict(int)
+        self.error_count: dict[str, int] = collections.defaultdict(int)

         # Set of endpoints in the current events
-        self.endpoints: typing.Set[str] = set()
+        self.endpoints: set[str] = set()

     def do(self, full_event):
         event = full_event.body
@@ -745,18 +745,12 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
         # in list of events. This list will be used as the body for the storey event.
         events = []
         for i, (feature, prediction) in enumerate(zip(features, predictions)):
-            # Validate that inputs are based on numeric values
-            if not self.is_valid(
-                endpoint_id,
-                self.is_list_of_numerics,
-                feature,
-                ["request", "inputs", f"[{i}]"],
-            ):
-                return None
-
             if not isinstance(prediction, list):
                 prediction = [prediction]

+            if not isinstance(feature, list):
+                feature = [feature]
+
             events.append(
                 {
                     EventFieldType.FUNCTION_URI: function_uri,
@@ -803,18 +797,6 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
                 f"{self.last_request[endpoint_id]} - write to TSDB will be rejected"
             )

-    @staticmethod
-    def is_list_of_numerics(
-        field: typing.List[typing.Union[int, float, dict, list]],
-        dict_path: typing.List[str],
-    ):
-        if all(isinstance(x, int) or isinstance(x, float) for x in field):
-            return True
-        logger.error(
-            f"List does not consist of only numeric values: {field} [Event -> {','.join(dict_path)}]"
-        )
-        return False
-
     def resume_state(self, endpoint_id):
         # Make sure process is resumable, if process fails for any reason, be able to pick things up close to where we
         # left them
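Note: the two hunks above change how the monitoring stream treats event inputs: the numeric-only validation (`is_list_of_numerics`) is removed, and scalar features are now normalized by wrapping them in a list, mirroring the pre-existing handling of predictions. A standalone sketch of the new normalization (simplified; the sample values are illustrative):

    # Per (feature, prediction) pair, scalars are wrapped instead of the
    # event being rejected for failing numeric validation.
    features = [[1.0, 2.0], 3.5]        # second entry is a scalar
    predictions = [0.9, [0.1, 0.2]]

    events = []
    for feature, prediction in zip(features, predictions):
        if not isinstance(prediction, list):
            prediction = [prediction]   # pre-existing behavior
        if not isinstance(feature, list):
            feature = [feature]         # new in 1.7.0rc2
        events.append({"features": feature, "prediction": prediction})

    assert events[1]["features"] == [3.5]

One behavioral consequence: events with non-numeric inputs are no longer dropped with an error log; they flow through the pipeline.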
@@ -849,7 +831,7 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
         endpoint_id: str,
         validation_function,
         field: typing.Any,
-        dict_path: typing.List[str],
+        dict_path: list[str],
     ):
         if validation_function(field, dict_path):
             return True
@@ -857,7 +839,7 @@ class ProcessEndpointEvent(mlrun.feature_store.steps.MapClass):
         return False


-def is_not_none(field: typing.Any, dict_path: typing.List[str]):
+def is_not_none(field: typing.Any, dict_path: list[str]):
    if field is not None:
        return True
    logger.error(
@@ -946,7 +928,7 @@ class MapFeatureNames(mlrun.feature_store.steps.MapClass):
             return self.label_columns[endpoint_id]
         return None

-    def do(self, event: typing.Dict):
+    def do(self, event: dict):
         endpoint_id = event[EventFieldType.ENDPOINT_ID]

         # Get feature names and label columns
@@ -1045,9 +1027,9 @@ class MapFeatureNames(mlrun.feature_store.steps.MapClass):

     @staticmethod
     def _map_dictionary_values(
-        event: typing.Dict,
-        named_iters: typing.List,
-        values_iters: typing.List,
+        event: dict,
+        named_iters: list,
+        values_iters: list,
         mapping_dictionary: str,
     ):
         """Adding name-value pairs to event dictionary based on two provided lists of names and values. These pairs
@@ -1082,7 +1064,7 @@ class UpdateEndpoint(mlrun.feature_store.steps.MapClass):
         self.project = project
         self.model_endpoint_store_target = model_endpoint_store_target

-    def do(self, event: typing.Dict):
+    def do(self, event: dict):
         update_endpoint_record(
             project=self.project,
             endpoint_id=event.pop(EventFieldType.ENDPOINT_ID),
@@ -1117,7 +1099,7 @@ class InferSchema(mlrun.feature_store.steps.MapClass):
         self.table = table
         self.keys = set()

-    def do(self, event: typing.Dict):
+    def do(self, event: dict):
         key_set = set(event.keys())
         if not key_set.issubset(self.keys):
             self.keys.update(key_set)
mlrun/model_monitoring/tracking_policy.py CHANGED
@@ -96,12 +96,13 @@ class TrackingPolicy(mlrun.model.ModelObj):
         )
         return new_obj

-    def to_dict(self, fields=None, exclude=None):
+    def to_dict(self, fields: list = None, exclude: list = None, strip: bool = False):
         struct = super().to_dict(
             fields,
             exclude=[
                 mlrun.common.schemas.model_monitoring.EventFieldType.DEFAULT_BATCH_INTERVALS
             ],
+            strip=strip,
         )
         if self.default_batch_intervals:
             struct[
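Note: `to_dict` here gains a `strip` keyword and forwards it to `super()`, matching the updated `mlrun.model.ModelObj.to_dict` signature (see the large `mlrun/model.py` change, +205/-55, in the file list). A hedged sketch of the forwarding pattern, with a stand-in base class since `ModelObj` internals are not shown in this diff:

    class Base:  # stands in for mlrun.model.ModelObj
        def to_dict(self, fields: list = None, exclude: list = None, strip: bool = False):
            data = {"name": "policy", "uid": "1234"}
            if strip:
                data.pop("uid")  # assumption: strip drops generated fields
            return data

    class Tracking(Base):
        def to_dict(self, fields: list = None, exclude: list = None, strip: bool = False):
            # Forwarding strip keeps the override compatible with the base
            # signature; omitting it would silently ignore callers' strip=True.
            return super().to_dict(fields, exclude=exclude, strip=strip)

    print(Tracking().to_dict(strip=True))  # {'name': 'policy'}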
mlrun/package/__init__.py CHANGED
@@ -18,7 +18,7 @@
 import functools
 import inspect
 from collections import OrderedDict
-from typing import Callable, Dict, List, Type, Union
+from typing import Callable, Union

 from ..config import config
 from .context_handler import ContextHandler
@@ -40,9 +40,9 @@ from .utils import (


 def handler(
-    labels: Dict[str, str] = None,
-    outputs: List[Union[str, Dict[str, str]]] = None,
-    inputs: Union[bool, Dict[str, Union[str, Type]]] = True,
+    labels: dict[str, str] = None,
+    outputs: list[Union[str, dict[str, str]]] = None,
+    inputs: Union[bool, dict[str, Union[str, type]]] = True,
 ):
     """
     MLRun's handler is a decorator to wrap a function and enable setting labels, parsing inputs (`mlrun.DataItem`) using
@@ -58,7 +58,7 @@ def handler(
                     * `str` - A string in the format of '{key}:{artifact_type}'. If a string was given without ':' it
                       will indicate the key, and the artifact type will be according to the returned value type's
                       default artifact type. The artifact types supported are listed in the relevant type packager.
-                    * `Dict[str, str]` - A dictionary of logging configuration. the key 'key' is mandatory for the
+                    * `dict[str, str]` - A dictionary of logging configuration. the key 'key' is mandatory for the
                       logged artifact key.
                     * None - Do not log the output.

@@ -73,7 +73,7 @@ def handler(
                    * True - Parse all found inputs to the assigned type hint in the function's signature. If there is no
                      type hint assigned, the value will remain an `mlrun.DataItem`.
                    * False - Do not parse inputs, leaving the inputs as `mlrun.DataItem`.
-                   * Dict[str, Union[Type, str]] - A dictionary with argument name as key and the expected type to parse
+                   * dict[str, Union[Type, str]] - A dictionary with argument name as key and the expected type to parse
                      the `mlrun.DataItem` to. The expected type can be a string as well, idicating the full module path.

                    Default: True - meaning inputs will be parsed from DataItem's as long as they are type hinted.
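Note: the docstring above documents the `labels`/`outputs`/`inputs` options of the decorator (defined here in `mlrun/package/__init__.py` and re-exported as `mlrun.handler` in released versions). A usage sketch based only on the forms documented in this hunk; the function body and keys are illustrative, and when run through MLRun the return values are logged per the output log hints:

    import mlrun
    import pandas as pd

    # 'summary:dataset' uses the documented '{key}:{artifact_type}' string
    # form; 'count' falls back to the default artifact type for its value.
    # inputs=True (the default) parses DataItem inputs by their type hints.
    @mlrun.handler(labels={"framework": "pandas"}, outputs=["summary:dataset", "count"])
    def describe(table: pd.DataFrame):
        return table.describe(), len(table)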
mlrun/package/context_handler.py CHANGED
@@ -15,7 +15,7 @@
 import inspect
 import os
 from collections import OrderedDict
-from typing import Dict, List, Union
+from typing import Union

 from mlrun.datastore import DataItem
 from mlrun.errors import MLRunInvalidArgumentError
@@ -181,7 +181,7 @@ class ContextHandler:
     def log_outputs(
         self,
         outputs: list,
-        log_hints: List[Union[Dict[str, str], str, None]],
+        log_hints: list[Union[dict[str, str], str, None]],
     ):
         """
         Log the given outputs as artifacts (or results) with the stored context. Errors raised during the packing will
@@ -229,7 +229,7 @@ class ContextHandler:
         # Clear packagers outputs:
         self._packagers_manager.clear_packagers_outputs()

-    def set_labels(self, labels: Dict[str, str]):
+    def set_labels(self, labels: dict[str, str]):
         """
         Set the given labels with the stored context.

@@ -239,7 +239,7 @@ class ContextHandler:
             self._context.set_label(key=key, value=value)

     def _collect_packagers(
-        self, packagers: List[str], is_mandatory: bool, is_custom_packagers: bool
+        self, packagers: list[str], is_mandatory: bool, is_custom_packagers: bool
     ):
         """
         Collect packagers with the stored manager. The collection can ignore errors raised by setting the mandatory flag
@@ -310,7 +310,7 @@ class ContextHandler:
     def _validate_objects_to_log_hints_length(
         self,
         outputs: list,
-        log_hints: List[Union[Dict[str, str], str, None]],
+        log_hints: list[Union[dict[str, str], str, None]],
     ):
         """
         Validate the outputs and log hints are the same length. If they are not, warnings will be printed on what will
mlrun/package/packager.py CHANGED
@@ -14,7 +14,7 @@
 #
 from abc import ABC, abstractmethod
 from pathlib import Path
-from typing import Any, List, Tuple, Type, Union
+from typing import Any, Union

 from mlrun.artifacts import Artifact
 from mlrun.datastore import DataItem
@@ -93,7 +93,7 @@ class Packager(ABC):
     """

     #: The type of object this packager can pack and unpack.
-    PACKABLE_OBJECT_TYPE: Type = ...
+    PACKABLE_OBJECT_TYPE: type = ...

     #: The priority of this packager in the packagers collection of the manager (lower is better).
     PRIORITY: int = ...
@@ -104,7 +104,7 @@ class Packager(ABC):
         self._priority = Packager.PRIORITY

         # List of all paths to be deleted by the manager of this packager after logging the packages:
-        self._future_clearing_path_list: List[str] = []
+        self._future_clearing_path_list: list[str] = []

     @abstractmethod
     def get_default_packing_artifact_type(self, obj: Any) -> str:
@@ -132,7 +132,7 @@ class Packager(ABC):
         pass

     @abstractmethod
-    def get_supported_artifact_types(self) -> List[str]:
+    def get_supported_artifact_types(self) -> list[str]:
         """
         Get all the supported artifact types on this packager.

@@ -147,7 +147,7 @@ class Packager(ABC):
         key: str = None,
         artifact_type: str = None,
         configurations: dict = None,
-    ) -> Union[Tuple[Artifact, dict], dict]:
+    ) -> Union[tuple[Artifact, dict], dict]:
         """
         Pack an object as the given artifact type using the provided configurations.

@@ -212,7 +212,7 @@ class Packager(ABC):
         return True

     def is_unpackable(
-        self, data_item: DataItem, type_hint: Type, artifact_type: str = None
+        self, data_item: DataItem, type_hint: type, artifact_type: str = None
     ) -> bool:
         """
         Check if this packager can unpack an input according to the user-given type hint and the provided artifact type.
@@ -269,7 +269,7 @@ class Packager(ABC):
         self._priority = priority

     @property
-    def future_clearing_path_list(self) -> List[str]:
+    def future_clearing_path_list(self) -> list[str]:
         """
         Get the packager's future clearing path list.

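Note: taken together, this file's hunks show the abstract surface a packager implements (`PACKABLE_OBJECT_TYPE`, `PRIORITY`, the abstract `get_default_packing_artifact_type` / `get_supported_artifact_types` / `pack`, plus helpers such as `is_unpackable`). A minimal subclass sketch against that surface; the class itself is invented for illustration, and the real ABC may declare additional abstract methods (e.g. for unpacking) that are not part of these hunks:

    from typing import Any, Union

    from mlrun.artifacts import Artifact
    from mlrun.package.packager import Packager

    class StrSetPackager(Packager):
        """Illustrative packager for set objects, logged as results."""

        PACKABLE_OBJECT_TYPE = set
        PRIORITY = 5

        def get_default_packing_artifact_type(self, obj: Any) -> str:
            return "result"

        def get_supported_artifact_types(self) -> list[str]:
            return ["result"]

        def pack(
            self, obj: Any, key: str = None, artifact_type: str = None, configurations: dict = None
        ) -> Union[tuple[Artifact, dict], dict]:
            # Per the hunk above, pack() may return either (Artifact,
            # instructions) or a plain dict for a "result" pack.
            return {key: sorted(obj)}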
mlrun/package/packagers/default_packager.py CHANGED
@@ -15,7 +15,7 @@
 import inspect
 from abc import ABCMeta
 from types import MethodType
-from typing import Any, List, Tuple, Type, Union
+from typing import Any, Union

 import docstring_parser

@@ -51,7 +51,7 @@ class _DefaultPackagerMeta(ABCMeta):
         return super().__new__(mcls, name, bases, namespace, **kwargs)

     @property
-    def __doc__(cls: Type["DefaultPackager"]) -> str:
+    def __doc__(cls: type["DefaultPackager"]) -> str:
         """
         Override the `__doc__` attribute of a `DefaultPackager` to be a property in order to auto-summarize the
         packager's class docstring. The summary is concatenated after the original class doc string.
@@ -273,7 +273,7 @@ class DefaultPackager(Packager, metaclass=_DefaultPackagerMeta):
     """

     #: The type of object this packager can pack and unpack.
-    PACKABLE_OBJECT_TYPE: Type = ...
+    PACKABLE_OBJECT_TYPE: type = ...

     #: A flag for indicating whether to also pack all subclasses of the `PACKABLE_OBJECT_TYPE`.
     PACK_SUBCLASSES = False
@@ -306,7 +306,7 @@ class DefaultPackager(Packager, metaclass=_DefaultPackagerMeta):
         """
         return self.DEFAULT_UNPACKING_ARTIFACT_TYPE

-    def get_supported_artifact_types(self) -> List[str]:
+    def get_supported_artifact_types(self) -> list[str]:
         """
         Get all the supported artifact types on this packager.

@@ -326,7 +326,7 @@ class DefaultPackager(Packager, metaclass=_DefaultPackagerMeta):
         key: str = None,
         artifact_type: str = None,
         configurations: dict = None,
-    ) -> Union[Tuple[Artifact, dict], dict]:
+    ) -> Union[tuple[Artifact, dict], dict]:
         """
         Pack an object as the given artifact type using the provided configurations.

@@ -442,7 +442,7 @@ class DefaultPackager(Packager, metaclass=_DefaultPackagerMeta):
         obj: Any,
         key: str,
         pickle_module_name: str = DEFAULT_PICKLE_MODULE,
-    ) -> Tuple[Artifact, dict]:
+    ) -> tuple[Artifact, dict]:
         """
         Pack a python object, pickling it into a pkl file and store it in an artifact.

mlrun/package/packagers/numpy_packagers.py CHANGED
@@ -16,7 +16,7 @@ import os
 import pathlib
 import tempfile
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any, Union

 import numpy as np
 import pandas as pd
@@ -29,7 +29,7 @@ from ..utils import ArtifactType, SupportedFormat
 from .default_packager import DefaultPackager

 # Type for collection of numpy arrays (list / dict of arrays):
-NumPyArrayCollectionType = Union[List[np.ndarray], Dict[str, np.ndarray]]
+NumPyArrayCollectionType = Union[list[np.ndarray], dict[str, np.ndarray]]


 class _Formatter(ABC):
@@ -194,7 +194,7 @@ class _NPZFormatter(_Formatter):
         save_function(file_path, **obj)

     @classmethod
-    def load(cls, file_path: str, **load_kwargs: dict) -> Dict[str, np.ndarray]:
+    def load(cls, file_path: str, **load_kwargs: dict) -> dict[str, np.ndarray]:
         """
         Load the arrays from the given 'npz' file path.

@@ -226,7 +226,7 @@ class NumPySupportedFormat(SupportedFormat[_Formatter]):
     }

     @classmethod
-    def get_single_array_formats(cls) -> List[str]:
+    def get_single_array_formats(cls) -> list[str]:
         """
         Get the supported formats for saving one numpy array.

@@ -235,7 +235,7 @@ class NumPySupportedFormat(SupportedFormat[_Formatter]):
         return [cls.NPY, cls.TXT, cls.GZ, cls.CSV]

     @classmethod
-    def get_multi_array_formats(cls) -> List[str]:
+    def get_multi_array_formats(cls) -> list[str]:
         """
         Get the supported formats for saving a collection (multiple) numpy arrays - e.g. list of arrays or dictionary of
         arrays.
@@ -310,7 +310,7 @@ class NumPyNDArrayPackager(DefaultPackager):
         key: str,
         file_format: str = DEFAULT_NUMPY_ARRAY_FORMAT,
         **save_kwargs,
-    ) -> Tuple[Artifact, dict]:
+    ) -> tuple[Artifact, dict]:
         """
         Pack an array as a file by the given format.

@@ -342,7 +342,7 @@ class NumPyNDArrayPackager(DefaultPackager):
         obj: np.ndarray,
         key: str,
         file_format: str = "",
-    ) -> Tuple[Artifact, dict]:
+    ) -> tuple[Artifact, dict]:
         """
         Pack an array as a dataset.

@@ -442,7 +442,7 @@ class _NumPyNDArrayCollectionPackager(DefaultPackager):
         key: str,
         file_format: str = DEFAULT_NUMPPY_ARRAY_COLLECTION_FORMAT,
         **save_kwargs,
-    ) -> Tuple[Artifact, dict]:
+    ) -> tuple[Artifact, dict]:
         """
         Pack an array collection as a file by the given format.

@@ -476,7 +476,7 @@ class _NumPyNDArrayCollectionPackager(DefaultPackager):
         data_item: DataItem,
         file_format: str = None,
         allow_pickle: bool = False,
-    ) -> Dict[str, np.ndarray]:
+    ) -> dict[str, np.ndarray]:
         """
         Unpack a numppy array collection from file.

@@ -545,7 +545,7 @@ class NumPyNDArrayDictPackager(_NumPyNDArrayCollectionPackager):
     ``dict[str, numpy.ndarray]`` packager.
     """

-    PACKABLE_OBJECT_TYPE = Dict[str, np.ndarray]
+    PACKABLE_OBJECT_TYPE = dict[str, np.ndarray]

     def is_packable(
         self, obj: Any, artifact_type: str = None, configurations: dict = None
@@ -583,7 +583,7 @@ class NumPyNDArrayDictPackager(_NumPyNDArrayCollectionPackager):

         return True

-    def pack_result(self, obj: Dict[str, np.ndarray], key: str) -> dict:
+    def pack_result(self, obj: dict[str, np.ndarray], key: str) -> dict:
         """
         Pack a dictionary of numpy arrays as a result.

@@ -604,7 +604,7 @@ class NumPyNDArrayDictPackager(_NumPyNDArrayCollectionPackager):
         data_item: DataItem,
         file_format: str = None,
         allow_pickle: bool = False,
-    ) -> Dict[str, np.ndarray]:
+    ) -> dict[str, np.ndarray]:
         """
         Unpack a numppy array dictionary from file.

@@ -630,7 +630,7 @@ class NumPyNDArrayListPackager(_NumPyNDArrayCollectionPackager):
     ``list[numpy.ndarray]`` packager.
     """

-    PACKABLE_OBJECT_TYPE = List[np.ndarray]
+    PACKABLE_OBJECT_TYPE = list[np.ndarray]

     def is_packable(
         self, obj: Any, artifact_type: str = None, configurations: dict = None
@@ -665,7 +665,7 @@ class NumPyNDArrayListPackager(_NumPyNDArrayCollectionPackager):

         return True

-    def pack_result(self, obj: List[np.ndarray], key: str) -> dict:
+    def pack_result(self, obj: list[np.ndarray], key: str) -> dict:
         """
         Pack a list of numpy arrays as a result.

@@ -681,7 +681,7 @@ class NumPyNDArrayListPackager(_NumPyNDArrayCollectionPackager):
         data_item: DataItem,
         file_format: str = None,
         allow_pickle: bool = False,
-    ) -> List[np.ndarray]:
+    ) -> list[np.ndarray]:
         """
         Unpack a numppy array list from file.

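Note: `PACKABLE_OBJECT_TYPE` now holds parameterized built-in generics (`dict[str, np.ndarray]`, `list[np.ndarray]`). Unlike plain annotations, these class attributes are evaluated when the module is imported, so they hard-require Python 3.9+; they stay introspectable the same way the old `typing` aliases were:

    import typing

    import numpy as np

    packable = dict[str, np.ndarray]  # PEP 585 generic alias

    assert typing.get_origin(packable) is dict
    assert typing.get_args(packable) == (str, np.ndarray)

    # isinstance() against a parameterized alias raises TypeError, so
    # type-matching code checks the origin instead:
    assert isinstance({"x": np.zeros(3)}, typing.get_origin(packable))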
mlrun/package/packagers/pandas_packagers.py CHANGED
@@ -17,7 +17,7 @@ import os
 import pathlib
 import tempfile
 from abc import ABC, abstractmethod
-from typing import Any, List, Tuple, Union
+from typing import Any, Union

 import pandas as pd

@@ -70,7 +70,7 @@ class _Formatter(ABC):
         pass

     @staticmethod
-    def _flatten_dataframe(dataframe: pd.DataFrame) -> Tuple[pd.DataFrame, dict]:
+    def _flatten_dataframe(dataframe: pd.DataFrame) -> tuple[pd.DataFrame, dict]:
         """
         Flatten the dataframe: moving all indexes to be columns at the start (from column 0) and lowering the columns
         levels to 1, renaming them from tuples. All columns and index info is stored so it can be unflatten later on.
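Note: the `_flatten_dataframe` docstring above describes the transformation only; a rough standalone illustration of the idea in plain pandas (not mlrun's implementation, which also records the column/index metadata needed to unflatten later):

    import pandas as pd

    df = pd.DataFrame(
        {("metrics", "accuracy"): [0.9], ("metrics", "loss"): [0.1]},
        index=pd.Index(["run-1"], name="run"),
    )

    # Move the index out to a regular column and collapse the MultiIndex
    # column tuples into single-level string names.
    flat = df.reset_index()
    flat.columns = [
        "_".join(filter(None, c)) if isinstance(c, tuple) else c for c in flat.columns
    ]
    print(flat.columns.tolist())  # ['run', 'metrics_accuracy', 'metrics_loss']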
@@ -733,7 +733,7 @@ class PandasDataFramePackager(DefaultPackager):
         file_format: str = None,
         flatten: bool = True,
         **to_kwargs,
-    ) -> Tuple[Artifact, dict]:
+    ) -> tuple[Artifact, dict]:
         """
         Pack a dataframe as a file by the given format.

@@ -857,7 +857,7 @@ class PandasSeriesPackager(PandasDataFramePackager):
     PACKABLE_OBJECT_TYPE = pd.Series
     DEFAULT_PACKING_ARTIFACT_TYPE = ArtifactType.FILE

-    def get_supported_artifact_types(self) -> List[str]:
+    def get_supported_artifact_types(self) -> list[str]:
         """
         Get all the supported artifact types on this packager. It will be the same as `PandasDataFramePackager` but
         without the 'dataset' artifact type support.
@@ -886,7 +886,7 @@ class PandasSeriesPackager(PandasDataFramePackager):
         file_format: str = None,
         flatten: bool = True,
         **to_kwargs,
-    ) -> Tuple[Artifact, dict]:
+    ) -> tuple[Artifact, dict]:
         """
         Pack a series as a file by the given format.
