mlrun 1.6.0rc15__py3-none-any.whl → 1.6.0rc16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (66)
  1. mlrun/artifacts/base.py +1 -5
  2. mlrun/artifacts/dataset.py +0 -4
  3. mlrun/artifacts/model.py +0 -5
  4. mlrun/common/schemas/auth.py +3 -4
  5. mlrun/config.py +1 -0
  6. mlrun/data_types/to_pandas.py +0 -1
  7. mlrun/datastore/base.py +0 -1
  8. mlrun/datastore/dbfs_store.py +0 -1
  9. mlrun/datastore/sources.py +1 -1
  10. mlrun/datastore/v3io.py +1 -1
  11. mlrun/datastore/wasbfs/fs.py +0 -1
  12. mlrun/errors.py +0 -1
  13. mlrun/feature_store/retrieval/base.py +2 -3
  14. mlrun/feature_store/retrieval/job.py +0 -1
  15. mlrun/feature_store/retrieval/spark_merger.py +0 -2
  16. mlrun/feature_store/steps.py +0 -3
  17. mlrun/frameworks/_common/model_handler.py +2 -4
  18. mlrun/frameworks/_dl_common/loggers/logger.py +1 -3
  19. mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +1 -3
  20. mlrun/frameworks/_ml_common/loggers/logger.py +1 -3
  21. mlrun/frameworks/_ml_common/plans/calibration_curve_plan.py +1 -1
  22. mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +1 -1
  23. mlrun/frameworks/_ml_common/plans/dataset_plan.py +1 -3
  24. mlrun/frameworks/lgbm/__init__.py +2 -2
  25. mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +3 -3
  26. mlrun/frameworks/pytorch/mlrun_interface.py +1 -1
  27. mlrun/frameworks/tf_keras/__init__.py +4 -4
  28. mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +3 -3
  29. mlrun/frameworks/tf_keras/mlrun_interface.py +6 -1
  30. mlrun/frameworks/xgboost/__init__.py +1 -1
  31. mlrun/k8s_utils.py +6 -6
  32. mlrun/kfpops.py +0 -3
  33. mlrun/launcher/base.py +0 -1
  34. mlrun/launcher/local.py +0 -3
  35. mlrun/model.py +4 -3
  36. mlrun/model_monitoring/batch.py +3 -1
  37. mlrun/package/packagers/numpy_packagers.py +1 -1
  38. mlrun/package/utils/log_hint_utils.py +1 -1
  39. mlrun/package/utils/type_hint_utils.py +3 -1
  40. mlrun/platforms/iguazio.py +2 -4
  41. mlrun/projects/project.py +2 -3
  42. mlrun/runtimes/constants.py +7 -0
  43. mlrun/runtimes/daskjob.py +0 -2
  44. mlrun/runtimes/function.py +0 -3
  45. mlrun/runtimes/local.py +1 -1
  46. mlrun/runtimes/mpijob/abstract.py +0 -1
  47. mlrun/runtimes/pod.py +5 -11
  48. mlrun/runtimes/sparkjob/spark3job.py +0 -1
  49. mlrun/secrets.py +0 -1
  50. mlrun/serving/states.py +0 -2
  51. mlrun/serving/utils.py +0 -1
  52. mlrun/serving/v1_serving.py +0 -1
  53. mlrun/track/tracker.py +1 -1
  54. mlrun/track/tracker_manager.py +3 -1
  55. mlrun/utils/azure_vault.py +0 -1
  56. mlrun/utils/condition_evaluator.py +0 -2
  57. mlrun/utils/helpers.py +0 -1
  58. mlrun/utils/logger.py +0 -1
  59. mlrun/utils/notifications/notification_pusher.py +0 -3
  60. mlrun/utils/version/version.json +2 -2
  61. {mlrun-1.6.0rc15.dist-info → mlrun-1.6.0rc16.dist-info}/METADATA +3 -3
  62. {mlrun-1.6.0rc15.dist-info → mlrun-1.6.0rc16.dist-info}/RECORD +66 -66
  63. {mlrun-1.6.0rc15.dist-info → mlrun-1.6.0rc16.dist-info}/LICENSE +0 -0
  64. {mlrun-1.6.0rc15.dist-info → mlrun-1.6.0rc16.dist-info}/WHEEL +0 -0
  65. {mlrun-1.6.0rc15.dist-info → mlrun-1.6.0rc16.dist-info}/entry_points.txt +0 -0
  66. {mlrun-1.6.0rc15.dist-info → mlrun-1.6.0rc16.dist-info}/top_level.txt +0 -0
mlrun/artifacts/base.py CHANGED
@@ -712,7 +712,6 @@ class LinkArtifact(Artifact):
     category=FutureWarning,
 )
 class LegacyArtifact(ModelObj):
-
     _dict_fields = [
         "key",
         "kind",
@@ -930,7 +929,6 @@ class LegacyLinkArtifact(LegacyArtifact):
         link_key=None,
         link_tree=None,
     ):
-
         super().__init__(key)
         self.target_path = target_path
         self.link_iteration = link_iteration
@@ -958,7 +956,6 @@ def upload_extra_data(
         return
     target_path = artifact.target_path
     for key, item in extra_data.items():
-
         if isinstance(item, bytes):
             if target_path:
                 target = os.path.join(target_path, prefix + key)
@@ -1033,7 +1030,6 @@ def generate_target_path(item: Artifact, artifact_path, producer):

     suffix = "/"
     if not item.is_dir:
-
         # suffixes yields a list of suffixes, e.g. ['.tar', '.gz']
         # join them together to get the full suffix, e.g. '.tar.gz'
         suffix = "".join(pathlib.Path(item.src_path or "").suffixes)
@@ -1044,7 +1040,7 @@ def generate_target_path(item: Artifact, artifact_path, producer):


 def convert_legacy_artifact_to_new_format(
-    legacy_artifact: typing.Union[LegacyArtifact, dict]
+    legacy_artifact: typing.Union[LegacyArtifact, dict],
 ) -> Artifact:
     """Converts a legacy artifact to a new format.

mlrun/artifacts/dataset.py CHANGED
@@ -59,7 +59,6 @@ class TableArtifact(Artifact):
         header=None,
         schema=None,
     ):
-
         if key:
             key_suffix = pathlib.Path(key).suffix
             if not format and key_suffix:
@@ -146,7 +145,6 @@ class DatasetArtifact(Artifact):
         label_column: str = None,
         **kwargs,
     ):
-
         format = (format or "").lower()
         super().__init__(key, None, format=format, target_path=target_path)
         if format and format not in self.SUPPORTED_FORMATS:
@@ -348,7 +346,6 @@ class LegacyTableArtifact(LegacyArtifact):
         header=None,
         schema=None,
     ):
-
         if key:
             key_suffix = pathlib.Path(key).suffix
             if not format and key_suffix:
@@ -417,7 +414,6 @@ class LegacyDatasetArtifact(LegacyArtifact):
         ignore_preview_limits: bool = False,
         **kwargs,
     ):
-
         format = (format or "").lower()
         super().__init__(key, None, format=format, target_path=target_path)
         if format and format not in self.SUPPORTED_FORMATS:
mlrun/artifacts/model.py CHANGED
@@ -138,7 +138,6 @@ class ModelArtifact(Artifact):
         model_dir=None,
         **kwargs,
     ):
-
         super().__init__(key, body, format=format, target_path=target_path, **kwargs)
         model_file = str(model_file or "")
         if model_file and "/" in model_file:
@@ -309,7 +308,6 @@ class ModelArtifact(Artifact):
         spec_target_path = None

         if mlrun.mlconf.artifacts.generate_target_path_from_artifact_hash:
-
             # resolving target_path for the model spec
             _, spec_target_path = self.resolve_body_target_hash_path(
                 body=spec_body, artifact_path=artifact_path
@@ -337,7 +335,6 @@ class ModelArtifact(Artifact):
         artifact_path: str,
         target_model_path: str = None,
     ):
-
         body = self.spec.get_body()
         if body:
             if not target_model_path:
@@ -432,7 +429,6 @@ class LegacyModelArtifact(LegacyArtifact):
         model_target_file=None,
         **kwargs,
     ):
-
         super().__init__(key, body, format=format, target_path=target_path, **kwargs)
         self._inputs: ObjectList = None
         self._outputs: ObjectList = None
@@ -507,7 +503,6 @@ class LegacyModelArtifact(LegacyArtifact):
         self.labels["framework"] = self.framework

     def upload(self):
-
         target_model_path = path.join(self.target_path, self.model_file)
         body = self.get_body()
         if body:
mlrun/common/schemas/auth.py CHANGED
@@ -71,10 +71,9 @@ class AuthorizationResourceTypes(mlrun.common.types.StrEnum):
             AuthorizationResourceTypes.project: "/projects/{project_name}",
             AuthorizationResourceTypes.function: "/projects/{project_name}/functions/{resource_name}",
             AuthorizationResourceTypes.artifact: "/projects/{project_name}/artifacts/{resource_name}",
-            # fmt: off
-            AuthorizationResourceTypes.project_background_task:
-                "/projects/{project_name}/background-tasks/{resource_name}",
-            # fmt: on
+            AuthorizationResourceTypes.project_background_task: (
+                "/projects/{project_name}/background-tasks/{resource_name}"
+            ),
             AuthorizationResourceTypes.background_task: "/background-tasks/{resource_name}",
             AuthorizationResourceTypes.feature_set: "/projects/{project_name}/feature-sets/{resource_name}",
             AuthorizationResourceTypes.feature_vector: "/projects/{project_name}/feature-vectors/{resource_name}",
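The change above is formatting only: the "# fmt: off" guard gives way to a parenthesized string, and the resource template itself is unchanged. For reference, it still expands the same way (project and task names below are hypothetical):

    # Template string taken verbatim from the hunk above.
    template = "/projects/{project_name}/background-tasks/{resource_name}"
    print(template.format(project_name="demo", resource_name="task-42"))
    # -> /projects/demo/background-tasks/task-42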
mlrun/config.py CHANGED
@@ -189,6 +189,7 @@ default_config = {
                 "migrations": "3600",
                 "load_project": "60",
                 "run_abortion": "600",
+                "abort_grace_period": "10",
             },
             "runtimes": {"dask": "600"},
         },
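The new "abort_grace_period" key joins the background-task operation timeouts. A minimal sketch of reading it at runtime; the attribute path below is inferred from the neighboring keys in this hunk and is an assumption, not a confirmed API:

    import mlrun

    # Assumed path: the hunk sits among "migrations"/"load_project"/"run_abortion"
    # with a sibling "runtimes" dict, which suggests the background-task timeout tree.
    grace_period = int(
        mlrun.mlconf.background_tasks.default_timeouts.operations.abort_grace_period
    )
    print(grace_period)  # 10 seconds by default in this release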
mlrun/data_types/to_pandas.py CHANGED
@@ -61,7 +61,6 @@ def toPandas(spark_df):
         require_minimum_pyarrow_version()
         to_arrow_schema(spark_df.schema)
     except Exception as e:
-
         if spark_df.sql_ctx._conf.arrowPySparkFallbackEnabled():
             msg = (
                 "toPandas attempted Arrow optimization because "
mlrun/datastore/base.py CHANGED
@@ -317,7 +317,6 @@ class DataStore:
             kwargs["storage_options"] = storage_options
             df = reader(url, **kwargs)
         else:
-
             file = url
             # Workaround for ARROW-12472 affecting pyarrow 3.x and 4.x.
             if file_system.protocol != "file":
mlrun/datastore/dbfs_store.py CHANGED
@@ -123,7 +123,6 @@ class DBFSStore(DataStore):
         return self._filesystem.cat_file(key, start=start, end=end)

     def put(self, key, data, append=False):
-
         self._verify_filesystem_and_key(key)
         if append:
             raise mlrun.errors.MLRunInvalidArgumentError(
mlrun/datastore/sources.py CHANGED
@@ -869,7 +869,7 @@ class StreamSource(OnlineSource):
         endpoint, stream_path = parse_path(self.path)
         v3io_client = v3io.dataplane.Client(endpoint=endpoint)
         container, stream_path = split_path(stream_path)
-        res = v3io_client.create_stream(
+        res = v3io_client.stream.create(
             container=container,
             path=stream_path,
             shard_count=self.attributes["shards"],
mlrun/datastore/v3io.py CHANGED
@@ -175,7 +175,7 @@ class V3ioStore(DataStore):
         subpath_length = len(subpath) - 1

         try:
-            response = v3io_client.get_container_contents(
+            response = v3io_client.container.list(
                 container=container,
                 path=subpath,
                 get_all_attributes=False,
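This mirrors the StreamSource change above (and the model-monitoring change further down): call sites move from the v3io-py client's flat methods to its namespaced model APIs. A minimal sketch of the new call style, with placeholder endpoint, container, and paths:

    import v3io.dataplane

    # Placeholder endpoint; in mlrun it is resolved from the datastore URL.
    client = v3io.dataplane.Client(endpoint="https://webapi.example.com")

    # Was: client.create_stream(...)
    response = client.stream.create(
        container="users",           # hypothetical container
        path="pipelines/my-stream",  # hypothetical stream path
        shard_count=1,
    )

    # Was: client.get_container_contents(...)
    listing = client.container.list(
        container="users",
        path="pipelines/",
        get_all_attributes=False,
    )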
mlrun/datastore/wasbfs/fs.py CHANGED
@@ -18,7 +18,6 @@ from fsspec import AbstractFileSystem


 class WasbFS(AbstractFileSystem):
-
     protocol = "wasb"

     def __init__(
mlrun/errors.py CHANGED
@@ -44,7 +44,6 @@ class MLRunHTTPError(MLRunBaseError, requests.HTTPError):
         status_code: typing.Optional[int] = None,
         **kwargs,
     ):
-
         # because response object is probably with an error, it returns False, so we
         # should use 'is None' specifically
         if response is None:
mlrun/feature_store/retrieval/base.py CHANGED
@@ -191,9 +191,7 @@ class BaseMerger(abc.ABC):

         feature_sets = []
         dfs = []
-        keys = (
-            []
-        )  # the struct of key is [[[],[]], ..] So that each record indicates which way the corresponding
+        keys = []  # the struct of key is [[[],[]], ..] So that each record indicates which way the corresponding
         # featureset is connected to the previous one, and within each record the left keys are indicated in index 0
         # and the right keys in index 1, this keys will be the keys that will be used in this join
         join_types = []
@@ -770,6 +768,7 @@ class BaseMerger(abc.ABC):

         raise mlrun.errors.MLRunRuntimeError("Failed to merge")

+    @classmethod
     def get_default_image(cls, kind):
         return mlrun.mlconf.feature_store.default_job_image

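The @classmethod decorator is the one functional fix in this file: get_default_image was declared with a cls parameter but never decorated, so calling it on the class bound the kind argument to cls. A short sketch of the now-valid call; the import path is taken from the file list above:

    from mlrun.feature_store.retrieval.base import BaseMerger

    # cls is now bound automatically, so the method works on the class itself
    # (and on its subclasses) without an instance:
    image = BaseMerger.get_default_image(kind="job")
    print(image)  # resolves to mlrun.mlconf.feature_store.default_job_image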
mlrun/feature_store/retrieval/job.py CHANGED
@@ -79,7 +79,6 @@ def run_merge_job(
         )
         function.with_spark_service(spark_service=spark_service)
     elif run_config.kind == RuntimeKinds.spark:
-
         if mlconf.is_running_on_iguazio():
             function.with_igz_spark()

mlrun/feature_store/retrieval/spark_merger.py CHANGED
@@ -49,7 +49,6 @@ class SparkFeatureMerger(BaseMerger):
         left_keys: list,
         right_keys: list,
     ):
-
         """Perform an as of join between entity and featureset.
         Join conditions:
         Args:
@@ -132,7 +131,6 @@ class SparkFeatureMerger(BaseMerger):
         left_keys: list,
         right_keys: list,
     ):
-
         """
         spark dataframes join

mlrun/feature_store/steps.py CHANGED
@@ -411,7 +411,6 @@ class Imputer(StepToDict, MLRunStep):
         return event

     def _do_spark(self, event):
-
         for feature in event.columns:
             val = self.mapping.get(feature, self.default_value)
             if val is not None:
@@ -451,7 +450,6 @@ class OneHotEncoder(StepToDict, MLRunStep):
         encoding = self.mapping.get(feature, [])

         if encoding:
-
             one_hot_encoding = {
                 f"{feature}_{OneHotEncoder._sanitized_category(category)}": 0
                 for category in encoding
@@ -476,7 +474,6 @@ class OneHotEncoder(StepToDict, MLRunStep):
         return encoded_values

     def _do_pandas(self, event):
-
         for key, values in self.mapping.items():
             event[key] = pd.Categorical(event[key], categories=list(values))
             encoded = pd.get_dummies(event[key], prefix=key, dtype=np.int64)
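For context on the _do_pandas lines above: pinning the categories before pd.get_dummies means values outside the configured mapping encode to all-zero columns. A standalone sketch of that pattern, with hypothetical sample data:

    import numpy as np
    import pandas as pd

    event = pd.DataFrame({"color": ["red", "blue", "green"]})
    mapping = {"color": ["red", "blue"]}  # hypothetical encoder mapping

    for key, values in mapping.items():
        event[key] = pd.Categorical(event[key], categories=list(values))
        encoded = pd.get_dummies(event[key], prefix=key, dtype=np.int64)
        print(encoded)
        #    color_red  color_blue
        # 0          1           0
        # 1          0           1
        # 2          0           0   <- "green" is outside the mapping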
mlrun/frameworks/_common/model_handler.py CHANGED
@@ -157,9 +157,7 @@ class ModelHandler(ABC, Generic[CommonTypes.ModelType, CommonTypes.IOSampleType]
         self._model_artifact = kwargs.get("model_artifact", None)  # type: ModelArtifact

         # If the model path is of a store model object, this will be the extra data as DataItems ready to be downloaded.
-        self._extra_data = kwargs.get(
-            "extra_data", {}
-        )  # type: Dict[str, CommonTypes.ExtraDataType]
+        self._extra_data = kwargs.get("extra_data", {})  # type: Dict[str, CommonTypes.ExtraDataType]

         # If the model key is passed, override the default:
         self._model_key = kwargs.get("model_key", "model")
@@ -1064,7 +1062,7 @@ class ModelHandler(ABC, Generic[CommonTypes.ModelType, CommonTypes.IOSampleType]

     @staticmethod
     def _validate_modules_parameter(
-        modules_map: Union[Dict[str, Union[None, str, List[str]]], str]
+        modules_map: Union[Dict[str, Union[None, str, List[str]]], str],
     ):
         """
         Validate the given modules parameter.
mlrun/frameworks/_dl_common/loggers/logger.py CHANGED
@@ -53,9 +53,7 @@ class Logger:

         # Setup the dynamic hyperparameters dictionary - a dictionary of all tracked hyperparameters by epochs:
         # [Hyperparameter: str] -> [Epoch: int] -> [value: Union[str, bool, float, int]]
-        self._dynamic_hyperparameters = (
-            {}
-        )  # type: Dict[str, List[DLTypes.TrackableType]]
+        self._dynamic_hyperparameters = {}  # type: Dict[str, List[DLTypes.TrackableType]]

         # Setup the iterations counter:
         self._epochs = 0
mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py CHANGED
@@ -133,9 +133,7 @@ class TensorboardLogger(Logger, Generic[DLTypes.WeightType]):
         # [Statistic: str] -> [Weight: str] -> [epoch: int] -> [value: float]
         self._weights_statistics = {}  # type: Dict[str, Dict[str, List[float]]]
         for statistic_function in self._statistics_functions:
-            self._weights_statistics[
-                statistic_function.__name__
-            ] = {}  # type: Dict[str, List[float]]
+            self._weights_statistics[statistic_function.__name__] = {}  # type: Dict[str, List[float]]

     @property
     def weights(self) -> Dict[str, DLTypes.WeightType]:
mlrun/frameworks/_ml_common/loggers/logger.py CHANGED
@@ -36,9 +36,7 @@ class Logger:

         # Set up the dynamic hyperparameters dictionary - a dictionary of all tracked hyperparameters by epochs:
         # [Hyperparameter: str] -> [Epoch: int] -> [value: Union[str, bool, float, int]]
-        self._dynamic_hyperparameters = (
-            {}
-        )  # type: Dict[str, List[MLTypes.TrackableType]]
+        self._dynamic_hyperparameters = {}  # type: Dict[str, List[MLTypes.TrackableType]]

         # Set up the iterations counter:
         self._iterations = 0
mlrun/frameworks/_ml_common/plans/calibration_curve_plan.py CHANGED
@@ -72,7 +72,7 @@ class CalibrationCurvePlan(MLPlotPlan):
         y_pred: MLTypes.DatasetType = None,
         model: MLTypes.ModelType = None,
         x: MLTypes.DatasetType = None,
-        **kwargs
+        **kwargs,
     ) -> Dict[str, Artifact]:
         """
         Produce the calibration curve according to the ground truth (y) and predictions (y_pred) values. If predictions
mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py CHANGED
@@ -78,7 +78,7 @@ class ConfusionMatrixPlan(MLPlotPlan):
         y_pred: MLTypes.DatasetType = None,
         model: MLTypes.ModelType = None,
         x: MLTypes.DatasetType = None,
-        **kwargs
+        **kwargs,
     ) -> Dict[str, Artifact]:
         """
         Produce the confusion matrix according to the ground truth (y) and predictions (y_pred) values. If predictions
mlrun/frameworks/_ml_common/plans/dataset_plan.py CHANGED
@@ -89,9 +89,7 @@ class DatasetPlan(MLPlan):
         self._preview = preview
         self._stats = stats
         self._fmt = fmt
-        self._plans = (
-            {}
-        )  # TODO: Implement DatasetPlansLibrary with dataset specific artifacts plans.
+        self._plans = {}  # TODO: Implement DatasetPlansLibrary with dataset specific artifacts plans.

         # Continue initializing the plan:
         super(DatasetPlan, self).__init__(need_probabilities=False)
mlrun/frameworks/lgbm/__init__.py CHANGED
@@ -106,7 +106,7 @@ def _apply_mlrun_on_model(
     parameters: Dict[str, Union[str, int, float]] = None,
     extra_data: Dict[str, LGBMTypes.ExtraDataType] = None,
     auto_log: bool = True,
-    **kwargs
+    **kwargs,
 ):
     # Create a model handler:
     model_handler_kwargs = (
@@ -205,7 +205,7 @@ def apply_mlrun(
     extra_data: Dict[str, LGBMTypes.ExtraDataType] = None,
     auto_log: bool = True,
     mlrun_logging_callback_kwargs: Dict[str, Any] = None,
-    **kwargs
+    **kwargs,
 ) -> Union[LGBMModelHandler, None]:
     """
     Apply MLRun's interface on top of LightGBM by wrapping the module itself or the given model, providing both with
mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py CHANGED
@@ -364,9 +364,9 @@ class TensorboardLoggingCallback(LoggingCallback):
         return self._logger.weight_statistics

     @staticmethod
-    def get_default_weight_statistics_list() -> List[
-        Callable[[Union[Parameter, Tensor]], Union[float, Tensor]]
-    ]:
+    def get_default_weight_statistics_list() -> (
+        List[Callable[[Union[Parameter, Tensor]], Union[float, Tensor]]]
+    ):
         """
         Get the default list of statistics functions being applied on the tracked weights each epoch.

mlrun/frameworks/pytorch/mlrun_interface.py CHANGED
@@ -949,7 +949,7 @@ class PyTorchMLRunInterface:

     @staticmethod
     def _tensor_to_cuda(
-        tensor: Union[Tensor, Dict, List, Tuple]
+        tensor: Union[Tensor, Dict, List, Tuple],
     ) -> Union[Tensor, Dict, List, Tuple]:
         """
         Send to given tensor to cuda if it is a tensor. If the given object is a dictionary, the dictionary values will
mlrun/frameworks/tf_keras/__init__.py CHANGED
@@ -42,7 +42,7 @@ def apply_mlrun(
     mlrun_callback_kwargs: Dict[str, Any] = None,
     tensorboard_callback_kwargs: Dict[str, Any] = None,
     use_horovod: bool = None,
-    **kwargs
+    **kwargs,
 ) -> TFKerasModelHandler:
     """
     Wrap the given model with MLRun's interface providing it with mlrun's additional features.
@@ -143,7 +143,7 @@ def apply_mlrun(
         modules_map=modules_map,
         custom_objects_map=custom_objects_map,
         custom_objects_directory=custom_objects_directory,
-        **model_handler_kwargs
+        **model_handler_kwargs,
     )

     # Load the model if it was not provided:
@@ -174,7 +174,7 @@ def apply_mlrun(
             model_handler=handler,
             log_model_tag=tag,
             auto_log=auto_log,
-            **mlrun_callback_kwargs
+            **mlrun_callback_kwargs,
         )
     )
     model.add_logging_callback(
@@ -182,7 +182,7 @@ def apply_mlrun(
             context=context,
             tensorboard_directory=tensorboard_directory,
             auto_log=auto_log,
-            **tensorboard_callback_kwargs
+            **tensorboard_callback_kwargs,
         )
     )

mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py CHANGED
@@ -554,9 +554,9 @@ class TensorboardLoggingCallback(LoggingCallback):
         self._logger.write_dynamic_hyperparameters()

     @staticmethod
-    def get_default_weight_statistics_list() -> List[
-        Callable[[Union[Variable, Tensor]], Union[float, Tensor]]
-    ]:
+    def get_default_weight_statistics_list() -> (
+        List[Callable[[Union[Variable, Tensor]], Union[float, Tensor]]]
+    ):
         """
         Get the default list of statistics functions being applied on the tracked weights each epoch.

mlrun/frameworks/tf_keras/mlrun_interface.py CHANGED
@@ -148,7 +148,12 @@ class TFKerasMLRunInterface(MLRunInterface, ABC):
         kwargs["validation_data"] = kwargs.get("validation_data", None)

         # Call the pre fit method:
-        (callbacks, verbose, steps_per_epoch, validation_steps,) = self._pre_fit(
+        (
+            callbacks,
+            verbose,
+            steps_per_epoch,
+            validation_steps,
+        ) = self._pre_fit(
             callbacks=kwargs["callbacks"],
             verbose=kwargs["verbose"],
             steps_per_epoch=kwargs["steps_per_epoch"],
mlrun/frameworks/xgboost/__init__.py CHANGED
@@ -55,7 +55,7 @@ def apply_mlrun(
     parameters: Dict[str, Union[str, int, float]] = None,
     extra_data: Dict[str, XGBoostTypes.ExtraDataType] = None,
     auto_log: bool = True,
-    **kwargs
+    **kwargs,
 ) -> XGBoostModelHandler:
     """
     Wrap the given model with MLRun's interface providing it with mlrun's additional features.
mlrun/k8s_utils.py CHANGED
@@ -60,9 +60,9 @@ def generate_preemptible_node_selector_requirements(
     return match_expressions


-def generate_preemptible_nodes_anti_affinity_terms() -> typing.List[
-    kubernetes.client.V1NodeSelectorTerm
-]:
+def generate_preemptible_nodes_anti_affinity_terms() -> (
+    typing.List[kubernetes.client.V1NodeSelectorTerm]
+):
     """
     Generate node selector term containing anti-affinity expressions based on the
     pre-configured node selector of the preemptible nodes.
@@ -82,9 +82,9 @@ def generate_preemptible_nodes_anti_affinity_terms() -> typing.List[
     ]


-def generate_preemptible_nodes_affinity_terms() -> typing.List[
-    kubernetes.client.V1NodeSelectorTerm
-]:
+def generate_preemptible_nodes_affinity_terms() -> (
+    typing.List[kubernetes.client.V1NodeSelectorTerm]
+):
     """
     Use for purpose of scheduling on node having at least one of the node selectors.
     When specifying multiple nodeSelectorTerms associated with nodeAffinity types,
mlrun/kfpops.py CHANGED
@@ -313,7 +313,6 @@ def mlrun_op(
     code_env = None
     function_name = ""
     if function:
-
         if not func_url:
             if function.kind in ["", "local"]:
                 image = image or function.spec.image
@@ -492,7 +491,6 @@ def deploy_op(
     tag="",
     verbose=False,
 ):
-
     cmd = ["python", "-m", "mlrun", "deploy"]
     if source:
         cmd += ["-s", source]
@@ -855,7 +853,6 @@ def add_default_function_resources(
 def add_function_node_selection_attributes(
     function, container_op: dsl.ContainerOp
 ) -> dsl.ContainerOp:
-
     if not mlrun.runtimes.RuntimeKinds.is_local_runtime(function.kind):
         if getattr(function.spec, "node_selector"):
             container_op.node_selector = function.spec.node_selector
mlrun/launcher/base.py CHANGED
@@ -177,7 +177,6 @@ class BaseLauncher(abc.ABC):

     def _validate_run_params(self, parameters: Dict[str, Any]):
         for param_name, param_value in parameters.items():
-
             if isinstance(param_value, dict):
                 # if the parameter is a dict, we might have some nested parameters,
                 # in this case we need to verify them as well recursively
mlrun/launcher/local.py CHANGED
@@ -69,7 +69,6 @@ class ClientLocalLauncher(launcher.ClientBaseLauncher):
         returns: Optional[List[Union[str, Dict[str, str]]]] = None,
         state_thresholds: Optional[Dict[str, int]] = None,
     ) -> "mlrun.run.RunObject":
-
         # do not allow local function to be scheduled
         if self._is_run_local and schedule is not None:
             raise mlrun.errors.MLRunInvalidArgumentError(
@@ -133,7 +132,6 @@ class ClientLocalLauncher(launcher.ClientBaseLauncher):
         runtime: "mlrun.runtimes.BaseRuntime",
         run: Optional[Union["mlrun.run.RunTemplate", "mlrun.run.RunObject"]] = None,
     ):
-
         if "V3IO_USERNAME" in os.environ and "v3io_user" not in run.metadata.labels:
             run.metadata.labels["v3io_user"] = os.environ.get("V3IO_USERNAME")

@@ -209,7 +207,6 @@ class ClientLocalLauncher(launcher.ClientBaseLauncher):
         workdir: Optional[str] = "",
         handler: Optional[str] = None,
     ):
-
         project = project or runtime.metadata.project
         function_name = name or runtime.metadata.name
         command, args = self._resolve_local_code_path(local_code_path)
mlrun/model.py CHANGED
@@ -1258,13 +1258,14 @@ class RunObject(RunTemplate):
     def error(self) -> str:
         """error string if failed"""
         if self.status:
-            if self.status.state != "error":
-                return f"Run state ({self.status.state}) is not in error state"
+            unknown_error = ""
+            if self.status.state in mlrun.runtimes.constants.RunStates.error_states():
+                unknown_error = "Unknown error"
             return (
                 self.status.error
                 or self.status.reason
                 or self.status.status_text
-                or "Unknown error"
+                or unknown_error
             )
         return ""

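This is a behavior change: error() used to return a "Run state (...) is not in error state" message for healthy runs; it now returns an empty string unless the run state is an error state, and only then falls back to "Unknown error". A condensed sketch of the new fallback logic (not the actual class; the error-state set below is an assumption standing in for RunStates.error_states()):

    def run_error(state, error="", reason="", status_text=""):
        # Mirrors RunObject.error() after this change.
        fallback = "Unknown error" if state in ("error", "aborted") else ""
        return error or reason or status_text or fallback

    assert run_error("completed") == ""           # healthy runs: no noisy message
    assert run_error("error") == "Unknown error"  # failure with no recorded details
    assert run_error("error", error="OOMKilled") == "OOMKilled"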
mlrun/model_monitoring/batch.py CHANGED
@@ -526,12 +526,14 @@ class BatchProcessor:
         )

         # Get drift thresholds from the model monitoring configuration
+        # fmt: off
         self.default_possible_drift_threshold = (
             mlrun.mlconf.model_endpoint_monitoring.drift_thresholds.default.possible_drift
         )
         self.default_drift_detected_threshold = (
             mlrun.mlconf.model_endpoint_monitoring.drift_thresholds.default.drift_detected
         )
+        # fmt: on

         # Get a runtime database

@@ -618,7 +620,7 @@ class BatchProcessor:

         if not mlrun.mlconf.is_ce_mode():
             # Create v3io stream based on the input stream
-            response = self.v3io.create_stream(
+            response = self.v3io.stream.create(
                 container=self.stream_container,
                 path=self.stream_path,
                 shard_count=1,
mlrun/package/packagers/numpy_packagers.py CHANGED
@@ -514,7 +514,7 @@ class _NumPyNDArrayCollectionPackager(DefaultPackager):

     @staticmethod
     def _is_any_object_dtype(
-        array_collection: Union[np.ndarray, NumPyArrayCollectionType]
+        array_collection: Union[np.ndarray, NumPyArrayCollectionType],
     ):
         """
         Check if any of the arrays in a collection is of type `object`.
mlrun/package/utils/log_hint_utils.py CHANGED
@@ -35,7 +35,7 @@ class LogHintUtils:

     @staticmethod
     def parse_log_hint(
-        log_hint: typing.Union[typing.Dict[str, str], str, None]
+        log_hint: typing.Union[typing.Dict[str, str], str, None],
     ) -> typing.Union[typing.Dict[str, str], None]:
         """
         Parse a given log hint from string to a logging configuration dictionary. The string will be read as the
mlrun/package/utils/type_hint_utils.py CHANGED
@@ -249,7 +249,9 @@ class TypeHintUtils:
             if type_hint.__forward_module__:
                 arg = f"{type_hint.__forward_module__}.{arg}"
             return [TypeHintUtils.parse_type_hint(type_hint=arg)]
-        except MLRunInvalidArgumentError:  # May be raised from `TypeHintUtils.parse_type_hint`
+        except (
+            MLRunInvalidArgumentError
+        ):  # May be raised from `TypeHintUtils.parse_type_hint`
             logger.warn(
                 f"Could not reduce the type hint '{type_hint}' as it is a forward reference to a class without "
                 f"it's full module path. To enable importing forward references, please provide the full module "