mlrun 1.6.0rc35__py3-none-any.whl → 1.7.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mlrun might be problematic.
- mlrun/__main__.py +3 -3
- mlrun/api/schemas/__init__.py +1 -1
- mlrun/artifacts/base.py +11 -6
- mlrun/artifacts/dataset.py +2 -2
- mlrun/artifacts/model.py +30 -24
- mlrun/artifacts/plots.py +2 -2
- mlrun/common/db/sql_session.py +5 -3
- mlrun/common/helpers.py +1 -2
- mlrun/common/schemas/artifact.py +3 -3
- mlrun/common/schemas/auth.py +3 -3
- mlrun/common/schemas/background_task.py +1 -1
- mlrun/common/schemas/client_spec.py +1 -1
- mlrun/common/schemas/feature_store.py +16 -16
- mlrun/common/schemas/frontend_spec.py +7 -7
- mlrun/common/schemas/function.py +1 -1
- mlrun/common/schemas/hub.py +4 -9
- mlrun/common/schemas/memory_reports.py +2 -2
- mlrun/common/schemas/model_monitoring/grafana.py +4 -4
- mlrun/common/schemas/model_monitoring/model_endpoints.py +14 -15
- mlrun/common/schemas/notification.py +4 -4
- mlrun/common/schemas/object.py +2 -2
- mlrun/common/schemas/pipeline.py +1 -1
- mlrun/common/schemas/project.py +3 -3
- mlrun/common/schemas/runtime_resource.py +8 -12
- mlrun/common/schemas/schedule.py +3 -3
- mlrun/common/schemas/tag.py +1 -2
- mlrun/common/schemas/workflow.py +2 -2
- mlrun/config.py +8 -4
- mlrun/data_types/to_pandas.py +1 -3
- mlrun/datastore/base.py +0 -28
- mlrun/datastore/datastore_profile.py +9 -9
- mlrun/datastore/filestore.py +0 -1
- mlrun/datastore/google_cloud_storage.py +1 -1
- mlrun/datastore/sources.py +7 -11
- mlrun/datastore/spark_utils.py +1 -2
- mlrun/datastore/targets.py +31 -31
- mlrun/datastore/utils.py +4 -6
- mlrun/datastore/v3io.py +70 -46
- mlrun/db/base.py +22 -23
- mlrun/db/httpdb.py +34 -34
- mlrun/db/nopdb.py +19 -19
- mlrun/errors.py +1 -1
- mlrun/execution.py +4 -4
- mlrun/feature_store/api.py +20 -21
- mlrun/feature_store/common.py +1 -1
- mlrun/feature_store/feature_set.py +28 -32
- mlrun/feature_store/feature_vector.py +24 -27
- mlrun/feature_store/retrieval/base.py +7 -7
- mlrun/feature_store/retrieval/conversion.py +2 -4
- mlrun/feature_store/steps.py +7 -15
- mlrun/features.py +5 -7
- mlrun/frameworks/_common/artifacts_library.py +9 -9
- mlrun/frameworks/_common/mlrun_interface.py +5 -5
- mlrun/frameworks/_common/model_handler.py +48 -48
- mlrun/frameworks/_common/plan.py +2 -3
- mlrun/frameworks/_common/producer.py +3 -4
- mlrun/frameworks/_common/utils.py +5 -5
- mlrun/frameworks/_dl_common/loggers/logger.py +6 -7
- mlrun/frameworks/_dl_common/loggers/mlrun_logger.py +9 -9
- mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +16 -35
- mlrun/frameworks/_ml_common/artifacts_library.py +1 -2
- mlrun/frameworks/_ml_common/loggers/logger.py +3 -4
- mlrun/frameworks/_ml_common/loggers/mlrun_logger.py +4 -5
- mlrun/frameworks/_ml_common/model_handler.py +24 -24
- mlrun/frameworks/_ml_common/pkl_model_server.py +2 -2
- mlrun/frameworks/_ml_common/plan.py +1 -1
- mlrun/frameworks/_ml_common/plans/calibration_curve_plan.py +2 -3
- mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +2 -3
- mlrun/frameworks/_ml_common/plans/dataset_plan.py +3 -3
- mlrun/frameworks/_ml_common/plans/feature_importance_plan.py +3 -3
- mlrun/frameworks/_ml_common/plans/roc_curve_plan.py +4 -4
- mlrun/frameworks/_ml_common/utils.py +4 -4
- mlrun/frameworks/auto_mlrun/auto_mlrun.py +7 -7
- mlrun/frameworks/huggingface/model_server.py +4 -4
- mlrun/frameworks/lgbm/__init__.py +32 -32
- mlrun/frameworks/lgbm/callbacks/logging_callback.py +4 -5
- mlrun/frameworks/lgbm/callbacks/mlrun_logging_callback.py +4 -5
- mlrun/frameworks/lgbm/mlrun_interfaces/booster_mlrun_interface.py +1 -3
- mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +6 -6
- mlrun/frameworks/lgbm/model_handler.py +9 -9
- mlrun/frameworks/lgbm/model_server.py +6 -6
- mlrun/frameworks/lgbm/utils.py +5 -5
- mlrun/frameworks/onnx/dataset.py +8 -8
- mlrun/frameworks/onnx/mlrun_interface.py +3 -3
- mlrun/frameworks/onnx/model_handler.py +6 -6
- mlrun/frameworks/onnx/model_server.py +7 -7
- mlrun/frameworks/parallel_coordinates.py +2 -2
- mlrun/frameworks/pytorch/__init__.py +16 -16
- mlrun/frameworks/pytorch/callbacks/callback.py +4 -5
- mlrun/frameworks/pytorch/callbacks/logging_callback.py +17 -17
- mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +11 -11
- mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +23 -29
- mlrun/frameworks/pytorch/callbacks_handler.py +38 -38
- mlrun/frameworks/pytorch/mlrun_interface.py +20 -20
- mlrun/frameworks/pytorch/model_handler.py +17 -17
- mlrun/frameworks/pytorch/model_server.py +7 -7
- mlrun/frameworks/sklearn/__init__.py +12 -12
- mlrun/frameworks/sklearn/estimator.py +4 -4
- mlrun/frameworks/sklearn/metrics_library.py +14 -14
- mlrun/frameworks/sklearn/mlrun_interface.py +3 -6
- mlrun/frameworks/sklearn/model_handler.py +2 -2
- mlrun/frameworks/tf_keras/__init__.py +5 -5
- mlrun/frameworks/tf_keras/callbacks/logging_callback.py +14 -14
- mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +11 -11
- mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +19 -23
- mlrun/frameworks/tf_keras/mlrun_interface.py +7 -9
- mlrun/frameworks/tf_keras/model_handler.py +14 -14
- mlrun/frameworks/tf_keras/model_server.py +6 -6
- mlrun/frameworks/xgboost/__init__.py +12 -12
- mlrun/frameworks/xgboost/model_handler.py +6 -6
- mlrun/k8s_utils.py +4 -5
- mlrun/kfpops.py +2 -2
- mlrun/launcher/base.py +10 -10
- mlrun/launcher/local.py +8 -8
- mlrun/launcher/remote.py +7 -7
- mlrun/lists.py +3 -4
- mlrun/model.py +205 -55
- mlrun/model_monitoring/api.py +21 -24
- mlrun/model_monitoring/application.py +4 -4
- mlrun/model_monitoring/batch.py +17 -17
- mlrun/model_monitoring/controller.py +2 -1
- mlrun/model_monitoring/features_drift_table.py +44 -31
- mlrun/model_monitoring/prometheus.py +1 -4
- mlrun/model_monitoring/stores/kv_model_endpoint_store.py +11 -13
- mlrun/model_monitoring/stores/model_endpoint_store.py +9 -11
- mlrun/model_monitoring/stores/models/__init__.py +2 -2
- mlrun/model_monitoring/stores/sql_model_endpoint_store.py +11 -13
- mlrun/model_monitoring/stream_processing.py +16 -34
- mlrun/model_monitoring/tracking_policy.py +2 -1
- mlrun/package/__init__.py +6 -6
- mlrun/package/context_handler.py +5 -5
- mlrun/package/packager.py +7 -7
- mlrun/package/packagers/default_packager.py +6 -6
- mlrun/package/packagers/numpy_packagers.py +15 -15
- mlrun/package/packagers/pandas_packagers.py +5 -5
- mlrun/package/packagers/python_standard_library_packagers.py +10 -10
- mlrun/package/packagers_manager.py +18 -23
- mlrun/package/utils/_formatter.py +4 -4
- mlrun/package/utils/_pickler.py +2 -2
- mlrun/package/utils/_supported_format.py +4 -4
- mlrun/package/utils/log_hint_utils.py +2 -2
- mlrun/package/utils/type_hint_utils.py +4 -9
- mlrun/platforms/other.py +1 -2
- mlrun/projects/operations.py +5 -5
- mlrun/projects/pipelines.py +9 -9
- mlrun/projects/project.py +58 -46
- mlrun/render.py +1 -1
- mlrun/run.py +9 -9
- mlrun/runtimes/__init__.py +7 -4
- mlrun/runtimes/base.py +20 -23
- mlrun/runtimes/constants.py +5 -5
- mlrun/runtimes/daskjob.py +8 -8
- mlrun/runtimes/databricks_job/databricks_cancel_task.py +1 -1
- mlrun/runtimes/databricks_job/databricks_runtime.py +7 -7
- mlrun/runtimes/function_reference.py +1 -1
- mlrun/runtimes/local.py +1 -1
- mlrun/runtimes/mpijob/abstract.py +1 -2
- mlrun/runtimes/nuclio/__init__.py +20 -0
- mlrun/runtimes/{function.py → nuclio/function.py} +15 -16
- mlrun/runtimes/{nuclio.py → nuclio/nuclio.py} +6 -6
- mlrun/runtimes/{serving.py → nuclio/serving.py} +13 -12
- mlrun/runtimes/pod.py +95 -48
- mlrun/runtimes/remotesparkjob.py +1 -1
- mlrun/runtimes/sparkjob/spark3job.py +50 -33
- mlrun/runtimes/utils.py +1 -2
- mlrun/secrets.py +3 -3
- mlrun/serving/remote.py +0 -4
- mlrun/serving/routers.py +6 -6
- mlrun/serving/server.py +4 -4
- mlrun/serving/states.py +29 -0
- mlrun/serving/utils.py +3 -3
- mlrun/serving/v1_serving.py +6 -7
- mlrun/serving/v2_serving.py +50 -8
- mlrun/track/tracker_manager.py +3 -3
- mlrun/track/trackers/mlflow_tracker.py +1 -2
- mlrun/utils/async_http.py +5 -7
- mlrun/utils/azure_vault.py +1 -1
- mlrun/utils/clones.py +1 -2
- mlrun/utils/condition_evaluator.py +3 -3
- mlrun/utils/db.py +3 -3
- mlrun/utils/helpers.py +37 -119
- mlrun/utils/http.py +1 -4
- mlrun/utils/logger.py +49 -14
- mlrun/utils/notifications/notification/__init__.py +3 -3
- mlrun/utils/notifications/notification/base.py +2 -2
- mlrun/utils/notifications/notification/ipython.py +1 -1
- mlrun/utils/notifications/notification_pusher.py +8 -14
- mlrun/utils/retryer.py +207 -0
- mlrun/utils/singleton.py +1 -1
- mlrun/utils/v3io_clients.py +2 -3
- mlrun/utils/version/version.json +2 -2
- mlrun/utils/version/version.py +2 -6
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/METADATA +9 -9
- mlrun-1.7.0rc2.dist-info/RECORD +315 -0
- mlrun-1.6.0rc35.dist-info/RECORD +0 -313
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/LICENSE +0 -0
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/WHEEL +0 -0
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/entry_points.txt +0 -0
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/top_level.txt +0 -0
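Most of the diff hunks reproduced below follow two mechanical refactors applied across the code base in this release: type annotations move from the `typing.Dict`/`typing.List`/`typing.Tuple` aliases to the builtin generics of PEP 585 (`dict[...]`, `list[...]`, `tuple[...]`), and two-argument `super(Class, self)` calls collapse to the zero-argument `super()`. A minimal before/after sketch of both patterns (hypothetical class names, not taken from the mlrun sources):

```python
from typing import Dict, List  # old style: aliases imported from typing


class BaseLogger:
    def __init__(self) -> None:
        # Old annotation style, requires the typing import above:
        self.results: Dict[str, List[float]] = {}


class ContextLogger(BaseLogger):
    def __init__(self, context: str = "") -> None:
        # Old style: explicit class and instance arguments to super().
        super(ContextLogger, self).__init__()
        self.context = context


class BaseLoggerNew:
    def __init__(self) -> None:
        # New style (PEP 585, Python >= 3.9): builtin generics, no typing import needed.
        self.results: dict[str, list[float]] = {}


class ContextLoggerNew(BaseLoggerNew):
    def __init__(self, context: str = "") -> None:
        # New style: zero-argument super() resolves the same MRO entry.
        super().__init__()
        self.context = context
```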
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Dict, List

 from ..utils import MLTypes

@@ -42,7 +41,7 @@ class Logger:
         self._iterations = 0

     @property
-    def results(self) -> Dict[str, Dict[str, List[float]]]:
+    def results(self) -> dict[str, dict[str, list[float]]]:
         """
         Get the results logged. The results will be stored in a dictionary where each key is the validation set name
         and the value is a dictionary of metrics to their list of iterations values.
@@ -52,7 +51,7 @@ class Logger:
         return self._results

     @property
-    def static_hyperparameters(self) -> Dict[str, MLTypes.TrackableType]:
+    def static_hyperparameters(self) -> dict[str, MLTypes.TrackableType]:
         """
         Get the static hyperparameters logged. The hyperparameters will be stored in a dictionary where each key is the
         hyperparameter name and the value is his logged value.
@@ -62,7 +61,7 @@ class Logger:
         return self._static_hyperparameters

     @property
-    def dynamic_hyperparameters(self) -> Dict[str, List[MLTypes.TrackableType]]:
+    def dynamic_hyperparameters(self) -> dict[str, list[MLTypes.TrackableType]]:
         """
         Get the dynamic hyperparameters logged. The hyperparameters will be stored in a dictionary where each key is the
         hyperparameter name and the value is a list of his logged values per epoch.
@@ -13,7 +13,6 @@
 # limitations under the License.
 #
 import re
-from typing import Dict, List

 import numpy as np
 import plotly.graph_objects as go
@@ -39,7 +38,7 @@ class MLRunLogger(Logger):

         :param context: MLRun context to log to. The context parameters can be logged as static hyperparameters.
         """
-        super(MLRunLogger, self).__init__()
+        super().__init__()

         # An MLRun context to log to:
         self._context = context
@@ -47,7 +46,7 @@ class MLRunLogger(Logger):
         # Prepare the artifacts dictionary:
         self._artifacts = {}  # type: Dict[str, Artifact]

-    def get_artifacts(self) -> Dict[str, Artifact]:
+    def get_artifacts(self) -> dict[str, Artifact]:
         """
         Get the artifacts created by this logger.

@@ -55,7 +54,7 @@ class MLRunLogger(Logger):
         """
         return self._artifacts

-    def get_metrics(self) -> Dict[str, float]:
+    def get_metrics(self) -> dict[str, float]:
         """
         Generate a metrics summary to log along the model.

@@ -144,7 +143,7 @@ class MLRunLogger(Logger):

     @staticmethod
     def _produce_convergence_plot_artifact(
-        name: str, values: List[float]
+        name: str, values: list[float]
     ) -> PlotlyArtifact:
         """
         Produce the convergences for the provided metric according.
@@ -13,7 +13,7 @@
 # limitations under the License.
 #
 from abc import ABC
-from typing import
+from typing import Union

 import mlrun
 from mlrun.artifacts import Artifact
@@ -35,10 +35,10 @@ class MLModelHandler(ModelHandler, ABC):
         model_path: MLTypes.PathType = None,
         model_name: str = None,
         modules_map: Union[
-            Dict[str, Union[None, str, List[str]]], MLTypes.PathType
+            dict[str, Union[None, str, list[str]]], MLTypes.PathType
         ] = None,
         custom_objects_map: Union[
-            Dict[str, Union[str, List[str]]], MLTypes.PathType
+            dict[str, Union[str, list[str]]], MLTypes.PathType
         ] = None,
         custom_objects_directory: MLTypes.PathType = None,
         context: mlrun.MLClientCtx = None,
@@ -105,7 +105,7 @@ class MLModelHandler(ModelHandler, ABC):
         self._feature_weights = None  # type: List[float]

         # Continue the initialization:
-        super(MLModelHandler, self).__init__(
+        super().__init__(
             model=model,
             model_path=model_path,
             model_name=model_name,
@@ -153,7 +153,7 @@ class MLModelHandler(ModelHandler, ABC):
         return self._feature_vector

     @property
-    def feature_weights(self) -> List[float]:
+    def feature_weights(self) -> list[float]:
         """
         Get the feature weights set in this handler.

@@ -213,7 +213,7 @@ class MLModelHandler(ModelHandler, ABC):
         """
         self._feature_vector = feature_vector

-    def set_feature_weights(self, feature_weights: List[float]):
+    def set_feature_weights(self, feature_weights: list[float]):
         """
         Set the feature weights this model will be logged with.

@@ -224,18 +224,18 @@ class MLModelHandler(ModelHandler, ABC):
     def log(
         self,
         tag: str = "",
-        labels: Dict[str, Union[str, int, float]] = None,
-        parameters: Dict[str, Union[str, int, float]] = None,
-        inputs: List[Feature] = None,
-        outputs: List[Feature] = None,
-        metrics: Dict[str, Union[int, float]] = None,
-        artifacts: Dict[str, Artifact] = None,
-        extra_data: Dict[str, MLTypes.ExtraDataType] = None,
+        labels: dict[str, Union[str, int, float]] = None,
+        parameters: dict[str, Union[str, int, float]] = None,
+        inputs: list[Feature] = None,
+        outputs: list[Feature] = None,
+        metrics: dict[str, Union[int, float]] = None,
+        artifacts: dict[str, Artifact] = None,
+        extra_data: dict[str, MLTypes.ExtraDataType] = None,
         algorithm: str = None,
         sample_set: MLTypes.DatasetType = None,
         target_columns: MLTypes.TargetColumnsNamesType = None,
         feature_vector: str = None,
-        feature_weights: List[float] = None,
+        feature_weights: list[float] = None,
     ):
         """
         Log the model held by this handler into the MLRun context provided.
@@ -281,7 +281,7 @@ class MLModelHandler(ModelHandler, ABC):
         self.set_feature_weights(feature_weights=feature_weights)

         # Continue with the handler logging:
-        super(MLModelHandler, self).log(
+        super().log(
             tag=tag,
             labels=labels,
             parameters=parameters,
@@ -299,15 +299,15 @@ class MLModelHandler(ModelHandler, ABC):

     def update(
         self,
-        labels: Dict[str, Union[str, int, float]] = None,
-        parameters: Dict[str, Union[str, int, float]] = None,
-        inputs: List[Feature] = None,
-        outputs: List[Feature] = None,
-        metrics: Dict[str, Union[int, float]] = None,
-        artifacts: Dict[str, Artifact] = None,
-        extra_data: Dict[str, MLTypes.ExtraDataType] = None,
+        labels: dict[str, Union[str, int, float]] = None,
+        parameters: dict[str, Union[str, int, float]] = None,
+        inputs: list[Feature] = None,
+        outputs: list[Feature] = None,
+        metrics: dict[str, Union[int, float]] = None,
+        artifacts: dict[str, Artifact] = None,
+        extra_data: dict[str, MLTypes.ExtraDataType] = None,
         feature_vector: str = None,
-        feature_weights: List[float] = None,
+        feature_weights: list[float] = None,
     ):
         """
         Update the model held by this handler into the MLRun context provided, updating the model's artifact properties
@@ -336,7 +336,7 @@ class MLModelHandler(ModelHandler, ABC):
         self._feature_weights = feature_weights

         # Continue with the handler update:
-        super(MLModelHandler, self).update(
+        super().update(
             labels=labels,
             parameters=parameters,
             inputs=inputs,
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Any
+from typing import Any

 import numpy as np
 import pandas as pd
@@ -59,7 +59,7 @@ class PickleModelServer(V2ModelServer):

         return y_pred.tolist()

-    def explain(self, request: Dict[str, Any]) -> str:
+    def explain(self, request: dict[str, Any]) -> str:
         """
         Returns a string listing the model that is being served in this serving function and the function name.

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Dict

 import plotly.graph_objects as go
 from sklearn.calibration import calibration_curve
@@ -51,7 +50,7 @@ class CalibrationCurvePlan(MLPlotPlan):
         self._strategy = strategy

         # Continue the initialization for the MLPlan:
-        super(CalibrationCurvePlan, self).__init__(need_probabilities=True)
+        super().__init__(need_probabilities=True)

     def is_ready(self, stage: MLPlanStages, is_probabilities: bool) -> bool:
         """
@@ -73,7 +72,7 @@ class CalibrationCurvePlan(MLPlotPlan):
         model: MLTypes.ModelType = None,
         x: MLTypes.DatasetType = None,
         **kwargs,
-    ) -> Dict[str, Artifact]:
+    ) -> dict[str, Artifact]:
         """
         Produce the calibration curve according to the ground truth (y) and predictions (y_pred) values. If predictions
         are not available, the model and a dataset can be given to produce them.
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Dict

 import numpy as np
 import pandas as pd
@@ -57,7 +56,7 @@ class ConfusionMatrixPlan(MLPlotPlan):
         self._normalize = normalize

         # Continue the initialization for the MLPlan:
-        super(ConfusionMatrixPlan, self).__init__()
+        super().__init__()

     def is_ready(self, stage: MLPlanStages, is_probabilities: bool) -> bool:
         """
@@ -79,7 +78,7 @@ class ConfusionMatrixPlan(MLPlotPlan):
         model: MLTypes.ModelType = None,
         x: MLTypes.DatasetType = None,
         **kwargs,
-    ) -> Dict[str, Artifact]:
+    ) -> dict[str, Artifact]:
         """
         Produce the confusion matrix according to the ground truth (y) and predictions (y_pred) values. If predictions
         are not available, the model and a dataset can be given to produce them.
@@ -13,7 +13,7 @@
 # limitations under the License.
 #
 from enum import Enum
-from typing import
+from typing import Union

 import mlrun.errors
 from mlrun.artifacts import Artifact, DatasetArtifact
@@ -92,7 +92,7 @@ class DatasetPlan(MLPlan):
         self._plans = {}  # TODO: Implement DatasetPlansLibrary with dataset specific artifacts plans.

         # Continue initializing the plan:
-        super(DatasetPlan, self).__init__(need_probabilities=False)
+        super().__init__(need_probabilities=False)

     def is_ready(self, stage: MLPlanStages, is_probabilities: bool) -> bool:
         """
@@ -124,7 +124,7 @@ class DatasetPlan(MLPlan):
         y: MLTypes.DatasetType = None,
         target_columns_names: MLTypes.TargetColumnsNamesType = None,
         **kwargs,
-    ) -> Dict[str, Artifact]:
+    ) -> dict[str, Artifact]:
         """
         Produce the dataset artifact according to this plan.

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import
+from typing import Union

 import numpy as np
 import plotly.graph_objects as go
@@ -38,7 +38,7 @@ class FeatureImportancePlan(MLPlotPlan):
         An example of use can be seen at the Scikit-Learn docs here:
         https://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html
         """
-        super(FeatureImportancePlan, self).__init__()
+        super().__init__()

     def is_ready(self, stage: MLPlanStages, is_probabilities: bool) -> bool:
         """
@@ -55,7 +55,7 @@ class FeatureImportancePlan(MLPlotPlan):

     def produce(
         self, model: MLTypes.ModelType, x: MLTypes.DatasetType, **kwargs
-    ) -> Dict[str, Artifact]:
+    ) -> dict[str, Artifact]:
         """
         Produce the feature importance according to the given model and dataset ('x').

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import
+from typing import Union

 import numpy as np
 import pandas as pd
@@ -41,7 +41,7 @@ class ROCCurvePlan(MLPlotPlan):
         average: str = "macro",
         max_fpr: float = None,
         multi_class: str = "raise",
-        labels: List[str] = None,
+        labels: list[str] = None,
     ):
         """
         Initialize a receiver operating characteristic plan with the given configuration.
@@ -75,7 +75,7 @@ class ROCCurvePlan(MLPlotPlan):
         self._labels = labels

         # Continue the initialization for the MLPlan:
-        super(ROCCurvePlan, self).__init__(need_probabilities=True)
+        super().__init__(need_probabilities=True)

     def is_ready(self, stage: MLPlanStages, is_probabilities: bool) -> bool:
         """
@@ -97,7 +97,7 @@ class ROCCurvePlan(MLPlotPlan):
         model: MLTypes.ModelType = None,
         x: MLTypes.DatasetType = None,
         **kwargs,
-    ) -> Dict[str, Artifact]:
+    ) -> dict[str, Artifact]:
         """
         Produce the roc curve according to the ground truth (y) and predictions (y_pred) values. If predictions are not
         available, the model and a dataset can be given to produce them.
@@ -14,7 +14,7 @@
 #
 from abc import ABC
 from enum import Enum
-from typing import Callable,
+from typing import Callable, Union

 import pandas as pd
 from sklearn.base import is_classifier, is_regressor
@@ -137,10 +137,10 @@ class MLTypes(CommonTypes, ABC):
     # of the function and the full module path to the function to import. Arguments to use when calling the metric can
     # be joined by wrapping it as a tuple:
     # TODO: will be moved to SKLearn's framework once LightGBM and XGBoost are fully supported.
-    MetricEntryType = Union[
+    MetricEntryType = Union[tuple[Union[Callable, str], dict], Callable, str]

     # Type for the target column name - a list of indices or column names that are the ground truth (y) of a dataset.
-    TargetColumnsNamesType = Union[List[str], List[int]]
+    TargetColumnsNamesType = Union[list[str], list[int]]


 class MLUtils(CommonUtils, ABC):
@@ -154,7 +154,7 @@ class MLUtils(CommonUtils, ABC):
         y: CommonTypes.DatasetType = None,
         target_columns_names: MLTypes.TargetColumnsNamesType = None,
         default_target_column_prefix: str = "y_",
-    ) -> Tuple[pd.DataFrame, Union[MLTypes.TargetColumnsNamesType, None]]:
+    ) -> tuple[pd.DataFrame, Union[MLTypes.TargetColumnsNamesType, None]]:
         """
         Concatenating the provided x and y data into a single pd.DataFrame, casting from np.ndarray and renaming y's
         original columns if 'y_columns' was not provided. The concatenated dataset index level will be reset to 0
@@ -13,7 +13,7 @@
 # limitations under the License.
 #
 # flake8: noqa - this is until we take care of the F401 violations with respect to __all__ & sphinx
-from typing import Callable,
+from typing import Callable, Union

 import mlrun
 from mlrun.artifacts import get_model
@@ -165,7 +165,7 @@ def get_framework_by_class_name(model: CommonTypes.ModelType) -> str:
     )


-def framework_to_model_handler(framework: str) -> Type[ModelHandler]:
+def framework_to_model_handler(framework: str) -> type[ModelHandler]:
     """
     Get the ModelHandler class of the given framework's name.

@@ -262,7 +262,7 @@ class AutoMLRun:
     @staticmethod
     def _get_framework(
         model: CommonTypes.ModelType = None, model_path: str = None
-    ) -> Union[
+    ) -> Union[tuple[str, dict]]:
         """
         Try to get the framework from the model or model path provided. The framework can be read from the model path
         only if the model path is of a logged model artifact (store object uri).
@@ -322,8 +322,8 @@ class AutoMLRun:
         model_path: str,
         model_name: str = None,
         context: mlrun.MLClientCtx = None,
-        modules_map: Union[Dict[str, Union[None, str, List[str]]], str] = None,
-        custom_objects_map: Union[Dict[str, Union[str, List[str]]], str] = None,
+        modules_map: Union[dict[str, Union[None, str, list[str]]], str] = None,
+        custom_objects_map: Union[dict[str, Union[str, list[str]]], str] = None,
         custom_objects_directory: str = None,
         framework: str = None,
         **kwargs,
@@ -420,8 +420,8 @@ class AutoMLRun:
         model_name: str = None,
         tag: str = "",
         model_path: str = None,
-        modules_map: Union[Dict[str, Union[None, str, List[str]]], str] = None,
-        custom_objects_map: Union[Dict[str, Union[str, List[str]]], str] = None,
+        modules_map: Union[dict[str, Union[None, str, list[str]]], str] = None,
+        custom_objects_map: Union[dict[str, Union[str, list[str]]], str] = None,
         custom_objects_directory: str = None,
         context: mlrun.MLClientCtx = None,
         framework: str = None,
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Any
+from typing import Any

 import numpy as np
 import transformers
@@ -65,7 +65,7 @@ class HuggingFaceModelServer(V2ModelServer):
         framework of the `model`, or to PyTorch if no model is provided
         :param class_args: -
         """
-        super(HuggingFaceModelServer, self).__init__(
+        super().__init__(
             context=context,
             name=name,
             model_path=model_path,
@@ -104,7 +104,7 @@ class HuggingFaceModelServer(V2ModelServer):
             framework=self.framework,
         )

-    def predict(self, request:
+    def predict(self, request: dict[str, Any]) -> list:
         """
         Generate model predictions from sample.
         :param request: The request to the model. The input to the model will be read from the "inputs" key.
@@ -135,7 +135,7 @@ class HuggingFaceModelServer(V2ModelServer):

         return result

-    def explain(self, request:
+    def explain(self, request: dict) -> str:
         """
         Return a string explaining what model is being served in this serving function and the function name.
         :param request: A given request.
@@ -13,7 +13,7 @@
 # limitations under the License.
 #
 # flake8: noqa - this is until we take care of the F401 violations with respect to __all__ & sphinx
-from typing import Any,
+from typing import Any, Union

 import lightgbm as lgb

@@ -37,20 +37,20 @@ LGBMArtifactsLibrary = MLArtifactsLibrary
 def _apply_mlrun_on_module(
     model_name: str = "model",
     tag: str = "",
-    modules_map: Union[Dict[str, Union[None, str, List[str]]], str] = None,
-    custom_objects_map: Union[Dict[str, Union[str, List[str]]], str] = None,
+    modules_map: Union[dict[str, Union[None, str, list[str]]], str] = None,
+    custom_objects_map: Union[dict[str, Union[str, list[str]]], str] = None,
     custom_objects_directory: str = None,
     context: mlrun.MLClientCtx = None,
     model_format: str = LGBMModelHandler.ModelFormats.PKL,
     sample_set: Union[LGBMTypes.DatasetType, mlrun.DataItem, str] = None,
-    y_columns: Union[List[str], List[int]] = None,
+    y_columns: Union[list[str], list[int]] = None,
     feature_vector: str = None,
-    feature_weights: List[float] = None,
-    labels: Dict[str, Union[str, int, float]] = None,
-    parameters: Dict[str, Union[str, int, float]] = None,
-    extra_data: Dict[str, LGBMTypes.ExtraDataType] = None,
+    feature_weights: list[float] = None,
+    labels: dict[str, Union[str, int, float]] = None,
+    parameters: dict[str, Union[str, int, float]] = None,
+    extra_data: dict[str, LGBMTypes.ExtraDataType] = None,
     auto_log: bool = True,
-    mlrun_logging_callback_kwargs: Dict[str, Any] = None,
+    mlrun_logging_callback_kwargs: dict[str, Any] = None,
 ):
     # Apply MLRun's interface on the LightGBM module:
     LGBMMLRunInterface.add_interface(obj=lgb)
@@ -85,26 +85,26 @@ def _apply_mlrun_on_model(
     model_name: str = "model",
     tag: str = "",
     model_path: str = None,
-    modules_map: Union[Dict[str, Union[None, str, List[str]]], str] = None,
-    custom_objects_map: Union[Dict[str, Union[str, List[str]]], str] = None,
+    modules_map: Union[dict[str, Union[None, str, list[str]]], str] = None,
+    custom_objects_map: Union[dict[str, Union[str, list[str]]], str] = None,
     custom_objects_directory: str = None,
     context: mlrun.MLClientCtx = None,
     model_format: str = LGBMModelHandler.ModelFormats.PKL,
-    artifacts: Union[
+    artifacts: Union[list[MLPlan], list[str], dict[str, dict]] = None,
     metrics: Union[
-        List[Metric],
-        List[LGBMTypes.MetricEntryType],
-        Dict[str, LGBMTypes.MetricEntryType],
+        list[Metric],
+        list[LGBMTypes.MetricEntryType],
+        dict[str, LGBMTypes.MetricEntryType],
     ] = None,
     x_test: LGBMTypes.DatasetType = None,
     y_test: LGBMTypes.DatasetType = None,
     sample_set: Union[LGBMTypes.DatasetType, mlrun.DataItem, str] = None,
-    y_columns: Union[List[str], List[int]] = None,
+    y_columns: Union[list[str], list[int]] = None,
     feature_vector: str = None,
-    feature_weights: List[float] = None,
-    labels: Dict[str, Union[str, int, float]] = None,
-    parameters: Dict[str, Union[str, int, float]] = None,
-    extra_data: Dict[str, LGBMTypes.ExtraDataType] = None,
+    feature_weights: list[float] = None,
+    labels: dict[str, Union[str, int, float]] = None,
+    parameters: dict[str, Union[str, int, float]] = None,
+    extra_data: dict[str, LGBMTypes.ExtraDataType] = None,
     auto_log: bool = True,
     **kwargs,
 ):
@@ -183,28 +183,28 @@ def apply_mlrun(
     model_name: str = "model",
     tag: str = "",
     model_path: str = None,
-    modules_map: Union[Dict[str, Union[None, str, List[str]]], str] = None,
-    custom_objects_map: Union[Dict[str, Union[str, List[str]]], str] = None,
+    modules_map: Union[dict[str, Union[None, str, list[str]]], str] = None,
+    custom_objects_map: Union[dict[str, Union[str, list[str]]], str] = None,
     custom_objects_directory: str = None,
     context: mlrun.MLClientCtx = None,
     model_format: str = LGBMModelHandler.ModelFormats.PKL,
-    artifacts: Union[
+    artifacts: Union[list[MLPlan], list[str], dict[str, dict]] = None,
     metrics: Union[
-        List[Metric],
-        List[LGBMTypes.MetricEntryType],
-        Dict[str, LGBMTypes.MetricEntryType],
+        list[Metric],
+        list[LGBMTypes.MetricEntryType],
+        dict[str, LGBMTypes.MetricEntryType],
     ] = None,
     x_test: LGBMTypes.DatasetType = None,
    y_test: LGBMTypes.DatasetType = None,
     sample_set: Union[LGBMTypes.DatasetType, mlrun.DataItem, str] = None,
-    y_columns: Union[List[str], List[int]] = None,
+    y_columns: Union[list[str], list[int]] = None,
     feature_vector: str = None,
-    feature_weights: List[float] = None,
-    labels: Dict[str, Union[str, int, float]] = None,
-    parameters: Dict[str, Union[str, int, float]] = None,
-    extra_data: Dict[str, LGBMTypes.ExtraDataType] = None,
+    feature_weights: list[float] = None,
+    labels: dict[str, Union[str, int, float]] = None,
+    parameters: dict[str, Union[str, int, float]] = None,
+    extra_data: dict[str, LGBMTypes.ExtraDataType] = None,
     auto_log: bool = True,
-    mlrun_logging_callback_kwargs: Dict[str, Any] = None,
+    mlrun_logging_callback_kwargs: dict[str, Any] = None,
     **kwargs,
 ) -> Union[LGBMModelHandler, None]:
     """
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import List

 from ..._ml_common.loggers import Logger
 from ..utils import LGBMTypes
@@ -26,8 +25,8 @@ class LoggingCallback(Callback):

     def __init__(
         self,
-        dynamic_hyperparameters: List[str] = None,
-        static_hyperparameters: List[str] = None,
+        dynamic_hyperparameters: list[str] = None,
+        static_hyperparameters: list[str] = None,
     ):
         """
         Initialize the logging callback with the given configuration. All the metrics data will be collected but the
@@ -41,7 +40,7 @@ class LoggingCallback(Callback):
         The parameter expects a list of all the hyperparameters names to track our of
         the `params` dictionary.
         """
-        super(LoggingCallback, self).__init__()
+        super().__init__()
         self._logger = Logger()
         self._dynamic_hyperparameters_keys = (
             dynamic_hyperparameters if dynamic_hyperparameters is not None else {}
@@ -76,7 +75,7 @@ class LoggingCallback(Callback):
         self._log_hyperparameters(parameters=env.params)

     def _log_results(
-        self, evaluation_result_list: List[LGBMTypes.EvaluationResultType]
+        self, evaluation_result_list: list[LGBMTypes.EvaluationResultType]
     ):
         """
         Log the callback environment results data into the logger.