mlrun 1.6.0rc35__py3-none-any.whl → 1.7.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mlrun might be problematic.
- mlrun/__main__.py +3 -3
- mlrun/api/schemas/__init__.py +1 -1
- mlrun/artifacts/base.py +11 -6
- mlrun/artifacts/dataset.py +2 -2
- mlrun/artifacts/model.py +30 -24
- mlrun/artifacts/plots.py +2 -2
- mlrun/common/db/sql_session.py +5 -3
- mlrun/common/helpers.py +1 -2
- mlrun/common/schemas/artifact.py +3 -3
- mlrun/common/schemas/auth.py +3 -3
- mlrun/common/schemas/background_task.py +1 -1
- mlrun/common/schemas/client_spec.py +1 -1
- mlrun/common/schemas/feature_store.py +16 -16
- mlrun/common/schemas/frontend_spec.py +7 -7
- mlrun/common/schemas/function.py +1 -1
- mlrun/common/schemas/hub.py +4 -9
- mlrun/common/schemas/memory_reports.py +2 -2
- mlrun/common/schemas/model_monitoring/grafana.py +4 -4
- mlrun/common/schemas/model_monitoring/model_endpoints.py +14 -15
- mlrun/common/schemas/notification.py +4 -4
- mlrun/common/schemas/object.py +2 -2
- mlrun/common/schemas/pipeline.py +1 -1
- mlrun/common/schemas/project.py +3 -3
- mlrun/common/schemas/runtime_resource.py +8 -12
- mlrun/common/schemas/schedule.py +3 -3
- mlrun/common/schemas/tag.py +1 -2
- mlrun/common/schemas/workflow.py +2 -2
- mlrun/config.py +8 -4
- mlrun/data_types/to_pandas.py +1 -3
- mlrun/datastore/base.py +0 -28
- mlrun/datastore/datastore_profile.py +9 -9
- mlrun/datastore/filestore.py +0 -1
- mlrun/datastore/google_cloud_storage.py +1 -1
- mlrun/datastore/sources.py +7 -11
- mlrun/datastore/spark_utils.py +1 -2
- mlrun/datastore/targets.py +31 -31
- mlrun/datastore/utils.py +4 -6
- mlrun/datastore/v3io.py +70 -46
- mlrun/db/base.py +22 -23
- mlrun/db/httpdb.py +34 -34
- mlrun/db/nopdb.py +19 -19
- mlrun/errors.py +1 -1
- mlrun/execution.py +4 -4
- mlrun/feature_store/api.py +20 -21
- mlrun/feature_store/common.py +1 -1
- mlrun/feature_store/feature_set.py +28 -32
- mlrun/feature_store/feature_vector.py +24 -27
- mlrun/feature_store/retrieval/base.py +7 -7
- mlrun/feature_store/retrieval/conversion.py +2 -4
- mlrun/feature_store/steps.py +7 -15
- mlrun/features.py +5 -7
- mlrun/frameworks/_common/artifacts_library.py +9 -9
- mlrun/frameworks/_common/mlrun_interface.py +5 -5
- mlrun/frameworks/_common/model_handler.py +48 -48
- mlrun/frameworks/_common/plan.py +2 -3
- mlrun/frameworks/_common/producer.py +3 -4
- mlrun/frameworks/_common/utils.py +5 -5
- mlrun/frameworks/_dl_common/loggers/logger.py +6 -7
- mlrun/frameworks/_dl_common/loggers/mlrun_logger.py +9 -9
- mlrun/frameworks/_dl_common/loggers/tensorboard_logger.py +16 -35
- mlrun/frameworks/_ml_common/artifacts_library.py +1 -2
- mlrun/frameworks/_ml_common/loggers/logger.py +3 -4
- mlrun/frameworks/_ml_common/loggers/mlrun_logger.py +4 -5
- mlrun/frameworks/_ml_common/model_handler.py +24 -24
- mlrun/frameworks/_ml_common/pkl_model_server.py +2 -2
- mlrun/frameworks/_ml_common/plan.py +1 -1
- mlrun/frameworks/_ml_common/plans/calibration_curve_plan.py +2 -3
- mlrun/frameworks/_ml_common/plans/confusion_matrix_plan.py +2 -3
- mlrun/frameworks/_ml_common/plans/dataset_plan.py +3 -3
- mlrun/frameworks/_ml_common/plans/feature_importance_plan.py +3 -3
- mlrun/frameworks/_ml_common/plans/roc_curve_plan.py +4 -4
- mlrun/frameworks/_ml_common/utils.py +4 -4
- mlrun/frameworks/auto_mlrun/auto_mlrun.py +7 -7
- mlrun/frameworks/huggingface/model_server.py +4 -4
- mlrun/frameworks/lgbm/__init__.py +32 -32
- mlrun/frameworks/lgbm/callbacks/logging_callback.py +4 -5
- mlrun/frameworks/lgbm/callbacks/mlrun_logging_callback.py +4 -5
- mlrun/frameworks/lgbm/mlrun_interfaces/booster_mlrun_interface.py +1 -3
- mlrun/frameworks/lgbm/mlrun_interfaces/mlrun_interface.py +6 -6
- mlrun/frameworks/lgbm/model_handler.py +9 -9
- mlrun/frameworks/lgbm/model_server.py +6 -6
- mlrun/frameworks/lgbm/utils.py +5 -5
- mlrun/frameworks/onnx/dataset.py +8 -8
- mlrun/frameworks/onnx/mlrun_interface.py +3 -3
- mlrun/frameworks/onnx/model_handler.py +6 -6
- mlrun/frameworks/onnx/model_server.py +7 -7
- mlrun/frameworks/parallel_coordinates.py +2 -2
- mlrun/frameworks/pytorch/__init__.py +16 -16
- mlrun/frameworks/pytorch/callbacks/callback.py +4 -5
- mlrun/frameworks/pytorch/callbacks/logging_callback.py +17 -17
- mlrun/frameworks/pytorch/callbacks/mlrun_logging_callback.py +11 -11
- mlrun/frameworks/pytorch/callbacks/tensorboard_logging_callback.py +23 -29
- mlrun/frameworks/pytorch/callbacks_handler.py +38 -38
- mlrun/frameworks/pytorch/mlrun_interface.py +20 -20
- mlrun/frameworks/pytorch/model_handler.py +17 -17
- mlrun/frameworks/pytorch/model_server.py +7 -7
- mlrun/frameworks/sklearn/__init__.py +12 -12
- mlrun/frameworks/sklearn/estimator.py +4 -4
- mlrun/frameworks/sklearn/metrics_library.py +14 -14
- mlrun/frameworks/sklearn/mlrun_interface.py +3 -6
- mlrun/frameworks/sklearn/model_handler.py +2 -2
- mlrun/frameworks/tf_keras/__init__.py +5 -5
- mlrun/frameworks/tf_keras/callbacks/logging_callback.py +14 -14
- mlrun/frameworks/tf_keras/callbacks/mlrun_logging_callback.py +11 -11
- mlrun/frameworks/tf_keras/callbacks/tensorboard_logging_callback.py +19 -23
- mlrun/frameworks/tf_keras/mlrun_interface.py +7 -9
- mlrun/frameworks/tf_keras/model_handler.py +14 -14
- mlrun/frameworks/tf_keras/model_server.py +6 -6
- mlrun/frameworks/xgboost/__init__.py +12 -12
- mlrun/frameworks/xgboost/model_handler.py +6 -6
- mlrun/k8s_utils.py +4 -5
- mlrun/kfpops.py +2 -2
- mlrun/launcher/base.py +10 -10
- mlrun/launcher/local.py +8 -8
- mlrun/launcher/remote.py +7 -7
- mlrun/lists.py +3 -4
- mlrun/model.py +205 -55
- mlrun/model_monitoring/api.py +21 -24
- mlrun/model_monitoring/application.py +4 -4
- mlrun/model_monitoring/batch.py +17 -17
- mlrun/model_monitoring/controller.py +2 -1
- mlrun/model_monitoring/features_drift_table.py +44 -31
- mlrun/model_monitoring/prometheus.py +1 -4
- mlrun/model_monitoring/stores/kv_model_endpoint_store.py +11 -13
- mlrun/model_monitoring/stores/model_endpoint_store.py +9 -11
- mlrun/model_monitoring/stores/models/__init__.py +2 -2
- mlrun/model_monitoring/stores/sql_model_endpoint_store.py +11 -13
- mlrun/model_monitoring/stream_processing.py +16 -34
- mlrun/model_monitoring/tracking_policy.py +2 -1
- mlrun/package/__init__.py +6 -6
- mlrun/package/context_handler.py +5 -5
- mlrun/package/packager.py +7 -7
- mlrun/package/packagers/default_packager.py +6 -6
- mlrun/package/packagers/numpy_packagers.py +15 -15
- mlrun/package/packagers/pandas_packagers.py +5 -5
- mlrun/package/packagers/python_standard_library_packagers.py +10 -10
- mlrun/package/packagers_manager.py +18 -23
- mlrun/package/utils/_formatter.py +4 -4
- mlrun/package/utils/_pickler.py +2 -2
- mlrun/package/utils/_supported_format.py +4 -4
- mlrun/package/utils/log_hint_utils.py +2 -2
- mlrun/package/utils/type_hint_utils.py +4 -9
- mlrun/platforms/other.py +1 -2
- mlrun/projects/operations.py +5 -5
- mlrun/projects/pipelines.py +9 -9
- mlrun/projects/project.py +58 -46
- mlrun/render.py +1 -1
- mlrun/run.py +9 -9
- mlrun/runtimes/__init__.py +7 -4
- mlrun/runtimes/base.py +20 -23
- mlrun/runtimes/constants.py +5 -5
- mlrun/runtimes/daskjob.py +8 -8
- mlrun/runtimes/databricks_job/databricks_cancel_task.py +1 -1
- mlrun/runtimes/databricks_job/databricks_runtime.py +7 -7
- mlrun/runtimes/function_reference.py +1 -1
- mlrun/runtimes/local.py +1 -1
- mlrun/runtimes/mpijob/abstract.py +1 -2
- mlrun/runtimes/nuclio/__init__.py +20 -0
- mlrun/runtimes/{function.py → nuclio/function.py} +15 -16
- mlrun/runtimes/{nuclio.py → nuclio/nuclio.py} +6 -6
- mlrun/runtimes/{serving.py → nuclio/serving.py} +13 -12
- mlrun/runtimes/pod.py +95 -48
- mlrun/runtimes/remotesparkjob.py +1 -1
- mlrun/runtimes/sparkjob/spark3job.py +50 -33
- mlrun/runtimes/utils.py +1 -2
- mlrun/secrets.py +3 -3
- mlrun/serving/remote.py +0 -4
- mlrun/serving/routers.py +6 -6
- mlrun/serving/server.py +4 -4
- mlrun/serving/states.py +29 -0
- mlrun/serving/utils.py +3 -3
- mlrun/serving/v1_serving.py +6 -7
- mlrun/serving/v2_serving.py +50 -8
- mlrun/track/tracker_manager.py +3 -3
- mlrun/track/trackers/mlflow_tracker.py +1 -2
- mlrun/utils/async_http.py +5 -7
- mlrun/utils/azure_vault.py +1 -1
- mlrun/utils/clones.py +1 -2
- mlrun/utils/condition_evaluator.py +3 -3
- mlrun/utils/db.py +3 -3
- mlrun/utils/helpers.py +37 -119
- mlrun/utils/http.py +1 -4
- mlrun/utils/logger.py +49 -14
- mlrun/utils/notifications/notification/__init__.py +3 -3
- mlrun/utils/notifications/notification/base.py +2 -2
- mlrun/utils/notifications/notification/ipython.py +1 -1
- mlrun/utils/notifications/notification_pusher.py +8 -14
- mlrun/utils/retryer.py +207 -0
- mlrun/utils/singleton.py +1 -1
- mlrun/utils/v3io_clients.py +2 -3
- mlrun/utils/version/version.json +2 -2
- mlrun/utils/version/version.py +2 -6
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/METADATA +9 -9
- mlrun-1.7.0rc2.dist-info/RECORD +315 -0
- mlrun-1.6.0rc35.dist-info/RECORD +0 -313
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/LICENSE +0 -0
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/WHEEL +0 -0
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/entry_points.txt +0 -0
- {mlrun-1.6.0rc35.dist-info → mlrun-1.7.0rc2.dist-info}/top_level.txt +0 -0
mlrun/runtimes/sparkjob/spark3job.py
CHANGED
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import typing

 import kubernetes.client
@@ -69,6 +68,48 @@ class Spark3JobSpec(KubeResourceSpec):
         "driver_cores",
         "executor_cores",
     ]
+    _default_fields_to_strip = KubeResourceSpec._default_fields_to_strip + [
+        "driver_node_selector",
+        "executor_node_selector",
+        "driver_tolerations",
+        "executor_tolerations",
+        "driver_affinity",
+        "executor_affinity",
+        "driver_volume_mounts",
+        "executor_volume_mounts",
+        "driver_cores",
+        "executor_cores",
+    ]
+
+    __k8s_fields_to_serialize = [
+        "driver_volume_mounts",
+        "executor_volume_mounts",
+        "driver_node_selector",
+        "executor_node_selector",
+        "executor_affinity",
+        "executor_tolerations",
+        "driver_affinity",
+        "driver_tolerations",
+    ]
+    _k8s_fields_to_serialize = (
+        KubeResourceSpec._k8s_fields_to_serialize + __k8s_fields_to_serialize
+    )
+    _fields_to_serialize = (
+        KubeResourceSpec._fields_to_serialize + __k8s_fields_to_serialize
+    )
+    _fields_to_skip_validation = KubeResourceSpec._fields_to_skip_validation + [
+        # TODO: affinity, tolerations and node_selector are skipped due to preemption mode transitions.
+        # Preemption mode 'none' depends on the previous mode while the default mode may enrich these values.
+        # When we allow 'None' values for these attributes we get their true values and they will undo the default
+        # enrichment when creating the runtime from dict.
+        # The enrichment should move to the server side and then this can be removed.
+        "driver_node_selector",
+        "executor_node_selector",
+        "executor_affinity",
+        "executor_tolerations",
+        "driver_affinity",
+        "driver_tolerations",
+    ]

     def __init__(
         self,
@@ -189,26 +230,8 @@ class Spark3JobSpec(KubeResourceSpec):
         self.driver_cores = driver_cores
         self.executor_cores = executor_cores

-    def to_dict(self, fields=None, exclude=None):
-        exclude = exclude or []
-        _exclude = [
-            "affinity",
-            "tolerations",
-            "security_context",
-            "executor_affinity",
-            "executor_tolerations",
-            "driver_affinity",
-            "driver_tolerations",
-        ]
-        struct = super().to_dict(fields, exclude=list(set(exclude + _exclude)))
-        api = kubernetes.client.ApiClient()
-        for field in _exclude:
-            if field not in exclude:
-                struct[field] = api.sanitize_for_serialization(getattr(self, field))
-        return struct
-
     @property
-    def executor_tolerations(self) ->
+    def executor_tolerations(self) -> list[kubernetes.client.V1Toleration]:
         return self._executor_tolerations

     @executor_tolerations.setter
@@ -220,7 +243,7 @@ class Spark3JobSpec(KubeResourceSpec):
         )

     @property
-    def driver_tolerations(self) ->
+    def driver_tolerations(self) -> list[kubernetes.client.V1Toleration]:
         return self._driver_tolerations

     @driver_tolerations.setter
@@ -461,11 +484,9 @@ class Spark3Runtime(KubejobRuntime):
     def with_node_selection(
         self,
         node_name: typing.Optional[str] = None,
-        node_selector: typing.Optional[
+        node_selector: typing.Optional[dict[str, str]] = None,
         affinity: typing.Optional[kubernetes.client.V1Affinity] = None,
-        tolerations: typing.Optional[
-            typing.List[kubernetes.client.V1Toleration]
-        ] = None,
+        tolerations: typing.Optional[list[kubernetes.client.V1Toleration]] = None,
     ):
         if node_name:
             raise NotImplementedError(
@@ -495,11 +516,9 @@ class Spark3Runtime(KubejobRuntime):
     def with_driver_node_selection(
         self,
         node_name: typing.Optional[str] = None,
-        node_selector: typing.Optional[
+        node_selector: typing.Optional[dict[str, str]] = None,
         affinity: typing.Optional[kubernetes.client.V1Affinity] = None,
-        tolerations: typing.Optional[
-            typing.List[kubernetes.client.V1Toleration]
-        ] = None,
+        tolerations: typing.Optional[list[kubernetes.client.V1Toleration]] = None,
     ):
         """
         Enables control of which k8s node the spark executor will run on.
@@ -528,11 +547,9 @@ class Spark3Runtime(KubejobRuntime):
     def with_executor_node_selection(
         self,
         node_name: typing.Optional[str] = None,
-        node_selector: typing.Optional[
+        node_selector: typing.Optional[dict[str, str]] = None,
         affinity: typing.Optional[kubernetes.client.V1Affinity] = None,
-        tolerations: typing.Optional[
-            typing.List[kubernetes.client.V1Toleration]
-        ] = None,
+        tolerations: typing.Optional[list[kubernetes.client.V1Toleration]] = None,
     ):
         """
         Enables control of which k8s node the spark executor will run on.
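The Spark3 changes above swap typing.Dict/typing.List annotations for built-in generics (dict[str, str], list[kubernetes.client.V1Toleration]) and replace the custom to_dict override with declarative serialization/strip field lists. A minimal, hedged usage sketch of the node-selection helpers; the function name and label values are hypothetical:

import kubernetes.client as k8s_client
import mlrun

fn = mlrun.new_function("my-spark-job", kind="spark")  # hypothetical function name
# node_selector is now annotated as a plain dict[str, str]
fn.with_driver_node_selection(node_selector={"pool": "drivers"})
# tolerations is now annotated as a plain list[kubernetes.client.V1Toleration]
fn.with_executor_node_selection(
    tolerations=[k8s_client.V1Toleration(key="spark", operator="Exists", effect="NoSchedule")]
)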
mlrun/runtimes/utils.py
CHANGED
@@ -16,7 +16,6 @@ import hashlib
 import json
 import os
 import re
-import typing
 from io import StringIO
 from sys import stderr

@@ -501,7 +500,7 @@ def enrich_function_from_dict(function, function_dict):

 def enrich_run_labels(
     labels: dict,
-    labels_to_enrich:
+    labels_to_enrich: list[RunLabels] = None,
 ):
     labels_enrichment = {
         RunLabels.owner: os.environ.get("V3IO_USERNAME") or getpass.getuser(),
mlrun/secrets.py
CHANGED
@@ -14,7 +14,7 @@

 from ast import literal_eval
 from os import environ, getenv
-from typing import Callable,
+from typing import Callable, Optional, Union

 from .utils import AzureVaultStore, list2dict

@@ -148,7 +148,7 @@ class SecretsStore:

 def get_secret_or_env(
     key: str,
-    secret_provider: Union[
+    secret_provider: Union[dict, SecretsStore, Callable, None] = None,
     default: Optional[str] = None,
     prefix: Optional[str] = None,
 ) -> str:
@@ -185,7 +185,7 @@ def get_secret_or_env(

     value = None
     if secret_provider:
-        if isinstance(secret_provider, (
+        if isinstance(secret_provider, (dict, SecretsStore)):
            value = secret_provider.get(key)
         else:
            value = secret_provider(key)
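As the updated signature shows, get_secret_or_env accepts a dict, a SecretsStore, or a callable as its provider: dict/SecretsStore providers are queried with .get(key), anything else is called with the key, and `default` supplies a fallback value. A minimal sketch with hypothetical key names and values:

from mlrun.secrets import get_secret_or_env

# dict provider: looked up via .get(key)
token = get_secret_or_env("MY_TOKEN", secret_provider={"MY_TOKEN": "s3cr3t"})

# callable provider: invoked as secret_provider(key); `default` is used when nothing is found
token = get_secret_or_env("MY_TOKEN", secret_provider=lambda key: None, default="fallback")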
mlrun/serving/remote.py
CHANGED
@@ -36,8 +36,6 @@ default_backoff_factor = 1


 class RemoteStep(storey.SendToHttp):
-    """class for calling remote endpoints"""
-
     def __init__(
         self,
         url: str,
@@ -240,8 +238,6 @@ class RemoteStep(storey.SendToHttp):


 class BatchHttpRequests(_ConcurrentJobExecution):
-    """class for calling remote endpoints in parallel"""
-
     def __init__(
         self,
         url: str = None,
mlrun/serving/routers.py
CHANGED
@@ -20,7 +20,7 @@ import traceback
 import typing
 from enum import Enum
 from io import BytesIO
-from typing import
+from typing import Union

 import numpy
 import numpy as np
@@ -485,7 +485,7 @@ class VotingEnsemble(ParallelRun):
         url_prefix: str = None,
         health_prefix: str = None,
         vote_type: str = None,
-        weights:
+        weights: dict[str, float] = None,
         executor_type: Union[ParallelRunnerModes, str] = ParallelRunnerModes.thread,
         format_response_with_col_name_flag: bool = False,
         prediction_col_name: str = "prediction",
@@ -703,7 +703,7 @@ class VotingEnsemble(ParallelRun):
         )
         return model, None, subpath

-    def _majority_vote(self, all_predictions:
+    def _majority_vote(self, all_predictions: list[list[int]], weights: list[float]):
         """
         Returns most predicted class for each event

@@ -727,7 +727,7 @@ class VotingEnsemble(ParallelRun):
         weighted_res = one_hot_representation @ weights
         return np.argmax(weighted_res, axis=1).tolist()

-    def _mean_vote(self, all_predictions:
+    def _mean_vote(self, all_predictions: list[list[float]], weights: list[float]):
         """
         Returns weighted mean of the predictions

@@ -741,7 +741,7 @@ class VotingEnsemble(ParallelRun):
     def _is_int(self, value):
         return float(value).is_integer()

-    def logic(self, predictions:
+    def logic(self, predictions: list[list[Union[int, float]]], weights: list[float]):
         """
         Returns the final prediction of all the models after applying the desire logic

@@ -957,7 +957,7 @@ class VotingEnsemble(ParallelRun):
             raise Exception('Expected "inputs" to be a list')
         return request

-    def _normalize_weights(self, weights_dict:
+    def _normalize_weights(self, weights_dict: dict[str, float]):
         """
         Normalized all the weights such that abs(weights_sum - 1.0) <= 0.001
         and adding 0 weight to all the routes that doesn't appear in the dict.
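The VotingEnsemble edits above only modernize annotations (weights: dict[str, float] and list-based signatures for _majority_vote, _mean_vote, logic and _normalize_weights); the voting behaviour is unchanged. For orientation, a hedged sketch of the weights argument based on the _normalize_weights docstring, with hypothetical route names:

ensemble = VotingEnsemble(name="ensemble", weights={"model_a": 3.0, "model_b": 1.0})
# weights are normalized so they sum to ~1.0 ({"model_a": 0.75, "model_b": 0.25});
# routes missing from the dict are given weight 0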
mlrun/serving/server.py
CHANGED
@@ -415,7 +415,7 @@ def create_graph_server(
     return server


-class MockTrigger
+class MockTrigger:
     """mock nuclio event trigger"""

     def __init__(self, kind="", name=""):
@@ -423,7 +423,7 @@ class MockTrigger(object):
         self.name = name


-class MockEvent
+class MockEvent:
     """mock basic nuclio event object"""

     def __init__(
@@ -456,7 +456,7 @@ class MockEvent(object):
         return f"Event(id={self.id}, body={self.body}, method={self.method}, path={self.path}{error})"


-class Response
+class Response:
     def __init__(self, headers=None, body=None, content_type=None, status_code=200):
         self.headers = headers or {}
         self.body = body
@@ -563,7 +563,7 @@ class GraphContext:
             _,
             _,
             function_status,
-        ) = mlrun.runtimes.function.get_nuclio_deploy_status(name, project, tag)
+        ) = mlrun.runtimes.nuclio.function.get_nuclio_deploy_status(name, project, tag)

         if state in ["error", "unhealthy"]:
             raise ValueError(
mlrun/serving/states.py
CHANGED
@@ -556,6 +556,34 @@ class ErrorStep(TaskStep):
     _dict_fields = _task_step_fields + ["before", "base_step"]
     _default_class = ""

+    def __init__(
+        self,
+        class_name: Union[str, type] = None,
+        class_args: dict = None,
+        handler: str = None,
+        name: str = None,
+        after: list = None,
+        full_event: bool = None,
+        function: str = None,
+        responder: bool = None,
+        input_path: str = None,
+        result_path: str = None,
+    ):
+        super().__init__(
+            class_name=class_name,
+            class_args=class_args,
+            handler=handler,
+            name=name,
+            after=after,
+            full_event=full_event,
+            function=function,
+            responder=responder,
+            input_path=input_path,
+            result_path=result_path,
+        )
+        self.before = None
+        self.base_step = None
+

 class RouterStep(TaskStep):
     """router step, implement routing logic for running child routes"""
@@ -1514,6 +1542,7 @@ def _init_async_objects(context, steps):
             result_path=step.result_path,
             name=step.name,
             context=context,
+            pass_context=step._inject_context,
         )
         if (
             respond_supported
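ErrorStep previously relied on TaskStep's constructor; the explicit __init__ added above forwards the same keyword arguments and initializes the before/base_step bookkeeping fields. A minimal construction sketch (the handler class name is hypothetical); in practice the step is usually wired into a serving graph through its error-handling helpers rather than built directly:

from mlrun.serving.states import ErrorStep

catcher = ErrorStep(class_name="MyErrorCatcher", name="catch-errors", full_event=True)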
mlrun/serving/utils.py
CHANGED
@@ -46,7 +46,7 @@ def _update_result_body(result_path, event_body, result):
 class StepToDict:
     """auto serialization of graph steps to a python dictionary"""

-    def to_dict(self, fields=None, exclude=None):
+    def to_dict(self, fields: list = None, exclude: list = None, strip: bool = False):
         """convert the step object to a python dictionary"""
         fields = fields or getattr(self, "_dict_fields", None)
         if not fields:
@@ -97,5 +97,5 @@ class StepToDict:
 class RouterToDict(StepToDict):
     _STEP_KIND = "router"

-    def to_dict(self, fields=None, exclude=None):
-        return super().to_dict(exclude=["routes"])
+    def to_dict(self, fields: list = None, exclude: list = None, strip: bool = False):
+        return super().to_dict(exclude=["routes"], strip=strip)
mlrun/serving/v1_serving.py
CHANGED
@@ -18,7 +18,6 @@ import socket
 from copy import deepcopy
 from datetime import datetime
 from io import BytesIO
-from typing import Dict
 from urllib.request import urlopen

 import nuclio
@@ -26,7 +25,7 @@ import nuclio
 import mlrun
 from mlrun.errors import err_to_str
 from mlrun.platforms.iguazio import OutputStream
-from mlrun.runtimes import RemoteRuntime
+from mlrun.runtimes.nuclio.function import RemoteRuntime

 serving_handler = "handler"

@@ -97,16 +96,16 @@ class MLModelServer:
         if not self.ready and not self.model:
             raise ValueError("please specify a load method or a model object")

-    def preprocess(self, request:
+    def preprocess(self, request: dict) -> dict:
         return request

-    def postprocess(self, request:
+    def postprocess(self, request: dict) -> dict:
         return request

-    def predict(self, request:
+    def predict(self, request: dict) -> dict:
         raise NotImplementedError()

-    def explain(self, request:
+    def explain(self, request: dict) -> dict:
         raise NotImplementedError()


@@ -200,7 +199,7 @@ class _ServerInfo:
 class HTTPHandler:
     kind = ""

-    def __init__(self, models:
+    def __init__(self, models: dict, server: _ServerInfo = None):
         self.models = models
         self.srvinfo = server
         self.context = None
mlrun/serving/v2_serving.py
CHANGED
@@ -15,7 +15,7 @@
 import threading
 import time
 import traceback
-from typing import
+from typing import Union

 import mlrun.common.model_monitoring
 import mlrun.common.schemas.model_monitoring
@@ -29,8 +29,6 @@ from .utils import StepToDict, _extract_input_data, _update_result_body


 class V2ModelServer(StepToDict):
-    """base model serving class (v2), using similar API to KFServing v2 and Triton"""
-
     def __init__(
         self,
         context=None,
@@ -221,6 +219,8 @@ class V2ModelServer(StepToDict):

     def _pre_event_processing_actions(self, event, event_body, op):
         self._check_readiness(event)
+        if "_dict" in op:
+            event_body = self._inputs_to_list(event_body)
         request = self.preprocess(event_body, op)
         return self.validate(request, op)

@@ -237,7 +237,12 @@ class V2ModelServer(StepToDict):
         if not op and event.method != "GET":
             op = "infer"

-        if
+        if (
+            op == "predict"
+            or op == "infer"
+            or op == "infer_dict"
+            or op == "predict_dict"
+        ):
             # predict operation
             request = self._pre_event_processing_actions(event, event_body, op)
             try:
@@ -362,22 +367,59 @@ class V2ModelServer(StepToDict):

         return request

-    def preprocess(self, request:
+    def preprocess(self, request: dict, operation) -> dict:
         """preprocess the event body before validate and action"""
         return request

-    def postprocess(self, request:
+    def postprocess(self, request: dict) -> dict:
         """postprocess, before returning response"""
         return request

-    def predict(self, request:
+    def predict(self, request: dict) -> dict:
         """model prediction operation"""
         raise NotImplementedError()

-    def explain(self, request:
+    def explain(self, request: dict) -> dict:
         """model explain operation"""
         raise NotImplementedError()

+    def _inputs_to_list(self, request: dict) -> dict:
+        """
+        Convert the inputs from list of dictionary / dictionary to list of lists / list
+        where the internal list order is according to the ArtifactModel inputs.
+
+        :param request: event
+        :return: evnet body converting the inputs to be list of lists
+        """
+        if self.model_spec and self.model_spec.inputs:
+            input_order = [feature.name for feature in self.model_spec.inputs]
+        else:
+            raise mlrun.MLRunInvalidArgumentError(
+                "In order to use predict_dict or infer_dict operation you have to provide `model_path` "
+                "to the model server and to load it by `load()` function"
+            )
+        inputs = request.get("inputs")
+        try:
+            if isinstance(inputs, list) and all(
+                isinstance(item, dict) for item in inputs
+            ):
+                new_inputs = [
+                    [input_dict[key] for key in input_order] for input_dict in inputs
+                ]
+            elif isinstance(inputs, dict):
+                new_inputs = [inputs[key] for key in input_order]
+            else:
+                raise mlrun.MLRunInvalidArgumentError(
+                    "When using predict_dict or infer_dict operation the inputs must be "
+                    "of type `list[dict]` or `dict`"
+                )
+        except KeyError:
+            raise mlrun.MLRunInvalidArgumentError(
+                f"Input dictionary don't contain all the necessary input keys : {input_order}"
+            )
+        request["inputs"] = new_inputs
+        return request
+

 class _ModelLogPusher:
     def __init__(self, model, context, output_stream=None):
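The new infer_dict/predict_dict operations let callers send named inputs; _inputs_to_list (added above) reorders them into the positional lists the model expects, using the feature order of the loaded model spec (which requires passing model_path so load() can populate it). A sketch of the conversion with hypothetical feature names:

# model spec declares inputs in the order ["age", "income"]
event_body = {"inputs": [{"income": 50000, "age": 31}, {"income": 72000, "age": 45}]}
# _inputs_to_list reorders each dict: {"inputs": [[31, 50000], [45, 72000]]}
# a single dict body {"age": 31, "income": 50000} becomes {"inputs": [31, 50000]}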
mlrun/track/tracker_manager.py
CHANGED
@@ -14,7 +14,7 @@

 import importlib
 import inspect
-from typing import
+from typing import Union

 import mlrun.errors
 from mlrun.config import config as mlconf
@@ -28,7 +28,7 @@ _TRACKERS = ["mlflow"]

 # A list for the available trackers during runtime. It will be setup at the beginning of the run by the function
 # `_collect_available_trackers`:
-_AVAILABLE_TRACKERS:
+_AVAILABLE_TRACKERS: list[Tracker] = None


 class TrackerManager(metaclass=Singleton):
@@ -41,7 +41,7 @@ class TrackerManager(metaclass=Singleton):
         """
         Initialize a new empty tracker manager.
         """
-        self._trackers:
+        self._trackers: list[Tracker] = []

         # Check general config for tracking usage, if false we return an empty manager
         if mlconf.external_platform_tracking.enabled:
mlrun/track/trackers/mlflow_tracker.py
CHANGED
@@ -15,7 +15,6 @@ import os
 import pathlib
 import tempfile
 import zipfile
-from typing import List

 import mlflow
 import mlflow.entities
@@ -526,7 +525,7 @@ class MLFlowTracker(Tracker):
         )

     @staticmethod
-    def _schema_to_feature(schema: mlflow.types.Schema) ->
+    def _schema_to_feature(schema: mlflow.types.Schema) -> list[Feature]:
         """
         Cast MLFlow schema to MLRun features.

mlrun/utils/async_http.py
CHANGED
@@ -16,7 +16,7 @@
 import asyncio
 import logging
 import typing
-from typing import
+from typing import Optional

 import aiohttp
 import aiohttp.http_exceptions
@@ -38,12 +38,10 @@ class AsyncClientWithRetry(RetryClient):
         self,
         max_retries: int = config.http_retry_defaults.max_retries,
         retry_backoff_factor: float = config.http_retry_defaults.backoff_factor,
-        retry_on_status_codes:
-            int
-        ] = config.http_retry_defaults.status_codes,
+        retry_on_status_codes: list[int] = config.http_retry_defaults.status_codes,
         retry_on_exception: bool = True,
         raise_for_status: bool = True,
-        blacklisted_methods: typing.Optional[
+        blacklisted_methods: typing.Optional[list[str]] = None,
         logger: logging.Logger = None,
         *args,
         **kwargs,
@@ -67,7 +65,7 @@ class AsyncClientWithRetry(RetryClient):

     def _make_requests(
         self,
-        params_list:
+        params_list: list[RequestParams],
         retry_options: Optional[RetryOptionsBase] = None,
         raise_for_status: Optional[bool] = None,
     ) -> "_CustomRequestContext":
@@ -102,7 +100,7 @@ class ExponentialRetryOverride(ExponentialRetry):
     def __init__(
         self,
         retry_on_exception: bool,
-        blacklisted_methods:
+        blacklisted_methods: list[str],
         *args,
         **kwargs,
     ):
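AsyncClientWithRetry keeps its behaviour; only the annotations move to plain list[int]/list[str]. A hedged usage sketch, assuming an async caller (the URL and status codes are illustrative); since the class extends RetryClient it is used like a regular aiohttp client session:

from mlrun.utils.async_http import AsyncClientWithRetry

async def probe():
    async with AsyncClientWithRetry(max_retries=3, retry_on_status_codes=[502, 503]) as client:
        async with client.get("https://example.com/healthz") as response:
            return response.status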
mlrun/utils/azure_vault.py
CHANGED
@@ -63,7 +63,7 @@ class AzureVaultStore:
             mlconf.secret_stores.azure_vault.secret_path + "/" + file_name
         )
         if os.path.isfile(full_path):
-            with open(full_path
+            with open(full_path) as secret_file:
                 contents = secret_file.read()
                 return contents
         return None
mlrun/utils/clones.py
CHANGED
@@ -18,7 +18,6 @@ import tarfile
 import tempfile
 import zipfile
 from os import path, remove
-from typing import Tuple
 from urllib.parse import urlparse

 from git import Repo
@@ -91,7 +90,7 @@ def get_repo_url(repo):
     return url


-def add_credentials_git_remote_url(url: str, secrets=None) ->
+def add_credentials_git_remote_url(url: str, secrets=None) -> tuple[str, bool]:
     """Enrich a Git remote URL with credential related secrets, if any are available
     If no secrets are supplied, or if the secrets are insufficient, the original URL is returned
     Besides the URL, this function also returns a bool indicating if any enrichment was done
mlrun/utils/condition_evaluator.py
CHANGED
@@ -19,7 +19,7 @@ from mlrun.utils import logger


 def evaluate_condition_in_separate_process(
-    condition: str, context:
+    condition: str, context: dict[str, typing.Any], timeout: int = 5
 ):
     if not condition:
         return True
@@ -44,13 +44,13 @@ def evaluate_condition_in_separate_process(


 def _evaluate_condition_wrapper(
-    connection, condition: str, context:
+    connection, condition: str, context: dict[str, typing.Any]
 ):
     connection.send(_evaluate_condition(condition, context))
     return connection.close()


-def _evaluate_condition(condition: str, context:
+def _evaluate_condition(condition: str, context: dict[str, typing.Any]):
     import jinja2.sandbox

     jinja_env = jinja2.sandbox.SandboxedEnvironment()
mlrun/utils/db.py
CHANGED
@@ -19,7 +19,7 @@ from sqlalchemy.orm import class_mapper


 class BaseModel:
-    def to_dict(self, exclude=None):
+    def to_dict(self, exclude=None, strip: bool = False):
         """
         NOTE - this function (currently) does not handle serializing relationships
         """
@@ -44,10 +44,10 @@ class HasStruct(BaseModel):
     def struct(self, value):
         self.body = pickle.dumps(value)

-    def to_dict(self, exclude=None):
+    def to_dict(self, exclude=None, strip: bool = False):
         """
         NOTE - this function (currently) does not handle serializing relationships
         """
         exclude = exclude or []
         exclude.append("body")
-        return super().to_dict(exclude)
+        return super().to_dict(exclude, strip=strip)