mlrun 1.7.0rc20__py3-none-any.whl → 1.7.0rc28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mlrun might be problematic.
- mlrun/__main__.py +10 -8
- mlrun/alerts/alert.py +55 -18
- mlrun/api/schemas/__init__.py +3 -3
- mlrun/artifacts/manager.py +26 -0
- mlrun/common/constants.py +3 -2
- mlrun/common/formatters/__init__.py +1 -0
- mlrun/common/formatters/artifact.py +26 -3
- mlrun/common/formatters/base.py +44 -9
- mlrun/common/formatters/function.py +12 -7
- mlrun/common/formatters/run.py +26 -0
- mlrun/common/helpers.py +11 -0
- mlrun/common/schemas/__init__.py +4 -0
- mlrun/common/schemas/alert.py +5 -9
- mlrun/common/schemas/api_gateway.py +64 -16
- mlrun/common/schemas/artifact.py +11 -0
- mlrun/common/schemas/constants.py +3 -0
- mlrun/common/schemas/feature_store.py +58 -28
- mlrun/common/schemas/model_monitoring/constants.py +21 -12
- mlrun/common/schemas/model_monitoring/model_endpoints.py +0 -12
- mlrun/common/schemas/pipeline.py +16 -0
- mlrun/common/schemas/project.py +17 -0
- mlrun/common/schemas/runs.py +17 -0
- mlrun/common/schemas/schedule.py +1 -1
- mlrun/common/types.py +6 -0
- mlrun/config.py +17 -25
- mlrun/datastore/azure_blob.py +2 -1
- mlrun/datastore/datastore.py +3 -3
- mlrun/datastore/google_cloud_storage.py +6 -2
- mlrun/datastore/snowflake_utils.py +3 -1
- mlrun/datastore/sources.py +26 -11
- mlrun/datastore/store_resources.py +2 -0
- mlrun/datastore/targets.py +68 -16
- mlrun/db/base.py +83 -2
- mlrun/db/httpdb.py +280 -63
- mlrun/db/nopdb.py +60 -3
- mlrun/errors.py +5 -3
- mlrun/execution.py +28 -13
- mlrun/feature_store/feature_vector.py +8 -0
- mlrun/feature_store/retrieval/spark_merger.py +13 -2
- mlrun/launcher/local.py +4 -0
- mlrun/launcher/remote.py +1 -0
- mlrun/model.py +32 -3
- mlrun/model_monitoring/api.py +7 -52
- mlrun/model_monitoring/applications/base.py +5 -7
- mlrun/model_monitoring/applications/histogram_data_drift.py +1 -1
- mlrun/model_monitoring/db/stores/__init__.py +37 -24
- mlrun/model_monitoring/db/stores/base/store.py +40 -1
- mlrun/model_monitoring/db/stores/sqldb/sql_store.py +42 -87
- mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +27 -35
- mlrun/model_monitoring/db/tsdb/__init__.py +15 -15
- mlrun/model_monitoring/db/tsdb/base.py +1 -14
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +22 -18
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +86 -56
- mlrun/model_monitoring/helpers.py +34 -9
- mlrun/model_monitoring/stream_processing.py +12 -11
- mlrun/model_monitoring/writer.py +11 -11
- mlrun/projects/operations.py +5 -0
- mlrun/projects/pipelines.py +35 -21
- mlrun/projects/project.py +216 -107
- mlrun/render.py +10 -5
- mlrun/run.py +15 -5
- mlrun/runtimes/__init__.py +2 -0
- mlrun/runtimes/base.py +17 -4
- mlrun/runtimes/daskjob.py +8 -1
- mlrun/runtimes/databricks_job/databricks_runtime.py +1 -0
- mlrun/runtimes/local.py +23 -4
- mlrun/runtimes/nuclio/application/application.py +0 -2
- mlrun/runtimes/nuclio/function.py +31 -2
- mlrun/runtimes/nuclio/serving.py +9 -6
- mlrun/runtimes/pod.py +5 -29
- mlrun/runtimes/remotesparkjob.py +8 -2
- mlrun/serving/__init__.py +8 -1
- mlrun/serving/routers.py +75 -59
- mlrun/serving/server.py +11 -0
- mlrun/serving/states.py +80 -8
- mlrun/serving/utils.py +19 -11
- mlrun/serving/v2_serving.py +66 -39
- mlrun/utils/helpers.py +91 -11
- mlrun/utils/logger.py +36 -2
- mlrun/utils/notifications/notification/base.py +43 -7
- mlrun/utils/notifications/notification/git.py +21 -0
- mlrun/utils/notifications/notification/slack.py +9 -14
- mlrun/utils/notifications/notification/webhook.py +41 -1
- mlrun/utils/notifications/notification_pusher.py +3 -9
- mlrun/utils/regex.py +9 -0
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.7.0rc20.dist-info → mlrun-1.7.0rc28.dist-info}/METADATA +16 -9
- {mlrun-1.7.0rc20.dist-info → mlrun-1.7.0rc28.dist-info}/RECORD +92 -91
- {mlrun-1.7.0rc20.dist-info → mlrun-1.7.0rc28.dist-info}/WHEEL +1 -1
- {mlrun-1.7.0rc20.dist-info → mlrun-1.7.0rc28.dist-info}/LICENSE +0 -0
- {mlrun-1.7.0rc20.dist-info → mlrun-1.7.0rc28.dist-info}/entry_points.txt +0 -0
- {mlrun-1.7.0rc20.dist-info → mlrun-1.7.0rc28.dist-info}/top_level.txt +0 -0
mlrun/serving/states.py
CHANGED

@@ -12,7 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__all__ = [
+__all__ = [
+    "TaskStep",
+    "RouterStep",
+    "RootFlowStep",
+    "ErrorStep",
+    "MonitoringApplicationStep",
+]
 
 import os
 import pathlib
@@ -55,6 +61,7 @@ class StepKinds:
     choice = "choice"
     root = "root"
     error_step = "error_step"
+    monitoring_application = "monitoring_application"
 
 
 _task_step_fields = [
@@ -485,13 +492,15 @@ class TaskStep(BaseStep):
             class_args[key] = arg
         class_args.update(extra_kwargs)
 
-        [7 lines removed, not rendered in the diff view]
+        if not isinstance(self, MonitoringApplicationStep):
+            # add common args (name, context, ..) only if target class can accept them
+            argspec = getfullargspec(class_object)
+
+            for key in ["name", "context", "input_path", "result_path", "full_event"]:
+                if argspec.varkw or key in argspec.args:
+                    class_args[key] = getattr(self, key)
+            if argspec.varkw or "graph_step" in argspec.args:
+                class_args["graph_step"] = self
         return class_args
 
     def get_step_class_object(self, namespace):
@@ -582,6 +591,39 @@ class TaskStep(BaseStep):
         return event
 
 
+class MonitoringApplicationStep(TaskStep):
+    """monitoring application execution step, runs users class code"""
+
+    kind = "monitoring_application"
+    _default_class = ""
+
+    def __init__(
+        self,
+        class_name: Union[str, type] = None,
+        class_args: dict = None,
+        handler: str = None,
+        name: str = None,
+        after: list = None,
+        full_event: bool = None,
+        function: str = None,
+        responder: bool = None,
+        input_path: str = None,
+        result_path: str = None,
+    ):
+        super().__init__(
+            class_name=class_name,
+            class_args=class_args,
+            handler=handler,
+            name=name,
+            after=after,
+            full_event=full_event,
+            function=function,
+            responder=responder,
+            input_path=input_path,
+            result_path=result_path,
+        )
+
+
 class ErrorStep(TaskStep):
     """error execution step, runs a class or handler"""
 
@@ -790,6 +832,35 @@ class QueueStep(BaseStep):
     def async_object(self):
         return self._async_object
 
+    def to(
+        self,
+        class_name: Union[str, StepToDict] = None,
+        name: str = None,
+        handler: str = None,
+        graph_shape: str = None,
+        function: str = None,
+        full_event: bool = None,
+        input_path: str = None,
+        result_path: str = None,
+        **class_args,
+    ):
+        if not function:
+            name = get_name(name, class_name)
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                f"step '{name}' must specify a function, because it follows a queue step"
+            )
+        return super().to(
+            class_name,
+            name,
+            handler,
+            graph_shape,
+            function,
+            full_event,
+            input_path,
+            result_path,
+            **class_args,
+        )
+
     def run(self, event, *args, **kwargs):
         data = event.body
         if not data:
@@ -1323,6 +1394,7 @@ classes_map = {
     "flow": FlowStep,
     "queue": QueueStep,
     "error_step": ErrorStep,
+    "monitoring_application": MonitoringApplicationStep,
 }
 
 
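The states.py changes above introduce a new graph step kind, "monitoring_application", export the step classes through __all__, and register the new class in classes_map so serialized graphs can be rebuilt. Below is a minimal hedged sketch of what that registration implies, based only on the class definition and map entry shown in this diff; the user class path is hypothetical.

from mlrun.serving.states import MonitoringApplicationStep, classes_map

# Construct the new step the same way a TaskStep is constructed; only the kind differs.
step = MonitoringApplicationStep(
    class_name="my_package.MyMonitoringApp",  # hypothetical user application class
    name="my-monitoring-app",
)
assert step.kind == "monitoring_application"

# Graph (de)serialization resolves step classes through this kind-to-class registry.
assert classes_map["monitoring_application"] is MonitoringApplicationStep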
mlrun/serving/utils.py
CHANGED

@@ -46,6 +46,15 @@ def _update_result_body(result_path, event_body, result):
 class StepToDict:
     """auto serialization of graph steps to a python dictionary"""
 
+    meta_keys = [
+        "context",
+        "name",
+        "input_path",
+        "result_path",
+        "full_event",
+        "kwargs",
+    ]
+
     def to_dict(self, fields: list = None, exclude: list = None, strip: bool = False):
         """convert the step object to a python dictionary"""
         fields = fields or getattr(self, "_dict_fields", None)
@@ -54,24 +63,16 @@ class StepToDict:
         if exclude:
             fields = [field for field in fields if field not in exclude]
 
-        meta_keys = [
-            "context",
-            "name",
-            "input_path",
-            "result_path",
-            "full_event",
-            "kwargs",
-        ]
         args = {
             key: getattr(self, key)
             for key in fields
-            if getattr(self, key, None) is not None and key not in meta_keys
+            if getattr(self, key, None) is not None and key not in self.meta_keys
         }
         # add storey kwargs or extra kwargs
         if "kwargs" in fields and (hasattr(self, "kwargs") or hasattr(self, "_kwargs")):
             kwargs = getattr(self, "kwargs", {}) or getattr(self, "_kwargs", {})
             for key, value in kwargs.items():
-                if key not in meta_keys:
+                if key not in self.meta_keys:
                     args[key] = value
 
         mod_name = self.__class__.__module__
@@ -80,7 +81,9 @@ class StepToDict:
             class_path = f"{mod_name}.{class_path}"
         struct = {
             "class_name": class_path,
-            "name": self.name
+            "name": self.name
+            if hasattr(self, "name") and self.name
+            else self.__class__.__name__,
             "class_args": args,
         }
         if hasattr(self, "_STEP_KIND"):
@@ -94,6 +97,11 @@ class StepToDict:
         return struct
 
 
+class MonitoringApplicationToDict(StepToDict):
+    _STEP_KIND = "monitoring_application"
+    meta_keys = []
+
+
 class RouterToDict(StepToDict):
     _STEP_KIND = "router"
 
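The utils.py change promotes meta_keys from a local variable inside to_dict() to a class attribute, which lets subclasses such as the new MonitoringApplicationToDict mixin override it (here, to an empty list) so that fields like "name" are serialized into class_args instead of being stripped as step metadata. A hedged sketch of that effect, with hypothetical user classes and an explicit fields list so the example does not rely on any field-discovery behavior outside this diff:

from mlrun.serving.utils import MonitoringApplicationToDict, StepToDict


class RegularStep(StepToDict):
    def __init__(self, name=None, threshold=0.5):
        self.name = name
        self.threshold = threshold


class MonitoringStep(MonitoringApplicationToDict):
    def __init__(self, name=None, threshold=0.5):
        self.name = name
        self.threshold = threshold


# to_dict() keeps only serialized args with `key not in self.meta_keys`, so "name"
# is excluded from class_args for RegularStep but included for MonitoringStep.
print(RegularStep(name="a", threshold=0.9).to_dict(fields=["name", "threshold"]))
print(MonitoringStep(name="a", threshold=0.9).to_dict(fields=["name", "threshold"]))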
mlrun/serving/v2_serving.py
CHANGED

@@ -528,7 +528,13 @@ def _init_endpoint_record(
         return None
 
     # Generating version model value based on the model name and model version
-    if model.version:
+    if model.model_path and model.model_path.startswith("store://"):
+        # Enrich the model server with the model artifact metadata
+        model.get_model()
+        if not model.version:
+            # Enrich the model version with the model artifact tag
+            model.version = model.model_spec.tag
+        model.labels = model.model_spec.labels
         versioned_model_name = f"{model.name}:{model.version}"
     else:
         versioned_model_name = f"{model.name}:latest"
@@ -538,48 +544,69 @@ def _init_endpoint_record(
         function_uri=graph_server.function_uri, versioned_model=versioned_model_name
     ).uid
 
-    # If model endpoint object was found in DB, skip the creation process.
     try:
-        mlrun.get_run_db().get_model_endpoint(
-        [1 line removed, not rendered in the diff view]
+        model_ep = mlrun.get_run_db().get_model_endpoint(
+            project=project, endpoint_id=uid
+        )
     except mlrun.errors.MLRunNotFoundError:
-        [20 lines removed, not rendered in the diff view]
-            status=mlrun.common.schemas.ModelEndpointStatus(
-                endpoint_type=mlrun.common.schemas.model_monitoring.EndpointType.NODE_EP
+        model_ep = None
+    except mlrun.errors.MLRunBadRequestError as err:
+        logger.debug(
+            f"Cant reach to model endpoints store, due to : {err}",
+        )
+        return
+
+    if model.context.server.track_models and not model_ep:
+        logger.debug("Creating a new model endpoint record", endpoint_id=uid)
+        model_endpoint = mlrun.common.schemas.ModelEndpoint(
+            metadata=mlrun.common.schemas.ModelEndpointMetadata(
+                project=project, labels=model.labels, uid=uid
+            ),
+            spec=mlrun.common.schemas.ModelEndpointSpec(
+                function_uri=graph_server.function_uri,
+                model=versioned_model_name,
+                model_class=model.__class__.__name__,
+                model_uri=model.model_path,
+                stream_path=config.model_endpoint_monitoring.store_prefixes.default.format(
+                    project=project, kind="stream"
                 ),
-        [7 lines removed, not rendered in the diff view]
-                model_endpoint=model_endpoint.dict(),
-            )
+                active=True,
+                monitoring_mode=mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled,
+            ),
+            status=mlrun.common.schemas.ModelEndpointStatus(
+                endpoint_type=mlrun.common.schemas.model_monitoring.EndpointType.NODE_EP
+            ),
+        )
 
-        [2 lines removed, not rendered in the diff view]
+        db = mlrun.get_run_db()
+        db.create_model_endpoint(
+            project=project,
+            endpoint_id=uid,
+            model_endpoint=model_endpoint.dict(),
+        )
 
-        [2 lines removed, not rendered in the diff view]
+    elif (
+        model_ep
+        and (
+            model_ep.spec.monitoring_mode
+            == mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled
+        )
+        != model.context.server.track_models
+    ):
+        monitoring_mode = (
+            mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled
+            if model.context.server.track_models
+            else mlrun.common.schemas.model_monitoring.ModelMonitoringMode.disabled
+        )
+        db = mlrun.get_run_db()
+        db.patch_model_endpoint(
+            project=project,
+            endpoint_id=uid,
+            attributes={"monitoring_mode": monitoring_mode},
+        )
+        logger.debug(
+            f"Updating model endpoint monitoring_mode to {monitoring_mode}",
+            endpoint_id=uid,
+        )
 
     return uid
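The rewritten _init_endpoint_record now looks up any existing model endpoint record first, creates one only when model tracking is enabled and no record exists, and patches monitoring_mode when an existing record disagrees with the server's track_models setting. Below is a standalone distillation of that branching; the names are local to this sketch and only the decision logic mirrors the diff.

from typing import Optional


def decide_endpoint_action(track_models: bool, existing_monitoring_mode: Optional[str]) -> str:
    """existing_monitoring_mode is None when no endpoint record was found."""
    if existing_monitoring_mode is None:
        # MLRunNotFoundError path: create a record only when tracking is enabled.
        return "create" if track_models else "skip"
    currently_enabled = existing_monitoring_mode == "enabled"
    if currently_enabled != track_models:
        # Record exists but disagrees with the server setting: patch monitoring_mode.
        return "patch"
    return "skip"


assert decide_endpoint_action(True, None) == "create"
assert decide_endpoint_action(False, "enabled") == "patch"
assert decide_endpoint_action(True, "enabled") == "skip"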
mlrun/utils/helpers.py
CHANGED

@@ -26,7 +26,7 @@ import sys
 import typing
 import warnings
 from datetime import datetime, timezone
-from importlib import import_module
+from importlib import import_module, reload
 from os import path
 from types import ModuleType
 from typing import Any, Optional
@@ -109,10 +109,13 @@ def get_artifact_target(item: dict, project=None):
     db_key = item["spec"].get("db_key")
     project_str = project or item["metadata"].get("project")
     tree = item["metadata"].get("tree")
+    tag = item["metadata"].get("tag")
 
     kind = item.get("kind")
     if kind in ["dataset", "model", "artifact"] and db_key:
         target = f"{DB_SCHEMA}://{StorePrefix.Artifact}/{project_str}/{db_key}"
+        if tag:
+            target = f"{target}:{tag}"
         if tree:
             target = f"{target}@{tree}"
         return target
@@ -149,7 +152,7 @@ if is_ipython and config.nest_asyncio_enabled in ["1", "True"]:
     nest_asyncio.apply()
 
 
-class run_keys:
+class RunKeys:
     input_path = "input_path"
     output_path = "output_path"
     inputs = "inputs"
@@ -160,6 +163,10 @@ class run_keys:
     secrets = "secret_sources"
 
 
+# for Backward compatibility
+run_keys = RunKeys
+
+
 def verify_field_regex(
     field_name,
     field_value,
@@ -659,7 +666,7 @@ def parse_artifact_uri(uri, default_project=""):
     [3] = tag
     [4] = tree
     """
-    uri_pattern =
+    uri_pattern = mlrun.utils.regex.artifact_uri_pattern
     match = re.match(uri_pattern, uri)
     if not match:
         raise ValueError(
@@ -674,6 +681,8 @@ def parse_artifact_uri(uri, default_project=""):
             raise ValueError(
                 f"illegal store path '{uri}', iteration must be integer value"
             )
+    else:
+        iteration = 0
     return (
         group_dict["project"] or default_project,
         group_dict["key"],
@@ -1019,16 +1028,35 @@ def create_class(pkg_class: str):
     return class_
 
 
-def create_function(pkg_func: str):
+def create_function(pkg_func: str, reload_modules: bool = False):
     """Create a function from a package.module.function string
 
     :param pkg_func: full function location,
                      e.g. "sklearn.feature_selection.f_classif"
+    :param reload_modules: reload the function again.
     """
     splits = pkg_func.split(".")
     pkg_module = ".".join(splits[:-1])
     cb_fname = splits[-1]
     pkg_module = __import__(pkg_module, fromlist=[cb_fname])
+
+    if reload_modules:
+        # Even though the function appears in the modules list, we need to reload
+        # the code again because it may have changed
+        try:
+            logger.debug("Reloading module", module=pkg_func)
+            _reload(
+                pkg_module,
+                max_recursion_depth=mlrun.mlconf.function.spec.reload_max_recursion_depth,
+            )
+        except Exception as exc:
+            logger.warning(
+                "Failed to reload module. Not all associated modules can be reloaded, import them manually."
+                "Or, with Jupyter, restart the Python kernel.",
+                module=pkg_func,
+                err=mlrun.errors.err_to_str(exc),
+            )
+
     function_ = getattr(pkg_module, cb_fname)
     return function_
 
@@ -1086,8 +1114,14 @@ def get_class(class_name, namespace=None):
     return class_object
 
 
-def get_function(function, namespace):
-    """
+def get_function(function, namespaces, reload_modules: bool = False):
+    """Return function callable object from function name string
+
+    :param function: path to the function ([class_name::]function)
+    :param namespaces: one or list of namespaces/modules to search the function in
+    :param reload_modules: reload the function again
+    :return: function handler (callable)
+    """
     if callable(function):
         return function
 
@@ -1096,12 +1130,12 @@ def get_function(function, namespace):
         if not function.endswith(")"):
             raise ValueError('function expression must start with "(" and end with ")"')
         return eval("lambda event: " + function[1:-1], {}, {})
-    function_object = _search_in_namespaces(function, namespace)
+    function_object = _search_in_namespaces(function, namespaces)
    if function_object is not None:
         return function_object
 
     try:
-        function_object = create_function(function)
+        function_object = create_function(function, reload_modules)
     except (ImportError, ValueError) as exc:
         raise ImportError(
             f"state/function init failed, handler '{function}' not found"
@@ -1110,19 +1144,24 @@ def get_function(function, namespace):
 
 
 def get_handler_extended(
-    handler_path: str, context=None, class_args: dict = None, namespaces=None
+    handler_path: str,
+    context=None,
+    class_args: dict = None,
+    namespaces=None,
+    reload_modules: bool = False,
 ):
-    """
+    """Get function handler from [class_name::]handler string
 
     :param handler_path: path to the function ([class_name::]handler)
     :param context: MLRun function/job client context
     :param class_args: optional dict of class init kwargs
     :param namespaces: one or list of namespaces/modules to search the handler in
+    :param reload_modules: reload the function again
    :return: function handler (callable)
     """
     class_args = class_args or {}
     if "::" not in handler_path:
-        return get_function(handler_path, namespaces)
+        return get_function(handler_path, namespaces, reload_modules)
 
     splitted = handler_path.split("::")
     class_path = splitted[0].strip()
@@ -1227,6 +1266,10 @@ def _fill_project_path_template(artifact_path, project):
     return artifact_path
 
 
+def to_non_empty_values_dict(input_dict: dict) -> dict:
+    return {key: value for key, value in input_dict.items() if value}
+
+
 def str_to_timestamp(time_str: str, now_time: Timestamp = None):
     """convert fixed/relative time string to Pandas Timestamp
 
@@ -1284,6 +1327,7 @@ def format_run(run: PipelineRun, with_project=False) -> dict:
         "scheduled_at",
         "finished_at",
         "description",
+        "experiment_id",
     ]
 
     if with_project:
@@ -1573,6 +1617,30 @@ def additional_filters_warning(additional_filters, class_name):
     )
 
 
+def merge_with_precedence(first_dict: dict, second_dict: dict) -> dict:
+    """
+    Merge two dictionaries with precedence given to keys from the second dictionary.
+
+    This function merges two dictionaries, `first_dict` and `second_dict`, where keys from `second_dict`
+    take precedence in case of conflicts. If both dictionaries contain the same key,
+    the value from `second_dict` will overwrite the value from `first_dict`.
+
+    Example:
+        >>> first_dict = {"key1": "value1", "key2": "value2"}
+        >>> second_dict = {"key2": "new_value2", "key3": "value3"}
+        >>> merge_with_precedence(first_dict, second_dict)
+        {'key1': 'value1', 'key2': 'new_value2', 'key3': 'value3'}
+
+    Note:
+        - The merge operation uses the ** operator in Python, which combines key-value pairs
+          from each dictionary. Later dictionaries take precedence when there are conflicting keys.
+    """
+    return {
+        **(first_dict or {}),
+        **(second_dict or {}),
+    }
+
+
 def validate_component_version_compatibility(
     component_name: typing.Literal["iguazio", "nuclio"], *min_versions: str
 ):
@@ -1628,3 +1696,15 @@ def format_alert_summary(
     result = result.replace("{{name}}", alert.name)
     result = result.replace("{{entity}}", event_data.entity.ids[0])
     return result
+
+
+def _reload(module, max_recursion_depth):
+    """Recursively reload modules."""
+    if max_recursion_depth <= 0:
+        return
+
+    reload(module)
+    for attribute_name in dir(module):
+        attribute = getattr(module, attribute_name)
+        if type(attribute) is ModuleType:
+            _reload(attribute, max_recursion_depth - 1)
mlrun/utils/logger.py
CHANGED

@@ -13,8 +13,10 @@
 # limitations under the License.
 
 import logging
+import os
 import typing
 from enum import Enum
+from functools import cached_property
 from sys import stdout
 from traceback import format_exception
 from typing import IO, Optional, Union
@@ -92,6 +94,16 @@ class HumanReadableFormatter(_BaseFormatter):
 
 
 class HumanReadableExtendedFormatter(HumanReadableFormatter):
+    _colors = {
+        logging.NOTSET: "",
+        logging.DEBUG: "\x1b[34m",
+        logging.INFO: "\x1b[36m",
+        logging.WARNING: "\x1b[33m",
+        logging.ERROR: "\x1b[0;31m",
+        logging.CRITICAL: "\x1b[1;31m",
+    }
+    _color_reset = "\x1b[0m"
+
     def format(self, record) -> str:
         more = ""
         record_with = self._record_with(record)
@@ -113,12 +125,34 @@ class HumanReadableExtendedFormatter(HumanReadableFormatter):
                 [f"{key}: {_format_value(val)}" for key, val in record_with.items()]
             )
         return (
-            "> "
+            f"{self._get_message_color(record.levelno)}> "
             f"{self.formatTime(record, self.datefmt)} "
             f"[{record.name}:{record.levelname.lower()}] "
-            f"{record.getMessage()}{more}"
+            f"{record.getMessage()}{more}{self._get_color_reset()}"
         )
 
+    def _get_color_reset(self):
+        if not self._have_color_support:
+            return ""
+
+        return self._color_reset
+
+    def _get_message_color(self, levelno):
+        if not self._have_color_support:
+            return ""
+
+        return self._colors[levelno]
+
+    @cached_property
+    def _have_color_support(self):
+        if os.environ.get("PYCHARM_HOSTED"):
+            return True
+        if os.environ.get("NO_COLOR"):
+            return False
+        if os.environ.get("CLICOLOR_FORCE"):
+            return True
+        return stdout.isatty()
+
 
 class Logger:
     def __init__(
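The extended formatter now colors log lines by level and decides whether color is supported from the environment: PYCHARM_HOSTED forces color on, NO_COLOR forces it off, CLICOLOR_FORCE forces it on, and otherwise color is used only when stdout is a TTY. A standalone restatement of that precedence (the real check is the cached _have_color_support property above):

import os
import sys


def have_color_support() -> bool:
    # Mirrors the precedence of HumanReadableExtendedFormatter._have_color_support.
    if os.environ.get("PYCHARM_HOSTED"):
        return True
    if os.environ.get("NO_COLOR"):
        return False
    if os.environ.get("CLICOLOR_FORCE"):
        return True
    return sys.stdout.isatty()


print("color output enabled:", have_color_support())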
mlrun/utils/notifications/notification/base.py
CHANGED

@@ -28,6 +28,10 @@ class NotificationBase:
         self.name = name
         self.params = params or {}
 
+    @classmethod
+    def validate_params(cls, params):
+        pass
+
     @property
     def active(self) -> bool:
         return True
@@ -69,16 +73,27 @@ class NotificationBase:
         if custom_html:
             return custom_html
 
-        if self.name:
-            message = f"{self.name}: {message}"
-
         if alert:
             if not event_data:
                 return f"[{severity}] {message}"
-            [4 lines removed, not rendered in the diff view]
+
+            html = f"<h3>[{severity}] {message}</h3>"
+            html += f"<br>{alert.name} alert has occurred<br>"
+            html += f"<br><h4>Project:</h4>{alert.project}<br>"
+            html += f"<br><h4>ID:</h4>{event_data.entity.ids[0]}<br>"
+            html += f"<br><h4>Summary:</h4>{mlrun.utils.helpers.format_alert_summary(alert, event_data)}<br>"
+
+            if event_data.value_dict:
+                html += "<br><h4>Event data:</h4>"
+                for key, value in event_data.value_dict.items():
+                    html += f"{key}: {value}<br>"
+
+            overview_type, url = self._get_overview_type_and_url(alert, event_data)
+            html += f"<br><h4>Overview:</h4><a href={url}>{overview_type}</a>"
+            return html
+
+        if self.name:
+            message = f"{self.name}: {message}"
 
         if not runs:
             return f"[{severity}] {message}"
@@ -90,3 +105,24 @@ class NotificationBase:
         html += "<br>click the hyper links below to see detailed results<br>"
         html += runs.show(display=False, short=True)
         return html
+
+    def _get_overview_type_and_url(
+        self,
+        alert: mlrun.common.schemas.AlertConfig,
+        event_data: mlrun.common.schemas.Event,
+    ) -> (str, str):
+        if (
+            event_data.entity.kind == mlrun.common.schemas.alert.EventEntityKind.JOB
+        ):  # JOB entity
+            uid = event_data.value_dict.get("uid")
+            url = mlrun.utils.helpers.get_ui_url(alert.project, uid)
+            overview_type = "Job overview"
+        else:  # MODEL entity
+            model_name = event_data.value_dict.get("model")
+            model_endpoint_id = event_data.value_dict.get("model_endpoint_id")
+            url = mlrun.utils.helpers.get_model_endpoint_url(
+                alert.project, model_name, model_endpoint_id
+            )
+            overview_type = "Model endpoint"
+
+        return overview_type, url
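NotificationBase gains a validate_params classmethod hook (a no-op in the base class) alongside the richer alert HTML rendering. Below is a hedged sketch of how a concrete notification could use the hook to reject bad configuration before anything is pushed; the subclass is hypothetical and not part of mlrun.

import mlrun.errors
from mlrun.utils.notifications.notification.base import NotificationBase


class MyWebhookNotification(NotificationBase):
    @classmethod
    def validate_params(cls, params):
        # Fail fast when the notification is configured without a target URL.
        if not (params or {}).get("url"):
            raise mlrun.errors.MLRunInvalidArgumentError(
                "webhook notification requires a 'url' parameter"
            )


MyWebhookNotification.validate_params({"url": "https://example.com/hook"})  # passes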