mlrun 1.8.0rc30__py3-none-any.whl → 1.8.0rc32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mlrun might be problematic.
- mlrun/__init__.py +2 -35
- mlrun/api/schemas/__init__.py +1 -6
- mlrun/common/runtimes/constants.py +4 -0
- mlrun/common/schemas/__init__.py +0 -2
- mlrun/common/schemas/model_monitoring/__init__.py +0 -2
- mlrun/common/schemas/model_monitoring/constants.py +1 -6
- mlrun/common/schemas/model_monitoring/grafana.py +17 -11
- mlrun/config.py +9 -36
- mlrun/datastore/storeytargets.py +20 -3
- mlrun/db/base.py +1 -1
- mlrun/db/httpdb.py +5 -4
- mlrun/db/nopdb.py +1 -1
- mlrun/model_monitoring/applications/base.py +111 -40
- mlrun/model_monitoring/applications/results.py +2 -2
- mlrun/model_monitoring/controller.py +4 -3
- mlrun/model_monitoring/db/tsdb/__init__.py +9 -5
- mlrun/model_monitoring/db/tsdb/base.py +60 -39
- mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +117 -52
- mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +140 -14
- mlrun/model_monitoring/helpers.py +16 -15
- mlrun/model_monitoring/stream_processing.py +6 -13
- mlrun/projects/pipelines.py +11 -3
- mlrun/projects/project.py +88 -111
- mlrun/serving/states.py +1 -1
- mlrun/serving/v2_serving.py +20 -10
- mlrun/utils/helpers.py +1 -1
- mlrun/utils/logger.py +13 -10
- mlrun/utils/notifications/notification_pusher.py +24 -0
- mlrun/utils/regex.py +1 -0
- mlrun/utils/version/version.json +2 -2
- {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc32.dist-info}/METADATA +2 -2
- {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc32.dist-info}/RECORD +36 -36
- {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc32.dist-info}/LICENSE +0 -0
- {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc32.dist-info}/WHEEL +0 -0
- {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc32.dist-info}/entry_points.txt +0 -0
- {mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc32.dist-info}/top_level.txt +0 -0
mlrun/projects/project.py
CHANGED
@@ -29,7 +29,6 @@ import zipfile
 from copy import deepcopy
 from os import environ, makedirs, path
 from typing import Callable, Optional, Union, cast
-from urllib.parse import urlparse

 import deprecated
 import dotenv
@@ -71,6 +70,7 @@ from mlrun.datastore.datastore_profile import (
 from mlrun.datastore.vectorstore import VectorStoreCollection
 from mlrun.model_monitoring.helpers import (
     filter_results_by_regex,
+    get_alert_name_from_result_fqn,
     get_result_instance_fqn,
 )
 from mlrun.runtimes.nuclio.function import RemoteRuntime
@@ -2142,7 +2142,8 @@ class MlrunProject(ModelObj):
         reset_policy: mlrun.common.schemas.alert.ResetPolicy = mlrun.common.schemas.alert.ResetPolicy.AUTO,
     ) -> list[mlrun.alerts.alert.AlertConfig]:
         """
-        :param name: AlertConfig name
+        :param name: The name of the AlertConfig template. It will be combined with mep_id, app-name
+            and result name to generate a unique name.
         :param summary: Summary of the alert, will be sent in the generated notifications
         :param endpoints: The endpoints from which metrics will be retrieved to configure the alerts.
                           This `ModelEndpointList` object obtained via the `list_model_endpoints`
@@ -2154,7 +2155,7 @@ class MlrunProject(ModelObj):

             For example:
             [`app1.result-*`, `*.result1`]
-            will match "
+            will match "mep_uid1.app1.result.result-1" and "mep_uid1.app2.result.result1".
             A specific result_name (not a wildcard) will always create a new alert
             config, regardless of whether the result name exists.
         :param severity: Severity of the alert.
@@ -2203,10 +2204,11 @@ class MlrunProject(ModelObj):
         )
         alert_result_names = list(set(specific_result_names + matching_results))
         for result_fqn in alert_result_names:
+            result_fqn_name = get_alert_name_from_result_fqn(result_fqn)
             alerts.append(
                 mlrun.alerts.alert.AlertConfig(
                     project=self.name,
-                    name=name,
+                    name=f"{name}--{result_fqn_name}",
                     summary=summary,
                     severity=severity,
                     entities=alert_constants.EventEntities(
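The net effect of this hunk is that each matching result now gets its own alert config with a unique, result-derived name instead of every config reusing the template name. A rough sketch of the naming (the exact output of get_alert_name_from_result_fqn is an assumption here; the dot-to-dash normalization below is only a placeholder):

    # Hypothetical illustration of the per-result alert naming introduced above.
    name = "high-drift"  # the AlertConfig template name passed by the caller
    result_fqn = "mep_uid1.app1.result.result-1"
    result_fqn_name = result_fqn.replace(".", "-")  # stand-in for get_alert_name_from_result_fqn()
    alert_name = f"{name}--{result_fqn_name}"
    # -> "high-drift--mep_uid1-app1-result-result-1"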
@@ -3671,50 +3673,77 @@ class MlrunProject(ModelObj):

     def set_model_monitoring_credentials(
         self,
-        access_key: Optional[str] = None,
-        stream_path: Optional[str] = None,  # Deprecated
-        tsdb_connection: Optional[str] = None,  # Deprecated
-        replace_creds: bool = False,
         *,
-
-
-
+        tsdb_profile_name: str,
+        stream_profile_name: str,
+        replace_creds: bool = False,
+    ) -> None:
         """
-        Set the credentials that will be used by the project's model monitoring
-
-
-
-        :param access_key: Model monitoring access key for managing user permissions.
-
-            * None - will be set from the system configuration.
-            * v3io - for v3io endpoint store, pass `v3io` and the system will generate the
-              exact path.
-        :param stream_path: (Deprecated) This argument is deprecated. Use ``stream_profile_name`` instead.
-            Path to the model monitoring stream. By default, None. Options:
-
-            * ``"v3io"`` - for v3io stream, pass ``"v3io"`` and the system will generate
-              the exact path.
-            * Kafka - for Kafka stream, provide the full connection string without acustom
-              topic, for example ``"kafka://<some_kafka_broker>:<port>"``.
-        :param tsdb_connection: (Deprecated) Connection string to the time series database. By default, None.
-            Options:
-
-            * v3io - for v3io stream, pass ``"v3io"`` and the system will generate the
-              exact path.
-            * TDEngine - for TDEngine tsdb, provide the full websocket connection URL,
-              for example ``"taosws://<username>:<password>@<host>:<port>"``.
-        :param replace_creds: If True, will override the existing credentials.
-            Please keep in mind that if you already enabled model monitoring on
-            your project this action can cause data loose and will require redeploying
-            all model monitoring functions & model monitoring infra
-            & tracked model server.
-        :param stream_profile_name: The datastore profile name of the stream to be used in model monitoring.
-            The supported profiles are:
+        Set the credentials that will be used by the project's model monitoring infrastructure functions.
+        Please note that you have to set the credentials before deploying any model monitoring application
+        or a tracked serving function.

-
-
+        For example, the full flow for enabling model monitoring infrastructure with **TDEngine** and **Kafka**, is:
+
+        .. code-block:: python
+
+            import mlrun
+            from mlrun.datastore.datastore_profile import (
+                DatastoreProfileKafkaSource,
+                TDEngineDatastoreProfile,
+            )
+
+            project = mlrun.get_or_create_project("mm-infra-setup")
+
+            # Create and register TSDB profile
+            tsdb_profile = TDEngineDatastoreProfile(
+                name="my-tdengine",
+                host="<tdengine-server-ip-address>",
+                port=6041,
+                user="username",
+                password="<tdengine-password>",
+            )
+            project.register_datastore_profile(tsdb_profile)
+
+            # Create and register stream profile
+            stream_profile = DatastoreProfileKafkaSource(
+                name="my-kafka",
+                brokers=["<kafka-broker-ip-address>:9094"],
+                topics=[],  # Keep the topics list empty
+                ## SASL is supported
+                # sasl_user="user1",
+                # sasl_pass="<kafka-sasl-password>",
+            )
+            project.register_datastore_profile(stream_profile)
+
+            # Set model monitoring credentials and enable the infrastructure
+            project.set_model_monitoring_credentials(
+                tsdb_profile_name=tsdb_profile.name,
+                stream_profile_name=stream_profile.name,
+            )
+            project.enable_model_monitoring()
+
+        Note that you will need to change the profiles if you want to use **V3IO** TSDB and stream:
+
+        .. code-block:: python
+
+            from mlrun.datastore.datastore_profile import DatastoreProfileV3io
+
+            # Create and register TSDB profile
+            tsdb_profile = DatastoreProfileV3io(
+                name="my-v3io-tsdb",
+            )
+            project.register_datastore_profile(tsdb_profile)
+
+            # Create and register stream profile
+            stream_profile = DatastoreProfileV3io(
+                name="my-v3io-stream",
+                v3io_access_key=mlrun.mlconf.get_v3io_access_key(),
+            )
+            project.register_datastore_profile(stream_profile)
+
+        In the V3IO datastore, you must provide an explicit access key to the stream, but not to the TSDB.

-            You need to register one of them, and pass the profile's name.
         :param tsdb_profile_name: The datastore profile name of the time-series database to be used in model
             monitoring. The supported profiles are:

@@ -3722,76 +3751,24 @@ class MlrunProject(ModelObj):
             * :py:class:`~mlrun.datastore.datastore_profile.TDEngineDatastoreProfile`

             You need to register one of them, and pass the profile's name.
-
-
-
-        if tsdb_connection:
-            warnings.warn(
-                "The `tsdb_connection` argument is deprecated and will be removed in MLRun version 1.8.0. "
-                "Use `tsdb_profile_name` instead.",
-                FutureWarning,
-            )
-            if tsdb_profile_name:
-                raise mlrun.errors.MLRunValueError(
-                    "If you set `tsdb_profile_name`, you must not pass `tsdb_connection`."
-                )
-            if tsdb_connection == "v3io":
-                tsdb_profile = mlrun.datastore.datastore_profile.DatastoreProfileV3io(
-                    name=mm_constants.DefaultProfileName.TSDB
-                )
-            else:
-                parsed_url = urlparse(tsdb_connection)
-                if parsed_url.scheme != "taosws":
-                    raise mlrun.errors.MLRunValueError(
-                        f"Unsupported `tsdb_connection`: '{tsdb_connection}'."
-                    )
-                tsdb_profile = (
-                    mlrun.datastore.datastore_profile.TDEngineDatastoreProfile(
-                        name=mm_constants.DefaultProfileName.TSDB,
-                        user=parsed_url.username,
-                        password=parsed_url.password,
-                        host=parsed_url.hostname,
-                        port=parsed_url.port,
-                    )
-                )
+        :param stream_profile_name: The datastore profile name of the stream to be used in model monitoring.
+            The supported profiles are:

-
-
+            * :py:class:`~mlrun.datastore.datastore_profile.DatastoreProfileV3io`
+            * :py:class:`~mlrun.datastore.datastore_profile.DatastoreProfileKafkaSource`

-
-
-
-
-
-
-
-
-                    "If you set `stream_profile_name`, you must not pass `stream_path`."
-                )
-            if stream_path == "v3io":
-                stream_profile = mlrun.datastore.datastore_profile.DatastoreProfileV3io(
-                    name=mm_constants.DefaultProfileName.STREAM
-                )
-            else:
-                parsed_stream = urlparse(stream_path)
-                if parsed_stream.scheme != "kafka":
-                    raise mlrun.errors.MLRunValueError(
-                        f"Unsupported `stream_path`: '{stream_path}'."
-                    )
-                stream_profile = (
-                    mlrun.datastore.datastore_profile.DatastoreProfileKafkaSource(
-                        name=mm_constants.DefaultProfileName.STREAM,
-                        brokers=[parsed_stream.netloc],
-                        topics=[],
-                    )
-                )
-            self.register_datastore_profile(stream_profile)
-            stream_profile_name = stream_profile.name
+            You need to register one of them, and pass the profile's name.
+        :param replace_creds: If ``True`` - override the existing credentials.
+            Please keep in mind that if you have already enabled model monitoring
+            on your project, replacing the credentials can cause data loss, and will
+            require redeploying all the model monitoring functions, model monitoring
+            infrastructure, and tracked model servers.
+        """
+        db = mlrun.db.get_run_db(secrets=self._secrets)

         db.set_model_monitoring_credentials(
             project=self.name,
             credentials={
-                "access_key": access_key,
                 "tsdb_profile_name": tsdb_profile_name,
                 "stream_profile_name": stream_profile_name,
             },
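Callers that still pass the removed access_key, stream_path, or tsdb_connection arguments will now fail, since the connection-string parsing shown above is gone. A minimal migration sketch, assuming the same TDEngine/Kafka targets the old connection strings pointed at (host names, ports, and credentials are placeholders):

    import mlrun
    from mlrun.datastore.datastore_profile import (
        DatastoreProfileKafkaSource,
        TDEngineDatastoreProfile,
    )

    project = mlrun.get_or_create_project("my-project")

    # Before (removed in this release):
    # project.set_model_monitoring_credentials(
    #     tsdb_connection="taosws://user:pass@tdengine-host:6041",
    #     stream_path="kafka://kafka-broker:9092",
    # )

    # After: register datastore profiles once, then pass their names.
    project.register_datastore_profile(
        TDEngineDatastoreProfile(
            name="mm-tsdb", host="tdengine-host", port=6041, user="user", password="pass"
        )
    )
    project.register_datastore_profile(
        DatastoreProfileKafkaSource(name="mm-stream", brokers=["kafka-broker:9092"], topics=[])
    )
    project.set_model_monitoring_credentials(
        tsdb_profile_name="mm-tsdb", stream_profile_name="mm-stream"
    )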
@@ -3809,7 +3786,7 @@ class MlrunProject(ModelObj):

     def list_model_endpoints(
         self,
-
+        names: Optional[Union[str, list[str]]] = None,
         model_name: Optional[str] = None,
         model_tag: Optional[str] = None,
         function_name: Optional[str] = None,
@@ -3839,7 +3816,7 @@ class MlrunProject(ModelObj):
         In addition, this functions provides a facade for listing endpoint related metrics. This facade is time-based
         and depends on the 'start' and 'end' parameters.

-        :param
+        :param names: The name of the model to filter by
         :param model_name: The name of the model to filter by
         :param function_name: The name of the function to filter by
         :param function_tag: The tag of the function to filter by
@@ -3860,7 +3837,7 @@ class MlrunProject(ModelObj):
         db = mlrun.db.get_run_db(secrets=self._secrets)
         return db.list_model_endpoints(
             project=self.name,
-
+            names=names,
             model_name=model_name,
             model_tag=model_tag,
             function_name=function_name,
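Per the new signature, names accepts either a single endpoint name or a list of names and is forwarded as-is to the DB client. A brief usage sketch (the project and endpoint names are illustrative only):

    # Hypothetical usage of the new `names` filter on list_model_endpoints().
    import mlrun

    project = mlrun.get_or_create_project("my-project")
    endpoints = project.list_model_endpoints(names=["churn-model", "fraud-model"])
    single = project.list_model_endpoints(names="churn-model", function_name="serving")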
mlrun/serving/states.py
CHANGED
@@ -812,8 +812,8 @@ class RouterStep(TaskStep):
             * **archive**:
                 1. If model endpoints with the same name exist, preserve them.
                 2. Create a new model endpoint with the same name and set it to `latest`.
-
         """
+
         if len(self.routes.keys()) >= MAX_MODELS_PER_ROUTER and key not in self.routes:
             raise mlrun.errors.MLRunModelLimitExceededError(
                 f"Router cannot support more than {MAX_MODELS_PER_ROUTER} model endpoints. "
mlrun/serving/v2_serving.py
CHANGED
@@ -115,6 +115,7 @@ class V2ModelServer(StepToDict):
         self.shard_by_endpoint = shard_by_endpoint
         self._model_logger = None
         self.initialized = False
+        self.output_schema = []

     def _load_and_update_state(self):
         try:
@@ -175,17 +176,15 @@ class V2ModelServer(StepToDict):
                 "Model endpoint creation task name not provided",
             )
         try:
-
-
-            .
-
-
-
-                    function_tag=server.function_tag or "latest",
-                    tsdb_metrics=False,
-                )
-                .metadata.uid
+            model_endpoint = mlrun.get_run_db().get_model_endpoint(
+                project=server.project,
+                name=self.name,
+                function_name=server.function_name,
+                function_tag=server.function_tag or "latest",
+                tsdb_metrics=False,
             )
+            self.model_endpoint_uid = model_endpoint.metadata.uid
+            self.output_schema = model_endpoint.spec.label_names
         except mlrun.errors.MLRunNotFoundError:
             logger.info(
                 "Model endpoint not found for this step; monitoring for this model will not be performed",
@@ -566,6 +565,17 @@ class _ModelLogPusher:
                     resp["outputs"] = [
                         resp["outputs"][i] for i in sampled_requests_indices
                     ]
+                if self.model.output_schema and len(self.model.output_schema) != len(
+                    resp["outputs"][0]
+                ):
+                    logger.info(
+                        "The number of outputs returned by the model does not match the number of outputs "
+                        "specified in the model endpoint.",
+                        model_endpoint=self.model.name,
+                        model_endpoint_id=self.model.model_endpoint_uid,
+                        output_len=len(resp["outputs"][0]),
+                        schema_len=len(self.model.output_schema),
+                    )

         data = self.base_data()
         data["request"] = request
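The added check is purely informational: when the endpoint's recorded label names disagree with the width of the model's output rows, a log line is emitted and the event is still pushed. A standalone sketch of the condition (the values are made up; output_schema stands in for the endpoint's label_names):

    # Hypothetical values illustrating the length check added above.
    output_schema = ["yhat", "confidence"]   # label names stored on the model endpoint
    outputs = [[0.72, 0.91, 3]]              # the model actually returned three values per request
    if output_schema and len(output_schema) != len(outputs[0]):
        print(f"schema/output mismatch: {len(output_schema)} names vs {len(outputs[0])} values")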
mlrun/utils/helpers.py
CHANGED
@@ -2037,7 +2037,7 @@ class Workflow:
                 pod_phase
             )
             function["status"] = {"state": state}
-            if isinstance(function["metadata"].get("updated"), datetime
+            if isinstance(function["metadata"].get("updated"), datetime):
                 function["metadata"]["updated"] = function["metadata"][
                     "updated"
                 ].isoformat()
mlrun/utils/logger.py
CHANGED
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import contextvars
 import datetime
 import logging
 import os
@@ -29,6 +30,8 @@ import pydantic.v1
 from mlrun import errors
 from mlrun.config import config

+context_id_var = contextvars.ContextVar("context_id", default=None)
+

 class _BaseFormatter(logging.Formatter):
     def _json_dump(self, json_object):
@@ -58,12 +61,19 @@ class _BaseFormatter(logging.Formatter):
             default=default,
         ).decode()

-
-class JSONFormatter(_BaseFormatter):
-    def format(self, record) -> str:
+    def _record_with(self, record):
         record_with = getattr(record, "with", {})
         if record.exc_info:
             record_with.update(exc_info=format_exception(*record.exc_info))
+        if "ctx" not in record_with:
+            if (ctx_id := context_id_var.get()) is not None:
+                record_with["ctx"] = ctx_id
+        return record_with
+
+
+class JSONFormatter(_BaseFormatter):
+    def format(self, record) -> str:
+        record_with = self._record_with(record)
         record_fields = {
             "datetime": self.formatTime(record, self.datefmt),
             "level": record.levelname.lower(),
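With the formatter changes above, any record formatted while context_id_var is set gets a "ctx" field automatically. A short usage sketch (assuming the context variable is importable from mlrun.utils.logger; the request id is a placeholder):

    # Hypothetical sketch: tag every log record emitted in this context with an id.
    from mlrun.utils import logger
    from mlrun.utils.logger import context_id_var

    token = context_id_var.set("req-1234")
    try:
        logger.info("handling request")  # formatted output now carries ctx=req-1234
    finally:
        context_id_var.reset(token)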
@@ -90,12 +100,6 @@ class HumanReadableFormatter(_BaseFormatter):
         more = f": {record_with_encoded}" if record_with_encoded else ""
         return more

-    def _record_with(self, record):
-        record_with = getattr(record, "with", {})
-        if record.exc_info:
-            record_with.update(exc_info=format_exception(*record.exc_info))
-        return record_with
-

 class CustomFormatter(HumanReadableFormatter):
     """
@@ -354,7 +358,6 @@ class Logger:
         self, level, message, *args, exc_info=None, **kw_args
     ):
         kw_args.update(self._bound_variables)
-
         if kw_args:
             self._logger.log(
                 level, message, *args, exc_info=exc_info, extra={"with": kw_args}
mlrun/utils/notifications/notification_pusher.py
CHANGED

@@ -474,6 +474,7 @@ class CustomNotificationPusher(_NotificationPusherBase):
             for notification_type, notification in notifications.items()
             if notification.is_async
         }
+        self._server_notifications = []

     @property
     def notifications(self):
@@ -481,6 +482,10 @@ class CustomNotificationPusher(_NotificationPusherBase):
         notifications.update(self._async_notifications)
         return notifications

+    @property
+    def server_notifications(self):
+        return self._server_notifications
+
     def push(
         self,
         message: str,
@@ -511,6 +516,14 @@ class CustomNotificationPusher(_NotificationPusherBase):
         self,
         notification_type: str,
         params: typing.Optional[dict[str, str]] = None,
+        name: typing.Optional[str] = None,
+        message: typing.Optional[str] = None,
+        severity: mlrun.common.schemas.notification.NotificationSeverity = (
+            mlrun.common.schemas.notification.NotificationSeverity.INFO
+        ),
+        when: typing.Optional[list[str]] = None,
+        condition: typing.Optional[str] = None,
+        secret_params: typing.Optional[dict[str, str]] = None,
     ):
         if notification_type not in [
             notification_module.NotificationTypes.console,
@@ -518,6 +531,17 @@ class CustomNotificationPusher(_NotificationPusherBase):
         ]:
             # We want that only the console and ipython notifications will be notified by the client.
             # The rest of the notifications will be notified by the BE.
+            self._server_notifications.append(
+                mlrun.model.Notification(
+                    kind=notification_type,
+                    name=name,
+                    message=message,
+                    severity=severity,
+                    when=when or runtimes_constants.RunStates.notification_states(),
+                    params=params,
+                    secret_params=secret_params,
+                )
+            )
             return

         if notification_type in self._async_notifications:
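Taken together, these hunks make add_notification capture the full notification definition for anything other than console/ipython and expose it through the new server_notifications property, so the backend can deliver it instead of the client. A hedged usage sketch (the pusher construction, the "slack" type, and the secret parameters are illustrative only; consult the class for the exact constructor):

    # Hypothetical usage of the extended add_notification() signature,
    # given an existing CustomNotificationPusher instance `pusher`.
    pusher.add_notification(
        notification_type="slack",
        name="workflow-done",
        message="pipeline finished",
        when=["completed", "error"],
        secret_params={"webhook": "<slack-webhook-url>"},
    )
    # Non-console notifications are not pushed by the client; they are queued here
    # for the server side instead.
    print(pusher.server_notifications)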
mlrun/utils/regex.py
CHANGED
mlrun/utils/version/version.json
CHANGED
{mlrun-1.8.0rc30.dist-info → mlrun-1.8.0rc32.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: mlrun
-Version: 1.8.0rc30
+Version: 1.8.0rc32
 Summary: Tracking and config of machine learning runs
 Home-page: https://github.com/mlrun/mlrun
 Author: Yaron Haviv
@@ -51,7 +51,7 @@ Requires-Dist: setuptools>=75.2
 Requires-Dist: deprecated~=1.2
 Requires-Dist: jinja2>=3.1.3,~=3.1
 Requires-Dist: orjson<4,>=3.9.15
-Requires-Dist: mlrun-pipelines-kfp-common~=0.3.
+Requires-Dist: mlrun-pipelines-kfp-common~=0.3.9
 Requires-Dist: mlrun-pipelines-kfp-v1-8~=0.3.5; python_version < "3.11"
 Requires-Dist: docstring_parser~=0.16
 Requires-Dist: aiosmtplib~=3.0