mlrun 1.8.0rc38__py3-none-any.whl → 1.8.0rc39__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic. See the release advisory for more details.

mlrun/__main__.py CHANGED
@@ -17,7 +17,6 @@ import json
17
17
  import pathlib
18
18
  import socket
19
19
  import traceback
20
- import warnings
21
20
  from ast import literal_eval
22
21
  from base64 import b64decode
23
22
  from os import environ, path, remove
@@ -864,14 +863,8 @@ def version():
864
863
  )
865
864
  @click.option("--offset", type=int, default=0, help="byte offset")
866
865
  @click.option("--db", help="api and db service path/url")
867
- @click.option("--watch", "-w", is_flag=True, help="Deprecated. not in use")
868
- def logs(uid, project, offset, db, watch):
866
+ def logs(uid, project, offset, db):
869
867
  """Get or watch task logs"""
870
- if watch:
871
- warnings.warn(
872
- "'--watch' is deprecated in 1.6.0, and will be removed in 1.8.0, "
873
- # TODO: Remove in 1.8.0
874
- )
875
868
  mldb = get_run_db(db or mlconf.dbpath)
876
869
  if mldb.kind == "http":
877
870
  state, _ = mldb.watch_log(uid, project, watch=False, offset=offset)
mlrun/artifacts/base.py CHANGED
@@ -893,7 +893,7 @@ def generate_target_path(item: Artifact, artifact_path, producer):
893
893
  return f"{artifact_path}{item.key}{suffix}"
894
894
 
895
895
 
896
- # TODO: left to support data migration from legacy artifacts to new artifacts. Remove in 1.8.0.
896
+ # TODO: Remove once data migration v5 is obsolete
897
897
  def convert_legacy_artifact_to_new_format(
898
898
  legacy_artifact: dict,
899
899
  ) -> Artifact:
@@ -905,9 +905,9 @@ def convert_legacy_artifact_to_new_format(
905
905
  artifact_tag = legacy_artifact.get("tag", "")
906
906
  if artifact_tag:
907
907
  artifact_key = f"{artifact_key}:{artifact_tag}"
908
- # TODO: remove in 1.8.0
908
+ # TODO: Remove once data migration v5 is obsolete
909
909
  warnings.warn(
910
- f"Converting legacy artifact '{artifact_key}' to new format. This will not be supported in MLRun 1.8.0. "
910
+ f"Converting legacy artifact '{artifact_key}' to new format. This will not be supported in MLRun 1.9.0. "
911
911
  f"Make sure to save the artifact/project in the new format.",
912
912
  FutureWarning,
913
913
  )
@@ -108,7 +108,7 @@ class ArtifactProducer:
108
108
  def dict_to_artifact(struct: dict) -> Artifact:
109
109
  kind = struct.get("kind", "")
110
110
 
111
- # TODO: remove this in 1.8.0
111
+ # TODO: Remove once data migration v5 is obsolete
112
112
  if mlrun.utils.is_legacy_artifact(struct):
113
113
  return mlrun.artifacts.base.convert_legacy_artifact_to_new_format(struct)
114
114
 
@@ -66,3 +66,4 @@ class ClientSpec(pydantic.v1.BaseModel):
66
66
  external_platform_tracking: typing.Optional[dict]
67
67
  alerts_mode: typing.Optional[str]
68
68
  system_id: typing.Optional[str]
69
+ model_endpoint_monitoring_store_prefixes: typing.Optional[dict[str, str]]
@@ -42,12 +42,10 @@ class ModelEndpointSchema(MonitoringStrEnum):
42
42
  # spec
43
43
  FUNCTION_NAME = "function_name"
44
44
  FUNCTION_TAG = "function_tag"
45
- FUNCTION_UID = "function_uid"
46
45
  MODEL_NAME = "model_name"
47
- MODEL_DB_KEY = "model_db_key"
48
- MODEL_TAG = "model_tag"
46
+ MODEL_TAGS = "model_tags"
47
+ MODEL_PATH = "model_path"
49
48
  MODEL_CLASS = "model_class"
50
- MODEL_UID = "model_uid"
51
49
  FEATURE_NAMES = "feature_names"
52
50
  LABEL_NAMES = "label_names"
53
51
  FEATURE_STATS = "feature_stats"
@@ -117,14 +117,13 @@ class ModelEndpointMetadata(ObjectMetadata, ModelEndpointParser):
117
117
 
118
118
 
119
119
  class ModelEndpointSpec(ObjectSpec, ModelEndpointParser):
120
- model_uid: Optional[str] = ""
121
- model_name: Optional[str] = ""
122
- model_db_key: Optional[str] = ""
123
- model_tag: Optional[str] = ""
124
120
  model_class: Optional[str] = ""
125
121
  function_name: Optional[str] = ""
126
122
  function_tag: Optional[str] = ""
127
- function_uid: Optional[str] = ""
123
+ model_path: Optional[str] = ""
124
+ model_name: Optional[str] = ""
125
+ model_tags: Optional[list[str]] = []
126
+ _model_id: Optional[int] = None
128
127
  feature_names: Optional[list[str]] = []
129
128
  label_names: Optional[list[str]] = []
130
129
  feature_stats: Optional[dict] = {}
@@ -137,12 +136,8 @@ class ModelEndpointSpec(ObjectSpec, ModelEndpointParser):
137
136
  @classmethod
138
137
  def mutable_fields(cls):
139
138
  return [
140
- "model_uid",
141
- "model_name",
142
- "model_db_key",
143
- "model_tag",
139
+ "model_path",
144
140
  "model_class",
145
- "function_uid",
146
141
  "feature_names",
147
142
  "label_names",
148
143
  "children",
@@ -206,7 +201,6 @@ class ModelEndpoint(BaseModel):
206
201
  ModelEndpointSchema.CURRENT_STATS,
207
202
  ModelEndpointSchema.DRIFT_MEASURES,
208
203
  ModelEndpointSchema.FUNCTION_URI,
209
- ModelEndpointSchema.MODEL_URI,
210
204
  }
211
205
  # Initialize a flattened dictionary that will be filled with the model endpoint dictionary attributes
212
206
  flatten_dict = {}
mlrun/datastore/base.py CHANGED
@@ -24,7 +24,6 @@ import pandas as pd
24
24
  import pyarrow
25
25
  import pytz
26
26
  import requests
27
- from deprecated import deprecated
28
27
 
29
28
  import mlrun.config
30
29
  import mlrun.errors
@@ -95,16 +94,6 @@ class DataStore:
95
94
  def uri_to_ipython(endpoint, subpath):
96
95
  return ""
97
96
 
98
- # TODO: remove in 1.8.0
99
- @deprecated(
100
- version="1.8.0",
101
- reason="'get_filesystem()' will be removed in 1.8.0, use "
102
- "'filesystem' property instead",
103
- category=FutureWarning,
104
- )
105
- def get_filesystem(self):
106
- return self.filesystem
107
-
108
97
  @property
109
98
  def filesystem(self) -> Optional[fsspec.AbstractFileSystem]:
110
99
  """return fsspec file system object, if supported"""
mlrun/db/httpdb.py CHANGED
@@ -566,6 +566,17 @@ class HTTPRunDB(RunDBInterface):
566
566
  )
567
567
  config.alerts.mode = server_cfg.get("alerts_mode") or config.alerts.mode
568
568
  config.system_id = server_cfg.get("system_id") or config.system_id
569
+ model_monitoring_store_prefixes = (
570
+ server_cfg.get("model_endpoint_monitoring_store_prefixes") or {}
571
+ )
572
+ for prefix in ["default", "user_space", "monitoring_application"]:
573
+ store_prefix_value = model_monitoring_store_prefixes.get(prefix)
574
+ if store_prefix_value is not None:
575
+ setattr(
576
+ config.model_endpoint_monitoring.store_prefixes,
577
+ prefix,
578
+ store_prefix_value,
579
+ )
569
580
 
570
581
  except Exception as exc:
571
582
  logger.warning(
@@ -118,8 +118,6 @@ def get_or_create_model_endpoint(
118
118
  model_endpoint_name=model_endpoint_name,
119
119
  function_name=function_name,
120
120
  function_tag=function_tag,
121
- context=context,
122
- sample_set_statistics=sample_set_statistics,
123
121
  monitoring_mode=monitoring_mode,
124
122
  )
125
123
  return model_endpoint
@@ -344,8 +342,6 @@ def _generate_model_endpoint(
344
342
  model_endpoint_name: str,
345
343
  function_name: str,
346
344
  function_tag: str,
347
- context: "mlrun.MLClientCtx",
348
- sample_set_statistics: dict[str, typing.Any],
349
345
  monitoring_mode: mm_constants.ModelMonitoringMode = mm_constants.ModelMonitoringMode.enabled,
350
346
  ) -> ModelEndpoint:
351
347
  """
@@ -358,21 +354,10 @@ def _generate_model_endpoint(
358
354
  :param model_endpoint_name: Model endpoint name will be presented under the new model endpoint.
359
355
  :param function_name: If a new model endpoint is created, use this function name.
360
356
  :param function_tag: If a new model endpoint is created, use this function tag.
361
- :param context: MLRun context. If function_name not provided, use the context to generate the
362
- full function hash.
363
- :param sample_set_statistics: Dictionary of sample set statistics that will be used as a reference data for
364
- the current model endpoint. Will be stored under
365
- `model_endpoint.status.feature_stats`.
357
+ :param monitoring_mode: Monitoring mode of the new model endpoint.
366
358
 
367
359
  :return `mlrun.common.schemas.ModelEndpoint` object.
368
360
  """
369
- model_obj = None
370
- if model_path:
371
- model_obj: mlrun.artifacts.ModelArtifact = (
372
- mlrun.datastore.store_resources.get_store_resource(
373
- model_path, db=db_session
374
- )
375
- )
376
361
  current_time = datetime_now()
377
362
  model_endpoint = mlrun.common.schemas.ModelEndpoint(
378
363
  metadata=mlrun.common.schemas.ModelEndpointMetadata(
@@ -383,10 +368,7 @@ def _generate_model_endpoint(
383
368
  spec=mlrun.common.schemas.ModelEndpointSpec(
384
369
  function_name=function_name or "function",
385
370
  function_tag=function_tag or "latest",
386
- model_name=model_obj.metadata.key if model_obj else None,
387
- model_uid=model_obj.metadata.uid if model_obj else None,
388
- model_tag=model_obj.metadata.tag if model_obj else None,
389
- model_db_key=model_obj.spec.db_key if model_obj else None,
371
+ model_path=model_path,
390
372
  model_class="drift-analysis",
391
373
  ),
392
374
  status=mlrun.common.schemas.ModelEndpointStatus(
@@ -16,6 +16,7 @@ import concurrent.futures
16
16
  import datetime
17
17
  import json
18
18
  import os
19
+ import traceback
19
20
  from collections.abc import Iterator
20
21
  from contextlib import AbstractContextManager
21
22
  from types import TracebackType
@@ -551,14 +552,29 @@ class MonitoringApplicationController:
551
552
  with concurrent.futures.ThreadPoolExecutor(
552
553
  max_workers=min(len(endpoints), 10)
553
554
  ) as pool:
554
- for endpoint in endpoints:
555
+ futures = {
555
556
  pool.submit(
556
557
  MonitoringApplicationController.endpoint_to_regular_event,
557
558
  endpoint,
558
559
  policy,
559
560
  set(applications_names),
560
561
  self.v3io_access_key,
561
- )
562
+ ): endpoint
563
+ for endpoint in endpoints
564
+ }
565
+ for future in concurrent.futures.as_completed(futures):
566
+ if future.exception():
567
+ exception = future.exception()
568
+ error = (
569
+ f"Failed to push event. Endpoint name: {futures[future].metadata.name}, "
570
+ f"endpoint uid: {futures[future].metadata.uid}, traceback:\n"
571
+ )
572
+ error += "".join(
573
+ traceback.format_exception(
574
+ None, exception, exception.__traceback__
575
+ )
576
+ )
577
+ logger.error(error)
562
578
  logger.info("Finishing monitoring controller chief")
563
579
 
564
580
  @staticmethod
@@ -75,13 +75,8 @@ class TDEngineConnector(TSDBConnector):
75
75
  """Establish a connection to the TSDB server."""
76
76
  logger.debug("Creating a new connection to TDEngine", project=self.project)
77
77
  conn = TDEngineConnection(self._tdengine_connection_profile.dsn())
78
- conn.run(
79
- statements=f"CREATE DATABASE IF NOT EXISTS {self.database}",
80
- timeout=self._timeout,
81
- retries=self._retries,
82
- )
83
78
  conn.prefix_statements = [f"USE {self.database}"]
84
- logger.debug("Connected to TDEngine", project=self.project)
79
+
85
80
  return conn
86
81
 
87
82
  def _init_super_tables(self):
@@ -101,8 +96,27 @@ class TDEngineConnector(TSDBConnector):
101
96
  ),
102
97
  }
103
98
 
99
+ def _create_db_if_not_exists(self):
100
+ """Create the database if it does not exist."""
101
+ self.connection.prefix_statements = []
102
+ self.connection.run(
103
+ statements=f"CREATE DATABASE IF NOT EXISTS {self.database}",
104
+ timeout=self._timeout,
105
+ retries=self._retries,
106
+ )
107
+ self.connection.prefix_statements = [f"USE {self.database}"]
108
+ logger.debug(
109
+ "The TDEngine database is currently in use",
110
+ project=self.project,
111
+ database=self.database,
112
+ )
113
+
104
114
  def create_tables(self):
105
115
  """Create TDEngine supertables."""
116
+
117
+ # Create the database if it does not exist
118
+ self._create_db_if_not_exists()
119
+
106
120
  for table in self.tables:
107
121
  create_table_query = self.tables[table]._create_super_table_query()
108
122
  conn = self.connection
@@ -344,6 +358,7 @@ class TDEngineConnector(TSDBConnector):
344
358
  project=self.project,
345
359
  database=self.database,
346
360
  )
361
+
347
362
  except Exception as e:
348
363
  logger.warning(
349
364
  "Failed to drop the database. You may need to drop it manually if it is empty.",
@@ -294,9 +294,9 @@ def build_function(
294
294
  :param force_build: Force building the image, even when no changes were made
295
295
  """
296
296
  if not overwrite_build_params:
297
- # TODO: change overwrite_build_params default to True in 1.8.0
297
+ # TODO: change overwrite_build_params default to True in 1.9.0
298
298
  warnings.warn(
299
- "The `overwrite_build_params` parameter default will change from 'False' to 'True' in 1.8.0.",
299
+ "The `overwrite_build_params` parameter default will change from 'False' to 'True' in 1.9.0.",
300
300
  mlrun.utils.OverwriteBuildParamsWarning,
301
301
  )
302
302
 
@@ -325,7 +325,7 @@ def build_function(
325
325
  skip_deployed=skip_deployed,
326
326
  )
327
327
  else:
328
- # TODO: remove filter once overwrite_build_params default is changed to True in 1.8.0
328
+ # TODO: remove filter once overwrite_build_params default is changed to True in 1.9.0
329
329
  with warnings.catch_warnings():
330
330
  warnings.simplefilter(
331
331
  "ignore", category=mlrun.utils.OverwriteBuildParamsWarning
@@ -1139,6 +1139,11 @@ def load_and_run_workflow(
1139
1139
  if "running" in notification.when
1140
1140
  ]
1141
1141
 
1142
+ # Prevent redundant notifications for run completion by ensuring that notifications are only triggered when the run
1143
+ # reaches the "running" state, as the server already handles the completion notifications.
1144
+ for notification in start_notifications:
1145
+ notification.when = ["running"]
1146
+
1142
1147
  workflow_log_message = workflow_name or workflow_path
1143
1148
  context.logger.info(f"Running workflow {workflow_log_message} from remote")
1144
1149
  run = project.run(
mlrun/projects/project.py CHANGED
@@ -4059,9 +4059,9 @@ class MlrunProject(ModelObj):
4059
4059
  (by default `/home/mlrun_code`)
4060
4060
  """
4061
4061
  if not overwrite_build_params:
4062
- # TODO: change overwrite_build_params default to True in 1.8.0
4062
+ # TODO: change overwrite_build_params default to True in 1.9.0
4063
4063
  warnings.warn(
4064
- "The `overwrite_build_params` parameter default will change from 'False' to 'True' in 1.8.0.",
4064
+ "The `overwrite_build_params` parameter default will change from 'False' to 'True' in 1.9.0.",
4065
4065
  mlrun.utils.OverwriteBuildParamsWarning,
4066
4066
  )
4067
4067
  default_image_name = mlrun.mlconf.default_project_image_name.format(
@@ -4136,9 +4136,9 @@ class MlrunProject(ModelObj):
4136
4136
  )
4137
4137
 
4138
4138
  if not overwrite_build_params:
4139
- # TODO: change overwrite_build_params default to True in 1.8.0
4139
+ # TODO: change overwrite_build_params default to True in 1.9.0
4140
4140
  warnings.warn(
4141
- "The `overwrite_build_params` parameter default will change from 'False' to 'True' in 1.8.0.",
4141
+ "The `overwrite_build_params` parameter default will change from 'False' to 'True' in 1.9.0.",
4142
4142
  mlrun.utils.OverwriteBuildParamsWarning,
4143
4143
  )
4144
4144
 
mlrun/run.py CHANGED
@@ -36,9 +36,9 @@ import mlrun.common.formatters
36
36
  import mlrun.common.schemas
37
37
  import mlrun.errors
38
38
  import mlrun.utils.helpers
39
+ import mlrun_pipelines.utils
39
40
  from mlrun_pipelines.common.models import RunStatuses
40
41
  from mlrun_pipelines.common.ops import format_summary_from_kfp_run, show_kfp_run
41
- from mlrun_pipelines.utils import get_client
42
42
 
43
43
  from .common.helpers import parse_versioned_object_uri
44
44
  from .config import config as mlconf
@@ -437,7 +437,7 @@ def new_function(
437
437
  mode: Optional[str] = None,
438
438
  handler: Optional[str] = None,
439
439
  source: Optional[str] = None,
440
- requirements: Optional[Union[str, list[str]]] = None,
440
+ requirements: Optional[list[str]] = None,
441
441
  kfp: Optional[bool] = None,
442
442
  requirements_file: str = "",
443
443
  ):
@@ -1015,7 +1015,7 @@ def wait_for_pipeline_completion(
1015
1015
  _wait_for_pipeline_completion,
1016
1016
  )
1017
1017
  else:
1018
- client = get_client(namespace=namespace)
1018
+ client = mlrun_pipelines.utils.get_client(namespace=namespace)
1019
1019
  resp = client.wait_for_run_completion(run_id, timeout)
1020
1020
  if resp:
1021
1021
  resp = resp.to_dict()
@@ -1076,7 +1076,7 @@ def get_pipeline(
1076
1076
  )
1077
1077
 
1078
1078
  else:
1079
- client = get_client(namespace=namespace)
1079
+ client = mlrun_pipelines.utils.get_client(namespace=namespace)
1080
1080
  resp = client.get_run(run_id)
1081
1081
  if resp:
1082
1082
  resp = resp.to_dict()
mlrun/runtimes/kubejob.py CHANGED
@@ -114,9 +114,9 @@ class KubejobRuntime(KubeResource):
114
114
  e.g. builder_env={"GIT_TOKEN": token}
115
115
  """
116
116
  if not overwrite:
117
- # TODO: change overwrite default to True in 1.8.0
117
+ # TODO: change overwrite default to True in 1.9.0
118
118
  warnings.warn(
119
- "The `overwrite` parameter default will change from 'False' to 'True' in 1.8.0.",
119
+ "The `overwrite` parameter default will change from 'False' to 'True' in 1.9.0.",
120
120
  mlrun.utils.OverwriteBuildParamsWarning,
121
121
  )
122
122
  image = mlrun.utils.helpers.remove_image_protocol_prefix(image)
@@ -542,7 +542,6 @@ class ApplicationRuntime(RemoteRuntime):
542
542
  body: typing.Optional[typing.Union[str, bytes, dict]] = None,
543
543
  method: typing.Optional[str] = None,
544
544
  headers: typing.Optional[dict] = None,
545
- dashboard: str = "",
546
545
  force_external_address: bool = False,
547
546
  auth_info: schemas.AuthInfo = None,
548
547
  mock: typing.Optional[bool] = None,
@@ -569,7 +568,6 @@ class ApplicationRuntime(RemoteRuntime):
569
568
  body,
570
569
  method,
571
570
  headers,
572
- dashboard,
573
571
  force_external_address,
574
572
  auth_info,
575
573
  mock,
@@ -767,45 +767,10 @@ class RemoteRuntime(KubeResource):
767
767
 
768
768
  def _get_state(
769
769
  self,
770
- dashboard="",
771
770
  last_log_timestamp=0,
772
771
  verbose=False,
773
772
  raise_on_exception=True,
774
- resolve_address=True,
775
- auth_info: AuthInfo = None,
776
773
  ) -> tuple[str, str, typing.Optional[float]]:
777
- if dashboard:
778
- (
779
- state,
780
- address,
781
- name,
782
- last_log_timestamp,
783
- text,
784
- function_status,
785
- ) = get_nuclio_deploy_status(
786
- self.metadata.name,
787
- self.metadata.project,
788
- self.metadata.tag,
789
- dashboard,
790
- last_log_timestamp=last_log_timestamp,
791
- verbose=verbose,
792
- resolve_address=resolve_address,
793
- auth_info=auth_info,
794
- )
795
- self.status.internal_invocation_urls = function_status.get(
796
- "internalInvocationUrls", []
797
- )
798
- self.status.external_invocation_urls = function_status.get(
799
- "externalInvocationUrls", []
800
- )
801
- self.status.state = state
802
- self.status.nuclio_name = name
803
- self.status.container_image = function_status.get("containerImage", "")
804
- if address:
805
- self.status.address = address
806
- self.spec.command = f"http://{address}"
807
- return state, text, last_log_timestamp
808
-
809
774
  try:
810
775
  text, last_log_timestamp = self._get_db().get_nuclio_deploy_status(
811
776
  self, last_log_timestamp=last_log_timestamp, verbose=verbose
@@ -916,7 +881,6 @@ class RemoteRuntime(KubeResource):
916
881
  body: typing.Optional[typing.Union[str, bytes, dict]] = None,
917
882
  method: typing.Optional[str] = None,
918
883
  headers: typing.Optional[dict] = None,
919
- dashboard: str = "",
920
884
  force_external_address: bool = False,
921
885
  auth_info: AuthInfo = None,
922
886
  mock: typing.Optional[bool] = None,
@@ -932,7 +896,6 @@ class RemoteRuntime(KubeResource):
932
896
  :param body: request body (str, bytes or a dict for json requests)
933
897
  :param method: HTTP method (GET, PUT, ..)
934
898
  :param headers: key/value dict with http headers
935
- :param dashboard: nuclio dashboard address (deprecated)
936
899
  :param force_external_address: use the external ingress URL
937
900
  :param auth_info: service AuthInfo
938
901
  :param mock: use mock server vs a real Nuclio function (for local simulations)
@@ -940,14 +903,6 @@ class RemoteRuntime(KubeResource):
940
903
  see this link for more information:
941
904
  https://requests.readthedocs.io/en/latest/api/#requests.request
942
905
  """
943
- if dashboard:
944
- # TODO: remove in 1.8.0
945
- warnings.warn(
946
- "'dashboard' parameter is no longer supported on client side, "
947
- "it is being configured through the MLRun API. It will be removed in 1.8.0.",
948
- FutureWarning,
949
- )
950
-
951
906
  if not method:
952
907
  method = "POST" if body else "GET"
953
908
 
@@ -977,7 +932,7 @@ class RemoteRuntime(KubeResource):
977
932
  "so function can not be invoked via http. Either enable default http trigger creation or "
978
933
  "create custom http trigger"
979
934
  )
980
- state, _, _ = self._get_state(dashboard, auth_info=auth_info)
935
+ state, _, _ = self._get_state()
981
936
  if state not in ["ready", "scaledToZero"]:
982
937
  logger.warning(f"Function is in the {state} state")
983
938
  if not self.status.address: