mlrun 1.7.0rc43__py3-none-any.whl → 1.7.0rc44__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (39)
  1. mlrun/__main__.py +4 -2
  2. mlrun/artifacts/base.py +1 -1
  3. mlrun/artifacts/manager.py +9 -3
  4. mlrun/common/schemas/alert.py +11 -11
  5. mlrun/common/schemas/client_spec.py +0 -1
  6. mlrun/common/schemas/workflow.py +1 -0
  7. mlrun/config.py +27 -20
  8. mlrun/data_types/data_types.py +5 -0
  9. mlrun/datastore/base.py +4 -4
  10. mlrun/datastore/storeytargets.py +2 -2
  11. mlrun/db/httpdb.py +2 -12
  12. mlrun/db/nopdb.py +21 -4
  13. mlrun/execution.py +3 -1
  14. mlrun/feature_store/api.py +1 -0
  15. mlrun/feature_store/retrieval/spark_merger.py +7 -3
  16. mlrun/frameworks/_common/plan.py +3 -3
  17. mlrun/frameworks/_ml_common/plan.py +1 -1
  18. mlrun/frameworks/parallel_coordinates.py +2 -3
  19. mlrun/launcher/client.py +6 -6
  20. mlrun/model_monitoring/controller.py +1 -1
  21. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +15 -1
  22. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +12 -0
  23. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +2 -2
  24. mlrun/model_monitoring/helpers.py +7 -8
  25. mlrun/model_monitoring/writer.py +3 -1
  26. mlrun/projects/pipelines.py +2 -0
  27. mlrun/projects/project.py +21 -10
  28. mlrun/render.py +3 -3
  29. mlrun/runtimes/kubejob.py +6 -6
  30. mlrun/runtimes/nuclio/api_gateway.py +6 -0
  31. mlrun/runtimes/pod.py +14 -8
  32. mlrun/utils/helpers.py +39 -22
  33. mlrun/utils/version/version.json +2 -2
  34. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc44.dist-info}/METADATA +18 -18
  35. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc44.dist-info}/RECORD +39 -39
  36. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc44.dist-info}/WHEEL +1 -1
  37. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc44.dist-info}/LICENSE +0 -0
  38. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc44.dist-info}/entry_points.txt +0 -0
  39. {mlrun-1.7.0rc43.dist-info → mlrun-1.7.0rc44.dist-info}/top_level.txt +0 -0
mlrun/__main__.py CHANGED
@@ -734,9 +734,11 @@ def get(kind, name, selector, namespace, uid, project, tag, db, extra_args):
     if db:
         mlconf.dbpath = db
     if not project:
-        print("Warning, project parameter was not specified using default !")
+        logger.warning(
+            "Project parameter was not specified. Defaulting to 'default' project"
+        )
     if kind.startswith("po"):
-        print("Unsupported, use 'get runtimes' instead")
+        logger.warning("Unsupported, use 'get runtimes' instead")
         return

     elif kind.startswith("runtime"):
mlrun/artifacts/base.py CHANGED
@@ -159,7 +159,7 @@ class ArtifactSpec(ModelObj):
         self._is_inline = True

     def get_body(self):
-        """get the artifact body when inline"""
+        """Get the artifact body"""
         return self._body

mlrun/artifacts/manager.py CHANGED
@@ -23,7 +23,7 @@ import mlrun.utils.regex
 from mlrun.utils.helpers import (
     get_local_file_schema,
     template_artifact_path,
-    validate_inline_artifact_body_size,
+    validate_artifact_body_size,
 )

 from ..utils import (
@@ -200,7 +200,9 @@ class ArtifactManager:
         :param artifact_path: The path to store the artifact.
                               If not provided, the artifact will be stored in the default artifact path.
         :param format: The format of the artifact. (e.g. csv, json, html, etc.)
-        :param upload: Whether to upload the artifact or not.
+        :param upload: Whether to upload the artifact to the datastore. If not provided, and the
+                       `local_path` is not a directory, upload occurs by default. Directories are uploaded only when this
+                       flag is explicitly set to `True`.
         :param labels: Labels to add to the artifact.
         :param db_key: The key to use when logging the artifact to the DB.
                        If not provided, will generate a key based on the producer name and the artifact key.
@@ -221,7 +223,11 @@ class ArtifactManager:
         target_path = target_path or item.target_path

         validate_artifact_key_name(key, "artifact.key")
-        validate_inline_artifact_body_size(item.spec.inline)
+
+        # TODO: Create a tmp file, write the body to it, and use it as `local_path` instead of validating the body size.
+        validate_artifact_body_size(
+            body=item.spec.get_body(), is_inline=item.is_inline()
+        )
         src_path = local_path or item.src_path  # TODO: remove src_path
         self.ensure_artifact_source_file_exists(item=item, path=src_path, body=body)
         if format == "html" or (src_path and pathlib.Path(src_path).suffix == "html"):
mlrun/common/schemas/alert.py CHANGED
@@ -34,17 +34,17 @@ class EventEntities(pydantic.BaseModel):


 class EventKind(StrEnum):
-    DATA_DRIFT_DETECTED = "data_drift_detected"
-    DATA_DRIFT_SUSPECTED = "data_drift_suspected"
-    CONCEPT_DRIFT_DETECTED = "concept_drift_detected"
-    CONCEPT_DRIFT_SUSPECTED = "concept_drift_suspected"
-    MODEL_PERFORMANCE_DETECTED = "model_performance_detected"
-    MODEL_PERFORMANCE_SUSPECTED = "model_performance_suspected"
-    SYSTEM_PERFORMANCE_DETECTED = "system_performance_detected"
-    SYSTEM_PERFORMANCE_SUSPECTED = "system_performance_suspected"
-    MM_APP_ANOMALY_DETECTED = "mm_app_anomaly_detected"
-    MM_APP_ANOMALY_SUSPECTED = "mm_app_anomaly_suspected"
-    MM_APP_FAILED = "mm_app_failed"
+    DATA_DRIFT_DETECTED = "data-drift-detected"
+    DATA_DRIFT_SUSPECTED = "data-drift-suspected"
+    CONCEPT_DRIFT_DETECTED = "concept-drift-detected"
+    CONCEPT_DRIFT_SUSPECTED = "concept-drift-suspected"
+    MODEL_PERFORMANCE_DETECTED = "model-performance-detected"
+    MODEL_PERFORMANCE_SUSPECTED = "model-performance-suspected"
+    SYSTEM_PERFORMANCE_DETECTED = "system-performance-detected"
+    SYSTEM_PERFORMANCE_SUSPECTED = "system-performance-suspected"
+    MM_APP_ANOMALY_DETECTED = "mm-app-anomaly-detected"
+    MM_APP_ANOMALY_SUSPECTED = "mm-app-anomaly-suspected"
+    MM_APP_FAILED = "mm-app-failed"
     FAILED = "failed"

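Note: the EventKind values above move from snake_case to kebab-case, so any caller that constructs the enum from a raw string must switch to the new form. A minimal sketch, assuming standard Enum value lookup:

    from mlrun.common.schemas.alert import EventKind

    EventKind("data-drift-detected")  # -> EventKind.DATA_DRIFT_DETECTED
    EventKind("data_drift_detected")  # raises ValueError: not a valid EventKind

The mlrun/model_monitoring/writer.py hunk below normalizes its generated event kinds to match.
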
mlrun/common/schemas/client_spec.py CHANGED
@@ -57,7 +57,6 @@ class ClientSpec(pydantic.BaseModel):
     redis_url: typing.Optional[str]
     redis_type: typing.Optional[str]
     sql_url: typing.Optional[str]
-    model_endpoint_monitoring_store_type: typing.Optional[str]
     model_endpoint_monitoring_endpoint_store_connection: typing.Optional[str]
     model_monitoring_tsdb_connection: typing.Optional[str]
     ce: typing.Optional[dict]
mlrun/common/schemas/workflow.py CHANGED
@@ -32,6 +32,7 @@ class WorkflowSpec(pydantic.BaseModel):
     schedule: typing.Union[str, ScheduleCronTrigger] = None
     run_local: typing.Optional[bool] = None
     image: typing.Optional[str] = None
+    workflow_runner_node_selector: typing.Optional[dict[str, str]] = None


 class WorkflowRequest(pydantic.BaseModel):
mlrun/config.py CHANGED
@@ -539,7 +539,6 @@ default_config = {
         "store_prefixes": {
             "default": "v3io:///users/pipelines/{project}/model-endpoints/{kind}",
             "user_space": "v3io:///projects/{project}/model-endpoints/{kind}",
-            "stream": "",  # TODO: Delete in 1.9.0
             "monitoring_application": "v3io:///users/pipelines/{project}/monitoring-apps/",
         },
         # Offline storage path can be either relative or a full path. This path is used for general offline data
@@ -552,7 +551,6 @@ default_config = {
         "parquet_batching_max_events": 10_000,
         "parquet_batching_timeout_secs": timedelta(minutes=1).total_seconds(),
         # See mlrun.model_monitoring.db.stores.ObjectStoreFactory for available options
-        "store_type": "v3io-nosql",  # TODO: Delete in 1.9.0
         "endpoint_store_connection": "",
         # See mlrun.model_monitoring.db.tsdb.ObjectTSDBFactory for available options
         "tsdb_connection": "",
@@ -798,7 +796,21 @@ class Config:
         for key, value in cfg.items():
             if hasattr(self, key):
                 if isinstance(value, dict):
-                    getattr(self, key).update(value)
+                    # ignore the `skip_errors` flag here
+                    # if the key does not align with what mlrun config expects it is a user
+                    # input error that can lead to unexpected behavior.
+                    # raise the exception to ensure configuration is loaded correctly and do not
+                    # ignore any errors.
+                    config_value = getattr(self, key)
+                    try:
+                        config_value.update(value)
+                    except AttributeError as exc:
+                        if not isinstance(config_value, (dict, Config)):
+                            raise ValueError(
+                                f"Can not update `{key}` config. "
+                                f"Expected a configuration but received {type(value)}"
+                            ) from exc
+                        raise exc
                 else:
                     try:
                         setattr(self, key, value)
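Note: merge errors in nested config sections now surface instead of being swallowed. A hedged sketch of the new failure mode (the keys and values are illustrative, and `update` is assumed to be the method shown in this hunk):

    import mlrun

    # merging a dict into a dict-valued section still works
    mlrun.mlconf.update({"model_endpoint_monitoring": {"tsdb_connection": "taosws://user:pass@host:6041"}})

    # a dict aimed at a scalar-valued key now raises
    mlrun.mlconf.update({"artifact_path": {"unexpected": "dict"}})
    # ValueError: Can not update `artifact_path` config. Expected a configuration but received <class 'dict'>
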
@@ -1102,6 +1114,9 @@ class Config:
         # importing here to avoid circular dependency
         import mlrun.db

+        # It ensures that SSL verification is set before establishing a connection
+        _configure_ssl_verification(self.httpdb.http.verify)
+
         # when dbpath is set we want to connect to it which will sync configuration from it to the client
         mlrun.db.get_run_db(value, force_reconnect=True)

@@ -1130,10 +1145,10 @@ class Config:
         project: str = "",
         kind: str = "",
         target: str = "online",
-        artifact_path: str = None,
-        function_name: str = None,
+        artifact_path: typing.Optional[str] = None,
+        function_name: typing.Optional[str] = None,
         **kwargs,
-    ) -> typing.Union[str, list[str]]:
+    ) -> str:
         """Get the full path from the configuration based on the provided project and kind.

         :param project: Project name.
@@ -1149,8 +1164,7 @@ class Config:
                               relative artifact path will be taken from the global MLRun artifact path.
         :param function_name: Application name, None for model_monitoring_stream.

-        :return: Full configured path for the provided kind. Can be either a single path
-                 or a list of paths in the case of the online model monitoring stream path.
+        :return: Full configured path for the provided kind.
         """

         if target != "offline":
@@ -1171,18 +1185,11 @@ class Config:
                 if function_name is None
                 else f"{kind}-{function_name.lower()}",
             )
-        elif kind == "stream":  # return list for mlrun<1.6.3 BC
-            return [
-                # TODO: remove the first stream in 1.9.0
-                mlrun.mlconf.model_endpoint_monitoring.store_prefixes.default.format(
-                    project=project,
-                    kind=kind,
-                ),  # old stream uri (pipelines) for BC ML-6043
-                mlrun.mlconf.model_endpoint_monitoring.store_prefixes.user_space.format(
-                    project=project,
-                    kind=kind,
-                ),  # new stream uri (projects)
-            ]
+        elif kind == "stream":
+            return mlrun.mlconf.model_endpoint_monitoring.store_prefixes.user_space.format(
+                project=project,
+                kind=kind,
+            )
         else:
             return mlrun.mlconf.model_endpoint_monitoring.store_prefixes.default.format(
                 project=project,
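Note: `get_model_monitoring_file_target_path` with kind="stream" now returns the single user-space path instead of an [old, new] pair. A sketch based on the `store_prefixes` defaults above (the project name is illustrative):

    import mlrun

    path = mlrun.mlconf.get_model_monitoring_file_target_path(
        project="my-project", kind="stream"
    )
    # "v3io:///projects/my-project/model-endpoints/stream"

Callers that previously indexed into the returned list (e.g. `stream_uri[1]`) should use the value directly, as the mlrun/model_monitoring/helpers.py hunk below now does.
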
mlrun/data_types/data_types.py CHANGED
@@ -70,6 +70,11 @@ def pa_type_to_value_type(type_):
     if isinstance(type_, TimestampType):
         return ValueType.DATETIME

+    # pandas category type translates to pyarrow DictionaryType
+    # we need to unpack the value type (ML-7868)
+    if isinstance(type_, pyarrow.DictionaryType):
+        type_ = type_.value_type
+
     type_map = {
         pyarrow.bool_(): ValueType.BOOL,
         pyarrow.int64(): ValueType.INT64,
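Note: a pandas categorical column surfaces in pyarrow as a DictionaryType whose `value_type` holds the underlying dtype; the new branch unwraps it before the `type_map` lookup. A small illustration using only pandas and pyarrow:

    import pandas as pd
    import pyarrow as pa

    # a categorical series converts to a dictionary-encoded arrow array
    arr = pa.Array.from_pandas(pd.Series(["a", "b", "a"], dtype="category"))
    print(arr.type)             # dictionary<values=string, indices=int8, ordered=0>
    print(arr.type.value_type)  # string, which the type_map can resolve
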
mlrun/datastore/base.py CHANGED
@@ -29,7 +29,7 @@ from deprecated import deprecated
 import mlrun.config
 import mlrun.errors
 from mlrun.errors import err_to_str
-from mlrun.utils import StorePrefix, is_ipython, logger
+from mlrun.utils import StorePrefix, is_jupyter, logger

 from .store_resources import is_store_uri, parse_store_uri
 from .utils import filter_df_start_end_time, select_columns_from_df
@@ -619,14 +619,14 @@ class DataItem:
         )
         return df

-    def show(self, format=None):
+    def show(self, format: Optional[str] = None) -> None:
         """show the data object content in Jupyter

         :param format: format to use (when there is no/wrong suffix), e.g. 'png'
         """
-        if not is_ipython:
+        if not is_jupyter:
             logger.warning(
-                "Jupyter/IPython was not detected, .show() will only display inside Jupyter"
+                "Jupyter was not detected. `.show()` displays only inside Jupyter."
             )
             return

mlrun/datastore/storeytargets.py CHANGED
@@ -89,8 +89,8 @@ class StreamStoreyTarget(storey.StreamTarget):
             raise mlrun.errors.MLRunInvalidArgumentError("StreamTarget requires a path")

         access_key = storage_options.get("v3io_access_key")
-        storage = (
-            V3ioDriver(webapi=endpoint or mlrun.mlconf.v3io_api, access_key=access_key),
+        storage = V3ioDriver(
+            webapi=endpoint or mlrun.mlconf.v3io_api, access_key=access_key
         )

         if storage_options:
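Note: this is a behavioral fix, not a reformat. The removed version ended the parenthesized expression with a trailing comma, which bound `storage` to a one-element tuple rather than the driver instance. The pitfall in isolation:

    driver = (object(),)  # trailing comma inside parentheses -> 1-tuple
    assert isinstance(driver, tuple)

    driver = object()     # no trailing comma -> the instance itself
    assert not isinstance(driver, tuple)
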
mlrun/db/httpdb.py CHANGED
@@ -525,10 +525,6 @@ class HTTPRunDB(RunDBInterface):
             server_cfg.get("external_platform_tracking")
             or config.external_platform_tracking
         )
-        config.model_endpoint_monitoring.store_type = (
-            server_cfg.get("model_endpoint_monitoring_store_type")
-            or config.model_endpoint_monitoring.store_type
-        )
         config.model_endpoint_monitoring.endpoint_store_connection = (
             server_cfg.get("model_endpoint_monitoring_endpoint_store_connection")
             or config.model_endpoint_monitoring.endpoint_store_connection
@@ -1374,20 +1370,14 @@ class HTTPRunDB(RunDBInterface):
         :returns: :py:class:`~mlrun.common.schemas.GroupedByProjectRuntimeResourcesOutput` listing the runtime resources
             that were removed.
         """
-        if grace_period is None:
-            grace_period = config.runtime_resources_deletion_grace_period
-            logger.info(
-                "Using default grace period for runtime resources deletion",
-                grace_period=grace_period,
-            )
-
         params = {
             "label-selector": label_selector,
             "kind": kind,
             "object-id": object_id,
             "force": force,
-            "grace-period": grace_period,
         }
+        if grace_period is not None:
+            params["grace-period"] = grace_period
         error = "Failed deleting runtime resources"
         project_path = project if project else "*"
         response = self.api_call(
mlrun/db/nopdb.py CHANGED
@@ -21,6 +21,7 @@ import mlrun.common.formatters
 import mlrun.common.runtimes.constants
 import mlrun.common.schemas
 import mlrun.errors
+import mlrun.lists

 from ..config import config
 from ..utils import logger
@@ -73,6 +74,22 @@ class NopDB(RunDBInterface):
     def abort_run(self, uid, project="", iter=0, timeout=45, status_text=""):
         pass

+    def list_runtime_resources(
+        self,
+        project: Optional[str] = None,
+        label_selector: Optional[str] = None,
+        kind: Optional[str] = None,
+        object_id: Optional[str] = None,
+        group_by: Optional[
+            mlrun.common.schemas.ListRuntimeResourcesGroupByField
+        ] = None,
+    ) -> Union[
+        mlrun.common.schemas.RuntimeResourcesOutput,
+        mlrun.common.schemas.GroupedByJobRuntimeResourcesOutput,
+        mlrun.common.schemas.GroupedByProjectRuntimeResourcesOutput,
+    ]:
+        return []
+
     def read_run(
         self,
         uid,
@@ -108,7 +125,7 @@ class NopDB(RunDBInterface):
         max_partitions: int = 0,
         with_notifications: bool = False,
     ):
-        pass
+        return mlrun.lists.RunList()

     def del_run(self, uid, project="", iter=0):
         pass
@@ -149,7 +166,7 @@ class NopDB(RunDBInterface):
         format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
         limit: int = None,
     ):
-        pass
+        return mlrun.lists.ArtifactList()

     def del_artifact(
         self,
@@ -181,7 +198,7 @@ class NopDB(RunDBInterface):
     def list_functions(
         self, name=None, project="", tag="", labels=None, since=None, until=None
     ):
-        pass
+        return []

     def tag_objects(
         self,
@@ -421,7 +438,7 @@ class NopDB(RunDBInterface):
         ] = mlrun.common.formatters.PipelineFormat.metadata_only,
         page_size: int = None,
     ) -> mlrun.common.schemas.PipelinesOutput:
-        pass
+        return mlrun.common.schemas.PipelinesOutput(runs=[], total_size=0)

     def create_project_secrets(
         self,
mlrun/execution.py CHANGED
@@ -634,7 +634,9 @@ class MLClientCtx:
         :param viewer: Kubeflow viewer type
         :param target_path: Absolute target path (instead of using artifact_path + local_path)
         :param src_path: Deprecated, use local_path
-        :param upload: Upload to datastore (default is True)
+        :param upload: Whether to upload the artifact to the datastore. If not provided, and the `local_path`
+                       is not a directory, upload occurs by default. Directories are uploaded only when this
+                       flag is explicitly set to `True`.
         :param labels: A set of key/value labels to tag the artifact with
         :param format: Optional, format to use (e.g. csv, parquet, ..)
         :param db_key: The key to use in the artifact DB table, by default its run name + '_' + key
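Note: per the reworded docstring (the same wording is applied to ArtifactManager above), a plain file is still uploaded by default, while a directory is uploaded only on explicit request. A hedged usage sketch, with illustrative keys and paths:

    # file: uploaded by default when `upload` is left unset
    context.log_artifact("report", local_path="report.html")

    # directory: uploaded only with an explicit upload=True
    context.log_artifact("model-dir", local_path="model/", upload=True)
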
mlrun/feature_store/api.py CHANGED
@@ -1051,6 +1051,7 @@ def _ingest_with_spark(

     spark = (
         pyspark.sql.SparkSession.builder.appName(session_name)
+        .config("spark.driver.memory", "2g")
         .config("spark.sql.session.timeZone", "UTC")
         .getOrCreate()
     )
mlrun/feature_store/retrieval/spark_merger.py CHANGED
@@ -188,9 +188,13 @@ class SparkFeatureMerger(BaseMerger):

         if self.spark is None:
             # create spark context
-            self.spark = SparkSession.builder.appName(
-                f"vector-merger-{self.vector.metadata.name}"
-            ).getOrCreate()
+            self.spark = (
+                SparkSession.builder.appName(
+                    f"vector-merger-{self.vector.metadata.name}"
+                )
+                .config("spark.driver.memory", "2g")
+                .getOrCreate()
+            )

     def _get_engine_df(
         self,
mlrun/frameworks/_common/plan.py CHANGED
@@ -11,12 +11,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
+
 from abc import ABC, abstractmethod

 import mlrun
 from mlrun.artifacts import Artifact
-from mlrun.utils.helpers import is_ipython
+from mlrun.utils.helpers import is_jupyter


 class Plan(ABC):
@@ -84,7 +84,7 @@ class Plan(ABC):
             return

         # Call the correct display method according to the kernel:
-        if is_ipython:
+        if is_jupyter:
             self._gui_display()
         else:
             self._cli_display()
mlrun/frameworks/_ml_common/plan.py CHANGED
@@ -16,7 +16,7 @@ import json
 from abc import ABC, abstractmethod
 from enum import Enum

-from IPython.core.display import HTML, display
+from IPython.display import HTML, display

 import mlrun

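Note: this hunk and the next migrate off `IPython.core.display`, which is deprecated as a public import location; `IPython.display` exports the same names:

    from IPython.display import HTML, display

    display(HTML("<b>rendered in the notebook</b>"))
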
mlrun/frameworks/parallel_coordinates.py CHANGED
@@ -18,8 +18,7 @@ from typing import Union

 import numpy as np
 import pandas as pd
-from IPython.core.display import HTML
-from IPython.display import display
+from IPython.display import HTML, display
 from pandas.api.types import is_numeric_dtype, is_string_dtype

 import mlrun
@@ -216,7 +215,7 @@ def _show_and_export_html(html: str, show=None, filename=None, runs_list=None):
             fp.write("</body></html>")
         else:
             fp.write(html)
-    if show or (show is None and mlrun.utils.is_ipython):
+    if show or (show is None and mlrun.utils.is_jupyter):
         display(HTML(html))
     if runs_list and len(runs_list) <= max_table_rows:
         display(HTML(html_table))
mlrun/launcher/client.py CHANGED
@@ -14,7 +14,7 @@
 import abc
 from typing import Optional

-import IPython
+import IPython.display

 import mlrun.common.constants as mlrun_constants
 import mlrun.errors
@@ -22,7 +22,7 @@ import mlrun.launcher.base as launcher
 import mlrun.lists
 import mlrun.model
 import mlrun.runtimes
-from mlrun.utils import logger
+import mlrun.utils


 class ClientBaseLauncher(launcher.BaseLauncher, abc.ABC):
@@ -128,10 +128,10 @@ class ClientBaseLauncher(launcher.BaseLauncher, abc.ABC):
         if result:
             results_tbl.append(result)
         else:
-            logger.info("no returned result (job may still be in progress)")
+            mlrun.utils.logger.info("no returned result (job may still be in progress)")
             results_tbl.append(run.to_dict())

-        if mlrun.utils.is_ipython and mlrun.mlconf.ipython_widget:
+        if mlrun.utils.is_jupyter and mlrun.mlconf.ipython_widget:
             results_tbl.show()
             print()
             ui_url = mlrun.utils.get_ui_url(project, uid)
@@ -147,9 +147,9 @@ class ClientBaseLauncher(launcher.BaseLauncher, abc.ABC):
         project_flag = f"-p {project}" if project else ""
         info_cmd = f"mlrun get run {uid} {project_flag}"
         logs_cmd = f"mlrun logs {uid} {project_flag}"
-        logger.info(
+        mlrun.utils.logger.info(
             "To track results use the CLI", info_cmd=info_cmd, logs_cmd=logs_cmd
         )
         ui_url = mlrun.utils.get_ui_url(project, uid)
         if ui_url:
-            logger.info("Or click for UI", ui_url=ui_url)
+            mlrun.utils.logger.info("Or click for UI", ui_url=ui_url)
mlrun/model_monitoring/controller.py CHANGED
@@ -219,7 +219,7 @@ class _BatchWindowGenerator:
         # If the endpoint does not have a stream, `last_updated` should be
         # the minimum between the current time and the last updated time.
         # This compensates for the bumping mechanism - see
-        # `bump_model_endpoint_last_request`.
+        # `update_model_endpoint_last_request`.
         last_updated = min(int(datetime_now().timestamp()), last_updated)
         logger.debug(
             "The endpoint does not have a stream", last_updated=last_updated
mlrun/model_monitoring/db/stores/sqldb/sql_store.py CHANGED
@@ -588,7 +588,11 @@ class SQLStoreBase(StoreBase):

         for endpoint_dict in endpoints:
             endpoint_id = endpoint_dict[mm_schemas.EventFieldType.UID]
-
+            logger.debug(
+                "Deleting model endpoint resources from the SQL tables",
+                endpoint_id=endpoint_id,
+                project=self.project,
+            )
             # Delete last analyzed records
             self._delete_last_analyzed(endpoint_id=endpoint_id)

@@ -598,6 +602,16 @@ class SQLStoreBase(StoreBase):

             # Delete model endpoint record
             self.delete_model_endpoint(endpoint_id=endpoint_id)
+            logger.debug(
+                "Successfully deleted model endpoint resources",
+                endpoint_id=endpoint_id,
+                project=self.project,
+            )
+
+        logger.debug(
+            "Successfully deleted model monitoring endpoints resources from the SQL tables",
+            project=self.project,
+        )

     def get_model_endpoint_metrics(
         self, endpoint_id: str, type: mm_schemas.ModelEndpointMonitoringMetricType
mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py CHANGED
@@ -305,10 +305,22 @@ class KVStoreBase(StoreBase):
                 endpoint_id = endpoint_dict[mm_schemas.EventFieldType.ENDPOINT_ID]
             else:
                 endpoint_id = endpoint_dict[mm_schemas.EventFieldType.UID]
+
+            logger.debug(
+                "Deleting model endpoint resources from the V3IO KV table",
+                endpoint_id=endpoint_id,
+                project=self.project,
+            )
+
             self.delete_model_endpoint(
                 endpoint_id,
             )

+        logger.debug(
+            "Successfully deleted model monitoring endpoints from the V3IO KV table",
+            project=self.project,
+        )
+
         # Delete remain records in the KV
         all_records = self.client.kv.new_cursor(
             container=self.container,
mlrun/model_monitoring/db/tsdb/tdengine/schemas.py CHANGED
@@ -163,8 +163,8 @@ class TDEngineSchema:
     @staticmethod
     def _get_records_query(
         table: str,
-        start: datetime,
-        end: datetime,
+        start: datetime.datetime,
+        end: datetime.datetime,
         columns_to_filter: list[str] = None,
         filter_query: Optional[str] = None,
         interval: Optional[str] = None,
mlrun/model_monitoring/helpers.py CHANGED
@@ -63,7 +63,6 @@ def get_stream_path(
     )

     if not stream_uri or stream_uri == "v3io":
-        # TODO : remove the first part of this condition in 1.9.0
         stream_uri = mlrun.mlconf.get_model_monitoring_file_target_path(
             project=project,
             kind=mm_constants.FileTargetKind.STREAM,
@@ -71,8 +70,6 @@ def get_stream_path(
             function_name=function_name,
         )

-    if isinstance(stream_uri, list):  # ML-6043 - user side gets only the new stream uri
-        stream_uri = stream_uri[1]  # get new stream path, under projects
     return mlrun.common.model_monitoring.helpers.parse_monitoring_stream_path(
         stream_uri=stream_uri, project=project, function_name=function_name
     )
@@ -179,7 +176,7 @@ def _get_monitoring_time_window_from_controller_run(
 def update_model_endpoint_last_request(
     project: str,
     model_endpoint: ModelEndpoint,
-    current_request: datetime,
+    current_request: datetime.datetime,
     db: "RunDBInterface",
 ) -> None:
     """
@@ -190,7 +187,8 @@ def update_model_endpoint_last_request(
     :param current_request: current request time
     :param db: DB interface.
     """
-    if model_endpoint.spec.stream_path != "":
+    is_model_server_endpoint = model_endpoint.spec.stream_path != ""
+    if is_model_server_endpoint:
         current_request = current_request.isoformat()
         logger.info(
             "Update model endpoint last request time (EP with serving)",
@@ -204,12 +202,13 @@ def update_model_endpoint_last_request(
             endpoint_id=model_endpoint.metadata.uid,
             attributes={mm_constants.EventFieldType.LAST_REQUEST: current_request},
         )
-    else:
+    else:  # model endpoint without any serving function - close the window "manually"
         try:
             time_window = _get_monitoring_time_window_from_controller_run(project, db)
         except mlrun.errors.MLRunNotFoundError:
-            logger.debug(
-                "Not bumping model endpoint last request time - the monitoring controller isn't deployed yet"
+            logger.warn(
+                "Not bumping model endpoint last request time - the monitoring controller isn't deployed yet.\n"
+                "Call `project.enable_model_monitoring()` first."
             )
             return

mlrun/model_monitoring/writer.py CHANGED
@@ -160,7 +160,9 @@ class ModelMonitoringWriter(StepToDict):
             event_kind = f"{event_kind}_detected"
         else:
             event_kind = f"{event_kind}_suspected"
-        return alert_objects.EventKind(value=event_kind)
+        return alert_objects.EventKind(
+            value=mlrun.utils.helpers.normalize_name(event_kind)
+        )

     @staticmethod
     def _reconstruct_event(event: _RawEvent) -> tuple[_AppResultEvent, WriterEventKind]:
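Note: `normalize_name` bridges the writer's snake_case event kinds to the kebab-case EventKind values introduced in mlrun/common/schemas/alert.py above. A sketch of the expected mapping (the helper may also warn about the underscores):

    from mlrun.utils.helpers import normalize_name

    normalize_name("data_drift_detected")  # -> "data-drift-detected"
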
mlrun/projects/pipelines.py CHANGED
@@ -80,6 +80,7 @@ class WorkflowSpec(mlrun.model.ModelObj):
         schedule: typing.Union[str, mlrun.common.schemas.ScheduleCronTrigger] = None,
         cleanup_ttl: typing.Optional[int] = None,
         image: typing.Optional[str] = None,
+        workflow_runner_node_selector: typing.Optional[dict[str, str]] = None,
     ):
         self.engine = engine
         self.code = code
@@ -93,6 +94,7 @@ class WorkflowSpec(mlrun.model.ModelObj):
         self._tmp_path = None
         self.schedule = schedule
         self.image = image
+        self.workflow_runner_node_selector = workflow_runner_node_selector

     def get_source_file(self, context=""):
         if not self.code and not self.path:
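Note: together with the `workflow_runner_node_selector` field added to the workflow schema earlier in this diff, this lets a project pin the workflow runner pod to specific nodes. A hedged construction sketch (the path and selector are illustrative; `path` is assumed to remain a constructor argument, as `get_source_file` suggests):

    from mlrun.projects.pipelines import WorkflowSpec

    spec = WorkflowSpec(
        path="./workflow.py",
        workflow_runner_node_selector={"kubernetes.io/arch": "amd64"},
    )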