mlrun 1.8.0rc1__py3-none-any.whl → 1.8.0rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (83)
  1. mlrun/__init__.py +5 -7
  2. mlrun/__main__.py +1 -1
  3. mlrun/artifacts/__init__.py +1 -0
  4. mlrun/artifacts/document.py +313 -0
  5. mlrun/artifacts/manager.py +2 -0
  6. mlrun/common/formatters/project.py +9 -0
  7. mlrun/common/schemas/__init__.py +4 -0
  8. mlrun/common/schemas/alert.py +31 -18
  9. mlrun/common/schemas/api_gateway.py +3 -3
  10. mlrun/common/schemas/artifact.py +7 -7
  11. mlrun/common/schemas/auth.py +6 -4
  12. mlrun/common/schemas/background_task.py +7 -7
  13. mlrun/common/schemas/client_spec.py +2 -2
  14. mlrun/common/schemas/clusterization_spec.py +2 -2
  15. mlrun/common/schemas/common.py +5 -5
  16. mlrun/common/schemas/constants.py +15 -0
  17. mlrun/common/schemas/datastore_profile.py +1 -1
  18. mlrun/common/schemas/feature_store.py +9 -9
  19. mlrun/common/schemas/frontend_spec.py +4 -4
  20. mlrun/common/schemas/function.py +10 -10
  21. mlrun/common/schemas/hub.py +1 -1
  22. mlrun/common/schemas/k8s.py +3 -3
  23. mlrun/common/schemas/memory_reports.py +3 -3
  24. mlrun/common/schemas/model_monitoring/grafana.py +1 -1
  25. mlrun/common/schemas/model_monitoring/model_endpoint_v2.py +1 -1
  26. mlrun/common/schemas/model_monitoring/model_endpoints.py +1 -1
  27. mlrun/common/schemas/notification.py +18 -3
  28. mlrun/common/schemas/object.py +1 -1
  29. mlrun/common/schemas/pagination.py +4 -4
  30. mlrun/common/schemas/partition.py +16 -1
  31. mlrun/common/schemas/pipeline.py +2 -2
  32. mlrun/common/schemas/project.py +22 -17
  33. mlrun/common/schemas/runs.py +2 -2
  34. mlrun/common/schemas/runtime_resource.py +5 -5
  35. mlrun/common/schemas/schedule.py +1 -1
  36. mlrun/common/schemas/secret.py +1 -1
  37. mlrun/common/schemas/tag.py +3 -3
  38. mlrun/common/schemas/workflow.py +5 -5
  39. mlrun/config.py +23 -1
  40. mlrun/datastore/datastore_profile.py +38 -19
  41. mlrun/datastore/vectorstore.py +186 -0
  42. mlrun/db/base.py +58 -6
  43. mlrun/db/httpdb.py +267 -15
  44. mlrun/db/nopdb.py +44 -5
  45. mlrun/execution.py +47 -1
  46. mlrun/model.py +2 -2
  47. mlrun/model_monitoring/applications/results.py +2 -2
  48. mlrun/model_monitoring/db/tsdb/base.py +2 -2
  49. mlrun/model_monitoring/db/tsdb/tdengine/schemas.py +37 -13
  50. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +32 -40
  51. mlrun/model_monitoring/helpers.py +4 -10
  52. mlrun/model_monitoring/stream_processing.py +14 -11
  53. mlrun/platforms/__init__.py +44 -13
  54. mlrun/projects/__init__.py +6 -1
  55. mlrun/projects/pipelines.py +184 -55
  56. mlrun/projects/project.py +309 -33
  57. mlrun/run.py +4 -1
  58. mlrun/runtimes/base.py +2 -1
  59. mlrun/runtimes/mounts.py +572 -0
  60. mlrun/runtimes/nuclio/function.py +1 -2
  61. mlrun/runtimes/pod.py +82 -18
  62. mlrun/runtimes/remotesparkjob.py +1 -1
  63. mlrun/runtimes/sparkjob/spark3job.py +1 -1
  64. mlrun/utils/clones.py +1 -1
  65. mlrun/utils/helpers.py +12 -2
  66. mlrun/utils/logger.py +2 -2
  67. mlrun/utils/notifications/notification/__init__.py +22 -19
  68. mlrun/utils/notifications/notification/base.py +12 -12
  69. mlrun/utils/notifications/notification/console.py +6 -6
  70. mlrun/utils/notifications/notification/git.py +6 -6
  71. mlrun/utils/notifications/notification/ipython.py +6 -6
  72. mlrun/utils/notifications/notification/mail.py +149 -0
  73. mlrun/utils/notifications/notification/slack.py +6 -6
  74. mlrun/utils/notifications/notification/webhook.py +6 -6
  75. mlrun/utils/notifications/notification_pusher.py +20 -12
  76. mlrun/utils/regex.py +2 -0
  77. mlrun/utils/version/version.json +2 -2
  78. {mlrun-1.8.0rc1.dist-info → mlrun-1.8.0rc3.dist-info}/METADATA +190 -186
  79. {mlrun-1.8.0rc1.dist-info → mlrun-1.8.0rc3.dist-info}/RECORD +83 -79
  80. {mlrun-1.8.0rc1.dist-info → mlrun-1.8.0rc3.dist-info}/WHEEL +1 -1
  81. {mlrun-1.8.0rc1.dist-info → mlrun-1.8.0rc3.dist-info}/LICENSE +0 -0
  82. {mlrun-1.8.0rc1.dist-info → mlrun-1.8.0rc3.dist-info}/entry_points.txt +0 -0
  83. {mlrun-1.8.0rc1.dist-info → mlrun-1.8.0rc3.dist-info}/top_level.txt +0 -0
mlrun/model.py CHANGED
@@ -24,7 +24,7 @@ from datetime import datetime
 from os import environ
 from typing import Any, Optional, Union
 
-import pydantic.error_wrappers
+import pydantic.v1.error_wrappers
 
 import mlrun
 import mlrun.common.constants as mlrun_constants
@@ -739,7 +739,7 @@ class Notification(ModelObj):
     def validate_notification(self):
         try:
             mlrun.common.schemas.notification.Notification(**self.to_dict())
-        except pydantic.error_wrappers.ValidationError as exc:
+        except pydantic.v1.error_wrappers.ValidationError as exc:
             raise mlrun.errors.MLRunInvalidArgumentError(
                 "Invalid notification object"
             ) from exc
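
Both hunks above, and the pydantic hunks in the two model-monitoring files below, apply the same migration: imports move from the top-level pydantic namespace to pydantic.v1, the compatibility namespace that Pydantic 2.x ships for the legacy v1 API. A minimal, self-contained sketch of the pattern (the Notification fields here are invented for illustration, and ValueError stands in for MLRunInvalidArgumentError):

import pydantic.v1
import pydantic.v1.error_wrappers


class Notification(pydantic.v1.BaseModel):
    # Hypothetical fields, for illustration only
    name: str
    severity: str = "info"


def validate_notification(payload: dict) -> Notification:
    try:
        return Notification(**payload)
    except pydantic.v1.error_wrappers.ValidationError as exc:
        # Mirrors the re-raise in the hunk above
        raise ValueError("Invalid notification object") from exc


validate_notification({"name": "on-failure"})  # ok
# validate_notification({}) raises ValueError (required "name" is missing)
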
mlrun/model_monitoring/applications/results.py CHANGED
@@ -17,8 +17,8 @@ import json
 import re
 from abc import ABC, abstractmethod
 
-from pydantic import validator
-from pydantic.dataclasses import dataclass
+from pydantic.v1 import validator
+from pydantic.v1.dataclasses import dataclass
 
 import mlrun.common.helpers
 import mlrun.common.model_monitoring.helpers
mlrun/model_monitoring/db/tsdb/base.py CHANGED
@@ -17,7 +17,7 @@ from abc import ABC, abstractmethod
 from datetime import datetime
 
 import pandas as pd
-import pydantic
+import pydantic.v1
 
 import mlrun.common.schemas.model_monitoring as mm_schemas
 import mlrun.model_monitoring.db.tsdb.helpers
@@ -427,7 +427,7 @@ class TSDBConnector(ABC):
                     ), # pyright: ignore[reportArgumentType]
                 )
             )
-        except pydantic.ValidationError:
+        except pydantic.v1.ValidationError:
             logger.exception(
                 "Failed to convert data-frame into `ModelEndpointMonitoringResultValues`",
                 full_name=full_name,
mlrun/model_monitoring/db/tsdb/tdengine/schemas.py CHANGED
@@ -82,9 +82,10 @@ class TDEngineSchema:
         super_table: str,
         columns: dict[str, _TDEngineColumn],
         tags: dict[str, str],
+        project: str,
         database: Optional[str] = None,
     ):
-        self.super_table = super_table
+        self.super_table = f"{super_table}_{project.replace('-', '_')}"
         self.columns = columns
         self.tags = tags
         self.database = database or _MODEL_MONITORING_DATABASE
@@ -148,6 +149,9 @@ class TDEngineSchema:
     ) -> str:
         return f"DROP TABLE if EXISTS {self.database}.{subtable};"
 
+    def drop_supertable_query(self) -> str:
+        return f"DROP STABLE if EXISTS {self.database}.{self.super_table};"
+
     def _get_subtables_query(
         self,
         values: dict[str, Union[str, int, float, datetime.datetime]],
@@ -260,7 +264,7 @@ class TDEngineSchema:
 
 @dataclass
 class AppResultTable(TDEngineSchema):
-    def __init__(self, database: Optional[str] = None):
+    def __init__(self, project: str, database: Optional[str] = None):
         super_table = mm_schemas.TDEngineSuperTables.APP_RESULTS
         columns = {
             mm_schemas.WriterEvent.END_INFER_TIME: _TDEngineColumn.TIMESTAMP,
@@ -270,18 +274,23 @@ class AppResultTable(TDEngineSchema):
             mm_schemas.ResultData.RESULT_EXTRA_DATA: _TDEngineColumn.BINARY_1000,
         }
         tags = {
-            mm_schemas.EventFieldType.PROJECT: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.ENDPOINT_ID: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.APPLICATION_NAME: _TDEngineColumn.BINARY_64,
             mm_schemas.ResultData.RESULT_NAME: _TDEngineColumn.BINARY_64,
             mm_schemas.ResultData.RESULT_KIND: _TDEngineColumn.INT,
         }
-        super().__init__(super_table, columns, tags, database)
+        super().__init__(
+            super_table=super_table,
+            columns=columns,
+            tags=tags,
+            database=database,
+            project=project,
+        )
 
 
 @dataclass
 class Metrics(TDEngineSchema):
-    def __init__(self, database: Optional[str] = None):
+    def __init__(self, project: str, database: Optional[str] = None):
         super_table = mm_schemas.TDEngineSuperTables.METRICS
         columns = {
             mm_schemas.WriterEvent.END_INFER_TIME: _TDEngineColumn.TIMESTAMP,
@@ -289,17 +298,22 @@ class Metrics(TDEngineSchema):
             mm_schemas.MetricData.METRIC_VALUE: _TDEngineColumn.FLOAT,
         }
         tags = {
-            mm_schemas.EventFieldType.PROJECT: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.ENDPOINT_ID: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.APPLICATION_NAME: _TDEngineColumn.BINARY_64,
             mm_schemas.MetricData.METRIC_NAME: _TDEngineColumn.BINARY_64,
         }
-        super().__init__(super_table, columns, tags, database)
+        super().__init__(
+            super_table=super_table,
+            columns=columns,
+            tags=tags,
+            database=database,
+            project=project,
+        )
 
 
 @dataclass
 class Predictions(TDEngineSchema):
-    def __init__(self, database: Optional[str] = None):
+    def __init__(self, project: str, database: Optional[str] = None):
         super_table = mm_schemas.TDEngineSuperTables.PREDICTIONS
         columns = {
             mm_schemas.EventFieldType.TIME: _TDEngineColumn.TIMESTAMP,
@@ -307,23 +321,33 @@ class Predictions(TDEngineSchema):
             mm_schemas.EventKeyMetrics.CUSTOM_METRICS: _TDEngineColumn.BINARY_1000,
         }
         tags = {
-            mm_schemas.EventFieldType.PROJECT: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.ENDPOINT_ID: _TDEngineColumn.BINARY_64,
         }
-        super().__init__(super_table, columns, tags, database)
+        super().__init__(
+            super_table=super_table,
+            columns=columns,
+            tags=tags,
+            database=database,
+            project=project,
+        )
 
 
 @dataclass
 class Errors(TDEngineSchema):
-    def __init__(self, database: Optional[str] = None):
+    def __init__(self, project: str, database: Optional[str] = None):
         super_table = mm_schemas.TDEngineSuperTables.ERRORS
         columns = {
             mm_schemas.EventFieldType.TIME: _TDEngineColumn.TIMESTAMP,
             mm_schemas.EventFieldType.MODEL_ERROR: _TDEngineColumn.BINARY_1000,
         }
         tags = {
-            mm_schemas.EventFieldType.PROJECT: _TDEngineColumn.BINARY_64,
             mm_schemas.WriterEvent.ENDPOINT_ID: _TDEngineColumn.BINARY_64,
             mm_schemas.EventFieldType.ERROR_TYPE: _TDEngineColumn.BINARY_64,
         }
-        super().__init__(super_table, columns, tags, database)
+        super().__init__(
+            super_table=super_table,
+            columns=columns,
+            tags=tags,
+            database=database,
+            project=project,
+        )
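
The schema change makes every supertable project-scoped: the project name is folded into the supertable name (with dashes normalized to underscores, presumably because dashes are awkward in TDEngine identifiers), the per-row project tag is dropped, and a drop_supertable_query helper is added so a whole project's data can be removed in one statement. A toy rendering of the naming rule (ToySchema and its defaults are illustrative, not the real class):

class ToySchema:
    def __init__(self, super_table: str, project: str, database: str = "mm_db"):
        # Same rule as TDEngineSchema.__init__ above: embed the project in the name
        self.super_table = f"{super_table}_{project.replace('-', '_')}"
        self.database = database

    def drop_supertable_query(self) -> str:
        return f"DROP STABLE if EXISTS {self.database}.{self.super_table};"


schema = ToySchema("app_results", project="my-project")
assert schema.super_table == "app_results_my_project"
print(schema.drop_supertable_query())
# DROP STABLE if EXISTS mm_db.app_results_my_project;
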
mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py CHANGED
@@ -81,16 +81,16 @@ class TDEngineConnector(TSDBConnector):
         """Initialize the super tables for the TSDB."""
         self.tables = {
             mm_schemas.TDEngineSuperTables.APP_RESULTS: tdengine_schemas.AppResultTable(
-                self.database
+                project=self.project, database=self.database
             ),
             mm_schemas.TDEngineSuperTables.METRICS: tdengine_schemas.Metrics(
-                self.database
+                project=self.project, database=self.database
             ),
             mm_schemas.TDEngineSuperTables.PREDICTIONS: tdengine_schemas.Predictions(
-                self.database
+                project=self.project, database=self.database
             ),
             mm_schemas.TDEngineSuperTables.ERRORS: tdengine_schemas.Errors(
-                self.database
+                project=self.project, database=self.database
             ),
         }
 
@@ -114,11 +114,9 @@ class TDEngineConnector(TSDBConnector):
         """
 
         table_name = (
-            f"{self.project}_"
             f"{event[mm_schemas.WriterEvent.ENDPOINT_ID]}_"
-            f"{event[mm_schemas.WriterEvent.APPLICATION_NAME]}_"
+            f"{event[mm_schemas.WriterEvent.APPLICATION_NAME]}"
         )
-        event[mm_schemas.EventFieldType.PROJECT] = self.project
 
         if kind == mm_schemas.WriterEventKind.RESULT:
             # Write a new result
@@ -188,7 +186,9 @@ class TDEngineConnector(TSDBConnector):
             name=name,
             after=after,
             url=self._tdengine_connection_string,
-            supertable=mm_schemas.TDEngineSuperTables.PREDICTIONS,
+            supertable=self.tables[
+                mm_schemas.TDEngineSuperTables.PREDICTIONS
+            ].super_table,
             table_col=mm_schemas.EventFieldType.TABLE_COLUMN,
             time_col=mm_schemas.EventFieldType.TIME,
             database=self.database,
@@ -197,7 +197,6 @@ class TDEngineConnector(TSDBConnector):
                 mm_schemas.EventKeyMetrics.CUSTOM_METRICS,
             ],
             tag_cols=[
-                mm_schemas.EventFieldType.PROJECT,
                 mm_schemas.EventFieldType.ENDPOINT_ID,
             ],
             max_events=1000,
@@ -227,7 +226,7 @@ class TDEngineConnector(TSDBConnector):
             name="tsdb_error",
             after="error_extractor",
             url=self._tdengine_connection_string,
-            supertable=mm_schemas.TDEngineSuperTables.ERRORS,
+            supertable=self.tables[mm_schemas.TDEngineSuperTables.ERRORS].super_table,
             table_col=mm_schemas.EventFieldType.TABLE_COLUMN,
             time_col=mm_schemas.EventFieldType.TIME,
             database=self.database,
@@ -235,7 +234,6 @@ class TDEngineConnector(TSDBConnector):
                 mm_schemas.EventFieldType.MODEL_ERROR,
             ],
             tag_cols=[
-                mm_schemas.EventFieldType.PROJECT,
                 mm_schemas.EventFieldType.ENDPOINT_ID,
                 mm_schemas.EventFieldType.ERROR_TYPE,
             ],
@@ -251,22 +249,23 @@ class TDEngineConnector(TSDBConnector):
             "Deleting all project resources using the TDEngine connector",
             project=self.project,
         )
+        drop_statements = []
         for table in self.tables:
-            get_subtable_names_query = self.tables[table]._get_subtables_query(
-                values={mm_schemas.EventFieldType.PROJECT: self.project}
-            )
-            subtables = self.connection.run(
-                query=get_subtable_names_query,
+            drop_statements.append(self.tables[table].drop_supertable_query())
+
+        try:
+            self.connection.run(
+                statements=drop_statements,
                 timeout=self._timeout,
                 retries=self._retries,
-            ).data
-            drop_statements = []
-            for subtable in subtables:
-                drop_statements.append(
-                    self.tables[table]._drop_subtable_query(subtable=subtable[0])
-                )
-            self.connection.run(
-                statements=drop_statements, timeout=self._timeout, retries=self._retries
+            )
+        except Exception as e:
+            logger.warning(
+                "Failed to drop TDEngine tables. You may need to drop them manually. "
+                "These can be found under the following supertables: app_results, "
+                "metrics, and predictions.",
+                project=self.project,
+                error=mlrun.errors.err_to_str(e),
             )
         logger.debug(
             "Deleted all project resources using the TDEngine connector",
@@ -331,13 +330,6 @@
         :raise: MLRunInvalidArgumentError if query the provided table failed.
         """
 
-        project_condition = f"project = '{self.project}'"
-        filter_query = (
-            f"({filter_query}) AND ({project_condition})"
-            if filter_query
-            else project_condition
-        )
-
        full_query = tdengine_schemas.TDEngineSchema._get_records_query(
            table=table,
            start=start,
@@ -400,12 +392,12 @@
                project=self.project,
                endpoint_id=endpoint_id,
            )
-            table = mm_schemas.TDEngineSuperTables.METRICS
+            table = self.tables[mm_schemas.TDEngineSuperTables.METRICS].super_table
             name = mm_schemas.MetricData.METRIC_NAME
             columns += [name, mm_schemas.MetricData.METRIC_VALUE]
             df_handler = self.df_to_metrics_values
         elif type == "results":
-            table = mm_schemas.TDEngineSuperTables.APP_RESULTS
+            table = self.tables[mm_schemas.TDEngineSuperTables.APP_RESULTS].super_table
             name = mm_schemas.ResultData.RESULT_NAME
             columns += [
                 name,
@@ -477,7 +469,7 @@
                 "both or neither of `aggregation_window` and `agg_funcs` must be provided"
             )
         df = self._get_records(
-            table=mm_schemas.TDEngineSuperTables.PREDICTIONS,
+            table=self.tables[mm_schemas.TDEngineSuperTables.PREDICTIONS].super_table,
             start=start,
             end=end,
             columns=[mm_schemas.EventFieldType.LATENCY],
@@ -527,7 +519,7 @@
         )
         start, end = self._get_start_end(start, end)
         df = self._get_records(
-            table=mm_schemas.TDEngineSuperTables.PREDICTIONS,
+            table=self.tables[mm_schemas.TDEngineSuperTables.PREDICTIONS].super_table,
             start=start,
             end=end,
             columns=[
@@ -571,7 +563,7 @@
         start = start or (mlrun.utils.datetime_now() - timedelta(hours=24))
         start, end = self._get_start_end(start, end)
         df = self._get_records(
-            table=mm_schemas.TDEngineSuperTables.APP_RESULTS,
+            table=self.tables[mm_schemas.TDEngineSuperTables.APP_RESULTS].super_table,
             start=start,
             end=end,
             columns=[
@@ -602,7 +594,7 @@
     ) -> pd.DataFrame:
         start, end = self._get_start_end(start, end)
         df = self._get_records(
-            table=mm_schemas.TDEngineSuperTables.METRICS,
+            table=self.tables[mm_schemas.TDEngineSuperTables.METRICS].super_table,
             start=start,
             end=end,
             columns=[
@@ -638,7 +630,7 @@
     ) -> pd.DataFrame:
         start, end = self._get_start_end(start, end)
         df = self._get_records(
-            table=mm_schemas.TDEngineSuperTables.APP_RESULTS,
+            table=self.tables[mm_schemas.TDEngineSuperTables.APP_RESULTS].super_table,
             start=start,
             end=end,
             columns=[
@@ -679,7 +671,7 @@
         )
         start, end = self._get_start_end(start, end)
         df = self._get_records(
-            table=mm_schemas.TDEngineSuperTables.ERRORS,
+            table=self.tables[mm_schemas.TDEngineSuperTables.ERRORS].super_table,
             start=start,
             end=end,
             columns=[
@@ -711,7 +703,7 @@
         )
         start, end = self._get_start_end(start, end)
         df = self._get_records(
-            table=mm_schemas.TDEngineSuperTables.PREDICTIONS,
+            table=self.tables[mm_schemas.TDEngineSuperTables.PREDICTIONS].super_table,
             start=start,
             end=end,
             columns=[
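
With project-scoped supertables, delete_project_resources no longer has to enumerate subtables by project tag: it issues one DROP STABLE per supertable and treats failures as best-effort, logging instead of raising. A hedged sketch of the new control flow (FakeConnection and FakeSchema are stand-ins; only the statements/timeout/retries call shape comes from the diff above):

class FakeSchema:
    def __init__(self, name: str):
        self._name = name

    def drop_supertable_query(self) -> str:
        return f"DROP STABLE if EXISTS mm_db.{self._name};"


class FakeConnection:
    def run(self, statements, timeout, retries):
        for statement in statements:
            print(f"executing: {statement}")


def delete_project_resources(tables: dict, connection, timeout=5.0, retries=3) -> None:
    # One drop per project-scoped supertable replaces the old per-subtable scan
    drop_statements = [schema.drop_supertable_query() for schema in tables.values()]
    try:
        connection.run(statements=drop_statements, timeout=timeout, retries=retries)
    except Exception as exc:
        # Best-effort cleanup: log and move on rather than fail project deletion
        print(f"Failed to drop TDEngine supertables, drop manually if needed: {exc}")


delete_project_resources(
    {"app_results": FakeSchema("app_results_my_project")}, FakeConnection()
)
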
mlrun/model_monitoring/helpers.py CHANGED
@@ -20,6 +20,7 @@ import numpy as np
 import pandas as pd
 
 if typing.TYPE_CHECKING:
+    from mlrun.datastore import DataItem
     from mlrun.db.base import RunDBInterface
     from mlrun.projects import MlrunProject
 
@@ -28,7 +29,6 @@ import mlrun.artifacts
 import mlrun.common.model_monitoring.helpers
 import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.data_types.infer
-import mlrun.datastore
 import mlrun.model_monitoring
 import mlrun.utils.helpers
 from mlrun.common.schemas.model_monitoring.model_endpoints import (
@@ -134,9 +134,7 @@ def _get_monitoring_drift_measures_file_path(project: str, endpoint_id: str) -> str:
     )
 
 
-def get_monitoring_current_stats_data(
-    project: str, endpoint_id: str
-) -> mlrun.datastore.DataItem:
+def get_monitoring_current_stats_data(project: str, endpoint_id: str) -> "DataItem":
     """
     getter for data item of current stats for project and endpoint
     :param project: project name str
@@ -150,9 +148,7 @@ def get_monitoring_current_stats_data(
     )
 
 
-def get_monitoring_drift_measures_data(
-    project: str, endpoint_id: str
-) -> mlrun.datastore.DataItem:
+def get_monitoring_drift_measures_data(project: str, endpoint_id: str) -> "DataItem":
     """
     getter for data item of drift measures for project and endpoint
     :param project: project name str
@@ -437,9 +433,7 @@ def _get_monitoring_schedules_file_path(*, project: str, endpoint_id: str) -> str:
     )
 
 
-def get_monitoring_schedules_data(
-    *, project: str, endpoint_id: str
-) -> mlrun.datastore.DataItem:
+def get_monitoring_schedules_data(*, project: str, endpoint_id: str) -> "DataItem":
     """
     Get the model monitoring schedules' data item of the project's model endpoint.
     """
mlrun/model_monitoring/stream_processing.py CHANGED
@@ -30,6 +30,7 @@ import mlrun.model_monitoring.db
 import mlrun.serving.states
 import mlrun.utils
 from mlrun.common.schemas.model_monitoring.constants import (
+    EndpointType,
     EventFieldType,
     EventKeyMetrics,
     EventLiveStats,
@@ -783,6 +784,7 @@ class MapFeatureNames(mlrun.feature_store.steps.MapClass):
         if not feature_names and self._infer_columns_from_data:
             feature_names = self._infer_feature_names_from_data(event)
 
+        endpoint_type = int(endpoint_record.get(EventFieldType.ENDPOINT_TYPE))
         if not feature_names:
             logger.warn(
                 "Feature names are not initialized, they will be automatically generated",
@@ -801,11 +803,12 @@ class MapFeatureNames(mlrun.feature_store.steps.MapClass):
             },
         )
 
-        update_monitoring_feature_set(
-            endpoint_record=endpoint_record,
-            feature_names=feature_names,
-            feature_values=feature_values,
-        )
+        if endpoint_type != EndpointType.ROUTER.value:
+            update_monitoring_feature_set(
+                endpoint_record=endpoint_record,
+                feature_names=feature_names,
+                feature_values=feature_values,
+            )
 
         # Similar process with label columns
         if not label_columns and self._infer_columns_from_data:
@@ -825,11 +828,12 @@ class MapFeatureNames(mlrun.feature_store.steps.MapClass):
             endpoint_id=endpoint_id,
             attributes={EventFieldType.LABEL_NAMES: json.dumps(label_columns)},
         )
-        update_monitoring_feature_set(
-            endpoint_record=endpoint_record,
-            feature_names=label_columns,
-            feature_values=label_values,
-        )
+        if endpoint_type != EndpointType.ROUTER.value:
+            update_monitoring_feature_set(
+                endpoint_record=endpoint_record,
+                feature_names=label_columns,
+                feature_values=label_values,
+            )
 
         self.label_columns[endpoint_id] = label_columns
         self.feature_names[endpoint_id] = feature_names
@@ -842,7 +846,6 @@ class MapFeatureNames(mlrun.feature_store.steps.MapClass):
         )
 
         # Update the endpoint type within the endpoint types dictionary
-        endpoint_type = int(endpoint_record.get(EventFieldType.ENDPOINT_TYPE))
         self.endpoint_type[endpoint_id] = endpoint_type
mlrun/platforms/__init__.py CHANGED
@@ -13,23 +13,11 @@
 # limitations under the License.
 
 import json
+import warnings
 from pprint import pprint
 from time import sleep
 from typing import Optional
 
-from mlrun_pipelines.common.mounts import VolumeMount
-from mlrun_pipelines.mounts import (
-    auto_mount,
-    mount_configmap,
-    mount_hostpath,
-    mount_pvc,
-    mount_s3,
-    mount_secret,
-    mount_v3io,
-    set_env_variables,
-    v3io_cred,
-)
-
 from .iguazio import (
     V3ioStreamClient,
     add_or_refresh_credentials,
@@ -37,6 +25,49 @@ from .iguazio import (
 )
 
 
+class _DeprecationHelper:
+    """A helper class to deprecate old schemas"""
+
+    def __init__(self, new_target: str, version="1.8.0"):
+        self._new_target = new_target
+        self._version = version
+
+    def __call__(self, *args, **kwargs):
+        self._warn()
+        return self._lazy_load()(*args, **kwargs)
+
+    def __getattr__(self, attr):
+        self._warn()
+        return getattr(self._lazy_load(), attr)
+
+    def _lazy_load(self, *args, **kwargs):
+        import mlrun.runtimes.mounts as mlrun_mounts
+
+        return getattr(mlrun_mounts, self._new_target)
+
+    def _warn(self):
+        warnings.warn(
+            f"mlrun.platforms.{self._new_target} is deprecated since version {self._version}, "
+            f"and will be removed in 1.10. Use mlrun.runtimes.mounts.{self._new_target} instead.",
+            FutureWarning,
+        )
+
+
+# TODO: Remove in 1.10
+# For backwards compatibility
+VolumeMount = _DeprecationHelper("VolumeMount")
+auto_mount = _DeprecationHelper("auto_mount")
+mount_configmap = _DeprecationHelper("mount_configmap")
+mount_hostpath = _DeprecationHelper("mount_hostpath")
+mount_pvc = _DeprecationHelper("mount_pvc")
+mount_s3 = _DeprecationHelper("mount_s3")
+mount_secret = _DeprecationHelper("mount_secret")
+mount_v3io = _DeprecationHelper("mount_v3io")
+set_env_variables = _DeprecationHelper("set_env_variables")
+v3io_cred = _DeprecationHelper("v3io_cred")
+# eof 'For backwards compatibility'
+
+
 def watch_stream(
     url,
     shard_ids: Optional[list] = None,
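
The mount helpers themselves now live in the new mlrun/runtimes/mounts.py (+572 lines in this diff); mlrun.platforms keeps the old names importable through _DeprecationHelper, which emits a FutureWarning on any call or attribute access and then lazily forwards to mlrun.runtimes.mounts, avoiding an import of the runtimes package at platforms import time. What callers can expect, sketched against the class above:

import warnings

import mlrun.platforms

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    try:
        # Goes through _DeprecationHelper.__call__: warn first, then forward
        mlrun.platforms.auto_mount()
    except Exception:
        # auto_mount may need a cluster context; the warning fires either way
        pass

assert any(issubclass(w.category, FutureWarning) for w in caught)

# New code should import from the new location directly:
from mlrun.runtimes.mounts import auto_mount
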
mlrun/projects/__init__.py CHANGED
@@ -27,7 +27,12 @@ __all__ = [
 ]
 
 from .operations import build_function, deploy_function, run_function  # noqa
-from .pipelines import load_and_run, pipeline_context  # noqa
+from .pipelines import (
+    import_remote_project,
+    load_and_run_workflow,
+    load_and_run,
+    pipeline_context,
+)  # noqa
 from .project import (
     MlrunProject,
     ProjectMetadata,