mlrun 1.7.0rc39__py3-none-any.whl → 1.7.0rc42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mlrun might be problematic.

Files changed (58)
  1. mlrun/common/constants.py +3 -0
  2. mlrun/common/db/sql_session.py +3 -2
  3. mlrun/common/helpers.py +0 -1
  4. mlrun/common/schemas/api_gateway.py +6 -6
  5. mlrun/common/schemas/common.py +4 -4
  6. mlrun/common/schemas/model_monitoring/model_endpoints.py +0 -1
  7. mlrun/config.py +1 -1
  8. mlrun/data_types/to_pandas.py +12 -12
  9. mlrun/datastore/alibaba_oss.py +1 -0
  10. mlrun/datastore/azure_blob.py +1 -6
  11. mlrun/datastore/base.py +12 -0
  12. mlrun/datastore/dbfs_store.py +1 -5
  13. mlrun/datastore/filestore.py +1 -3
  14. mlrun/datastore/google_cloud_storage.py +1 -9
  15. mlrun/datastore/redis.py +1 -0
  16. mlrun/datastore/s3.py +1 -0
  17. mlrun/datastore/storeytargets.py +147 -0
  18. mlrun/datastore/targets.py +67 -69
  19. mlrun/datastore/v3io.py +1 -0
  20. mlrun/errors.py +7 -4
  21. mlrun/feature_store/feature_vector.py +3 -1
  22. mlrun/feature_store/retrieval/job.py +3 -1
  23. mlrun/frameworks/sklearn/mlrun_interface.py +13 -3
  24. mlrun/model.py +1 -1
  25. mlrun/model_monitoring/api.py +1 -2
  26. mlrun/model_monitoring/applications/_application_steps.py +25 -43
  27. mlrun/model_monitoring/applications/context.py +206 -70
  28. mlrun/model_monitoring/controller.py +0 -1
  29. mlrun/model_monitoring/db/stores/__init__.py +3 -3
  30. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +17 -8
  31. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +14 -4
  32. mlrun/model_monitoring/db/tsdb/__init__.py +3 -3
  33. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +18 -10
  34. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +35 -23
  35. mlrun/model_monitoring/helpers.py +38 -1
  36. mlrun/model_monitoring/stream_processing.py +8 -26
  37. mlrun/package/packagers/default_packager.py +2 -2
  38. mlrun/projects/project.py +17 -16
  39. mlrun/runtimes/funcdoc.py +1 -1
  40. mlrun/runtimes/nuclio/api_gateway.py +9 -0
  41. mlrun/runtimes/nuclio/application/application.py +131 -55
  42. mlrun/runtimes/nuclio/function.py +4 -10
  43. mlrun/runtimes/nuclio/serving.py +2 -2
  44. mlrun/runtimes/sparkjob/spark3job.py +1 -1
  45. mlrun/runtimes/utils.py +16 -0
  46. mlrun/serving/routers.py +1 -1
  47. mlrun/serving/server.py +19 -5
  48. mlrun/serving/states.py +8 -0
  49. mlrun/serving/v2_serving.py +34 -26
  50. mlrun/utils/helpers.py +12 -2
  51. mlrun/utils/v3io_clients.py +2 -2
  52. mlrun/utils/version/version.json +2 -2
  53. {mlrun-1.7.0rc39.dist-info → mlrun-1.7.0rc42.dist-info}/METADATA +2 -2
  54. {mlrun-1.7.0rc39.dist-info → mlrun-1.7.0rc42.dist-info}/RECORD +58 -57
  55. {mlrun-1.7.0rc39.dist-info → mlrun-1.7.0rc42.dist-info}/WHEEL +1 -1
  56. {mlrun-1.7.0rc39.dist-info → mlrun-1.7.0rc42.dist-info}/LICENSE +0 -0
  57. {mlrun-1.7.0rc39.dist-info → mlrun-1.7.0rc42.dist-info}/entry_points.txt +0 -0
  58. {mlrun-1.7.0rc39.dist-info → mlrun-1.7.0rc42.dist-info}/top_level.txt +0 -0
mlrun/datastore/targets.py CHANGED
@@ -47,7 +47,6 @@ from .spark_utils import spark_session_update_hadoop_options
 from .utils import (
     _generate_sql_query_with_time_filter,
     filter_df_start_end_time,
-    parse_kafka_url,
     select_columns_from_df,
 )
 
@@ -928,8 +927,9 @@ class ParquetTarget(BaseStoreTarget):
                 if time_unit == time_partitioning_granularity:
                     break
 
+        target_path = self.get_target_path()
         if not self.partitioned and not mlrun.utils.helpers.is_parquet_file(
-            self.get_target_path()
+            target_path
         ):
             partition_cols = []
 
@@ -937,25 +937,16 @@ class ParquetTarget(BaseStoreTarget):
         for key_column in key_columns:
             tuple_key_columns.append((key_column.name, key_column.value_type))
 
-        store, path_in_store, target_path = self._get_store_and_path()
-
-        storage_options = store.get_storage_options()
-        if storage_options and self.storage_options:
-            storage_options = merge(storage_options, self.storage_options)
-        else:
-            storage_options = storage_options or self.storage_options
-
         step = graph.add_step(
             name=self.name or "ParquetTarget",
             after=after,
             graph_shape="cylinder",
-            class_name="storey.ParquetTarget",
+            class_name="mlrun.datastore.storeytargets.ParquetStoreyTarget",
             path=target_path,
             columns=column_list,
             index_cols=tuple_key_columns,
             partition_cols=partition_cols,
             time_field=timestamp_key,
-            storage_options=storage_options,
             max_events=self.max_events,
             flush_after_seconds=self.flush_after_seconds,
             update_last_written=featureset_status.update_last_written_for_target,
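The store lookup and storage-options merging removed above now presumably live inside the new mlrun.datastore.storeytargets module (added in this release, +147 lines, not shown in this excerpt). A minimal sketch of the wrapper pattern this implies, assuming the wrapper resolves storage options from the path before delegating to the plain storey target; ParquetStoreyTargetSketch and _resolve_storage_options are illustrative names, not the actual mlrun code:

import storey


def _resolve_storage_options(path: str) -> dict:
    # Hypothetical helper: look up the MLRun datastore (or ds:// profile) behind
    # `path` and return its fsspec storage options (credentials, endpoint, ...).
    ...


class ParquetStoreyTargetSketch(storey.ParquetTarget):
    # Sketch only: derive storage_options from the path and delegate to storey,
    # so the graph-building code above no longer passes storage_options itself.
    def __init__(self, path: str, **kwargs):
        kwargs.setdefault("storage_options", _resolve_storage_options(path))
        super().__init__(path=path, **kwargs)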
@@ -1110,17 +1101,16 @@ class CSVTarget(BaseStoreTarget):
         column_list = self._get_column_list(
             features=features, timestamp_key=timestamp_key, key_columns=key_columns
         )
-        store, path_in_store, target_path = self._get_store_and_path()
+        target_path = self.get_target_path()
         graph.add_step(
             name=self.name or "CSVTarget",
             after=after,
             graph_shape="cylinder",
-            class_name="storey.CSVTarget",
+            class_name="mlrun.datastore.storeytargets.CSVStoreyTarget",
             path=target_path,
             columns=column_list,
             header=True,
             index_cols=key_columns,
-            storage_options=store.get_storage_options(),
             **self.attributes,
         )
 
@@ -1334,6 +1324,19 @@ class NoSqlBaseTarget(BaseStoreTarget):
         timestamp_key=None,
         featureset_status=None,
     ):
+        table, column_list = self._get_table_and_columns(features, key_columns)
+
+        graph.add_step(
+            name=self.name or self.writer_step_name,
+            after=after,
+            graph_shape="cylinder",
+            class_name="mlrun.datastore.storeytargets.NoSqlStoreyTarget",
+            columns=column_list,
+            table=table,
+            **self.attributes,
+        )
+
+    def _get_table_and_columns(self, features, key_columns):
         key_columns = list(key_columns.keys())
         table = self._resource.uri
         column_list = self._get_column_list(
@@ -1352,15 +1355,7 @@ class NoSqlBaseTarget(BaseStoreTarget):
             col for col in column_list if col[0] not in aggregate_features
         ]
 
-        graph.add_step(
-            name=self.name or self.writer_step_name,
-            after=after,
-            graph_shape="cylinder",
-            class_name="storey.NoSqlTarget",
-            columns=column_list,
-            table=table,
-            **self.attributes,
-        )
+        return table, column_list
 
     def prepare_spark_df(self, df, key_columns, timestamp_key=None, spark_options=None):
         raise NotImplementedError()
@@ -1483,11 +1478,9 @@ class RedisNoSqlTarget(NoSqlBaseTarget):
     support_spark = True
     writer_step_name = "RedisNoSqlTarget"
 
-    # Fetch server url from the RedisNoSqlTarget::__init__() 'path' parameter.
-    # If not set fetch it from 'mlrun.mlconf.redis.url' (MLRUN_REDIS__URL environment variable).
-    # Then look for username and password at REDIS_xxx secrets
-    def _get_server_endpoint(self):
-        endpoint, uri = parse_path(self.get_target_path())
+    @staticmethod
+    def get_server_endpoint(path):
+        endpoint, uri = parse_path(path)
         endpoint = endpoint or mlrun.mlconf.redis.url
         if endpoint.startswith("ds://"):
             datastore_profile = datastore_profile_read(endpoint)
@@ -1504,8 +1497,13 @@ class RedisNoSqlTarget(NoSqlBaseTarget):
             raise mlrun.errors.MLRunInvalidArgumentError(
                 "Provide Redis username and password only via secrets"
             )
-        user = self._get_credential("REDIS_USER", "")
-        password = self._get_credential("REDIS_PASSWORD", "")
+        credentials_prefix = mlrun.get_secret_or_env(key="CREDENTIALS_PREFIX")
+        user = mlrun.get_secret_or_env(
+            "REDIS_USER", default="", prefix=credentials_prefix
+        )
+        password = mlrun.get_secret_or_env(
+            "REDIS_PASSWORD", default="", prefix=credentials_prefix
+        )
         host = parsed_endpoint.hostname
         port = parsed_endpoint.port if parsed_endpoint.port else "6379"
         scheme = parsed_endpoint.scheme
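Taken together, the two Redis hunks above turn endpoint resolution into a static helper and route credential lookup through mlrun.get_secret_or_env, optionally under a CREDENTIALS_PREFIX. A small usage sketch (the Redis URL is illustrative; real targets typically get their path from a ds:// profile or mlrun.mlconf.redis.url):

from mlrun.datastore.targets import RedisNoSqlTarget

# No target instance is needed anymore - the path is passed explicitly.
# REDIS_USER / REDIS_PASSWORD are read from secrets or the environment,
# optionally under the CREDENTIALS_PREFIX value, as in the hunk above.
endpoint, uri = RedisNoSqlTarget.get_server_endpoint(
    "redis://my-redis.example.com:6379/projects/demo/my-table"
)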
@@ -1519,7 +1517,7 @@ class RedisNoSqlTarget(NoSqlBaseTarget):
         from storey import Table
         from storey.redis_driver import RedisDriver
 
-        endpoint, uri = self._get_server_endpoint()
+        endpoint, uri = self.get_server_endpoint(self.get_target_path())
 
         return Table(
             uri,
@@ -1528,7 +1526,7 @@ class RedisNoSqlTarget(NoSqlBaseTarget):
         )
 
     def get_spark_options(self, key_column=None, timestamp_key=None, overwrite=True):
-        endpoint, uri = self._get_server_endpoint()
+        endpoint, uri = self.get_server_endpoint(self.get_target_path())
         parsed_endpoint = urlparse(endpoint)
         store, path_in_store, path = self._get_store_and_path()
         return {
@@ -1560,6 +1558,28 @@ class RedisNoSqlTarget(NoSqlBaseTarget):
 
         return df
 
+    def add_writer_step(
+        self,
+        graph,
+        after,
+        features,
+        key_columns=None,
+        timestamp_key=None,
+        featureset_status=None,
+    ):
+        table, column_list = self._get_table_and_columns(features, key_columns)
+
+        graph.add_step(
+            path=self.get_target_path(),
+            name=self.name or self.writer_step_name,
+            after=after,
+            graph_shape="cylinder",
+            class_name="mlrun.datastore.storeytargets.RedisNoSqlStoreyTarget",
+            columns=column_list,
+            table=table,
+            **self.attributes,
+        )
+
 
 class StreamTarget(BaseStoreTarget):
     kind = TargetTypes.stream
@@ -1578,29 +1598,22 @@ class StreamTarget(BaseStoreTarget):
         timestamp_key=None,
         featureset_status=None,
     ):
-        from storey import V3ioDriver
-
         key_columns = list(key_columns.keys())
-        store, path_in_store, path = self._get_store_and_path()
-        if not path:
-            raise mlrun.errors.MLRunInvalidArgumentError("StreamTarget requires a path")
-        endpoint, uri = parse_path(path)
-        storage_options = store.get_storage_options()
-        access_key = storage_options.get("v3io_access_key")
+
         column_list = self._get_column_list(
             features=features, timestamp_key=timestamp_key, key_columns=key_columns
         )
+        stream_path = self.get_target_path()
+        if not stream_path:
+            raise mlrun.errors.MLRunInvalidArgumentError("StreamTarget requires a path")
 
         graph.add_step(
             name=self.name or "StreamTarget",
             after=after,
             graph_shape="cylinder",
-            class_name="storey.StreamTarget",
+            class_name="mlrun.datastore.storeytargets.StreamStoreyTarget",
             columns=column_list,
-            storage=V3ioDriver(
-                webapi=endpoint or mlrun.mlconf.v3io_api, access_key=access_key
-            ),
-            stream_path=uri,
+            stream_path=stream_path,
             **self.attributes,
         )
 
@@ -1676,34 +1689,19 @@ class KafkaTarget(BaseStoreTarget):
         column_list = self._get_column_list(
             features=features, timestamp_key=timestamp_key, key_columns=key_columns
         )
-        if self.path and self.path.startswith("ds://"):
-            datastore_profile = datastore_profile_read(self.path)
-            attributes = datastore_profile.attributes()
-            brokers = attributes.pop(
-                "brokers", attributes.pop("bootstrap_servers", None)
-            )
-            topic = datastore_profile.topic
-        else:
-            attributes = copy(self.attributes)
-            brokers = attributes.pop(
-                "brokers", attributes.pop("bootstrap_servers", None)
-            )
-            topic, brokers = parse_kafka_url(self.get_target_path(), brokers)
+        path = self.get_target_path()
 
-        if not topic:
-            raise mlrun.errors.MLRunInvalidArgumentError(
-                "KafkaTarget requires a path (topic)"
-            )
+        if not path:
+            raise mlrun.errors.MLRunInvalidArgumentError("KafkaTarget requires a path")
 
         graph.add_step(
             name=self.name or "KafkaTarget",
             after=after,
             graph_shape="cylinder",
-            class_name="storey.KafkaTarget",
+            class_name="mlrun.datastore.storeytargets.KafkaStoreyTarget",
             columns=column_list,
-            topic=topic,
-            brokers=brokers,
-            **attributes,
+            path=path,
+            attributes=self.attributes,
         )
 
     def purge(self):
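The broker/topic resolution deleted from KafkaTarget.add_writer_step is not gone from the package; it presumably moves behind the new KafkaStoreyTarget wrapper, which now receives only path and attributes. Reassembling the removed lines as a standalone function gives a sketch of that logic (the function name and return shape are illustrative, not the wrapper's actual API):

from copy import copy

from mlrun.datastore.datastore_profile import datastore_profile_read
from mlrun.datastore.utils import parse_kafka_url


def resolve_kafka_topic_and_brokers(path: str, attributes: dict):
    # ds:// paths resolve through a datastore profile; anything else goes
    # through parse_kafka_url, with brokers optionally taken from attributes.
    if path and path.startswith("ds://"):
        profile = datastore_profile_read(path)
        profile_attributes = profile.attributes()
        brokers = profile_attributes.pop(
            "brokers", profile_attributes.pop("bootstrap_servers", None)
        )
        return profile.topic, brokers
    attributes = copy(attributes)
    brokers = attributes.pop("brokers", attributes.pop("bootstrap_servers", None))
    return parse_kafka_url(path, brokers)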
@@ -1740,7 +1738,7 @@ class TSDBTarget(BaseStoreTarget):
 
         graph.add_step(
             name=self.name or "TSDBTarget",
-            class_name="storey.TSDBTarget",
+            class_name="mlrun.datastore.storeytargets.TSDBStoreyTarget",
             after=after,
             graph_shape="cylinder",
             path=uri,
@@ -2029,7 +2027,7 @@ class SQLTarget(BaseStoreTarget):
             name=self.name or "SqlTarget",
             after=after,
             graph_shape="cylinder",
-            class_name="storey.NoSqlTarget",
+            class_name="mlrun.datastore.storeytargets.NoSqlStoreyTarget",
             columns=column_list,
             header=True,
             table=table,
mlrun/datastore/v3io.py CHANGED
@@ -140,6 +140,7 @@ class V3ioStore(DataStore):
         max_chunk_size: int = V3IO_DEFAULT_UPLOAD_CHUNK_SIZE,
     ):
         """helper function for put method, allows for controlling max_chunk_size in testing"""
+        data, _ = self._prepare_put_data(data, append)
         container, path = split_path(self._join(key))
         buffer_size = len(data)  # in bytes
         buffer_offset = 0
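_prepare_put_data itself is not shown in this excerpt; per the file list it is presumably part of the mlrun/datastore/base.py change (+12 lines). A purely illustrative guess at its intent, not the actual implementation: normalize the payload to bytes so the byte-length arithmetic above is correct for string input.

def _prepare_put_data(self, data, append):
    # Hypothetical sketch only: encode str payloads so that
    # buffer_size = len(data) counts bytes rather than characters.
    if isinstance(data, str):
        data = data.encode("utf-8")
    return data, append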
mlrun/errors.py CHANGED
@@ -29,11 +29,14 @@ class MLRunBaseError(Exception):
     pass
 
 
-class MLRunTaskNotReady(MLRunBaseError):
+class MLRunTaskNotReadyError(MLRunBaseError):
     """indicate we are trying to read a value which is not ready
     or need to come from a job which is in progress"""
 
 
+MLRunTaskNotReady = MLRunTaskNotReadyError  # kept for BC only
+
+
 class MLRunHTTPError(MLRunBaseError, requests.HTTPError):
     def __init__(
         self,
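Of the exception renames in this file, only MLRunTaskNotReady visibly keeps a backward-compatibility alias (the hunk above). A minimal sketch, in plain Python rather than diff form, of what that alias means for callers:

import mlrun.errors

# The legacy name stays bound to the renamed class, so existing except-clauses
# and isinstance checks keep working.
assert mlrun.errors.MLRunTaskNotReady is mlrun.errors.MLRunTaskNotReadyError

try:
    raise mlrun.errors.MLRunTaskNotReadyError("feature vector dataset is not ready")
except mlrun.errors.MLRunTaskNotReady:
    pass  # caught through the alias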
@@ -205,15 +208,15 @@ class MLRunTimeoutError(MLRunHTTPStatusError, TimeoutError):
     error_status_code = HTTPStatus.GATEWAY_TIMEOUT.value
 
 
-class MLRunInvalidMMStoreType(MLRunHTTPStatusError, ValueError):
+class MLRunInvalidMMStoreTypeError(MLRunHTTPStatusError, ValueError):
     error_status_code = HTTPStatus.BAD_REQUEST.value
 
 
-class MLRunStreamConnectionFailure(MLRunHTTPStatusError, ValueError):
+class MLRunStreamConnectionFailureError(MLRunHTTPStatusError, ValueError):
     error_status_code = HTTPStatus.BAD_REQUEST.value
 
 
-class MLRunTSDBConnectionFailure(MLRunHTTPStatusError, ValueError):
+class MLRunTSDBConnectionFailureError(MLRunHTTPStatusError, ValueError):
     error_status_code = HTTPStatus.BAD_REQUEST.value
 
 
mlrun/feature_store/feature_vector.py CHANGED
@@ -1086,7 +1086,9 @@ class OfflineVectorResponse:
     def to_dataframe(self, to_pandas=True):
         """return result as dataframe"""
         if self.status != "completed":
-            raise mlrun.errors.MLRunTaskNotReady("feature vector dataset is not ready")
+            raise mlrun.errors.MLRunTaskNotReadyError(
+                "feature vector dataset is not ready"
+            )
         return self._merger.get_df(to_pandas=to_pandas)
 
     def to_parquet(self, target_path, **kw):
mlrun/feature_store/retrieval/job.py CHANGED
@@ -156,7 +156,9 @@ class RemoteVectorResponse:
 
     def _is_ready(self):
         if self.status != "completed":
-            raise mlrun.errors.MLRunTaskNotReady("feature vector dataset is not ready")
+            raise mlrun.errors.MLRunTaskNotReadyError(
+                "feature vector dataset is not ready"
+            )
         self.vector.reload()
 
     def to_dataframe(self, columns=None, df_module=None, **kwargs):
mlrun/frameworks/sklearn/mlrun_interface.py CHANGED
@@ -97,7 +97,7 @@ class SKLearnMLRunInterface(MLRunInterface, ABC):
 
         def wrapper(
             self: SKLearnTypes.ModelType,
-            X: SKLearnTypes.DatasetType,
+            X: SKLearnTypes.DatasetType,  # noqa: N803 - should be lowercase "x", kept for BC
             y: SKLearnTypes.DatasetType = None,
             *args,
             **kwargs,
@@ -124,7 +124,12 @@ class SKLearnMLRunInterface(MLRunInterface, ABC):
 
         return wrapper
 
-    def mlrun_predict(self, X: SKLearnTypes.DatasetType, *args, **kwargs):
+    def mlrun_predict(
+        self,
+        X: SKLearnTypes.DatasetType,  # noqa: N803 - should be lowercase "x", kept for BC
+        *args,
+        **kwargs,
+    ):
         """
         MLRun's wrapper for the common ML API predict method.
         """
@@ -136,7 +141,12 @@ class SKLearnMLRunInterface(MLRunInterface, ABC):
 
         return y_pred
 
-    def mlrun_predict_proba(self, X: SKLearnTypes.DatasetType, *args, **kwargs):
+    def mlrun_predict_proba(
+        self,
+        X: SKLearnTypes.DatasetType,  # noqa: N803 - should be lowercase "x", kept for BC
+        *args,
+        **kwargs,
+    ):
         """
         MLRun's wrapper for the common ML API predict_proba method.
         """
mlrun/model.py CHANGED
@@ -487,7 +487,7 @@ class ImageBuilder(ModelObj):
 
     def __init__(
         self,
-        functionSourceCode=None,
+        functionSourceCode=None,  # noqa: N803 - should be "snake_case", kept for BC
         source=None,
         image=None,
         base_image=None,
mlrun/model_monitoring/api.py CHANGED
@@ -147,8 +147,7 @@ def record_results(
                              on the provided `endpoint_id`.
     :param function_name:    If a new model endpoint is created, use this function name for generating the
                              function URI.
-    :param context:          MLRun context. Note that the context is required for logging the artifacts
-                             following the batch drift job.
+    :param context:          MLRun context. Note that the context is required generating the model endpoint.
     :param infer_results_df: DataFrame that will be stored under the model endpoint parquet target. Will be
                              used for doing the drift analysis. Please make sure that the dataframe includes
                              both feature names and label columns.
mlrun/model_monitoring/applications/_application_steps.py CHANGED
@@ -11,19 +11,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import json
-import typing
-from typing import Optional
+from typing import Any, Optional, Union
 
-import mlrun.common.helpers
-import mlrun.common.model_monitoring.helpers
 import mlrun.common.schemas.alert as alert_objects
 import mlrun.common.schemas.model_monitoring.constants as mm_constant
 import mlrun.datastore
-import mlrun.serving
-import mlrun.utils.helpers
-import mlrun.utils.v3io_clients
+import mlrun.model_monitoring
 from mlrun.model_monitoring.helpers import get_stream_path
+from mlrun.serving import GraphContext
 from mlrun.serving.utils import StepToDict
 from mlrun.utils import logger
 
@@ -62,7 +59,7 @@ class _PushToMonitoringWriter(StepToDict):
         self,
         event: tuple[
             list[
-                typing.Union[
+                Union[
                     ModelMonitoringApplicationResult, ModelMonitoringApplicationMetric
                 ]
             ],
@@ -121,50 +118,35 @@
 
 
 class _PrepareMonitoringEvent(StepToDict):
-    def __init__(self, application_name: str):
+    def __init__(self, context: GraphContext, application_name: str) -> None:
         """
         Class for preparing the application event for the application step.
 
         :param application_name: Application name.
         """
+        self.graph_context = context
+        self.application_name = application_name
+        self.model_endpoints: dict[str, mlrun.model_monitoring.ModelEndpoint] = {}
 
-        self.context = self._create_mlrun_context(application_name)
-        self.model_endpoints = {}
-
-    def do(self, event: dict[str, dict]) -> MonitoringApplicationContext:
+    def do(self, event: dict[str, Any]) -> MonitoringApplicationContext:
         """
         Prepare the application event for the application step.
 
         :param event: Application event.
-        :return: Application event.
+        :return: Application context.
         """
-        if not event.get("mlrun_context"):
-            application_context = MonitoringApplicationContext().from_dict(
-                event,
-                context=self.context,
-                model_endpoint_dict=self.model_endpoints,
-            )
-        else:
-            application_context = MonitoringApplicationContext().from_dict(event)
+        application_context = MonitoringApplicationContext(
+            graph_context=self.graph_context,
+            application_name=self.application_name,
+            event=event,
+            model_endpoint_dict=self.model_endpoints,
+        )
+
         self.model_endpoints.setdefault(
             application_context.endpoint_id, application_context.model_endpoint
         )
-        return application_context
 
-    @staticmethod
-    def _create_mlrun_context(app_name: str):
-        artifact_path = mlrun.utils.helpers.template_artifact_path(
-            mlrun.mlconf.artifact_path, mlrun.mlconf.default_project
-        )
-        context = mlrun.get_or_create_ctx(
-            f"{app_name}-logger",
-            spec={
-                "metadata": {"labels": {"kind": mlrun.runtimes.RuntimeKinds.serving}},
-                "spec": {mlrun.utils.helpers.RunKeys.output_path: artifact_path},
-            },
-        )
-        context.__class__ = MonitoringApplicationContext
-        return context
+        return application_context
 
 
 class _ApplicationErrorHandler(StepToDict):
@@ -181,13 +163,13 @@ class _ApplicationErrorHandler(StepToDict):
 
         logger.error(f"Error in application step: {event}")
 
-        event_data = mlrun.common.schemas.Event(
+        event_data = alert_objects.Event(
             kind=alert_objects.EventKind.MM_APP_FAILED,
-            entity={
-                "kind": alert_objects.EventEntityKind.MODEL_MONITORING_APPLICATION,
-                "project": self.project,
-                "ids": [f"{self.project}_{event.body.application_name}"],
-            },
+            entity=alert_objects.EventEntities(
+                kind=alert_objects.EventEntityKind.MODEL_MONITORING_APPLICATION,
+                project=self.project,
+                ids=[f"{self.project}_{event.body.application_name}"],
+            ),
             value_dict={
                 "Error": event.error,
                 "Timestamp": event.timestamp,