mlrun 1.7.0rc26__py3-none-any.whl → 1.7.0rc27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mlrun might be problematic.

Files changed (48)
  1. mlrun/__main__.py +7 -7
  2. mlrun/alerts/alert.py +13 -1
  3. mlrun/artifacts/manager.py +5 -0
  4. mlrun/common/constants.py +2 -2
  5. mlrun/common/formatters/base.py +9 -9
  6. mlrun/common/schemas/alert.py +4 -8
  7. mlrun/common/schemas/api_gateway.py +7 -0
  8. mlrun/common/schemas/constants.py +3 -0
  9. mlrun/common/schemas/model_monitoring/constants.py +20 -9
  10. mlrun/config.py +6 -11
  11. mlrun/datastore/datastore.py +3 -3
  12. mlrun/datastore/snowflake_utils.py +3 -1
  13. mlrun/datastore/sources.py +23 -9
  14. mlrun/datastore/targets.py +27 -13
  15. mlrun/db/base.py +9 -0
  16. mlrun/db/httpdb.py +39 -30
  17. mlrun/db/nopdb.py +9 -1
  18. mlrun/execution.py +18 -10
  19. mlrun/feature_store/retrieval/spark_merger.py +2 -1
  20. mlrun/model.py +21 -0
  21. mlrun/model_monitoring/db/stores/__init__.py +5 -3
  22. mlrun/model_monitoring/db/stores/base/store.py +36 -1
  23. mlrun/model_monitoring/db/stores/sqldb/sql_store.py +4 -38
  24. mlrun/model_monitoring/db/stores/v3io_kv/kv_store.py +19 -27
  25. mlrun/model_monitoring/db/tsdb/__init__.py +4 -7
  26. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +4 -1
  27. mlrun/model_monitoring/helpers.py +9 -5
  28. mlrun/projects/project.py +63 -68
  29. mlrun/render.py +10 -5
  30. mlrun/run.py +2 -2
  31. mlrun/runtimes/nuclio/function.py +20 -0
  32. mlrun/runtimes/pod.py +5 -29
  33. mlrun/serving/routers.py +75 -59
  34. mlrun/serving/server.py +1 -0
  35. mlrun/serving/v2_serving.py +8 -1
  36. mlrun/utils/helpers.py +33 -1
  37. mlrun/utils/notifications/notification/base.py +4 -0
  38. mlrun/utils/notifications/notification/git.py +21 -0
  39. mlrun/utils/notifications/notification/slack.py +8 -0
  40. mlrun/utils/notifications/notification/webhook.py +29 -0
  41. mlrun/utils/notifications/notification_pusher.py +1 -1
  42. mlrun/utils/version/version.json +2 -2
  43. {mlrun-1.7.0rc26.dist-info → mlrun-1.7.0rc27.dist-info}/METADATA +4 -4
  44. {mlrun-1.7.0rc26.dist-info → mlrun-1.7.0rc27.dist-info}/RECORD +48 -48
  45. {mlrun-1.7.0rc26.dist-info → mlrun-1.7.0rc27.dist-info}/WHEEL +1 -1
  46. {mlrun-1.7.0rc26.dist-info → mlrun-1.7.0rc27.dist-info}/LICENSE +0 -0
  47. {mlrun-1.7.0rc26.dist-info → mlrun-1.7.0rc27.dist-info}/entry_points.txt +0 -0
  48. {mlrun-1.7.0rc26.dist-info → mlrun-1.7.0rc27.dist-info}/top_level.txt +0 -0
mlrun/__main__.py CHANGED
@@ -50,12 +50,12 @@ from .run import (
 from .runtimes import RemoteRuntime, RunError, RuntimeKinds, ServingRuntime
 from .secrets import SecretsStore
 from .utils import (
+    RunKeys,
     dict_to_yaml,
     get_in,
     is_relative_path,
     list2dict,
     logger,
-    run_keys,
     update_in,
 )
 from .utils.version import Version
@@ -380,15 +380,15 @@ def run(
     set_item(runobj.spec.hyper_param_options, hyper_param_strategy, "strategy")
     set_item(runobj.spec.hyper_param_options, selector, "selector")

-    set_item(runobj.spec, inputs, run_keys.inputs, list2dict(inputs))
+    set_item(runobj.spec, inputs, RunKeys.inputs, list2dict(inputs))
     set_item(
-        runobj.spec, returns, run_keys.returns, [py_eval(value) for value in returns]
+        runobj.spec, returns, RunKeys.returns, [py_eval(value) for value in returns]
     )
-    set_item(runobj.spec, in_path, run_keys.input_path)
-    set_item(runobj.spec, out_path, run_keys.output_path)
-    set_item(runobj.spec, outputs, run_keys.outputs, list(outputs))
+    set_item(runobj.spec, in_path, RunKeys.input_path)
+    set_item(runobj.spec, out_path, RunKeys.output_path)
+    set_item(runobj.spec, outputs, RunKeys.outputs, list(outputs))
     set_item(
-        runobj.spec, secrets, run_keys.secrets, line2keylist(secrets, "kind", "source")
+        runobj.spec, secrets, RunKeys.secrets, line2keylist(secrets, "kind", "source")
     )
     set_item(runobj.spec, verbose, "verbose")
     set_item(runobj.spec, scrape_metrics, "scrape_metrics")
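
The client-facing effect here is a rename only: the lowercase run_keys constants holder becomes RunKeys, with the same attribute names. A minimal sketch of the new import (the literal attribute values are an assumption; historically each attribute held its own name as a plain string):

    from mlrun.utils import RunKeys

    # Same attribute access as before the rename; only the symbol changed.
    # Values assumed to be the plain key strings used in run specs.
    print(RunKeys.inputs)       # e.g. "inputs"
    print(RunKeys.output_path)  # e.g. "output_path"
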
mlrun/alerts/alert.py CHANGED
@@ -26,7 +26,6 @@ class AlertConfig(ModelObj):
         "description",
         "summary",
         "severity",
-        "criteria",
         "reset_policy",
         "state",
     ]
@@ -34,6 +33,7 @@ class AlertConfig(ModelObj):
         "entities",
         "notifications",
         "trigger",
+        "criteria",
     ]

     def __init__(
@@ -104,6 +104,14 @@ class AlertConfig(ModelObj):
                 else self.trigger
             )
             return None
+        if field_name == "criteria":
+            if self.criteria:
+                return (
+                    self.criteria.dict()
+                    if not isinstance(self.criteria, dict)
+                    else self.criteria
+                )
+            return None
         return super()._serialize_field(struct, field_name, strip)

     def to_dict(self, fields: list = None, exclude: list = None, strip: bool = False):
@@ -137,6 +145,10 @@ class AlertConfig(ModelObj):
         trigger_obj = alert_objects.AlertTrigger.parse_obj(trigger_data)
         new_obj.trigger = trigger_obj

+        criteria_data = struct.get("criteria")
+        if criteria_data:
+            criteria_obj = alert_objects.AlertCriteria.parse_obj(criteria_data)
+            new_obj.criteria = criteria_obj
         return new_obj

     def with_notifications(self, notifications: list[alert_objects.AlertNotification]):
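
Net effect: criteria leaves the plain string-fields list and joins the specially serialized fields, so it now survives an AlertConfig round trip as a parsed object. A hedged sketch (field values are illustrative; the count field is at least consistent with the new max_criteria_count config option further down):

    import mlrun.common.schemas.alert as alert_objects
    from mlrun.alerts.alert import AlertConfig

    # config: an existing AlertConfig instance (construction args omitted)
    config.criteria = alert_objects.AlertCriteria(count=3, period="10m")

    struct = config.to_dict()                 # criteria serialized via .dict()
    restored = AlertConfig.from_dict(struct)  # criteria parsed back with parse_obj
    assert restored.criteria is not None
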
mlrun/artifacts/manager.py CHANGED
@@ -100,6 +100,11 @@ class ArtifactProducer:

 def dict_to_artifact(struct: dict) -> Artifact:
     kind = struct.get("kind", "")
+
+    # TODO: remove this in 1.8.0
+    if mlrun.utils.is_legacy_artifact(struct):
+        return mlrun.artifacts.base.convert_legacy_artifact_to_new_format(struct)
+
     artifact_class = artifact_types[kind]
     return artifact_class.from_dict(struct)
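
The new guard means callers no longer need to pre-convert old-format artifact dicts before dispatch. A sketch under the assumption that a legacy struct keeps metadata-like fields at the top level (the exact legacy shape is not shown in this diff):

    from mlrun.artifacts.manager import dict_to_artifact

    legacy_struct = {"kind": "", "key": "my-data"}  # hypothetical legacy shape
    artifact = dict_to_artifact(legacy_struct)      # converted, then dispatched by kind
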
mlrun/common/constants.py CHANGED
@@ -64,12 +64,12 @@ class MLRunInternalLabels:
     username = f"{MLRUN_LABEL_PREFIX}username"
     username_domain = f"{MLRUN_LABEL_PREFIX}username_domain"
     task_name = f"{MLRUN_LABEL_PREFIX}task-name"
+    resource_name = f"{MLRUN_LABEL_PREFIX}resource_name"
+    created = f"{MLRUN_LABEL_PREFIX}created"
     host = "host"
     job_type = "job-type"
     kind = "kind"
     component = "component"
-    resource_name = "resource_name"
-    created = "mlrun-created"

     owner = "owner"
     v3io_user = "v3io_user"
mlrun/common/formatters/base.py CHANGED
@@ -28,42 +28,42 @@ class ObjectFormat:
     full = "full"

     @staticmethod
-    def format_method(_format: str) -> typing.Optional[typing.Callable]:
+    def format_method(format_: str) -> typing.Optional[typing.Callable]:
         """
         Get the formatting method for the provided format.
         A `None` value signifies a pass-through formatting method (no formatting).
-        :param _format: The format as a string representation.
+        :param format_: The format as a string representation.
         :return: The formatting method.
         """
         return {
             ObjectFormat.full: None,
-        }[_format]
+        }[format_]

     @classmethod
     def format_obj(
         cls,
         obj: typing.Any,
-        _format: str,
+        format_: str,
         exclude_formats: typing.Optional[list[str]] = None,
     ) -> typing.Any:
         """
         Format the provided object based on the provided format.
         :param obj: The object to format.
-        :param _format: The format as a string representation.
+        :param format_: The format as a string representation.
         :param exclude_formats: A list of formats to exclude from the formatting process. If the provided format is in
                                 this list, an invalid format exception will be raised.
         """
         exclude_formats = exclude_formats or []
-        _format = _format or cls.full
+        format_ = format_ or cls.full
         invalid_format_exc = mlrun.errors.MLRunBadRequestError(
-            f"Provided format is not supported. format={_format}"
+            f"Provided format is not supported. format={format_}"
        )

-        if _format in exclude_formats:
+        if format_ in exclude_formats:
             raise invalid_format_exc

         try:
-            format_method = cls.format_method(_format)
+            format_method = cls.format_method(format_)
         except KeyError:
             raise invalid_format_exc

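The underscore moves from the front of the parameter to the back, the usual Python convention for avoiding the format builtin without suggesting a private argument; call sites are unaffected unless they passed the argument by keyword. A minimal sketch (the pass-through return for the full format follows the docstring, not code shown in this hunk):

    from mlrun.common.formatters.base import ObjectFormat

    obj = {"name": "my-run", "state": "completed"}
    # "full" maps to None, i.e. no formatting, so obj should come back unchanged.
    assert ObjectFormat.format_obj(obj, format_="full") == obj
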
mlrun/common/schemas/alert.py CHANGED
@@ -39,8 +39,8 @@ class EventKind(StrEnum):
     CONCEPT_DRIFT_SUSPECTED = "concept_drift_suspected"
     MODEL_PERFORMANCE_DETECTED = "model_performance_detected"
     MODEL_PERFORMANCE_SUSPECTED = "model_performance_suspected"
-    MODEL_SERVING_PERFORMANCE_DETECTED = "model_serving_performance_detected"
-    MODEL_SERVING_PERFORMANCE_SUSPECTED = "model_serving_performance_suspected"
+    SYSTEM_PERFORMANCE_DETECTED = "system_performance_detected"
+    SYSTEM_PERFORMANCE_SUSPECTED = "system_performance_suspected"
     MM_APP_ANOMALY_DETECTED = "mm_app_anomaly_detected"
     MM_APP_ANOMALY_SUSPECTED = "mm_app_anomaly_suspected"
     FAILED = "failed"
@@ -53,12 +53,8 @@ _event_kind_entity_map = {
     EventKind.CONCEPT_DRIFT_SUSPECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.MODEL_PERFORMANCE_DETECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.MODEL_PERFORMANCE_SUSPECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
-    EventKind.MODEL_SERVING_PERFORMANCE_DETECTED: [
-        EventEntityKind.MODEL_ENDPOINT_RESULT
-    ],
-    EventKind.MODEL_SERVING_PERFORMANCE_SUSPECTED: [
-        EventEntityKind.MODEL_ENDPOINT_RESULT
-    ],
+    EventKind.SYSTEM_PERFORMANCE_DETECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
+    EventKind.SYSTEM_PERFORMANCE_SUSPECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.MM_APP_ANOMALY_DETECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.MM_APP_ANOMALY_SUSPECTED: [EventEntityKind.MODEL_ENDPOINT_RESULT],
     EventKind.FAILED: [EventEntityKind.JOB],
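
For alert configurations this is a value rename, not a semantic change: the renamed events keep the same MODEL_ENDPOINT_RESULT entity mapping. A one-liner showing the new member:

    from mlrun.common.schemas.alert import EventKind

    print(EventKind.SYSTEM_PERFORMANCE_DETECTED.value)  # "system_performance_detected"
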
mlrun/common/schemas/api_gateway.py CHANGED
@@ -102,6 +102,13 @@ class APIGateway(_APIGatewayBaseModel):
             if upstream.nucliofunction.get("name")
         ]

+    def get_invoke_url(self):
+        return (
+            self.spec.host + self.spec.path
+            if self.spec.path and self.spec.host
+            else self.spec.host
+        )
+
     def enrich_mlrun_names(self):
         self._enrich_api_gateway_mlrun_name()
         self._enrich_mlrun_function_names()
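
get_invoke_url joins host and path only when both are present, falling back to the bare host. A sketch of the expected behavior on a hypothetical gateway object (values illustrative):

    gateway.spec.host = "https://my-gateway.example.com"
    gateway.spec.path = "/v1/predict"
    gateway.get_invoke_url()  # "https://my-gateway.example.com/v1/predict"

    gateway.spec.path = None
    gateway.get_invoke_url()  # "https://my-gateway.example.com"
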
mlrun/common/schemas/constants.py CHANGED
@@ -120,10 +120,13 @@ class FeatureStorePartitionByField(mlrun.common.types.StrEnum):

 class RunPartitionByField(mlrun.common.types.StrEnum):
     name = "name"  # Supported for runs objects
+    project_and_name = "project_and_name"  # Supported for runs objects

     def to_partition_by_db_field(self, db_cls):
         if self.value == RunPartitionByField.name:
             return db_cls.name
+        elif self.value == RunPartitionByField.project_and_name:
+            return db_cls.project, db_cls.name
         else:
             raise mlrun.errors.MLRunInvalidArgumentError(
                 f"Unknown group by field: {self.value}"
mlrun/common/schemas/model_monitoring/constants.py CHANGED
@@ -158,19 +158,36 @@ class EventKeyMetrics:
     REAL_TIME = "real_time"


-class ModelEndpointTarget:
+class ModelEndpointTarget(MonitoringStrEnum):
     V3IO_NOSQL = "v3io-nosql"
     SQL = "sql"


+class StreamKind(MonitoringStrEnum):
+    V3IO_STREAM = "v3io_stream"
+    KAFKA = "kafka"
+
+
+class TSDBTarget(MonitoringStrEnum):
+    V3IO_TSDB = "v3io-tsdb"
+    TDEngine = "tdengine"
+    PROMETHEUS = "prometheus"
+
+
 class ProjectSecretKeys:
     ENDPOINT_STORE_CONNECTION = "MODEL_MONITORING_ENDPOINT_STORE_CONNECTION"
     ACCESS_KEY = "MODEL_MONITORING_ACCESS_KEY"
-    PIPELINES_ACCESS_KEY = "MODEL_MONITORING_PIPELINES_ACCESS_KEY"
-    KAFKA_BROKERS = "KAFKA_BROKERS"
     STREAM_PATH = "STREAM_PATH"
     TSDB_CONNECTION = "TSDB_CONNECTION"

+    @classmethod
+    def mandatory_secrets(cls):
+        return [
+            cls.ENDPOINT_STORE_CONNECTION,
+            cls.STREAM_PATH,
+            cls.TSDB_CONNECTION,
+        ]
+

 class ModelMonitoringStoreKinds:
     ENDPOINTS = "endpoints"
@@ -344,12 +361,6 @@ class ControllerPolicy:
     BASE_PERIOD = "base_period"


-class TSDBTarget:
-    V3IO_TSDB = "v3io-tsdb"
-    TDEngine = "tdengine"
-    PROMETHEUS = "prometheus"
-
-
 class HistogramDataDriftApplicationConstants:
     NAME = "histogram-data-drift"
     GENERAL_RESULT_NAME = "general_drift"
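
Turning these plain constant classes into MonitoringStrEnum members makes the valid values iterable, and mandatory_secrets() pins down the three credentials a monitored project must provide. A small sketch (assumes MonitoringStrEnum members behave as plain strings, like MLRun's other StrEnum types):

    from mlrun.common.schemas.model_monitoring.constants import (
        ProjectSecretKeys,
        StreamKind,
        TSDBTarget,
    )

    assert TSDBTarget.V3IO_TSDB == "v3io-tsdb"
    print([kind.value for kind in StreamKind])    # ["v3io_stream", "kafka"]
    print(ProjectSecretKeys.mandatory_secrets())  # the three required connection secrets
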
mlrun/config.py CHANGED
@@ -510,7 +510,7 @@ default_config = {
         "store_prefixes": {
             "default": "v3io:///users/pipelines/{project}/model-endpoints/{kind}",
             "user_space": "v3io:///projects/{project}/model-endpoints/{kind}",
-            "stream": "",
+            "stream": "",  # TODO: Delete in 1.9.0
             "monitoring_application": "v3io:///users/pipelines/{project}/monitoring-apps/",
         },
         # Offline storage path can be either relative or a full path. This path is used for general offline data
@@ -523,11 +523,12 @@ default_config = {
         "parquet_batching_max_events": 10_000,
         "parquet_batching_timeout_secs": timedelta(minutes=1).total_seconds(),
         # See mlrun.model_monitoring.db.stores.ObjectStoreFactory for available options
-        "store_type": "v3io-nosql",
+        "store_type": "v3io-nosql",  # TODO: Delete in 1.9.0
         "endpoint_store_connection": "",
         # See mlrun.model_monitoring.db.tsdb.ObjectTSDBFactory for available options
-        "tsdb_connector_type": "v3io-tsdb",
         "tsdb_connection": "",
+        # See mlrun.common.schemas.model_monitoring.constants.StreamKind for available options
+        "stream_connection": "",
     },
     "secret_stores": {
         # Use only in testing scenarios (such as integration tests) to avoid using k8s for secrets (will use in-memory
@@ -708,6 +709,8 @@ default_config = {
         # maximum number of alerts we allow to be configured.
         # user will get an error when exceeding this
         "max_allowed": 10000,
+        # maximum allowed value for count in criteria field inside AlertConfig
+        "max_criteria_count": 100,
     },
     "auth_with_client_id": {
         "enabled": False,
@@ -1090,7 +1093,6 @@ class Config:
         target: str = "online",
         artifact_path: str = None,
         function_name: str = None,
-        **kwargs,
     ) -> typing.Union[str, list[str]]:
         """Get the full path from the configuration based on the provided project and kind.

@@ -1112,13 +1114,6 @@
         """

         if target != "offline":
-            store_prefix_dict = (
-                mlrun.mlconf.model_endpoint_monitoring.store_prefixes.to_dict()
-            )
-            if store_prefix_dict.get(kind):
-                # Target exist in store prefix and has a valid string value
-                return store_prefix_dict[kind].format(project=project, **kwargs)
-
             if (
                 function_name
                 and function_name
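
stream_connection joins endpoint_store_connection and tsdb_connection as the third explicit monitoring connection setting, mirroring ProjectSecretKeys.mandatory_secrets() above, while the old store_type and stream prefix knobs are marked for deletion in 1.9.0. A hedged sketch of setting it client-side (values illustrative; "v3io" is the conventional shorthand on Iguazio):

    import mlrun

    mlrun.mlconf.model_endpoint_monitoring.endpoint_store_connection = "v3io"
    mlrun.mlconf.model_endpoint_monitoring.tsdb_connection = "v3io"
    mlrun.mlconf.model_endpoint_monitoring.stream_connection = "v3io"
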
mlrun/datastore/datastore.py CHANGED
@@ -21,7 +21,7 @@ from mlrun.datastore.datastore_profile import datastore_profile_read
 from mlrun.errors import err_to_str
 from mlrun.utils.helpers import get_local_file_schema

-from ..utils import DB_SCHEMA, run_keys
+from ..utils import DB_SCHEMA, RunKeys
 from .base import DataItem, DataStore, HttpStore
 from .filestore import FileStore
 from .inmem import InMemoryStore
@@ -133,7 +133,7 @@ class StoreManager:
         return self._db

     def from_dict(self, struct: dict):
-        stor_list = struct.get(run_keys.data_stores)
+        stor_list = struct.get(RunKeys.data_stores)
         if stor_list and isinstance(stor_list, list):
             for stor in stor_list:
                 schema, endpoint, parsed_url = parse_url(stor.get("url"))
@@ -145,7 +145,7 @@ class StoreManager:
         self._stores[stor["name"]] = new_stor

     def to_dict(self, struct):
-        struct[run_keys.data_stores] = [
+        struct[RunKeys.data_stores] = [
             stor.to_dict() for stor in self._stores.values() if stor.from_spec
         ]

mlrun/datastore/snowflake_utils.py CHANGED
@@ -30,13 +30,15 @@ def get_snowflake_password():


 def get_snowflake_spark_options(attributes):
+    if not attributes:
+        return {}
     return {
         "format": "net.snowflake.spark.snowflake",
         "sfURL": attributes.get("url"),
         "sfUser": attributes.get("user"),
         "sfPassword": get_snowflake_password(),
         "sfDatabase": attributes.get("database"),
-        "sfSchema": attributes.get("schema"),
+        "sfSchema": attributes.get("db_schema"),
         "sfWarehouse": attributes.get("warehouse"),
         "application": "iguazio_platform",
         "TIMESTAMP_TYPE_MAPPING": "TIMESTAMP_LTZ",
mlrun/datastore/sources.py CHANGED
@@ -774,6 +774,7 @@ class SnowflakeSource(BaseSourceDriver):
         self,
         name: str = "",
         key_field: str = None,
+        attributes: dict[str, object] = None,
         time_field: str = None,
         schedule: str = None,
         start_time=None,
@@ -783,21 +784,34 @@ class SnowflakeSource(BaseSourceDriver):
         user: str = None,
         database: str = None,
         schema: str = None,
+        db_schema: str = None,
         warehouse: str = None,
         **kwargs,
     ):
-        attrs = {
-            "query": query,
-            "url": url,
-            "user": user,
-            "database": database,
-            "schema": schema,
-            "warehouse": warehouse,
-        }
+        # TODO: Remove in 1.9.0
+        if schema:
+            warnings.warn(
+                "schema is deprecated in 1.7.0, and will be removed in 1.9.0, please use db_schema"
+            )
+        db_schema = db_schema or schema  # TODO: Remove in 1.9.0
+
+        attributes = attributes or {}
+        if url:
+            attributes["url"] = url
+        if user:
+            attributes["user"] = user
+        if database:
+            attributes["database"] = database
+        if db_schema:
+            attributes["db_schema"] = db_schema
+        if warehouse:
+            attributes["warehouse"] = warehouse
+        if query:
+            attributes["query"] = query

         super().__init__(
             name,
-            attributes=attrs,
+            attributes=attributes,
             key_field=key_field,
             time_field=time_field,
             schedule=schedule,
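
For callers the visible change is the parameter name: schema still works until 1.9.0 but emits a deprecation warning. A sketch of the new spelling (connection values illustrative):

    from mlrun.datastore.sources import SnowflakeSource

    source = SnowflakeSource(
        "snowflake_source",
        query="SELECT * FROM my_table",
        url="my-account.snowflakecomputing.com",
        user="analyst",
        database="ANALYTICS",
        db_schema="PUBLIC",  # was schema="PUBLIC" before 1.7.0
        warehouse="COMPUTE_WH",
    )
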
mlrun/datastore/targets.py CHANGED
@@ -775,6 +775,10 @@ class BaseStoreTarget(DataTargetBase):
     def get_dask_options(self):
         raise NotImplementedError()

+    @property
+    def source_spark_attributes(self) -> dict:
+        return {}
+

 class ParquetTarget(BaseStoreTarget):
     """Parquet target storage driver, used to materialize feature set/vector data into parquet files.
@@ -1208,19 +1212,20 @@ class SnowflakeTarget(BaseStoreTarget):
         warehouse: str = None,
         table_name: str = None,
     ):
-        attrs = {
-            "url": url,
-            "user": user,
-            "database": database,
-            "schema": db_schema,
-            "warehouse": warehouse,
-            "table": table_name,
-        }
-        extended_attrs = {
-            key: value for key, value in attrs.items() if value is not None
-        }
-        attributes = {} if not attributes else attributes
-        attributes.update(extended_attrs)
+        attributes = attributes or {}
+        if url:
+            attributes["url"] = url
+        if user:
+            attributes["user"] = user
+        if database:
+            attributes["database"] = database
+        if db_schema:
+            attributes["db_schema"] = db_schema
+        if warehouse:
+            attributes["warehouse"] = warehouse
+        if table_name:
+            attributes["table"] = table_name

         super().__init__(
             name,
             path,
@@ -1259,6 +1264,15 @@ class SnowflakeTarget(BaseStoreTarget):
     ):
         raise NotImplementedError()

+    @property
+    def source_spark_attributes(self) -> dict:
+        keys = ["url", "user", "database", "db_schema", "warehouse"]
+        attributes = self.attributes or {}
+        snowflake_dict = {key: attributes.get(key) for key in keys}
+        table = attributes.get("table")
+        snowflake_dict["query"] = f"SELECT * from {table}" if table else None
+        return snowflake_dict
+

 class NoSqlBaseTarget(BaseStoreTarget):
     is_table = True
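
source_spark_attributes repackages a target's connection attributes, plus a SELECT * query over its table, in the shape get_snowflake_spark_options now expects, so a SnowflakeTarget can be read back as a source. A hedged sketch (constructor values illustrative; parameters not shown here are assumed to have defaults):

    from mlrun.datastore.snowflake_utils import get_snowflake_spark_options
    from mlrun.datastore.targets import SnowflakeTarget

    target = SnowflakeTarget(
        url="my-account.snowflakecomputing.com",
        user="analyst",
        database="ANALYTICS",
        db_schema="PUBLIC",
        warehouse="COMPUTE_WH",
        table_name="features",
    )
    # sf* options built from url/user/database/db_schema/warehouse; the "query"
    # key ("SELECT * from features") is presumably consumed by the Spark reader.
    spark_options = get_snowflake_spark_options(target.source_spark_attributes)
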
mlrun/db/base.py CHANGED
@@ -891,6 +891,7 @@ class RunDBInterface(ABC):
         image: str = "mlrun/mlrun",
         deploy_histogram_data_drift_app: bool = True,
         rebuild_images: bool = False,
+        fetch_credentials_from_sys_config: bool = False,
     ) -> None:
         pass

@@ -917,3 +918,11 @@ class RunDBInterface(ABC):
         self, project: str, image: str = "mlrun/mlrun"
     ) -> None:
         pass
+
+    @abstractmethod
+    def set_model_monitoring_credentials(
+        self,
+        project: str,
+        credentials: dict[str, str],
+    ) -> None:
+        pass
mlrun/db/httpdb.py CHANGED
@@ -38,6 +38,7 @@ import mlrun.model_monitoring.model_endpoint
 import mlrun.platforms
 import mlrun.projects
 import mlrun.runtimes.nuclio.api_gateway
+import mlrun.runtimes.nuclio.function
 import mlrun.utils
 from mlrun.alerts.alert import AlertConfig
 from mlrun.db.auth_utils import OAuthClientIDTokenProvider, StaticTokenProvider
@@ -536,6 +537,10 @@ class HTTPRunDB(RunDBInterface):
             server_cfg.get("model_monitoring_tsdb_connection")
             or config.model_endpoint_monitoring.tsdb_connection
         )
+        config.model_endpoint_monitoring.stream_connection = (
+            server_cfg.get("stream_connection")
+            or config.model_endpoint_monitoring.stream_connection
+        )
         config.packagers = server_cfg.get("packagers") or config.packagers
         server_data_prefixes = server_cfg.get("feature_store_data_prefixes") or {}
         for prefix in ["default", "nosql", "redisnosql"]:
@@ -870,7 +875,7 @@ class HTTPRunDB(RunDBInterface):
         ):
             # default to last week on no filter
             start_time_from = datetime.now() - timedelta(days=7)
-            partition_by = mlrun.common.schemas.RunPartitionByField.name
+            partition_by = mlrun.common.schemas.RunPartitionByField.project_and_name
             partition_sort_by = mlrun.common.schemas.SortField.updated

         params = {
@@ -1610,20 +1615,11 @@ class HTTPRunDB(RunDBInterface):
             raise RunDBError("bad function build response")

         if resp.headers:
-            func.status.state = resp.headers.get("x-mlrun-function-status", "")
             last_log_timestamp = float(
                 resp.headers.get("x-mlrun-last-timestamp", "0.0")
             )
-            func.status.address = resp.headers.get("x-mlrun-address", "")
-            func.status.nuclio_name = resp.headers.get("x-mlrun-name", "")
-            func.status.internal_invocation_urls = resp.headers.get(
-                "x-mlrun-internal-invocation-urls", ""
-            ).split(",")
-            func.status.external_invocation_urls = resp.headers.get(
-                "x-mlrun-external-invocation-urls", ""
-            ).split(",")
-            func.status.container_image = resp.headers.get(
-                "x-mlrun-container-image", ""
+            mlrun.runtimes.nuclio.function.enrich_nuclio_function_from_headers(
+                func, resp.headers
             )

         text = ""
@@ -1681,16 +1677,8 @@ class HTTPRunDB(RunDBInterface):
                 resp.headers.get("x-mlrun-last-timestamp", "0.0")
             )
             if func.kind in mlrun.runtimes.RuntimeKinds.nuclio_runtimes():
-                func.status.address = resp.headers.get("x-mlrun-address", "")
-                func.status.nuclio_name = resp.headers.get("x-mlrun-name", "")
-                func.status.internal_invocation_urls = resp.headers.get(
-                    "x-mlrun-internal-invocation-urls", ""
-                ).split(",")
-                func.status.external_invocation_urls = resp.headers.get(
-                    "x-mlrun-external-invocation-urls", ""
-                ).split(",")
-                func.status.container_image = resp.headers.get(
-                    "x-mlrun-container-image", ""
+                mlrun.runtimes.nuclio.function.enrich_nuclio_function_from_headers(
+                    func, resp.headers
                 )

             builder_pod = resp.headers.get("builder_pod", "")
@@ -3397,6 +3385,7 @@ class HTTPRunDB(RunDBInterface):
         image: str = "mlrun/mlrun",
         deploy_histogram_data_drift_app: bool = True,
         rebuild_images: bool = False,
+        fetch_credentials_from_sys_config: bool = False,
     ) -> None:
         """
         Deploy model monitoring application controller, writer and stream functions.
@@ -3406,14 +3395,16 @@
         The stream function goal is to monitor the log of the data stream. It is triggered when a new log entry
         is detected. It processes the new events into statistics that are then written to statistics databases.

-        :param project: Project name.
-        :param base_period: The time period in minutes in which the model monitoring controller
-                            function triggers. By default, the base period is 10 minutes.
-        :param image: The image of the model monitoring controller, writer & monitoring
-                      stream functions, which are real time nuclio functions.
-                      By default, the image is mlrun/mlrun.
-        :param deploy_histogram_data_drift_app: If true, deploy the default histogram-based data drift application.
-        :param rebuild_images: If true, force rebuild of model monitoring infrastructure images.
+        :param project:                            Project name.
+        :param base_period:                        The time period in minutes in which the model monitoring controller
+                                                   function triggers. By default, the base period is 10 minutes.
+        :param image:                              The image of the model monitoring controller, writer & monitoring
+                                                   stream functions, which are real time nuclio functions.
+                                                   By default, the image is mlrun/mlrun.
+        :param deploy_histogram_data_drift_app:    If true, deploy the default histogram-based data drift application.
+        :param rebuild_images:                     If true, force rebuild of model monitoring infrastructure images.
+        :param fetch_credentials_from_sys_config:  If true, fetch the credentials from the system configuration.
+
         """
         self.api_call(
             method=mlrun.common.types.HTTPMethod.POST,
@@ -3423,6 +3414,7 @@ class HTTPRunDB(RunDBInterface):
                 "image": image,
                 "deploy_histogram_data_drift_app": deploy_histogram_data_drift_app,
                 "rebuild_images": rebuild_images,
+                "fetch_credentials_from_sys_config": fetch_credentials_from_sys_config,
             },
         )
@@ -3548,6 +3540,23 @@ class HTTPRunDB(RunDBInterface):
             params={"image": image},
         )

+    def set_model_monitoring_credentials(
+        self,
+        project: str,
+        credentials: dict[str, str],
+    ) -> None:
+        """
+        Set the credentials for the model monitoring application.
+
+        :param project: Project name.
+        :param credentials: Credentials to set.
+        """
+        self.api_call(
+            method=mlrun.common.types.HTTPMethod.POST,
+            path=f"projects/{project}/model-monitoring/set-model-monitoring-credentials",
+            params={**credentials},
+        )
+
     def create_hub_source(
         self, source: Union[dict, mlrun.common.schemas.IndexedHubSource]
     ):
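
Together with mandatory_secrets() above, this suggests the intended client flow: set the three connection credentials once, then deploy the monitoring functions (optionally letting the server fetch credentials from the system configuration via the new flag). A hedged sketch; the accepted credential keys are an assumption based on the constants diff:

    import mlrun

    db = mlrun.get_run_db()
    db.set_model_monitoring_credentials(
        project="my-project",
        credentials={
            "endpoint_store_connection": "v3io",  # assumed key names
            "stream_path": "v3io",
            "tsdb_connection": "v3io",
        },
    )
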
mlrun/db/nopdb.py CHANGED
@@ -708,6 +708,7 @@ class NopDB(RunDBInterface):
         image: str = "mlrun/mlrun",
         deploy_histogram_data_drift_app: bool = True,
         rebuild_images: bool = False,
+        fetch_credentials_from_sys_config: bool = False,
     ) -> None:
         pass

@@ -730,7 +731,14 @@ class NopDB(RunDBInterface):
     def deploy_histogram_data_drift_app(
         self, project: str, image: str = "mlrun/mlrun"
     ) -> None:
-        raise NotImplementedError
+        pass
+
+    def set_model_monitoring_credentials(
+        self,
+        project: str,
+        credentials: dict[str, str],
+    ) -> None:
+        pass

     def generate_event(
         self, name: str, event_data: Union[dict, mlrun.common.schemas.Event], project=""