mlrun 1.8.0rc29__py3-none-any.whl → 1.8.0rc30__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.

Potentially problematic release.



Files changed (38)
  1. mlrun/__init__.py +2 -1
  2. mlrun/artifacts/document.py +3 -3
  3. mlrun/artifacts/manager.py +1 -0
  4. mlrun/artifacts/model.py +3 -3
  5. mlrun/common/model_monitoring/helpers.py +16 -7
  6. mlrun/common/runtimes/constants.py +1 -0
  7. mlrun/common/schemas/model_monitoring/constants.py +3 -1
  8. mlrun/datastore/datastore_profile.py +1 -1
  9. mlrun/datastore/sources.py +14 -13
  10. mlrun/db/httpdb.py +4 -30
  11. mlrun/k8s_utils.py +2 -5
  12. mlrun/launcher/base.py +16 -0
  13. mlrun/model_monitoring/api.py +1 -2
  14. mlrun/model_monitoring/applications/_application_steps.py +23 -37
  15. mlrun/model_monitoring/applications/context.py +0 -3
  16. mlrun/model_monitoring/applications/results.py +14 -14
  17. mlrun/model_monitoring/controller.py +31 -28
  18. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +6 -2
  19. mlrun/model_monitoring/helpers.py +108 -1
  20. mlrun/model_monitoring/stream_processing.py +0 -8
  21. mlrun/projects/project.py +20 -8
  22. mlrun/run.py +2 -2
  23. mlrun/runtimes/nuclio/function.py +4 -2
  24. mlrun/serving/routers.py +3 -4
  25. mlrun/serving/server.py +10 -8
  26. mlrun/serving/states.py +11 -1
  27. mlrun/serving/v2_serving.py +5 -10
  28. mlrun/utils/async_http.py +32 -19
  29. mlrun/utils/helpers.py +4 -1
  30. mlrun/utils/logger.py +1 -0
  31. mlrun/utils/notifications/notification_pusher.py +1 -0
  32. mlrun/utils/version/version.json +2 -2
  33. {mlrun-1.8.0rc29.dist-info → mlrun-1.8.0rc30.dist-info}/METADATA +4 -4
  34. {mlrun-1.8.0rc29.dist-info → mlrun-1.8.0rc30.dist-info}/RECORD +38 -38
  35. {mlrun-1.8.0rc29.dist-info → mlrun-1.8.0rc30.dist-info}/LICENSE +0 -0
  36. {mlrun-1.8.0rc29.dist-info → mlrun-1.8.0rc30.dist-info}/WHEEL +0 -0
  37. {mlrun-1.8.0rc29.dist-info → mlrun-1.8.0rc30.dist-info}/entry_points.txt +0 -0
  38. {mlrun-1.8.0rc29.dist-info → mlrun-1.8.0rc30.dist-info}/top_level.txt +0 -0
mlrun/__init__.py CHANGED
@@ -26,6 +26,7 @@ __all__ = [
     "VolumeMount",
 ]
 
+import collections
 from os import environ, path
 from typing import Optional
 
@@ -238,7 +239,7 @@ def order_env_vars(env_vars: dict[str, str]) -> dict[str, str]:
     """
     ordered_keys = mlconf.get_ordered_keys()
 
-    ordered_env_vars: dict[str, str] = {}
+    ordered_env_vars = collections.OrderedDict()
 
     # First, add the ordered keys to the dictionary
     for key in ordered_keys:
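
Note on the order_env_vars change above: plain dicts already preserve insertion order on Python 3.7+, so the switch to collections.OrderedDict presumably makes the ordering contract explicit; OrderedDict additionally compares order-sensitively, unlike dict. A minimal illustration (not part of the diff):

    import collections

    # dict equality ignores insertion order; OrderedDict equality does not
    assert {"a": 1, "b": 2} == {"b": 2, "a": 1}
    assert collections.OrderedDict(a=1, b=2) != collections.OrderedDict(b=2, a=1)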
mlrun/artifacts/document.py CHANGED
@@ -97,9 +97,9 @@ class MLRunLoader:
     A factory class for creating instances of a dynamically defined document loader.
 
     Args:
-        artifact_key (str, optional): The key for the artifact to be logged. Special characters and symbols
-            not valid in artifact names will be encoded as their hexadecimal representation. The '%%' pattern
-            in the key will be replaced by the hex-encoded version of the source path. Defaults to "%%".
+        artifact_key (str, optional): The key for the artifact to be logged.
+            The '%%' pattern in the key will be replaced by the source path
+            with any unsupported characters converted to '_'. Defaults to "%%".
         local_path (str): The source path of the document to be loaded.
         loader_spec (DocumentLoaderSpec): Specification for the document loader.
         producer (Optional[Union[MlrunProject, str, MLClientCtx]], optional): The producer of the document.
mlrun/artifacts/manager.py CHANGED
@@ -403,6 +403,7 @@ class ArtifactManager:
             project=item.project,
             tag=item.tag,
             tree=item.tree,
+            iter=item.iter,
             deletion_strategy=deletion_strategy,
             secrets=secrets,
         )
mlrun/artifacts/model.py CHANGED
@@ -429,6 +429,9 @@ def get_model(model_dir, suffix=""):
     extra_dataitems = {}
     default_suffix = ".pkl"
 
+    if hasattr(model_dir, "artifact_url"):
+        model_dir = model_dir.artifact_url
+
     alternative_suffix = next(
         (
             optional_suffix
@@ -438,9 +441,6 @@ def get_model(model_dir, suffix=""):
         None,
     )
 
-    if hasattr(model_dir, "artifact_url"):
-        model_dir = model_dir.artifact_url
-
     if mlrun.datastore.is_store_uri(model_dir):
         model_spec, target = mlrun.datastore.store_manager.get_store_artifact(model_dir)
         if not model_spec or model_spec.kind != "model":
mlrun/common/model_monitoring/helpers.py CHANGED
@@ -36,6 +36,20 @@ def parse_model_endpoint_store_prefix(store_prefix: str):
     return endpoint, container, path
 
 
+def get_kafka_topic(project: str, function_name: typing.Optional[str] = None) -> str:
+    if (
+        function_name is None
+        or function_name == mm_constants.MonitoringFunctionNames.STREAM
+    ):
+        function_specifier = ""
+    else:
+        function_specifier = f"_{function_name}"
+
+    return (
+        f"monitoring_stream_{mlrun.mlconf.system_id}_{project}{function_specifier}_v1"
+    )
+
+
 def parse_monitoring_stream_path(
     stream_uri: str, project: str, function_name: typing.Optional[str] = None
 ) -> str:
@@ -43,13 +57,8 @@ def parse_monitoring_stream_path(
         if "?topic" in stream_uri:
            raise mlrun.errors.MLRunValueError("Custom kafka topic is not allowed")
        # Add topic to stream kafka uri
-        if (
-            function_name is None
-            or function_name == mm_constants.MonitoringFunctionNames.STREAM
-        ):
-            stream_uri += f"?topic=monitoring_stream_{project}_v1"
-        else:
-            stream_uri += f"?topic=monitoring_stream_{project}_{function_name}_v1"
+        topic = get_kafka_topic(project=project, function_name=function_name)
+        stream_uri += f"?topic={topic}"
 
     return stream_uri
 
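The functional change above is that Kafka topic names now embed the MLRun system id, so two MLRun systems sharing one Kafka cluster get distinct monitoring topics. A sketch of the resulting names, assuming a hypothetical system_id of "s1" and assuming MonitoringFunctionNames.STREAM equals "model-monitoring-stream":

    def expected_topic(project, function_name=None, system_id="s1"):
        # Mirrors get_kafka_topic: the stream function (or None) gets no specifier
        specifier = (
            ""
            if function_name in (None, "model-monitoring-stream")
            else f"_{function_name}"
        )
        return f"monitoring_stream_{system_id}_{project}{specifier}_v1"

    assert expected_topic("my-proj") == "monitoring_stream_s1_my-proj_v1"
    assert expected_topic("my-proj", "app1") == "monitoring_stream_s1_my-proj_app1_v1"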
mlrun/common/runtimes/constants.py CHANGED
@@ -229,6 +229,7 @@ class RunStates:
             mlrun_pipelines.common.models.RunStatuses.runtime_state_unspecified: RunStates.unknown,
             mlrun_pipelines.common.models.RunStatuses.error: RunStates.error,
             mlrun_pipelines.common.models.RunStatuses.paused: RunStates.unknown,
+            mlrun_pipelines.common.models.RunStatuses.unknown: RunStates.unknown,
         }[pipeline_run_status]
 
 
mlrun/common/schemas/model_monitoring/constants.py CHANGED
@@ -163,7 +163,6 @@ class ApplicationEvent:
     END_INFER_TIME = "end_infer_time"
     ENDPOINT_ID = "endpoint_id"
     ENDPOINT_NAME = "endpoint_name"
-    OUTPUT_STREAM_URI = "output_stream_uri"
 
 
 class WriterEvent(MonitoringStrEnum):
@@ -481,3 +480,6 @@ INTERSECT_DICT_KEYS = {
     ModelEndpointMonitoringMetricType.METRIC: "intersect_metrics",
     ModelEndpointMonitoringMetricType.RESULT: "intersect_results",
 }
+
+CRON_TRIGGER_KINDS = ("http", "cron")
+STREAM_TRIGGER_KINDS = ("v3io-stream", "kafka-cluster")
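
The two new tuples are consumed by the model monitoring controller's nuclio handler (see mlrun/model_monitoring/controller.py further down) to route invocations. A minimal sketch of that dispatch:

    CRON_TRIGGER_KINDS = ("http", "cron")
    STREAM_TRIGGER_KINDS = ("v3io-stream", "kafka-cluster")

    def dispatch(trigger_kind: str) -> str:
        # chief: pushes the regular event; worker: consumes the stream
        if trigger_kind in CRON_TRIGGER_KINDS:
            return "chief"
        if trigger_kind in STREAM_TRIGGER_KINDS:
            return "worker"
        raise ValueError("Wrong trigger kind for model monitoring controller")

    assert dispatch("cron") == "chief"
    assert dispatch("v3io-stream") == "worker"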
mlrun/datastore/datastore_profile.py CHANGED
@@ -193,7 +193,7 @@ class DatastoreProfileKafkaSource(DatastoreProfile):
     kwargs_public: typing.Optional[dict]
     kwargs_private: typing.Optional[dict]
 
-    def attributes(self):
+    def attributes(self) -> dict[str, typing.Any]:
         attributes = {}
         if self.kwargs_public:
             attributes = merge(attributes, self.kwargs_public)
mlrun/datastore/sources.py CHANGED
@@ -1200,19 +1200,20 @@ class KafkaSource(OnlineSource):
         new_topics = [
             NewTopic(topic, num_partitions, replication_factor) for topic in topics
         ]
-        kafka_admin = KafkaAdminClient(
-            bootstrap_servers=brokers,
-            sasl_mechanism=self.attributes.get("sasl", {}).get("sasl_mechanism"),
-            sasl_plain_username=self.attributes.get("sasl", {}).get("username"),
-            sasl_plain_password=self.attributes.get("sasl", {}).get("password"),
-            sasl_kerberos_service_name=self.attributes.get("sasl", {}).get(
-                "sasl_kerberos_service_name", "kafka"
-            ),
-            sasl_kerberos_domain_name=self.attributes.get("sasl", {}).get(
-                "sasl_kerberos_domain_name"
-            ),
-            sasl_oauth_token_provider=self.attributes.get("sasl", {}).get("mechanism"),
-        )
+
+        kafka_admin_kwargs = {}
+        if "sasl" in self.attributes:
+            sasl = self.attributes["sasl"]
+            kafka_admin_kwargs.update(
+                {
+                    "security_protocol": "SASL_PLAINTEXT",
+                    "sasl_mechanism": sasl["mechanism"],
+                    "sasl_plain_username": sasl["user"],
+                    "sasl_plain_password": sasl["password"],
+                }
+            )
+
+        kafka_admin = KafkaAdminClient(bootstrap_servers=brokers, **kafka_admin_kwargs)
         try:
             kafka_admin.create_topics(new_topics)
         finally:
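
The rewritten branch expects the profile's "sasl" attributes in roughly the shape below (illustrative values; "mechanism", "user", and "password" are the keys the new code reads), and it only configures SASL when that section is present, instead of passing a series of possibly-None kwargs:

    attributes = {
        "sasl": {
            "mechanism": "PLAIN",  # e.g. PLAIN or SCRAM-SHA-256
            "user": "my-user",
            "password": "my-secret",
        }
    }

Note the old code also mapped the "mechanism" key to sasl_oauth_token_provider, which the rewrite drops, and the new code pins security_protocol to "SASL_PLAINTEXT" whenever SASL is configured.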
mlrun/db/httpdb.py CHANGED
@@ -1734,36 +1734,10 @@ class HTTPRunDB(RunDBInterface):
     def create_schedule(
         self, project: str, schedule: mlrun.common.schemas.ScheduleInput
     ):
-        """Create a new schedule on the given project. The details on the actual object to schedule as well as the
-        schedule itself are within the schedule object provided.
-        The :py:class:`~ScheduleCronTrigger` follows the guidelines in
-        https://apscheduler.readthedocs.io/en/3.x/modules/triggers/cron.html.
-        It also supports a :py:func:`~ScheduleCronTrigger.from_crontab` function that accepts a
-        crontab-formatted string (see https://en.wikipedia.org/wiki/Cron for more information on the format and
-        note that the 0 weekday is always monday).
-
-
-        Example::
-
-            from mlrun.common import schemas
-
-            # Execute the get_data_func function every Tuesday at 15:30
-            schedule = schemas.ScheduleInput(
-                name="run_func_on_tuesdays",
-                kind="job",
-                scheduled_object=get_data_func,
-                cron_trigger=schemas.ScheduleCronTrigger(
-                    day_of_week="tue", hour=15, minute=30
-                ),
-            )
-            db.create_schedule(project_name, schedule)
-        """
-
-        project = project or config.default_project
-        path = f"projects/{project}/schedules"
-
-        error_message = f"Failed creating schedule {project}/{schedule.name}"
-        self.api_call("POST", path, error_message, body=dict_to_json(schedule.dict()))
+        """The create_schedule functionality has been deprecated."""
+        raise mlrun.errors.MLRunBadRequestError(
+            "The create_schedule functionality has been deprecated."
+        )
 
     def update_schedule(
         self, project: str, name: str, schedule: mlrun.common.schemas.ScheduleUpdate
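
A sketch of the new client-side behavior (project name illustrative):

    import mlrun
    import mlrun.errors

    db = mlrun.get_run_db()
    try:
        db.create_schedule("my-project", schedule=None)  # raises before using args
    except mlrun.errors.MLRunBadRequestError as exc:
        print(exc)  # The create_schedule functionality has been deprecated.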
mlrun/k8s_utils.py CHANGED
@@ -142,6 +142,7 @@ def verify_label_key(key: str, allow_k8s_prefix: bool = False):
     if not key:
         raise mlrun.errors.MLRunInvalidArgumentError("label key cannot be empty")
 
+    prefix = ""
     parts = key.split("/")
     if len(parts) == 1:
         name = parts[0]
@@ -180,11 +181,7 @@ def verify_label_key(key: str, allow_k8s_prefix: bool = False):
 
     # Allow the use of Kubernetes reserved prefixes ('k8s.io/' or 'kubernetes.io/')
     # only when setting node selectors, not when adding new labels.
-    if (
-        key.startswith("k8s.io/")
-        or key.startswith("kubernetes.io/")
-        and not allow_k8s_prefix
-    ):
+    if not allow_k8s_prefix and prefix in {"k8s.io", "kubernetes.io"}:
         raise mlrun.errors.MLRunInvalidArgumentError(
             "Labels cannot start with 'k8s.io/' or 'kubernetes.io/'"
         )
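
The removed condition had an operator-precedence bug: `and` binds tighter than `or`, so keys with the 'k8s.io/' prefix raised even when allow_k8s_prefix=True. A standalone illustration:

    key, allow_k8s_prefix = "k8s.io/arch", True

    # old:  a or b and not c  ==  a or (b and not c)
    old = key.startswith("k8s.io/") or (
        key.startswith("kubernetes.io/") and not allow_k8s_prefix
    )
    new = not allow_k8s_prefix and key.split("/")[0] in {"k8s.io", "kubernetes.io"}

    assert old is True   # old code raised despite allow_k8s_prefix=True
    assert new is False  # new code honors the flag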
mlrun/launcher/base.py CHANGED
@@ -401,6 +401,7 @@ class BaseLauncher(abc.ABC):
             status=run.status.state,
             name=run.metadata.name,
         )
+        self._update_end_time_if_terminal_state(runtime, run)
         if (
             run.status.state
             in mlrun.common.runtimes.constants.RunStates.error_and_abortion_states()
@@ -416,6 +417,21 @@ class BaseLauncher(abc.ABC):
 
         return None
 
+    @staticmethod
+    def _update_end_time_if_terminal_state(
+        runtime: "mlrun.runtimes.BaseRuntime", run: "mlrun.run.RunObject"
+    ):
+        if (
+            run.status.state
+            in mlrun.common.runtimes.constants.RunStates.terminal_states()
+            and not run.status.end_time
+        ):
+            end_time = mlrun.utils.now_date().isoformat()
+            updates = {"status.end_time": end_time}
+            runtime._get_db().update_run(
+                updates, run.metadata.uid, run.metadata.project
+            )
+
     @staticmethod
     def _refresh_function_metadata(runtime: "mlrun.runtimes.BaseRuntime"):
         pass
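
The net effect of the new helper: runs that reached a terminal state without a recorded end time get one stamped when the launcher detects them. A rough sketch of the guard, with an illustrative set of terminal states:

    TERMINAL_STATES = ("completed", "error", "aborted")  # illustrative subset

    def needs_end_time(state: str, end_time) -> bool:
        # Mirrors _update_end_time_if_terminal_state: patch only terminal
        # runs that are missing an end time
        return state in TERMINAL_STATES and not end_time

    assert needs_end_time("completed", None)
    assert not needs_end_time("running", None)
    assert not needs_end_time("completed", "2024-01-01T00:00:00")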
mlrun/model_monitoring/api.py CHANGED
@@ -619,8 +619,8 @@ def _create_model_monitoring_function_base(
         app_step.__class__ = mlrun.serving.MonitoringApplicationStep
 
         app_step.error_handler(
-            name="ApplicationErrorHandler",
             class_name="mlrun.model_monitoring.applications._application_steps._ApplicationErrorHandler",
+            name="ApplicationErrorHandler",
             full_event=True,
             project=project,
         )
@@ -629,7 +629,6 @@ def _create_model_monitoring_function_base(
             class_name="mlrun.model_monitoring.applications._application_steps._PushToMonitoringWriter",
             name="PushToMonitoringWriter",
             project=project,
-            writer_application_name=mm_constants.MonitoringFunctionNames.WRITER,
         )
 
     def block_to_mock_server(*args, **kwargs) -> typing.NoReturn:
mlrun/model_monitoring/applications/_application_steps.py CHANGED
@@ -18,10 +18,8 @@ from typing import Any, Optional, Union
 
 import mlrun.common.schemas
 import mlrun.common.schemas.alert as alert_objects
-import mlrun.common.schemas.model_monitoring.constants as mm_constant
-import mlrun.datastore
-import mlrun.model_monitoring
-from mlrun.model_monitoring.helpers import get_stream_path
+import mlrun.common.schemas.model_monitoring.constants as mm_constants
+import mlrun.model_monitoring.helpers
 from mlrun.serving import GraphContext
 from mlrun.serving.utils import StepToDict
 from mlrun.utils import logger
@@ -37,29 +35,14 @@ from .results import (
 class _PushToMonitoringWriter(StepToDict):
     kind = "monitoring_application_stream_pusher"
 
-    def __init__(
-        self,
-        project: str,
-        writer_application_name: str,
-        stream_uri: Optional[str] = None,
-        name: Optional[str] = None,
-    ):
+    def __init__(self, project: str) -> None:
        """
        Class for pushing application results to the monitoring writer stream.
 
-        :param project:                 Project name.
-        :param writer_application_name: Writer application name.
-        :param stream_uri:              Stream URI for pushing results.
-        :param name:                    Name of the PushToMonitoringWriter
-                                        instance default to PushToMonitoringWriter.
+        :param project: Project name.
        """
        self.project = project
-        self.application_name_to_push = writer_application_name
-        self.stream_uri = stream_uri or get_stream_path(
-            project=self.project, function_name=self.application_name_to_push
-        )
        self.output_stream = None
-        self.name = name or "PushToMonitoringWriter"
 
     def do(
         self,
@@ -82,40 +65,43 @@ class _PushToMonitoringWriter(StepToDict):
         self._lazy_init()
         application_results, application_context = event
         writer_event = {
-            mm_constant.WriterEvent.ENDPOINT_NAME: application_context.endpoint_name,
-            mm_constant.WriterEvent.APPLICATION_NAME: application_context.application_name,
-            mm_constant.WriterEvent.ENDPOINT_ID: application_context.endpoint_id,
-            mm_constant.WriterEvent.START_INFER_TIME: application_context.start_infer_time.isoformat(
+            mm_constants.WriterEvent.ENDPOINT_NAME: application_context.endpoint_name,
+            mm_constants.WriterEvent.APPLICATION_NAME: application_context.application_name,
+            mm_constants.WriterEvent.ENDPOINT_ID: application_context.endpoint_id,
+            mm_constants.WriterEvent.START_INFER_TIME: application_context.start_infer_time.isoformat(
                 sep=" ", timespec="microseconds"
             ),
-            mm_constant.WriterEvent.END_INFER_TIME: application_context.end_infer_time.isoformat(
+            mm_constants.WriterEvent.END_INFER_TIME: application_context.end_infer_time.isoformat(
                 sep=" ", timespec="microseconds"
             ),
         }
         for result in application_results:
             data = result.to_dict()
             if isinstance(result, ModelMonitoringApplicationResult):
-                writer_event[mm_constant.WriterEvent.EVENT_KIND] = (
-                    mm_constant.WriterEventKind.RESULT
+                writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
+                    mm_constants.WriterEventKind.RESULT
                 )
             elif isinstance(result, _ModelMonitoringApplicationStats):
-                writer_event[mm_constant.WriterEvent.EVENT_KIND] = (
-                    mm_constant.WriterEventKind.STATS
+                writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
+                    mm_constants.WriterEventKind.STATS
                 )
             else:
-                writer_event[mm_constant.WriterEvent.EVENT_KIND] = (
-                    mm_constant.WriterEventKind.METRIC
+                writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
+                    mm_constants.WriterEventKind.METRIC
                 )
-            writer_event[mm_constant.WriterEvent.DATA] = json.dumps(data)
-            logger.info(
-                f"Pushing data = {writer_event} \n to stream = {self.stream_uri}"
+            writer_event[mm_constants.WriterEvent.DATA] = json.dumps(data)
+            logger.debug(
+                "Pushing data to output stream", writer_event=str(writer_event)
            )
            self.output_stream.push([writer_event])
-        logger.info(f"Pushed data to {self.stream_uri} successfully")
+        logger.debug("Pushed data to output stream successfully")
 
     def _lazy_init(self):
         if self.output_stream is None:
-            self.output_stream = mlrun.datastore.get_stream_pusher(self.stream_uri)
+            self.output_stream = mlrun.model_monitoring.helpers.get_output_stream(
+                project=self.project,
+                function_name=mm_constants.MonitoringFunctionNames.WRITER,
+            )
 
 
 class _PrepareMonitoringEvent(StepToDict):
mlrun/model_monitoring/applications/context.py CHANGED
@@ -111,9 +111,6 @@ class MonitoringApplicationContext:
         self.endpoint_name = cast(
             str, event.get(mm_constants.ApplicationEvent.ENDPOINT_NAME)
         )
-        self.output_stream_uri = cast(
-            str, event.get(mm_constants.ApplicationEvent.OUTPUT_STREAM_URI)
-        )
 
         self._feature_stats: Optional[FeatureStats] = feature_stats
         self._sample_df_stats: Optional[FeatureStats] = None
mlrun/model_monitoring/applications/results.py CHANGED
@@ -22,7 +22,7 @@ from pydantic.v1.dataclasses import dataclass
 
 import mlrun.common.helpers
 import mlrun.common.model_monitoring.helpers
-import mlrun.common.schemas.model_monitoring.constants as mm_constant
+import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.utils.v3io_clients
 from mlrun.utils import logger
 
@@ -63,8 +63,8 @@ class ModelMonitoringApplicationResult(_ModelMonitoringApplicationDataRes):
 
     name: str
     value: float
-    kind: mm_constant.ResultKindApp
-    status: mm_constant.ResultStatusApp
+    kind: mm_constants.ResultKindApp
+    status: mm_constants.ResultStatusApp
     extra_data: dict = dataclasses.field(default_factory=dict)
 
     def to_dict(self):
@@ -74,11 +74,11 @@ class ModelMonitoringApplicationResult(_ModelMonitoringApplicationDataRes):
         :returns: (dict) Dictionary representation of the result.
         """
         return {
-            mm_constant.ResultData.RESULT_NAME: self.name,
-            mm_constant.ResultData.RESULT_VALUE: self.value,
-            mm_constant.ResultData.RESULT_KIND: self.kind.value,
-            mm_constant.ResultData.RESULT_STATUS: self.status.value,
-            mm_constant.ResultData.RESULT_EXTRA_DATA: json.dumps(self.extra_data),
+            mm_constants.ResultData.RESULT_NAME: self.name,
+            mm_constants.ResultData.RESULT_VALUE: self.value,
+            mm_constants.ResultData.RESULT_KIND: self.kind.value,
+            mm_constants.ResultData.RESULT_STATUS: self.status.value,
+            mm_constants.ResultData.RESULT_EXTRA_DATA: json.dumps(self.extra_data),
         }
 
     @validator("extra_data")
@@ -118,8 +118,8 @@ class ModelMonitoringApplicationMetric(_ModelMonitoringApplicationDataRes):
         :returns: (dict) Dictionary representation of the result.
         """
         return {
-            mm_constant.MetricData.METRIC_NAME: self.name,
-            mm_constant.MetricData.METRIC_VALUE: self.value,
+            mm_constants.MetricData.METRIC_NAME: self.name,
+            mm_constants.MetricData.METRIC_VALUE: self.value,
         }
 
 
@@ -134,7 +134,7 @@ class _ModelMonitoringApplicationStats(_ModelMonitoringApplicationDataRes):
 
     """
 
-    name: mm_constant.StatsKind
+    name: mm_constants.StatsKind
     timestamp: str
     stats: dict = dataclasses.field(default_factory=dict)
 
@@ -145,7 +145,7 @@ class _ModelMonitoringApplicationStats(_ModelMonitoringApplicationDataRes):
         :returns: (dict) Dictionary representation of the result.
         """
         return {
-            mm_constant.StatsData.STATS_NAME: self.name,
-            mm_constant.StatsData.STATS: self.stats,
-            mm_constant.StatsData.TIMESTAMP: self.timestamp,
+            mm_constants.StatsData.STATS_NAME: self.name,
+            mm_constants.StatsData.STATS: self.stats,
+            mm_constants.StatsData.TIMESTAMP: self.timestamp,
         }
mlrun/model_monitoring/controller.py CHANGED
@@ -27,15 +27,15 @@ import mlrun
 import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.feature_store as fstore
 import mlrun.model_monitoring
+import mlrun.model_monitoring.helpers
 from mlrun.common.schemas import EndpointType
 from mlrun.common.schemas.model_monitoring.constants import (
     ControllerEvent,
     ControllerEventKind,
 )
-from mlrun.datastore import get_stream_pusher
 from mlrun.errors import err_to_str
 from mlrun.model_monitoring.db._schedules import ModelMonitoringSchedulesFile
-from mlrun.model_monitoring.helpers import batch_dict2timedelta, get_stream_path
+from mlrun.model_monitoring.helpers import batch_dict2timedelta
 from mlrun.utils import datetime_now, logger
 
 _SECONDS_IN_DAY = int(datetime.timedelta(days=1).total_seconds())
@@ -249,7 +249,7 @@ class MonitoringApplicationController:
         self._window_length = _get_window_length()
 
         self.model_monitoring_access_key = self._get_model_monitoring_access_key()
-        self.v3io_access_key = mlrun.get_secret_or_env("V3IO_ACCESS_KEY")
+        self.v3io_access_key = mlrun.mlconf.get_v3io_access_key()
         self.storage_options = None
         if mlrun.mlconf.artifact_path.startswith("s3://"):
             self.storage_options = mlrun.mlconf.get_s3_storage_options()
@@ -482,24 +482,23 @@ class MonitoringApplicationController:
                 ),
                 mm_constants.ApplicationEvent.ENDPOINT_ID: endpoint_id,
                 mm_constants.ApplicationEvent.ENDPOINT_NAME: endpoint_name,
-                mm_constants.ApplicationEvent.OUTPUT_STREAM_URI: get_stream_path(
-                    project=project,
-                    function_name=mm_constants.MonitoringFunctionNames.WRITER,
-                ),
             }
             for app_name in applications_names:
                 data.update({mm_constants.ApplicationEvent.APPLICATION_NAME: app_name})
-                stream_uri = get_stream_path(project=project, function_name=app_name)
+
+                app_stream = mlrun.model_monitoring.helpers.get_output_stream(
+                    project=project,
+                    function_name=app_name,
+                    v3io_access_key=model_monitoring_access_key,
+                )
 
                 logger.info(
                     "Pushing data to application stream",
                     endpoint_id=endpoint_id,
                     app_name=app_name,
-                    stream_uri=stream_uri,
-                )
-                get_stream_pusher(stream_uri, access_key=model_monitoring_access_key).push(
-                    [data]
+                    app_stream_type=str(type(app_stream)),
                 )
+                app_stream.push([data])
 
     def push_regular_event_to_controller_stream(self) -> None:
         """
@@ -628,10 +627,6 @@ class MonitoringApplicationController:
         :param feature_set_uri:    the feature set uri string
         :param stream_access_key:  access key to apply the model monitoring process.
         """
-        stream_uri = get_stream_path(
-            project=project,
-            function_name=mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
-        )
         event = {
             ControllerEvent.KIND.value: kind,
             ControllerEvent.PROJECT.value: project,
@@ -643,15 +638,18 @@ class MonitoringApplicationController:
             ControllerEvent.FEATURE_SET_URI.value: feature_set_uri,
             ControllerEvent.ENDPOINT_POLICY.value: endpoint_policy,
         }
+        controller_stream = mlrun.model_monitoring.helpers.get_output_stream(
+            project=project,
+            function_name=mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
+            v3io_access_key=stream_access_key,
+        )
         logger.info(
             "Pushing data to controller stream",
             event=event,
             endpoint_id=endpoint_id,
-            stream_uri=stream_uri,
-        )
-        get_stream_pusher(stream_uri, access_key=stream_access_key).push(
-            [event], partition_key=endpoint_id
+            controller_stream_type=str(type(controller_stream)),
         )
+        controller_stream.push([event], partition_key=endpoint_id)
 
     def _push_to_main_stream(self, event: dict, endpoint_id: str) -> None:
         """
@@ -659,17 +657,18 @@ class MonitoringApplicationController:
         :param event:       event dictionary to push to stream
         :param endpoint_id: endpoint id string
         """
-        stream_uri = get_stream_path(project=event.get(ControllerEvent.PROJECT))
-
+        mm_stream = mlrun.model_monitoring.helpers.get_output_stream(
+            project=event.get(ControllerEvent.PROJECT),
+            function_name=mm_constants.MonitoringFunctionNames.APPLICATION_CONTROLLER,
+            v3io_access_key=self.v3io_access_key,
+        )
         logger.info(
             "Pushing data to main stream, NOP event is been generated",
             event=json.dumps(event),
             endpoint_id=endpoint_id,
-            stream_uri=stream_uri,
-        )
-        get_stream_pusher(stream_uri, access_key=self.model_monitoring_access_key).push(
-            [event], partition_key=endpoint_id
+            mm_stream_type=str(type(mm_stream)),
        )
+        mm_stream.push([event], partition_key=endpoint_id)
 
 
 def handler(context: nuclio_sdk.Context, event: nuclio_sdk.Event) -> None:
@@ -685,12 +684,16 @@ def handler(context: nuclio_sdk.Context, event: nuclio_sdk.Event) -> None:
         trigger_kind=event.trigger.kind,
     )
 
-    if event.trigger.kind == "http":
+    if event.trigger.kind in mm_constants.CRON_TRIGGER_KINDS:
         # Runs controller chief:
         context.user_data.monitor_app_controller.push_regular_event_to_controller_stream()
-    else:
+    elif event.trigger.kind in mm_constants.STREAM_TRIGGER_KINDS:
        # Runs controller worker:
        context.user_data.monitor_app_controller.run(event)
+    else:
+        raise mlrun.errors.MLRunInvalidArgumentError(
+            "Wrong trigger kind for model monitoring controller"
+        )
 
 
 def init_context(context):
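
The recurring refactor across this file replaces the two-step get_stream_path/get_stream_pusher pattern with a single call to the new mlrun.model_monitoring.helpers.get_output_stream helper (added in mlrun/model_monitoring/helpers.py, whose +108-line hunk is not shown here). Side by side, as a sketch:

    # before
    # stream_uri = get_stream_path(project=project, function_name=name)
    # get_stream_pusher(stream_uri, access_key=key).push([event])

    # after
    # stream = mlrun.model_monitoring.helpers.get_output_stream(
    #     project=project, function_name=name, v3io_access_key=key
    # )
    # stream.push([event])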
mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py CHANGED
@@ -40,7 +40,7 @@ class TDEngineConnector(TSDBConnector):
     def __init__(
         self,
         project: str,
-        database: str = tdengine_schemas._MODEL_MONITORING_DATABASE,
+        database: typing.Optional[str] = None,
         **kwargs,
     ):
         super().__init__(project=project)
@@ -48,8 +48,12 @@ class TDEngineConnector(TSDBConnector):
             raise mlrun.errors.MLRunInvalidArgumentError(
                 "connection_string is a required parameter for TDEngineConnector."
             )
+
         self._tdengine_connection_string = kwargs.get("connection_string")
-        self.database = database
+        self.database = (
+            database
+            or f"{tdengine_schemas._MODEL_MONITORING_DATABASE}_{mlrun.mlconf.system_id}"
+        )
 
         self._connection = None
         self._init_super_tables()
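
The TDEngine database name now defaults to a per-system value instead of a shared constant. A sketch of the resolution logic, with illustrative prefix and system id (the real prefix is tdengine_schemas._MODEL_MONITORING_DATABASE):

    def resolve_database(database=None, prefix="mlrun_model_monitoring", system_id="s1"):
        # Mirrors the new __init__ logic: an explicit database wins,
        # otherwise a per-system default is derived
        return database or f"{prefix}_{system_id}"

    assert resolve_database() == "mlrun_model_monitoring_s1"
    assert resolve_database("custom_db") == "custom_db"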