mlrun 1.10.0rc5__py3-none-any.whl → 1.10.0rc7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlrun might be problematic. Click here for more details.

Files changed (47) hide show
  1. mlrun/__main__.py +47 -4
  2. mlrun/artifacts/base.py +0 -27
  3. mlrun/artifacts/dataset.py +0 -8
  4. mlrun/artifacts/model.py +3 -10
  5. mlrun/artifacts/plots.py +0 -13
  6. mlrun/common/schemas/model_monitoring/__init__.py +1 -0
  7. mlrun/common/schemas/model_monitoring/constants.py +14 -2
  8. mlrun/common/schemas/model_monitoring/functions.py +66 -0
  9. mlrun/common/schemas/project.py +3 -0
  10. mlrun/config.py +3 -3
  11. mlrun/db/base.py +13 -20
  12. mlrun/db/httpdb.py +48 -65
  13. mlrun/db/nopdb.py +12 -13
  14. mlrun/launcher/base.py +1 -0
  15. mlrun/launcher/client.py +24 -0
  16. mlrun/launcher/local.py +4 -0
  17. mlrun/model_monitoring/applications/_application_steps.py +23 -39
  18. mlrun/model_monitoring/applications/base.py +167 -32
  19. mlrun/model_monitoring/db/tsdb/base.py +30 -0
  20. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connection.py +118 -50
  21. mlrun/model_monitoring/db/tsdb/tdengine/tdengine_connector.py +117 -24
  22. mlrun/model_monitoring/db/tsdb/v3io/v3io_connector.py +106 -15
  23. mlrun/model_monitoring/helpers.py +0 -3
  24. mlrun/projects/operations.py +11 -24
  25. mlrun/projects/project.py +81 -83
  26. mlrun/runtimes/base.py +0 -27
  27. mlrun/runtimes/daskjob.py +6 -4
  28. mlrun/runtimes/databricks_job/databricks_runtime.py +0 -2
  29. mlrun/runtimes/kubejob.py +5 -8
  30. mlrun/runtimes/mpijob/abstract.py +2 -2
  31. mlrun/runtimes/mpijob/v1.py +2 -2
  32. mlrun/runtimes/nuclio/application/application.py +0 -5
  33. mlrun/runtimes/nuclio/function.py +2 -11
  34. mlrun/runtimes/nuclio/serving.py +46 -6
  35. mlrun/runtimes/pod.py +4 -3
  36. mlrun/runtimes/remotesparkjob.py +2 -2
  37. mlrun/runtimes/sparkjob/spark3job.py +2 -2
  38. mlrun/serving/server.py +97 -3
  39. mlrun/serving/states.py +16 -18
  40. mlrun/utils/helpers.py +15 -4
  41. mlrun/utils/version/version.json +2 -2
  42. {mlrun-1.10.0rc5.dist-info → mlrun-1.10.0rc7.dist-info}/METADATA +3 -2
  43. {mlrun-1.10.0rc5.dist-info → mlrun-1.10.0rc7.dist-info}/RECORD +47 -46
  44. {mlrun-1.10.0rc5.dist-info → mlrun-1.10.0rc7.dist-info}/WHEEL +0 -0
  45. {mlrun-1.10.0rc5.dist-info → mlrun-1.10.0rc7.dist-info}/entry_points.txt +0 -0
  46. {mlrun-1.10.0rc5.dist-info → mlrun-1.10.0rc7.dist-info}/licenses/LICENSE +0 -0
  47. {mlrun-1.10.0rc5.dist-info → mlrun-1.10.0rc7.dist-info}/top_level.txt +0 -0
mlrun/db/nopdb.py CHANGED
@@ -126,9 +126,6 @@ class NopDB(RunDBInterface):
126
126
  uid: Optional[Union[str, list[str]]] = None,
127
127
  project: Optional[str] = None,
128
128
  labels: Optional[Union[str, dict[str, Optional[str]], list[str]]] = None,
129
- state: Optional[
130
- mlrun.common.runtimes.constants.RunStates
131
- ] = None, # Backward compatibility
132
129
  states: Optional[list[mlrun.common.runtimes.constants.RunStates]] = None,
133
130
  sort: bool = True,
134
131
  iter: bool = False,
@@ -374,16 +371,6 @@ class NopDB(RunDBInterface):
374
371
  ) -> dict:
375
372
  pass
376
373
 
377
- def list_features(
378
- self,
379
- project: str,
380
- name: Optional[str] = None,
381
- tag: Optional[str] = None,
382
- entities: Optional[list[str]] = None,
383
- labels: Optional[Union[str, dict[str, Optional[str]], list[str]]] = None,
384
- ) -> mlrun.common.schemas.FeaturesOutput:
385
- pass
386
-
387
374
  def list_features_v2(
388
375
  self,
389
376
  project: str,
@@ -893,6 +880,18 @@ class NopDB(RunDBInterface):
893
880
  ) -> None:
894
881
  pass
895
882
 
883
+ def get_monitoring_function_summaries(
884
+ self,
885
+ project: str,
886
+ start: Optional[datetime.datetime] = None,
887
+ end: Optional[datetime.datetime] = None,
888
+ names: Optional[Union[list[str], str]] = None,
889
+ labels: Optional[Union[str, dict[str, Optional[str]], list[str]]] = None,
890
+ include_stats: bool = False,
891
+ include_infra: bool = True,
892
+ ) -> [mlrun.common.schemas.model_monitoring.FunctionSummary]:
893
+ pass
894
+
896
895
  def generate_event(
897
896
  self, name: str, event_data: Union[dict, mlrun.common.schemas.Event], project=""
898
897
  ):
mlrun/launcher/base.py CHANGED
@@ -82,6 +82,7 @@ class BaseLauncher(abc.ABC):
82
82
  runtime: "mlrun.runtimes.base.BaseRuntime",
83
83
  project_name: Optional[str] = "",
84
84
  full: bool = True,
85
+ client_version: str = "",
85
86
  ):
86
87
  pass
87
88
 
mlrun/launcher/client.py CHANGED
@@ -12,6 +12,7 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
  import abc
15
+ import warnings
15
16
  from typing import Optional
16
17
 
17
18
  import IPython.display
@@ -23,6 +24,7 @@ import mlrun.lists
23
24
  import mlrun.model
24
25
  import mlrun.runtimes
25
26
  import mlrun.utils
27
+ import mlrun.utils.version
26
28
 
27
29
 
28
30
  class ClientBaseLauncher(launcher.BaseLauncher, abc.ABC):
@@ -35,6 +37,7 @@ class ClientBaseLauncher(launcher.BaseLauncher, abc.ABC):
35
37
  runtime: "mlrun.runtimes.base.BaseRuntime",
36
38
  project_name: Optional[str] = "",
37
39
  full: bool = True,
40
+ client_version: str = "",
38
41
  ):
39
42
  runtime.try_auto_mount_based_on_config()
40
43
  runtime._fill_credentials()
@@ -60,6 +63,27 @@ class ClientBaseLauncher(launcher.BaseLauncher, abc.ABC):
60
63
  ):
61
64
  image = mlrun.mlconf.function_defaults.image_by_kind.to_dict()[runtime.kind]
62
65
 
66
+ # Warn if user explicitly set the deprecated mlrun/ml-base image
67
+ if image and "mlrun/ml-base" in image:
68
+ client_version = mlrun.utils.version.Version().get()["version"]
69
+ auto_replaced = mlrun.utils.validate_component_version_compatibility(
70
+ "mlrun-client", "1.10.0", mlrun_client_version=client_version
71
+ )
72
+ message = (
73
+ "'mlrun/ml-base' image is deprecated in 1.10.0 and will be removed in 1.12.0, "
74
+ "use 'mlrun/mlrun' instead."
75
+ )
76
+ if auto_replaced:
77
+ message += (
78
+ " Since your client version is >= 1.10.0, the image will be automatically "
79
+ "replaced with mlrun/mlrun."
80
+ )
81
+ warnings.warn(
82
+ message,
83
+ # TODO: Remove this in 1.12.0
84
+ FutureWarning,
85
+ )
86
+
63
87
  # TODO: need a better way to decide whether a function requires a build
64
88
  if require_build and image and not runtime.spec.build.base_image:
65
89
  # when the function require build use the image as the base_image for the build
mlrun/launcher/local.py CHANGED
@@ -13,6 +13,7 @@
13
13
  # limitations under the License.
14
14
  import os
15
15
  import pathlib
16
+ from os import environ
16
17
  from typing import Callable, Optional, Union
17
18
 
18
19
  import mlrun.common.constants as mlrun_constants
@@ -251,6 +252,9 @@ class ClientLocalLauncher(launcher.ClientBaseLauncher):
251
252
  # copy the code/base-spec to the local function (for the UI and code logging)
252
253
  fn.spec.description = runtime.spec.description
253
254
  fn.spec.build = runtime.spec.build
255
+ serving_spec = getattr(runtime.spec, "serving_spec", None)
256
+ if serving_spec:
257
+ environ["SERVING_SPEC_ENV"] = serving_spec
254
258
 
255
259
  run.spec.handler = handler
256
260
  run.spec.reset_on_run = reset_on_run
@@ -13,7 +13,6 @@
13
13
  # limitations under the License.
14
14
 
15
15
  import collections
16
- import json
17
16
  import traceback
18
17
  from collections import OrderedDict
19
18
  from datetime import datetime
@@ -23,10 +22,12 @@ import mlrun.common.schemas
23
22
  import mlrun.common.schemas.alert as alert_objects
24
23
  import mlrun.common.schemas.model_monitoring.constants as mm_constants
25
24
  import mlrun.model_monitoring.helpers
25
+ import mlrun.platforms.iguazio
26
26
  from mlrun.serving import GraphContext
27
27
  from mlrun.serving.utils import StepToDict
28
28
  from mlrun.utils import logger
29
29
 
30
+ from .base import _serialize_context_and_result
30
31
  from .context import MonitoringApplicationContext
31
32
  from .results import (
32
33
  ModelMonitoringApplicationMetric,
@@ -45,7 +46,7 @@ class _PushToMonitoringWriter(StepToDict):
45
46
  :param project: Project name.
46
47
  """
47
48
  self.project = project
48
- self.output_stream = None
49
+ self._output_stream = None
49
50
 
50
51
  def do(
51
52
  self,
@@ -65,48 +66,31 @@ class _PushToMonitoringWriter(StepToDict):
65
66
 
66
67
  :param event: Monitoring result(s) to push and the original event from the controller.
67
68
  """
68
- self._lazy_init()
69
69
  application_results, application_context = event
70
- writer_event = {
71
- mm_constants.WriterEvent.ENDPOINT_NAME: application_context.endpoint_name,
72
- mm_constants.WriterEvent.APPLICATION_NAME: application_context.application_name,
73
- mm_constants.WriterEvent.ENDPOINT_ID: application_context.endpoint_id,
74
- mm_constants.WriterEvent.START_INFER_TIME: application_context.start_infer_time.isoformat(
75
- sep=" ", timespec="microseconds"
76
- ),
77
- mm_constants.WriterEvent.END_INFER_TIME: application_context.end_infer_time.isoformat(
78
- sep=" ", timespec="microseconds"
79
- ),
80
- }
81
- for result in application_results:
82
- data = result.to_dict()
83
- if isinstance(result, ModelMonitoringApplicationResult):
84
- writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
85
- mm_constants.WriterEventKind.RESULT
86
- )
87
- elif isinstance(result, _ModelMonitoringApplicationStats):
88
- writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
89
- mm_constants.WriterEventKind.STATS
90
- )
91
- else:
92
- writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
93
- mm_constants.WriterEventKind.METRIC
94
- )
95
- writer_event[mm_constants.WriterEvent.DATA] = json.dumps(data)
96
- logger.debug(
97
- "Pushing data to output stream", writer_event=str(writer_event)
98
- )
99
- self.output_stream.push(
100
- [writer_event], partition_key=application_context.endpoint_id
101
- )
102
- logger.debug("Pushed data to output stream successfully")
103
70
 
104
- def _lazy_init(self):
105
- if self.output_stream is None:
106
- self.output_stream = mlrun.model_monitoring.helpers.get_output_stream(
71
+ writer_events = [
72
+ _serialize_context_and_result(context=application_context, result=result)
73
+ for result in application_results
74
+ ]
75
+
76
+ logger.debug("Pushing data to output stream", writer_events=str(writer_events))
77
+ self.output_stream.push(
78
+ writer_events, partition_key=application_context.endpoint_id
79
+ )
80
+ logger.debug("Pushed data to output stream successfully")
81
+
82
+ @property
83
+ def output_stream(
84
+ self,
85
+ ) -> Union[
86
+ mlrun.platforms.iguazio.OutputStream, mlrun.platforms.iguazio.KafkaOutputStream
87
+ ]:
88
+ if self._output_stream is None:
89
+ self._output_stream = mlrun.model_monitoring.helpers.get_output_stream(
107
90
  project=self.project,
108
91
  function_name=mm_constants.MonitoringFunctionNames.WRITER,
109
92
  )
93
+ return self._output_stream
110
94
 
111
95
 
112
96
  class _PrepareMonitoringEvent(StepToDict):
@@ -12,9 +12,12 @@
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
14
 
15
+ import json
15
16
  import socket
16
17
  from abc import ABC, abstractmethod
18
+ from collections import defaultdict
17
19
  from collections.abc import Iterator
20
+ from contextlib import contextmanager
18
21
  from datetime import datetime, timedelta
19
22
  from typing import Any, Optional, Union, cast
20
23
 
@@ -23,14 +26,58 @@ import pandas as pd
23
26
  import mlrun
24
27
  import mlrun.common.constants as mlrun_constants
25
28
  import mlrun.common.schemas.model_monitoring.constants as mm_constants
29
+ import mlrun.datastore.datastore_profile as ds_profile
26
30
  import mlrun.errors
27
31
  import mlrun.model_monitoring.api as mm_api
28
32
  import mlrun.model_monitoring.applications.context as mm_context
29
33
  import mlrun.model_monitoring.applications.results as mm_results
34
+ import mlrun.model_monitoring.helpers as mm_helpers
30
35
  from mlrun.serving.utils import MonitoringApplicationToDict
31
36
  from mlrun.utils import logger
32
37
 
33
38
 
39
+ def _serialize_context_and_result(
40
+ *,
41
+ context: mm_context.MonitoringApplicationContext,
42
+ result: Union[
43
+ mm_results.ModelMonitoringApplicationResult,
44
+ mm_results.ModelMonitoringApplicationMetric,
45
+ mm_results._ModelMonitoringApplicationStats,
46
+ ],
47
+ ) -> dict[mm_constants.WriterEvent, str]:
48
+ """
49
+ Serialize the returned result from a model monitoring application and its context
50
+ for the writer.
51
+ """
52
+ writer_event = {
53
+ mm_constants.WriterEvent.ENDPOINT_NAME: context.endpoint_name,
54
+ mm_constants.WriterEvent.APPLICATION_NAME: context.application_name,
55
+ mm_constants.WriterEvent.ENDPOINT_ID: context.endpoint_id,
56
+ mm_constants.WriterEvent.START_INFER_TIME: context.start_infer_time.isoformat(
57
+ sep=" ", timespec="microseconds"
58
+ ),
59
+ mm_constants.WriterEvent.END_INFER_TIME: context.end_infer_time.isoformat(
60
+ sep=" ", timespec="microseconds"
61
+ ),
62
+ }
63
+
64
+ if isinstance(result, mm_results.ModelMonitoringApplicationResult):
65
+ writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
66
+ mm_constants.WriterEventKind.RESULT
67
+ )
68
+ elif isinstance(result, mm_results._ModelMonitoringApplicationStats):
69
+ writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
70
+ mm_constants.WriterEventKind.STATS
71
+ )
72
+ else:
73
+ writer_event[mm_constants.WriterEvent.EVENT_KIND] = (
74
+ mm_constants.WriterEventKind.METRIC
75
+ )
76
+ writer_event[mm_constants.WriterEvent.DATA] = json.dumps(result.to_dict())
77
+
78
+ return writer_event
79
+
80
+
34
81
  class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
35
82
  """
36
83
  The base class for a model monitoring application.
@@ -118,6 +165,43 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
118
165
  ]
119
166
  return result
120
167
 
168
+ @staticmethod
169
+ @contextmanager
170
+ def _push_to_writer(
171
+ *,
172
+ write_output: bool,
173
+ stream_profile: Optional[ds_profile.DatastoreProfile],
174
+ ) -> Iterator[dict[str, list[tuple]]]:
175
+ endpoints_output: dict[str, list[tuple]] = defaultdict(list)
176
+ try:
177
+ yield endpoints_output
178
+ finally:
179
+ if write_output:
180
+ logger.debug(
181
+ "Pushing model monitoring application job data to the writer stream",
182
+ passed_stream_profile=str(stream_profile),
183
+ )
184
+ project_name = (
185
+ mlrun.mlconf.active_project or mlrun.get_current_project().name
186
+ )
187
+ writer_stream = mm_helpers.get_output_stream(
188
+ project=project_name,
189
+ function_name=mm_constants.MonitoringFunctionNames.WRITER,
190
+ profile=stream_profile,
191
+ )
192
+ for endpoint_id, outputs in endpoints_output.items():
193
+ writer_stream.push(
194
+ [
195
+ _serialize_context_and_result(context=ctx, result=res)
196
+ for ctx, res in outputs
197
+ ],
198
+ partition_key=endpoint_id,
199
+ )
200
+ logger.debug(
201
+ "Pushed the data to all the relevant model endpoints successfully",
202
+ endpoints_output=endpoints_output,
203
+ )
204
+
121
205
  def _handler(
122
206
  self,
123
207
  context: "mlrun.MLClientCtx",
@@ -127,6 +211,8 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
127
211
  start: Optional[str] = None,
128
212
  end: Optional[str] = None,
129
213
  base_period: Optional[int] = None,
214
+ write_output: bool = False,
215
+ stream_profile: Optional[ds_profile.DatastoreProfile] = None,
130
216
  ):
131
217
  """
132
218
  A custom handler that wraps the application's logic implemented in
@@ -134,46 +220,69 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
134
220
  for an MLRun job.
135
221
  This method should not be called directly.
136
222
  """
223
+
224
+ if write_output and (
225
+ not endpoints or sample_data is not None or reference_data is not None
226
+ ):
227
+ raise mlrun.errors.MLRunValueError(
228
+ "Writing the results of an application to the TSDB is possible only when "
229
+ "working with endpoints, without any custom data-frame input"
230
+ )
231
+
137
232
  feature_stats = (
138
233
  mm_api.get_sample_set_statistics(reference_data)
139
234
  if reference_data is not None
140
235
  else None
141
236
  )
142
237
 
143
- def call_do_tracking(event: Optional[dict] = None):
144
- if event is None:
145
- event = {}
146
- monitoring_context = mm_context.MonitoringApplicationContext._from_ml_ctx(
147
- event=event,
148
- application_name=self.__class__.__name__,
149
- context=context,
150
- sample_df=sample_data,
151
- feature_stats=feature_stats,
152
- )
153
- return self.do_tracking(monitoring_context)
154
-
155
- if endpoints is not None:
156
- for window_start, window_end in self._window_generator(
157
- start, end, base_period
158
- ):
159
- for endpoint_name, endpoint_id in endpoints:
160
- result = call_do_tracking(
161
- event={
162
- mm_constants.ApplicationEvent.ENDPOINT_NAME: endpoint_name,
163
- mm_constants.ApplicationEvent.ENDPOINT_ID: endpoint_id,
164
- mm_constants.ApplicationEvent.START_INFER_TIME: window_start,
165
- mm_constants.ApplicationEvent.END_INFER_TIME: window_end,
166
- }
167
- )
168
- result_key = (
169
- f"{endpoint_name}-{endpoint_id}_{window_start.isoformat()}_{window_end.isoformat()}"
170
- if window_start and window_end
171
- else f"{endpoint_name}-{endpoint_id}"
238
+ with self._push_to_writer(
239
+ write_output=write_output, stream_profile=stream_profile
240
+ ) as endpoints_output:
241
+
242
+ def call_do_tracking(event: Optional[dict] = None):
243
+ nonlocal endpoints_output
244
+
245
+ if event is None:
246
+ event = {}
247
+ monitoring_context = (
248
+ mm_context.MonitoringApplicationContext._from_ml_ctx(
249
+ event=event,
250
+ application_name=self.__class__.__name__,
251
+ context=context,
252
+ sample_df=sample_data,
253
+ feature_stats=feature_stats,
172
254
  )
255
+ )
256
+ result = self.do_tracking(monitoring_context)
257
+ endpoints_output[monitoring_context.endpoint_id].append(
258
+ (monitoring_context, result)
259
+ )
260
+ return result
261
+
262
+ if endpoints is not None:
263
+ for window_start, window_end in self._window_generator(
264
+ start, end, base_period
265
+ ):
266
+ for endpoint_name, endpoint_id in endpoints:
267
+ result = call_do_tracking(
268
+ event={
269
+ mm_constants.ApplicationEvent.ENDPOINT_NAME: endpoint_name,
270
+ mm_constants.ApplicationEvent.ENDPOINT_ID: endpoint_id,
271
+ mm_constants.ApplicationEvent.START_INFER_TIME: window_start,
272
+ mm_constants.ApplicationEvent.END_INFER_TIME: window_end,
273
+ }
274
+ )
275
+ result_key = (
276
+ f"{endpoint_name}-{endpoint_id}_{window_start.isoformat()}_{window_end.isoformat()}"
277
+ if window_start and window_end
278
+ else f"{endpoint_name}-{endpoint_id}"
279
+ )
173
280
 
174
- context.log_result(result_key, self._flatten_data_result(result))
175
- else:
176
- return self._flatten_data_result(call_do_tracking())
281
+ context.log_result(
282
+ result_key, self._flatten_data_result(result)
283
+ )
284
+ else:
285
+ return self._flatten_data_result(call_do_tracking())
177
286
 
178
287
  @staticmethod
179
288
  def _handle_endpoints_type_evaluate(
@@ -338,6 +447,7 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
338
447
  * ``start``, ``datetime``
339
448
  * ``end``, ``datetime``
340
449
  * ``base_period``, ``int``
450
+ * ``write_output``, ``bool``
341
451
 
342
452
  For Git sources, add the source archive to the returned job and change the handler:
343
453
 
@@ -420,6 +530,8 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
420
530
  start: Optional[datetime] = None,
421
531
  end: Optional[datetime] = None,
422
532
  base_period: Optional[int] = None,
533
+ write_output: bool = False,
534
+ stream_profile: Optional[ds_profile.DatastoreProfile] = None,
423
535
  ) -> "mlrun.RunObject":
424
536
  """
425
537
  Call this function to run the application's
@@ -470,6 +582,14 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
470
582
  ..., (\\operatorname{start} +
471
583
  m\\cdot\\operatorname{base\\_period}, \\operatorname{end}]`,
472
584
  where :math:`m` is some positive integer.
585
+ :param write_output: Whether to write the results and metrics to the time-series DB. Can be ``True`` only
586
+ if ``endpoints`` are passed.
587
+ Note: the model monitoring infrastructure must be up for the writing to work.
588
+ :param stream_profile: The stream datastore profile. It should be provided only when running locally and
589
+ writing the outputs to the database (i.e., when both ``run_local`` and
590
+ ``write_output`` are set to ``True``).
591
+ For more details on configuring the stream profile, see
592
+ :py:meth:`~mlrun.projects.MlrunProject.set_model_monitoring_credentials`.
473
593
 
474
594
  :returns: The output of the
475
595
  :py:meth:`~mlrun.model_monitoring.applications.ModelMonitoringApplicationBase.do_tracking`
@@ -507,10 +627,25 @@ class ModelMonitoringApplicationBase(MonitoringApplicationToDict, ABC):
507
627
  )
508
628
  params["end"] = end.isoformat() if isinstance(end, datetime) else end
509
629
  params["base_period"] = base_period
630
+ params["write_output"] = write_output
631
+ if stream_profile:
632
+ if not run_local:
633
+ raise mlrun.errors.MLRunValueError(
634
+ "Passing a `stream_profile` is relevant only when running locally"
635
+ )
636
+ if not write_output:
637
+ raise mlrun.errors.MLRunValueError(
638
+ "Passing a `stream_profile` is relevant only when writing the outputs"
639
+ )
640
+ params["stream_profile"] = stream_profile
510
641
  elif start or end or base_period:
511
642
  raise mlrun.errors.MLRunValueError(
512
643
  "Custom `start` and `end` times or base_period are supported only with endpoints data"
513
644
  )
645
+ elif write_output or stream_profile:
646
+ raise mlrun.errors.MLRunValueError(
647
+ "Writing the application output or passing `stream_profile` are supported only with endpoints data"
648
+ )
514
649
 
515
650
  inputs: dict[str, str] = {}
516
651
  for data, identifier in [
@@ -328,6 +328,36 @@ class TSDBConnector(ABC):
328
328
  If an endpoint has not been invoked within the specified time range, it will not appear in the result.
329
329
  """
330
330
 
331
+ @abstractmethod
332
+ def count_results_by_status(
333
+ self,
334
+ start: Optional[Union[datetime, str]] = None,
335
+ end: Optional[Union[datetime, str]] = None,
336
+ endpoint_ids: Optional[Union[str, list[str]]] = None,
337
+ application_names: Optional[Union[str, list[str]]] = None,
338
+ result_status_list: Optional[list[int]] = None,
339
+ ) -> dict[tuple[str, int], int]:
340
+ """
341
+ Read results status from the TSDB and return a dictionary of results statuses by application name.
342
+
343
+ :param start: The start time in which to read the results. By default, the last 24 hours are read.
344
+ :param end: The end time in which to read the results. Default is the current time (now).
345
+ :param endpoint_ids: Optional list of endpoint ids to filter the results by. By default, all
346
+ endpoint ids are included.
347
+ :param application_names: Optional list of application names to filter the results by. By default, all
348
+ applications are included.
349
+ :param result_status_list: Optional list of result statuses to filter the results by. By default, all
350
+ result statuses are included.
351
+
352
+ :return: A dictionary where the key is a tuple of (application_name, result_status) and the value is the total
353
+ number of results with that status for that application.
354
+ For example:
355
+ {
356
+ ('app1', 1): 10,
357
+ ('app1', 2): 5
358
+ }
359
+ """
360
+
331
361
  async def add_basic_metrics(
332
362
  self,
333
363
  model_endpoint_objects: list[mlrun.common.schemas.ModelEndpoint],